author	Bjoern Brandenburg <bbb@bbb1-cs.cs.unc.edu>	2010-05-19 15:58:28 -0400
committer	Bjoern Brandenburg <bbb@bbb1-cs.cs.unc.edu>	2010-05-19 15:58:28 -0400
commit	0423f5fb58e1712f1ca2b6a8c97f3e625b684406 (patch)
tree	95b54e2292be4860f9c1e33f328c782bef037928
parent	fd221f83a619ce00eda82d0fbf7ebd7bdf40bab0 (diff)
update final release software
-rw-r--r--	download/2010.1/SHA256SUMS	7
-rw-r--r--	download/2010.1/ft_tools-2010.1.tgz	bin 0 -> 5577 bytes
-rw-r--r--	download/2010.1/liblitmus-2010.1.tgz	bin 232635 -> 17633 bytes
-rw-r--r--	download/2010.1/litmus-rt-2010.1.patch	29507
4 files changed, 618 insertions, 28896 deletions
diff --git a/download/2010.1/SHA256SUMS b/download/2010.1/SHA256SUMS
index e095f25..e96ef1b 100644
--- a/download/2010.1/SHA256SUMS
+++ b/download/2010.1/SHA256SUMS
@@ -1,4 +1,7 @@
+0cff900f43667fec7682fd7af83177a3d36c45514979a1c534fee4aa89a5b390  ft_tools-2010.1.tgz
+8629ba294c695d9d33283eb0748aa533ac7a880b8d2773925434cb61475267df  liblitmus-2010.1.tgz
+7e2fa2b43d43f96ab11e3bb9136cd3f1528d32f023df5136dcde2e5c9ddc4c60  litmus-rt-2010.1.patch
 26b2aa111452e31acffbb866fd9b66058aa640220e8b7d30c103be8ed96b5751  32bit-config
 91fbdbd565c02cfb2f0d69f9dbdfde0b4b401fcaba04f4af24d8b6cf61046aa2  64bit-config
-e3e99958a8872403e206b380c2be2be118495373361436d436a706e0bdd2df79  liblitmus-2010.1.tgz
-79753deefdfdb3f37341c95c92efc520fd4c840fc54c323c35c1f8fe65d8840e  litmus-rt-2010.1.patch
+666a1a8d4c6e00c92178031fea1b1ee49e9c04f9347fb8e8d369772d4fede8b9  liblitmus-2010-config
+…
diff --git a/download/2010.1/ft_tools-2010.1.tgz b/download/2010.1/ft_tools-2010.1.tgz
new file mode 100644
index 0000000..c49f228
--- /dev/null
+++ b/download/2010.1/ft_tools-2010.1.tgz
Binary files differ
diff --git a/download/2010.1/liblitmus-2010.1.tgz b/download/2010.1/liblitmus-2010.1.tgz
index 58302f9..9c92dcb 100644
--- a/download/2010.1/liblitmus-2010.1.tgz
+++ b/download/2010.1/liblitmus-2010.1.tgz
Binary files differ
diff --git a/download/2010.1/litmus-rt-2010.1.patch b/download/2010.1/litmus-rt-2010.1.patch
index 9df4716..50013fb 100644
--- a/download/2010.1/litmus-rt-2010.1.patch
+++ b/download/2010.1/litmus-rt-2010.1.patch
@@ -1,12 +1,86 @@
+ Makefile                                |    4 +-
+ arch/x86/Kconfig                        |    2 +
+ arch/x86/include/asm/entry_arch.h       |    1 +
+ arch/x86/include/asm/feather_trace.h    |   17 +
+ arch/x86/include/asm/feather_trace_32.h |   80 +++
+ arch/x86/include/asm/feather_trace_64.h |   69 +++
+ arch/x86/include/asm/hw_irq.h           |    3 +
+ arch/x86/include/asm/irq_vectors.h      |    5 +
+ arch/x86/include/asm/processor.h        |    2 +
+ arch/x86/include/asm/unistd_32.h        |    6 +-
+ arch/x86/include/asm/unistd_64.h        |    4 +
+ arch/x86/kernel/Makefile                |    2 +
+ arch/x86/kernel/cpu/intel_cacheinfo.c   |   17 +
+ arch/x86/kernel/entry_64.S              |    2 +
+ arch/x86/kernel/ft_event.c              |  112 ++++
+ arch/x86/kernel/irqinit.c               |    3 +
+ arch/x86/kernel/smp.c                   |   28 +
+ arch/x86/kernel/syscall_table_32.S      |   14 +
+ fs/exec.c                               |   13 +-
+ fs/inode.c                              |    2 +
+ include/linux/completion.h              |    1 +
+ include/linux/fs.h                      |   21 +-
+ include/linux/hrtimer.h                 |   25 +
+ include/linux/sched.h                   |   17 +-
+ include/linux/smp.h                     |    5 +
+ include/linux/tick.h                    |    5 +
+ include/litmus/bheap.h                  |   77 +++
+ include/litmus/edf_common.h             |   27 +
+ include/litmus/fdso.h                   |   69 +++
+ include/litmus/feather_buffer.h         |   94 ++++
+ include/litmus/feather_trace.h          |   49 ++
+ include/litmus/ftdev.h                  |   49 ++
+ include/litmus/jobs.h                   |    9 +
+ include/litmus/litmus.h                 |  252 +++++++++
+ include/litmus/rt_domain.h              |  162 ++++++
+ include/litmus/rt_param.h               |  189 +++++++
+ include/litmus/sched_plugin.h           |  162 ++++++
+ include/litmus/sched_trace.h            |  192 +++++++
+ include/litmus/trace.h                  |  113 ++++
+ include/litmus/unistd_32.h              |   23 +
+ include/litmus/unistd_64.h              |   37 ++
+ kernel/exit.c                           |    4 +
+ kernel/fork.c                           |    7 +
+ kernel/hrtimer.c                        |   82 +++
+ kernel/printk.c                         |   14 +-
+ kernel/sched.c                          |  109 ++++-
+ kernel/sched_fair.c                     |    2 +-
+ kernel/sched_rt.c                       |    2 +-
+ kernel/time/tick-sched.c                |   48 ++-
+ litmus/Kconfig                          |   85 +++
+ litmus/Makefile                         |   23 +
+ litmus/bheap.c                          |  314 +++++++++++
+ litmus/ctrldev.c                        |  150 +++++
+ litmus/edf_common.c                     |  102 ++++
+ litmus/fdso.c                           |  281 ++++++++++
+ litmus/fmlp.c                           |  268 +++++++++
+ litmus/ft_event.c                       |   43 ++
+ litmus/ftdev.c                          |  359 +++++++++++++
+ litmus/jobs.c                           |   43 ++
+ litmus/litmus.c                         |  775 ++++++++++++++++++++++++++
+ litmus/rt_domain.c                      |  310 +++++++++++
+ litmus/sched_cedf.c                     |  756 ++++++++++++++++++++++++++
+ litmus/sched_gsn_edf.c                  |  828 ++++++++++++++++++++++++++++
+ litmus/sched_litmus.c                   |  318 +++++++++++
+ litmus/sched_pfair.c                    |  896 +++++++++++++++++++++++++++++++
+ litmus/sched_plugin.c                   |  265 +++++++++
+ litmus/sched_psn_edf.c                  |  478 ++++++++++++++++
+ litmus/sched_task_trace.c               |  204 +++++++
+ litmus/sched_trace.c                    |  378 +++++++++++++
+ litmus/srp.c                            |  318 +++++++++++
+ litmus/sync.c                           |  104 ++++
+ litmus/trace.c                          |  103 ++++
+ 72 files changed, 9596 insertions(+), 37 deletions(-)
+
 diff --git a/Makefile b/Makefile
-index ec932b2..2603066 100644
+index f5cdb72..2603066 100644
 --- a/Makefile
 +++ b/Makefile
 @@ -1,7 +1,7 @@
  VERSION = 2
  PATCHLEVEL = 6
  SUBLEVEL = 32
--EXTRAVERSION = .9
+-EXTRAVERSION =
 +EXTRAVERSION =-litmus2010
  NAME = Man-Eating Seals of Antiquity
 
@@ -20,2937 +94,16 @@ index ec932b2..2603066 100644
 
  vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
  		$(core-y) $(core-m) $(drivers-y) $(drivers-m) \
23diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
24index 62619f2..9a3334a 100644
25--- a/arch/alpha/kernel/osf_sys.c
26+++ b/arch/alpha/kernel/osf_sys.c
27@@ -178,18 +178,25 @@ SYSCALL_DEFINE6(osf_mmap, unsigned long, addr, unsigned long, len,
28 unsigned long, prot, unsigned long, flags, unsigned long, fd,
29 unsigned long, off)
30 {
31- unsigned long ret = -EINVAL;
32+ struct file *file = NULL;
33+ unsigned long ret = -EBADF;
34
35 #if 0
36 if (flags & (_MAP_HASSEMAPHORE | _MAP_INHERIT | _MAP_UNALIGNED))
37 printk("%s: unimplemented OSF mmap flags %04lx\n",
38 current->comm, flags);
39 #endif
40- if ((off + PAGE_ALIGN(len)) < off)
41- goto out;
42- if (off & ~PAGE_MASK)
43- goto out;
44- ret = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
45+ if (!(flags & MAP_ANONYMOUS)) {
46+ file = fget(fd);
47+ if (!file)
48+ goto out;
49+ }
50+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
51+ down_write(&current->mm->mmap_sem);
52+ ret = do_mmap(file, addr, len, prot, flags, off);
53+ up_write(&current->mm->mmap_sem);
54+ if (file)
55+ fput(file);
56 out:
57 return ret;
58 }
59diff --git a/arch/arm/include/asm/mman.h b/arch/arm/include/asm/mman.h
60index 41f99c5..8eebf89 100644
61--- a/arch/arm/include/asm/mman.h
62+++ b/arch/arm/include/asm/mman.h
63@@ -1,4 +1 @@
64 #include <asm-generic/mman.h>
65-
66-#define arch_mmap_check(addr, len, flags) \
67- (((flags) & MAP_FIXED && (addr) < FIRST_USER_ADDRESS) ? -EINVAL : 0)
68diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
69index 4f07168..fafce1b 100644
70--- a/arch/arm/kernel/calls.S
71+++ b/arch/arm/kernel/calls.S
72@@ -172,7 +172,7 @@
73 /* 160 */ CALL(sys_sched_get_priority_min)
74 CALL(sys_sched_rr_get_interval)
75 CALL(sys_nanosleep)
76- CALL(sys_mremap)
77+ CALL(sys_arm_mremap)
78 CALL(sys_setresuid16)
79 /* 165 */ CALL(sys_getresuid16)
80 CALL(sys_ni_syscall) /* vm86 */
81diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
82index 2c1db77..f0fe95b 100644
83--- a/arch/arm/kernel/entry-common.S
84+++ b/arch/arm/kernel/entry-common.S
85@@ -416,12 +416,12 @@ sys_mmap2:
86 tst r5, #PGOFF_MASK
87 moveq r5, r5, lsr #PAGE_SHIFT - 12
88 streq r5, [sp, #4]
89- beq sys_mmap_pgoff
90+ beq do_mmap2
91 mov r0, #-EINVAL
92 mov pc, lr
93 #else
94 str r5, [sp, #4]
95- b sys_mmap_pgoff
96+ b do_mmap2
97 #endif
98 ENDPROC(sys_mmap2)
99
100diff --git a/arch/arm/kernel/sys_arm.c b/arch/arm/kernel/sys_arm.c
101index ae4027b..78ecaac 100644
102--- a/arch/arm/kernel/sys_arm.c
103+++ b/arch/arm/kernel/sys_arm.c
104@@ -28,6 +28,41 @@
105 #include <linux/ipc.h>
106 #include <linux/uaccess.h>
107
108+extern unsigned long do_mremap(unsigned long addr, unsigned long old_len,
109+ unsigned long new_len, unsigned long flags,
110+ unsigned long new_addr);
111+
112+/* common code for old and new mmaps */
113+inline long do_mmap2(
114+ unsigned long addr, unsigned long len,
115+ unsigned long prot, unsigned long flags,
116+ unsigned long fd, unsigned long pgoff)
117+{
118+ int error = -EINVAL;
119+ struct file * file = NULL;
120+
121+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
122+
123+ if (flags & MAP_FIXED && addr < FIRST_USER_ADDRESS)
124+ goto out;
125+
126+ error = -EBADF;
127+ if (!(flags & MAP_ANONYMOUS)) {
128+ file = fget(fd);
129+ if (!file)
130+ goto out;
131+ }
132+
133+ down_write(&current->mm->mmap_sem);
134+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
135+ up_write(&current->mm->mmap_sem);
136+
137+ if (file)
138+ fput(file);
139+out:
140+ return error;
141+}
142+
143 struct mmap_arg_struct {
144 unsigned long addr;
145 unsigned long len;
146@@ -49,11 +84,29 @@ asmlinkage int old_mmap(struct mmap_arg_struct __user *arg)
147 if (a.offset & ~PAGE_MASK)
148 goto out;
149
150- error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
151+ error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
152 out:
153 return error;
154 }
155
156+asmlinkage unsigned long
157+sys_arm_mremap(unsigned long addr, unsigned long old_len,
158+ unsigned long new_len, unsigned long flags,
159+ unsigned long new_addr)
160+{
161+ unsigned long ret = -EINVAL;
162+
163+ if (flags & MREMAP_FIXED && new_addr < FIRST_USER_ADDRESS)
164+ goto out;
165+
166+ down_write(&current->mm->mmap_sem);
167+ ret = do_mremap(addr, old_len, new_len, flags, new_addr);
168+ up_write(&current->mm->mmap_sem);
169+
170+out:
171+ return ret;
172+}
173+
174 /*
175 * Perform the select(nd, in, out, ex, tv) and mmap() system
176 * calls.
177diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c
178index 36e4fb4..0976049 100644
179--- a/arch/arm/mach-davinci/dm646x.c
180+++ b/arch/arm/mach-davinci/dm646x.c
181@@ -789,14 +789,7 @@ static struct davinci_id dm646x_ids[] = {
182 .part_no = 0xb770,
183 .manufacturer = 0x017,
184 .cpu_id = DAVINCI_CPU_ID_DM6467,
185- .name = "dm6467_rev1.x",
186- },
187- {
188- .variant = 0x1,
189- .part_no = 0xb770,
190- .manufacturer = 0x017,
191- .cpu_id = DAVINCI_CPU_ID_DM6467,
192- .name = "dm6467_rev3.x",
193+ .name = "dm6467",
194 },
195 };
196
197diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c
198index 86a8732..aec7f42 100644
199--- a/arch/arm/mach-pxa/em-x270.c
200+++ b/arch/arm/mach-pxa/em-x270.c
201@@ -497,15 +497,16 @@ static int em_x270_usb_hub_init(void)
202 goto err_free_vbus_gpio;
203
204 /* USB Hub power-on and reset */
205- gpio_direction_output(usb_hub_reset, 1);
206- gpio_direction_output(GPIO9_USB_VBUS_EN, 0);
207+ gpio_direction_output(usb_hub_reset, 0);
208 regulator_enable(em_x270_usb_ldo);
209- gpio_set_value(usb_hub_reset, 0);
210 gpio_set_value(usb_hub_reset, 1);
211+ gpio_set_value(usb_hub_reset, 0);
212 regulator_disable(em_x270_usb_ldo);
213 regulator_enable(em_x270_usb_ldo);
214- gpio_set_value(usb_hub_reset, 0);
215- gpio_set_value(GPIO9_USB_VBUS_EN, 1);
216+ gpio_set_value(usb_hub_reset, 1);
217+
218+ /* enable VBUS */
219+ gpio_direction_output(GPIO9_USB_VBUS_EN, 1);
220
221 return 0;
222
223diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
224index f5abc51..2b79964 100644
225--- a/arch/arm/mm/mmap.c
226+++ b/arch/arm/mm/mmap.c
227@@ -54,8 +54,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
228 * We enforce the MAP_FIXED case.
229 */
230 if (flags & MAP_FIXED) {
231- if (aliasing && flags & MAP_SHARED &&
232- (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
233+ if (aliasing && flags & MAP_SHARED && addr & (SHMLBA - 1))
234 return -EINVAL;
235 return addr;
236 }
237diff --git a/arch/avr32/include/asm/syscalls.h b/arch/avr32/include/asm/syscalls.h
238index 66a1972..483d666 100644
239--- a/arch/avr32/include/asm/syscalls.h
240+++ b/arch/avr32/include/asm/syscalls.h
241@@ -29,6 +29,10 @@ asmlinkage int sys_sigaltstack(const stack_t __user *, stack_t __user *,
242 struct pt_regs *);
243 asmlinkage int sys_rt_sigreturn(struct pt_regs *);
244
245+/* kernel/sys_avr32.c */
246+asmlinkage long sys_mmap2(unsigned long, unsigned long, unsigned long,
247+ unsigned long, unsigned long, off_t);
248+
249 /* mm/cache.c */
250 asmlinkage int sys_cacheflush(int, void __user *, size_t);
251
252diff --git a/arch/avr32/kernel/sys_avr32.c b/arch/avr32/kernel/sys_avr32.c
253index 459349b..5d2daea 100644
254--- a/arch/avr32/kernel/sys_avr32.c
255+++ b/arch/avr32/kernel/sys_avr32.c
256@@ -5,8 +5,39 @@
257 * it under the terms of the GNU General Public License version 2 as
258 * published by the Free Software Foundation.
259 */
260+#include <linux/errno.h>
261+#include <linux/fs.h>
262+#include <linux/file.h>
263+#include <linux/mm.h>
264 #include <linux/unistd.h>
265
266+#include <asm/mman.h>
267+#include <asm/uaccess.h>
268+#include <asm/syscalls.h>
269+
270+asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
271+ unsigned long prot, unsigned long flags,
272+ unsigned long fd, off_t offset)
273+{
274+ int error = -EBADF;
275+ struct file *file = NULL;
276+
277+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
278+ if (!(flags & MAP_ANONYMOUS)) {
279+ file = fget(fd);
280+ if (!file)
281+ return error;
282+ }
283+
284+ down_write(&current->mm->mmap_sem);
285+ error = do_mmap_pgoff(file, addr, len, prot, flags, offset);
286+ up_write(&current->mm->mmap_sem);
287+
288+ if (file)
289+ fput(file);
290+ return error;
291+}
292+
293 int kernel_execve(const char *file, char **argv, char **envp)
294 {
295 register long scno asm("r8") = __NR_execve;
296diff --git a/arch/avr32/kernel/syscall-stubs.S b/arch/avr32/kernel/syscall-stubs.S
297index 0447a3e..f7244cd 100644
298--- a/arch/avr32/kernel/syscall-stubs.S
299+++ b/arch/avr32/kernel/syscall-stubs.S
300@@ -61,7 +61,7 @@ __sys_execve:
301 __sys_mmap2:
302 pushm lr
303 st.w --sp, ARG6
304- call sys_mmap_pgoff
305+ call sys_mmap2
306 sub sp, -4
307 popm pc
308
309diff --git a/arch/blackfin/include/asm/page.h b/arch/blackfin/include/asm/page.h
310index 1d04e40..944a07c 100644
311--- a/arch/blackfin/include/asm/page.h
312+++ b/arch/blackfin/include/asm/page.h
313@@ -10,9 +10,4 @@
314 #include <asm-generic/page.h>
315 #define MAP_NR(addr) (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT)
316
317-#define VM_DATA_DEFAULT_FLAGS \
318- (VM_READ | VM_WRITE | \
319- ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
320- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
321-
322 #endif
323diff --git a/arch/blackfin/kernel/sys_bfin.c b/arch/blackfin/kernel/sys_bfin.c
324index 2e7f8e1..afcef12 100644
325--- a/arch/blackfin/kernel/sys_bfin.c
326+++ b/arch/blackfin/kernel/sys_bfin.c
327@@ -22,6 +22,39 @@
328 #include <asm/cacheflush.h>
329 #include <asm/dma.h>
330
331+/* common code for old and new mmaps */
332+static inline long
333+do_mmap2(unsigned long addr, unsigned long len,
334+ unsigned long prot, unsigned long flags,
335+ unsigned long fd, unsigned long pgoff)
336+{
337+ int error = -EBADF;
338+ struct file *file = NULL;
339+
340+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
341+ if (!(flags & MAP_ANONYMOUS)) {
342+ file = fget(fd);
343+ if (!file)
344+ goto out;
345+ }
346+
347+ down_write(&current->mm->mmap_sem);
348+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
349+ up_write(&current->mm->mmap_sem);
350+
351+ if (file)
352+ fput(file);
353+ out:
354+ return error;
355+}
356+
357+asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
358+ unsigned long prot, unsigned long flags,
359+ unsigned long fd, unsigned long pgoff)
360+{
361+ return do_mmap2(addr, len, prot, flags, fd, pgoff);
362+}
363+
364 asmlinkage void *sys_sram_alloc(size_t size, unsigned long flags)
365 {
366 return sram_alloc_with_lsl(size, flags);
367diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
368index 1d8f00a..94a0375 100644
369--- a/arch/blackfin/mach-common/entry.S
370+++ b/arch/blackfin/mach-common/entry.S
371@@ -1422,7 +1422,7 @@ ENTRY(_sys_call_table)
372 .long _sys_ni_syscall /* streams2 */
373 .long _sys_vfork /* 190 */
374 .long _sys_getrlimit
375- .long _sys_mmap_pgoff
376+ .long _sys_mmap2
377 .long _sys_truncate64
378 .long _sys_ftruncate64
379 .long _sys_stat64 /* 195 */
380diff --git a/arch/cris/kernel/sys_cris.c b/arch/cris/kernel/sys_cris.c
381index c2bbb1a..2ad962c 100644
382--- a/arch/cris/kernel/sys_cris.c
383+++ b/arch/cris/kernel/sys_cris.c
384@@ -26,6 +26,31 @@
385 #include <asm/uaccess.h>
386 #include <asm/segment.h>
387
388+/* common code for old and new mmaps */
389+static inline long
390+do_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
391+ unsigned long flags, unsigned long fd, unsigned long pgoff)
392+{
393+ int error = -EBADF;
394+ struct file * file = NULL;
395+
396+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
397+ if (!(flags & MAP_ANONYMOUS)) {
398+ file = fget(fd);
399+ if (!file)
400+ goto out;
401+ }
402+
403+ down_write(&current->mm->mmap_sem);
404+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
405+ up_write(&current->mm->mmap_sem);
406+
407+ if (file)
408+ fput(file);
409+out:
410+ return error;
411+}
412+
413 asmlinkage unsigned long old_mmap(unsigned long __user *args)
414 {
415 unsigned long buffer[6];
416@@ -38,7 +63,7 @@ asmlinkage unsigned long old_mmap(unsigned long __user *args)
417 if (buffer[5] & ~PAGE_MASK) /* verify that offset is on page boundary */
418 goto out;
419
420- err = sys_mmap_pgoff(buffer[0], buffer[1], buffer[2], buffer[3],
421+ err = do_mmap2(buffer[0], buffer[1], buffer[2], buffer[3],
422 buffer[4], buffer[5] >> PAGE_SHIFT);
423 out:
424 return err;
425@@ -48,8 +73,7 @@ asmlinkage long
426 sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
427 unsigned long flags, unsigned long fd, unsigned long pgoff)
428 {
429- /* bug(?): 8Kb pages here */
430- return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
431+ return do_mmap2(addr, len, prot, flags, fd, pgoff);
432 }
433
434 /*
435diff --git a/arch/frv/include/asm/page.h b/arch/frv/include/asm/page.h
436index 8c97068..25c6a50 100644
437--- a/arch/frv/include/asm/page.h
438+++ b/arch/frv/include/asm/page.h
439@@ -63,10 +63,12 @@ extern unsigned long max_pfn;
440 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
441
442
443+#ifdef CONFIG_MMU
444 #define VM_DATA_DEFAULT_FLAGS \
445 (VM_READ | VM_WRITE | \
446 ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
447 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
448+#endif
449
450 #endif /* __ASSEMBLY__ */
451
452diff --git a/arch/frv/kernel/sys_frv.c b/arch/frv/kernel/sys_frv.c
453index 1d3d4c9..2b6b528 100644
454--- a/arch/frv/kernel/sys_frv.c
455+++ b/arch/frv/kernel/sys_frv.c
456@@ -31,6 +31,9 @@ asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
457 unsigned long prot, unsigned long flags,
458 unsigned long fd, unsigned long pgoff)
459 {
460+ int error = -EBADF;
461+ struct file * file = NULL;
462+
463 /* As with sparc32, make sure the shift for mmap2 is constant
464 (12), no matter what PAGE_SIZE we have.... */
465
466@@ -38,10 +41,69 @@ asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
467 trying to map something we can't */
468 if (pgoff & ((1 << (PAGE_SHIFT - 12)) - 1))
469 return -EINVAL;
470+ pgoff >>= PAGE_SHIFT - 12;
471+
472+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
473+ if (!(flags & MAP_ANONYMOUS)) {
474+ file = fget(fd);
475+ if (!file)
476+ goto out;
477+ }
478+
479+ down_write(&current->mm->mmap_sem);
480+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
481+ up_write(&current->mm->mmap_sem);
482+
483+ if (file)
484+ fput(file);
485+out:
486+ return error;
487+}
488+
489+#if 0 /* DAVIDM - do we want this */
490+struct mmap_arg_struct64 {
491+ __u32 addr;
492+ __u32 len;
493+ __u32 prot;
494+ __u32 flags;
495+ __u64 offset; /* 64 bits */
496+ __u32 fd;
497+};
498+
499+asmlinkage long sys_mmap64(struct mmap_arg_struct64 *arg)
500+{
501+ int error = -EFAULT;
502+ struct file * file = NULL;
503+ struct mmap_arg_struct64 a;
504+ unsigned long pgoff;
505+
506+ if (copy_from_user(&a, arg, sizeof(a)))
507+ return -EFAULT;
508+
509+ if ((long)a.offset & ~PAGE_MASK)
510+ return -EINVAL;
511+
512+ pgoff = a.offset >> PAGE_SHIFT;
513+ if ((a.offset >> PAGE_SHIFT) != pgoff)
514+ return -EINVAL;
515+
516+ if (!(a.flags & MAP_ANONYMOUS)) {
517+ error = -EBADF;
518+ file = fget(a.fd);
519+ if (!file)
520+ goto out;
521+ }
522+ a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
523
524- return sys_mmap_pgoff(addr, len, prot, flags, fd,
525- pgoff >> (PAGE_SHIFT - 12));
526+ down_write(&current->mm->mmap_sem);
527+ error = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags, pgoff);
528+ up_write(&current->mm->mmap_sem);
529+ if (file)
530+ fput(file);
531+out:
532+ return error;
533 }
534+#endif
535
536 /*
537 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
538diff --git a/arch/h8300/kernel/sys_h8300.c b/arch/h8300/kernel/sys_h8300.c
539index b5969db..8cb5d73 100644
540--- a/arch/h8300/kernel/sys_h8300.c
541+++ b/arch/h8300/kernel/sys_h8300.c
542@@ -26,6 +26,39 @@
543 #include <asm/traps.h>
544 #include <asm/unistd.h>
545
546+/* common code for old and new mmaps */
547+static inline long do_mmap2(
548+ unsigned long addr, unsigned long len,
549+ unsigned long prot, unsigned long flags,
550+ unsigned long fd, unsigned long pgoff)
551+{
552+ int error = -EBADF;
553+ struct file * file = NULL;
554+
555+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
556+ if (!(flags & MAP_ANONYMOUS)) {
557+ file = fget(fd);
558+ if (!file)
559+ goto out;
560+ }
561+
562+ down_write(&current->mm->mmap_sem);
563+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
564+ up_write(&current->mm->mmap_sem);
565+
566+ if (file)
567+ fput(file);
568+out:
569+ return error;
570+}
571+
572+asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
573+ unsigned long prot, unsigned long flags,
574+ unsigned long fd, unsigned long pgoff)
575+{
576+ return do_mmap2(addr, len, prot, flags, fd, pgoff);
577+}
578+
579 /*
580 * Perform the select(nd, in, out, ex, tv) and mmap() system
581 * calls. Linux/m68k cloned Linux/i386, which didn't use to be able to
582@@ -54,11 +87,57 @@ asmlinkage int old_mmap(struct mmap_arg_struct *arg)
583 if (a.offset & ~PAGE_MASK)
584 goto out;
585
586- error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
587- a.offset >> PAGE_SHIFT);
588+ a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
589+
590+ error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
591+out:
592+ return error;
593+}
594+
595+#if 0 /* DAVIDM - do we want this */
596+struct mmap_arg_struct64 {
597+ __u32 addr;
598+ __u32 len;
599+ __u32 prot;
600+ __u32 flags;
601+ __u64 offset; /* 64 bits */
602+ __u32 fd;
603+};
604+
605+asmlinkage long sys_mmap64(struct mmap_arg_struct64 *arg)
606+{
607+ int error = -EFAULT;
608+ struct file * file = NULL;
609+ struct mmap_arg_struct64 a;
610+ unsigned long pgoff;
611+
612+ if (copy_from_user(&a, arg, sizeof(a)))
613+ return -EFAULT;
614+
615+ if ((long)a.offset & ~PAGE_MASK)
616+ return -EINVAL;
617+
618+ pgoff = a.offset >> PAGE_SHIFT;
619+ if ((a.offset >> PAGE_SHIFT) != pgoff)
620+ return -EINVAL;
621+
622+ if (!(a.flags & MAP_ANONYMOUS)) {
623+ error = -EBADF;
624+ file = fget(a.fd);
625+ if (!file)
626+ goto out;
627+ }
628+ a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
629+
630+ down_write(&current->mm->mmap_sem);
631+ error = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags, pgoff);
632+ up_write(&current->mm->mmap_sem);
633+ if (file)
634+ fput(file);
635 out:
636 return error;
637 }
638+#endif
639
640 struct sel_arg_struct {
641 unsigned long n;
642diff --git a/arch/h8300/kernel/syscalls.S b/arch/h8300/kernel/syscalls.S
643index 2d69881..4eb67fa 100644
644--- a/arch/h8300/kernel/syscalls.S
645+++ b/arch/h8300/kernel/syscalls.S
646@@ -206,7 +206,7 @@ SYMBOL_NAME_LABEL(sys_call_table)
647 .long SYMBOL_NAME(sys_ni_syscall) /* streams2 */
648 .long SYMBOL_NAME(sys_vfork) /* 190 */
649 .long SYMBOL_NAME(sys_getrlimit)
650- .long SYMBOL_NAME(sys_mmap_pgoff)
651+ .long SYMBOL_NAME(sys_mmap2)
652 .long SYMBOL_NAME(sys_truncate64)
653 .long SYMBOL_NAME(sys_ftruncate64)
654 .long SYMBOL_NAME(sys_stat64) /* 195 */
655diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
656index e031ee8..625ed8f 100644
657--- a/arch/ia64/ia32/sys_ia32.c
658+++ b/arch/ia64/ia32/sys_ia32.c
659@@ -858,9 +858,6 @@ ia32_do_mmap (struct file *file, unsigned long addr, unsigned long len, int prot
660
661 prot = get_prot32(prot);
662
663- if (flags & MAP_HUGETLB)
664- return -ENOMEM;
665-
666 #if PAGE_SHIFT > IA32_PAGE_SHIFT
667 mutex_lock(&ia32_mmap_mutex);
668 {
669diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h
670index cc8335e..0d9d16e 100644
671--- a/arch/ia64/include/asm/io.h
672+++ b/arch/ia64/include/asm/io.h
673@@ -424,8 +424,6 @@ __writeq (unsigned long val, volatile void __iomem *addr)
674 extern void __iomem * ioremap(unsigned long offset, unsigned long size);
675 extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);
676 extern void iounmap (volatile void __iomem *addr);
677-extern void __iomem * early_ioremap (unsigned long phys_addr, unsigned long size);
678-extern void early_iounmap (volatile void __iomem *addr, unsigned long size);
679
680 /*
681 * String version of IO memory access ops:
682diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
683index 609d500..92ed83f 100644
684--- a/arch/ia64/kernel/sys_ia64.c
685+++ b/arch/ia64/kernel/sys_ia64.c
686@@ -100,7 +100,51 @@ sys_getpagesize (void)
687 asmlinkage unsigned long
688 ia64_brk (unsigned long brk)
689 {
690- unsigned long retval = sys_brk(brk);
691+ unsigned long rlim, retval, newbrk, oldbrk;
692+ struct mm_struct *mm = current->mm;
693+
694+ /*
695+ * Most of this replicates the code in sys_brk() except for an additional safety
696+ * check and the clearing of r8. However, we can't call sys_brk() because we need
697+ * to acquire the mmap_sem before we can do the test...
698+ */
699+ down_write(&mm->mmap_sem);
700+
701+ if (brk < mm->end_code)
702+ goto out;
703+ newbrk = PAGE_ALIGN(brk);
704+ oldbrk = PAGE_ALIGN(mm->brk);
705+ if (oldbrk == newbrk)
706+ goto set_brk;
707+
708+ /* Always allow shrinking brk. */
709+ if (brk <= mm->brk) {
710+ if (!do_munmap(mm, newbrk, oldbrk-newbrk))
711+ goto set_brk;
712+ goto out;
713+ }
714+
715+ /* Check against unimplemented/unmapped addresses: */
716+ if ((newbrk - oldbrk) > RGN_MAP_LIMIT || REGION_OFFSET(newbrk) > RGN_MAP_LIMIT)
717+ goto out;
718+
719+ /* Check against rlimit.. */
720+ rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
721+ if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
722+ goto out;
723+
724+ /* Check against existing mmap mappings. */
725+ if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
726+ goto out;
727+
728+ /* Ok, looks good - let it rip. */
729+ if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
730+ goto out;
731+set_brk:
732+ mm->brk = brk;
733+out:
734+ retval = mm->brk;
735+ up_write(&mm->mmap_sem);
736 force_successful_syscall_return();
737 return retval;
738 }
739@@ -141,6 +185,39 @@ int ia64_mmap_check(unsigned long addr, unsigned long len,
740 return 0;
741 }
742
743+static inline unsigned long
744+do_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, unsigned long pgoff)
745+{
746+ struct file *file = NULL;
747+
748+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
749+ if (!(flags & MAP_ANONYMOUS)) {
750+ file = fget(fd);
751+ if (!file)
752+ return -EBADF;
753+
754+ if (!file->f_op || !file->f_op->mmap) {
755+ addr = -ENODEV;
756+ goto out;
757+ }
758+ }
759+
760+ /* Careful about overflows.. */
761+ len = PAGE_ALIGN(len);
762+ if (!len || len > TASK_SIZE) {
763+ addr = -EINVAL;
764+ goto out;
765+ }
766+
767+ down_write(&current->mm->mmap_sem);
768+ addr = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
769+ up_write(&current->mm->mmap_sem);
770+
771+out: if (file)
772+ fput(file);
773+ return addr;
774+}
775+
776 /*
777 * mmap2() is like mmap() except that the offset is expressed in units
778 * of PAGE_SIZE (instead of bytes). This allows to mmap2() (pieces
779@@ -149,7 +226,7 @@ int ia64_mmap_check(unsigned long addr, unsigned long len,
780 asmlinkage unsigned long
781 sys_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, long pgoff)
782 {
783- addr = sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
784+ addr = do_mmap2(addr, len, prot, flags, fd, pgoff);
785 if (!IS_ERR((void *) addr))
786 force_successful_syscall_return();
787 return addr;
788@@ -161,7 +238,7 @@ sys_mmap (unsigned long addr, unsigned long len, int prot, int flags, int fd, lo
789 if (offset_in_page(off) != 0)
790 return -EINVAL;
791
792- addr = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
793+ addr = do_mmap2(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
794 if (!IS_ERR((void *) addr))
795 force_successful_syscall_return();
796 return addr;
797diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c
798index 3dccdd8..2a14062 100644
799--- a/arch/ia64/mm/ioremap.c
800+++ b/arch/ia64/mm/ioremap.c
801@@ -22,12 +22,6 @@ __ioremap (unsigned long phys_addr)
802 }
803
804 void __iomem *
805-early_ioremap (unsigned long phys_addr, unsigned long size)
806-{
807- return __ioremap(phys_addr);
808-}
809-
810-void __iomem *
811 ioremap (unsigned long phys_addr, unsigned long size)
812 {
813 void __iomem *addr;
814@@ -108,11 +102,6 @@ ioremap_nocache (unsigned long phys_addr, unsigned long size)
815 EXPORT_SYMBOL(ioremap_nocache);
816
817 void
818-early_iounmap (volatile void __iomem *addr, unsigned long size)
819-{
820-}
821-
822-void
823 iounmap (volatile void __iomem *addr)
824 {
825 if (REGION_NUMBER(addr) == RGN_GATE)
826diff --git a/arch/m32r/kernel/sys_m32r.c b/arch/m32r/kernel/sys_m32r.c
827index d3c865c..305ac85 100644
828--- a/arch/m32r/kernel/sys_m32r.c
829+++ b/arch/m32r/kernel/sys_m32r.c
830@@ -76,6 +76,30 @@ asmlinkage int sys_tas(int __user *addr)
831 return oldval;
832 }
833
834+asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
835+ unsigned long prot, unsigned long flags,
836+ unsigned long fd, unsigned long pgoff)
837+{
838+ int error = -EBADF;
839+ struct file *file = NULL;
840+
841+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
842+ if (!(flags & MAP_ANONYMOUS)) {
843+ file = fget(fd);
844+ if (!file)
845+ goto out;
846+ }
847+
848+ down_write(&current->mm->mmap_sem);
849+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
850+ up_write(&current->mm->mmap_sem);
851+
852+ if (file)
853+ fput(file);
854+out:
855+ return error;
856+}
857+
858 /*
859 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
860 *
861diff --git a/arch/m32r/kernel/syscall_table.S b/arch/m32r/kernel/syscall_table.S
862index 60536e2..aa3bf4c 100644
863--- a/arch/m32r/kernel/syscall_table.S
864+++ b/arch/m32r/kernel/syscall_table.S
865@@ -191,7 +191,7 @@ ENTRY(sys_call_table)
866 .long sys_ni_syscall /* streams2 */
867 .long sys_vfork /* 190 */
868 .long sys_getrlimit
869- .long sys_mmap_pgoff
870+ .long sys_mmap2
871 .long sys_truncate64
872 .long sys_ftruncate64
873 .long sys_stat64 /* 195 */
874diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c
875index 218f441..7deb402 100644
876--- a/arch/m68k/kernel/sys_m68k.c
877+++ b/arch/m68k/kernel/sys_m68k.c
878@@ -29,16 +29,37 @@
879 #include <asm/page.h>
880 #include <asm/unistd.h>
881
882+/* common code for old and new mmaps */
883+static inline long do_mmap2(
884+ unsigned long addr, unsigned long len,
885+ unsigned long prot, unsigned long flags,
886+ unsigned long fd, unsigned long pgoff)
887+{
888+ int error = -EBADF;
889+ struct file * file = NULL;
890+
891+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
892+ if (!(flags & MAP_ANONYMOUS)) {
893+ file = fget(fd);
894+ if (!file)
895+ goto out;
896+ }
897+
898+ down_write(&current->mm->mmap_sem);
899+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
900+ up_write(&current->mm->mmap_sem);
901+
902+ if (file)
903+ fput(file);
904+out:
905+ return error;
906+}
907+
908 asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
909 unsigned long prot, unsigned long flags,
910 unsigned long fd, unsigned long pgoff)
911 {
912- /*
913- * This is wrong for sun3 - there PAGE_SIZE is 8Kb,
914- * so we need to shift the argument down by 1; m68k mmap64(3)
915- * (in libc) expects the last argument of mmap2 in 4Kb units.
916- */
917- return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
918+ return do_mmap2(addr, len, prot, flags, fd, pgoff);
919 }
920
921 /*
922@@ -69,11 +90,57 @@ asmlinkage int old_mmap(struct mmap_arg_struct __user *arg)
923 if (a.offset & ~PAGE_MASK)
924 goto out;
925
926- error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
927- a.offset >> PAGE_SHIFT);
928+ a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
929+
930+ error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
931+out:
932+ return error;
933+}
934+
935+#if 0
936+struct mmap_arg_struct64 {
937+ __u32 addr;
938+ __u32 len;
939+ __u32 prot;
940+ __u32 flags;
941+ __u64 offset; /* 64 bits */
942+ __u32 fd;
943+};
944+
945+asmlinkage long sys_mmap64(struct mmap_arg_struct64 *arg)
946+{
947+ int error = -EFAULT;
948+ struct file * file = NULL;
949+ struct mmap_arg_struct64 a;
950+ unsigned long pgoff;
951+
952+ if (copy_from_user(&a, arg, sizeof(a)))
953+ return -EFAULT;
954+
955+ if ((long)a.offset & ~PAGE_MASK)
956+ return -EINVAL;
957+
958+ pgoff = a.offset >> PAGE_SHIFT;
959+ if ((a.offset >> PAGE_SHIFT) != pgoff)
960+ return -EINVAL;
961+
962+ if (!(a.flags & MAP_ANONYMOUS)) {
963+ error = -EBADF;
964+ file = fget(a.fd);
965+ if (!file)
966+ goto out;
967+ }
968+ a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
969+
970+ down_write(&current->mm->mmap_sem);
971+ error = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags, pgoff);
972+ up_write(&current->mm->mmap_sem);
973+ if (file)
974+ fput(file);
975 out:
976 return error;
977 }
978+#endif
979
980 struct sel_arg_struct {
981 unsigned long n;
982diff --git a/arch/m68knommu/kernel/sys_m68k.c b/arch/m68knommu/kernel/sys_m68k.c
983index b67cbc7..efdd090 100644
984--- a/arch/m68knommu/kernel/sys_m68k.c
985+++ b/arch/m68knommu/kernel/sys_m68k.c
986@@ -27,6 +27,39 @@
987 #include <asm/cacheflush.h>
988 #include <asm/unistd.h>
989
990+/* common code for old and new mmaps */
991+static inline long do_mmap2(
992+ unsigned long addr, unsigned long len,
993+ unsigned long prot, unsigned long flags,
994+ unsigned long fd, unsigned long pgoff)
995+{
996+ int error = -EBADF;
997+ struct file * file = NULL;
998+
999+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
1000+ if (!(flags & MAP_ANONYMOUS)) {
1001+ file = fget(fd);
1002+ if (!file)
1003+ goto out;
1004+ }
1005+
1006+ down_write(&current->mm->mmap_sem);
1007+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
1008+ up_write(&current->mm->mmap_sem);
1009+
1010+ if (file)
1011+ fput(file);
1012+out:
1013+ return error;
1014+}
1015+
1016+asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
1017+ unsigned long prot, unsigned long flags,
1018+ unsigned long fd, unsigned long pgoff)
1019+{
1020+ return do_mmap2(addr, len, prot, flags, fd, pgoff);
1021+}
1022+
1023 /*
1024 * Perform the select(nd, in, out, ex, tv) and mmap() system
1025 * calls. Linux/m68k cloned Linux/i386, which didn't use to be able to
1026@@ -55,8 +88,9 @@ asmlinkage int old_mmap(struct mmap_arg_struct *arg)
1027 if (a.offset & ~PAGE_MASK)
1028 goto out;
1029
1030- error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
1031- a.offset >> PAGE_SHIFT);
1032+ a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
1033+
1034+ error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
1035 out:
1036 return error;
1037 }
1038diff --git a/arch/m68knommu/kernel/syscalltable.S b/arch/m68knommu/kernel/syscalltable.S
1039index 486837e..23535cc 100644
1040--- a/arch/m68knommu/kernel/syscalltable.S
1041+++ b/arch/m68knommu/kernel/syscalltable.S
1042@@ -210,7 +210,7 @@ ENTRY(sys_call_table)
1043 .long sys_ni_syscall /* streams2 */
1044 .long sys_vfork /* 190 */
1045 .long sys_getrlimit
1046- .long sys_mmap_pgoff
1047+ .long sys_mmap2
1048 .long sys_truncate64
1049 .long sys_ftruncate64
1050 .long sys_stat64 /* 195 */
1051diff --git a/arch/microblaze/kernel/sys_microblaze.c b/arch/microblaze/kernel/sys_microblaze.c
1052index 9f3c205..07cabed 100644
1053--- a/arch/microblaze/kernel/sys_microblaze.c
1054+++ b/arch/microblaze/kernel/sys_microblaze.c
1055@@ -62,14 +62,46 @@ out:
1056 return error;
1057 }
1058
1059+asmlinkage long
1060+sys_mmap2(unsigned long addr, unsigned long len,
1061+ unsigned long prot, unsigned long flags,
1062+ unsigned long fd, unsigned long pgoff)
1063+{
1064+ struct file *file = NULL;
1065+ int ret = -EBADF;
1066+
1067+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
1068+ if (!(flags & MAP_ANONYMOUS)) {
1069+ file = fget(fd);
1070+ if (!file) {
1071+ printk(KERN_INFO "no fd in mmap\r\n");
1072+ goto out;
1073+ }
1074+ }
1075+
1076+ down_write(&current->mm->mmap_sem);
1077+ ret = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
1078+ up_write(&current->mm->mmap_sem);
1079+ if (file)
1080+ fput(file);
1081+out:
1082+ return ret;
1083+}
1084+
1085 asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
1086 unsigned long prot, unsigned long flags,
1087 unsigned long fd, off_t pgoff)
1088 {
1089- if (pgoff & ~PAGE_MASK)
1090- return -EINVAL;
1091+ int err = -EINVAL;
1092+
1093+ if (pgoff & ~PAGE_MASK) {
1094+ printk(KERN_INFO "no pagemask in mmap\r\n");
1095+ goto out;
1096+ }
1097
1098- return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff >> PAGE_SHIFT);
1099+ err = sys_mmap2(addr, len, prot, flags, fd, pgoff >> PAGE_SHIFT);
1100+out:
1101+ return err;
1102 }
1103
1104 /*
1105diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S
1106index eb50ce5..ecec191 100644
1107--- a/arch/microblaze/kernel/syscall_table.S
1108+++ b/arch/microblaze/kernel/syscall_table.S
1109@@ -196,7 +196,7 @@ ENTRY(sys_call_table)
1110 .long sys_ni_syscall /* reserved for streams2 */
1111 .long sys_vfork /* 190 */
1112 .long sys_getrlimit
1113- .long sys_mmap_pgoff /* mmap2 */
1114+ .long sys_mmap2 /* mmap2 */
1115 .long sys_truncate64
1116 .long sys_ftruncate64
1117 .long sys_stat64 /* 195 */
1118diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
1119index ea4a746..b77fefa 100644
1120--- a/arch/mips/kernel/linux32.c
1121+++ b/arch/mips/kernel/linux32.c
1122@@ -67,13 +67,28 @@ SYSCALL_DEFINE6(32_mmap2, unsigned long, addr, unsigned long, len,
1123 unsigned long, prot, unsigned long, flags, unsigned long, fd,
1124 unsigned long, pgoff)
1125 {
1126+ struct file * file = NULL;
1127 unsigned long error;
1128
1129 error = -EINVAL;
1130 if (pgoff & (~PAGE_MASK >> 12))
1131 goto out;
1132- error = sys_mmap_pgoff(addr, len, prot, flags, fd,
1133- pgoff >> (PAGE_SHIFT-12));
1134+ pgoff >>= PAGE_SHIFT-12;
1135+
1136+ if (!(flags & MAP_ANONYMOUS)) {
1137+ error = -EBADF;
1138+ file = fget(fd);
1139+ if (!file)
1140+ goto out;
1141+ }
1142+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
1143+
1144+ down_write(&current->mm->mmap_sem);
1145+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
1146+ up_write(&current->mm->mmap_sem);
1147+ if (file)
1148+ fput(file);
1149+
1150 out:
1151 return error;
1152 }
1153diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
1154index 3f7f466..fe0d798 100644
1155--- a/arch/mips/kernel/syscall.c
1156+++ b/arch/mips/kernel/syscall.c
1157@@ -93,8 +93,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
1158 * We do not accept a shared mapping if it would violate
1159 * cache aliasing constraints.
1160 */
1161- if ((flags & MAP_SHARED) &&
1162- ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
1163+ if ((flags & MAP_SHARED) && (addr & shm_align_mask))
1164 return -EINVAL;
1165 return addr;
1166 }
1167@@ -130,6 +129,31 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
1168 }
1169 }
1170
1171+/* common code for old and new mmaps */
1172+static inline unsigned long
1173+do_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
1174+ unsigned long flags, unsigned long fd, unsigned long pgoff)
1175+{
1176+ unsigned long error = -EBADF;
1177+ struct file * file = NULL;
1178+
1179+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
1180+ if (!(flags & MAP_ANONYMOUS)) {
1181+ file = fget(fd);
1182+ if (!file)
1183+ goto out;
1184+ }
1185+
1186+ down_write(&current->mm->mmap_sem);
1187+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
1188+ up_write(&current->mm->mmap_sem);
1189+
1190+ if (file)
1191+ fput(file);
1192+out:
1193+ return error;
1194+}
1195+
1196 SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len,
1197 unsigned long, prot, unsigned long, flags, unsigned long,
1198 fd, off_t, offset)
1199@@ -140,7 +164,7 @@ SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len,
1200 if (offset & ~PAGE_MASK)
1201 goto out;
1202
1203- result = sys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
1204+ result = do_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
1205
1206 out:
1207 return result;
1208@@ -153,7 +177,7 @@ SYSCALL_DEFINE6(mips_mmap2, unsigned long, addr, unsigned long, len,
1209 if (pgoff & (~PAGE_MASK >> 12))
1210 return -EINVAL;
1211
1212- return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT-12));
1213+ return do_mmap2(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT-12));
1214 }
1215
1216 save_static_function(sys_fork);
1217diff --git a/arch/mn10300/include/asm/mman.h b/arch/mn10300/include/asm/mman.h
1218index db5c53d..8eebf89 100644
1219--- a/arch/mn10300/include/asm/mman.h
1220+++ b/arch/mn10300/include/asm/mman.h
1221@@ -1,6 +1 @@
1222 #include <asm-generic/mman.h>
1223-
1224-#define MIN_MAP_ADDR PAGE_SIZE /* minimum fixed mmap address */
1225-
1226-#define arch_mmap_check(addr, len, flags) \
1227- (((flags) & MAP_FIXED && (addr) < MIN_MAP_ADDR) ? -EINVAL : 0)
1228diff --git a/arch/mn10300/kernel/entry.S b/arch/mn10300/kernel/entry.S
1229index c9ee6c0..a94e7ea 100644
1230--- a/arch/mn10300/kernel/entry.S
1231+++ b/arch/mn10300/kernel/entry.S
1232@@ -578,7 +578,7 @@ ENTRY(sys_call_table)
1233 .long sys_ni_syscall /* reserved for streams2 */
1234 .long sys_vfork /* 190 */
1235 .long sys_getrlimit
1236- .long sys_mmap_pgoff
1237+ .long sys_mmap2
1238 .long sys_truncate64
1239 .long sys_ftruncate64
1240 .long sys_stat64 /* 195 */
1241diff --git a/arch/mn10300/kernel/sys_mn10300.c b/arch/mn10300/kernel/sys_mn10300.c
1242index 17cc6ce..8ca5af0 100644
1243--- a/arch/mn10300/kernel/sys_mn10300.c
1244+++ b/arch/mn10300/kernel/sys_mn10300.c
1245@@ -23,13 +23,47 @@
1246
1247 #include <asm/uaccess.h>
1248
1249+#define MIN_MAP_ADDR PAGE_SIZE /* minimum fixed mmap address */
1250+
1251+/*
1252+ * memory mapping syscall
1253+ */
1254+asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
1255+ unsigned long prot, unsigned long flags,
1256+ unsigned long fd, unsigned long pgoff)
1257+{
1258+ struct file *file = NULL;
1259+ long error = -EINVAL;
1260+
1261+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
1262+
1263+ if (flags & MAP_FIXED && addr < MIN_MAP_ADDR)
1264+ goto out;
1265+
1266+ error = -EBADF;
1267+ if (!(flags & MAP_ANONYMOUS)) {
1268+ file = fget(fd);
1269+ if (!file)
1270+ goto out;
1271+ }
1272+
1273+ down_write(&current->mm->mmap_sem);
1274+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
1275+ up_write(&current->mm->mmap_sem);
1276+
1277+ if (file)
1278+ fput(file);
1279+out:
1280+ return error;
1281+}
1282+
1283 asmlinkage long old_mmap(unsigned long addr, unsigned long len,
1284 unsigned long prot, unsigned long flags,
1285 unsigned long fd, unsigned long offset)
1286 {
1287 if (offset & ~PAGE_MASK)
1288 return -EINVAL;
1289- return sys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
1290+ return sys_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
1291 }
1292
1293 struct sel_arg_struct {
1294diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
1295index 9147391..71b3195 100644
1296--- a/arch/parisc/kernel/sys_parisc.c
1297+++ b/arch/parisc/kernel/sys_parisc.c
1298@@ -110,14 +110,37 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
1299 return addr;
1300 }
1301
1302+static unsigned long do_mmap2(unsigned long addr, unsigned long len,
1303+ unsigned long prot, unsigned long flags, unsigned long fd,
1304+ unsigned long pgoff)
1305+{
1306+ struct file * file = NULL;
1307+ unsigned long error = -EBADF;
1308+ if (!(flags & MAP_ANONYMOUS)) {
1309+ file = fget(fd);
1310+ if (!file)
1311+ goto out;
1312+ }
1313+
1314+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
1315+
1316+ down_write(&current->mm->mmap_sem);
1317+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
1318+ up_write(&current->mm->mmap_sem);
1319+
1320+ if (file != NULL)
1321+ fput(file);
1322+out:
1323+ return error;
1324+}
1325+
1326 asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len,
1327 unsigned long prot, unsigned long flags, unsigned long fd,
1328 unsigned long pgoff)
1329 {
1330 /* Make sure the shift for mmap2 is constant (12), no matter what PAGE_SIZE
1331 we have. */
1332- return sys_mmap_pgoff(addr, len, prot, flags, fd,
1333- pgoff >> (PAGE_SHIFT - 12));
1334+ return do_mmap2(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT - 12));
1335 }
1336
1337 asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
1338@@ -125,8 +148,7 @@ asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
1339 unsigned long offset)
1340 {
1341 if (!(offset & ~PAGE_MASK)) {
1342- return sys_mmap_pgoff(addr, len, prot, flags, fd,
1343- offset >> PAGE_SHIFT);
1344+ return do_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
1345 } else {
1346 return -EINVAL;
1347 }
1348diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
1349index 5698502..014a624 100644
1350--- a/arch/powerpc/include/asm/elf.h
1351+++ b/arch/powerpc/include/asm/elf.h
1352@@ -236,10 +236,14 @@ typedef elf_vrregset_t elf_fpxregset_t;
1353 #ifdef __powerpc64__
1354 # define SET_PERSONALITY(ex) \
1355 do { \
1356+ unsigned long new_flags = 0; \
1357 if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
1358- set_thread_flag(TIF_32BIT); \
1359+ new_flags = _TIF_32BIT; \
1360+ if ((current_thread_info()->flags & _TIF_32BIT) \
1361+ != new_flags) \
1362+ set_thread_flag(TIF_ABI_PENDING); \
1363 else \
1364- clear_thread_flag(TIF_32BIT); \
1365+ clear_thread_flag(TIF_ABI_PENDING); \
1366 if (personality(current->personality) != PER_LINUX32) \
1367 set_personality(PER_LINUX | \
1368 (current->personality & (~PER_MASK))); \
1369diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h
1370index 0192a4e..0845488 100644
1371--- a/arch/powerpc/include/asm/module.h
1372+++ b/arch/powerpc/include/asm/module.h
1373@@ -87,10 +87,5 @@ struct exception_table_entry;
1374 void sort_ex_table(struct exception_table_entry *start,
1375 struct exception_table_entry *finish);
1376
1377-#ifdef CONFIG_MODVERSIONS
1378-#define ARCH_RELOCATES_KCRCTAB
1379-
1380-extern const unsigned long reloc_start[];
1381-#endif
1382 #endif /* __KERNEL__ */
1383 #endif /* _ASM_POWERPC_MODULE_H */
1384diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
1385index aa9d383..c8b3292 100644
1386--- a/arch/powerpc/include/asm/thread_info.h
1387+++ b/arch/powerpc/include/asm/thread_info.h
1388@@ -111,6 +111,7 @@ static inline struct thread_info *current_thread_info(void)
1389 #define TIF_NOTIFY_RESUME 13 /* callback before returning to user */
1390 #define TIF_FREEZE 14 /* Freezing for suspend */
1391 #define TIF_RUNLATCH 15 /* Is the runlatch enabled? */
1392+#define TIF_ABI_PENDING 16 /* 32/64 bit switch needed */
1393
1394 /* as above, but as bit values */
1395 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
1396@@ -128,6 +129,7 @@ static inline struct thread_info *current_thread_info(void)
1397 #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
1398 #define _TIF_FREEZE (1<<TIF_FREEZE)
1399 #define _TIF_RUNLATCH (1<<TIF_RUNLATCH)
1400+#define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING)
1401 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP)
1402
1403 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
1404diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
1405index f0c624f..a5b632e 100644
1406--- a/arch/powerpc/kernel/align.c
1407+++ b/arch/powerpc/kernel/align.c
1408@@ -642,14 +642,10 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg,
1409 */
1410 static int emulate_vsx(unsigned char __user *addr, unsigned int reg,
1411 unsigned int areg, struct pt_regs *regs,
1412- unsigned int flags, unsigned int length,
1413- unsigned int elsize)
1414+ unsigned int flags, unsigned int length)
1415 {
1416 char *ptr;
1417- unsigned long *lptr;
1418 int ret = 0;
1419- int sw = 0;
1420- int i, j;
1421
1422 flush_vsx_to_thread(current);
1423
1424@@ -658,35 +654,19 @@ static int emulate_vsx(unsigned char __user *addr, unsigned int reg,
1425 else
1426 ptr = (char *) &current->thread.vr[reg - 32];
1427
1428- lptr = (unsigned long *) ptr;
1429-
1430- if (flags & SW)
1431- sw = elsize-1;
1432-
1433- for (j = 0; j < length; j += elsize) {
1434- for (i = 0; i < elsize; ++i) {
1435- if (flags & ST)
1436- ret |= __put_user(ptr[i^sw], addr + i);
1437- else
1438- ret |= __get_user(ptr[i^sw], addr + i);
1439+ if (flags & ST)
1440+ ret = __copy_to_user(addr, ptr, length);
1441+ else {
1442+ if (flags & SPLT){
1443+ ret = __copy_from_user(ptr, addr, length);
1444+ ptr += length;
1445 }
1446- ptr += elsize;
1447- addr += elsize;
1448+ ret |= __copy_from_user(ptr, addr, length);
1449 }
1450-
1451- if (!ret) {
1452- if (flags & U)
1453- regs->gpr[areg] = regs->dar;
1454-
1455- /* Splat load copies the same data to top and bottom 8 bytes */
1456- if (flags & SPLT)
1457- lptr[1] = lptr[0];
1458- /* For 8 byte loads, zero the top 8 bytes */
1459- else if (!(flags & ST) && (8 == length))
1460- lptr[1] = 0;
1461- } else
1462+ if (flags & U)
1463+ regs->gpr[areg] = regs->dar;
1464+ if (ret)
1465 return -EFAULT;
1466-
1467 return 1;
1468 }
1469 #endif
1470@@ -787,25 +767,16 @@ int fix_alignment(struct pt_regs *regs)
1471
1472 #ifdef CONFIG_VSX
1473 if ((instruction & 0xfc00003e) == 0x7c000018) {
1474- unsigned int elsize;
1475-
1476- /* Additional register addressing bit (64 VSX vs 32 FPR/GPR) */
1477+ /* Additional register addressing bit (64 VSX vs 32 FPR/GPR */
1478 reg |= (instruction & 0x1) << 5;
1479 /* Simple inline decoder instead of a table */
1480- /* VSX has only 8 and 16 byte memory accesses */
1481- nb = 8;
1482 if (instruction & 0x200)
1483 nb = 16;
1484-
1485- /* Vector stores in little-endian mode swap individual
1486- elements, so process them separately */
1487- elsize = 4;
1488- if (instruction & 0x80)
1489- elsize = 8;
1490-
1491+ else if (instruction & 0x080)
1492+ nb = 8;
1493+ else
1494+ nb = 4;
1495 flags = 0;
1496- if (regs->msr & MSR_LE)
1497- flags |= SW;
1498 if (instruction & 0x100)
1499 flags |= ST;
1500 if (instruction & 0x040)
1501@@ -816,7 +787,7 @@ int fix_alignment(struct pt_regs *regs)
1502 nb = 8;
1503 }
1504 PPC_WARN_EMULATED(vsx);
1505- return emulate_vsx(addr, reg, areg, regs, flags, nb, elsize);
1506+ return emulate_vsx(addr, reg, areg, regs, flags, nb);
1507 }
1508 #endif
1509 /* A size of 0 indicates an instruction we don't support, with
1510diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
1511index cadbed6..e8dfdbd 100644
1512--- a/arch/powerpc/kernel/pci-common.c
1513+++ b/arch/powerpc/kernel/pci-common.c
1514@@ -1107,12 +1107,6 @@ void __devinit pcibios_setup_bus_devices(struct pci_bus *bus)
1515 list_for_each_entry(dev, &bus->devices, bus_list) {
1516 struct dev_archdata *sd = &dev->dev.archdata;
1517
1518- /* Cardbus can call us to add new devices to a bus, so ignore
1519- * those who are already fully discovered
1520- */
1521- if (dev->is_added)
1522- continue;
1523-
1524 /* Setup OF node pointer in archdata */
1525 sd->of_node = pci_device_to_OF_node(dev);
1526
1527@@ -1153,13 +1147,6 @@ void __devinit pcibios_fixup_bus(struct pci_bus *bus)
1528 }
1529 EXPORT_SYMBOL(pcibios_fixup_bus);
1530
1531-void __devinit pci_fixup_cardbus(struct pci_bus *bus)
1532-{
1533- /* Now fixup devices on that bus */
1534- pcibios_setup_bus_devices(bus);
1535-}
1536-
1537-
1538 static int skip_isa_ioresource_align(struct pci_dev *dev)
1539 {
1540 if ((ppc_pci_flags & PPC_PCI_CAN_SKIP_ISA_ALIGN) &&
1541diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
1542index 7b816da..c930ac3 100644
1543--- a/arch/powerpc/kernel/process.c
1544+++ b/arch/powerpc/kernel/process.c
1545@@ -554,6 +554,18 @@ void exit_thread(void)
1546
1547 void flush_thread(void)
1548 {
1549+#ifdef CONFIG_PPC64
1550+ struct thread_info *t = current_thread_info();
1551+
1552+ if (test_ti_thread_flag(t, TIF_ABI_PENDING)) {
1553+ clear_ti_thread_flag(t, TIF_ABI_PENDING);
1554+ if (test_ti_thread_flag(t, TIF_32BIT))
1555+ clear_ti_thread_flag(t, TIF_32BIT);
1556+ else
1557+ set_ti_thread_flag(t, TIF_32BIT);
1558+ }
1559+#endif
1560+
1561 discard_lazy_cpu_state();
1562
1563 if (current->thread.dabr) {
1564diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c
1565index 3370e62..c04832c 100644
1566--- a/arch/powerpc/kernel/syscalls.c
1567+++ b/arch/powerpc/kernel/syscalls.c
1568@@ -140,6 +140,7 @@ static inline unsigned long do_mmap2(unsigned long addr, size_t len,
1569 unsigned long prot, unsigned long flags,
1570 unsigned long fd, unsigned long off, int shift)
1571 {
1572+ struct file * file = NULL;
1573 unsigned long ret = -EINVAL;
1574
1575 if (!arch_validate_prot(prot))
1576@@ -150,8 +151,20 @@ static inline unsigned long do_mmap2(unsigned long addr, size_t len,
1577 goto out;
1578 off >>= shift;
1579 }
1580+
1581+ ret = -EBADF;
1582+ if (!(flags & MAP_ANONYMOUS)) {
1583+ if (!(file = fget(fd)))
1584+ goto out;
1585+ }
1586+
1587+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
1588
1589- ret = sys_mmap_pgoff(addr, len, prot, flags, fd, off);
1590+ down_write(&current->mm->mmap_sem);
1591+ ret = do_mmap_pgoff(file, addr, len, prot, flags, off);
1592+ up_write(&current->mm->mmap_sem);
1593+ if (file)
1594+ fput(file);
1595 out:
1596 return ret;
1597 }
1598diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
1599index fe46048..67b6916 100644
1600--- a/arch/powerpc/kernel/vector.S
1601+++ b/arch/powerpc/kernel/vector.S
1602@@ -58,7 +58,7 @@ _GLOBAL(load_up_altivec)
1603 * all 1's
1604 */
1605 mfspr r4,SPRN_VRSAVE
1606- cmpwi 0,r4,0
1607+ cmpdi 0,r4,0
1608 bne+ 1f
1609 li r4,-1
1610 mtspr SPRN_VRSAVE,r4
1611diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
1612index dcd01c8..27735a7 100644
1613--- a/arch/powerpc/kernel/vmlinux.lds.S
1614+++ b/arch/powerpc/kernel/vmlinux.lds.S
1615@@ -38,9 +38,6 @@ jiffies = jiffies_64 + 4;
1616 #endif
1617 SECTIONS
1618 {
1619- . = 0;
1620- reloc_start = .;
1621-
1622 . = KERNELBASE;
1623
1624 /*
1625diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
1626index e82749b..ae88b14 100644
1627--- a/arch/powerpc/sysdev/fsl_pci.c
1628+++ b/arch/powerpc/sysdev/fsl_pci.c
1629@@ -392,22 +392,8 @@ DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8536, quirk_fsl_pcie_header);
1630 DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8641, quirk_fsl_pcie_header);
1631 DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8641D, quirk_fsl_pcie_header);
1632 DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8610, quirk_fsl_pcie_header);
1633-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1011E, quirk_fsl_pcie_header);
1634-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1011, quirk_fsl_pcie_header);
1635-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1013E, quirk_fsl_pcie_header);
1636-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1013, quirk_fsl_pcie_header);
1637-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1020E, quirk_fsl_pcie_header);
1638-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1020, quirk_fsl_pcie_header);
1639-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1022E, quirk_fsl_pcie_header);
1640-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1022, quirk_fsl_pcie_header);
1641-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2010E, quirk_fsl_pcie_header);
1642-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2010, quirk_fsl_pcie_header);
1643 DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2020E, quirk_fsl_pcie_header);
1644 DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2020, quirk_fsl_pcie_header);
1645-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4040E, quirk_fsl_pcie_header);
1646-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4040, quirk_fsl_pcie_header);
1647-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4080E, quirk_fsl_pcie_header);
1648-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4080, quirk_fsl_pcie_header);
1649 #endif /* CONFIG_PPC_85xx || CONFIG_PPC_86xx */
1650
1651 #if defined(CONFIG_PPC_83xx) || defined(CONFIG_PPC_MPC512x)
1652diff --git a/arch/s390/include/asm/kvm.h b/arch/s390/include/asm/kvm.h
1653index 82b32a1..3dfcaeb 100644
1654--- a/arch/s390/include/asm/kvm.h
1655+++ b/arch/s390/include/asm/kvm.h
1656@@ -1,5 +1,6 @@
1657 #ifndef __LINUX_KVM_S390_H
1658 #define __LINUX_KVM_S390_H
1659+
1660 /*
1661 * asm-s390/kvm.h - KVM s390 specific structures and definitions
1662 *
1663@@ -14,8 +15,6 @@
1664 */
1665 #include <linux/types.h>
1666
1667-#define __KVM_S390
1668-
1669 /* for KVM_GET_REGS and KVM_SET_REGS */
1670 struct kvm_regs {
1671 /* general purpose regs for s390 */
1672diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
1673index 9c746c0..0debcec 100644
1674--- a/arch/s390/kernel/compat_linux.c
1675+++ b/arch/s390/kernel/compat_linux.c
1676@@ -683,6 +683,38 @@ struct mmap_arg_struct_emu31 {
1677 u32 offset;
1678 };
1679
1680+/* common code for old and new mmaps */
1681+static inline long do_mmap2(
1682+ unsigned long addr, unsigned long len,
1683+ unsigned long prot, unsigned long flags,
1684+ unsigned long fd, unsigned long pgoff)
1685+{
1686+ struct file * file = NULL;
1687+ unsigned long error = -EBADF;
1688+
1689+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
1690+ if (!(flags & MAP_ANONYMOUS)) {
1691+ file = fget(fd);
1692+ if (!file)
1693+ goto out;
1694+ }
1695+
1696+ down_write(&current->mm->mmap_sem);
1697+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
1698+ if (!IS_ERR((void *) error) && error + len >= 0x80000000ULL) {
1699+ /* Result is out of bounds. */
1700+ do_munmap(current->mm, addr, len);
1701+ error = -ENOMEM;
1702+ }
1703+ up_write(&current->mm->mmap_sem);
1704+
1705+ if (file)
1706+ fput(file);
1707+out:
1708+ return error;
1709+}
1710+
1711+
1712 asmlinkage unsigned long
1713 old32_mmap(struct mmap_arg_struct_emu31 __user *arg)
1714 {
1715@@ -696,8 +728,7 @@ old32_mmap(struct mmap_arg_struct_emu31 __user *arg)
1716 if (a.offset & ~PAGE_MASK)
1717 goto out;
1718
1719- error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
1720- a.offset >> PAGE_SHIFT);
1721+ error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
1722 out:
1723 return error;
1724 }
1725@@ -710,7 +741,7 @@ sys32_mmap2(struct mmap_arg_struct_emu31 __user *arg)
1726
1727 if (copy_from_user(&a, arg, sizeof(a)))
1728 goto out;
1729- error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
1730+ error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
1731 out:
1732 return error;
1733 }
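Unlike the other do_mmap2() copies in this patch, the s390 compat version above also verifies that the resulting mapping fits the 31-bit address space and backs it out with do_munmap() otherwise. A standalone sketch of just that bound check (the 2 GiB limit is the only real constant here; the rest is illustrative):

```c
#include <stdbool.h>
#include <stdio.h>

/* A 31-bit compat mapping must end below the 2 GiB line. */
static bool fits_31bit(unsigned long long addr, unsigned long long len)
{
    return addr + len < 0x80000000ULL;
}

int main(void)
{
    /* 0x7fff0000 + 0x20000 crosses 2 GiB -> rejected (-ENOMEM above). */
    printf("%d\n", fits_31bit(0x7fff0000ULL, 0x20000ULL)); /* 0 */
    printf("%d\n", fits_31bit(0x10000000ULL, 0x1000ULL));  /* 1 */
    return 0;
}
```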
1734diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
1735index e8ef21c..48215d1 100644
1736--- a/arch/s390/kernel/entry.S
1737+++ b/arch/s390/kernel/entry.S
1738@@ -571,7 +571,6 @@ pgm_svcper:
1739 mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID
1740 oi __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
1741 TRACE_IRQS_ON
1742- lm %r2,%r6,SP_R2(%r15) # load svc arguments
1743 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
1744 b BASED(sysc_do_svc)
1745
1746diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
1747index f33658f..9aff1d4 100644
1748--- a/arch/s390/kernel/entry64.S
1749+++ b/arch/s390/kernel/entry64.S
1750@@ -549,7 +549,6 @@ pgm_svcper:
1751 mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID
1752 oi __TI_flags+7(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
1753 TRACE_IRQS_ON
1754- lmg %r2,%r6,SP_R2(%r15) # load svc arguments
1755 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
1756 j sysc_do_svc
1757
1758diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
1759index d984a2a..6a25080 100644
1760--- a/arch/s390/kernel/head64.S
1761+++ b/arch/s390/kernel/head64.S
1762@@ -83,8 +83,6 @@ startup_continue:
1763 slr %r0,%r0 # set cpuid to zero
1764 sigp %r1,%r0,0x12 # switch to esame mode
1765 sam64 # switch to 64 bit mode
1766- llgfr %r13,%r13 # clear high-order half of base reg
1767- lmh %r0,%r15,.Lzero64-.LPG1(%r13) # clear high-order half
1768 lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
1769 lg %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area
1770 # move IPL device to lowcore
1771@@ -129,7 +127,6 @@ startup_continue:
1772 .L4malign:.quad 0xffffffffffc00000
1773 .Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8
1774 .Lnop: .long 0x07000700
1775-.Lzero64:.fill 16,4,0x0
1776 #ifdef CONFIG_ZFCPDUMP
1777 .Lcurrent_cpu:
1778 .long 0x0
1779diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c
1780index 86a74c9..e9d94f6 100644
1781--- a/arch/s390/kernel/sys_s390.c
1782+++ b/arch/s390/kernel/sys_s390.c
1783@@ -32,6 +32,32 @@
1784 #include <asm/uaccess.h>
1785 #include "entry.h"
1786
1787+/* common code for old and new mmaps */
1788+static inline long do_mmap2(
1789+ unsigned long addr, unsigned long len,
1790+ unsigned long prot, unsigned long flags,
1791+ unsigned long fd, unsigned long pgoff)
1792+{
1793+ long error = -EBADF;
1794+ struct file * file = NULL;
1795+
1796+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
1797+ if (!(flags & MAP_ANONYMOUS)) {
1798+ file = fget(fd);
1799+ if (!file)
1800+ goto out;
1801+ }
1802+
1803+ down_write(&current->mm->mmap_sem);
1804+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
1805+ up_write(&current->mm->mmap_sem);
1806+
1807+ if (file)
1808+ fput(file);
1809+out:
1810+ return error;
1811+}
1812+
1813 /*
1814 * Perform the select(nd, in, out, ex, tv) and mmap() system
1815 * calls. Linux for S/390 isn't able to handle more than 5
1816@@ -55,7 +81,7 @@ SYSCALL_DEFINE1(mmap2, struct mmap_arg_struct __user *, arg)
1817
1818 if (copy_from_user(&a, arg, sizeof(a)))
1819 goto out;
1820- error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
1821+ error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
1822 out:
1823 return error;
1824 }
1825@@ -72,7 +98,7 @@ SYSCALL_DEFINE1(s390_old_mmap, struct mmap_arg_struct __user *, arg)
1826 if (a.offset & ~PAGE_MASK)
1827 goto out;
1828
1829- error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
1830+ error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
1831 out:
1832 return error;
1833 }
1834diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
1835index b400964..ba9d8a7 100644
1836--- a/arch/s390/kvm/intercept.c
1837+++ b/arch/s390/kvm/intercept.c
1838@@ -213,7 +213,7 @@ static int handle_instruction_and_prog(struct kvm_vcpu *vcpu)
1839 return rc2;
1840 }
1841
1842-static const intercept_handler_t intercept_funcs[] = {
1843+static const intercept_handler_t intercept_funcs[0x48 >> 2] = {
1844 [0x00 >> 2] = handle_noop,
1845 [0x04 >> 2] = handle_instruction,
1846 [0x08 >> 2] = handle_prog,
1847@@ -230,7 +230,7 @@ int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
1848 intercept_handler_t func;
1849 u8 code = vcpu->arch.sie_block->icptcode;
1850
1851- if (code & 3 || (code >> 2) >= ARRAY_SIZE(intercept_funcs))
1852+ if (code & 3 || code > 0x48)
1853 return -ENOTSUPP;
1854 func = intercept_funcs[code >> 2];
1855 if (func)
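The table above is indexed by intercept code >> 2, since valid SIE intercept codes are multiples of 4. A standalone sketch of the dispatch, with made-up handler names; note the sketch sizes the table to (0x48 >> 2) + 1 entries so that code 0x48, which the restored `code > 0x48` check admits, actually has a slot:

```c
#include <stdio.h>

typedef int (*intercept_handler_t)(int vcpu);

static int handle_noop(int vcpu) { (void)vcpu; return 0; }
static int handle_prog(int vcpu) { (void)vcpu; return 1; }

/* Codes 0x00..0x48 in steps of 4 need (0x48 >> 2) + 1 = 19 slots. */
static const intercept_handler_t intercept_funcs[(0x48 >> 2) + 1] = {
    [0x00 >> 2] = handle_noop,
    [0x08 >> 2] = handle_prog,
};

static int dispatch(unsigned code, int vcpu)
{
    intercept_handler_t func;

    /* Reject codes that are not multiples of 4 or out of range. */
    if (code & 3 || code > 0x48)
        return -1;                  /* -ENOTSUPP in the kernel */
    func = intercept_funcs[code >> 2];
    return func ? func(vcpu) : -1;
}

int main(void)
{
    printf("%d %d %d\n",
           dispatch(0x00, 0),       /* 0: noop handler   */
           dispatch(0x08, 0),       /* 1: prog handler   */
           dispatch(0x4c, 0));      /* -1: out of range  */
    return 0;
}
```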
1856diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
1857index ca2d312..07ced89 100644
1858--- a/arch/s390/kvm/kvm-s390.c
1859+++ b/arch/s390/kvm/kvm-s390.c
1860@@ -116,16 +116,10 @@ long kvm_arch_dev_ioctl(struct file *filp,
1861
1862 int kvm_dev_ioctl_check_extension(long ext)
1863 {
1864- int r;
1865-
1866 switch (ext) {
1867- case KVM_CAP_S390_PSW:
1868- r = 1;
1869- break;
1870 default:
1871- r = 0;
1872+ return 0;
1873 }
1874- return r;
1875 }
1876
1877 /* Section: vm related */
1878@@ -425,10 +419,8 @@ static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
1879 vcpu_load(vcpu);
1880 if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
1881 rc = -EBUSY;
1882- else {
1883- vcpu->run->psw_mask = psw.mask;
1884- vcpu->run->psw_addr = psw.addr;
1885- }
1886+ else
1887+ vcpu->arch.sie_block->gpsw = psw;
1888 vcpu_put(vcpu);
1889 return rc;
1890 }
1891@@ -516,6 +508,9 @@ rerun_vcpu:
1892
1893 switch (kvm_run->exit_reason) {
1894 case KVM_EXIT_S390_SIEIC:
1895+ vcpu->arch.sie_block->gpsw.mask = kvm_run->s390_sieic.mask;
1896+ vcpu->arch.sie_block->gpsw.addr = kvm_run->s390_sieic.addr;
1897+ break;
1898 case KVM_EXIT_UNKNOWN:
1899 case KVM_EXIT_INTR:
1900 case KVM_EXIT_S390_RESET:
1901@@ -524,9 +519,6 @@ rerun_vcpu:
1902 BUG();
1903 }
1904
1905- vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
1906- vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
1907-
1908 might_fault();
1909
1910 do {
1911@@ -546,6 +538,8 @@ rerun_vcpu:
1912 /* intercept cannot be handled in-kernel, prepare kvm-run */
1913 kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
1914 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
1915+ kvm_run->s390_sieic.mask = vcpu->arch.sie_block->gpsw.mask;
1916+ kvm_run->s390_sieic.addr = vcpu->arch.sie_block->gpsw.addr;
1917 kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
1918 kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
1919 rc = 0;
1920@@ -557,9 +551,6 @@ rerun_vcpu:
1921 rc = 0;
1922 }
1923
1924- kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
1925- kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
1926-
1927 if (vcpu->sigset_active)
1928 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
1929
1930diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
1931index 15ee111..40c8c67 100644
1932--- a/arch/s390/kvm/sigp.c
1933+++ b/arch/s390/kvm/sigp.c
1934@@ -188,9 +188,9 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
1935
1936 /* make sure that the new value is valid memory */
1937 address = address & 0x7fffe000u;
1938- if ((copy_from_user(&tmp, (void __user *)
1939- (address + vcpu->arch.sie_block->gmsor) , 1)) ||
1940- (copy_from_user(&tmp, (void __user *)(address +
1941+ if ((copy_from_guest(vcpu, &tmp,
1942+ (u64) (address + vcpu->arch.sie_block->gmsor) , 1)) ||
1943+ (copy_from_guest(vcpu, &tmp, (u64) (address +
1944 vcpu->arch.sie_block->gmsor + PAGE_SIZE), 1))) {
1945 *reg |= SIGP_STAT_INVALID_PARAMETER;
1946 return 1; /* invalid parameter */
1947diff --git a/arch/score/kernel/sys_score.c b/arch/score/kernel/sys_score.c
1948index 856ed68..0012494 100644
1949--- a/arch/score/kernel/sys_score.c
1950+++ b/arch/score/kernel/sys_score.c
1951@@ -36,16 +36,34 @@ asmlinkage long
1952 sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
1953 unsigned long flags, unsigned long fd, unsigned long pgoff)
1954 {
1955- return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
1956+ int error = -EBADF;
1957+ struct file *file = NULL;
1958+
1959+ if (pgoff & (~PAGE_MASK >> 12))
1960+ return -EINVAL;
1961+
1962+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
1963+ if (!(flags & MAP_ANONYMOUS)) {
1964+ file = fget(fd);
1965+ if (!file)
1966+ return error;
1967+ }
1968+
1969+ down_write(&current->mm->mmap_sem);
1970+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
1971+ up_write(&current->mm->mmap_sem);
1972+
1973+ if (file)
1974+ fput(file);
1975+
1976+ return error;
1977 }
1978
1979 asmlinkage long
1980 sys_mmap(unsigned long addr, unsigned long len, unsigned long prot,
1981- unsigned long flags, unsigned long fd, off_t offset)
1982+ unsigned long flags, unsigned long fd, off_t pgoff)
1983 {
1984- if (unlikely(offset & ~PAGE_MASK))
1985- return -EINVAL;
1986- return sys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
1987+ return sys_mmap2(addr, len, prot, flags, fd, pgoff >> PAGE_SHIFT);
1988 }
1989
1990 asmlinkage long
1991diff --git a/arch/sh/include/asm/pgtable_32.h b/arch/sh/include/asm/pgtable_32.h
1992index ba64e7f..c0d359c 100644
1993--- a/arch/sh/include/asm/pgtable_32.h
1994+++ b/arch/sh/include/asm/pgtable_32.h
1995@@ -344,8 +344,7 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
1996 #define pte_special(pte) ((pte).pte_low & _PAGE_SPECIAL)
1997
1998 #ifdef CONFIG_X2TLB
1999-#define pte_write(pte) \
2000- ((pte).pte_high & (_PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE))
2001+#define pte_write(pte) ((pte).pte_high & _PAGE_EXT_USER_WRITE)
2002 #else
2003 #define pte_write(pte) ((pte).pte_low & _PAGE_RW)
2004 #endif
2005@@ -359,7 +358,7 @@ static inline pte_t pte_##fn(pte_t pte) { pte.pte_##h op; return pte; }
2006 * individually toggled (and user permissions are entirely decoupled from
2007 * kernel permissions), we attempt to couple them a bit more sanely here.
2008 */
2009-PTE_BIT_FUNC(high, wrprotect, &= ~(_PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE));
2010+PTE_BIT_FUNC(high, wrprotect, &= ~_PAGE_EXT_USER_WRITE);
2011 PTE_BIT_FUNC(high, mkwrite, |= _PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE);
2012 PTE_BIT_FUNC(high, mkhuge, |= _PAGE_SZHUGE);
2013 #else
2014diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c
2015index 44aa119..1192398 100644
2016--- a/arch/sh/kernel/process_64.c
2017+++ b/arch/sh/kernel/process_64.c
2018@@ -367,7 +367,7 @@ void exit_thread(void)
2019 void flush_thread(void)
2020 {
2021
2022- /* Called by fs/exec.c (setup_new_exec) to remove traces of a
2023+ /* Called by fs/exec.c (flush_old_exec) to remove traces of a
2024 * previously running executable. */
2025 #ifdef CONFIG_SH_FPU
2026 if (last_task_used_math == current) {
2027diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c
2028index 71399cd..8aa5d1c 100644
2029--- a/arch/sh/kernel/sys_sh.c
2030+++ b/arch/sh/kernel/sys_sh.c
2031@@ -28,13 +28,37 @@
2032 #include <asm/cacheflush.h>
2033 #include <asm/cachectl.h>
2034
2035+static inline long
2036+do_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
2037+ unsigned long flags, int fd, unsigned long pgoff)
2038+{
2039+ int error = -EBADF;
2040+ struct file *file = NULL;
2041+
2042+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
2043+ if (!(flags & MAP_ANONYMOUS)) {
2044+ file = fget(fd);
2045+ if (!file)
2046+ goto out;
2047+ }
2048+
2049+ down_write(&current->mm->mmap_sem);
2050+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
2051+ up_write(&current->mm->mmap_sem);
2052+
2053+ if (file)
2054+ fput(file);
2055+out:
2056+ return error;
2057+}
2058+
2059 asmlinkage int old_mmap(unsigned long addr, unsigned long len,
2060 unsigned long prot, unsigned long flags,
2061 int fd, unsigned long off)
2062 {
2063 if (off & ~PAGE_MASK)
2064 return -EINVAL;
2065- return sys_mmap_pgoff(addr, len, prot, flags, fd, off>>PAGE_SHIFT);
2066+ return do_mmap2(addr, len, prot, flags, fd, off>>PAGE_SHIFT);
2067 }
2068
2069 asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
2070@@ -50,7 +74,7 @@ asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
2071
2072 pgoff >>= PAGE_SHIFT - 12;
2073
2074- return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
2075+ return do_mmap2(addr, len, prot, flags, fd, pgoff);
2076 }
2077
2078 /*
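mmap2 takes its file offset in fixed 4 KiB units regardless of the kernel's PAGE_SIZE, which is why the wrapper above shifts pgoff by PAGE_SHIFT - 12 before mapping. A small sketch of the conversion (the page shifts are chosen for illustration):

```c
#include <stdio.h>

/* Convert an mmap2 offset (4 KiB units) into PAGE_SIZE units. */
static unsigned long mmap2_units_to_pages(unsigned long pgoff,
                                          unsigned page_shift)
{
    return pgoff >> (page_shift - 12);
}

int main(void)
{
    /* A 1 MiB file offset is 256 units of 4 KiB. */
    unsigned long units = (1024 * 1024) / 4096;

    printf("%lu pages with 4 KiB pages\n",  mmap2_units_to_pages(units, 12));
    printf("%lu pages with 64 KiB pages\n", mmap2_units_to_pages(units, 16));
    return 0;
}
```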
2079diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
2080index afeb710..d2984fa 100644
2081--- a/arch/sh/mm/mmap.c
2082+++ b/arch/sh/mm/mmap.c
2083@@ -54,8 +54,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2084 /* We do not accept a shared mapping if it would violate
2085 * cache aliasing constraints.
2086 */
2087- if ((flags & MAP_SHARED) &&
2088- ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
2089+ if ((flags & MAP_SHARED) && (addr & shm_align_mask))
2090 return -EINVAL;
2091 return addr;
2092 }
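The dropped condition above is the VIPT cache-aliasing rule: a MAP_SHARED hint address is only acceptable when it lands on the same cache colour as the file offset it maps, i.e. addr and pgoff << PAGE_SHIFT agree modulo the aliasing granularity. A standalone sketch (the mask value is illustrative; on sh it derives from the cache geometry):

```c
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12
static const unsigned long shm_align_mask = 0x3fffUL; /* 16 KiB - 1 */

/* True if addr has the same cache colour as the mapped file offset. */
static bool colour_ok(unsigned long addr, unsigned long pgoff)
{
    return ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask) == 0;
}

int main(void)
{
    printf("%d\n", colour_ok(0x10002000UL, 2)); /* 1: same colour     */
    printf("%d\n", colour_ok(0x10002000UL, 1)); /* 0: colour mismatch */
    return 0;
}
```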
2093diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
2094index 113225b..dfe272d 100644
2095--- a/arch/sparc/Makefile
2096+++ b/arch/sparc/Makefile
2097@@ -27,7 +27,6 @@ AS := $(AS) -32
2098 LDFLAGS := -m elf32_sparc
2099 CHECKFLAGS += -D__sparc__
2100 export BITS := 32
2101-UTS_MACHINE := sparc
2102
2103 #KBUILD_CFLAGS += -g -pipe -fcall-used-g5 -fcall-used-g7
2104 KBUILD_CFLAGS += -m32 -pipe -mno-fpu -fcall-used-g5 -fcall-used-g7
2105@@ -47,7 +46,6 @@ CHECKFLAGS += -D__sparc__ -D__sparc_v9__ -D__arch64__ -m64
2106
2107 LDFLAGS := -m elf64_sparc
2108 export BITS := 64
2109-UTS_MACHINE := sparc64
2110
2111 KBUILD_CFLAGS += -m64 -pipe -mno-fpu -mcpu=ultrasparc -mcmodel=medlow \
2112 -ffixed-g4 -ffixed-g5 -fcall-used-g7 -Wno-sign-compare \
2113diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
2114index 9968085..d42e393 100644
2115--- a/arch/sparc/include/asm/elf_64.h
2116+++ b/arch/sparc/include/asm/elf_64.h
2117@@ -196,10 +196,17 @@ static inline unsigned int sparc64_elf_hwcap(void)
2118 #define ELF_PLATFORM (NULL)
2119
2120 #define SET_PERSONALITY(ex) \
2121-do { if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
2122- set_thread_flag(TIF_32BIT); \
2123+do { unsigned long new_flags = current_thread_info()->flags; \
2124+ new_flags &= _TIF_32BIT; \
2125+ if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
2126+ new_flags |= _TIF_32BIT; \
2127 else \
2128- clear_thread_flag(TIF_32BIT); \
2129+ new_flags &= ~_TIF_32BIT; \
2130+ if ((current_thread_info()->flags & _TIF_32BIT) \
2131+ != new_flags) \
2132+ set_thread_flag(TIF_ABI_PENDING); \
2133+ else \
2134+ clear_thread_flag(TIF_ABI_PENDING); \
2135 /* flush_thread will update pgd cache */ \
2136 if (personality(current->personality) != PER_LINUX32) \
2137 set_personality(PER_LINUX | \
2138diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
2139index f78ad9a..1b45a7b 100644
2140--- a/arch/sparc/include/asm/thread_info_64.h
2141+++ b/arch/sparc/include/asm/thread_info_64.h
2142@@ -227,11 +227,12 @@ register struct thread_info *current_thread_info_reg asm("g6");
2143 /* flag bit 8 is available */
2144 #define TIF_SECCOMP 9 /* secure computing */
2145 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
2146+/* flag bit 11 is available */
2147 /* NOTE: Thread flags >= 12 should be ones we have no interest
2148 * in using in assembly, else we can't use the mask as
2149 * an immediate value in instructions such as andcc.
2150 */
2151-/* flag bit 12 is available */
2152+#define TIF_ABI_PENDING 12
2153 #define TIF_MEMDIE 13
2154 #define TIF_POLLING_NRFLAG 14
2155 #define TIF_FREEZE 15 /* is freezing for suspend */
2156@@ -245,6 +246,7 @@ register struct thread_info *current_thread_info_reg asm("g6");
2157 #define _TIF_32BIT (1<<TIF_32BIT)
2158 #define _TIF_SECCOMP (1<<TIF_SECCOMP)
2159 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
2160+#define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING)
2161 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
2162 #define _TIF_FREEZE (1<<TIF_FREEZE)
2163
2164diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
2165index e0ba898..cb3c72c 100644
2166--- a/arch/sparc/kernel/ldc.c
2167+++ b/arch/sparc/kernel/ldc.c
2168@@ -1242,13 +1242,13 @@ int ldc_bind(struct ldc_channel *lp, const char *name)
2169 snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name);
2170
2171 err = request_irq(lp->cfg.rx_irq, ldc_rx,
2172- IRQF_SAMPLE_RANDOM | IRQF_DISABLED,
2173+ IRQF_SAMPLE_RANDOM | IRQF_DISABLED | IRQF_SHARED,
2174 lp->rx_irq_name, lp);
2175 if (err)
2176 return err;
2177
2178 err = request_irq(lp->cfg.tx_irq, ldc_tx,
2179- IRQF_SAMPLE_RANDOM | IRQF_DISABLED,
2180+ IRQF_SAMPLE_RANDOM | IRQF_DISABLED | IRQF_SHARED,
2181 lp->tx_irq_name, lp);
2182 if (err) {
2183 free_irq(lp->cfg.rx_irq, lp);
2184diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
2185index 4771274..b129611 100644
2186--- a/arch/sparc/kernel/nmi.c
2187+++ b/arch/sparc/kernel/nmi.c
2188@@ -96,6 +96,7 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
2189 int cpu = smp_processor_id();
2190
2191 clear_softint(1 << irq);
2192+ pcr_ops->write(PCR_PIC_PRIV);
2193
2194 local_cpu_data().__nmi_count++;
2195
2196@@ -104,8 +105,6 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
2197 if (notify_die(DIE_NMI, "nmi", regs, 0,
2198 pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP)
2199 touched = 1;
2200- else
2201- pcr_ops->write(PCR_PIC_PRIV);
2202
2203 sum = kstat_irqs_cpu(0, cpu);
2204 if (__get_cpu_var(nmi_touch)) {
2205diff --git a/arch/sparc/kernel/of_device_64.c b/arch/sparc/kernel/of_device_64.c
2206index 0a6f2d1..881947e 100644
2207--- a/arch/sparc/kernel/of_device_64.c
2208+++ b/arch/sparc/kernel/of_device_64.c
2209@@ -104,19 +104,9 @@ static int of_bus_pci_map(u32 *addr, const u32 *range,
2210 int i;
2211
2212 /* Check address type match */
2213- if (!((addr[0] ^ range[0]) & 0x03000000))
2214- goto type_match;
2215-
2216- /* Special exception, we can map a 64-bit address into
2217- * a 32-bit range.
2218- */
2219- if ((addr[0] & 0x03000000) == 0x03000000 &&
2220- (range[0] & 0x03000000) == 0x02000000)
2221- goto type_match;
2222-
2223- return -EINVAL;
2224+ if ((addr[0] ^ range[0]) & 0x03000000)
2225+ return -EINVAL;
2226
2227-type_match:
2228 if (of_out_of_range(addr + 1, range + 1, range + na + pna,
2229 na - 1, ns))
2230 return -EINVAL;
2231diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
2232index 198fb4e..fa5936e 100644
2233--- a/arch/sparc/kernel/perf_event.c
2234+++ b/arch/sparc/kernel/perf_event.c
2235@@ -986,17 +986,6 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
2236 data.addr = 0;
2237
2238 cpuc = &__get_cpu_var(cpu_hw_events);
2239-
2240- /* If the PMU has the TOE IRQ enable bits, we need to do a
2241- * dummy write to the %pcr to clear the overflow bits and thus
2242- * the interrupt.
2243- *
2244- * Do this before we peek at the counters to determine
2245- * overflow so we don't lose any events.
2246- */
2247- if (sparc_pmu->irq_bit)
2248- pcr_ops->write(cpuc->pcr);
2249-
2250 for (idx = 0; idx < MAX_HWEVENTS; idx++) {
2251 struct perf_event *event = cpuc->events[idx];
2252 struct hw_perf_event *hwc;
2253diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
2254index c3f1cce..18d6785 100644
2255--- a/arch/sparc/kernel/process_64.c
2256+++ b/arch/sparc/kernel/process_64.c
2257@@ -365,6 +365,14 @@ void flush_thread(void)
2258 struct thread_info *t = current_thread_info();
2259 struct mm_struct *mm;
2260
2261+ if (test_ti_thread_flag(t, TIF_ABI_PENDING)) {
2262+ clear_ti_thread_flag(t, TIF_ABI_PENDING);
2263+ if (test_ti_thread_flag(t, TIF_32BIT))
2264+ clear_ti_thread_flag(t, TIF_32BIT);
2265+ else
2266+ set_ti_thread_flag(t, TIF_32BIT);
2267+ }
2268+
2269 mm = t->task->mm;
2270 if (mm)
2271 tsb_context_switch(mm);
2272diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
2273index 3a82e65..03035c8 100644
2274--- a/arch/sparc/kernel/sys_sparc_32.c
2275+++ b/arch/sparc/kernel/sys_sparc_32.c
2276@@ -45,8 +45,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2277 /* We do not accept a shared mapping if it would violate
2278 * cache aliasing constraints.
2279 */
2280- if ((flags & MAP_SHARED) &&
2281- ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
2282+ if ((flags & MAP_SHARED) && (addr & (SHMLBA - 1)))
2283 return -EINVAL;
2284 return addr;
2285 }
2286@@ -80,6 +79,15 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2287 }
2288 }
2289
2290+asmlinkage unsigned long sparc_brk(unsigned long brk)
2291+{
2292+ if(ARCH_SUN4C) {
2293+ if ((brk & 0xe0000000) != (current->mm->brk & 0xe0000000))
2294+ return current->mm->brk;
2295+ }
2296+ return sys_brk(brk);
2297+}
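sparc_brk() above refuses any break that would leave the current sun4c 512 MiB region, which is selected by the top three address bits (the 0xe0000000 mask). A sketch of that guard, with example addresses:

```c
#include <stdio.h>

/* The heap may not move across a 512 MiB sun4c region boundary;
 * a crossing request leaves the break unchanged. */
static unsigned long sun4c_brk(unsigned long cur_brk, unsigned long new_brk)
{
    if ((new_brk & 0xe0000000UL) != (cur_brk & 0xe0000000UL))
        return cur_brk;             /* refused: crosses a region */
    return new_brk;                 /* would be sys_brk(new_brk) */
}

int main(void)
{
    printf("%#lx\n", sun4c_brk(0x1fff0000UL, 0x20010000UL)); /* refused */
    printf("%#lx\n", sun4c_brk(0x10000000UL, 0x10020000UL)); /* granted */
    return 0;
}
```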
2298+
2299 /*
2300 * sys_pipe() is the normal C calling standard for creating
2301 * a pipe. It's not the way unix traditionally does this, though.
2302@@ -226,6 +234,31 @@ int sparc_mmap_check(unsigned long addr, unsigned long len)
2303 }
2304
2305 /* Linux version of mmap */
2306+static unsigned long do_mmap2(unsigned long addr, unsigned long len,
2307+ unsigned long prot, unsigned long flags, unsigned long fd,
2308+ unsigned long pgoff)
2309+{
2310+ struct file * file = NULL;
2311+ unsigned long retval = -EBADF;
2312+
2313+ if (!(flags & MAP_ANONYMOUS)) {
2314+ file = fget(fd);
2315+ if (!file)
2316+ goto out;
2317+ }
2318+
2319+ len = PAGE_ALIGN(len);
2320+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
2321+
2322+ down_write(&current->mm->mmap_sem);
2323+ retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
2324+ up_write(&current->mm->mmap_sem);
2325+
2326+ if (file)
2327+ fput(file);
2328+out:
2329+ return retval;
2330+}
2331
2332 asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len,
2333 unsigned long prot, unsigned long flags, unsigned long fd,
2334@@ -233,16 +266,14 @@ asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len,
2335 {
2336 /* Make sure the shift for mmap2 is constant (12), no matter what PAGE_SIZE
2337 we have. */
2338- return sys_mmap_pgoff(addr, len, prot, flags, fd,
2339- pgoff >> (PAGE_SHIFT - 12));
2340+ return do_mmap2(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT - 12));
2341 }
2342
2343 asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
2344 unsigned long prot, unsigned long flags, unsigned long fd,
2345 unsigned long off)
2346 {
2347- /* no alignment check? */
2348- return sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
2349+ return do_mmap2(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
2350 }
2351
2352 long sparc_remap_file_pages(unsigned long start, unsigned long size,
2353@@ -256,6 +287,27 @@ long sparc_remap_file_pages(unsigned long start, unsigned long size,
2354 (pgoff >> (PAGE_SHIFT - 12)), flags);
2355 }
2356
2357+extern unsigned long do_mremap(unsigned long addr,
2358+ unsigned long old_len, unsigned long new_len,
2359+ unsigned long flags, unsigned long new_addr);
2360+
2361+asmlinkage unsigned long sparc_mremap(unsigned long addr,
2362+ unsigned long old_len, unsigned long new_len,
2363+ unsigned long flags, unsigned long new_addr)
2364+{
2365+ unsigned long ret = -EINVAL;
2366+
2367+ if (unlikely(sparc_mmap_check(addr, old_len)))
2368+ goto out;
2369+ if (unlikely(sparc_mmap_check(new_addr, new_len)))
2370+ goto out;
2371+ down_write(&current->mm->mmap_sem);
2372+ ret = do_mremap(addr, old_len, new_len, flags, new_addr);
2373+ up_write(&current->mm->mmap_sem);
2374+out:
2375+ return ret;
2376+}
2377+
2378 /* we come to here via sys_nis_syscall so it can setup the regs argument */
2379 asmlinkage unsigned long
2380 c_sys_nis_syscall (struct pt_regs *regs)
2381diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
2382index cfa0e19..e2d1024 100644
2383--- a/arch/sparc/kernel/sys_sparc_64.c
2384+++ b/arch/sparc/kernel/sys_sparc_64.c
2385@@ -317,14 +317,10 @@ bottomup:
2386 unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags)
2387 {
2388 unsigned long align_goal, addr = -ENOMEM;
2389- unsigned long (*get_area)(struct file *, unsigned long,
2390- unsigned long, unsigned long, unsigned long);
2391-
2392- get_area = current->mm->get_unmapped_area;
2393
2394 if (flags & MAP_FIXED) {
2395 /* Ok, don't mess with it. */
2396- return get_area(NULL, orig_addr, len, pgoff, flags);
2397+ return get_unmapped_area(NULL, orig_addr, len, pgoff, flags);
2398 }
2399 flags &= ~MAP_SHARED;
2400
2401@@ -337,7 +333,7 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
2402 align_goal = (64UL * 1024);
2403
2404 do {
2405- addr = get_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags);
2406+ addr = get_unmapped_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags);
2407 if (!(addr & ~PAGE_MASK)) {
2408 addr = (addr + (align_goal - 1UL)) & ~(align_goal - 1UL);
2409 break;
2410@@ -355,7 +351,7 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
2411 * be obtained.
2412 */
2413 if (addr & ~PAGE_MASK)
2414- addr = get_area(NULL, orig_addr, len, pgoff, flags);
2415+ addr = get_unmapped_area(NULL, orig_addr, len, pgoff, flags);
2416
2417 return addr;
2418 }
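The loop above over-allocates the search by align_goal - PAGE_SIZE bytes and then rounds the found address up, which guarantees an aligned block of the requested length inside the hole. The rounding idiom in isolation (values illustrative):

```c
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Round addr up to the next multiple of align_goal (a power of two). */
static unsigned long align_up(unsigned long addr, unsigned long align_goal)
{
    return (addr + (align_goal - 1UL)) & ~(align_goal - 1UL);
}

int main(void)
{
    unsigned long goal = 4UL * 1024 * 1024;   /* 4 MiB alignment goal  */
    unsigned long len  = 2 * PAGE_SIZE;       /* hypothetical request  */
    unsigned long hole = 0x701000UL;          /* unaligned hole start  */

    /* Searching for len + (goal - PAGE_SIZE) bytes guarantees that the
     * rounded-up start plus len still fits inside the found hole. */
    printf("search for %lu bytes\n", len + goal - PAGE_SIZE);
    printf("hole %#lx -> aligned %#lx\n", hole, align_up(hole, goal));
    return 0;
}
```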
2419@@ -403,6 +399,18 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
2420 }
2421 }
2422
2423+SYSCALL_DEFINE1(sparc_brk, unsigned long, brk)
2424+{
2425+ /* People could try to be nasty and use ta 0x6d in 32bit programs */
2426+ if (test_thread_flag(TIF_32BIT) && brk >= STACK_TOP32)
2427+ return current->mm->brk;
2428+
2429+ if (unlikely(straddles_64bit_va_hole(current->mm->brk, brk)))
2430+ return current->mm->brk;
2431+
2432+ return sys_brk(brk);
2433+}
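The 64-bit sparc_brk() above rejects two cases: a compat task growing its heap past the 32-bit address space, and a heap that would straddle the unmapped hole in the middle of the sparc64 VA layout. A sketch with stand-in constants (the real VA_EXCLUDE_START/END and straddles_64bit_va_hole() differ in detail):

```c
#include <stdbool.h>
#include <stdio.h>

#define STACK_TOP32      0xf0000000UL
#define VA_EXCLUDE_START 0x0000080000000000UL  /* illustrative */
#define VA_EXCLUDE_END   0xfffff80000000000UL  /* illustrative */

/* Simplified: assumes start <= end. */
static bool straddles_va_hole(unsigned long start, unsigned long end)
{
    return start < VA_EXCLUDE_END && end >= VA_EXCLUDE_START;
}

static unsigned long sparc64_brk(unsigned long cur_brk, unsigned long brk,
                                 bool task_32bit)
{
    /* A 32-bit task must not push the break above its address space. */
    if (task_32bit && brk >= STACK_TOP32)
        return cur_brk;
    /* The heap must not grow into the unmapped middle of the VA space. */
    if (straddles_va_hole(cur_brk, brk))
        return cur_brk;
    return brk; /* would be sys_brk(brk) */
}

int main(void)
{
    printf("%#lx\n", sparc64_brk(0x10000000UL, 0xf8000000UL, true));  /* refused */
    printf("%#lx\n", sparc64_brk(0x10000000UL, 0x10100000UL, false)); /* granted */
    return 0;
}
```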
2434+
2435 /*
2436 * sys_pipe() is the normal C calling standard for creating
2437 * a pipe. It's not the way unix traditionally does this, though.
2438@@ -560,13 +568,23 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
2439 unsigned long, prot, unsigned long, flags, unsigned long, fd,
2440 unsigned long, off)
2441 {
2442- unsigned long retval = -EINVAL;
2443+ struct file * file = NULL;
2444+ unsigned long retval = -EBADF;
2445
2446- if ((off + PAGE_ALIGN(len)) < off)
2447- goto out;
2448- if (off & ~PAGE_MASK)
2449- goto out;
2450- retval = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
2451+ if (!(flags & MAP_ANONYMOUS)) {
2452+ file = fget(fd);
2453+ if (!file)
2454+ goto out;
2455+ }
2456+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
2457+ len = PAGE_ALIGN(len);
2458+
2459+ down_write(&current->mm->mmap_sem);
2460+ retval = do_mmap(file, addr, len, prot, flags, off);
2461+ up_write(&current->mm->mmap_sem);
2462+
2463+ if (file)
2464+ fput(file);
2465 out:
2466 return retval;
2467 }
2468@@ -596,6 +614,12 @@ SYSCALL_DEFINE5(64_mremap, unsigned long, addr, unsigned long, old_len,
2469
2470 if (test_thread_flag(TIF_32BIT))
2471 goto out;
2472+ if (unlikely(new_len >= VA_EXCLUDE_START))
2473+ goto out;
2474+ if (unlikely(sparc_mmap_check(addr, old_len)))
2475+ goto out;
2476+ if (unlikely(sparc_mmap_check(new_addr, new_len)))
2477+ goto out;
2478
2479 down_write(&current->mm->mmap_sem);
2480 ret = do_mremap(addr, old_len, new_len, flags, new_addr);
2481diff --git a/arch/sparc/kernel/systbls.h b/arch/sparc/kernel/systbls.h
2482index d2f999a..a63c5d2 100644
2483--- a/arch/sparc/kernel/systbls.h
2484+++ b/arch/sparc/kernel/systbls.h
2485@@ -9,6 +9,7 @@
2486 struct new_utsname;
2487
2488 extern asmlinkage unsigned long sys_getpagesize(void);
2489+extern asmlinkage unsigned long sparc_brk(unsigned long brk);
2490 extern asmlinkage long sparc_pipe(struct pt_regs *regs);
2491 extern asmlinkage long sys_ipc(unsigned int call, int first,
2492 unsigned long second,
2493diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
2494index 14f950a..0f1658d 100644
2495--- a/arch/sparc/kernel/systbls_32.S
2496+++ b/arch/sparc/kernel/systbls_32.S
2497@@ -19,7 +19,7 @@ sys_call_table:
2498 /*0*/ .long sys_restart_syscall, sys_exit, sys_fork, sys_read, sys_write
2499 /*5*/ .long sys_open, sys_close, sys_wait4, sys_creat, sys_link
2500 /*10*/ .long sys_unlink, sunos_execv, sys_chdir, sys_chown16, sys_mknod
2501-/*15*/ .long sys_chmod, sys_lchown16, sys_brk, sys_nis_syscall, sys_lseek
2502+/*15*/ .long sys_chmod, sys_lchown16, sparc_brk, sys_nis_syscall, sys_lseek
2503 /*20*/ .long sys_getpid, sys_capget, sys_capset, sys_setuid16, sys_getuid16
2504 /*25*/ .long sys_vmsplice, sys_ptrace, sys_alarm, sys_sigaltstack, sys_pause
2505 /*30*/ .long sys_utime, sys_lchown, sys_fchown, sys_access, sys_nice
2506@@ -67,7 +67,7 @@ sys_call_table:
2507 /*235*/ .long sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys_mlockall
2508 /*240*/ .long sys_munlockall, sys_sched_setparam, sys_sched_getparam, sys_sched_setscheduler, sys_sched_getscheduler
2509 /*245*/ .long sys_sched_yield, sys_sched_get_priority_max, sys_sched_get_priority_min, sys_sched_rr_get_interval, sys_nanosleep
2510-/*250*/ .long sys_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nfsservctl
2511+/*250*/ .long sparc_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nfsservctl
2512 /*255*/ .long sys_sync_file_range, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep
2513 /*260*/ .long sys_sched_getaffinity, sys_sched_setaffinity, sys_timer_settime, sys_timer_gettime, sys_timer_getoverrun
2514 /*265*/ .long sys_timer_delete, sys_timer_create, sys_nis_syscall, sys_io_setup, sys_io_destroy
2515diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
2516index f63c871..009825f 100644
2517--- a/arch/sparc/kernel/systbls_64.S
2518+++ b/arch/sparc/kernel/systbls_64.S
2519@@ -21,7 +21,7 @@ sys_call_table32:
2520 /*0*/ .word sys_restart_syscall, sys32_exit, sys_fork, sys_read, sys_write
2521 /*5*/ .word sys32_open, sys_close, sys32_wait4, sys32_creat, sys_link
2522 /*10*/ .word sys_unlink, sunos_execv, sys_chdir, sys_chown16, sys32_mknod
2523-/*15*/ .word sys_chmod, sys_lchown16, sys_brk, sys32_perfctr, sys32_lseek
2524+/*15*/ .word sys_chmod, sys_lchown16, sys_sparc_brk, sys32_perfctr, sys32_lseek
2525 /*20*/ .word sys_getpid, sys_capget, sys_capset, sys_setuid16, sys_getuid16
2526 /*25*/ .word sys32_vmsplice, compat_sys_ptrace, sys_alarm, sys32_sigaltstack, sys_pause
2527 /*30*/ .word compat_sys_utime, sys_lchown, sys_fchown, sys32_access, sys32_nice
2528@@ -96,7 +96,7 @@ sys_call_table:
2529 /*0*/ .word sys_restart_syscall, sparc_exit, sys_fork, sys_read, sys_write
2530 /*5*/ .word sys_open, sys_close, sys_wait4, sys_creat, sys_link
2531 /*10*/ .word sys_unlink, sys_nis_syscall, sys_chdir, sys_chown, sys_mknod
2532-/*15*/ .word sys_chmod, sys_lchown, sys_brk, sys_perfctr, sys_lseek
2533+/*15*/ .word sys_chmod, sys_lchown, sys_sparc_brk, sys_perfctr, sys_lseek
2534 /*20*/ .word sys_getpid, sys_capget, sys_capset, sys_setuid, sys_getuid
2535 /*25*/ .word sys_vmsplice, sys_ptrace, sys_alarm, sys_sigaltstack, sys_nis_syscall
2536 /*30*/ .word sys_utime, sys_nis_syscall, sys_nis_syscall, sys_access, sys_nice
2537diff --git a/arch/sparc/lib/mcount.S b/arch/sparc/lib/mcount.S
2538index 24b8b12..7ce9c65 100644
2539--- a/arch/sparc/lib/mcount.S
2540+++ b/arch/sparc/lib/mcount.S
2541@@ -64,9 +64,8 @@ mcount:
2542 2: sethi %hi(softirq_stack), %g3
2543 or %g3, %lo(softirq_stack), %g3
2544 ldx [%g3 + %g1], %g7
2545- sub %g7, STACK_BIAS, %g7
2546 cmp %sp, %g7
2547- bleu,pt %xcc, 3f
2548+ bleu,pt %xcc, 2f
2549 sethi %hi(THREAD_SIZE), %g3
2550 add %g7, %g3, %g7
2551 cmp %sp, %g7
2552@@ -76,7 +75,7 @@ mcount:
2553 * again, we are already trying to output the stack overflow
2554 * message.
2555 */
2556-3: sethi %hi(ovstack), %g7 ! cant move to panic stack fast enough
2557+ sethi %hi(ovstack), %g7 ! cant move to panic stack fast enough
2558 or %g7, %lo(ovstack), %g7
2559 add %g7, OVSTACKSIZE, %g3
2560 sub %g3, STACK_BIAS + 192, %g3
2561diff --git a/arch/um/kernel/syscall.c b/arch/um/kernel/syscall.c
2562index cccab85..a4625c7 100644
2563--- a/arch/um/kernel/syscall.c
2564+++ b/arch/um/kernel/syscall.c
2565@@ -8,7 +8,6 @@
2566 #include "linux/mm.h"
2567 #include "linux/sched.h"
2568 #include "linux/utsname.h"
2569-#include "linux/syscalls.h"
2570 #include "asm/current.h"
2571 #include "asm/mman.h"
2572 #include "asm/uaccess.h"
2573@@ -38,6 +37,31 @@ long sys_vfork(void)
2574 return ret;
2575 }
2576
2577+/* common code for old and new mmaps */
2578+long sys_mmap2(unsigned long addr, unsigned long len,
2579+ unsigned long prot, unsigned long flags,
2580+ unsigned long fd, unsigned long pgoff)
2581+{
2582+ long error = -EBADF;
2583+ struct file * file = NULL;
2584+
2585+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
2586+ if (!(flags & MAP_ANONYMOUS)) {
2587+ file = fget(fd);
2588+ if (!file)
2589+ goto out;
2590+ }
2591+
2592+ down_write(&current->mm->mmap_sem);
2593+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
2594+ up_write(&current->mm->mmap_sem);
2595+
2596+ if (file)
2597+ fput(file);
2598+ out:
2599+ return error;
2600+}
2601+
2602 long old_mmap(unsigned long addr, unsigned long len,
2603 unsigned long prot, unsigned long flags,
2604 unsigned long fd, unsigned long offset)
2605@@ -46,7 +70,7 @@ long old_mmap(unsigned long addr, unsigned long len,
2606 if (offset & ~PAGE_MASK)
2607 goto out;
2608
2609- err = sys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
2610+ err = sys_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
2611 out:
2612 return err;
2613 }
2614diff --git a/arch/um/sys-i386/shared/sysdep/syscalls.h b/arch/um/sys-i386/shared/sysdep/syscalls.h
2615index e778767..9056981 100644
2616--- a/arch/um/sys-i386/shared/sysdep/syscalls.h
2617+++ b/arch/um/sys-i386/shared/sysdep/syscalls.h
2618@@ -20,3 +20,7 @@ extern syscall_handler_t *sys_call_table[];
2619 #define EXECUTE_SYSCALL(syscall, regs) \
2620 ((long (*)(struct syscall_args)) \
2621 (*sys_call_table[syscall]))(SYSCALL_ARGS(&regs->regs))
2622+
2623+extern long sys_mmap2(unsigned long addr, unsigned long len,
2624+ unsigned long prot, unsigned long flags,
2625+ unsigned long fd, unsigned long pgoff);
2626diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig 97diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
2627index 4fdb669..e2cd95e 100644
98index 72ace95..e2cd95e 100644
2628--- a/arch/x86/Kconfig 99--- a/arch/x86/Kconfig
2629+++ b/arch/x86/Kconfig 100+++ b/arch/x86/Kconfig
2630@@ -984,6 +984,12 @@ config X86_CPUID
101@@ -2092,3 +2092,5 @@ source "crypto/Kconfig"
2631 with major 203 and minors 0 to 31 for /dev/cpu/0/cpuid to
2632 /dev/cpu/31/cpuid.
2633
2634+config X86_CPU_DEBUG
2635+ tristate "/sys/kernel/debug/x86/cpu/* - CPU Debug support"
2636+ ---help---
2637+ If you select this option, this will provide various x86 CPUs
2638+ information through debugfs.
2639+
2640 choice
2641 prompt "High Memory Support"
2642 default HIGHMEM4G if !X86_NUMAQ
2643@@ -2086,3 +2092,5 @@ source "crypto/Kconfig"
2644 source "arch/x86/kvm/Kconfig" 102 source "arch/x86/kvm/Kconfig"
2645 103
2646 source "lib/Kconfig" 104 source "lib/Kconfig"
2647+ 105+
2648+source "litmus/Kconfig" 106+source "litmus/Kconfig"
2649diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
2650index f2824fb..2649840 100644
2651--- a/arch/x86/Kconfig.cpu
2652+++ b/arch/x86/Kconfig.cpu
2653@@ -400,7 +400,7 @@ config X86_TSC
2654
2655 config X86_CMPXCHG64
2656 def_bool y
2657- depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM
2658+ depends on !M386 && !M486
2659
2660 # this should be set for all -march=.. options where the compiler
2661 # generates cmov.
2662diff --git a/arch/x86/Makefile_32.cpu b/arch/x86/Makefile_32.cpu
2663index 1937226..30e9a26 100644
2664--- a/arch/x86/Makefile_32.cpu
2665+++ b/arch/x86/Makefile_32.cpu
2666@@ -46,13 +46,6 @@ cflags-$(CONFIG_MGEODEGX1) += -march=pentium-mmx
2667 # cpu entries
2668 cflags-$(CONFIG_X86_GENERIC) += $(call tune,generic,$(call tune,i686))
2669
2670-# Work around the pentium-mmx code generator madness of gcc4.4.x which
2671-# does stack alignment by generating horrible code _before_ the mcount
2672-# prologue (push %ebp, mov %esp, %ebp) which breaks the function graph
2673-# tracer assumptions. For i686, generic, core2 this is set by the
2674-# compiler anyway
2675-cflags-$(CONFIG_FUNCTION_GRAPH_TRACER) += $(call cc-option,-maccumulate-outgoing-args)
2676-
2677 # Bug fix for binutils: this option is required in order to keep
2678 # binutils from generating NOPL instructions against our will.
2679 ifneq ($(CONFIG_X86_P6_NOP),y)
2680diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
2681index f9f4724..2a4d073 100644
2682--- a/arch/x86/ia32/ia32_aout.c
2683+++ b/arch/x86/ia32/ia32_aout.c
2684@@ -308,16 +308,15 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs)
2685 if (retval)
2686 return retval;
2687
2688- /* OK, This is the point of no return */
2689- set_personality(PER_LINUX);
2690- set_thread_flag(TIF_IA32);
2691-
2692- setup_new_exec(bprm);
2693-
2694 regs->cs = __USER32_CS;
2695 regs->r8 = regs->r9 = regs->r10 = regs->r11 = regs->r12 =
2696 regs->r13 = regs->r14 = regs->r15 = 0;
2697
2698+ /* OK, This is the point of no return */
2699+ set_personality(PER_LINUX);
2700+ set_thread_flag(TIF_IA32);
2701+ clear_thread_flag(TIF_ABI_PENDING);
2702+
2703 current->mm->end_code = ex.a_text +
2704 (current->mm->start_code = N_TXTADDR(ex));
2705 current->mm->end_data = ex.a_data +
2706diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
2707index 5294d84..581b056 100644
2708--- a/arch/x86/ia32/ia32entry.S
2709+++ b/arch/x86/ia32/ia32entry.S
2710@@ -696,7 +696,7 @@ ia32_sys_call_table:
2711 .quad quiet_ni_syscall /* streams2 */
2712 .quad stub32_vfork /* 190 */
2713 .quad compat_sys_getrlimit
2714- .quad sys_mmap_pgoff
2715+ .quad sys32_mmap2
2716 .quad sys32_truncate64
2717 .quad sys32_ftruncate64
2718 .quad sys32_stat64 /* 195 */
2719diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
2720index 016218c..9f55271 100644
2721--- a/arch/x86/ia32/sys_ia32.c
2722+++ b/arch/x86/ia32/sys_ia32.c
2723@@ -155,6 +155,9 @@ struct mmap_arg_struct {
2724 asmlinkage long sys32_mmap(struct mmap_arg_struct __user *arg)
2725 {
2726 struct mmap_arg_struct a;
2727+ struct file *file = NULL;
2728+ unsigned long retval;
2729+ struct mm_struct *mm ;
2730
2731 if (copy_from_user(&a, arg, sizeof(a)))
2732 return -EFAULT;
2733@@ -162,8 +165,22 @@ asmlinkage long sys32_mmap(struct mmap_arg_struct __user *arg)
2734 if (a.offset & ~PAGE_MASK)
2735 return -EINVAL;
2736
2737- return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
2738+ if (!(a.flags & MAP_ANONYMOUS)) {
2739+ file = fget(a.fd);
2740+ if (!file)
2741+ return -EBADF;
2742+ }
2743+
2744+ mm = current->mm;
2745+ down_write(&mm->mmap_sem);
2746+ retval = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags,
2747 a.offset>>PAGE_SHIFT);
2748+ if (file)
2749+ fput(file);
2750+
2751+ up_write(&mm->mmap_sem);
2752+
2753+ return retval;
2754 }
2755
2756 asmlinkage long sys32_mprotect(unsigned long start, size_t len,
2757@@ -522,6 +539,30 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
2758 return ret;
2759 }
2760
2761+asmlinkage long sys32_mmap2(unsigned long addr, unsigned long len,
2762+ unsigned long prot, unsigned long flags,
2763+ unsigned long fd, unsigned long pgoff)
2764+{
2765+ struct mm_struct *mm = current->mm;
2766+ unsigned long error;
2767+ struct file *file = NULL;
2768+
2769+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
2770+ if (!(flags & MAP_ANONYMOUS)) {
2771+ file = fget(fd);
2772+ if (!file)
2773+ return -EBADF;
2774+ }
2775+
2776+ down_write(&mm->mmap_sem);
2777+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
2778+ up_write(&mm->mmap_sem);
2779+
2780+ if (file)
2781+ fput(file);
2782+ return error;
2783+}
2784+
2785 asmlinkage long sys32_olduname(struct oldold_utsname __user *name)
2786 {
2787 char *arch = "x86_64";
2788diff --git a/arch/x86/include/asm/amd_iommu.h b/arch/x86/include/asm/amd_iommu.h
2789index 18aa3f8..4b18089 100644
2790--- a/arch/x86/include/asm/amd_iommu.h
2791+++ b/arch/x86/include/asm/amd_iommu.h
2792@@ -32,7 +32,6 @@ extern void amd_iommu_flush_all_domains(void);
2793 extern void amd_iommu_flush_all_devices(void);
2794 extern void amd_iommu_shutdown(void);
2795 extern void amd_iommu_apply_erratum_63(u16 devid);
2796-extern void amd_iommu_init_api(void);
2797 #else
2798 static inline int amd_iommu_init(void) { return -ENODEV; }
2799 static inline void amd_iommu_detect(void) { }
2800diff --git a/arch/x86/include/asm/cpu_debug.h b/arch/x86/include/asm/cpu_debug.h
2801new file mode 100644
2802index 0000000..d96c1ee
2803--- /dev/null
2804+++ b/arch/x86/include/asm/cpu_debug.h
2805@@ -0,0 +1,127 @@
2806+#ifndef _ASM_X86_CPU_DEBUG_H
2807+#define _ASM_X86_CPU_DEBUG_H
2808+
2809+/*
2810+ * CPU x86 architecture debug
2811+ *
2812+ * Copyright(C) 2009 Jaswinder Singh Rajput
2813+ */
2814+
2815+/* Register flags */
2816+enum cpu_debug_bit {
2817+/* Model Specific Registers (MSRs) */
2818+ CPU_MC_BIT, /* Machine Check */
2819+ CPU_MONITOR_BIT, /* Monitor */
2820+ CPU_TIME_BIT, /* Time */
2821+ CPU_PMC_BIT, /* Performance Monitor */
2822+ CPU_PLATFORM_BIT, /* Platform */
2823+ CPU_APIC_BIT, /* APIC */
2824+ CPU_POWERON_BIT, /* Power-on */
2825+ CPU_CONTROL_BIT, /* Control */
2826+ CPU_FEATURES_BIT, /* Features control */
2827+ CPU_LBRANCH_BIT, /* Last Branch */
2828+ CPU_BIOS_BIT, /* BIOS */
2829+ CPU_FREQ_BIT, /* Frequency */
2830+ CPU_MTTR_BIT, /* MTRR */
2831+ CPU_PERF_BIT, /* Performance */
2832+ CPU_CACHE_BIT, /* Cache */
2833+ CPU_SYSENTER_BIT, /* Sysenter */
2834+ CPU_THERM_BIT, /* Thermal */
2835+ CPU_MISC_BIT, /* Miscellaneous */
2836+ CPU_DEBUG_BIT, /* Debug */
2837+ CPU_PAT_BIT, /* PAT */
2838+ CPU_VMX_BIT, /* VMX */
2839+ CPU_CALL_BIT, /* System Call */
2840+ CPU_BASE_BIT, /* BASE Address */
2841+ CPU_VER_BIT, /* Version ID */
2842+ CPU_CONF_BIT, /* Configuration */
2843+ CPU_SMM_BIT, /* System mgmt mode */
2844+ CPU_SVM_BIT, /*Secure Virtual Machine*/
2845+ CPU_OSVM_BIT, /* OS-Visible Workaround*/
2846+/* Standard Registers */
2847+ CPU_TSS_BIT, /* Task Stack Segment */
2848+ CPU_CR_BIT, /* Control Registers */
2849+ CPU_DT_BIT, /* Descriptor Table */
2850+/* End of Registers flags */
2851+ CPU_REG_ALL_BIT, /* Select all Registers */
2852+};
2853+
2854+#define CPU_REG_ALL (~0) /* Select all Registers */
2855+
2856+#define CPU_MC (1 << CPU_MC_BIT)
2857+#define CPU_MONITOR (1 << CPU_MONITOR_BIT)
2858+#define CPU_TIME (1 << CPU_TIME_BIT)
2859+#define CPU_PMC (1 << CPU_PMC_BIT)
2860+#define CPU_PLATFORM (1 << CPU_PLATFORM_BIT)
2861+#define CPU_APIC (1 << CPU_APIC_BIT)
2862+#define CPU_POWERON (1 << CPU_POWERON_BIT)
2863+#define CPU_CONTROL (1 << CPU_CONTROL_BIT)
2864+#define CPU_FEATURES (1 << CPU_FEATURES_BIT)
2865+#define CPU_LBRANCH (1 << CPU_LBRANCH_BIT)
2866+#define CPU_BIOS (1 << CPU_BIOS_BIT)
2867+#define CPU_FREQ (1 << CPU_FREQ_BIT)
2868+#define CPU_MTRR (1 << CPU_MTTR_BIT)
2869+#define CPU_PERF (1 << CPU_PERF_BIT)
2870+#define CPU_CACHE (1 << CPU_CACHE_BIT)
2871+#define CPU_SYSENTER (1 << CPU_SYSENTER_BIT)
2872+#define CPU_THERM (1 << CPU_THERM_BIT)
2873+#define CPU_MISC (1 << CPU_MISC_BIT)
2874+#define CPU_DEBUG (1 << CPU_DEBUG_BIT)
2875+#define CPU_PAT (1 << CPU_PAT_BIT)
2876+#define CPU_VMX (1 << CPU_VMX_BIT)
2877+#define CPU_CALL (1 << CPU_CALL_BIT)
2878+#define CPU_BASE (1 << CPU_BASE_BIT)
2879+#define CPU_VER (1 << CPU_VER_BIT)
2880+#define CPU_CONF (1 << CPU_CONF_BIT)
2881+#define CPU_SMM (1 << CPU_SMM_BIT)
2882+#define CPU_SVM (1 << CPU_SVM_BIT)
2883+#define CPU_OSVM (1 << CPU_OSVM_BIT)
2884+#define CPU_TSS (1 << CPU_TSS_BIT)
2885+#define CPU_CR (1 << CPU_CR_BIT)
2886+#define CPU_DT (1 << CPU_DT_BIT)
2887+
2888+/* Register file flags */
2889+enum cpu_file_bit {
2890+ CPU_INDEX_BIT, /* index */
2891+ CPU_VALUE_BIT, /* value */
2892+};
2893+
2894+#define CPU_FILE_VALUE (1 << CPU_VALUE_BIT)
2895+
2896+#define MAX_CPU_FILES 512
2897+
2898+struct cpu_private {
2899+ unsigned cpu;
2900+ unsigned type;
2901+ unsigned reg;
2902+ unsigned file;
2903+};
2904+
2905+struct cpu_debug_base {
2906+ char *name; /* Register name */
2907+ unsigned flag; /* Register flag */
2908+ unsigned write; /* Register write flag */
2909+};
2910+
2911+/*
2912+ * Currently it looks similar to cpu_debug_base but once we add more files
2913+ * cpu_file_base will go in different direction
2914+ */
2915+struct cpu_file_base {
2916+ char *name; /* Register file name */
2917+ unsigned flag; /* Register file flag */
2918+ unsigned write; /* Register write flag */
2919+};
2920+
2921+struct cpu_cpuX_base {
2922+ struct dentry *dentry; /* Register dentry */
2923+ int init; /* Register index file */
2924+};
2925+
2926+struct cpu_debug_range {
2927+ unsigned min; /* Register range min */
2928+ unsigned max; /* Register range max */
2929+ unsigned flag; /* Supported flags */
2930+};
2931+
2932+#endif /* _ASM_X86_CPU_DEBUG_H */
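The header above encodes each register class as an enum bit position plus a `1 << bit` mask, so the debugfs code can test and combine classes cheaply. A trimmed-down sketch of the scheme (only three of the bits reproduced):

```c
#include <stdio.h>

enum cpu_debug_bit { CPU_MC_BIT, CPU_MONITOR_BIT, CPU_TIME_BIT };

#define CPU_MC      (1 << CPU_MC_BIT)
#define CPU_MONITOR (1 << CPU_MONITOR_BIT)
#define CPU_TIME    (1 << CPU_TIME_BIT)
#define CPU_REG_ALL (~0)            /* selects every register class */

int main(void)
{
    unsigned supported = CPU_MC | CPU_TIME; /* hypothetical CPU */

    printf("MC?      %d\n", !!(supported & CPU_MC));
    printf("MONITOR? %d\n", !!(supported & CPU_MONITOR));
    printf("all?     %d\n", (supported & CPU_REG_ALL) == supported);
    return 0;
}
```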
2933diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
2934index 8ac9d9a..456a304 100644
2935--- a/arch/x86/include/asm/elf.h
2936+++ b/arch/x86/include/asm/elf.h
2937@@ -197,8 +197,14 @@ do { \
2938 set_fs(USER_DS); \
2939 } while (0)
2940
2941-void set_personality_ia32(void);
2942-#define COMPAT_SET_PERSONALITY(ex) set_personality_ia32()
2943+#define COMPAT_SET_PERSONALITY(ex) \
2944+do { \
2945+ if (test_thread_flag(TIF_IA32)) \
2946+ clear_thread_flag(TIF_ABI_PENDING); \
2947+ else \
2948+ set_thread_flag(TIF_ABI_PENDING); \
2949+ current->personality |= force_personality32; \
2950+} while (0)
2951
2952 #define COMPAT_ELF_PLATFORM ("i686")
2953
2954diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h 107diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h
2955index f5693c8..19e22e3 100644 108index f5693c8..19e22e3 100644
2956--- a/arch/x86/include/asm/entry_arch.h 109--- a/arch/x86/include/asm/entry_arch.h
@@ -3147,18 +300,6 @@ index 0000000..1cffa4e
3147+ : : "r" (p), "r" (p2), "r" (p3) : CLOBBER_LIST) 300+ : : "r" (p), "r" (p2), "r" (p3) : CLOBBER_LIST)
3148+ 301+
3149+#define __ARCH_HAS_FEATHER_TRACE 302+#define __ARCH_HAS_FEATHER_TRACE
3150diff --git a/arch/x86/include/asm/hpet.h b/arch/x86/include/asm/hpet.h
3151index 3251e23..1c22cb0 100644
3152--- a/arch/x86/include/asm/hpet.h
3153+++ b/arch/x86/include/asm/hpet.h
3154@@ -66,7 +66,6 @@
3155 extern unsigned long hpet_address;
3156 extern unsigned long force_hpet_address;
3157 extern int hpet_force_user;
3158-extern u8 hpet_msi_disable;
3159 extern int is_hpet_enabled(void);
3160 extern int hpet_enable(void);
3161 extern void hpet_disable(void);
3162diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h 303diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
3163index ba180d9..68900e7 100644 304index ba180d9..68900e7 100644
3164--- a/arch/x86/include/asm/hw_irq.h 305--- a/arch/x86/include/asm/hw_irq.h
@@ -3181,7 +322,7 @@ index ba180d9..68900e7 100644
3181 extern void smp_invalidate_interrupt(struct pt_regs *); 322 extern void smp_invalidate_interrupt(struct pt_regs *);
3182 #else 323 #else
3183diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h 324diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
3184index 6e90a04..28c3bf3 100644
325index 5b21f0e..28c3bf3 100644
3185--- a/arch/x86/include/asm/irq_vectors.h 326--- a/arch/x86/include/asm/irq_vectors.h
3186+++ b/arch/x86/include/asm/irq_vectors.h 327+++ b/arch/x86/include/asm/irq_vectors.h
3187@@ -104,6 +104,11 @@ 328@@ -104,6 +104,11 @@
@@ -3196,156 +337,19 @@ index 6e90a04..28c3bf3 100644
3196 * Generic system vector for platform specific use 337 * Generic system vector for platform specific use
3197 */ 338 */
3198 #define GENERIC_INTERRUPT_VECTOR 0xed 339 #define GENERIC_INTERRUPT_VECTOR 0xed
3199@@ -113,7 +118,7 @@
3200 */
3201 #define LOCAL_PENDING_VECTOR 0xec
3202
3203-#define UV_BAU_MESSAGE 0xea
3204+#define UV_BAU_MESSAGE 0xec
3205
3206 /*
3207 * Self IPI vector for machine checks
3208diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
3209index 7c18e12..b7ed2c4 100644
3210--- a/arch/x86/include/asm/kvm_emulate.h
3211+++ b/arch/x86/include/asm/kvm_emulate.h
3212@@ -129,7 +129,7 @@ struct decode_cache {
3213 u8 seg_override;
3214 unsigned int d;
3215 unsigned long regs[NR_VCPU_REGS];
3216- unsigned long eip, eip_orig;
3217+ unsigned long eip;
3218 /* modrm */
3219 u8 modrm;
3220 u8 modrm_mod;
3221diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
3222index d759a1f..d838922 100644
3223--- a/arch/x86/include/asm/kvm_host.h
3224+++ b/arch/x86/include/asm/kvm_host.h
3225@@ -412,7 +412,6 @@ struct kvm_arch{
3226 unsigned long irq_sources_bitmap;
3227 unsigned long irq_states[KVM_IOAPIC_NUM_PINS];
3228 u64 vm_init_tsc;
3229- s64 kvmclock_offset;
3230 };
3231
3232 struct kvm_vm_stat {
3233diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
3234index a479023..f1363b7 100644
3235--- a/arch/x86/include/asm/mce.h
3236+++ b/arch/x86/include/asm/mce.h
3237@@ -214,11 +214,5 @@ void intel_init_thermal(struct cpuinfo_x86 *c);
3238
3239 void mce_log_therm_throt_event(__u64 status);
3240
3241-#ifdef CONFIG_X86_THERMAL_VECTOR
3242-extern void mcheck_intel_therm_init(void);
3243-#else
3244-static inline void mcheck_intel_therm_init(void) { }
3245-#endif
3246-
3247 #endif /* __KERNEL__ */
3248 #endif /* _ASM_X86_MCE_H */
3249diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
3250index 0e3e728..7e2b6ba 100644
3251--- a/arch/x86/include/asm/msr.h
3252+++ b/arch/x86/include/asm/msr.h
3253@@ -27,18 +27,6 @@ struct msr {
3254 };
3255 };
3256
3257-struct msr_info {
3258- u32 msr_no;
3259- struct msr reg;
3260- struct msr *msrs;
3261- int err;
3262-};
3263-
3264-struct msr_regs_info {
3265- u32 *regs;
3266- int err;
3267-};
3268-
3269 static inline unsigned long long native_read_tscp(unsigned int *aux)
3270 {
3271 unsigned long low, high;
3272@@ -256,14 +244,11 @@ do { \
3273
3274 #define write_rdtscp_aux(val) wrmsr(0xc0000103, (val), 0)
3275
3276-struct msr *msrs_alloc(void);
3277-void msrs_free(struct msr *msrs);
3278-
3279 #ifdef CONFIG_SMP
3280 int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
3281 int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
3282-void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
3283-void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
3284+void rdmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs);
3285+void wrmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs);
3286 int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
3287 int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
3288 int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
3289diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
3290index 13b1885..c978648 100644
3291--- a/arch/x86/include/asm/processor.h
3292+++ b/arch/x86/include/asm/processor.h
3293@@ -180,7 +180,7 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
3294 unsigned int *ecx, unsigned int *edx)
3295 {
3296 /* ecx is often an input as well as an output. */
3297- asm volatile("cpuid"
3298+ asm("cpuid"
3299 : "=a" (*eax),
3300 "=b" (*ebx),
3301 "=c" (*ecx),
340diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
341index c978648..e75daac 100644
342--- a/arch/x86/include/asm/processor.h
343+++ b/arch/x86/include/asm/processor.h
344@@ -172,6 +172,8 @@ extern void print_cpu_info(struct cpuinfo_x86 *);
345 extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
346 extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
347 extern unsigned short num_cache_leaves;
348+extern int get_shared_cpu_map(cpumask_var_t mask,
349+ unsigned int cpu, int index);
350
351 extern void detect_extended_topology(struct cpuinfo_x86 *c);
352 extern void detect_ht(struct cpuinfo_x86 *c);
3302diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
3303index 77c1184..72a6dcd 100644
3304--- a/arch/x86/include/asm/sys_ia32.h
3305+++ b/arch/x86/include/asm/sys_ia32.h
3306@@ -62,6 +62,9 @@ asmlinkage long sys32_pwrite(unsigned int, char __user *, u32, u32, u32);
3307 asmlinkage long sys32_personality(unsigned long);
3308 asmlinkage long sys32_sendfile(int, int, compat_off_t __user *, s32);
3309
3310+asmlinkage long sys32_mmap2(unsigned long, unsigned long, unsigned long,
3311+ unsigned long, unsigned long, unsigned long);
3312+
3313 struct oldold_utsname;
3314 struct old_utsname;
3315 asmlinkage long sys32_olduname(struct oldold_utsname __user *);
3316diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h
3317index 1bb6e39..372b76e 100644
3318--- a/arch/x86/include/asm/syscalls.h
3319+++ b/arch/x86/include/asm/syscalls.h
3320@@ -55,6 +55,8 @@ struct sel_arg_struct;
3321 struct oldold_utsname;
3322 struct old_utsname;
3323
3324+asmlinkage long sys_mmap2(unsigned long, unsigned long, unsigned long,
3325+ unsigned long, unsigned long, unsigned long);
3326 asmlinkage int old_mmap(struct mmap_arg_struct __user *);
3327 asmlinkage int old_select(struct sel_arg_struct __user *);
3328 asmlinkage int sys_ipc(uint, int, int, int, void __user *, long);
3329diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
3330index 19c3ce4..d27d0a2 100644
3331--- a/arch/x86/include/asm/thread_info.h
3332+++ b/arch/x86/include/asm/thread_info.h
3333@@ -86,6 +86,7 @@ struct thread_info {
3334 #define TIF_NOTSC 16 /* TSC is not accessible in userland */
3335 #define TIF_IA32 17 /* 32bit process */
3336 #define TIF_FORK 18 /* ret_from_fork */
3337+#define TIF_ABI_PENDING 19
3338 #define TIF_MEMDIE 20
3339 #define TIF_DEBUG 21 /* uses debug registers */
3340 #define TIF_IO_BITMAP 22 /* uses I/O bitmap */
3341@@ -109,6 +110,7 @@ struct thread_info {
3342 #define _TIF_NOTSC (1 << TIF_NOTSC)
3343 #define _TIF_IA32 (1 << TIF_IA32)
3344 #define _TIF_FORK (1 << TIF_FORK)
3345+#define _TIF_ABI_PENDING (1 << TIF_ABI_PENDING)
3346 #define _TIF_DEBUG (1 << TIF_DEBUG)
3347 #define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP)
3348 #define _TIF_FREEZE (1 << TIF_FREEZE)
3349diff --git a/arch/x86/include/asm/unistd_32.h b/arch/x86/include/asm/unistd_32.h 353diff --git a/arch/x86/include/asm/unistd_32.h b/arch/x86/include/asm/unistd_32.h
3350index 6fb3c20..f9b507f 100644 354index 6fb3c20..f9b507f 100644
3351--- a/arch/x86/include/asm/unistd_32.h 355--- a/arch/x86/include/asm/unistd_32.h
@@ -3380,188 +384,6 @@ index 8d3ad0a..33b2003 100644
3380 #ifndef __NO_STUBS
3381 #define __ARCH_WANT_OLD_READDIR
3382 #define __ARCH_WANT_OLD_STAT
3383diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
3384index e90a8a9..d1414af 100644
3385--- a/arch/x86/include/asm/uv/uv_hub.h
3386+++ b/arch/x86/include/asm/uv/uv_hub.h
3387@@ -31,20 +31,20 @@
3388 * contiguous (although various IO spaces may punch holes in
3389 * it)..
3390 *
3391- * N - Number of bits in the node portion of a socket physical
3392- * address.
3393+ * N - Number of bits in the node portion of a socket physical
3394+ * address.
3395 *
3396- * NASID - network ID of a router, Mbrick or Cbrick. Nasid values of
3397- * routers always have low bit of 1, C/MBricks have low bit
3398- * equal to 0. Most addressing macros that target UV hub chips
3399- * right shift the NASID by 1 to exclude the always-zero bit.
3400- * NASIDs contain up to 15 bits.
3401+ * NASID - network ID of a router, Mbrick or Cbrick. Nasid values of
3402+ * routers always have low bit of 1, C/MBricks have low bit
3403+ * equal to 0. Most addressing macros that target UV hub chips
3404+ * right shift the NASID by 1 to exclude the always-zero bit.
3405+ * NASIDs contain up to 15 bits.
3406 *
3407 * GNODE - NASID right shifted by 1 bit. Most mmrs contain gnodes instead
3408 * of nasids.
3409 *
3410- * PNODE - the low N bits of the GNODE. The PNODE is the most useful variant
3411- * of the nasid for socket usage.
3412+ * PNODE - the low N bits of the GNODE. The PNODE is the most useful variant
3413+ * of the nasid for socket usage.
3414 *
3415 *
3416 * NumaLink Global Physical Address Format:
3417@@ -71,12 +71,12 @@
3418 *
3419 *
3420 * APICID format
3421- * NOTE!!!!!! This is the current format of the APICID. However, code
3422- * should assume that this will change in the future. Use functions
3423- * in this file for all APICID bit manipulations and conversion.
3424+ * NOTE!!!!!! This is the current format of the APICID. However, code
3425+ * should assume that this will change in the future. Use functions
3426+ * in this file for all APICID bit manipulations and conversion.
3427 *
3428- * 1111110000000000
3429- * 5432109876543210
3430+ * 1111110000000000
3431+ * 5432109876543210
3432 * pppppppppplc0cch
3433 * sssssssssss
3434 *
3435@@ -89,9 +89,9 @@
3436 * Note: Processor only supports 12 bits in the APICID register. The ACPI
3437 * tables hold all 16 bits. Software needs to be aware of this.
3438 *
3439- * Unless otherwise specified, all references to APICID refer to
3440- * the FULL value contained in ACPI tables, not the subset in the
3441- * processor APICID register.
3442+ * Unless otherwise specified, all references to APICID refer to
3443+ * the FULL value contained in ACPI tables, not the subset in the
3444+ * processor APICID register.
3445 */
3446
3447
3448@@ -151,16 +151,16 @@ struct uv_hub_info_s {
3449 };
3450
3451 DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
3452-#define uv_hub_info (&__get_cpu_var(__uv_hub_info))
3453+#define uv_hub_info (&__get_cpu_var(__uv_hub_info))
3454 #define uv_cpu_hub_info(cpu) (&per_cpu(__uv_hub_info, cpu))
3455
3456 /*
3457 * Local & Global MMR space macros.
3458- * Note: macros are intended to be used ONLY by inline functions
3459- * in this file - not by other kernel code.
3460- * n - NASID (full 15-bit global nasid)
3461- * g - GNODE (full 15-bit global nasid, right shifted 1)
3462- * p - PNODE (local part of nsids, right shifted 1)
3463+ * Note: macros are intended to be used ONLY by inline functions
3464+ * in this file - not by other kernel code.
3465+ * n - NASID (full 15-bit global nasid)
3466+ * g - GNODE (full 15-bit global nasid, right shifted 1)
3467+ * p - PNODE (local part of nsids, right shifted 1)
3468 */
3469 #define UV_NASID_TO_PNODE(n) (((n) >> 1) & uv_hub_info->pnode_mask)
3470 #define UV_PNODE_TO_GNODE(p) ((p) |uv_hub_info->gnode_extra)
3471@@ -213,8 +213,8 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
3472 /*
3473 * Macros for converting between kernel virtual addresses, socket local physical
3474 * addresses, and UV global physical addresses.
3475- * Note: use the standard __pa() & __va() macros for converting
3476- * between socket virtual and socket physical addresses.
3477+ * Note: use the standard __pa() & __va() macros for converting
3478+ * between socket virtual and socket physical addresses.
3479 */
3480
3481 /* socket phys RAM --> UV global physical address */
3482@@ -265,18 +265,21 @@ static inline int uv_apicid_to_pnode(int apicid)
3483 * Access global MMRs using the low memory MMR32 space. This region supports
3484 * faster MMR access but not all MMRs are accessible in this space.
3485 */
3486-static inline unsigned long *uv_global_mmr32_address(int pnode, unsigned long offset)
3487+static inline unsigned long *uv_global_mmr32_address(int pnode,
3488+ unsigned long offset)
3489 {
3490 return __va(UV_GLOBAL_MMR32_BASE |
3491 UV_GLOBAL_MMR32_PNODE_BITS(pnode) | offset);
3492 }
3493
3494-static inline void uv_write_global_mmr32(int pnode, unsigned long offset, unsigned long val)
3495+static inline void uv_write_global_mmr32(int pnode, unsigned long offset,
3496+ unsigned long val)
3497 {
3498 writeq(val, uv_global_mmr32_address(pnode, offset));
3499 }
3500
3501-static inline unsigned long uv_read_global_mmr32(int pnode, unsigned long offset)
3502+static inline unsigned long uv_read_global_mmr32(int pnode,
3503+ unsigned long offset)
3504 {
3505 return readq(uv_global_mmr32_address(pnode, offset));
3506 }
3507@@ -285,32 +288,25 @@ static inline unsigned long uv_read_global_mmr32(int pnode, unsigned long offset
3508 * Access Global MMR space using the MMR space located at the top of physical
3509 * memory.
3510 */
3511-static inline unsigned long *uv_global_mmr64_address(int pnode, unsigned long offset)
3512+static inline unsigned long *uv_global_mmr64_address(int pnode,
3513+ unsigned long offset)
3514 {
3515 return __va(UV_GLOBAL_MMR64_BASE |
3516 UV_GLOBAL_MMR64_PNODE_BITS(pnode) | offset);
3517 }
3518
3519-static inline void uv_write_global_mmr64(int pnode, unsigned long offset, unsigned long val)
3520+static inline void uv_write_global_mmr64(int pnode, unsigned long offset,
3521+ unsigned long val)
3522 {
3523 writeq(val, uv_global_mmr64_address(pnode, offset));
3524 }
3525
3526-static inline unsigned long uv_read_global_mmr64(int pnode, unsigned long offset)
3527+static inline unsigned long uv_read_global_mmr64(int pnode,
3528+ unsigned long offset)
3529 {
3530 return readq(uv_global_mmr64_address(pnode, offset));
3531 }
3532
3533-static inline void uv_write_global_mmr8(int pnode, unsigned long offset, unsigned char val)
3534-{
3535- writeb(val, uv_global_mmr64_address(pnode, offset));
3536-}
3537-
3538-static inline unsigned char uv_read_global_mmr8(int pnode, unsigned long offset)
3539-{
3540- return readb(uv_global_mmr64_address(pnode, offset));
3541-}
3542-
3543 /*
3544 * Access hub local MMRs. Faster than using global space but only local MMRs
3545 * are accessible.
3546@@ -430,17 +426,11 @@ static inline void uv_set_scir_bits(unsigned char value)
3547 }
3548 }
3549
3550-static inline unsigned long uv_scir_offset(int apicid)
3551-{
3552- return SCIR_LOCAL_MMR_BASE | (apicid & 0x3f);
3553-}
3554-
3555 static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value)
3556 {
3557 if (uv_cpu_hub_info(cpu)->scir.state != value) {
3558- uv_write_global_mmr8(uv_cpu_to_pnode(cpu),
3559- uv_cpu_hub_info(cpu)->scir.offset, value);
3560 uv_cpu_hub_info(cpu)->scir.state = value;
3561+ uv_write_local_mmr8(uv_cpu_hub_info(cpu)->scir.offset, value);
3562 }
3563 }
3564
3565diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
3566index d8e5d0c..a99b34d 100644
3567--- a/arch/x86/kernel/Makefile
@@ -3575,1176 +397,34 @@ index d8e5d0c..a99b34d 100644
3575 ###
3576 # 64 bit specific files
3577 ifeq ($(CONFIG_X86_64),y)
400diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
401index 804c40e..3167c3d 100644
402--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
403+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
404@@ -515,6 +515,23 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
405 static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info);
406 #define CPUID4_INFO_IDX(x, y) (&((per_cpu(cpuid4_info, x))[y]))
407
408+/* returns CPUs that share the index cache with cpu */
409+int get_shared_cpu_map(cpumask_var_t mask, unsigned int cpu, int index)
410+{
411+ int ret = 0;
412+ struct _cpuid4_info *this_leaf;
413+
414+ if (index >= num_cache_leaves) {
415+ index = num_cache_leaves - 1;
416+ ret = index;
417+ }
418+
419+ this_leaf = CPUID4_INFO_IDX(cpu,index);
420+ cpumask_copy(mask, to_cpumask(this_leaf->shared_cpu_map));
421+
422+ return ret;
423+}
424+
425 #ifdef CONFIG_SMP
426 static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
427 {
3578diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
3579index 2e837f5..59cdfa4 100644
3580--- a/arch/x86/kernel/acpi/cstate.c
3581+++ b/arch/x86/kernel/acpi/cstate.c
3582@@ -48,7 +48,7 @@ void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags,
3583 * P4, Core and beyond CPUs
3584 */
3585 if (c->x86_vendor == X86_VENDOR_INTEL &&
3586- (c->x86 > 0xf || (c->x86 == 6 && c->x86_model >= 0x0f)))
3587+ (c->x86 > 0xf || (c->x86 == 6 && c->x86_model >= 14)))
3588 flags->bm_control = 0;
3589 }
3590 EXPORT_SYMBOL(acpi_processor_power_init_bm_check);
3591diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
3592index 23fc9fe..0285521 100644
3593--- a/arch/x86/kernel/amd_iommu.c
3594+++ b/arch/x86/kernel/amd_iommu.c
3595@@ -540,7 +540,7 @@ static void flush_all_devices_for_iommu(struct amd_iommu *iommu)
3596 static void flush_devices_by_domain(struct protection_domain *domain)
3597 {
3598 struct amd_iommu *iommu;
3599- unsigned long i;
3600+ int i;
3601
3602 for (i = 0; i <= amd_iommu_last_bdf; ++i) {
3603 if ((domain == NULL && amd_iommu_pd_table[i] == NULL) ||
3604@@ -1230,10 +1230,9 @@ static void __detach_device(struct protection_domain *domain, u16 devid)
3605
3606 /*
3607 * If we run in passthrough mode the device must be assigned to the
3608- * passthrough domain if it is detached from any other domain.
3609- * Make sure we can deassign from the pt_domain itself.
3610+ * passthrough domain if it is detached from any other domain
3611 */
3612- if (iommu_pass_through && domain != pt_domain) {
3613+ if (iommu_pass_through) {
3614 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
3615 __attach_device(iommu, pt_domain, devid);
3616 }
3617@@ -2048,10 +2047,10 @@ static void prealloc_protection_domains(void)
3618 struct pci_dev *dev = NULL;
3619 struct dma_ops_domain *dma_dom;
3620 struct amd_iommu *iommu;
3621- u16 devid, __devid;
3622+ u16 devid;
3623
3624 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
3625- __devid = devid = calc_devid(dev->bus->number, dev->devfn);
3626+ devid = calc_devid(dev->bus->number, dev->devfn);
3627 if (devid > amd_iommu_last_bdf)
3628 continue;
3629 devid = amd_iommu_alias_table[devid];
3630@@ -2066,10 +2065,6 @@ static void prealloc_protection_domains(void)
3631 init_unity_mappings_for_device(dma_dom, devid);
3632 dma_dom->target_dev = devid;
3633
3634- attach_device(iommu, &dma_dom->domain, devid);
3635- if (__devid != devid)
3636- attach_device(iommu, &dma_dom->domain, __devid);
3637-
3638 list_add_tail(&dma_dom->list, &iommu_pd_list);
3639 }
3640 }
3641@@ -2084,11 +2079,6 @@ static struct dma_map_ops amd_iommu_dma_ops = {
3642 .dma_supported = amd_iommu_dma_supported,
3643 };
3644
3645-void __init amd_iommu_init_api(void)
3646-{
3647- register_iommu(&amd_iommu_ops);
3648-}
3649-
3650 /*
3651 * The function which clues the AMD IOMMU driver into dma_ops.
3652 */
3653@@ -2130,6 +2120,8 @@ int __init amd_iommu_init_dma_ops(void)
3654 /* Make the driver finally visible to the drivers */
3655 dma_ops = &amd_iommu_dma_ops;
3656
3657+ register_iommu(&amd_iommu_ops);
3658+
3659 bus_register_notifier(&pci_bus_type, &device_nb);
3660
3661 amd_iommu_stats_init();
3662diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
3663index 362ab88..c20001e 100644
3664--- a/arch/x86/kernel/amd_iommu_init.c
3665+++ b/arch/x86/kernel/amd_iommu_init.c
3666@@ -136,11 +136,6 @@ LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the
3667 system */
3668
3669 /*
3670- * Set to true if ACPI table parsing and hardware initialization went properly
3671- */
3672-static bool amd_iommu_initialized;
3673-
3674-/*
3675 * Pointer to the device table which is shared by all AMD IOMMUs
3676 * it is indexed by the PCI device id or the HT unit id and contains
3677 * information about the domain the device belongs to as well as the
3678@@ -918,8 +913,6 @@ static int __init init_iommu_all(struct acpi_table_header *table)
3679 }
3680 WARN_ON(p != end);
3681
3682- amd_iommu_initialized = true;
3683-
3684 return 0;
3685 }
3686
3687@@ -932,7 +925,7 @@ static int __init init_iommu_all(struct acpi_table_header *table)
3688 *
3689 ****************************************************************************/
3690
3691-static int iommu_setup_msi(struct amd_iommu *iommu)
3692+static int __init iommu_setup_msi(struct amd_iommu *iommu)
3693 {
3694 int r;
3695
3696@@ -1270,9 +1263,6 @@ int __init amd_iommu_init(void)
3697 if (acpi_table_parse("IVRS", init_iommu_all) != 0)
3698 goto free;
3699
3700- if (!amd_iommu_initialized)
3701- goto free;
3702-
3703 if (acpi_table_parse("IVRS", init_memory_definitions) != 0)
3704 goto free;
3705
3706@@ -1288,12 +1278,9 @@ int __init amd_iommu_init(void)
3707 ret = amd_iommu_init_passthrough();
3708 else
3709 ret = amd_iommu_init_dma_ops();
3710-
3711 if (ret)
3712 goto free;
3713
3714- amd_iommu_init_api();
3715-
3716 enable_iommus();
3717
3718 if (iommu_pass_through)
3719diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
3720index c86dbcf..894aa97 100644
3721--- a/arch/x86/kernel/apic/apic.c
3722+++ b/arch/x86/kernel/apic/apic.c
3723@@ -246,7 +246,7 @@ static int modern_apic(void)
3724 */
3725 static void native_apic_write_dummy(u32 reg, u32 v)
3726 {
3727- WARN_ON_ONCE(cpu_has_apic && !disable_apic);
3728+ WARN_ON_ONCE((cpu_has_apic || !disable_apic));
3729 }
3730
3731 static u32 native_apic_read_dummy(u32 reg)
3732diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
3733index 873f81f..d0c99ab 100644
3734--- a/arch/x86/kernel/apic/apic_flat_64.c
3735+++ b/arch/x86/kernel/apic/apic_flat_64.c
3736@@ -240,11 +240,6 @@ static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
3737 printk(KERN_DEBUG "system APIC only can use physical flat");
3738 return 1;
3739 }
3740-
3741- if (!strncmp(oem_id, "IBM", 3) && !strncmp(oem_table_id, "EXA", 3)) {
3742- printk(KERN_DEBUG "IBM Summit detected, will use apic physical");
3743- return 1;
3744- }
3745 #endif
3746
3747 return 0;
3748diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
3749index c107e83..dc69f28 100644
3750--- a/arch/x86/kernel/apic/io_apic.c
3751+++ b/arch/x86/kernel/apic/io_apic.c
3752@@ -3157,7 +3157,6 @@ unsigned int create_irq_nr(unsigned int irq_want, int node)
3753 continue;
3754
3755 desc_new = move_irq_desc(desc_new, node);
3756- cfg_new = desc_new->chip_data;
3757
3758 if (__assign_irq_vector(new, cfg_new, apic->target_cpus()) == 0)
3759 irq = new;
3760diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
3761index 9ee87cf..326c254 100644
3762--- a/arch/x86/kernel/apic/x2apic_uv_x.c
3763+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
3764@@ -364,13 +364,13 @@ static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
3765
3766 enum map_type {map_wb, map_uc};
3767
3768-static __init void map_high(char *id, unsigned long base, int pshift,
3769- int bshift, int max_pnode, enum map_type map_type)
3770+static __init void map_high(char *id, unsigned long base, int shift,
3771+ int max_pnode, enum map_type map_type)
3772 {
3773 unsigned long bytes, paddr;
3774
3775- paddr = base << pshift;
3776- bytes = (1UL << bshift) * (max_pnode + 1);
3777+ paddr = base << shift;
3778+ bytes = (1UL << shift) * (max_pnode + 1);
3779 printk(KERN_INFO "UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr,
3780 paddr + bytes);
3781 if (map_type == map_uc)
3782@@ -386,7 +386,7 @@ static __init void map_gru_high(int max_pnode)
3783
3784 gru.v = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR);
3785 if (gru.s.enable)
3786- map_high("GRU", gru.s.base, shift, shift, max_pnode, map_wb);
3787+ map_high("GRU", gru.s.base, shift, max_pnode, map_wb);
3788 }
3789
3790 static __init void map_mmr_high(int max_pnode)
3791@@ -396,7 +396,7 @@ static __init void map_mmr_high(int max_pnode)
3792
3793 mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR);
3794 if (mmr.s.enable)
3795- map_high("MMR", mmr.s.base, shift, shift, max_pnode, map_uc);
3796+ map_high("MMR", mmr.s.base, shift, max_pnode, map_uc);
3797 }
3798
3799 static __init void map_mmioh_high(int max_pnode)
3800@@ -406,8 +406,7 @@ static __init void map_mmioh_high(int max_pnode)
3801
3802 mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR);
3803 if (mmioh.s.enable)
3804- map_high("MMIOH", mmioh.s.base, shift, mmioh.s.m_io,
3805- max_pnode, map_uc);
3806+ map_high("MMIOH", mmioh.s.base, shift, max_pnode, map_uc);
3807 }
3808
3809 static __init void uv_rtc_init(void)
3810@@ -608,10 +607,8 @@ void __init uv_system_init(void)
3811 uv_rtc_init();
3812
3813 for_each_present_cpu(cpu) {
3814- int apicid = per_cpu(x86_cpu_to_apicid, cpu);
3815-
3816 nid = cpu_to_node(cpu);
3817- pnode = uv_apicid_to_pnode(apicid);
3818+ pnode = uv_apicid_to_pnode(per_cpu(x86_cpu_to_apicid, cpu));
3819 blade = boot_pnode_to_blade(pnode);
3820 lcpu = uv_blade_info[blade].nr_possible_cpus;
3821 uv_blade_info[blade].nr_possible_cpus++;
3822@@ -632,13 +629,15 @@ void __init uv_system_init(void)
3823 uv_cpu_hub_info(cpu)->gnode_extra = gnode_extra;
3824 uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
3825 uv_cpu_hub_info(cpu)->coherency_domain_number = sn_coherency_id;
3826- uv_cpu_hub_info(cpu)->scir.offset = uv_scir_offset(apicid);
3827+ uv_cpu_hub_info(cpu)->scir.offset = SCIR_LOCAL_MMR_BASE + lcpu;
3828 uv_node_to_blade[nid] = blade;
3829 uv_cpu_to_blade[cpu] = blade;
3830 max_pnode = max(pnode, max_pnode);
3831
3832- printk(KERN_DEBUG "UV: cpu %d, apicid 0x%x, pnode %d, nid %d, lcpu %d, blade %d\n",
3833- cpu, apicid, pnode, nid, lcpu, blade);
3834+ printk(KERN_DEBUG "UV: cpu %d, apicid 0x%x, pnode %d, nid %d, "
3835+ "lcpu %d, blade %d\n",
3836+ cpu, per_cpu(x86_cpu_to_apicid, cpu), pnode, nid,
3837+ lcpu, blade);
3838 }
3839
3840 /* Add blade/pnode info for nodes without cpus */
3841diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
3842index ff502cc..68537e9 100644
3843--- a/arch/x86/kernel/cpu/Makefile
3844+++ b/arch/x86/kernel/cpu/Makefile
3845@@ -18,6 +18,8 @@ obj-y += vmware.o hypervisor.o sched.o
3846 obj-$(CONFIG_X86_32) += bugs.o cmpxchg.o
3847 obj-$(CONFIG_X86_64) += bugs_64.o
3848
3849+obj-$(CONFIG_X86_CPU_DEBUG) += cpu_debug.o
3850+
3851 obj-$(CONFIG_CPU_SUP_INTEL) += intel.o
3852 obj-$(CONFIG_CPU_SUP_AMD) += amd.o
3853 obj-$(CONFIG_CPU_SUP_CYRIX_32) += cyrix.o
3854diff --git a/arch/x86/kernel/cpu/cpu_debug.c b/arch/x86/kernel/cpu/cpu_debug.c
3855new file mode 100644
3856index 0000000..dca325c
3857--- /dev/null
3858+++ b/arch/x86/kernel/cpu/cpu_debug.c
3859@@ -0,0 +1,688 @@
3860+/*
3861+ * CPU x86 architecture debug code
3862+ *
3863+ * Copyright(C) 2009 Jaswinder Singh Rajput
3864+ *
3865+ * For licencing details see kernel-base/COPYING
3866+ */
3867+
3868+#include <linux/interrupt.h>
3869+#include <linux/compiler.h>
3870+#include <linux/seq_file.h>
3871+#include <linux/debugfs.h>
3872+#include <linux/kprobes.h>
3873+#include <linux/uaccess.h>
3874+#include <linux/kernel.h>
3875+#include <linux/module.h>
3876+#include <linux/percpu.h>
3877+#include <linux/signal.h>
3878+#include <linux/errno.h>
3879+#include <linux/sched.h>
3880+#include <linux/types.h>
3881+#include <linux/init.h>
3882+#include <linux/slab.h>
3883+#include <linux/smp.h>
3884+
3885+#include <asm/cpu_debug.h>
3886+#include <asm/paravirt.h>
3887+#include <asm/system.h>
3888+#include <asm/traps.h>
3889+#include <asm/apic.h>
3890+#include <asm/desc.h>
3891+
3892+static DEFINE_PER_CPU(struct cpu_cpuX_base [CPU_REG_ALL_BIT], cpu_arr);
3893+static DEFINE_PER_CPU(struct cpu_private * [MAX_CPU_FILES], priv_arr);
3894+static DEFINE_PER_CPU(int, cpu_priv_count);
3895+
3896+static DEFINE_MUTEX(cpu_debug_lock);
3897+
3898+static struct dentry *cpu_debugfs_dir;
3899+
3900+static struct cpu_debug_base cpu_base[] = {
3901+ { "mc", CPU_MC, 0 },
3902+ { "monitor", CPU_MONITOR, 0 },
3903+ { "time", CPU_TIME, 0 },
3904+ { "pmc", CPU_PMC, 1 },
3905+ { "platform", CPU_PLATFORM, 0 },
3906+ { "apic", CPU_APIC, 0 },
3907+ { "poweron", CPU_POWERON, 0 },
3908+ { "control", CPU_CONTROL, 0 },
3909+ { "features", CPU_FEATURES, 0 },
3910+ { "lastbranch", CPU_LBRANCH, 0 },
3911+ { "bios", CPU_BIOS, 0 },
3912+ { "freq", CPU_FREQ, 0 },
3913+ { "mtrr", CPU_MTRR, 0 },
3914+ { "perf", CPU_PERF, 0 },
3915+ { "cache", CPU_CACHE, 0 },
3916+ { "sysenter", CPU_SYSENTER, 0 },
3917+ { "therm", CPU_THERM, 0 },
3918+ { "misc", CPU_MISC, 0 },
3919+ { "debug", CPU_DEBUG, 0 },
3920+ { "pat", CPU_PAT, 0 },
3921+ { "vmx", CPU_VMX, 0 },
3922+ { "call", CPU_CALL, 0 },
3923+ { "base", CPU_BASE, 0 },
3924+ { "ver", CPU_VER, 0 },
3925+ { "conf", CPU_CONF, 0 },
3926+ { "smm", CPU_SMM, 0 },
3927+ { "svm", CPU_SVM, 0 },
3928+ { "osvm", CPU_OSVM, 0 },
3929+ { "tss", CPU_TSS, 0 },
3930+ { "cr", CPU_CR, 0 },
3931+ { "dt", CPU_DT, 0 },
3932+ { "registers", CPU_REG_ALL, 0 },
3933+};
3934+
3935+static struct cpu_file_base cpu_file[] = {
3936+ { "index", CPU_REG_ALL, 0 },
3937+ { "value", CPU_REG_ALL, 1 },
3938+};
3939+
3940+/* CPU Registers Range */
3941+static struct cpu_debug_range cpu_reg_range[] = {
3942+ { 0x00000000, 0x00000001, CPU_MC, },
3943+ { 0x00000006, 0x00000007, CPU_MONITOR, },
3944+ { 0x00000010, 0x00000010, CPU_TIME, },
3945+ { 0x00000011, 0x00000013, CPU_PMC, },
3946+ { 0x00000017, 0x00000017, CPU_PLATFORM, },
3947+ { 0x0000001B, 0x0000001B, CPU_APIC, },
3948+ { 0x0000002A, 0x0000002B, CPU_POWERON, },
3949+ { 0x0000002C, 0x0000002C, CPU_FREQ, },
3950+ { 0x0000003A, 0x0000003A, CPU_CONTROL, },
3951+ { 0x00000040, 0x00000047, CPU_LBRANCH, },
3952+ { 0x00000060, 0x00000067, CPU_LBRANCH, },
3953+ { 0x00000079, 0x00000079, CPU_BIOS, },
3954+ { 0x00000088, 0x0000008A, CPU_CACHE, },
3955+ { 0x0000008B, 0x0000008B, CPU_BIOS, },
3956+ { 0x0000009B, 0x0000009B, CPU_MONITOR, },
3957+ { 0x000000C1, 0x000000C4, CPU_PMC, },
3958+ { 0x000000CD, 0x000000CD, CPU_FREQ, },
3959+ { 0x000000E7, 0x000000E8, CPU_PERF, },
3960+ { 0x000000FE, 0x000000FE, CPU_MTRR, },
3961+
3962+ { 0x00000116, 0x0000011E, CPU_CACHE, },
3963+ { 0x00000174, 0x00000176, CPU_SYSENTER, },
3964+ { 0x00000179, 0x0000017B, CPU_MC, },
3965+ { 0x00000186, 0x00000189, CPU_PMC, },
3966+ { 0x00000198, 0x00000199, CPU_PERF, },
3967+ { 0x0000019A, 0x0000019A, CPU_TIME, },
3968+ { 0x0000019B, 0x0000019D, CPU_THERM, },
3969+ { 0x000001A0, 0x000001A0, CPU_MISC, },
3970+ { 0x000001C9, 0x000001C9, CPU_LBRANCH, },
3971+ { 0x000001D7, 0x000001D8, CPU_LBRANCH, },
3972+ { 0x000001D9, 0x000001D9, CPU_DEBUG, },
3973+ { 0x000001DA, 0x000001E0, CPU_LBRANCH, },
3974+
3975+ { 0x00000200, 0x0000020F, CPU_MTRR, },
3976+ { 0x00000250, 0x00000250, CPU_MTRR, },
3977+ { 0x00000258, 0x00000259, CPU_MTRR, },
3978+ { 0x00000268, 0x0000026F, CPU_MTRR, },
3979+ { 0x00000277, 0x00000277, CPU_PAT, },
3980+ { 0x000002FF, 0x000002FF, CPU_MTRR, },
3981+
3982+ { 0x00000300, 0x00000311, CPU_PMC, },
3983+ { 0x00000345, 0x00000345, CPU_PMC, },
3984+ { 0x00000360, 0x00000371, CPU_PMC, },
3985+ { 0x0000038D, 0x00000390, CPU_PMC, },
3986+ { 0x000003A0, 0x000003BE, CPU_PMC, },
3987+ { 0x000003C0, 0x000003CD, CPU_PMC, },
3988+ { 0x000003E0, 0x000003E1, CPU_PMC, },
3989+ { 0x000003F0, 0x000003F2, CPU_PMC, },
3990+
3991+ { 0x00000400, 0x00000417, CPU_MC, },
3992+ { 0x00000480, 0x0000048B, CPU_VMX, },
3993+
3994+ { 0x00000600, 0x00000600, CPU_DEBUG, },
3995+ { 0x00000680, 0x0000068F, CPU_LBRANCH, },
3996+ { 0x000006C0, 0x000006CF, CPU_LBRANCH, },
3997+
3998+ { 0x000107CC, 0x000107D3, CPU_PMC, },
3999+
4000+ { 0xC0000080, 0xC0000080, CPU_FEATURES, },
4001+ { 0xC0000081, 0xC0000084, CPU_CALL, },
4002+ { 0xC0000100, 0xC0000102, CPU_BASE, },
4003+ { 0xC0000103, 0xC0000103, CPU_TIME, },
4004+
4005+ { 0xC0010000, 0xC0010007, CPU_PMC, },
4006+ { 0xC0010010, 0xC0010010, CPU_CONF, },
4007+ { 0xC0010015, 0xC0010015, CPU_CONF, },
4008+ { 0xC0010016, 0xC001001A, CPU_MTRR, },
4009+ { 0xC001001D, 0xC001001D, CPU_MTRR, },
4010+ { 0xC001001F, 0xC001001F, CPU_CONF, },
4011+ { 0xC0010030, 0xC0010035, CPU_BIOS, },
4012+ { 0xC0010044, 0xC0010048, CPU_MC, },
4013+ { 0xC0010050, 0xC0010056, CPU_SMM, },
4014+ { 0xC0010058, 0xC0010058, CPU_CONF, },
4015+ { 0xC0010060, 0xC0010060, CPU_CACHE, },
4016+ { 0xC0010061, 0xC0010068, CPU_SMM, },
4017+ { 0xC0010069, 0xC001006B, CPU_SMM, },
4018+ { 0xC0010070, 0xC0010071, CPU_SMM, },
4019+ { 0xC0010111, 0xC0010113, CPU_SMM, },
4020+ { 0xC0010114, 0xC0010118, CPU_SVM, },
4021+ { 0xC0010140, 0xC0010141, CPU_OSVM, },
4022+ { 0xC0011022, 0xC0011023, CPU_CONF, },
4023+};
4024+
4025+static int is_typeflag_valid(unsigned cpu, unsigned flag)
4026+{
4027+ int i;
4028+
4029+ /* Standard Registers should be always valid */
4030+ if (flag >= CPU_TSS)
4031+ return 1;
4032+
4033+ for (i = 0; i < ARRAY_SIZE(cpu_reg_range); i++) {
4034+ if (cpu_reg_range[i].flag == flag)
4035+ return 1;
4036+ }
4037+
4038+ /* Invalid */
4039+ return 0;
4040+}
4041+
4042+static unsigned get_cpu_range(unsigned cpu, unsigned *min, unsigned *max,
4043+ int index, unsigned flag)
4044+{
4045+ if (cpu_reg_range[index].flag == flag) {
4046+ *min = cpu_reg_range[index].min;
4047+ *max = cpu_reg_range[index].max;
4048+ } else
4049+ *max = 0;
4050+
4051+ return *max;
4052+}
4053+
4054+/* This function can also be called with seq = NULL for printk */
4055+static void print_cpu_data(struct seq_file *seq, unsigned type,
4056+ u32 low, u32 high)
4057+{
4058+ struct cpu_private *priv;
4059+ u64 val = high;
4060+
4061+ if (seq) {
4062+ priv = seq->private;
4063+ if (priv->file) {
4064+ val = (val << 32) | low;
4065+ seq_printf(seq, "0x%llx\n", val);
4066+ } else
4067+ seq_printf(seq, " %08x: %08x_%08x\n",
4068+ type, high, low);
4069+ } else
4070+ printk(KERN_INFO " %08x: %08x_%08x\n", type, high, low);
4071+}
4072+
4073+/* This function can also be called with seq = NULL for printk */
4074+static void print_msr(struct seq_file *seq, unsigned cpu, unsigned flag)
4075+{
4076+ unsigned msr, msr_min, msr_max;
4077+ struct cpu_private *priv;
4078+ u32 low, high;
4079+ int i;
4080+
4081+ if (seq) {
4082+ priv = seq->private;
4083+ if (priv->file) {
4084+ if (!rdmsr_safe_on_cpu(priv->cpu, priv->reg,
4085+ &low, &high))
4086+ print_cpu_data(seq, priv->reg, low, high);
4087+ return;
4088+ }
4089+ }
4090+
4091+ for (i = 0; i < ARRAY_SIZE(cpu_reg_range); i++) {
4092+ if (!get_cpu_range(cpu, &msr_min, &msr_max, i, flag))
4093+ continue;
4094+
4095+ for (msr = msr_min; msr <= msr_max; msr++) {
4096+ if (rdmsr_safe_on_cpu(cpu, msr, &low, &high))
4097+ continue;
4098+ print_cpu_data(seq, msr, low, high);
4099+ }
4100+ }
4101+}
4102+
4103+static void print_tss(void *arg)
4104+{
4105+ struct pt_regs *regs = task_pt_regs(current);
4106+ struct seq_file *seq = arg;
4107+ unsigned int seg;
4108+
4109+ seq_printf(seq, " RAX\t: %016lx\n", regs->ax);
4110+ seq_printf(seq, " RBX\t: %016lx\n", regs->bx);
4111+ seq_printf(seq, " RCX\t: %016lx\n", regs->cx);
4112+ seq_printf(seq, " RDX\t: %016lx\n", regs->dx);
4113+
4114+ seq_printf(seq, " RSI\t: %016lx\n", regs->si);
4115+ seq_printf(seq, " RDI\t: %016lx\n", regs->di);
4116+ seq_printf(seq, " RBP\t: %016lx\n", regs->bp);
4117+ seq_printf(seq, " ESP\t: %016lx\n", regs->sp);
4118+
4119+#ifdef CONFIG_X86_64
4120+ seq_printf(seq, " R08\t: %016lx\n", regs->r8);
4121+ seq_printf(seq, " R09\t: %016lx\n", regs->r9);
4122+ seq_printf(seq, " R10\t: %016lx\n", regs->r10);
4123+ seq_printf(seq, " R11\t: %016lx\n", regs->r11);
4124+ seq_printf(seq, " R12\t: %016lx\n", regs->r12);
4125+ seq_printf(seq, " R13\t: %016lx\n", regs->r13);
4126+ seq_printf(seq, " R14\t: %016lx\n", regs->r14);
4127+ seq_printf(seq, " R15\t: %016lx\n", regs->r15);
4128+#endif
4129+
4130+ asm("movl %%cs,%0" : "=r" (seg));
4131+ seq_printf(seq, " CS\t: %04x\n", seg);
4132+ asm("movl %%ds,%0" : "=r" (seg));
4133+ seq_printf(seq, " DS\t: %04x\n", seg);
4134+ seq_printf(seq, " SS\t: %04lx\n", regs->ss & 0xffff);
4135+ asm("movl %%es,%0" : "=r" (seg));
4136+ seq_printf(seq, " ES\t: %04x\n", seg);
4137+ asm("movl %%fs,%0" : "=r" (seg));
4138+ seq_printf(seq, " FS\t: %04x\n", seg);
4139+ asm("movl %%gs,%0" : "=r" (seg));
4140+ seq_printf(seq, " GS\t: %04x\n", seg);
4141+
4142+ seq_printf(seq, " EFLAGS\t: %016lx\n", regs->flags);
4143+
4144+ seq_printf(seq, " EIP\t: %016lx\n", regs->ip);
4145+}
4146+
4147+static void print_cr(void *arg)
4148+{
4149+ struct seq_file *seq = arg;
4150+
4151+ seq_printf(seq, " cr0\t: %016lx\n", read_cr0());
4152+ seq_printf(seq, " cr2\t: %016lx\n", read_cr2());
4153+ seq_printf(seq, " cr3\t: %016lx\n", read_cr3());
4154+ seq_printf(seq, " cr4\t: %016lx\n", read_cr4_safe());
4155+#ifdef CONFIG_X86_64
4156+ seq_printf(seq, " cr8\t: %016lx\n", read_cr8());
4157+#endif
4158+}
4159+
4160+static void print_desc_ptr(char *str, struct seq_file *seq, struct desc_ptr dt)
4161+{
4162+ seq_printf(seq, " %s\t: %016llx\n", str, (u64)(dt.address | dt.size));
4163+}
4164+
4165+static void print_dt(void *seq)
4166+{
4167+ struct desc_ptr dt;
4168+ unsigned long ldt;
4169+
4170+ /* IDT */
4171+ store_idt((struct desc_ptr *)&dt);
4172+ print_desc_ptr("IDT", seq, dt);
4173+
4174+ /* GDT */
4175+ store_gdt((struct desc_ptr *)&dt);
4176+ print_desc_ptr("GDT", seq, dt);
4177+
4178+ /* LDT */
4179+ store_ldt(ldt);
4180+ seq_printf(seq, " LDT\t: %016lx\n", ldt);
4181+
4182+ /* TR */
4183+ store_tr(ldt);
4184+ seq_printf(seq, " TR\t: %016lx\n", ldt);
4185+}
4186+
4187+static void print_dr(void *arg)
4188+{
4189+ struct seq_file *seq = arg;
4190+ unsigned long dr;
4191+ int i;
4192+
4193+ for (i = 0; i < 8; i++) {
4194+ /* Ignore db4, db5 */
4195+ if ((i == 4) || (i == 5))
4196+ continue;
4197+ get_debugreg(dr, i);
4198+ seq_printf(seq, " dr%d\t: %016lx\n", i, dr);
4199+ }
4200+
4201+ seq_printf(seq, "\n MSR\t:\n");
4202+}
4203+
4204+static void print_apic(void *arg)
4205+{
4206+ struct seq_file *seq = arg;
4207+
4208+#ifdef CONFIG_X86_LOCAL_APIC
4209+ seq_printf(seq, " LAPIC\t:\n");
4210+ seq_printf(seq, " ID\t\t: %08x\n", apic_read(APIC_ID) >> 24);
4211+ seq_printf(seq, " LVR\t\t: %08x\n", apic_read(APIC_LVR));
4212+ seq_printf(seq, " TASKPRI\t: %08x\n", apic_read(APIC_TASKPRI));
4213+ seq_printf(seq, " ARBPRI\t\t: %08x\n", apic_read(APIC_ARBPRI));
4214+ seq_printf(seq, " PROCPRI\t: %08x\n", apic_read(APIC_PROCPRI));
4215+ seq_printf(seq, " LDR\t\t: %08x\n", apic_read(APIC_LDR));
4216+ seq_printf(seq, " DFR\t\t: %08x\n", apic_read(APIC_DFR));
4217+ seq_printf(seq, " SPIV\t\t: %08x\n", apic_read(APIC_SPIV));
4218+ seq_printf(seq, " ISR\t\t: %08x\n", apic_read(APIC_ISR));
4219+ seq_printf(seq, " ESR\t\t: %08x\n", apic_read(APIC_ESR));
4220+ seq_printf(seq, " ICR\t\t: %08x\n", apic_read(APIC_ICR));
4221+ seq_printf(seq, " ICR2\t\t: %08x\n", apic_read(APIC_ICR2));
4222+ seq_printf(seq, " LVTT\t\t: %08x\n", apic_read(APIC_LVTT));
4223+ seq_printf(seq, " LVTTHMR\t: %08x\n", apic_read(APIC_LVTTHMR));
4224+ seq_printf(seq, " LVTPC\t\t: %08x\n", apic_read(APIC_LVTPC));
4225+ seq_printf(seq, " LVT0\t\t: %08x\n", apic_read(APIC_LVT0));
4226+ seq_printf(seq, " LVT1\t\t: %08x\n", apic_read(APIC_LVT1));
4227+ seq_printf(seq, " LVTERR\t\t: %08x\n", apic_read(APIC_LVTERR));
4228+ seq_printf(seq, " TMICT\t\t: %08x\n", apic_read(APIC_TMICT));
4229+ seq_printf(seq, " TMCCT\t\t: %08x\n", apic_read(APIC_TMCCT));
4230+ seq_printf(seq, " TDCR\t\t: %08x\n", apic_read(APIC_TDCR));
4231+ if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
4232+ unsigned int i, v, maxeilvt;
4233+
4234+ v = apic_read(APIC_EFEAT);
4235+ maxeilvt = (v >> 16) & 0xff;
4236+ seq_printf(seq, " EFEAT\t\t: %08x\n", v);
4237+ seq_printf(seq, " ECTRL\t\t: %08x\n", apic_read(APIC_ECTRL));
4238+
4239+ for (i = 0; i < maxeilvt; i++) {
4240+ v = apic_read(APIC_EILVTn(i));
4241+ seq_printf(seq, " EILVT%d\t\t: %08x\n", i, v);
4242+ }
4243+ }
4244+#endif /* CONFIG_X86_LOCAL_APIC */
4245+ seq_printf(seq, "\n MSR\t:\n");
4246+}
4247+
4248+static int cpu_seq_show(struct seq_file *seq, void *v)
4249+{
4250+ struct cpu_private *priv = seq->private;
4251+
4252+ if (priv == NULL)
4253+ return -EINVAL;
4254+
4255+ switch (cpu_base[priv->type].flag) {
4256+ case CPU_TSS:
4257+ smp_call_function_single(priv->cpu, print_tss, seq, 1);
4258+ break;
4259+ case CPU_CR:
4260+ smp_call_function_single(priv->cpu, print_cr, seq, 1);
4261+ break;
4262+ case CPU_DT:
4263+ smp_call_function_single(priv->cpu, print_dt, seq, 1);
4264+ break;
4265+ case CPU_DEBUG:
4266+ if (priv->file == CPU_INDEX_BIT)
4267+ smp_call_function_single(priv->cpu, print_dr, seq, 1);
4268+ print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
4269+ break;
4270+ case CPU_APIC:
4271+ if (priv->file == CPU_INDEX_BIT)
4272+ smp_call_function_single(priv->cpu, print_apic, seq, 1);
4273+ print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
4274+ break;
4275+
4276+ default:
4277+ print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
4278+ break;
4279+ }
4280+ seq_printf(seq, "\n");
4281+
4282+ return 0;
4283+}
4284+
4285+static void *cpu_seq_start(struct seq_file *seq, loff_t *pos)
4286+{
4287+ if (*pos == 0) /* One time is enough ;-) */
4288+ return seq;
4289+
4290+ return NULL;
4291+}
4292+
4293+static void *cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4294+{
4295+ (*pos)++;
4296+
4297+ return cpu_seq_start(seq, pos);
4298+}
4299+
4300+static void cpu_seq_stop(struct seq_file *seq, void *v)
4301+{
4302+}
4303+
4304+static const struct seq_operations cpu_seq_ops = {
4305+ .start = cpu_seq_start,
4306+ .next = cpu_seq_next,
4307+ .stop = cpu_seq_stop,
4308+ .show = cpu_seq_show,
4309+};
4310+
4311+static int cpu_seq_open(struct inode *inode, struct file *file)
4312+{
4313+ struct cpu_private *priv = inode->i_private;
4314+ struct seq_file *seq;
4315+ int err;
4316+
4317+ err = seq_open(file, &cpu_seq_ops);
4318+ if (!err) {
4319+ seq = file->private_data;
4320+ seq->private = priv;
4321+ }
4322+
4323+ return err;
4324+} 420+ cpumask_copy(mask, to_cpumask(this_leaf->shared_cpu_map));
4325+
4326+static int write_msr(struct cpu_private *priv, u64 val)
4327+{
4328+ u32 low, high;
4329+
4330+ high = (val >> 32) & 0xffffffff;
4331+ low = val & 0xffffffff;
4332+
4333+ if (!wrmsr_safe_on_cpu(priv->cpu, priv->reg, low, high))
4334+ return 0;
4335+
4336+ return -EPERM;
4337+}
4338+
4339+static int write_cpu_register(struct cpu_private *priv, const char *buf)
4340+{
4341+ int ret = -EPERM;
4342+ u64 val;
4343+
4344+ ret = strict_strtoull(buf, 0, &val);
4345+ if (ret < 0)
4346+ return ret;
4347+
4348+ /* Supporting only MSRs */
4349+ if (priv->type < CPU_TSS_BIT)
4350+ return write_msr(priv, val);
4351+
4352+ return ret;
4353+}
4354+
4355+static ssize_t cpu_write(struct file *file, const char __user *ubuf,
4356+ size_t count, loff_t *off)
4357+{
4358+ struct seq_file *seq = file->private_data;
4359+ struct cpu_private *priv = seq->private;
4360+ char buf[19];
4361+
4362+ if ((priv == NULL) || (count >= sizeof(buf)))
4363+ return -EINVAL;
4364+
4365+ if (copy_from_user(&buf, ubuf, count))
4366+ return -EFAULT;
4367+
4368+ buf[count] = 0;
4369+
4370+ if ((cpu_base[priv->type].write) && (cpu_file[priv->file].write))
4371+ if (!write_cpu_register(priv, buf))
4372+ return count;
4373+
4374+ return -EACCES;
4375+}
4376+
4377+static const struct file_operations cpu_fops = {
4378+ .owner = THIS_MODULE,
4379+ .open = cpu_seq_open,
4380+ .read = seq_read,
4381+ .write = cpu_write,
4382+ .llseek = seq_lseek,
4383+ .release = seq_release,
4384+};
4385+
4386+static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
4387+ unsigned file, struct dentry *dentry)
4388+{
4389+ struct cpu_private *priv = NULL;
4390+
4391+ /* Already initialized */
4392+ if (file == CPU_INDEX_BIT)
4393+ if (per_cpu(cpu_arr[type].init, cpu))
4394+ return 0;
4395+
4396+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
4397+ if (priv == NULL)
4398+ return -ENOMEM;
4399+
4400+ priv->cpu = cpu;
4401+ priv->type = type;
4402+ priv->reg = reg;
4403+ priv->file = file;
4404+ mutex_lock(&cpu_debug_lock);
4405+ per_cpu(priv_arr[type], cpu) = priv;
4406+ per_cpu(cpu_priv_count, cpu)++;
4407+ mutex_unlock(&cpu_debug_lock);
4408+
4409+ if (file)
4410+ debugfs_create_file(cpu_file[file].name, S_IRUGO,
4411+ dentry, (void *)priv, &cpu_fops);
4412+ else {
4413+ debugfs_create_file(cpu_base[type].name, S_IRUGO,
4414+ per_cpu(cpu_arr[type].dentry, cpu),
4415+ (void *)priv, &cpu_fops);
4416+ mutex_lock(&cpu_debug_lock);
4417+ per_cpu(cpu_arr[type].init, cpu) = 1;
4418+ mutex_unlock(&cpu_debug_lock);
4419+ }
4420+
4421+ return 0;
4422+}
4423+
4424+static int cpu_init_regfiles(unsigned cpu, unsigned int type, unsigned reg,
4425+ struct dentry *dentry)
4426+{
4427+ unsigned file;
4428+ int err = 0;
4429+
4430+ for (file = 0; file < ARRAY_SIZE(cpu_file); file++) {
4431+ err = cpu_create_file(cpu, type, reg, file, dentry);
4432+ if (err)
4433+ return err;
4434+ }
4435+
4436+ return err;
4437+}
4438+
4439+static int cpu_init_msr(unsigned cpu, unsigned type, struct dentry *dentry)
4440+{
4441+ struct dentry *cpu_dentry = NULL;
4442+ unsigned reg, reg_min, reg_max;
4443+ int i, err = 0;
4444+ char reg_dir[12];
4445+ u32 low, high;
4446+
4447+ for (i = 0; i < ARRAY_SIZE(cpu_reg_range); i++) {
4448+ if (!get_cpu_range(cpu, &reg_min, &reg_max, i,
4449+ cpu_base[type].flag))
4450+ continue;
4451+
4452+ for (reg = reg_min; reg <= reg_max; reg++) {
4453+ if (rdmsr_safe_on_cpu(cpu, reg, &low, &high))
4454+ continue;
4455+
4456+ sprintf(reg_dir, "0x%x", reg);
4457+ cpu_dentry = debugfs_create_dir(reg_dir, dentry);
4458+ err = cpu_init_regfiles(cpu, type, reg, cpu_dentry);
4459+ if (err)
4460+ return err;
4461+ }
4462+ }
4463+
4464+ return err;
4465+}
4466+
4467+static int cpu_init_allreg(unsigned cpu, struct dentry *dentry)
4468+{
4469+ struct dentry *cpu_dentry = NULL;
4470+ unsigned type;
4471+ int err = 0;
4472+
4473+ for (type = 0; type < ARRAY_SIZE(cpu_base) - 1; type++) {
4474+ if (!is_typeflag_valid(cpu, cpu_base[type].flag))
4475+ continue;
4476+ cpu_dentry = debugfs_create_dir(cpu_base[type].name, dentry);
4477+ per_cpu(cpu_arr[type].dentry, cpu) = cpu_dentry;
4478+
4479+ if (type < CPU_TSS_BIT)
4480+ err = cpu_init_msr(cpu, type, cpu_dentry);
4481+ else
4482+ err = cpu_create_file(cpu, type, 0, CPU_INDEX_BIT,
4483+ cpu_dentry);
4484+ if (err)
4485+ return err;
4486+ }
4487+
4488+ return err;
4489+}
4490+
4491+static int cpu_init_cpu(void)
4492+{
4493+ struct dentry *cpu_dentry = NULL;
4494+ struct cpuinfo_x86 *cpui;
4495+ char cpu_dir[12];
4496+ unsigned cpu;
4497+ int err = 0;
4498+
4499+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
4500+ cpui = &cpu_data(cpu);
4501+ if (!cpu_has(cpui, X86_FEATURE_MSR))
4502+ continue;
4503+
4504+ sprintf(cpu_dir, "cpu%d", cpu);
4505+ cpu_dentry = debugfs_create_dir(cpu_dir, cpu_debugfs_dir);
4506+ err = cpu_init_allreg(cpu, cpu_dentry);
4507+
4508+ pr_info("cpu%d(%d) debug files %d\n",
4509+ cpu, nr_cpu_ids, per_cpu(cpu_priv_count, cpu));
4510+ if (per_cpu(cpu_priv_count, cpu) > MAX_CPU_FILES) {
4511+ pr_err("Register files count %d exceeds limit %d\n",
4512+ per_cpu(cpu_priv_count, cpu), MAX_CPU_FILES);
4513+ per_cpu(cpu_priv_count, cpu) = MAX_CPU_FILES;
4514+ err = -ENFILE;
4515+ }
4516+ if (err)
4517+ return err;
4518+ }
4519+
4520+ return err;
4521+}
4522+
4523+static int __init cpu_debug_init(void)
4524+{
4525+ cpu_debugfs_dir = debugfs_create_dir("cpu", arch_debugfs_dir);
4526+
4527+ return cpu_init_cpu();
4528+}
4529+
4530+static void __exit cpu_debug_exit(void)
4531+{
4532+ int i, cpu;
4533+
4534+ if (cpu_debugfs_dir)
4535+ debugfs_remove_recursive(cpu_debugfs_dir);
4536+
4537+ for (cpu = 0; cpu < nr_cpu_ids; cpu++)
4538+ for (i = 0; i < per_cpu(cpu_priv_count, cpu); i++)
4539+ kfree(per_cpu(priv_arr[i], cpu));
4540+}
4541+
4542+module_init(cpu_debug_init);
4543+module_exit(cpu_debug_exit);
4544+
4545+MODULE_AUTHOR("Jaswinder Singh Rajput");
4546+MODULE_DESCRIPTION("CPU Debug module");
4547+MODULE_LICENSE("GPL");
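The cpu_debug.c file added above is built on the standard debugfs + seq_file pattern: each register group becomes a file whose show callback prints the state, with per-file context carried in seq->private. A stripped-down sketch of that pattern using single_open(); all names here are illustrative:

#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

static struct dentry *demo_dir;

static int demo_show(struct seq_file *seq, void *v)
{
	/* single_open() stored inode->i_private in seq->private */
	seq_printf(seq, "label: %s\n", (char *)seq->private);
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_show, inode->i_private);
}

static const struct file_operations demo_fops = {
	.owner   = THIS_MODULE,
	.open    = demo_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

static int __init demo_init(void)
{
	demo_dir = debugfs_create_dir("cpu_demo", NULL);
	debugfs_create_file("value", S_IRUGO, demo_dir,
			    (void *)"demo", &demo_fops);
	return 0;
}

static void __exit demo_exit(void)
{
	debugfs_remove_recursive(demo_dir);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");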
4548diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
4549index ab1cd30..3f12dab 100644
4550--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
4551+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
4552@@ -1351,7 +1351,6 @@ static int __devexit powernowk8_cpu_exit(struct cpufreq_policy *pol)
4553
4554 kfree(data->powernow_table);
4555 kfree(data);
4556- per_cpu(powernow_data, pol->cpu) = NULL;
4557
4558 return 0;
4559 }
4560@@ -1371,7 +1370,7 @@ static unsigned int powernowk8_get(unsigned int cpu)
4561 int err;
4562
4563 if (!data)
4564- return 0;
4565+ return -EINVAL;
4566
4567 smp_call_function_single(cpu, query_values_on_cpu, &err, true);
4568 if (err)
4569diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
4570index a2a03cf..40e1835 100644
4571--- a/arch/x86/kernel/cpu/intel.c
4572+++ b/arch/x86/kernel/cpu/intel.c
4573@@ -70,6 +70,7 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
4574 if (c->x86_power & (1 << 8)) {
4575 set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
4576 set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
4577+ set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE);
4578 sched_clock_stable = 1;
4579 }
4580
4581diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
4582index 8178d03..804c40e 100644
4583--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
4584+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
4585@@ -94,7 +94,7 @@ static const struct _cache_table __cpuinitconst cache_table[] =
4586 { 0xd1, LVL_3, 1024 }, /* 4-way set assoc, 64 byte line size */
4587 { 0xd2, LVL_3, 2048 }, /* 4-way set assoc, 64 byte line size */
4588 { 0xd6, LVL_3, 1024 }, /* 8-way set assoc, 64 byte line size */
4589- { 0xd7, LVL_3, 2048 }, /* 8-way set assoc, 64 byte line size */
4590+ { 0xd7, LVL_3, 2038 }, /* 8-way set assoc, 64 byte line size */
4591 { 0xd8, LVL_3, 4096 }, /* 12-way set assoc, 64 byte line size */
4592 { 0xdc, LVL_3, 2048 }, /* 12-way set assoc, 64 byte line size */
4593 { 0xdd, LVL_3, 4096 }, /* 12-way set assoc, 64 byte line size */
4594@@ -102,9 +102,6 @@ static const struct _cache_table __cpuinitconst cache_table[] =
4595 { 0xe2, LVL_3, 2048 }, /* 16-way set assoc, 64 byte line size */
4596 { 0xe3, LVL_3, 4096 }, /* 16-way set assoc, 64 byte line size */
4597 { 0xe4, LVL_3, 8192 }, /* 16-way set assoc, 64 byte line size */
4598- { 0xea, LVL_3, 12288 }, /* 24-way set assoc, 64 byte line size */
4599- { 0xeb, LVL_3, 18432 }, /* 24-way set assoc, 64 byte line size */
4600- { 0xec, LVL_3, 24576 }, /* 24-way set assoc, 64 byte line size */
4601 { 0x00, 0, 0}
4602 };
4603
4604diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
4605index 0f16a2b..721a77c 100644
4606--- a/arch/x86/kernel/cpu/mcheck/mce.c
4607+++ b/arch/x86/kernel/cpu/mcheck/mce.c
4608@@ -1374,14 +1374,13 @@ static void mce_init_timer(void)
4609 struct timer_list *t = &__get_cpu_var(mce_timer);
4610 int *n = &__get_cpu_var(mce_next_interval);
4611
4612- setup_timer(t, mcheck_timer, smp_processor_id());
4613-
4614 if (mce_ignore_ce)
4615 return;
4616
4617 *n = check_interval * HZ;
4618 if (!*n)
4619 return;
4620+ setup_timer(t, mcheck_timer, smp_processor_id());
4621 t->expires = round_jiffies(jiffies + *n);
4622 add_timer_on(t, smp_processor_id());
4623 }
4624@@ -1992,11 +1991,9 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
4625 break;
4626 case CPU_DOWN_FAILED:
4627 case CPU_DOWN_FAILED_FROZEN:
4628- if (!mce_ignore_ce && check_interval) {
4629- t->expires = round_jiffies(jiffies +
4630+ t->expires = round_jiffies(jiffies +
4631 __get_cpu_var(mce_next_interval));
4632- add_timer_on(t, cpu);
4633- }
4634+ add_timer_on(t, cpu);
4635 smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
4636 break;
4637 case CPU_POST_DEAD:
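The mce_init_timer() hunk above moves setup_timer() below the early-return checks; the underlying idiom is a kernel timer queued on one specific CPU. A minimal sketch of that idiom, with the callback body and one-second interval illustrative:

#include <linux/jiffies.h>
#include <linux/timer.h>

static struct timer_list demo_timer;

static void demo_timer_fn(unsigned long data)
{
	/* runs in softirq context on the CPU the timer was added on */
}

static void demo_timer_start(int cpu)
{
	setup_timer(&demo_timer, demo_timer_fn, 0);
	demo_timer.expires = round_jiffies(jiffies + HZ);
	add_timer_on(&demo_timer, cpu);	/* queue on that CPU's timer wheel */
}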
4638diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
4639index 687638e..b3a1dba 100644
4640--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
4641+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
4642@@ -49,8 +49,6 @@ static DEFINE_PER_CPU(struct thermal_state, thermal_state);
4643
4644 static atomic_t therm_throt_en = ATOMIC_INIT(0);
4645
4646-static u32 lvtthmr_init __read_mostly;
4647-
4648 #ifdef CONFIG_SYSFS
4649 #define define_therm_throt_sysdev_one_ro(_name) \
4650 static SYSDEV_ATTR(_name, 0444, therm_throt_sysdev_show_##_name, NULL)
4651@@ -256,27 +254,14 @@ asmlinkage void smp_thermal_interrupt(struct pt_regs *regs)
4652 ack_APIC_irq();
4653 }
4654
4655-void __init mcheck_intel_therm_init(void)
4656-{
4657- /*
4658- * This function is only called on boot CPU. Save the init thermal
4659- * LVT value on BSP and use that value to restore APs' thermal LVT
4660- * entry BIOS programmed later
4661- */
4662- if (cpu_has(&boot_cpu_data, X86_FEATURE_ACPI) &&
4663- cpu_has(&boot_cpu_data, X86_FEATURE_ACC))
4664- lvtthmr_init = apic_read(APIC_LVTTHMR);
4665-}
4666-
4667 void intel_init_thermal(struct cpuinfo_x86 *c)
4668 {
4669 unsigned int cpu = smp_processor_id();
4670 int tm2 = 0;
4671 u32 l, h;
4672
4673- /* Thermal monitoring depends on APIC, ACPI and clock modulation */
4674- if (!cpu_has_apic || !cpu_has(c, X86_FEATURE_ACPI) ||
4675- !cpu_has(c, X86_FEATURE_ACC))
4676+ /* Thermal monitoring depends on ACPI and clock modulation */
4677+ if (!cpu_has(c, X86_FEATURE_ACPI) || !cpu_has(c, X86_FEATURE_ACC))
4678 return;
4679
4680 /*
4681@@ -285,20 +270,7 @@ void intel_init_thermal(struct cpuinfo_x86 *c)
4682 * since it might be delivered via SMI already:
4683 */
4684 rdmsr(MSR_IA32_MISC_ENABLE, l, h);
4685-
4686- /*
4687- * The initial value of thermal LVT entries on all APs always reads
4688- * 0x10000 because APs are woken up by BSP issuing INIT-SIPI-SIPI
4689- * sequence to them and LVT registers are reset to 0s except for
4690- * the mask bits which are set to 1s when APs receive INIT IPI.
4691- * Always restore the value that BIOS has programmed on AP based on
4692- * BSP's info we saved since BIOS is always setting the same value
4693- * for all threads/cores
4694- */
4695- apic_write(APIC_LVTTHMR, lvtthmr_init);
4696-
4697- h = lvtthmr_init;
4698-
4699+ h = apic_read(APIC_LVTTHMR);
4700 if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) {
4701 printk(KERN_DEBUG
4702 "CPU%d: Thermal monitoring handled by SMI\n", cpu);
4703diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
4704index 898df97..fab786f 100644
4705--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
4706+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
4707@@ -712,7 +712,7 @@ static void probe_nmi_watchdog(void)
4708 switch (boot_cpu_data.x86_vendor) {
4709 case X86_VENDOR_AMD:
4710 if (boot_cpu_data.x86 != 6 && boot_cpu_data.x86 != 15 &&
4711- boot_cpu_data.x86 != 16 && boot_cpu_data.x86 != 17)
4712+ boot_cpu_data.x86 != 16)
4713 return;
4714 wd_ops = &k7_wd_ops;
4715 break;
4716diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
4717index 0c91110..6a52d4b 100644
4718--- a/arch/x86/kernel/cpuid.c
4719+++ b/arch/x86/kernel/cpuid.c
4720@@ -192,8 +192,7 @@ static int __init cpuid_init(void)
4721 int i, err = 0;
4722 i = 0;
4723
4724- if (__register_chrdev(CPUID_MAJOR, 0, NR_CPUS,
4725- "cpu/cpuid", &cpuid_fops)) {
4726+ if (register_chrdev(CPUID_MAJOR, "cpu/cpuid", &cpuid_fops)) {
4727 printk(KERN_ERR "cpuid: unable to get major %d for cpuid\n",
4728 CPUID_MAJOR);
4729 err = -EBUSY;
4730@@ -222,7 +221,7 @@ out_class:
4731 }
4732 class_destroy(cpuid_class);
4733 out_chrdev:
4734- __unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid");
4735+ unregister_chrdev(CPUID_MAJOR, "cpu/cpuid");
4736 out:
4737 return err;
4738 }
4739@@ -234,7 +233,7 @@ static void __exit cpuid_exit(void)
4740 for_each_online_cpu(cpu)
4741 cpuid_device_destroy(cpu);
4742 class_destroy(cpuid_class);
4743- __unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid");
4744+ unregister_chrdev(CPUID_MAJOR, "cpu/cpuid");
4745 unregister_hotcpu_notifier(&cpuid_class_cpu_notifier);
4746 }
4747
4748diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
4749index b5c061f..5e9b0e5 100644
4750--- a/arch/x86/kernel/entry_64.S
@@ -4876,39 +556,6 @@ index 0000000..e07ee30
4876+
4877+#endif
4878+
4879diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
4880index 5877873..dedc2bd 100644
4881--- a/arch/x86/kernel/hpet.c
4882+++ b/arch/x86/kernel/hpet.c
4883@@ -33,8 +33,6 @@
4884 * HPET address is set in acpi/boot.c, when an ACPI entry exists
4885 */
4886 unsigned long hpet_address;
4887-u8 hpet_msi_disable;
4888-
4889 #ifdef CONFIG_PCI_MSI
4890 static unsigned long hpet_num_timers;
4891 #endif
4892@@ -586,9 +584,6 @@ static void hpet_msi_capability_lookup(unsigned int start_timer)
4893 unsigned int num_timers_used = 0;
4894 int i;
4895
4896- if (hpet_msi_disable)
4897- return;
4898-
4899 id = hpet_readl(HPET_ID);
4900
4901 num_timers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT);
4902@@ -916,9 +911,6 @@ static __init int hpet_late_init(void)
4903 hpet_reserve_platform_timers(hpet_readl(HPET_ID));
4904 hpet_print_config();
4905
4906- if (hpet_msi_disable)
4907- return 0;
4908-
4909 for_each_online_cpu(cpu) {
4910 hpet_cpuhp_notify(NULL, CPU_ONLINE, (void *)(long)cpu);
4911 }
4912diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
4913index 40f3077..f5fa64c 100644
4914--- a/arch/x86/kernel/irqinit.c
@@ -4923,316 +570,6 @@ index 40f3077..f5fa64c 100644
4923 /* Low priority IPI to cleanup after moving an irq */
4924 set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
4925 set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors);
4926diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
4927index 5eaeb5e..6a3cefc 100644
4928--- a/arch/x86/kernel/msr.c
4929+++ b/arch/x86/kernel/msr.c
4930@@ -251,7 +251,7 @@ static int __init msr_init(void)
4931 int i, err = 0;
4932 i = 0;
4933
4934- if (__register_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr", &msr_fops)) {
4935+ if (register_chrdev(MSR_MAJOR, "cpu/msr", &msr_fops)) {
4936 printk(KERN_ERR "msr: unable to get major %d for msr\n",
4937 MSR_MAJOR);
4938 err = -EBUSY;
4939@@ -279,7 +279,7 @@ out_class:
4940 msr_device_destroy(i);
4941 class_destroy(msr_class);
4942 out_chrdev:
4943- __unregister_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr");
4944+ unregister_chrdev(MSR_MAJOR, "cpu/msr");
4945 out:
4946 return err;
4947 }
4948@@ -290,7 +290,7 @@ static void __exit msr_exit(void)
4949 for_each_online_cpu(cpu)
4950 msr_device_destroy(cpu);
4951 class_destroy(msr_class);
4952- __unregister_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr");
4953+ unregister_chrdev(MSR_MAJOR, "cpu/msr");
4954 unregister_hotcpu_notifier(&msr_class_cpu_notifier);
4955 }
4956
4957diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
4958index e6ec8a2..971a3be 100644
4959--- a/arch/x86/kernel/pci-calgary_64.c
4960+++ b/arch/x86/kernel/pci-calgary_64.c
4961@@ -318,15 +318,13 @@ static inline struct iommu_table *find_iommu_table(struct device *dev)
4962
4963 pdev = to_pci_dev(dev);
4964
4965- /* search up the device tree for an iommu */
4966 pbus = pdev->bus;
4967- do {
4968- tbl = pci_iommu(pbus);
4969- if (tbl && tbl->it_busno == pbus->number)
4970- break;
4971- tbl = NULL;
4972+
4973+ /* is the device behind a bridge? Look for the root bus */
4974+ while (pbus->parent)
4975 pbus = pbus->parent;
4976- } while (pbus);
4977+
4978+ tbl = pci_iommu(pbus);
4979
4980 BUG_ON(tbl && (tbl->it_busno != pbus->number));
4981
4982diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
4983index 6ac3931..a6e804d 100644
4984--- a/arch/x86/kernel/pci-dma.c
4985+++ b/arch/x86/kernel/pci-dma.c
4986@@ -214,7 +214,7 @@ static __init int iommu_setup(char *p)
4987 if (!strncmp(p, "allowdac", 8))
4988 forbid_dac = 0;
4989 if (!strncmp(p, "nodac", 5))
4990- forbid_dac = 1;
4991+ forbid_dac = -1;
4992 if (!strncmp(p, "usedac", 6)) {
4993 forbid_dac = -1;
4994 return 1;
4995diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
4996index fcc0b5c..a7f1b64 100644
4997--- a/arch/x86/kernel/pci-gart_64.c
4998+++ b/arch/x86/kernel/pci-gart_64.c
4999@@ -856,7 +856,7 @@ void __init gart_parse_options(char *p)
5000 #endif
5001 if (isdigit(*p) && get_option(&p, &arg))
5002 iommu_size = arg;
5003- if (!strncmp(p, "fullflush", 9))
5004+ if (!strncmp(p, "fullflush", 8))
5005 iommu_fullflush = 1;
5006 if (!strncmp(p, "nofullflush", 11))
5007 iommu_fullflush = 0;
5008diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
5009index f010ab4..5284cd2 100644
5010--- a/arch/x86/kernel/process.c
5011+++ b/arch/x86/kernel/process.c
5012@@ -91,6 +91,18 @@ void flush_thread(void)
5013 {
5014 struct task_struct *tsk = current;
5015
5016+#ifdef CONFIG_X86_64
5017+ if (test_tsk_thread_flag(tsk, TIF_ABI_PENDING)) {
5018+ clear_tsk_thread_flag(tsk, TIF_ABI_PENDING);
5019+ if (test_tsk_thread_flag(tsk, TIF_IA32)) {
5020+ clear_tsk_thread_flag(tsk, TIF_IA32);
5021+ } else {
5022+ set_tsk_thread_flag(tsk, TIF_IA32);
5023+ current_thread_info()->status |= TS_COMPAT;
5024+ }
5025+ }
5026+#endif
5027+
5028 clear_tsk_thread_flag(tsk, TIF_DEBUG);
5029
5030 tsk->thread.debugreg0 = 0;
5031diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
5032index f9ce04f..eb62cbc 100644
5033--- a/arch/x86/kernel/process_64.c
5034+++ b/arch/x86/kernel/process_64.c
5035@@ -540,17 +540,6 @@ sys_clone(unsigned long clone_flags, unsigned long newsp,
5036 return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
5037 }
5038
5039-void set_personality_ia32(void)
5040-{
5041- /* inherit personality from parent */
5042-
5043- /* Make sure to be in 32bit mode */
5044- set_thread_flag(TIF_IA32);
5045-
5046- /* Prepare the first "return" to user space */
5047- current_thread_info()->status |= TS_COMPAT;
5048-}
5049-
5050 unsigned long get_wchan(struct task_struct *p)
5051 {
5052 unsigned long stack;
5053diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
5054index c06acdd..7b058a2 100644
5055--- a/arch/x86/kernel/ptrace.c
5056+++ b/arch/x86/kernel/ptrace.c
5057@@ -408,14 +408,14 @@ static int genregs_get(struct task_struct *target,
5058 {
5059 if (kbuf) {
5060 unsigned long *k = kbuf;
5061- while (count >= sizeof(*k)) {
5062+ while (count > 0) {
5063 *k++ = getreg(target, pos);
5064 count -= sizeof(*k);
5065 pos += sizeof(*k);
5066 }
5067 } else {
5068 unsigned long __user *u = ubuf;
5069- while (count >= sizeof(*u)) {
5070+ while (count > 0) {
5071 if (__put_user(getreg(target, pos), u++))
5072 return -EFAULT;
5073 count -= sizeof(*u);
5074@@ -434,14 +434,14 @@ static int genregs_set(struct task_struct *target,
5075 int ret = 0;
5076 if (kbuf) {
5077 const unsigned long *k = kbuf;
5078- while (count >= sizeof(*k) && !ret) {
5079+ while (count > 0 && !ret) {
5080 ret = putreg(target, pos, *k++);
5081 count -= sizeof(*k);
5082 pos += sizeof(*k);
5083 }
5084 } else {
5085 const unsigned long __user *u = ubuf;
5086- while (count >= sizeof(*u) && !ret) {
5087+ while (count > 0 && !ret) {
5088 unsigned long word;
5089 ret = __get_user(word, u++);
5090 if (ret)
5091@@ -1219,14 +1219,14 @@ static int genregs32_get(struct task_struct *target,
5092 {
5093 if (kbuf) {
5094 compat_ulong_t *k = kbuf;
5095- while (count >= sizeof(*k)) {
5096+ while (count > 0) {
5097 getreg32(target, pos, k++);
5098 count -= sizeof(*k);
5099 pos += sizeof(*k);
5100 }
5101 } else {
5102 compat_ulong_t __user *u = ubuf;
5103- while (count >= sizeof(*u)) {
5104+ while (count > 0) {
5105 compat_ulong_t word;
5106 getreg32(target, pos, &word);
5107 if (__put_user(word, u++))
5108@@ -1247,14 +1247,14 @@ static int genregs32_set(struct task_struct *target,
5109 int ret = 0;
5110 if (kbuf) {
5111 const compat_ulong_t *k = kbuf;
5112- while (count >= sizeof(*k) && !ret) {
5113+ while (count > 0 && !ret) {
5114 ret = putreg32(target, pos, *k++);
5115 count -= sizeof(*k);
5116 pos += sizeof(*k);
5117 }
5118 } else {
5119 const compat_ulong_t __user *u = ubuf;
5120- while (count >= sizeof(*u) && !ret) {
5121+ while (count > 0 && !ret) {
5122 compat_ulong_t word;
5123 ret = __get_user(word, u++);
5124 if (ret)
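All four genregs hunks change the same copy-loop guard from count >= sizeof(*k) to count > 0; the loop itself walks the register file one word at a time, advancing pos and draining count. A standalone sketch of the kernel-buffer branch, with a stub getreg() over a small array:

#include <stdio.h>

static unsigned long regs[4] = { 0xaa, 0xbb, 0xcc, 0xdd };

/* Stand-in for getreg(): fetch the register stored at byte offset pos. */
static unsigned long getreg(unsigned int pos)
{
	return regs[pos / sizeof(unsigned long)];
}

int main(void)
{
	unsigned long kbuf[4];
	unsigned long *k = kbuf;
	unsigned int pos = 0, count = sizeof(kbuf);

	/* Same shape as genregs_get(): when count is a multiple of the word
	 * size, "count > 0" and "count >= sizeof(*k)" iterate identically;
	 * the stricter form only differs for a trailing partial word. */
	while (count >= sizeof(*k)) {
		*k++ = getreg(pos);
		count -= sizeof(*k);
		pos += sizeof(*k);
	}

	for (unsigned int i = 0; i < 4; i++)
		printf("reg %u = %#lx\n", i, kbuf[i]);
	return 0;
}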
5125diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
5126index 0040164..6c3b2c6 100644
5127--- a/arch/x86/kernel/quirks.c
5128+++ b/arch/x86/kernel/quirks.c
5129@@ -491,19 +491,6 @@ void force_hpet_resume(void)
5130 break;
5131 }
5132 }
5133-
5134-/*
5135- * HPET MSI on some boards (ATI SB700/SB800) has side effect on
5136- * floppy DMA. Disable HPET MSI on such platforms.
5137- */
5138-static void force_disable_hpet_msi(struct pci_dev *unused)
5139-{
5140- hpet_msi_disable = 1;
5141-}
5142-
5143-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
5144- force_disable_hpet_msi);
5145-
5146 #endif
5147
5148 #if defined(CONFIG_PCI) && defined(CONFIG_NUMA)
5149diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
5150index bff34d6..f930787 100644
5151--- a/arch/x86/kernel/reboot.c
5152+++ b/arch/x86/kernel/reboot.c
5153@@ -203,15 +203,6 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
5154 DMI_MATCH(DMI_BOARD_NAME, "0T656F"),
5155 },
5156 },
5157- { /* Handle problems with rebooting on Dell OptiPlex 760 with 0G919G*/
5158- .callback = set_bios_reboot,
5159- .ident = "Dell OptiPlex 760",
5160- .matches = {
5161- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
5162- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 760"),
5163- DMI_MATCH(DMI_BOARD_NAME, "0G919G"),
5164- },
5165- },
5166 { /* Handle problems with rebooting on Dell 2400's */
5167 .callback = set_bios_reboot,
5168 .ident = "Dell PowerEdge 2400",
5169@@ -268,14 +259,6 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
5170 DMI_MATCH(DMI_PRODUCT_NAME, "SBC-FITPC2"),
5171 },
5172 },
5173- { /* Handle problems with rebooting on ASUS P4S800 */
5174- .callback = set_bios_reboot,
5175- .ident = "ASUS P4S800",
5176- .matches = {
5177- DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
5178- DMI_MATCH(DMI_BOARD_NAME, "P4S800"),
5179- },
5180- },
5181 { }
5182 };
5183
5184diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
5185index 8425f7e..2a34f9c 100644
5186--- a/arch/x86/kernel/setup.c
5187+++ b/arch/x86/kernel/setup.c
5188@@ -109,7 +109,6 @@
5189 #ifdef CONFIG_X86_64
5190 #include <asm/numa_64.h>
5191 #endif
5192-#include <asm/mce.h>
5193
5194 /*
5195 * end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries.
5196@@ -667,27 +666,19 @@ static struct dmi_system_id __initdata bad_bios_dmi_table[] = {
5197 DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix/MSC"),
5198 },
5199 },
5200+ {
5201 /*
5202- * AMI BIOS with low memory corruption was found on Intel DG45ID and
5203- * DG45FC boards.
5204- * It has a different DMI_BIOS_VENDOR = "Intel Corp.", for now we will
5205+ * AMI BIOS with low memory corruption was found on Intel DG45ID board.
5206+ * It has a different DMI_BIOS_VENDOR = "Intel Corp.", for now we will
5207 * match only DMI_BOARD_NAME and see if there is more bad products
5208 * with this vendor.
5209 */
5210- {
5211 .callback = dmi_low_memory_corruption,
5212 .ident = "AMI BIOS",
5213 .matches = {
5214 DMI_MATCH(DMI_BOARD_NAME, "DG45ID"),
5215 },
5216 },
5217- {
5218- .callback = dmi_low_memory_corruption,
5219- .ident = "AMI BIOS",
5220- .matches = {
5221- DMI_MATCH(DMI_BOARD_NAME, "DG45FC"),
5222- },
5223- },
5224 #endif
5225 {}
5226 };
5227@@ -1040,8 +1031,6 @@ void __init setup_arch(char **cmdline_p)
5228 #endif
5229 #endif
5230 x86_init.oem.banner();
5231-
5232- mcheck_intel_therm_init();
5233 }
5234
5235 #ifdef CONFIG_X86_32
5236diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
5237index ec1de97..a93528b 100644
5238--- a/arch/x86/kernel/smp.c
@@ -5301,96 +638,10 @@ index ec1de97..a93528b 100644
5301 struct smp_ops smp_ops = {
5302 .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
5303 .smp_prepare_cpus = native_smp_prepare_cpus,
5304diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
5305index dee1ff7..1884a8d 100644
5306--- a/arch/x86/kernel/sys_i386_32.c
5307+++ b/arch/x86/kernel/sys_i386_32.c
5308@@ -24,6 +24,31 @@
5309
5310 #include <asm/syscalls.h>
5311
5312+asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
5313+ unsigned long prot, unsigned long flags,
5314+ unsigned long fd, unsigned long pgoff)
5315+{
5316+ int error = -EBADF;
5317+ struct file *file = NULL;
5318+ struct mm_struct *mm = current->mm;
5319+
5320+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
5321+ if (!(flags & MAP_ANONYMOUS)) {
5322+ file = fget(fd);
5323+ if (!file)
5324+ goto out;
5325+ }
5326+
5327+ down_write(&mm->mmap_sem);
5328+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
5329+ up_write(&mm->mmap_sem);
5330+
5331+ if (file)
5332+ fput(file);
5333+out:
5334+ return error;
5335+}
5336+
5337 /*
5338 * Perform the select(nd, in, out, ex, tv) and mmap() system
5339 * calls. Linux/i386 didn't use to be able to handle more than
5340@@ -52,7 +77,7 @@ asmlinkage int old_mmap(struct mmap_arg_struct __user *arg)
5341 if (a.offset & ~PAGE_MASK)
5342 goto out;
5343
5344- err = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags,
5345+ err = sys_mmap2(a.addr, a.len, a.prot, a.flags,
5346 a.fd, a.offset >> PAGE_SHIFT);
5347 out:
5348 return err;
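sys_mmap2() takes its file offset in pages rather than bytes, which is why old_mmap() above rejects unaligned byte offsets and then shifts by PAGE_SHIFT. A hedged user-space illustration, assuming 4 KiB pages and a 32-bit x86 build (SYS_mmap2 only exists on 32-bit ABIs; /etc/hostname is just an arbitrary readable file):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/syscall.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/etc/hostname", O_RDONLY);
	if (fd < 0)
		return 1;

	/* mmap2 takes the offset in 4096-byte pages; page 0 here.  On
	 * 64-bit builds SYS_mmap2 is not defined, and plain mmap() with a
	 * byte offset is the equivalent call. */
#ifdef SYS_mmap2
	void *p = (void *)syscall(SYS_mmap2, NULL, 4096, PROT_READ,
				  MAP_PRIVATE, fd, 0 /* pgoff */);
#else
	void *p = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, fd, 0);
#endif
	if (p != MAP_FAILED)
		printf("%.16s\n", (char *)p);
	close(fd);
	return 0;
}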
5349diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
5350index 8aa2057..45e00eb 100644
5351--- a/arch/x86/kernel/sys_x86_64.c
5352+++ b/arch/x86/kernel/sys_x86_64.c
5353@@ -23,11 +23,26 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
5354 unsigned long, fd, unsigned long, off)
5355 {
5356 long error;
5357+ struct file *file;
5358+
5359 error = -EINVAL;
5360 if (off & ~PAGE_MASK)
5361 goto out;
5362
5363- error = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
5364+ error = -EBADF;
5365+ file = NULL;
5366+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
5367+ if (!(flags & MAP_ANONYMOUS)) {
5368+ file = fget(fd);
5369+ if (!file)
5370+ goto out;
5371+ }
5372+ down_write(&current->mm->mmap_sem);
5373+ error = do_mmap_pgoff(file, addr, len, prot, flags, off >> PAGE_SHIFT);
5374+ up_write(&current->mm->mmap_sem);
5375+
5376+ if (file)
5377+ fput(file);
5378 out:
5379 return error;
5380 }
5381diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
5382index 76d70a4..17fcb3a 100644
642index 0157cd2..17fcb3a 100644
5383--- a/arch/x86/kernel/syscall_table_32.S
5384+++ b/arch/x86/kernel/syscall_table_32.S
5385@@ -191,7 +191,7 @@ ENTRY(sys_call_table)
5386 .long sys_ni_syscall /* reserved for streams2 */
5387 .long ptregs_vfork /* 190 */
5388 .long sys_getrlimit
5389- .long sys_mmap_pgoff
5390+ .long sys_mmap2
5391 .long sys_truncate64
5392 .long sys_ftruncate64
5393 .long sys_stat64 /* 195 */
5394@@ -336,3 +336,17 @@ ENTRY(sys_call_table)
5395 .long sys_pwritev
5396 .long sys_rt_tgsigqueueinfo /* 335 */
@@ -5409,15759 +660,8 @@ index 76d70a4..17fcb3a 100644
5409+ .long sys_wait_for_ts_release
5410+ .long sys_release_ts
5411+ .long sys_null_call
5412diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
5413index 364d015..1740c85 100644
5414--- a/arch/x86/kernel/tlb_uv.c
5415+++ b/arch/x86/kernel/tlb_uv.c
5416@@ -817,8 +817,10 @@ static int __init uv_init_blade(int blade)
5417 */
5418 apicid = blade_to_first_apicid(blade);
5419 pa = uv_read_global_mmr64(pnode, UVH_BAU_DATA_CONFIG);
5420- uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG,
5421+ if ((pa & 0xff) != UV_BAU_MESSAGE) {
5422+ uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG,
5423 ((apicid << 32) | UV_BAU_MESSAGE));
5424+ }
5425 return 0;
5426 }
5427
5428diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
5429index 597683a..cd982f4 100644
5430--- a/arch/x86/kernel/tsc.c
5431+++ b/arch/x86/kernel/tsc.c
5432@@ -763,7 +763,6 @@ void mark_tsc_unstable(char *reason)
5433 {
5434 if (!tsc_unstable) {
5435 tsc_unstable = 1;
5436- sched_clock_stable = 0;
5437 printk(KERN_INFO "Marking TSC unstable due to %s\n", reason);
5438 /* Change only the rating, when not registered */
5439 if (clocksource_tsc.mult)
5440diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
5441index e02dbb6..1be5cd6 100644
5442--- a/arch/x86/kvm/emulate.c
5443+++ b/arch/x86/kvm/emulate.c
5444@@ -613,9 +613,6 @@ static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
5445 {
5446 int rc = 0;
5447
5448- /* x86 instructions are limited to 15 bytes. */
5449- if (eip + size - ctxt->decode.eip_orig > 15)
5450- return X86EMUL_UNHANDLEABLE;
5451 eip += ctxt->cs_base;
5452 while (size--) {
5453 rc = do_fetch_insn_byte(ctxt, ops, eip++, dest++);
5454@@ -874,7 +871,7 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
5455 /* Shadow copy of register state. Committed on successful emulation. */
5456
5457 memset(c, 0, sizeof(struct decode_cache));
5458- c->eip = c->eip_orig = kvm_rip_read(ctxt->vcpu);
5459+ c->eip = kvm_rip_read(ctxt->vcpu);
5460 ctxt->cs_base = seg_base(ctxt, VCPU_SREG_CS);
5461 memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs);
5462
5463diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
5464index 88ad162..144e7f6 100644
5465--- a/arch/x86/kvm/i8254.c
5466+++ b/arch/x86/kvm/i8254.c
5467@@ -465,9 +465,6 @@ static int pit_ioport_read(struct kvm_io_device *this,
5468 return -EOPNOTSUPP;
5469
5470 addr &= KVM_PIT_CHANNEL_MASK;
5471- if (addr == 3)
5472- return 0;
5473-
5474 s = &pit_state->channels[addr];
5475
5476 mutex_lock(&pit_state->lock);
5477diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
5478index 8dfeaaa..23c2176 100644
5479--- a/arch/x86/kvm/lapic.c
5480+++ b/arch/x86/kvm/lapic.c
5481@@ -374,12 +374,6 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
5482 if (unlikely(!apic_enabled(apic)))
5483 break;
5484
5485- if (trig_mode) {
5486- apic_debug("level trig mode for vector %d", vector);
5487- apic_set_vector(vector, apic->regs + APIC_TMR);
5488- } else
5489- apic_clear_vector(vector, apic->regs + APIC_TMR);
5490-
5491 result = !apic_test_and_set_irr(vector, apic);
5492 trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
5493 trig_mode, vector, !result);
5494@@ -390,6 +384,11 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
5495 break;
5496 }
5497
5498+ if (trig_mode) {
5499+ apic_debug("level trig mode for vector %d", vector);
5500+ apic_set_vector(vector, apic->regs + APIC_TMR);
5501+ } else
5502+ apic_clear_vector(vector, apic->regs + APIC_TMR);
5503 kvm_vcpu_kick(vcpu);
5504 break;
5505
5506@@ -1157,7 +1156,6 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu)
5507 hrtimer_cancel(&apic->lapic_timer.timer);
5508 update_divide_count(apic);
5509 start_apic_timer(apic);
5510- apic->irr_pending = true;
5511 }
5512
5513 void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
5514diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
5515index 3a01519..818b92a 100644
5516--- a/arch/x86/kvm/mmu.c
5517+++ b/arch/x86/kvm/mmu.c
5518@@ -477,7 +477,7 @@ static int host_mapping_level(struct kvm *kvm, gfn_t gfn)
5519
5520 addr = gfn_to_hva(kvm, gfn);
5521 if (kvm_is_error_hva(addr))
5522- return PT_PAGE_TABLE_LEVEL;
5523+ return page_size;
5524
5525 down_read(&current->mm->mmap_sem);
5526 vma = find_vma(current->mm, addr);
5527@@ -515,9 +515,11 @@ static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
5528 if (host_level == PT_PAGE_TABLE_LEVEL)
5529 return host_level;
5530
5531- for (level = PT_DIRECTORY_LEVEL; level <= host_level; ++level)
5532+ for (level = PT_DIRECTORY_LEVEL; level <= host_level; ++level) {
5533+
5534 if (has_wrprotected_page(vcpu->kvm, large_gfn, level))
5535 break;
5536+ }
5537
5538 return level - 1;
5539 }
5540diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
5541index 5fa3325..72558f8 100644
5542--- a/arch/x86/kvm/paging_tmpl.h
5543+++ b/arch/x86/kvm/paging_tmpl.h
5544@@ -150,9 +150,7 @@ walk:
5545 walker->table_gfn[walker->level - 1] = table_gfn;
5546 walker->pte_gpa[walker->level - 1] = pte_gpa;
5547
5548- if (kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte)))
5549- goto not_present;
5550-
5551+ kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte));
5552 trace_kvm_mmu_paging_element(pte, walker->level);
5553
5554 if (!is_present_gpte(pte))
5555@@ -457,6 +455,8 @@ out_unlock:
5556 static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
5557 {
5558 struct kvm_shadow_walk_iterator iterator;
5559+ pt_element_t gpte;
5560+ gpa_t pte_gpa = -1;
5561 int level;
5562 u64 *sptep;
5563 int need_flush = 0;
5564@@ -471,6 +471,10 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
5565 if (level == PT_PAGE_TABLE_LEVEL ||
5566 ((level == PT_DIRECTORY_LEVEL && is_large_pte(*sptep))) ||
5567 ((level == PT_PDPE_LEVEL && is_large_pte(*sptep)))) {
5568+ struct kvm_mmu_page *sp = page_header(__pa(sptep));
5569+
5570+ pte_gpa = (sp->gfn << PAGE_SHIFT);
5571+ pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
5572
5573 if (is_shadow_present_pte(*sptep)) {
5574 rmap_remove(vcpu->kvm, sptep);
5575@@ -489,6 +493,18 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
5576 if (need_flush)
5577 kvm_flush_remote_tlbs(vcpu->kvm);
5578 spin_unlock(&vcpu->kvm->mmu_lock);
5579+
5580+ if (pte_gpa == -1)
5581+ return;
5582+ if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
5583+ sizeof(pt_element_t)))
5584+ return;
5585+ if (is_present_gpte(gpte) && (gpte & PT_ACCESSED_MASK)) {
5586+ if (mmu_topup_memory_caches(vcpu))
5587+ return;
5588+ kvm_mmu_pte_write(vcpu, pte_gpa, (const u8 *)&gpte,
5589+ sizeof(pt_element_t), 0);
5590+ }
5591 }
5592
5593 static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
5594diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
5595index e78d990..ae07d26 100644
5596--- a/arch/x86/kvm/x86.c
5597+++ b/arch/x86/kvm/x86.c
5598@@ -484,19 +484,16 @@ static inline u32 bit(int bitno)
5599 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
5600 *
5601 * This list is modified at module load time to reflect the
5602- * capabilities of the host cpu. This capabilities test skips MSRs that are
5603- * kvm-specific. Those are put in the beginning of the list.
5604+ * capabilities of the host cpu.
5605 */
5606-
5607-#define KVM_SAVE_MSRS_BEGIN 2
5608 static u32 msrs_to_save[] = {
5609- MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
5610 MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
5611 MSR_K6_STAR,
5612 #ifdef CONFIG_X86_64
5613 MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
5614 #endif
5615- MSR_IA32_TSC, MSR_IA32_PERF_STATUS, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA
5616+ MSR_IA32_TSC, MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
5617+ MSR_IA32_PERF_STATUS, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA
5618 };
5619
5620 static unsigned num_msrs_to_save;
5621@@ -583,7 +580,7 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
5622 {
5623 static int version;
5624 struct pvclock_wall_clock wc;
5625- struct timespec boot;
5626+ struct timespec now, sys, boot;
5627
5628 if (!wall_clock)
5629 return;
5630@@ -598,7 +595,9 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
5631 * wall clock specified here. guest system time equals host
5632 * system time for us, thus we must fill in host boot time here.
5633 */
5634- getboottime(&boot);
5635+ now = current_kernel_time();
5636+ ktime_get_ts(&sys);
5637+ boot = ns_to_timespec(timespec_to_ns(&now) - timespec_to_ns(&sys));
5638
5639 wc.sec = boot.tv_sec;
5640 wc.nsec = boot.tv_nsec;
5641@@ -673,14 +672,12 @@ static void kvm_write_guest_time(struct kvm_vcpu *v)
5642 local_irq_save(flags);
5643 kvm_get_msr(v, MSR_IA32_TSC, &vcpu->hv_clock.tsc_timestamp);
5644 ktime_get_ts(&ts);
5645- monotonic_to_bootbased(&ts);
5646 local_irq_restore(flags);
5647
5648 /* With all the info we got, fill in the values */
5649
5650 vcpu->hv_clock.system_time = ts.tv_nsec +
5651- (NSEC_PER_SEC * (u64)ts.tv_sec) + v->kvm->arch.kvmclock_offset;
5652-
5653+ (NSEC_PER_SEC * (u64)ts.tv_sec);
5654 /*
5655 * The interface expects us to write an even number signaling that the
5656 * update is finished. Since the guest won't see the intermediate
5657@@ -1227,7 +1224,6 @@ int kvm_dev_ioctl_check_extension(long ext)
5658 case KVM_CAP_PIT2:
5659 case KVM_CAP_PIT_STATE2:
5660 case KVM_CAP_SET_IDENTITY_MAP_ADDR:
5661- case KVM_CAP_ADJUST_CLOCK:
5662 r = 1;
5663 break;
5664 case KVM_CAP_COALESCED_MMIO:
5665@@ -2425,44 +2421,6 @@ long kvm_arch_vm_ioctl(struct file *filp,
5666 r = 0;
5667 break;
5668 }
5669- case KVM_SET_CLOCK: {
5670- struct timespec now;
5671- struct kvm_clock_data user_ns;
5672- u64 now_ns;
5673- s64 delta;
5674-
5675- r = -EFAULT;
5676- if (copy_from_user(&user_ns, argp, sizeof(user_ns)))
5677- goto out;
5678-
5679- r = -EINVAL;
5680- if (user_ns.flags)
5681- goto out;
5682-
5683- r = 0;
5684- ktime_get_ts(&now);
5685- now_ns = timespec_to_ns(&now);
5686- delta = user_ns.clock - now_ns;
5687- kvm->arch.kvmclock_offset = delta;
5688- break;
5689- }
5690- case KVM_GET_CLOCK: {
5691- struct timespec now;
5692- struct kvm_clock_data user_ns;
5693- u64 now_ns;
5694-
5695- ktime_get_ts(&now);
5696- now_ns = timespec_to_ns(&now);
5697- user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
5698- user_ns.flags = 0;
5699-
5700- r = -EFAULT;
5701- if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
5702- goto out;
5703- r = 0;
5704- break;
5705- }
5706-
5707 default:
5708 ;
5709 }
5710@@ -2475,8 +2433,7 @@ static void kvm_init_msr_list(void)
5711 u32 dummy[2];
5712 unsigned i, j;
5713
5714- /* skip the first msrs in the list. KVM-specific */
5715- for (i = j = KVM_SAVE_MSRS_BEGIN; i < ARRAY_SIZE(msrs_to_save); i++) {
5716+ for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
5717 if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
5718 continue;
5719 if (j < i)
5720@@ -4805,13 +4762,12 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
5721 GFP_KERNEL);
5722 if (!vcpu->arch.mce_banks) {
5723 r = -ENOMEM;
5724- goto fail_free_lapic;
5725+ goto fail_mmu_destroy;
5726 }
5727 vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;
5728
5729 return 0;
5730-fail_free_lapic:
5731- kvm_free_lapic(vcpu);
5732+
5733 fail_mmu_destroy:
5734 kvm_mmu_destroy(vcpu);
5735 fail_free_pio_data:
5736@@ -4822,7 +4778,6 @@ fail:
5737
5738 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
5739 {
5740- kfree(vcpu->arch.mce_banks);
5741 kvm_free_lapic(vcpu);
5742 down_read(&vcpu->kvm->slots_lock);
5743 kvm_mmu_destroy(vcpu);
5744diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
5745index c2b6f39..85f5db9 100644
5746--- a/arch/x86/lib/Makefile
5747+++ b/arch/x86/lib/Makefile
5748@@ -2,14 +2,14 @@
5749 # Makefile for x86 specific library files.
5750 #
5751
5752-obj-$(CONFIG_SMP) += msr-smp.o
5753+obj-$(CONFIG_SMP) := msr.o
5754
5755 lib-y := delay.o
5756 lib-y += thunk_$(BITS).o
5757 lib-y += usercopy_$(BITS).o getuser.o putuser.o
5758 lib-y += memcpy_$(BITS).o
5759
5760-obj-y += msr.o msr-reg.o msr-reg-export.o
5761+obj-y += msr-reg.o msr-reg-export.o
5762
5763 ifeq ($(CONFIG_X86_32),y)
5764 obj-y += atomic64_32.o
5765diff --git a/arch/x86/lib/msr.c b/arch/x86/lib/msr.c
5766index 8f8eebd..33a1e3c 100644
5767--- a/arch/x86/lib/msr.c
5768+++ b/arch/x86/lib/msr.c
5769@@ -1,23 +1,226 @@
5770 #include <linux/module.h>
5771 #include <linux/preempt.h>
5772+#include <linux/smp.h>
5773 #include <asm/msr.h>
5774
5775-struct msr *msrs_alloc(void)
5776+struct msr_info {
5777+ u32 msr_no;
5778+ struct msr reg;
5779+ struct msr *msrs;
5780+ int off;
5781+ int err;
5782+};
5783+
5784+static void __rdmsr_on_cpu(void *info)
5785+{
5786+ struct msr_info *rv = info;
5787+ struct msr *reg;
5788+ int this_cpu = raw_smp_processor_id();
5789+
5790+ if (rv->msrs)
5791+ reg = &rv->msrs[this_cpu - rv->off];
5792+ else
5793+ reg = &rv->reg;
5794+
5795+ rdmsr(rv->msr_no, reg->l, reg->h);
5796+}
5797+
5798+static void __wrmsr_on_cpu(void *info)
5799+{
5800+ struct msr_info *rv = info;
5801+ struct msr *reg;
5802+ int this_cpu = raw_smp_processor_id();
5803+
5804+ if (rv->msrs)
5805+ reg = &rv->msrs[this_cpu - rv->off];
5806+ else
5807+ reg = &rv->reg;
5808+
5809+ wrmsr(rv->msr_no, reg->l, reg->h);
5810+}
5811+
5812+int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
5813+{
5814+ int err;
5815+ struct msr_info rv;
5816+
5817+ memset(&rv, 0, sizeof(rv));
5818+
5819+ rv.msr_no = msr_no;
5820+ err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
5821+ *l = rv.reg.l;
5822+ *h = rv.reg.h;
5823+
5824+ return err;
5825+}
5826+EXPORT_SYMBOL(rdmsr_on_cpu);
5827+
5828+int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
5829+{
5830+ int err;
5831+ struct msr_info rv;
5832+
5833+ memset(&rv, 0, sizeof(rv));
5834+
5835+ rv.msr_no = msr_no;
5836+ rv.reg.l = l;
5837+ rv.reg.h = h;
5838+ err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
5839+
5840+ return err;
5841+}
5842+EXPORT_SYMBOL(wrmsr_on_cpu);
5843+
5844+/* rdmsr on a bunch of CPUs
5845+ *
5846+ * @mask: which CPUs
5847+ * @msr_no: which MSR
5848+ * @msrs: array of MSR values
5849+ *
5850+ */
5851+void rdmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs)
5852+{
5853+ struct msr_info rv;
5854+ int this_cpu;
5855+
5856+ memset(&rv, 0, sizeof(rv));
5857+
5858+ rv.off = cpumask_first(mask);
5859+ rv.msrs = msrs;
5860+ rv.msr_no = msr_no;
5861+
5862+ this_cpu = get_cpu();
5863+
5864+ if (cpumask_test_cpu(this_cpu, mask))
5865+ __rdmsr_on_cpu(&rv);
5866+
5867+ smp_call_function_many(mask, __rdmsr_on_cpu, &rv, 1);
5868+ put_cpu();
5869+}
5870+EXPORT_SYMBOL(rdmsr_on_cpus);
5871+
5872+/*
5873+ * wrmsr on a bunch of CPUs
5874+ *
5875+ * @mask: which CPUs
5876+ * @msr_no: which MSR
5877+ * @msrs: array of MSR values
5878+ *
5879+ */
5880+void wrmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs)
5881+{
5882+ struct msr_info rv;
5883+ int this_cpu;
5884+
5885+ memset(&rv, 0, sizeof(rv));
5886+
5887+ rv.off = cpumask_first(mask);
5888+ rv.msrs = msrs;
5889+ rv.msr_no = msr_no;
5890+
5891+ this_cpu = get_cpu();
5892+
5893+ if (cpumask_test_cpu(this_cpu, mask))
5894+ __wrmsr_on_cpu(&rv);
5895+
5896+ smp_call_function_many(mask, __wrmsr_on_cpu, &rv, 1);
5897+ put_cpu();
5898+}
5899+EXPORT_SYMBOL(wrmsr_on_cpus);
5900+
5901+/* These "safe" variants are slower and should be used when the target MSR
5902+ may not actually exist. */
5903+static void __rdmsr_safe_on_cpu(void *info)
5904+{
5905+ struct msr_info *rv = info;
5906+
5907+ rv->err = rdmsr_safe(rv->msr_no, &rv->reg.l, &rv->reg.h);
5908+}
5909+
5910+static void __wrmsr_safe_on_cpu(void *info)
5911+{
5912+ struct msr_info *rv = info;
5913+
5914+ rv->err = wrmsr_safe(rv->msr_no, rv->reg.l, rv->reg.h);
5915+}
5916+
5917+int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
5918 {
5919- struct msr *msrs = NULL;
5920+ int err;
5921+ struct msr_info rv;
5922
5923- msrs = alloc_percpu(struct msr);
5924- if (!msrs) {
5925- pr_warning("%s: error allocating msrs\n", __func__);
5926- return NULL;
5927- }
5928+ memset(&rv, 0, sizeof(rv));
5929
5930- return msrs;
5931+ rv.msr_no = msr_no;
5932+ err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
5933+ *l = rv.reg.l;
5934+ *h = rv.reg.h;
5935+
5936+ return err ? err : rv.err;
5937 }
5938-EXPORT_SYMBOL(msrs_alloc);
5939+EXPORT_SYMBOL(rdmsr_safe_on_cpu);
5940
5941-void msrs_free(struct msr *msrs)
5942+int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
5943 {
5944- free_percpu(msrs);
5945+ int err;
5946+ struct msr_info rv;
5947+
5948+ memset(&rv, 0, sizeof(rv));
5949+
5950+ rv.msr_no = msr_no;
5951+ rv.reg.l = l;
5952+ rv.reg.h = h;
5953+ err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
5954+
5955+ return err ? err : rv.err;
5956+}
5957+EXPORT_SYMBOL(wrmsr_safe_on_cpu);
5958+
5959+/*
5960+ * These variants are significantly slower, but allows control over
5961+ * the entire 32-bit GPR set.
5962+ */
5963+struct msr_regs_info {
5964+ u32 *regs;
5965+ int err;
5966+};
5967+
5968+static void __rdmsr_safe_regs_on_cpu(void *info)
5969+{
5970+ struct msr_regs_info *rv = info;
5971+
5972+ rv->err = rdmsr_safe_regs(rv->regs);
5973+}
5974+
5975+static void __wrmsr_safe_regs_on_cpu(void *info)
5976+{
5977+ struct msr_regs_info *rv = info;
5978+
5979+ rv->err = wrmsr_safe_regs(rv->regs);
5980+}
5981+
5982+int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
5983+{
5984+ int err;
5985+ struct msr_regs_info rv;
5986+
5987+ rv.regs = regs;
5988+ rv.err = -EIO;
5989+ err = smp_call_function_single(cpu, __rdmsr_safe_regs_on_cpu, &rv, 1);
5990+
5991+ return err ? err : rv.err;
5992+}
5993+EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu);
5994+
5995+int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
5996+{
5997+ int err;
5998+ struct msr_regs_info rv;
5999+
6000+ rv.regs = regs;
6001+ rv.err = -EIO;
6002+ err = smp_call_function_single(cpu, __wrmsr_safe_regs_on_cpu, &rv, 1);
6003+
6004+ return err ? err : rv.err;
6005 }
6006-EXPORT_SYMBOL(msrs_free);
6007+EXPORT_SYMBOL(wrmsr_safe_regs_on_cpu);
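The msr.c hunk above reimplements the rdmsr/wrmsr-on-a-given-CPU helpers by bouncing the register access through smp_call_function_single(). From user space, the same per-CPU registers are reachable through the msr character device, where the MSR index is the file offset; a minimal sketch, assuming the msr module is loaded and the process has the needed privileges:

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	uint64_t tsc;
	/* One device node per CPU; the MSR number is the read offset. */
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/cpu/0/msr (needs the msr module and root)");
		return 1;
	}
	if (pread(fd, &tsc, sizeof(tsc), 0x10) != sizeof(tsc)) {
		perror("pread MSR 0x10");
		close(fd);
		return 1;
	}
	printf("IA32_TIME_STAMP_COUNTER on cpu0: %#llx\n",
	       (unsigned long long)tsc);
	close(fd);
	return 0;
}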
6008diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
6009index 3871c60..dbb5381 100644
6010--- a/arch/x86/mm/srat_64.c
6011+++ b/arch/x86/mm/srat_64.c
6012@@ -229,11 +229,9 @@ update_nodes_add(int node, unsigned long start, unsigned long end)
6013 printk(KERN_ERR "SRAT: Hotplug zone not continuous. Partly ignored\n");
6014 }
6015
6016- if (changed) {
6017- node_set(node, cpu_nodes_parsed);
6018+ if (changed)
6019 printk(KERN_INFO "SRAT: hot plug zone found %Lx - %Lx\n",
6020 nd->start, nd->end);
6021- }
6022 }
6023
6024 /* Callback for parsing of the Proximity Domain <-> Memory Area mappings */
6025diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
6026index 3347f69..cb88b1a 100644
6027--- a/arch/x86/oprofile/nmi_int.c
6028+++ b/arch/x86/oprofile/nmi_int.c
6029@@ -222,7 +222,7 @@ static void nmi_cpu_switch(void *dummy)
6030
6031 /* move to next set */
6032 si += model->num_counters;
6033- if ((si >= model->num_virt_counters) || (counter_config[si].count == 0))
6034+ if ((si > model->num_virt_counters) || (counter_config[si].count == 0))
6035 per_cpu(switch_index, cpu) = 0;
6036 else
6037 per_cpu(switch_index, cpu) = si;
6038@@ -598,7 +598,6 @@ static int __init ppro_init(char **cpu_type)
6039 case 15: case 23:
6040 *cpu_type = "i386/core_2";
6041 break;
6042- case 0x2e:
6043 case 26:
6044 spec = &op_arch_perfmon_spec;
6045 *cpu_type = "i386/core_i7";
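In nmi_cpu_switch(), si steps through the virtual counters in strides of num_counters and must wrap to 0 before it is reused as an index, so the boundary test in the hunk above (>= versus >) is exactly an off-by-one question. A toy illustration with made-up counter counts:

#include <stdio.h>

#define NUM_COUNTERS       2   /* hardware counters per set (assumed) */
#define NUM_VIRT_COUNTERS  6   /* valid indices: 0..5 (assumed) */

int main(void)
{
	int si = 0;

	/* Walk the sets the way nmi_cpu_switch() does.  Wrapping on
	 * "si >= NUM_VIRT_COUNTERS" keeps si a valid index; with ">" the
	 * value si == NUM_VIRT_COUNTERS would survive as a stale index. */
	for (int step = 0; step < 6; step++) {
		printf("switch_index = %d\n", si);
		si += NUM_COUNTERS;
		if (si >= NUM_VIRT_COUNTERS)
			si = 0;
	}
	return 0;
}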
6046diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
6047index a672f12..b22d13b 100644
6048--- a/arch/x86/pci/i386.c
6049+++ b/arch/x86/pci/i386.c
6050@@ -282,15 +282,6 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
6051 return -EINVAL;
6052
6053 prot = pgprot_val(vma->vm_page_prot);
6054-
6055- /*
6056- * Return error if pat is not enabled and write_combine is requested.
6057- * Caller can followup with UC MINUS request and add a WC mtrr if there
6058- * is a free mtrr slot.
6059- */
6060- if (!pat_enabled && write_combine)
6061- return -EINVAL;
6062-
6063 if (pat_enabled && write_combine)
6064 prot |= _PAGE_CACHE_WC;
6065 else if (pat_enabled || boot_cpu_data.x86 > 3)
6066diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
6067index 79f9738..dfbf70e 100644
6068--- a/arch/x86/xen/enlighten.c
6069+++ b/arch/x86/xen/enlighten.c
6070@@ -138,23 +138,24 @@ static void xen_vcpu_setup(int cpu)
6071 */
6072 void xen_vcpu_restore(void)
6073 {
6074- int cpu;
6075+ if (have_vcpu_info_placement) {
6076+ int cpu;
6077
6078- for_each_online_cpu(cpu) {
6079- bool other_cpu = (cpu != smp_processor_id());
6080+ for_each_online_cpu(cpu) {
6081+ bool other_cpu = (cpu != smp_processor_id());
6082
6083- if (other_cpu &&
6084- HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL))
6085- BUG();
6086-
6087- xen_setup_runstate_info(cpu);
6088+ if (other_cpu &&
6089+ HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL))
6090+ BUG();
6091
6092- if (have_vcpu_info_placement)
6093 xen_vcpu_setup(cpu);
6094
6095- if (other_cpu &&
6096- HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL))
6097- BUG();
6098+ if (other_cpu &&
6099+ HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL))
6100+ BUG();
6101+ }
6102+
6103+ BUG_ON(!have_vcpu_info_placement);
6104 }
6105 }
6106
6107@@ -1181,8 +1182,6 @@ asmlinkage void __init xen_start_kernel(void)
6108
6109 xen_raw_console_write("about to get started...\n");
6110
6111- xen_setup_runstate_info(0);
6112-
6113 /* Start the world */
6114 #ifdef CONFIG_X86_32
6115 i386_start_kernel();
6116diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
6117index bf4cd6b..3bf7b1d 100644
6118--- a/arch/x86/xen/mmu.c
6119+++ b/arch/x86/xen/mmu.c
6120@@ -185,7 +185,7 @@ static inline unsigned p2m_index(unsigned long pfn)
6121 }
6122
6123 /* Build the parallel p2m_top_mfn structures */
6124-void xen_build_mfn_list_list(void)
6125+static void __init xen_build_mfn_list_list(void)
6126 {
6127 unsigned pfn, idx;
6128
6129diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
6130index 360f8d8..fe03eee 100644
6131--- a/arch/x86/xen/smp.c
6132+++ b/arch/x86/xen/smp.c
6133@@ -295,7 +295,6 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
6134 (unsigned long)task_stack_page(idle) -
6135 KERNEL_STACK_OFFSET + THREAD_SIZE;
6136 #endif
6137- xen_setup_runstate_info(cpu);
6138 xen_setup_timer(cpu);
6139 xen_init_lock_cpu(cpu);
6140
6141diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
6142index 987267f..95be7b4 100644
6143--- a/arch/x86/xen/suspend.c
6144+++ b/arch/x86/xen/suspend.c
6145@@ -1,5 +1,4 @@
6146 #include <linux/types.h>
6147-#include <linux/clockchips.h>
6148
6149 #include <xen/interface/xen.h>
6150 #include <xen/grant_table.h>
6151@@ -28,8 +27,6 @@ void xen_pre_suspend(void)
6152
6153 void xen_post_suspend(int suspend_cancelled)
6154 {
6155- xen_build_mfn_list_list();
6156-
6157 xen_setup_shared_info();
6158
6159 if (suspend_cancelled) {
6160@@ -47,19 +44,7 @@ void xen_post_suspend(int suspend_cancelled)
6161
6162 }
6163
6164-static void xen_vcpu_notify_restore(void *data)
6165-{
6166- unsigned long reason = (unsigned long)data;
6167-
6168- /* Boot processor notified via generic timekeeping_resume() */
6169- if ( smp_processor_id() == 0)
6170- return;
6171-
6172- clockevents_notify(reason, NULL);
6173-}
6174-
6175 void xen_arch_resume(void)
6176 {
6177- smp_call_function(xen_vcpu_notify_restore,
6178- (void *)CLOCK_EVT_NOTIFY_RESUME, 1);
6179+ /* nothing */
6180 }
6181diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
6182index 9d1f853..0a5aa44 100644
6183--- a/arch/x86/xen/time.c
6184+++ b/arch/x86/xen/time.c
6185@@ -100,7 +100,7 @@ bool xen_vcpu_stolen(int vcpu)
6186 return per_cpu(runstate, vcpu).state == RUNSTATE_runnable;
6187 }
6188
6189-void xen_setup_runstate_info(int cpu)
6190+static void setup_runstate_info(int cpu)
6191 {
6192 struct vcpu_register_runstate_memory_area area;
6193
6194@@ -434,7 +434,7 @@ void xen_setup_timer(int cpu)
6195 name = "<timer kasprintf failed>";
6196
6197 irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
6198- IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER,
6199+ IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
6200 name, NULL);
6201
6202 evt = &per_cpu(xen_clock_events, cpu);
6203@@ -442,6 +442,8 @@ void xen_setup_timer(int cpu)
6204
6205 evt->cpumask = cpumask_of(cpu);
6206 evt->irq = irq;
6207+
6208+ setup_runstate_info(cpu);
6209 }
6210
6211 void xen_teardown_timer(int cpu)
6212@@ -492,7 +494,6 @@ __init void xen_time_init(void)
6213
6214 setup_force_cpu_cap(X86_FEATURE_TSC);
6215
6216- xen_setup_runstate_info(cpu);
6217 xen_setup_timer(cpu);
6218 xen_setup_cpu_clockevents();
6219 }
6220diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
6221index 53adefd..02f496a 100644
6222--- a/arch/x86/xen/xen-asm_64.S
6223+++ b/arch/x86/xen/xen-asm_64.S
6224@@ -96,7 +96,7 @@ ENTRY(xen_sysret32)
6225 pushq $__USER32_CS
6226 pushq %rcx
6227
6228- pushq $0
6229+ pushq $VGCF_in_syscall
6230 1: jmp hypercall_iret
6231 ENDPATCH(xen_sysret32)
6232 RELOC(xen_sysret32, 1b+1)
6233@@ -151,7 +151,7 @@ ENTRY(xen_syscall32_target)
6234 ENTRY(xen_sysenter_target)
6235 lea 16(%rsp), %rsp /* strip %rcx, %r11 */
6236 mov $-ENOSYS, %rax
6237- pushq $0
6238+ pushq $VGCF_in_syscall
6239 jmp hypercall_iret
6240 ENDPROC(xen_syscall32_target)
6241 ENDPROC(xen_sysenter_target)
6242diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
6243index f9153a3..355fa6b 100644
6244--- a/arch/x86/xen/xen-ops.h
6245+++ b/arch/x86/xen/xen-ops.h
6246@@ -25,7 +25,6 @@ extern struct shared_info *HYPERVISOR_shared_info;
6247
6248 void xen_setup_mfn_list_list(void);
6249 void xen_setup_shared_info(void);
6250-void xen_build_mfn_list_list(void);
6251 void xen_setup_machphys_mapping(void);
6252 pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn);
6253 void xen_ident_map_ISA(void);
6254@@ -42,7 +41,6 @@ void __init xen_build_dynamic_phys_to_machine(void);
6255
6256 void xen_init_irq_ops(void);
6257 void xen_setup_timer(int cpu);
6258-void xen_setup_runstate_info(int cpu);
6259 void xen_teardown_timer(int cpu);
6260 cycle_t xen_clocksource_read(void);
6261 void xen_setup_cpu_clockevents(void);
6262diff --git a/arch/xtensa/include/asm/syscall.h b/arch/xtensa/include/asm/syscall.h
6263index 4352dbe..05cebf8 100644
6264--- a/arch/xtensa/include/asm/syscall.h
6265+++ b/arch/xtensa/include/asm/syscall.h
6266@@ -13,6 +13,8 @@ struct sigaction;
6267 asmlinkage long xtensa_execve(char*, char**, char**, struct pt_regs*);
6268 asmlinkage long xtensa_clone(unsigned long, unsigned long, struct pt_regs*);
6269 asmlinkage long xtensa_pipe(int __user *);
6270+asmlinkage long xtensa_mmap2(unsigned long, unsigned long, unsigned long,
6271+ unsigned long, unsigned long, unsigned long);
6272 asmlinkage long xtensa_ptrace(long, long, long, long);
6273 asmlinkage long xtensa_sigreturn(struct pt_regs*);
6274 asmlinkage long xtensa_rt_sigreturn(struct pt_regs*);
6275diff --git a/arch/xtensa/include/asm/unistd.h b/arch/xtensa/include/asm/unistd.h
6276index 9a5c354..c092c8f 100644
6277--- a/arch/xtensa/include/asm/unistd.h
6278+++ b/arch/xtensa/include/asm/unistd.h
6279@@ -189,7 +189,7 @@ __SYSCALL( 79, sys_fremovexattr, 2)
6280 /* File Map / Shared Memory Operations */
6281
6282 #define __NR_mmap2 80
6283-__SYSCALL( 80, sys_mmap_pgoff, 6)
6284+__SYSCALL( 80, xtensa_mmap2, 6)
6285 #define __NR_munmap 81
6286 __SYSCALL( 81, sys_munmap, 2)
6287 #define __NR_mprotect 82
6288diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c
6289index 1e67bab..ac15ecb 100644
6290--- a/arch/xtensa/kernel/syscall.c
6291+++ b/arch/xtensa/kernel/syscall.c
6292@@ -57,6 +57,31 @@ asmlinkage long xtensa_pipe(int __user *userfds)
6293 return error;
6294 }
6295
6296+
6297+asmlinkage long xtensa_mmap2(unsigned long addr, unsigned long len,
6298+ unsigned long prot, unsigned long flags,
6299+ unsigned long fd, unsigned long pgoff)
6300+{
6301+ int error = -EBADF;
6302+ struct file * file = NULL;
6303+
6304+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
6305+ if (!(flags & MAP_ANONYMOUS)) {
6306+ file = fget(fd);
6307+ if (!file)
6308+ goto out;
6309+ }
6310+
6311+ down_write(&current->mm->mmap_sem);
6312+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
6313+ up_write(&current->mm->mmap_sem);
6314+
6315+ if (file)
6316+ fput(file);
6317+out:
6318+ return error;
6319+}
6320+
6321 asmlinkage long xtensa_shmat(int shmid, char __user *shmaddr, int shmflg)
6322 {
6323 unsigned long ret;
6324diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
6325index 49f6ede..7411915 100644
6326--- a/drivers/acpi/bus.c
6327+++ b/drivers/acpi/bus.c
6328@@ -344,167 +344,6 @@ bool acpi_bus_can_wakeup(acpi_handle handle)
6329
6330 EXPORT_SYMBOL(acpi_bus_can_wakeup);
6331
6332-static void acpi_print_osc_error(acpi_handle handle,
6333- struct acpi_osc_context *context, char *error)
6334-{
6335- struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER};
6336- int i;
6337-
6338- if (ACPI_FAILURE(acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer)))
6339- printk(KERN_DEBUG "%s\n", error);
6340- else {
6341- printk(KERN_DEBUG "%s:%s\n", (char *)buffer.pointer, error);
6342- kfree(buffer.pointer);
6343- }
6344- printk(KERN_DEBUG"_OSC request data:");
6345- for (i = 0; i < context->cap.length; i += sizeof(u32))
6346- printk("%x ", *((u32 *)(context->cap.pointer + i)));
6347- printk("\n");
6348-}
6349-
6350-static u8 hex_val(unsigned char c)
6351-{
6352- return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
6353-}
6354-
6355-static acpi_status acpi_str_to_uuid(char *str, u8 *uuid)
6356-{
6357- int i;
6358- static int opc_map_to_uuid[16] = {6, 4, 2, 0, 11, 9, 16, 14, 19, 21,
6359- 24, 26, 28, 30, 32, 34};
6360-
6361- if (strlen(str) != 36)
6362- return AE_BAD_PARAMETER;
6363- for (i = 0; i < 36; i++) {
6364- if (i == 8 || i == 13 || i == 18 || i == 23) {
6365- if (str[i] != '-')
6366- return AE_BAD_PARAMETER;
6367- } else if (!isxdigit(str[i]))
6368- return AE_BAD_PARAMETER;
6369- }
6370- for (i = 0; i < 16; i++) {
6371- uuid[i] = hex_val(str[opc_map_to_uuid[i]]) << 4;
6372- uuid[i] |= hex_val(str[opc_map_to_uuid[i] + 1]);
6373- }
6374- return AE_OK;
6375-}
6376-
6377-acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context)
6378-{
6379- acpi_status status;
6380- struct acpi_object_list input;
6381- union acpi_object in_params[4];
6382- union acpi_object *out_obj;
6383- u8 uuid[16];
6384- u32 errors;
6385- struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
6386-
6387- if (!context)
6388- return AE_ERROR;
6389- if (ACPI_FAILURE(acpi_str_to_uuid(context->uuid_str, uuid)))
6390- return AE_ERROR;
6391- context->ret.length = ACPI_ALLOCATE_BUFFER;
6392- context->ret.pointer = NULL;
6393-
6394- /* Setting up input parameters */
6395- input.count = 4;
6396- input.pointer = in_params;
6397- in_params[0].type = ACPI_TYPE_BUFFER;
6398- in_params[0].buffer.length = 16;
6399- in_params[0].buffer.pointer = uuid;
6400- in_params[1].type = ACPI_TYPE_INTEGER;
6401- in_params[1].integer.value = context->rev;
6402- in_params[2].type = ACPI_TYPE_INTEGER;
6403- in_params[2].integer.value = context->cap.length/sizeof(u32);
6404- in_params[3].type = ACPI_TYPE_BUFFER;
6405- in_params[3].buffer.length = context->cap.length;
6406- in_params[3].buffer.pointer = context->cap.pointer;
6407-
6408- status = acpi_evaluate_object(handle, "_OSC", &input, &output);
6409- if (ACPI_FAILURE(status))
6410- return status;
6411-
6412- if (!output.length)
6413- return AE_NULL_OBJECT;
6414-
6415- out_obj = output.pointer;
6416- if (out_obj->type != ACPI_TYPE_BUFFER
6417- || out_obj->buffer.length != context->cap.length) {
6418- acpi_print_osc_error(handle, context,
6419- "_OSC evaluation returned wrong type");
6420- status = AE_TYPE;
6421- goto out_kfree;
6422- }
6423- /* Need to ignore the bit0 in result code */
6424- errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0);
6425- if (errors) {
6426- if (errors & OSC_REQUEST_ERROR)
6427- acpi_print_osc_error(handle, context,
6428- "_OSC request failed");
6429- if (errors & OSC_INVALID_UUID_ERROR)
6430- acpi_print_osc_error(handle, context,
6431- "_OSC invalid UUID");
6432- if (errors & OSC_INVALID_REVISION_ERROR)
6433- acpi_print_osc_error(handle, context,
6434- "_OSC invalid revision");
6435- if (errors & OSC_CAPABILITIES_MASK_ERROR) {
6436- if (((u32 *)context->cap.pointer)[OSC_QUERY_TYPE]
6437- & OSC_QUERY_ENABLE)
6438- goto out_success;
6439- status = AE_SUPPORT;
6440- goto out_kfree;
6441- }
6442- status = AE_ERROR;
6443- goto out_kfree;
6444- }
6445-out_success:
6446- context->ret.length = out_obj->buffer.length;
6447- context->ret.pointer = kmalloc(context->ret.length, GFP_KERNEL);
6448- if (!context->ret.pointer) {
6449- status = AE_NO_MEMORY;
6450- goto out_kfree;
6451- }
6452- memcpy(context->ret.pointer, out_obj->buffer.pointer,
6453- context->ret.length);
6454- status = AE_OK;
6455-
6456-out_kfree:
6457- kfree(output.pointer);
6458- if (status != AE_OK)
6459- context->ret.pointer = NULL;
6460- return status;
6461-}
6462-EXPORT_SYMBOL(acpi_run_osc);
6463-
6464-static u8 sb_uuid_str[] = "0811B06E-4A27-44F9-8D60-3CBBC22E7B48";
6465-static void acpi_bus_osc_support(void)
6466-{
6467- u32 capbuf[2];
6468- struct acpi_osc_context context = {
6469- .uuid_str = sb_uuid_str,
6470- .rev = 1,
6471- .cap.length = 8,
6472- .cap.pointer = capbuf,
6473- };
6474- acpi_handle handle;
6475-
6476- capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
6477- capbuf[OSC_SUPPORT_TYPE] = OSC_SB_PR3_SUPPORT; /* _PR3 is in use */
6478-#if defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR) ||\
6479- defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR_MODULE)
6480- capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_PAD_SUPPORT;
6481-#endif
6482-
6483-#if defined(CONFIG_ACPI_PROCESSOR) || defined(CONFIG_ACPI_PROCESSOR_MODULE)
6484- capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_PPC_OST_SUPPORT;
6485-#endif
6486- if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)))
6487- return;
6488- if (ACPI_SUCCESS(acpi_run_osc(handle, &context)))
6489- kfree(context.ret.pointer);
6490- /* do we need to check the returned cap? Sounds no */
6491-}
6492-
6493 /* --------------------------------------------------------------------------
6494 Event Management
6495 -------------------------------------------------------------------------- */
6496@@ -895,8 +734,6 @@ static int __init acpi_bus_init(void)
6497 status = acpi_ec_ecdt_probe();
6498 /* Ignore result. Not having an ECDT is not fatal. */
6499
6500- acpi_bus_osc_support();
6501-
6502 status = acpi_initialize_objects(ACPI_FULL_INITIALIZATION);
6503 if (ACPI_FAILURE(status)) {
6504 printk(KERN_ERR PREFIX "Unable to initialize ACPI objects\n");
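The removed acpi_str_to_uuid() maps the 36-character UUID string onto 16 bytes through a fixed index table, which simultaneously skips the dashes and byte-swaps the first three UUID fields into their little-endian in-memory layout. A standalone version using the same table and the _SB UUID quoted in the hunk:

#include <stdio.h>
#include <string.h>
#include <ctype.h>

static unsigned char hex_val(unsigned char c)
{
	return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
}

/* Same index table as acpi_str_to_uuid(): entry i names the string
 * position of byte i's high nibble. */
static const int map[16] = { 6, 4, 2, 0, 11, 9, 16, 14, 19, 21,
			     24, 26, 28, 30, 32, 34 };

int main(void)
{
	const char *str = "0811B06E-4A27-44F9-8D60-3CBBC22E7B48";
	unsigned char uuid[16];

	if (strlen(str) != 36)
		return 1;
	for (int i = 0; i < 16; i++)
		uuid[i] = (hex_val(str[map[i]]) << 4) | hex_val(str[map[i] + 1]);

	/* First field 0x0811B06E comes out as 6e b0 11 08: little-endian. */
	for (int i = 0; i < 16; i++)
		printf("%02x%s", uuid[i], i == 15 ? "\n" : " ");
	return 0;
}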
6505diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
6506index 8a95e83..0c9c6a9 100644
6507--- a/drivers/acpi/button.c
6508+++ b/drivers/acpi/button.c
6509@@ -282,13 +282,6 @@ static int acpi_lid_send_state(struct acpi_device *device)
6510 if (ret == NOTIFY_DONE)
6511 ret = blocking_notifier_call_chain(&acpi_lid_notifier, state,
6512 device);
6513- if (ret == NOTIFY_DONE || ret == NOTIFY_OK) {
6514- /*
6515- * It is also regarded as success if the notifier_chain
6516- * returns NOTIFY_OK or NOTIFY_DONE.
6517- */
6518- ret = 0;
6519- }
6520 return ret;
6521 }
6522
6523diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
6524index f1670e0..baef28c 100644
6525--- a/drivers/acpi/ec.c
6526+++ b/drivers/acpi/ec.c
6527@@ -201,13 +201,14 @@ unlock:
6528 spin_unlock_irqrestore(&ec->curr_lock, flags);
6529 }
6530
6531-static int acpi_ec_sync_query(struct acpi_ec *ec);
6532+static void acpi_ec_gpe_query(void *ec_cxt);
6533
6534-static int ec_check_sci_sync(struct acpi_ec *ec, u8 state)
6535+static int ec_check_sci(struct acpi_ec *ec, u8 state)
6536 {
6537 if (state & ACPI_EC_FLAG_SCI) {
6538 if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
6539- return acpi_ec_sync_query(ec);
6540+ return acpi_os_execute(OSL_EC_BURST_HANDLER,
6541+ acpi_ec_gpe_query, ec);
6542 }
6543 return 0;
6544 }
6545@@ -248,6 +249,11 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
6546 {
6547 unsigned long tmp;
6548 int ret = 0;
6549+ pr_debug(PREFIX "transaction start\n");
6550+ /* disable GPE during transaction if storm is detected */
6551+ if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
6552+ acpi_disable_gpe(NULL, ec->gpe);
6553+ }
6554 if (EC_FLAGS_MSI)
6555 udelay(ACPI_EC_MSI_UDELAY);
6556 /* start transaction */
6557@@ -259,9 +265,20 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
6558 clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
6559 spin_unlock_irqrestore(&ec->curr_lock, tmp);
6560 ret = ec_poll(ec);
6561+ pr_debug(PREFIX "transaction end\n");
6562 spin_lock_irqsave(&ec->curr_lock, tmp);
6563 ec->curr = NULL;
6564 spin_unlock_irqrestore(&ec->curr_lock, tmp);
6565+ if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
6566+ /* check if we received SCI during transaction */
6567+ ec_check_sci(ec, acpi_ec_read_status(ec));
6568+ /* it is safe to enable GPE outside of transaction */
6569+ acpi_enable_gpe(NULL, ec->gpe);
6570+ } else if (t->irq_count > ACPI_EC_STORM_THRESHOLD) {
6571+ pr_info(PREFIX "GPE storm detected, "
6572+ "transactions will use polling mode\n");
6573+ set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
6574+ }
6575 return ret;
6576 }
6577
6578@@ -304,26 +321,7 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
6579 status = -ETIME;
6580 goto end;
6581 }
6582- pr_debug(PREFIX "transaction start\n");
6583- /* disable GPE during transaction if storm is detected */
6584- if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
6585- acpi_disable_gpe(NULL, ec->gpe);
6586- }
6587-
6588 status = acpi_ec_transaction_unlocked(ec, t);
6589-
6590- /* check if we received SCI during transaction */
6591- ec_check_sci_sync(ec, acpi_ec_read_status(ec));
6592- if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
6593- msleep(1);
6594- /* it is safe to enable GPE outside of transaction */
6595- acpi_enable_gpe(NULL, ec->gpe);
6596- } else if (t->irq_count > ACPI_EC_STORM_THRESHOLD) {
6597- pr_info(PREFIX "GPE storm detected, "
6598- "transactions will use polling mode\n");
6599- set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
6600- }
6601- pr_debug(PREFIX "transaction end\n");
6602 end:
6603 if (ec->global_lock)
6604 acpi_release_global_lock(glk);
6605@@ -445,7 +443,7 @@ int ec_transaction(u8 command,
6606
6607 EXPORT_SYMBOL(ec_transaction);
6608
6609-static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 * data)
6610+static int acpi_ec_query(struct acpi_ec *ec, u8 * data)
6611 {
6612 int result;
6613 u8 d;
6614@@ -454,16 +452,20 @@ static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 * data)
6615 .wlen = 0, .rlen = 1};
6616 if (!ec || !data)
6617 return -EINVAL;
6618+
6619 /*
6620 * Query the EC to find out which _Qxx method we need to evaluate.
6621 * Note that successful completion of the query causes the ACPI_EC_SCI
6622 * bit to be cleared (and thus clearing the interrupt source).
6623 */
6624- result = acpi_ec_transaction_unlocked(ec, &t);
6625+
6626+ result = acpi_ec_transaction(ec, &t);
6627 if (result)
6628 return result;
6629+
6630 if (!d)
6631 return -ENODATA;
6632+
6633 *data = d;
6634 return 0;
6635 }
6636@@ -507,78 +509,43 @@ void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
6637
6638 EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);
6639
6640-static void acpi_ec_run(void *cxt)
6641-{
6642- struct acpi_ec_query_handler *handler = cxt;
6643- if (!handler)
6644- return;
6645- pr_debug(PREFIX "start query execution\n");
6646- if (handler->func)
6647- handler->func(handler->data);
6648- else if (handler->handle)
6649- acpi_evaluate_object(handler->handle, NULL, NULL, NULL);
6650- pr_debug(PREFIX "stop query execution\n");
6651- kfree(handler);
6652-}
6653-
6654-static int acpi_ec_sync_query(struct acpi_ec *ec)
6655+static void acpi_ec_gpe_query(void *ec_cxt)
6656 {
6657+ struct acpi_ec *ec = ec_cxt;
6658 u8 value = 0;
6659- int status;
6660- struct acpi_ec_query_handler *handler, *copy;
6661- if ((status = acpi_ec_query_unlocked(ec, &value)))
6662- return status;
6663+ struct acpi_ec_query_handler *handler, copy;
6664+
6665+ if (!ec || acpi_ec_query(ec, &value))
6666+ return;
6667+ mutex_lock(&ec->lock);
6668 list_for_each_entry(handler, &ec->list, node) {
6669 if (value == handler->query_bit) {
6670 /* have custom handler for this bit */
6671- copy = kmalloc(sizeof(*handler), GFP_KERNEL);
6672- if (!copy)
6673- return -ENOMEM;
6674- memcpy(copy, handler, sizeof(*copy));
6675- pr_debug(PREFIX "push query execution (0x%2x) on queue\n", value);
6676- return acpi_os_execute(OSL_GPE_HANDLER,
6677- acpi_ec_run, copy);
6678+ memcpy(&copy, handler, sizeof(copy));
6679+ mutex_unlock(&ec->lock);
6680+ if (copy.func) {
6681+ copy.func(copy.data);
6682+ } else if (copy.handle) {
6683+ acpi_evaluate_object(copy.handle, NULL, NULL, NULL);
6684+ }
6685+ return;
6686 }
6687 }
6688- return 0;
6689-}
6690-
6691-static void acpi_ec_gpe_query(void *ec_cxt)
6692-{
6693- struct acpi_ec *ec = ec_cxt;
6694- if (!ec)
6695- return;
6696- mutex_lock(&ec->lock);
6697- acpi_ec_sync_query(ec);
6698 mutex_unlock(&ec->lock);
6699 }
6700
6701-static void acpi_ec_gpe_query(void *ec_cxt);
6702-
6703-static int ec_check_sci(struct acpi_ec *ec, u8 state)
6704-{
6705- if (state & ACPI_EC_FLAG_SCI) {
6706- if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
6707- pr_debug(PREFIX "push gpe query to the queue\n");
6708- return acpi_os_execute(OSL_NOTIFY_HANDLER,
6709- acpi_ec_gpe_query, ec);
6710- }
6711- }
6712- return 0;
6713-}
6714-
6715 static u32 acpi_ec_gpe_handler(void *data)
6716 {
6717 struct acpi_ec *ec = data;
6718+ u8 status;
6719
6720 pr_debug(PREFIX "~~~> interrupt\n");
6721+ status = acpi_ec_read_status(ec);
6722
6723- advance_transaction(ec, acpi_ec_read_status(ec));
6724- if (ec_transaction_done(ec) &&
6725- (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) == 0) {
6726+ advance_transaction(ec, status);
6727+ if (ec_transaction_done(ec) && (status & ACPI_EC_FLAG_IBF) == 0)
6728 wake_up(&ec->wait);
6729- ec_check_sci(ec, acpi_ec_read_status(ec));
6730- }
6731+ ec_check_sci(ec, status);
6732 return ACPI_INTERRUPT_HANDLED;
6733 }
6734
6735@@ -949,7 +916,6 @@ static int ec_validate_ecdt(const struct dmi_system_id *id)
6736 /* MSI EC needs special treatment, enable it */
6737 static int ec_flag_msi(const struct dmi_system_id *id)
6738 {
6739- printk(KERN_DEBUG PREFIX "Detected MSI hardware, enabling workarounds.\n");
6740 EC_FLAGS_MSI = 1;
6741 EC_FLAGS_VALIDATE_ECDT = 1;
6742 return 0;
6743@@ -962,13 +928,8 @@ static struct dmi_system_id __initdata ec_dmi_table[] = {
6744 DMI_MATCH(DMI_BOARD_NAME, "JFL92") }, NULL},
6745 {
6746 ec_flag_msi, "MSI hardware", {
6747- DMI_MATCH(DMI_BIOS_VENDOR, "Micro-Star")}, NULL},
6748- {
6749- ec_flag_msi, "MSI hardware", {
6750- DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star")}, NULL},
6751- {
6752- ec_flag_msi, "MSI hardware", {
6753- DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-Star")}, NULL},
6754+ DMI_MATCH(DMI_BIOS_VENDOR, "Micro-Star"),
6755+ DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-Star") }, NULL},
6756 {
6757 ec_validate_ecdt, "ASUS hardware", {
6758 DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL},
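The ec.c hunks relocate the GPE-storm logic into acpi_ec_transaction_unlocked(): once any transaction observes more than ACPI_EC_STORM_THRESHOLD interrupts, a flag permanently switches later transactions to polling with the GPE disabled. A toy sketch of that latch, with an assumed threshold value:

#include <stdio.h>
#include <stdbool.h>

#define STORM_THRESHOLD 8   /* stand-in for ACPI_EC_STORM_THRESHOLD */

static bool gpe_storm;      /* stand-in for the EC_FLAGS_GPE_STORM bit */

/* Run one EC transaction; irq_count is how many interrupts it saw. */
static void transaction(int irq_count)
{
	if (gpe_storm)
		printf("polling mode, GPE disabled for the transaction\n");
	else
		printf("interrupt mode (%d irqs)\n", irq_count);

	/* Latch into polling mode once a storm is observed; nothing in
	 * this path ever clears the bit again. */
	if (!gpe_storm && irq_count > STORM_THRESHOLD) {
		gpe_storm = true;
		printf("GPE storm detected, switching to polling\n");
	}
}

int main(void)
{
	transaction(2);
	transaction(40);   /* storm */
	transaction(1);    /* now polled */
	return 0;
}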
6759diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
6760index d9f78f6..bbd066e 100644
6761--- a/drivers/acpi/processor_idle.c
6762+++ b/drivers/acpi/processor_idle.c
6763@@ -110,14 +110,6 @@ static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
6764 DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
6765 DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
6766 (void *)2},
6767- { set_max_cstate, "Pavilion zv5000", {
6768- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
6769- DMI_MATCH(DMI_PRODUCT_NAME,"Pavilion zv5000 (DS502A#ABA)")},
6770- (void *)1},
6771- { set_max_cstate, "Asus L8400B", {
6772- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
6773- DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
6774- (void *)1},
6775 {},
6776 };
6777
6778@@ -307,17 +299,6 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
6779 pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency;
6780 pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency;
6781
6782- /*
6783- * FADT specified C2 latency must be less than or equal to
6784- * 100 microseconds.
6785- */
6786- if (acpi_gbl_FADT.C2latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
6787- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
6788- "C2 latency too large [%d]\n", acpi_gbl_FADT.C2latency));
6789- /* invalidate C2 */
6790- pr->power.states[ACPI_STATE_C2].address = 0;
6791- }
6792-
6793 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
6794 "lvl2[0x%08x] lvl3[0x%08x]\n",
6795 pr->power.states[ACPI_STATE_C2].address,
6796@@ -514,6 +495,16 @@ static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
6797 return;
6798
6799 /*
6800+ * C2 latency must be less than or equal to 100
6801+ * microseconds.
6802+ */
6803+ else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
6804+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
6805+ "latency too large [%d]\n", cx->latency));
6806+ return;
6807+ }
6808+
6809+ /*
6810 * Otherwise we've met all of our C2 requirements.
6811 * Normalize the C2 latency to expedite policy
6812 */
6813diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
6814index 0b09703..14a7481 100644
6815--- a/drivers/acpi/scan.c
6816+++ b/drivers/acpi/scan.c
6817@@ -1357,9 +1357,6 @@ int acpi_bus_start(struct acpi_device *device)
6818 {
6819 struct acpi_bus_ops ops;
6820
6821- if (!device)
6822- return -EINVAL;
6823-
6824 memset(&ops, 0, sizeof(ops));
6825 ops.acpi_op_start = 1;
6826
6827diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
6828index 9b37502..a3241a1 100644
6829--- a/drivers/ata/ahci.c
6830+++ b/drivers/ata/ahci.c
6831@@ -113,7 +113,6 @@ enum {
6832 board_ahci_mcp65 = 6,
6833 board_ahci_nopmp = 7,
6834 board_ahci_yesncq = 8,
6835- board_ahci_nosntf = 9,
6836
6837 /* global controller registers */
6838 HOST_CAP = 0x00, /* host capabilities */
6839@@ -236,7 +235,6 @@ enum {
6840 AHCI_HFLAG_NO_SUSPEND = (1 << 10), /* don't suspend */
6841 AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = (1 << 11), /* treat SRST timeout as
6842 link offline */
6843- AHCI_HFLAG_NO_SNTF = (1 << 12), /* no sntf */
6844
6845 /* ap->flags bits */
6846
6847@@ -510,7 +508,7 @@ static const struct ata_port_info ahci_port_info[] = {
6848 .udma_mask = ATA_UDMA6,
6849 .port_ops = &ahci_ops,
6850 },
6851- [board_ahci_yesncq] =
6852+ /* board_ahci_yesncq */
6853 {
6854 AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ),
6855 .flags = AHCI_FLAG_COMMON,
6856@@ -518,14 +516,6 @@ static const struct ata_port_info ahci_port_info[] = {
6857 .udma_mask = ATA_UDMA6,
6858 .port_ops = &ahci_ops,
6859 },
6860- [board_ahci_nosntf] =
6861- {
6862- AHCI_HFLAGS (AHCI_HFLAG_NO_SNTF),
6863- .flags = AHCI_FLAG_COMMON,
6864- .pio_mask = ATA_PIO4,
6865- .udma_mask = ATA_UDMA6,
6866- .port_ops = &ahci_ops,
6867- },
6868 };
6869
6870 static const struct pci_device_id ahci_pci_tbl[] = {
6871@@ -541,7 +531,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
6872 { PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
6873 { PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
6874 { PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */
6875- { PCI_VDEVICE(INTEL, 0x2822), board_ahci_nosntf }, /* ICH8 */
6876+ { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* ICH8 */
6877 { PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */
6878 { PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */
6879 { PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */
6880@@ -859,12 +849,6 @@ static void ahci_save_initial_config(struct pci_dev *pdev,
6881 cap &= ~HOST_CAP_PMP;
6882 }
6883
6884- if ((cap & HOST_CAP_SNTF) && (hpriv->flags & AHCI_HFLAG_NO_SNTF)) {
6885- dev_printk(KERN_INFO, &pdev->dev,
6886- "controller can't do SNTF, turning off CAP_SNTF\n");
6887- cap &= ~HOST_CAP_SNTF;
6888- }
6889-
6890 if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361 &&
6891 port_map != 1) {
6892 dev_printk(KERN_INFO, &pdev->dev,
6893@@ -2868,21 +2852,6 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
6894 },
6895 .driver_data = "F.23", /* cutoff BIOS version */
6896 },
6897- /*
6898- * Acer eMachines G725 has the same problem. BIOS
6899- * V1.03 is known to be broken. V3.04 is known to
6900- * work. In between, there are V1.06, V2.06 and V3.03
6901- * that we don't have much idea about. For now,
6902- * blacklist anything older than V3.04.
6903- */
6904- {
6905- .ident = "G725",
6906- .matches = {
6907- DMI_MATCH(DMI_SYS_VENDOR, "eMachines"),
6908- DMI_MATCH(DMI_PRODUCT_NAME, "eMachines G725"),
6909- },
6910- .driver_data = "V3.04", /* cutoff BIOS version */
6911- },
6912 { } /* terminate list */
6913 };
6914 const struct dmi_system_id *dmi = dmi_first_match(sysids);
6915diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
6916index 0c6155f..9ac4e37 100644
6917--- a/drivers/ata/ata_piix.c
6918+++ b/drivers/ata/ata_piix.c
6919@@ -869,10 +869,10 @@ static void do_pata_set_dmamode(struct ata_port *ap, struct ata_device *adev, in
6920 (timings[pio][1] << 8);
6921 }
6922
6923- if (ap->udma_mask)
6924+ if (ap->udma_mask) {
6925 udma_enable &= ~(1 << devid);
6926-
6927- pci_write_config_word(dev, master_port, master_data);
6928+ pci_write_config_word(dev, master_port, master_data);
6929+ }
6930 }
6931 /* Don't scribble on 0x48 if the controller does not support UDMA */
6932 if (ap->udma_mask)
6933diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
6934index 91fed3c..dc72690 100644
6935--- a/drivers/ata/libata-core.c
6936+++ b/drivers/ata/libata-core.c
6937@@ -3790,45 +3790,21 @@ int sata_link_debounce(struct ata_link *link, const unsigned long *params,
6938 int sata_link_resume(struct ata_link *link, const unsigned long *params,
6939 unsigned long deadline)
6940 {
6941- int tries = ATA_LINK_RESUME_TRIES;
6942 u32 scontrol, serror;
6943 int rc;
6944
6945 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
6946 return rc;
6947
6948- /*
6949- * Writes to SControl sometimes get ignored under certain
6950- * controllers (ata_piix SIDPR). Make sure DET actually is
6951- * cleared.
6952- */
6953- do {
6954- scontrol = (scontrol & 0x0f0) | 0x300;
6955- if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
6956- return rc;
6957- /*
6958- * Some PHYs react badly if SStatus is pounded
6959- * immediately after resuming. Delay 200ms before
6960- * debouncing.
6961- */
6962- msleep(200);
6963+ scontrol = (scontrol & 0x0f0) | 0x300;
6964
6965- /* is SControl restored correctly? */
6966- if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
6967- return rc;
6968- } while ((scontrol & 0xf0f) != 0x300 && --tries);
6969-
6970- if ((scontrol & 0xf0f) != 0x300) {
6971- ata_link_printk(link, KERN_ERR,
6972- "failed to resume link (SControl %X)\n",
6973- scontrol);
6974- return 0;
6975- }
6976+ if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
6977+ return rc;
6978
6979- if (tries < ATA_LINK_RESUME_TRIES)
6980- ata_link_printk(link, KERN_WARNING,
6981- "link resume succeeded after %d retries\n",
6982- ATA_LINK_RESUME_TRIES - tries);
6983+ /* Some PHYs react badly if SStatus is pounded immediately
6984+ * after resuming. Delay 200ms before debouncing.
6985+ */
6986+ msleep(200);
6987
6988 if ((rc = sata_link_debounce(link, params, deadline)))
6989 return rc;
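
The removed '-' lines above implemented a write-and-verify retry, since some controllers (the deleted comment names ata_piix SIDPR) can silently drop SControl writes; the regenerated patch keeps only a single write plus the 200 ms settle delay. A self-contained model of the dropped retry pattern, with hypothetical read_reg()/write_reg() stand-ins for sata_scr_read()/sata_scr_write():

#include <stdio.h>

#define RESUME_TRIES 5          /* models ATA_LINK_RESUME_TRIES */

static unsigned int hw_reg;     /* stand-in for the SControl register */
static int flaky_writes = 2;    /* first two writes are silently dropped */

static void write_reg(unsigned int v)
{
    if (flaky_writes > 0) { flaky_writes--; return; }  /* dropped write */
    hw_reg = v;
}

static unsigned int read_reg(void) { return hw_reg; }

/* Write DET=3 (0x300) and verify it stuck, retrying a bounded number of times. */
static int resume_link(void)
{
    int tries = RESUME_TRIES;
    unsigned int scontrol = read_reg();

    do {
        scontrol = (scontrol & 0x0f0) | 0x300;
        write_reg(scontrol);
        scontrol = read_reg();          /* is SControl restored correctly? */
    } while ((scontrol & 0xf0f) != 0x300 && --tries);

    return (scontrol & 0xf0f) == 0x300 ? 0 : -1;
}

int main(void)
{
    printf("resume_link() = %d, reg = 0x%03x\n", resume_link(), hw_reg);
    return 0;
}
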
6990diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
6991index 7d8d3c3..bba2ae5 100644
6992--- a/drivers/ata/libata-eh.c
6993+++ b/drivers/ata/libata-eh.c
6994@@ -2019,9 +2019,8 @@ static void ata_eh_link_autopsy(struct ata_link *link)
6995 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
6996
6997 /* determine whether the command is worth retrying */
6998- if (qc->flags & ATA_QCFLAG_IO ||
6999- (!(qc->err_mask & AC_ERR_INVALID) &&
7000- qc->err_mask != AC_ERR_DEV))
7001+ if (!(qc->err_mask & AC_ERR_INVALID) &&
7002+ ((qc->flags & ATA_QCFLAG_IO) || qc->err_mask != AC_ERR_DEV))
7003 qc->flags |= ATA_QCFLAG_RETRY;
7004
7005 /* accumulate error info */
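
The two sides of the hunk above differ only in operator grouping, but that changes behavior: the old predicate retried any I/O command unconditionally, while the new one lets AC_ERR_INVALID veto the retry first. A sketch comparing the two predicates (the flag values below are illustrative, not libata's actual bit assignments):

#include <stdio.h>

#define AC_ERR_DEV      (1u << 0)   /* values are illustrative */
#define AC_ERR_INVALID  (1u << 1)

/* Old predicate: I/O commands always retried. */
static int retry_old(int is_io, unsigned int err_mask)
{
    return is_io || (!(err_mask & AC_ERR_INVALID) && err_mask != AC_ERR_DEV);
}

/* New predicate: AC_ERR_INVALID vetoes retry even for I/O commands. */
static int retry_new(int is_io, unsigned int err_mask)
{
    return !(err_mask & AC_ERR_INVALID) && (is_io || err_mask != AC_ERR_DEV);
}

int main(void)
{
    /* The predicates diverge only for I/O commands with an invalid err_mask. */
    printf("io+invalid: old=%d new=%d\n",
           retry_old(1, AC_ERR_INVALID), retry_new(1, AC_ERR_INVALID));
    return 0;
}
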
7006diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
7007index 2ae15c3..bbbb1fa 100644
7008--- a/drivers/ata/libata-sff.c
7009+++ b/drivers/ata/libata-sff.c
7010@@ -893,9 +893,6 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
7011 do_write);
7012 }
7013
7014- if (!do_write)
7015- flush_dcache_page(page);
7016-
7017 qc->curbytes += qc->sect_size;
7018 qc->cursg_ofs += qc->sect_size;
7019
7020diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
7021index f0bad9b..f98dffe 100644
7022--- a/drivers/ata/pata_cmd64x.c
7023+++ b/drivers/ata/pata_cmd64x.c
7024@@ -219,7 +219,7 @@ static void cmd64x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
7025 regU |= udma_data[adev->dma_mode - XFER_UDMA_0] << shift;
7026 /* Merge the control bits */
7027 regU |= 1 << adev->devno; /* UDMA on */
7028- if (adev->dma_mode > XFER_UDMA_2) /* 15nS timing */
7029+ if (adev->dma_mode > 2) /* 15nS timing */
7030 regU |= 4 << adev->devno;
7031 } else {
7032 regU &= ~ (1 << adev->devno); /* UDMA off */
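
In the hunk above, adev->dma_mode holds a libata XFER_* mode number, so the '+' side's comparison against a bare 2 behaves differently from the '-' side's comparison against XFER_UDMA_2. A sketch of the difference, assuming the mainline convention that XFER_UDMA_0 is 0x40:

#include <stdio.h>

#define XFER_UDMA_0 0x40   /* assumed to match include/linux/ata.h */
#define XFER_UDMA_2 (XFER_UDMA_0 + 2)

int main(void)
{
    for (unsigned int mode = XFER_UDMA_0; mode <= XFER_UDMA_0 + 5; mode++)
        printf("UDMA%u: (mode > XFER_UDMA_2)=%d  (mode > 2)=%d\n",
               mode - XFER_UDMA_0, mode > XFER_UDMA_2, mode > 2);
    /* The raw comparison is 1 for every UDMA mode, so the 15 nS
     * timing bit would be set even for UDMA0-2. */
    return 0;
}
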
7033diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
7034index ec07c53..d0a7df2 100644
7035--- a/drivers/ata/pata_hpt37x.c
7036+++ b/drivers/ata/pata_hpt37x.c
7037@@ -24,7 +24,7 @@
7038 #include <linux/libata.h>
7039
7040 #define DRV_NAME "pata_hpt37x"
7041-#define DRV_VERSION "0.6.14"
7042+#define DRV_VERSION "0.6.12"
7043
7044 struct hpt_clock {
7045 u8 xfer_speed;
7046@@ -404,8 +404,9 @@ static void hpt370_set_piomode(struct ata_port *ap, struct ata_device *adev)
7047
7048 pci_read_config_dword(pdev, addr1, &reg);
7049 mode = hpt37x_find_mode(ap, adev->pio_mode);
7050- mode &= 0xCFC3FFFF; /* Leave DMA bits alone */
7051- reg &= ~0xCFC3FFFF; /* Strip timing bits */
7052+ mode &= ~0x8000000; /* No FIFO in PIO */
7053+ mode &= ~0x30070000; /* Leave config bits alone */
7054+ reg &= 0x30070000; /* Strip timing bits */
7055 pci_write_config_dword(pdev, addr1, reg | mode);
7056 }
7057
7058@@ -422,7 +423,8 @@ static void hpt370_set_dmamode(struct ata_port *ap, struct ata_device *adev)
7059 {
7060 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
7061 u32 addr1, addr2;
7062- u32 reg, mode, mask;
7063+ u32 reg;
7064+ u32 mode;
7065 u8 fast;
7066
7067 addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
7068@@ -434,12 +436,11 @@ static void hpt370_set_dmamode(struct ata_port *ap, struct ata_device *adev)
7069 fast |= 0x01;
7070 pci_write_config_byte(pdev, addr2, fast);
7071
7072- mask = adev->dma_mode < XFER_UDMA_0 ? 0x31C001FF : 0x303C0000;
7073-
7074 pci_read_config_dword(pdev, addr1, &reg);
7075 mode = hpt37x_find_mode(ap, adev->dma_mode);
7076- mode &= mask;
7077- reg &= ~mask;
7078+ mode |= 0x8000000; /* FIFO in MWDMA or UDMA */
7079+ mode &= ~0xC0000000; /* Leave config bits alone */
7080+ reg &= 0xC0000000; /* Strip timing bits */
7081 pci_write_config_dword(pdev, addr1, reg | mode);
7082 }
7083
7084@@ -507,8 +508,9 @@ static void hpt372_set_piomode(struct ata_port *ap, struct ata_device *adev)
7085 mode = hpt37x_find_mode(ap, adev->pio_mode);
7086
7087 printk("Find mode for %d reports %X\n", adev->pio_mode, mode);
7088- mode &= 0xCFC3FFFF; /* Leave DMA bits alone */
7089- reg &= ~0xCFC3FFFF; /* Strip timing bits */
7090+ mode &= ~0x80000000; /* No FIFO in PIO */
7091+ mode &= ~0x30070000; /* Leave config bits alone */
7092+ reg &= 0x30070000; /* Strip timing bits */
7093 pci_write_config_dword(pdev, addr1, reg | mode);
7094 }
7095
7096@@ -525,7 +527,8 @@ static void hpt372_set_dmamode(struct ata_port *ap, struct ata_device *adev)
7097 {
7098 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
7099 u32 addr1, addr2;
7100- u32 reg, mode, mask;
7101+ u32 reg;
7102+ u32 mode;
7103 u8 fast;
7104
7105 addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
7106@@ -536,13 +539,12 @@ static void hpt372_set_dmamode(struct ata_port *ap, struct ata_device *adev)
7107 fast &= ~0x07;
7108 pci_write_config_byte(pdev, addr2, fast);
7109
7110- mask = adev->dma_mode < XFER_UDMA_0 ? 0x31C001FF : 0x303C0000;
7111-
7112 pci_read_config_dword(pdev, addr1, &reg);
7113 mode = hpt37x_find_mode(ap, adev->dma_mode);
7114 printk("Find mode for DMA %d reports %X\n", adev->dma_mode, mode);
7115- mode &= mask;
7116- reg &= ~mask;
7117+ mode &= ~0xC0000000; /* Leave config bits alone */
7118+ mode |= 0x80000000; /* FIFO in MWDMA or UDMA */
7119+ reg &= 0xC0000000; /* Strip timing bits */
7120 pci_write_config_dword(pdev, addr1, reg | mode);
7121 }
7122
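
The pio/dma mode setters in these hpt37x hunks all perform the same read-modify-write on a PCI timing dword: bits selected by a mask come from the looked-up mode word, the rest are preserved from the current register value; the two patch versions only disagree about which mask to apply. A generic sketch of the idiom:

#include <stdio.h>
#include <stdint.h>

/* Merge: bits selected by mask come from mode, the rest stay from reg. */
static uint32_t merge_timing(uint32_t reg, uint32_t mode, uint32_t mask)
{
    return (reg & ~mask) | (mode & mask);
}

int main(void)
{
    uint32_t reg  = 0xdeadbeef;           /* current register contents */
    uint32_t mode = 0x12345678;           /* timing word from the mode table */
    uint32_t mask = 0xCFC3FFFF;           /* timing bits (per the '-' side) */

    printf("merged = 0x%08x\n", (unsigned)merge_timing(reg, mode, mask));
    /* e.g. hpt370_set_piomode effectively does:
     *   pci_write_config_dword(pdev, addr1, merge_timing(reg, mode, mask)); */
    return 0;
}
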
7123diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
7124index d16e87e..3d59fe0 100644
7125--- a/drivers/ata/pata_hpt3x2n.c
7126+++ b/drivers/ata/pata_hpt3x2n.c
7127@@ -8,7 +8,7 @@
7128 * Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org>
7129 * Portions Copyright (C) 2001 Sun Microsystems, Inc.
7130 * Portions Copyright (C) 2003 Red Hat Inc
7131- * Portions Copyright (C) 2005-2009 MontaVista Software, Inc.
7132+ * Portions Copyright (C) 2005-2007 MontaVista Software, Inc.
7133 *
7134 *
7135 * TODO
7136@@ -25,7 +25,7 @@
7137 #include <linux/libata.h>
7138
7139 #define DRV_NAME "pata_hpt3x2n"
7140-#define DRV_VERSION "0.3.8"
7141+#define DRV_VERSION "0.3.4"
7142
7143 enum {
7144 HPT_PCI_FAST = (1 << 31),
7145@@ -185,8 +185,9 @@ static void hpt3x2n_set_piomode(struct ata_port *ap, struct ata_device *adev)
7146
7147 pci_read_config_dword(pdev, addr1, &reg);
7148 mode = hpt3x2n_find_mode(ap, adev->pio_mode);
7149- mode &= 0xCFC3FFFF; /* Leave DMA bits alone */
7150- reg &= ~0xCFC3FFFF; /* Strip timing bits */
7151+ mode &= ~0x8000000; /* No FIFO in PIO */
7152+ mode &= ~0x30070000; /* Leave config bits alone */
7153+ reg &= 0x30070000; /* Strip timing bits */
7154 pci_write_config_dword(pdev, addr1, reg | mode);
7155 }
7156
7157@@ -203,7 +204,8 @@ static void hpt3x2n_set_dmamode(struct ata_port *ap, struct ata_device *adev)
7158 {
7159 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
7160 u32 addr1, addr2;
7161- u32 reg, mode, mask;
7162+ u32 reg;
7163+ u32 mode;
7164 u8 fast;
7165
7166 addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
7167@@ -214,12 +216,11 @@ static void hpt3x2n_set_dmamode(struct ata_port *ap, struct ata_device *adev)
7168 fast &= ~0x07;
7169 pci_write_config_byte(pdev, addr2, fast);
7170
7171- mask = adev->dma_mode < XFER_UDMA_0 ? 0x31C001FF : 0x303C0000;
7172-
7173 pci_read_config_dword(pdev, addr1, &reg);
7174 mode = hpt3x2n_find_mode(ap, adev->dma_mode);
7175- mode &= mask;
7176- reg &= ~mask;
7177+ mode |= 0x8000000; /* FIFO in MWDMA or UDMA */
7178+ mode &= ~0xC0000000; /* Leave config bits alone */
7179+ reg &= 0xC0000000; /* Strip timing bits */
7180 pci_write_config_dword(pdev, addr1, reg | mode);
7181 }
7182
7183@@ -262,7 +263,7 @@ static void hpt3x2n_bmdma_stop(struct ata_queued_cmd *qc)
7184
7185 static void hpt3x2n_set_clock(struct ata_port *ap, int source)
7186 {
7187- void __iomem *bmdma = ap->ioaddr.bmdma_addr - ap->port_no * 8;
7188+ void __iomem *bmdma = ap->ioaddr.bmdma_addr;
7189
7190 /* Tristate the bus */
7191 iowrite8(0x80, bmdma+0x73);
7192@@ -272,9 +273,9 @@ static void hpt3x2n_set_clock(struct ata_port *ap, int source)
7193 iowrite8(source, bmdma+0x7B);
7194 iowrite8(0xC0, bmdma+0x79);
7195
7196- /* Reset state machines, avoid enabling the disabled channels */
7197- iowrite8(ioread8(bmdma+0x70) | 0x32, bmdma+0x70);
7198- iowrite8(ioread8(bmdma+0x74) | 0x32, bmdma+0x74);
7199+ /* Reset state machines */
7200+ iowrite8(0x37, bmdma+0x70);
7201+ iowrite8(0x37, bmdma+0x74);
7202
7203 /* Complete reset */
7204 iowrite8(0x00, bmdma+0x79);
7205@@ -284,10 +285,21 @@ static void hpt3x2n_set_clock(struct ata_port *ap, int source)
7206 iowrite8(0x00, bmdma+0x77);
7207 }
7208
7209+/* Check if our partner interface is busy */
7210+
7211+static int hpt3x2n_pair_idle(struct ata_port *ap)
7212+{
7213+ struct ata_host *host = ap->host;
7214+ struct ata_port *pair = host->ports[ap->port_no ^ 1];
7215+
7216+ if (pair->hsm_task_state == HSM_ST_IDLE)
7217+ return 1;
7218+ return 0;
7219+}
7220+
7221 static int hpt3x2n_use_dpll(struct ata_port *ap, int writing)
7222 {
7223 long flags = (long)ap->host->private_data;
7224-
7225 /* See if we should use the DPLL */
7226 if (writing)
7227 return USE_DPLL; /* Needed for write */
7228@@ -296,35 +308,20 @@ static int hpt3x2n_use_dpll(struct ata_port *ap, int writing)
7229 return 0;
7230 }
7231
7232-static int hpt3x2n_qc_defer(struct ata_queued_cmd *qc)
7233-{
7234- struct ata_port *ap = qc->ap;
7235- struct ata_port *alt = ap->host->ports[ap->port_no ^ 1];
7236- int rc, flags = (long)ap->host->private_data;
7237- int dpll = hpt3x2n_use_dpll(ap, qc->tf.flags & ATA_TFLAG_WRITE);
7238-
7239- /* First apply the usual rules */
7240- rc = ata_std_qc_defer(qc);
7241- if (rc != 0)
7242- return rc;
7243-
7244- if ((flags & USE_DPLL) != dpll && alt->qc_active)
7245- return ATA_DEFER_PORT;
7246- return 0;
7247-}
7248-
7249 static unsigned int hpt3x2n_qc_issue(struct ata_queued_cmd *qc)
7250 {
7251+ struct ata_taskfile *tf = &qc->tf;
7252 struct ata_port *ap = qc->ap;
7253 int flags = (long)ap->host->private_data;
7254- int dpll = hpt3x2n_use_dpll(ap, qc->tf.flags & ATA_TFLAG_WRITE);
7255-
7256- if ((flags & USE_DPLL) != dpll) {
7257- flags &= ~USE_DPLL;
7258- flags |= dpll;
7259- ap->host->private_data = (void *)(long)flags;
7260
7261- hpt3x2n_set_clock(ap, dpll ? 0x21 : 0x23);
7262+ if (hpt3x2n_pair_idle(ap)) {
7263+ int dpll = hpt3x2n_use_dpll(ap, (tf->flags & ATA_TFLAG_WRITE));
7264+ if ((flags & USE_DPLL) != dpll) {
7265+ if (dpll == 1)
7266+ hpt3x2n_set_clock(ap, 0x21);
7267+ else
7268+ hpt3x2n_set_clock(ap, 0x23);
7269+ }
7270 }
7271 return ata_sff_qc_issue(qc);
7272 }
7273@@ -341,8 +338,6 @@ static struct ata_port_operations hpt3x2n_port_ops = {
7274 .inherits = &ata_bmdma_port_ops,
7275
7276 .bmdma_stop = hpt3x2n_bmdma_stop,
7277-
7278- .qc_defer = hpt3x2n_qc_defer,
7279 .qc_issue = hpt3x2n_qc_issue,
7280
7281 .cable_detect = hpt3x2n_cable_detect,
7282@@ -460,7 +455,7 @@ static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id)
7283 unsigned int f_low, f_high;
7284 int adjust;
7285 unsigned long iobase = pci_resource_start(dev, 4);
7286- void *hpriv = (void *)USE_DPLL;
7287+ void *hpriv = NULL;
7288 int rc;
7289
7290 rc = pcim_enable_device(dev);
7291@@ -548,7 +543,7 @@ static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id)
7292 /* Set our private data up. We only need a few flags so we use
7293 it directly */
7294 if (pci_mhz > 60) {
7295- hpriv = (void *)(PCI66 | USE_DPLL);
7296+ hpriv = (void *)PCI66;
7297 /*
7298 * On HPT371N, if ATA clock is 66 MHz we must set bit 2 in
7299 * the MISC. register to stretch the UltraDMA Tss timing.
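
hpt3x2n_pair_idle() above locates the sibling channel with ap->port_no ^ 1, the usual XOR trick for pairing adjacent even/odd indices. A reduced sketch with the port and host structures trimmed to the fields the trick needs:

#include <stdio.h>

enum hsm_state { HSM_ST_IDLE, HSM_ST_BUSY };

struct port { int port_no; enum hsm_state hsm_task_state; };
struct host { struct port *ports[2]; };

/* Returns 1 when the other channel of the pair is idle. */
static int pair_idle(const struct host *h, const struct port *p)
{
    const struct port *pair = h->ports[p->port_no ^ 1]; /* 0 <-> 1 */
    return pair->hsm_task_state == HSM_ST_IDLE;
}

int main(void)
{
    struct port p0 = { 0, HSM_ST_BUSY }, p1 = { 1, HSM_ST_IDLE };
    struct host h = { { &p0, &p1 } };
    printf("%d %d\n", pair_idle(&h, &p0), pair_idle(&h, &p1)); /* prints: 1 0 */
    return 0;
}
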
7300diff --git a/drivers/base/class.c b/drivers/base/class.c
7301index 6e2c3b0..161746d 100644
7302--- a/drivers/base/class.c
7303+++ b/drivers/base/class.c
7304@@ -59,8 +59,6 @@ static void class_release(struct kobject *kobj)
7305 else
7306 pr_debug("class '%s' does not have a release() function, "
7307 "be careful\n", class->name);
7308-
7309- kfree(cp);
7310 }
7311
7312 static struct sysfs_ops class_sysfs_ops = {
7313diff --git a/drivers/base/core.c b/drivers/base/core.c
7314index 1093179..6bee6af 100644
7315--- a/drivers/base/core.c
7316+++ b/drivers/base/core.c
7317@@ -56,14 +56,7 @@ static inline int device_is_not_partition(struct device *dev)
7318 */
7319 const char *dev_driver_string(const struct device *dev)
7320 {
7321- struct device_driver *drv;
7322-
7323- /* dev->driver can change to NULL underneath us because of unbinding,
7324- * so be careful about accessing it. dev->bus and dev->class should
7325- * never change once they are set, so they don't need special care.
7326- */
7327- drv = ACCESS_ONCE(dev->driver);
7328- return drv ? drv->name :
7329+ return dev->driver ? dev->driver->name :
7330 (dev->bus ? dev->bus->name :
7331 (dev->class ? dev->class->name : ""));
7332 }
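
Both versions of dev_driver_string() above fall back through driver name, then bus name, then class name, then ""; the '-' side additionally snapshots dev->driver via ACCESS_ONCE() because unbinding can clear it concurrently. A userspace sketch of the fallback chain (the concurrency annotation has no meaning outside the kernel):

#include <stdio.h>

struct driver { const char *name; };
struct bus    { const char *name; };
struct class_ { const char *name; };
struct device {
    struct driver *driver;
    struct bus    *bus;
    struct class_ *class;
};

static const char *dev_driver_string(const struct device *dev)
{
    struct driver *drv = dev->driver;   /* kernel: ACCESS_ONCE(dev->driver) */

    return drv ? drv->name
               : (dev->bus ? dev->bus->name
                           : (dev->class ? dev->class->name : ""));
}

int main(void)
{
    struct bus pci = { "pci" };
    struct device dev = { .driver = NULL, .bus = &pci, .class = NULL };
    printf("%s\n", dev_driver_string(&dev));   /* prints "pci" */
    return 0;
}
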
7333diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
7334index 33faaa2..a1cb5af 100644
7335--- a/drivers/base/devtmpfs.c
7336+++ b/drivers/base/devtmpfs.c
7337@@ -353,7 +353,6 @@ int __init devtmpfs_init(void)
7338 {
7339 int err;
7340 struct vfsmount *mnt;
7341- char options[] = "mode=0755";
7342
7343 err = register_filesystem(&dev_fs_type);
7344 if (err) {
7345@@ -362,7 +361,7 @@ int __init devtmpfs_init(void)
7346 return err;
7347 }
7348
7349- mnt = kern_mount_data(&dev_fs_type, options);
7350+ mnt = kern_mount(&dev_fs_type);
7351 if (IS_ERR(mnt)) {
7352 err = PTR_ERR(mnt);
7353 printk(KERN_ERR "devtmpfs: unable to create devtmpfs %i\n", err);
7354diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
7355index 0a4b75f..846d89e 100644
7356--- a/drivers/base/power/runtime.c
7357+++ b/drivers/base/power/runtime.c
7358@@ -777,7 +777,7 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
7359 }
7360
7361 if (parent) {
7362- spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);
7363+ spin_lock(&parent->power.lock);
7364
7365 /*
7366 * It is invalid to put an active child under a parent that is
7367diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
7368index ca9c548..92b1263 100644
7369--- a/drivers/block/cciss.c
7370+++ b/drivers/block/cciss.c
7371@@ -339,9 +339,6 @@ static int cciss_seq_show(struct seq_file *seq, void *v)
7372 if (*pos > h->highest_lun)
7373 return 0;
7374
7375- if (drv == NULL) /* it's possible for h->drv[] to have holes. */
7376- return 0;
7377-
7378 if (drv->heads == 0)
7379 return 0;
7380
7381diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
7382index 68b5957..2ddf03a 100644
7383--- a/drivers/block/pktcdvd.c
7384+++ b/drivers/block/pktcdvd.c
7385@@ -322,7 +322,7 @@ static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd)
7386 pkt_kobj_remove(pd->kobj_stat);
7387 pkt_kobj_remove(pd->kobj_wqueue);
7388 if (class_pktcdvd)
7389- device_unregister(pd->dev);
7390+ device_destroy(class_pktcdvd, pd->pkt_dev);
7391 }
7392
7393
7394diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
7395index 1be7631..44bc8bb 100644
7396--- a/drivers/bluetooth/btusb.c
7397+++ b/drivers/bluetooth/btusb.c
7398@@ -307,7 +307,6 @@ static void btusb_bulk_complete(struct urb *urb)
7399 return;
7400
7401 usb_anchor_urb(urb, &data->bulk_anchor);
7402- usb_mark_last_busy(data->udev);
7403
7404 err = usb_submit_urb(urb, GFP_ATOMIC);
7405 if (err < 0) {
7406diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
7407index 4dcfef0..3cb56a0 100644
7408--- a/drivers/char/agp/intel-agp.c
7409+++ b/drivers/char/agp/intel-agp.c
7410@@ -178,7 +178,6 @@ static struct _intel_private {
7411 * popup and for the GTT.
7412 */
7413 int gtt_entries; /* i830+ */
7414- int gtt_total_size;
7415 union {
7416 void __iomem *i9xx_flush_page;
7417 void *i8xx_flush_page;
7418@@ -1154,7 +1153,7 @@ static int intel_i915_configure(void)
7419 readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
7420
7421 if (agp_bridge->driver->needs_scratch_page) {
7422- for (i = intel_private.gtt_entries; i < intel_private.gtt_total_size; i++) {
7423+ for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) {
7424 writel(agp_bridge->scratch_page, intel_private.gtt+i);
7425 }
7426 readl(intel_private.gtt+i-1); /* PCI Posting. */
7427@@ -1309,8 +1308,6 @@ static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
7428 if (!intel_private.gtt)
7429 return -ENOMEM;
7430
7431- intel_private.gtt_total_size = gtt_map_size / 4;
7432-
7433 temp &= 0xfff80000;
7434
7435 intel_private.registers = ioremap(temp, 128 * 4096);
7436@@ -1398,8 +1395,6 @@ static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge)
7437 if (!intel_private.gtt)
7438 return -ENOMEM;
7439
7440- intel_private.gtt_total_size = gtt_size / 4;
7441-
7442 intel_private.registers = ioremap(temp, 128 * 4096);
7443 if (!intel_private.registers) {
7444 iounmap(intel_private.gtt);
7445diff --git a/drivers/char/mem.c b/drivers/char/mem.c
7446index aef3fb4..a074fce 100644
7447--- a/drivers/char/mem.c
7448+++ b/drivers/char/mem.c
7449@@ -35,19 +35,6 @@
7450 # include <linux/efi.h>
7451 #endif
7452
7453-static inline unsigned long size_inside_page(unsigned long start,
7454- unsigned long size)
7455-{
7456- unsigned long sz;
7457-
7458- if (-start & (PAGE_SIZE - 1))
7459- sz = -start & (PAGE_SIZE - 1);
7460- else
7461- sz = PAGE_SIZE;
7462-
7463- return min_t(unsigned long, sz, size);
7464-}
7465-
7466 /*
7467 * Architectures vary in how they handle caching for addresses
7468 * outside of main memory.
7469@@ -421,7 +408,6 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
7470 unsigned long p = *ppos;
7471 ssize_t low_count, read, sz;
7472 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
7473- int err = 0;
7474
7475 read = 0;
7476 if (p < (unsigned long) high_memory) {
7477@@ -444,7 +430,15 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
7478 }
7479 #endif
7480 while (low_count > 0) {
7481- sz = size_inside_page(p, low_count);
7482+ /*
7483+ * Handle first page in case it's not aligned
7484+ */
7485+ if (-p & (PAGE_SIZE - 1))
7486+ sz = -p & (PAGE_SIZE - 1);
7487+ else
7488+ sz = PAGE_SIZE;
7489+
7490+ sz = min_t(unsigned long, sz, low_count);
7491
7492 /*
7493 * On ia64 if a page has been mapped somewhere as
7494@@ -468,18 +462,16 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
7495 if (!kbuf)
7496 return -ENOMEM;
7497 while (count > 0) {
7498- int len = size_inside_page(p, count);
7499+ int len = count;
7500
7501- if (!is_vmalloc_or_module_addr((void *)p)) {
7502- err = -ENXIO;
7503- break;
7504- }
7505+ if (len > PAGE_SIZE)
7506+ len = PAGE_SIZE;
7507 len = vread(kbuf, (char *)p, len);
7508 if (!len)
7509 break;
7510 if (copy_to_user(buf, kbuf, len)) {
7511- err = -EFAULT;
7512- break;
7513+ free_page((unsigned long)kbuf);
7514+ return -EFAULT;
7515 }
7516 count -= len;
7517 buf += len;
7518@@ -488,8 +480,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
7519 }
7520 free_page((unsigned long)kbuf);
7521 }
7522- *ppos = p;
7523- return read ? read : err;
7524+ *ppos = p;
7525+ return read;
7526 }
7527
7528
7529@@ -518,8 +510,15 @@ do_write_kmem(void *p, unsigned long realp, const char __user * buf,
7530
7531 while (count > 0) {
7532 char *ptr;
7533+ /*
7534+ * Handle first page in case it's not aligned
7535+ */
7536+ if (-realp & (PAGE_SIZE - 1))
7537+ sz = -realp & (PAGE_SIZE - 1);
7538+ else
7539+ sz = PAGE_SIZE;
7540
7541- sz = size_inside_page(realp, count);
7542+ sz = min_t(unsigned long, sz, count);
7543
7544 /*
7545 * On ia64 if a page has been mapped somewhere as
7546@@ -558,7 +557,6 @@ static ssize_t write_kmem(struct file * file, const char __user * buf,
7547 ssize_t virtr = 0;
7548 ssize_t written;
7549 char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
7550- int err = 0;
7551
7552 if (p < (unsigned long) high_memory) {
7553
7554@@ -580,20 +578,20 @@ static ssize_t write_kmem(struct file * file, const char __user * buf,
7555 if (!kbuf)
7556 return wrote ? wrote : -ENOMEM;
7557 while (count > 0) {
7558- int len = size_inside_page(p, count);
7559+ int len = count;
7560
7561- if (!is_vmalloc_or_module_addr((void *)p)) {
7562- err = -ENXIO;
7563- break;
7564- }
7565+ if (len > PAGE_SIZE)
7566+ len = PAGE_SIZE;
7567 if (len) {
7568 written = copy_from_user(kbuf, buf, len);
7569 if (written) {
7570- err = -EFAULT;
7571- break;
7572+ if (wrote + virtr)
7573+ break;
7574+ free_page((unsigned long)kbuf);
7575+ return -EFAULT;
7576 }
7577 }
7578- vwrite(kbuf, (char *)p, len);
7579+ len = vwrite(kbuf, (char *)p, len);
7580 count -= len;
7581 buf += len;
7582 virtr += len;
7583@@ -602,8 +600,8 @@ static ssize_t write_kmem(struct file * file, const char __user * buf,
7584 free_page((unsigned long)kbuf);
7585 }
7586
7587- *ppos = p;
7588- return virtr + wrote ? : err;
7589+ *ppos = p;
7590+ return virtr + wrote;
7591 }
7592 #endif
7593
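
The size_inside_page() helper removed above, and the open-coded -p & (PAGE_SIZE - 1) sequences that replace it, both compute the number of bytes from an address to the end of its page, capped by the remaining count; negating the address works because -p mod PAGE_SIZE is exactly the distance to the next page boundary. A runnable sketch:

#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned long min_ul(unsigned long a, unsigned long b)
{
    return a < b ? a : b;
}

/* Bytes from start to the end of its page, but no more than size. */
static unsigned long size_inside_page(unsigned long start, unsigned long size)
{
    unsigned long sz;

    if (-start & (PAGE_SIZE - 1))
        sz = -start & (PAGE_SIZE - 1);   /* distance to next page boundary */
    else
        sz = PAGE_SIZE;                  /* already aligned: a whole page */

    return min_ul(sz, size);
}

int main(void)
{
    printf("%lu\n", size_inside_page(4000, 10000)); /* 96: tail of the page */
    printf("%lu\n", size_inside_page(8192, 10000)); /* 4096: aligned start */
    printf("%lu\n", size_inside_page(4000, 50));    /* 50: capped by size */
    return 0;
}
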
7594diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c
7595index dc52f75..d3400b2 100644
7596--- a/drivers/char/nozomi.c
7597+++ b/drivers/char/nozomi.c
7598@@ -1629,10 +1629,10 @@ static void ntty_close(struct tty_struct *tty, struct file *file)
7599
7600 dc->open_ttys--;
7601 port->count--;
7602+ tty_port_tty_set(port, NULL);
7603
7604 if (port->count == 0) {
7605 DBG1("close: %d", nport->token_dl);
7606- tty_port_tty_set(port, NULL);
7607 spin_lock_irqsave(&dc->spin_mutex, flags);
7608 dc->last_ier &= ~(nport->token_dl);
7609 writew(dc->last_ier, dc->reg_ier);
7610diff --git a/drivers/char/random.c b/drivers/char/random.c
7611index 908ac1f..04b505e 100644
7612--- a/drivers/char/random.c
7613+++ b/drivers/char/random.c
7614@@ -1051,6 +1051,12 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
7615 /* like a named pipe */
7616 }
7617
7618+ /*
7619+ * If we gave the user some bytes, update the access time.
7620+ */
7621+ if (count)
7622+ file_accessed(file);
7623+
7624 return (count ? count : retval);
7625 }
7626
7627@@ -1101,6 +1107,7 @@ static ssize_t random_write(struct file *file, const char __user *buffer,
7628 size_t count, loff_t *ppos)
7629 {
7630 size_t ret;
7631+ struct inode *inode = file->f_path.dentry->d_inode;
7632
7633 ret = write_pool(&blocking_pool, buffer, count);
7634 if (ret)
7635@@ -1109,6 +1116,8 @@ static ssize_t random_write(struct file *file, const char __user *buffer,
7636 if (ret)
7637 return ret;
7638
7639+ inode->i_mtime = current_fs_time(inode->i_sb);
7640+ mark_inode_dirty(inode);
7641 return (ssize_t)count;
7642 }
7643
7644diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c
7645index f584407..ecba494 100644
7646--- a/drivers/char/tpm/tpm_infineon.c
7647+++ b/drivers/char/tpm/tpm_infineon.c
7648@@ -39,12 +39,12 @@
7649 struct tpm_inf_dev {
7650 int iotype;
7651
7652- void __iomem *mem_base; /* MMIO ioremap'd addr */
7653- unsigned long map_base; /* phys MMIO base */
7654- unsigned long map_size; /* MMIO region size */
7655- unsigned int index_off; /* index register offset */
7656+ void __iomem *mem_base; /* MMIO ioremap'd addr */
7657+ unsigned long map_base; /* phys MMIO base */
7658+ unsigned long map_size; /* MMIO region size */
7659+ unsigned int index_off; /* index register offset */
7660
7661- unsigned int data_regs; /* Data registers */
7662+ unsigned int data_regs; /* Data registers */
7663 unsigned int data_size;
7664
7665 unsigned int config_port; /* IO Port config index reg */
7666@@ -406,14 +406,14 @@ static const struct tpm_vendor_specific tpm_inf = {
7667 .miscdev = {.fops = &inf_ops,},
7668 };
7669
7670-static const struct pnp_device_id tpm_inf_pnp_tbl[] = {
7671+static const struct pnp_device_id tpm_pnp_tbl[] = {
7672 /* Infineon TPMs */
7673 {"IFX0101", 0},
7674 {"IFX0102", 0},
7675 {"", 0}
7676 };
7677
7678-MODULE_DEVICE_TABLE(pnp, tpm_inf_pnp_tbl);
7679+MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl);
7680
7681 static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
7682 const struct pnp_device_id *dev_id)
7683@@ -430,7 +430,7 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
7684 if (pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) &&
7685 !(pnp_port_flags(dev, 0) & IORESOURCE_DISABLED)) {
7686
7687- tpm_dev.iotype = TPM_INF_IO_PORT;
7688+ tpm_dev.iotype = TPM_INF_IO_PORT;
7689
7690 tpm_dev.config_port = pnp_port_start(dev, 0);
7691 tpm_dev.config_size = pnp_port_len(dev, 0);
7692@@ -459,9 +459,9 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
7693 goto err_last;
7694 }
7695 } else if (pnp_mem_valid(dev, 0) &&
7696- !(pnp_mem_flags(dev, 0) & IORESOURCE_DISABLED)) {
7697+ !(pnp_mem_flags(dev, 0) & IORESOURCE_DISABLED)) {
7698
7699- tpm_dev.iotype = TPM_INF_IO_MEM;
7700+ tpm_dev.iotype = TPM_INF_IO_MEM;
7701
7702 tpm_dev.map_base = pnp_mem_start(dev, 0);
7703 tpm_dev.map_size = pnp_mem_len(dev, 0);
7704@@ -563,11 +563,11 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
7705 "product id 0x%02x%02x"
7706 "%s\n",
7707 tpm_dev.iotype == TPM_INF_IO_PORT ?
7708- tpm_dev.config_port :
7709- tpm_dev.map_base + tpm_dev.index_off,
7710+ tpm_dev.config_port :
7711+ tpm_dev.map_base + tpm_dev.index_off,
7712 tpm_dev.iotype == TPM_INF_IO_PORT ?
7713- tpm_dev.data_regs :
7714- tpm_dev.map_base + tpm_dev.data_regs,
7715+ tpm_dev.data_regs :
7716+ tpm_dev.map_base + tpm_dev.data_regs,
7717 version[0], version[1],
7718 vendorid[0], vendorid[1],
7719 productid[0], productid[1], chipname);
7720@@ -607,55 +607,20 @@ static __devexit void tpm_inf_pnp_remove(struct pnp_dev *dev)
7721 iounmap(tpm_dev.mem_base);
7722 release_mem_region(tpm_dev.map_base, tpm_dev.map_size);
7723 }
7724- tpm_dev_vendor_release(chip);
7725 tpm_remove_hardware(chip->dev);
7726 }
7727 }
7728
7729-static int tpm_inf_pnp_suspend(struct pnp_dev *dev, pm_message_t pm_state)
7730-{
7731- struct tpm_chip *chip = pnp_get_drvdata(dev);
7732- int rc;
7733- if (chip) {
7734- u8 savestate[] = {
7735- 0, 193, /* TPM_TAG_RQU_COMMAND */
7736- 0, 0, 0, 10, /* blob length (in bytes) */
7737- 0, 0, 0, 152 /* TPM_ORD_SaveState */
7738- };
7739- dev_info(&dev->dev, "saving TPM state\n");
7740- rc = tpm_inf_send(chip, savestate, sizeof(savestate));
7741- if (rc < 0) {
7742- dev_err(&dev->dev, "error while saving TPM state\n");
7743- return rc;
7744- }
7745- }
7746- return 0;
7747-}
7748-
7749-static int tpm_inf_pnp_resume(struct pnp_dev *dev)
7750-{
7751- /* Re-configure TPM after suspending */
7752- tpm_config_out(ENABLE_REGISTER_PAIR, TPM_INF_ADDR);
7753- tpm_config_out(IOLIMH, TPM_INF_ADDR);
7754- tpm_config_out((tpm_dev.data_regs >> 8) & 0xff, TPM_INF_DATA);
7755- tpm_config_out(IOLIML, TPM_INF_ADDR);
7756- tpm_config_out((tpm_dev.data_regs & 0xff), TPM_INF_DATA);
7757- /* activate register */
7758- tpm_config_out(TPM_DAR, TPM_INF_ADDR);
7759- tpm_config_out(0x01, TPM_INF_DATA);
7760- tpm_config_out(DISABLE_REGISTER_PAIR, TPM_INF_ADDR);
7761- /* disable RESET, LP and IRQC */
7762- tpm_data_out(RESET_LP_IRQC_DISABLE, CMD);
7763- return tpm_pm_resume(&dev->dev);
7764-}
7765-
7766 static struct pnp_driver tpm_inf_pnp_driver = {
7767 .name = "tpm_inf_pnp",
7768- .id_table = tpm_inf_pnp_tbl,
7769+ .driver = {
7770+ .owner = THIS_MODULE,
7771+ .suspend = tpm_pm_suspend,
7772+ .resume = tpm_pm_resume,
7773+ },
7774+ .id_table = tpm_pnp_tbl,
7775 .probe = tpm_inf_pnp_probe,
7776- .suspend = tpm_inf_pnp_suspend,
7777- .resume = tpm_inf_pnp_resume,
7778- .remove = __devexit_p(tpm_inf_pnp_remove)
7779+ .remove = __devexit_p(tpm_inf_pnp_remove),
7780 };
7781
7782 static int __init init_inf(void)
7783@@ -673,5 +638,5 @@ module_exit(cleanup_inf);
7784
7785 MODULE_AUTHOR("Marcel Selhorst <m.selhorst@sirrix.com>");
7786 MODULE_DESCRIPTION("Driver for Infineon TPM SLD 9630 TT 1.1 / SLB 9635 TT 1.2");
7787-MODULE_VERSION("1.9.2");
7788+MODULE_VERSION("1.9");
7789 MODULE_LICENSE("GPL");
7790diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
7791index 05cab2c..59499ee 100644
7792--- a/drivers/char/tty_io.c
7793+++ b/drivers/char/tty_io.c
7794@@ -1930,10 +1930,8 @@ static int tty_fasync(int fd, struct file *filp, int on)
7795 pid = task_pid(current);
7796 type = PIDTYPE_PID;
7797 }
7798- get_pid(pid);
7799 spin_unlock_irqrestore(&tty->ctrl_lock, flags);
7800 retval = __f_setown(filp, pid, type, 0);
7801- put_pid(pid);
7802 if (retval)
7803 goto out;
7804 } else {
7805diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
7806index 537c29a..f060246 100644
7807--- a/drivers/connector/connector.c
7808+++ b/drivers/connector/connector.c
7809@@ -36,6 +36,17 @@ MODULE_LICENSE("GPL");
7810 MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
7811 MODULE_DESCRIPTION("Generic userspace <-> kernelspace connector.");
7812
7813+static u32 cn_idx = CN_IDX_CONNECTOR;
7814+static u32 cn_val = CN_VAL_CONNECTOR;
7815+
7816+module_param(cn_idx, uint, 0);
7817+module_param(cn_val, uint, 0);
7818+MODULE_PARM_DESC(cn_idx, "Connector's main device idx.");
7819+MODULE_PARM_DESC(cn_val, "Connector's main device val.");
7820+
7821+static DEFINE_MUTEX(notify_lock);
7822+static LIST_HEAD(notify_list);
7823+
7824 static struct cn_dev cdev;
7825
7826 static int cn_already_initialized;
7827@@ -199,6 +210,54 @@ static void cn_rx_skb(struct sk_buff *__skb)
7828 }
7829
7830 /*
7831+ * Notification routing.
7832+ *
7833+ * Gets an id and checks whether there are notification requests for its idx
7834+ * and val. If there are such requests, notify the listeners with the
7835+ * given notify event.
7836+ *
7837+ */
7838+static void cn_notify(struct cb_id *id, u32 notify_event)
7839+{
7840+ struct cn_ctl_entry *ent;
7841+
7842+ mutex_lock(&notify_lock);
7843+ list_for_each_entry(ent, &notify_list, notify_entry) {
7844+ int i;
7845+ struct cn_notify_req *req;
7846+ struct cn_ctl_msg *ctl = ent->msg;
7847+ int idx_found, val_found;
7848+
7849+ idx_found = val_found = 0;
7850+
7851+ req = (struct cn_notify_req *)ctl->data;
7852+ for (i = 0; i < ctl->idx_notify_num; ++i, ++req) {
7853+ if (id->idx >= req->first &&
7854+ id->idx < req->first + req->range) {
7855+ idx_found = 1;
7856+ break;
7857+ }
7858+ }
7859+
7860+ for (i = 0; i < ctl->val_notify_num; ++i, ++req) {
7861+ if (id->val >= req->first &&
7862+ id->val < req->first + req->range) {
7863+ val_found = 1;
7864+ break;
7865+ }
7866+ }
7867+
7868+ if (idx_found && val_found) {
7869+ struct cn_msg m = { .ack = notify_event, };
7870+
7871+ memcpy(&m.id, id, sizeof(m.id));
7872+ cn_netlink_send(&m, ctl->group, GFP_KERNEL);
7873+ }
7874+ }
7875+ mutex_unlock(&notify_lock);
7876+}
7877+
7878+/*
7879 * Callback add routing - adds callback with given ID and name.
7880 * If there is registered callback with the same ID it will not be added.
7881 *
7882@@ -217,6 +276,8 @@ int cn_add_callback(struct cb_id *id, char *name,
7883 if (err)
7884 return err;
7885
7886+ cn_notify(id, 0);
7887+
7888 return 0;
7889 }
7890 EXPORT_SYMBOL_GPL(cn_add_callback);
7891@@ -234,9 +295,111 @@ void cn_del_callback(struct cb_id *id)
7892 struct cn_dev *dev = &cdev;
7893
7894 cn_queue_del_callback(dev->cbdev, id);
7895+ cn_notify(id, 1);
7896 }
7897 EXPORT_SYMBOL_GPL(cn_del_callback);
7898
7899+/*
7900+ * Checks whether two connector control messages are the same.
7901+ * Returns 1 if they are the same or if the first one is corrupted.
7902+ */
7903+static int cn_ctl_msg_equals(struct cn_ctl_msg *m1, struct cn_ctl_msg *m2)
7904+{
7905+ int i;
7906+ struct cn_notify_req *req1, *req2;
7907+
7908+ if (m1->idx_notify_num != m2->idx_notify_num)
7909+ return 0;
7910+
7911+ if (m1->val_notify_num != m2->val_notify_num)
7912+ return 0;
7913+
7914+ if (m1->len != m2->len)
7915+ return 0;
7916+
7917+ if ((m1->idx_notify_num + m1->val_notify_num) * sizeof(*req1) !=
7918+ m1->len)
7919+ return 1;
7920+
7921+ req1 = (struct cn_notify_req *)m1->data;
7922+ req2 = (struct cn_notify_req *)m2->data;
7923+
7924+ for (i = 0; i < m1->idx_notify_num; ++i) {
7925+ if (req1->first != req2->first || req1->range != req2->range)
7926+ return 0;
7927+ req1++;
7928+ req2++;
7929+ }
7930+
7931+ for (i = 0; i < m1->val_notify_num; ++i) {
7932+ if (req1->first != req2->first || req1->range != req2->range)
7933+ return 0;
7934+ req1++;
7935+ req2++;
7936+ }
7937+
7938+ return 1;
7939+}
7940+
7941+/*
7942+ * Main connector device's callback.
7943+ *
7944+ * Used for notification of a request's processing.
7945+ */
7946+static void cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
7947+{
7948+ struct cn_ctl_msg *ctl;
7949+ struct cn_ctl_entry *ent;
7950+ u32 size;
7951+
7952+ if (msg->len < sizeof(*ctl))
7953+ return;
7954+
7955+ ctl = (struct cn_ctl_msg *)msg->data;
7956+
7957+ size = (sizeof(*ctl) + ((ctl->idx_notify_num +
7958+ ctl->val_notify_num) *
7959+ sizeof(struct cn_notify_req)));
7960+
7961+ if (msg->len != size)
7962+ return;
7963+
7964+ if (ctl->len + sizeof(*ctl) != msg->len)
7965+ return;
7966+
7967+ /*
7968+ * Remove notification.
7969+ */
7970+ if (ctl->group == 0) {
7971+ struct cn_ctl_entry *n;
7972+
7973+ mutex_lock(&notify_lock);
7974+ list_for_each_entry_safe(ent, n, &notify_list, notify_entry) {
7975+ if (cn_ctl_msg_equals(ent->msg, ctl)) {
7976+ list_del(&ent->notify_entry);
7977+ kfree(ent);
7978+ }
7979+ }
7980+ mutex_unlock(&notify_lock);
7981+
7982+ return;
7983+ }
7984+
7985+ size += sizeof(*ent);
7986+
7987+ ent = kzalloc(size, GFP_KERNEL);
7988+ if (!ent)
7989+ return;
7990+
7991+ ent->msg = (struct cn_ctl_msg *)(ent + 1);
7992+
7993+ memcpy(ent->msg, ctl, size - sizeof(*ent));
7994+
7995+ mutex_lock(&notify_lock);
7996+ list_add(&ent->notify_entry, &notify_list);
7997+ mutex_unlock(&notify_lock);
7998+}
7999+
8000 static int cn_proc_show(struct seq_file *m, void *v)
8001 {
8002 struct cn_queue_dev *dev = cdev.cbdev;
8003@@ -274,8 +437,11 @@ static const struct file_operations cn_file_ops = {
8004 static int __devinit cn_init(void)
8005 {
8006 struct cn_dev *dev = &cdev;
8007+ int err;
8008
8009 dev->input = cn_rx_skb;
8010+ dev->id.idx = cn_idx;
8011+ dev->id.val = cn_val;
8012
8013 dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR,
8014 CN_NETLINK_USERS + 0xf,
8015@@ -291,6 +457,14 @@ static int __devinit cn_init(void)
8016
8017 cn_already_initialized = 1;
8018
8019+ err = cn_add_callback(&dev->id, "connector", &cn_callback);
8020+ if (err) {
8021+ cn_already_initialized = 0;
8022+ cn_queue_free_dev(dev->cbdev);
8023+ netlink_kernel_release(dev->nls);
8024+ return -EINVAL;
8025+ }
8026+
8027 proc_net_fops_create(&init_net, "connector", S_IRUGO, &cn_file_ops);
8028
8029 return 0;
8030@@ -304,6 +478,7 @@ static void __devexit cn_fini(void)
8031
8032 proc_net_remove(&init_net, "connector");
8033
8034+ cn_del_callback(&dev->id);
8035 cn_queue_free_dev(dev->cbdev);
8036 netlink_kernel_release(dev->nls);
8037 }
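
cn_notify() above matches a callback id against each registered request, where a request describes the half-open range [first, first + range). A reduced sketch of that range test:

#include <stdio.h>

struct notify_req { unsigned int first, range; };

/* Mirrors the test in cn_notify(): id in [first, first + range). */
static int req_matches(const struct notify_req *req, unsigned int id)
{
    return id >= req->first && id < req->first + req->range;
}

int main(void)
{
    struct notify_req req = { .first = 10, .range = 4 };  /* covers 10..13 */

    for (unsigned int id = 9; id <= 14; id++)
        printf("id=%u matches=%d\n", id, req_matches(&req, id));
    return 0;
}
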
8038diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
8039index 73655ae..6810443 100644
8040--- a/drivers/cpuidle/governors/menu.c
8041+++ b/drivers/cpuidle/governors/menu.c
8042@@ -18,7 +18,6 @@
8043 #include <linux/hrtimer.h>
8044 #include <linux/tick.h>
8045 #include <linux/sched.h>
8046-#include <linux/math64.h>
8047
8048 #define BUCKETS 12
8049 #define RESOLUTION 1024
8050@@ -170,12 +169,6 @@ static DEFINE_PER_CPU(struct menu_device, menu_devices);
8051
8052 static void menu_update(struct cpuidle_device *dev);
8053
8054-/* This implements DIV_ROUND_CLOSEST but avoids 64 bit division */
8055-static u64 div_round64(u64 dividend, u32 divisor)
8056-{
8057- return div_u64(dividend + (divisor / 2), divisor);
8058-}
8059-
8060 /**
8061 * menu_select - selects the next idle state to enter
8062 * @dev: the CPU
8063@@ -216,8 +209,9 @@ static int menu_select(struct cpuidle_device *dev)
8064 data->correction_factor[data->bucket] = RESOLUTION * DECAY;
8065
8066 /* Make sure to round up for half microseconds */
8067- data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket],
8068- RESOLUTION * DECAY);
8069+ data->predicted_us = DIV_ROUND_CLOSEST(
8070+ data->expected_us * data->correction_factor[data->bucket],
8071+ RESOLUTION * DECAY);
8072
8073 /*
8074 * We want to default to C1 (hlt), not to busy polling
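
Both sides of the menu-governor hunk above compute a round-to-nearest division; the '-' side's div_round64() avoids a native 64-bit divide on 32-bit hosts by going through div_u64(), while the '+' side calls DIV_ROUND_CLOSEST directly. The shared identity is round(a/b) = (a + b/2) / b for non-negative a. A sketch:

#include <stdio.h>
#include <stdint.h>

/* Round-to-nearest division, as in the removed div_round64() helper
 * (the kernel routes the division through div_u64() on 32-bit builds). */
static uint64_t div_round64(uint64_t dividend, uint32_t divisor)
{
    return (dividend + divisor / 2) / divisor;
}

int main(void)
{
    /* 7/2 = 3.5 rounds to 4; 6/4 = 1.5 rounds to 2; 5/4 = 1.25 rounds to 1 */
    printf("%llu %llu %llu\n",
           (unsigned long long)div_round64(7, 2),
           (unsigned long long)div_round64(6, 4),
           (unsigned long long)div_round64(5, 4));
    return 0;
}
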
8075diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
8076index d3a27e0..0af8057 100644
8077--- a/drivers/crypto/padlock-sha.c
8078+++ b/drivers/crypto/padlock-sha.c
8079@@ -57,23 +57,6 @@ static int padlock_sha_update(struct shash_desc *desc,
8080 return crypto_shash_update(&dctx->fallback, data, length);
8081 }
8082
8083-static int padlock_sha_export(struct shash_desc *desc, void *out)
8084-{
8085- struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
8086-
8087- return crypto_shash_export(&dctx->fallback, out);
8088-}
8089-
8090-static int padlock_sha_import(struct shash_desc *desc, const void *in)
8091-{
8092- struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
8093- struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);
8094-
8095- dctx->fallback.tfm = ctx->fallback;
8096- dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
8097- return crypto_shash_import(&dctx->fallback, in);
8098-}
8099-
8100 static inline void padlock_output_block(uint32_t *src,
8101 uint32_t *dst, size_t count)
8102 {
8103@@ -252,10 +235,7 @@ static struct shash_alg sha1_alg = {
8104 .update = padlock_sha_update,
8105 .finup = padlock_sha1_finup,
8106 .final = padlock_sha1_final,
8107- .export = padlock_sha_export,
8108- .import = padlock_sha_import,
8109 .descsize = sizeof(struct padlock_sha_desc),
8110- .statesize = sizeof(struct sha1_state),
8111 .base = {
8112 .cra_name = "sha1",
8113 .cra_driver_name = "sha1-padlock",
8114@@ -276,10 +256,7 @@ static struct shash_alg sha256_alg = {
8115 .update = padlock_sha_update,
8116 .finup = padlock_sha256_finup,
8117 .final = padlock_sha256_final,
8118- .export = padlock_sha_export,
8119- .import = padlock_sha_import,
8120 .descsize = sizeof(struct padlock_sha_desc),
8121- .statesize = sizeof(struct sha256_state),
8122 .base = {
8123 .cra_name = "sha256",
8124 .cra_driver_name = "sha256-padlock",
8125diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
8126index c558fa1..7585c41 100644
8127--- a/drivers/dma/at_hdmac.c
8128+++ b/drivers/dma/at_hdmac.c
8129@@ -815,7 +815,7 @@ atc_is_tx_complete(struct dma_chan *chan,
8130 dev_vdbg(chan2dev(chan), "is_tx_complete: %d (d%d, u%d)\n",
8131 cookie, done ? *done : 0, used ? *used : 0);
8132
8133- spin_lock_bh(&atchan->lock);
8134+ spin_lock_bh(atchan->lock);
8135
8136 last_complete = atchan->completed_cookie;
8137 last_used = chan->cookie;
8138@@ -830,7 +830,7 @@ atc_is_tx_complete(struct dma_chan *chan,
8139 ret = dma_async_is_complete(cookie, last_complete, last_used);
8140 }
8141
8142- spin_unlock_bh(&atchan->lock);
8143+ spin_unlock_bh(atchan->lock);
8144
8145 if (done)
8146 *done = last_complete;
8147diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
8148index dcc4ab7..c524d36 100644
8149--- a/drivers/dma/ioat/dma.c
8150+++ b/drivers/dma/ioat/dma.c
8151@@ -1032,7 +1032,7 @@ int __devinit ioat_probe(struct ioatdma_device *device)
8152 dma->dev = &pdev->dev;
8153
8154 if (!dma->chancnt) {
8155- dev_err(dev, "channel enumeration error\n");
8156+ dev_err(dev, "zero channels detected\n");
8157 goto err_setup_interrupts;
8158 }
8159
8160diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
8161index bbc3e78..45edde9 100644
8162--- a/drivers/dma/ioat/dma.h
8163+++ b/drivers/dma/ioat/dma.h
8164@@ -60,7 +60,6 @@
8165 * @dca: direct cache access context
8166 * @intr_quirk: interrupt setup quirk (for ioat_v1 devices)
8167 * @enumerate_channels: hw version specific channel enumeration
8168- * @reset_hw: hw version specific channel (re)initialization
8169 * @cleanup_tasklet: select between the v2 and v3 cleanup routines
8170 * @timer_fn: select between the v2 and v3 timer watchdog routines
8171 * @self_test: hardware version specific self test for each supported op type
8172@@ -79,7 +78,6 @@ struct ioatdma_device {
8173 struct dca_provider *dca;
8174 void (*intr_quirk)(struct ioatdma_device *device);
8175 int (*enumerate_channels)(struct ioatdma_device *device);
8176- int (*reset_hw)(struct ioat_chan_common *chan);
8177 void (*cleanup_tasklet)(unsigned long data);
8178 void (*timer_fn)(unsigned long data);
8179 int (*self_test)(struct ioatdma_device *device);
8180@@ -266,22 +264,6 @@ static inline void ioat_suspend(struct ioat_chan_common *chan)
8181 writeb(IOAT_CHANCMD_SUSPEND, chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
8182 }
8183
8184-static inline void ioat_reset(struct ioat_chan_common *chan)
8185-{
8186- u8 ver = chan->device->version;
8187-
8188- writeb(IOAT_CHANCMD_RESET, chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
8189-}
8190-
8191-static inline bool ioat_reset_pending(struct ioat_chan_common *chan)
8192-{
8193- u8 ver = chan->device->version;
8194- u8 cmd;
8195-
8196- cmd = readb(chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
8197- return (cmd & IOAT_CHANCMD_RESET) == IOAT_CHANCMD_RESET;
8198-}
8199-
8200 static inline void ioat_set_chainaddr(struct ioat_dma_chan *ioat, u64 addr)
8201 {
8202 struct ioat_chan_common *chan = &ioat->base;
8203diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
8204index 5cc37af..8f1f7f0 100644
8205--- a/drivers/dma/ioat/dma_v2.c
8206+++ b/drivers/dma/ioat/dma_v2.c
8207@@ -239,50 +239,20 @@ void __ioat2_restart_chan(struct ioat2_dma_chan *ioat)
8208 __ioat2_start_null_desc(ioat);
8209 }
8210
8211-int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo)
8212+static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
8213 {
8214- unsigned long end = jiffies + tmo;
8215- int err = 0;
8216+ struct ioat_chan_common *chan = &ioat->base;
8217+ unsigned long phys_complete;
8218 u32 status;
8219
8220 status = ioat_chansts(chan);
8221 if (is_ioat_active(status) || is_ioat_idle(status))
8222 ioat_suspend(chan);
8223 while (is_ioat_active(status) || is_ioat_idle(status)) {
8224- if (tmo && time_after(jiffies, end)) {
8225- err = -ETIMEDOUT;
8226- break;
8227- }
8228 status = ioat_chansts(chan);
8229 cpu_relax();
8230 }
8231
8232- return err;
8233-}
8234-
8235-int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo)
8236-{
8237- unsigned long end = jiffies + tmo;
8238- int err = 0;
8239-
8240- ioat_reset(chan);
8241- while (ioat_reset_pending(chan)) {
8242- if (end && time_after(jiffies, end)) {
8243- err = -ETIMEDOUT;
8244- break;
8245- }
8246- cpu_relax();
8247- }
8248-
8249- return err;
8250-}
8251-
8252-static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
8253-{
8254- struct ioat_chan_common *chan = &ioat->base;
8255- unsigned long phys_complete;
8256-
8257- ioat2_quiesce(chan, 0);
8258 if (ioat_cleanup_preamble(chan, &phys_complete))
8259 __cleanup(ioat, phys_complete);
8260
8261@@ -348,19 +318,6 @@ void ioat2_timer_event(unsigned long data)
8262 spin_unlock_bh(&chan->cleanup_lock);
8263 }
8264
8265-static int ioat2_reset_hw(struct ioat_chan_common *chan)
8266-{
8267- /* throw away whatever the channel was doing and get it initialized */
8268- u32 chanerr;
8269-
8270- ioat2_quiesce(chan, msecs_to_jiffies(100));
8271-
8272- chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
8273- writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
8274-
8275- return ioat2_reset_sync(chan, msecs_to_jiffies(200));
8276-}
8277-
8278 /**
8279 * ioat2_enumerate_channels - find and initialize the device's channels
8280 * @device: the device to be enumerated
8281@@ -403,10 +360,6 @@ int ioat2_enumerate_channels(struct ioatdma_device *device)
8282 (unsigned long) ioat);
8283 ioat->xfercap_log = xfercap_log;
8284 spin_lock_init(&ioat->ring_lock);
8285- if (device->reset_hw(&ioat->base)) {
8286- i = 0;
8287- break;
8288- }
8289 }
8290 dma->chancnt = i;
8291 return i;
8292@@ -514,6 +467,7 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
8293 struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
8294 struct ioat_chan_common *chan = &ioat->base;
8295 struct ioat_ring_ent **ring;
8296+ u32 chanerr;
8297 int order;
8298
8299 /* have we already been set up? */
8300@@ -523,6 +477,12 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
8301 /* Setup register to interrupt and write completion status on error */
8302 writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);
8303
8304+ chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
8305+ if (chanerr) {
8306+ dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
8307+ writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
8308+ }
8309+
8310 /* allocate a completion writeback area */
8311 /* doing 2 32bit writes to mmio since 1 64b write doesn't work */
8312 chan->completion = pci_pool_alloc(chan->device->completion_pool,
8313@@ -786,7 +746,13 @@ void ioat2_free_chan_resources(struct dma_chan *c)
8314 tasklet_disable(&chan->cleanup_task);
8315 del_timer_sync(&chan->timer);
8316 device->cleanup_tasklet((unsigned long) ioat);
8317- device->reset_hw(chan);
8318+
8319+ /* Delay 100ms after reset to allow internal DMA logic to quiesce
8320+ * before removing DMA descriptor resources.
8321+ */
8322+ writeb(IOAT_CHANCMD_RESET,
8323+ chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
8324+ mdelay(100);
8325
8326 spin_lock_bh(&ioat->ring_lock);
8327 descs = ioat2_ring_space(ioat);
8328@@ -873,7 +839,6 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
8329 int err;
8330
8331 device->enumerate_channels = ioat2_enumerate_channels;
8332- device->reset_hw = ioat2_reset_hw;
8333 device->cleanup_tasklet = ioat2_cleanup_tasklet;
8334 device->timer_fn = ioat2_timer_event;
8335 device->self_test = ioat_dma_self_test;
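
The ioat2_quiesce() and ioat2_reset_sync() helpers removed above share one bounded-polling shape: issue a command, then re-read status until the condition clears or a deadline passes, returning -ETIMEDOUT on expiry. A userspace model with a fake status register standing in for ioat_chansts():

#include <stdio.h>

static int status = 1;     /* 1 = channel active, 0 = quiesced */
static int reads_left = 3; /* hardware "settles" after three polls */

static int read_status(void)
{
    if (reads_left > 0 && --reads_left == 0)
        status = 0;
    return status;
}

/* Poll until idle or until the budget runs out; 0 on success, -1 on timeout. */
static int quiesce(int budget)
{
    while (read_status()) {
        if (budget-- <= 0)
            return -1;          /* models -ETIMEDOUT */
        /* kernel code would cpu_relax() here and compare jiffies */
    }
    return 0;
}

int main(void)
{
    printf("quiesce = %d\n", quiesce(10));  /* succeeds within budget */
    return 0;
}
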
8336diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
8337index 3afad8d..1d849ef 100644
8338--- a/drivers/dma/ioat/dma_v2.h
8339+++ b/drivers/dma/ioat/dma_v2.h
8340@@ -185,8 +185,6 @@ bool reshape_ring(struct ioat2_dma_chan *ioat, int order);
8341 void __ioat2_issue_pending(struct ioat2_dma_chan *ioat);
8342 void ioat2_cleanup_tasklet(unsigned long data);
8343 void ioat2_timer_event(unsigned long data);
8344-int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo);
8345-int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo);
8346 extern struct kobj_type ioat2_ktype;
8347 extern struct kmem_cache *ioat2_cache;
8348 #endif /* IOATDMA_V2_H */
8349diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
8350index 9908c9e..42f6f10 100644
8351--- a/drivers/dma/ioat/dma_v3.c
8352+++ b/drivers/dma/ioat/dma_v3.c
8353@@ -650,11 +650,9 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
8354
8355 num_descs = ioat2_xferlen_to_descs(ioat, len);
8356 /* we need 2x the number of descriptors to cover greater than 3
8357- * sources (we need 1 extra source in the q-only continuation
8358- * case and 3 extra sources in the p+q continuation case.
8359+ * sources
8360 */
8361- if (src_cnt + dmaf_p_disabled_continue(flags) > 3 ||
8362- (dmaf_continue(flags) && !dmaf_p_disabled_continue(flags))) {
8363+ if (src_cnt > 3 || flags & DMA_PREP_CONTINUE) {
8364 with_ext = 1;
8365 num_descs *= 2;
8366 } else
8367@@ -1130,45 +1128,6 @@ static int __devinit ioat3_dma_self_test(struct ioatdma_device *device)
8368 return 0;
8369 }
8370
8371-static int ioat3_reset_hw(struct ioat_chan_common *chan)
8372-{
8373- /* throw away whatever the channel was doing and get it
8374- * initialized, with ioat3 specific workarounds
8375- */
8376- struct ioatdma_device *device = chan->device;
8377- struct pci_dev *pdev = device->pdev;
8378- u32 chanerr;
8379- u16 dev_id;
8380- int err;
8381-
8382- ioat2_quiesce(chan, msecs_to_jiffies(100));
8383-
8384- chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
8385- writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
8386-
8387- /* -= IOAT ver.3 workarounds =- */
8388- /* Write CHANERRMSK_INT with 3E07h to mask out the errors
8389- * that can cause stability issues for IOAT ver.3, and clear any
8390- * pending errors
8391- */
8392- pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07);
8393- err = pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
8394- if (err) {
8395- dev_err(&pdev->dev, "channel error register unreachable\n");
8396- return err;
8397- }
8398- pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr);
8399-
8400- /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
8401- * (workaround for spurious config parity error after restart)
8402- */
8403- pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
8404- if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0)
8405- pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10);
8406-
8407- return ioat2_reset_sync(chan, msecs_to_jiffies(200));
8408-}
8409-
8410 int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
8411 {
8412 struct pci_dev *pdev = device->pdev;
8413@@ -1178,10 +1137,10 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
8414 struct ioat_chan_common *chan;
8415 bool is_raid_device = false;
8416 int err;
8417+ u16 dev_id;
8418 u32 cap;
8419
8420 device->enumerate_channels = ioat2_enumerate_channels;
8421- device->reset_hw = ioat3_reset_hw;
8422 device->self_test = ioat3_dma_self_test;
8423 dma = &device->common;
8424 dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
8425@@ -1257,6 +1216,19 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
8426 dma->device_prep_dma_xor_val = NULL;
8427 #endif
8428
8429+ /* -= IOAT ver.3 workarounds =- */
8430+ /* Write CHANERRMSK_INT with 3E07h to mask out the errors
8431+ * that can cause stability issues for IOAT ver.3
8432+ */
8433+ pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07);
8434+
8435+ /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
8436+ * (workaround for spurious config parity error after restart)
8437+ */
8438+ pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
8439+ if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0)
8440+ pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10);
8441+
8442 err = ioat_probe(device);
8443 if (err)
8444 return err;
8445diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h
8446index e8ae63b..f015ec1 100644
8447--- a/drivers/dma/ioat/registers.h
8448+++ b/drivers/dma/ioat/registers.h
8449@@ -27,7 +27,6 @@
8450
8451 #define IOAT_PCI_DEVICE_ID_OFFSET 0x02
8452 #define IOAT_PCI_DMAUNCERRSTS_OFFSET 0x148
8453-#define IOAT_PCI_CHANERR_INT_OFFSET 0x180
8454 #define IOAT_PCI_CHANERRMASK_INT_OFFSET 0x184
8455
8456 /* MMIO Device Registers */
8457diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
8458index 01bc8e2..a38831c 100644
8459--- a/drivers/edac/amd64_edac.c
8460+++ b/drivers/edac/amd64_edac.c
8461@@ -13,8 +13,6 @@ module_param(report_gart_errors, int, 0644);
8462 static int ecc_enable_override;
8463 module_param(ecc_enable_override, int, 0644);
8464
8465-static struct msr *msrs;
8466-
8467 /* Lookup table for all possible MC control instances */
8468 struct amd64_pvt;
8469 static struct mem_ctl_info *mci_lookup[EDAC_MAX_NUMNODES];
8470@@ -2620,90 +2618,6 @@ static int amd64_init_csrows(struct mem_ctl_info *mci)
8471 return empty;
8472 }
8473
8474-/* get all cores on this DCT */
8475-static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid)
8476-{
8477- int cpu;
8478-
8479- for_each_online_cpu(cpu)
8480- if (amd_get_nb_id(cpu) == nid)
8481- cpumask_set_cpu(cpu, mask);
8482-}
8483-
8484-/* check MCG_CTL on all the cpus on this node */
8485-static bool amd64_nb_mce_bank_enabled_on_node(int nid)
8486-{
8487- cpumask_var_t mask;
8488- int cpu, nbe;
8489- bool ret = false;
8490-
8491- if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
8492- amd64_printk(KERN_WARNING, "%s: error allocating mask\n",
8493- __func__);
8494- return false;
8495- }
8496-
8497- get_cpus_on_this_dct_cpumask(mask, nid);
8498-
8499- rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
8500-
8501- for_each_cpu(cpu, mask) {
8502- struct msr *reg = per_cpu_ptr(msrs, cpu);
8503- nbe = reg->l & K8_MSR_MCGCTL_NBE;
8504-
8505- debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
8506- cpu, reg->q,
8507- (nbe ? "enabled" : "disabled"));
8508-
8509- if (!nbe)
8510- goto out;
8511- }
8512- ret = true;
8513-
8514-out:
8515- free_cpumask_var(mask);
8516- return ret;
8517-}
8518-
8519-static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on)
8520-{
8521- cpumask_var_t cmask;
8522- int cpu;
8523-
8524- if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
8525- amd64_printk(KERN_WARNING, "%s: error allocating mask\n",
8526- __func__);
8527- return false;
8528- }
8529-
8530- get_cpus_on_this_dct_cpumask(cmask, pvt->mc_node_id);
8531-
8532- rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
8533-
8534- for_each_cpu(cpu, cmask) {
8535-
8536- struct msr *reg = per_cpu_ptr(msrs, cpu);
8537-
8538- if (on) {
8539- if (reg->l & K8_MSR_MCGCTL_NBE)
8540- pvt->flags.ecc_report = 1;
8541-
8542- reg->l |= K8_MSR_MCGCTL_NBE;
8543- } else {
8544- /*
8545- * Turn off ECC reporting only when it was off before
8546- */
8547- if (!pvt->flags.ecc_report)
8548- reg->l &= ~K8_MSR_MCGCTL_NBE;
8549- }
8550- }
8551- wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
8552-
8553- free_cpumask_var(cmask);
8554-
8555- return 0;
8556-}
8557-
8558 /*
8559 * Only if 'ecc_enable_override' is set AND BIOS had ECC disabled, do "we"
8560 * enable it.
8561@@ -2711,12 +2625,17 @@ static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on)
8562 static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
8563 {
8564 struct amd64_pvt *pvt = mci->pvt_info;
8565- int err = 0;
8566- u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
8567+ const cpumask_t *cpumask = cpumask_of_node(pvt->mc_node_id);
8568+ int cpu, idx = 0, err = 0;
8569+ struct msr msrs[cpumask_weight(cpumask)];
8570+ u32 value;
8571+ u32 mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
8572
8573 if (!ecc_enable_override)
8574 return;
8575
8576+ memset(msrs, 0, sizeof(msrs));
8577+
8578 amd64_printk(KERN_WARNING,
8579 "'ecc_enable_override' parameter is active, "
8580 "Enabling AMD ECC hardware now: CAUTION\n");
8581@@ -2732,9 +2651,16 @@ static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
8582 value |= mask;
8583 pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
8584
8585- if (amd64_toggle_ecc_err_reporting(pvt, ON))
8586- amd64_printk(KERN_WARNING, "Error enabling ECC reporting over "
8587- "MCGCTL!\n");
8588+ rdmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
8589+
8590+ for_each_cpu(cpu, cpumask) {
8591+ if (msrs[idx].l & K8_MSR_MCGCTL_NBE)
8592+ set_bit(idx, &pvt->old_mcgctl);
8593+
8594+ msrs[idx].l |= K8_MSR_MCGCTL_NBE;
8595+ idx++;
8596+ }
8597+ wrmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
8598
8599 err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &value);
8600 if (err)
8601@@ -2775,12 +2701,17 @@ static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
8602
8603 static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
8604 {
8605- int err = 0;
8606- u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
8607+ const cpumask_t *cpumask = cpumask_of_node(pvt->mc_node_id);
8608+ int cpu, idx = 0, err = 0;
8609+ struct msr msrs[cpumask_weight(cpumask)];
8610+ u32 value;
8611+ u32 mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
8612
8613 if (!pvt->nbctl_mcgctl_saved)
8614 return;
8615
8616+ memset(msrs, 0, sizeof(msrs));
8617+
8618 err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCTL, &value);
8619 if (err)
8620 debugf0("Reading K8_NBCTL failed\n");
8621@@ -2790,9 +2721,66 @@ static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
8622 /* restore the NB Enable MCGCTL bit */
8623 pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
8624
8625- if (amd64_toggle_ecc_err_reporting(pvt, OFF))
8626- amd64_printk(KERN_WARNING, "Error restoring ECC reporting over "
8627- "MCGCTL!\n");
8628+ rdmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
8629+
8630+ for_each_cpu(cpu, cpumask) {
8631+ msrs[idx].l &= ~K8_MSR_MCGCTL_NBE;
8632+ msrs[idx].l |=
8633+ test_bit(idx, &pvt->old_mcgctl) << K8_MSR_MCGCTL_NBE;
8634+ idx++;
8635+ }
8636+
8637+ wrmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
8638+}
8639+
8640+/* get all cores on this DCT */
8641+static void get_cpus_on_this_dct_cpumask(cpumask_t *mask, int nid)
8642+{
8643+ int cpu;
8644+
8645+ for_each_online_cpu(cpu)
8646+ if (amd_get_nb_id(cpu) == nid)
8647+ cpumask_set_cpu(cpu, mask);
8648+}
8649+
8650+/* check MCG_CTL on all the cpus on this node */
8651+static bool amd64_nb_mce_bank_enabled_on_node(int nid)
8652+{
8653+ cpumask_t mask;
8654+ struct msr *msrs;
8655+ int cpu, nbe, idx = 0;
8656+ bool ret = false;
8657+
8658+ cpumask_clear(&mask);
8659+
8660+ get_cpus_on_this_dct_cpumask(&mask, nid);
8661+
8662+ msrs = kzalloc(sizeof(struct msr) * cpumask_weight(&mask), GFP_KERNEL);
8663+ if (!msrs) {
8664+ amd64_printk(KERN_WARNING, "%s: error allocating msrs\n",
8665+ __func__);
8666+ return false;
8667+ }
8668+
8669+ rdmsr_on_cpus(&mask, MSR_IA32_MCG_CTL, msrs);
8670+
8671+ for_each_cpu(cpu, &mask) {
8672+ nbe = msrs[idx].l & K8_MSR_MCGCTL_NBE;
8673+
8674+ debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
8675+ cpu, msrs[idx].q,
8676+ (nbe ? "enabled" : "disabled"));
8677+
8678+ if (!nbe)
8679+ goto out;
8680+
8681+ idx++;
8682+ }
8683+ ret = true;
8684+
8685+out:
8686+ kfree(msrs);
8687+ return ret;
8688 }
8689
8690 /*
8691@@ -2801,11 +2789,10 @@ static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
8692 * the memory system completely. A command line option allows to force-enable
8693 * hardware ECC later in amd64_enable_ecc_error_reporting().
8694 */
8695-static const char *ecc_msg =
8696- "ECC disabled in the BIOS or no ECC capability, module will not load.\n"
8697- " Either enable ECC checking or force module loading by setting "
8698- "'ecc_enable_override'.\n"
8699- " (Note that use of the override may cause unknown side effects.)\n";
8700+static const char *ecc_warning =
8701+ "WARNING: ECC is disabled by BIOS. Module will NOT be loaded.\n"
8702+ " Either Enable ECC in the BIOS, or set 'ecc_enable_override'.\n"
8703+ " Also, use of the override can cause unknown side effects.\n";
8704
8705 static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
8706 {
8707@@ -2820,7 +2807,7 @@ static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
8708
8709 ecc_enabled = !!(value & K8_NBCFG_ECC_ENABLE);
8710 if (!ecc_enabled)
8711- amd64_printk(KERN_NOTICE, "This node reports that Memory ECC "
8712+ amd64_printk(KERN_WARNING, "This node reports that Memory ECC "
8713 "is currently disabled, set F3x%x[22] (%s).\n",
8714 K8_NBCFG, pci_name(pvt->misc_f3_ctl));
8715 else
8716@@ -2828,17 +2815,18 @@ static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
8717
8718 nb_mce_en = amd64_nb_mce_bank_enabled_on_node(pvt->mc_node_id);
8719 if (!nb_mce_en)
8720- amd64_printk(KERN_NOTICE, "NB MCE bank disabled, set MSR "
8721+ amd64_printk(KERN_WARNING, "NB MCE bank disabled, set MSR "
8722 "0x%08x[4] on node %d to enable.\n",
8723 MSR_IA32_MCG_CTL, pvt->mc_node_id);
8724
8725 if (!ecc_enabled || !nb_mce_en) {
8726 if (!ecc_enable_override) {
8727- amd64_printk(KERN_NOTICE, "%s", ecc_msg);
8728+ amd64_printk(KERN_WARNING, "%s", ecc_warning);
8729 return -ENODEV;
8730 }
8731+ } else
8732+ /* CLEAR the override, since BIOS controlled it */
8733 ecc_enable_override = 0;
8734- }
8735
8736 return 0;
8737 }
8738@@ -2921,6 +2909,7 @@ static int amd64_probe_one_instance(struct pci_dev *dram_f2_ctl,
8739 pvt->ext_model = boot_cpu_data.x86_model >> 4;
8740 pvt->mc_type_index = mc_type_index;
8741 pvt->ops = family_ops(mc_type_index);
8742+ pvt->old_mcgctl = 0;
8743
8744 /*
8745 * We have the dram_f2_ctl device as an argument, now go reserve its
8746@@ -3082,15 +3071,16 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
8747
8748 amd64_free_mc_sibling_devices(pvt);
8749
8750+ kfree(pvt);
8751+ mci->pvt_info = NULL;
8752+
8753+ mci_lookup[pvt->mc_node_id] = NULL;
8754+
8755 /* unregister from EDAC MCE */
8756 amd_report_gart_errors(false);
8757 amd_unregister_ecc_decoder(amd64_decode_bus_error);
8758
8759 /* Free the EDAC CORE resources */
8760- mci->pvt_info = NULL;
8761- mci_lookup[pvt->mc_node_id] = NULL;
8762-
8763- kfree(pvt);
8764 edac_mc_free(mci);
8765 }
8766
8767@@ -3167,29 +3157,23 @@ static void amd64_setup_pci_device(void)
8768 static int __init amd64_edac_init(void)
8769 {
8770 int nb, err = -ENODEV;
8771- bool load_ok = false;
8772
8773 edac_printk(KERN_INFO, EDAC_MOD_STR, EDAC_AMD64_VERSION "\n");
8774
8775 opstate_init();
8776
8777 if (cache_k8_northbridges() < 0)
8778- goto err_ret;
8779-
8780- msrs = msrs_alloc();
8781- if (!msrs)
8782- goto err_ret;
8783+ return err;
8784
8785 err = pci_register_driver(&amd64_pci_driver);
8786 if (err)
8787- goto err_pci;
8788+ return err;
8789
8790 /*
8791 * At this point, the array 'pvt_lookup[]' contains pointers to alloc'd
8792 * amd64_pvt structs. These will be used in the 2nd stage init function
8793 * to finish initialization of the MC instances.
8794 */
8795- err = -ENODEV;
8796 for (nb = 0; nb < num_k8_northbridges; nb++) {
8797 if (!pvt_lookup[nb])
8798 continue;
8799@@ -3197,21 +3181,16 @@ static int __init amd64_edac_init(void)
8800 err = amd64_init_2nd_stage(pvt_lookup[nb]);
8801 if (err)
8802 goto err_2nd_stage;
8803-
8804- load_ok = true;
8805 }
8806
8807- if (load_ok) {
8808- amd64_setup_pci_device();
8809- return 0;
8810- }
8811+ amd64_setup_pci_device();
8812+
8813+ return 0;
8814
8815 err_2nd_stage:
8816+ debugf0("2nd stage failed\n");
8817 pci_unregister_driver(&amd64_pci_driver);
8818-err_pci:
8819- msrs_free(msrs);
8820- msrs = NULL;
8821-err_ret:
8822+
8823 return err;
8824 }
8825
8826@@ -3221,9 +3200,6 @@ static void __exit amd64_edac_exit(void)
8827 edac_pci_release_generic_ctl(amd64_ctl_pci);
8828
8829 pci_unregister_driver(&amd64_pci_driver);
8830-
8831- msrs_free(msrs);
8832- msrs = NULL;
8833 }
8834
8835 module_init(amd64_edac_init);
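
The amd64_edac.c hunks above replace the driver-lifetime `msrs` buffer (allocated with msrs_alloc() at module init and freed at exit) with MSR arrays sized locally per call, and reintroduce the `old_mcgctl` bitmask that records each core's original MCG_CTL north-bridge-error-enable bit so teardown can restore it. A minimal userspace sketch of that save, force-on, restore pattern; read_nbe()/write_nbe() are illustrative stand-ins for rdmsr/wrmsr on one core, and NBE_BIT is only an analogue of K8_MSR_MCGCTL_NBE:

#include <stdio.h>

#define NBE_BIT (1u << 4)   /* analogue of K8_MSR_MCGCTL_NBE */
#define NCPUS   4

static unsigned int mcgctl[NCPUS] = { NBE_BIT, 0, NBE_BIT, 0 }; /* fake MSRs */

static unsigned int read_nbe(int cpu) { return mcgctl[cpu] & NBE_BIT; }

static void write_nbe(int cpu, int on)
{
    if (on)
        mcgctl[cpu] |= NBE_BIT;
    else
        mcgctl[cpu] &= ~NBE_BIT;
}

int main(void)
{
    unsigned long old = 0;  /* one bit per core, as in pvt->old_mcgctl */
    int cpu;

    /* enable: remember which cores already had NBE set, then set it */
    for (cpu = 0; cpu < NCPUS; cpu++) {
        if (read_nbe(cpu))
            old |= 1ul << cpu;
        write_nbe(cpu, 1);
    }

    /* restore: put each core back to its saved state */
    for (cpu = 0; cpu < NCPUS; cpu++)
        write_nbe(cpu, (old >> cpu) & 1);

    for (cpu = 0; cpu < NCPUS; cpu++)
        printf("cpu %d NBE=%u\n", cpu, read_nbe(cpu) ? 1u : 0u);
    return 0;
}
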
8836diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
8837index bba6c94..c6f359a 100644
8838--- a/drivers/edac/amd64_edac.h
8839+++ b/drivers/edac/amd64_edac.h
8840@@ -147,8 +147,6 @@
8841 #define MAX_CS_COUNT 8
8842 #define DRAM_REG_COUNT 8
8843
8844-#define ON true
8845-#define OFF false
8846
8847 /*
8848 * PCI-defined configuration space registers
8849@@ -388,7 +386,10 @@ enum {
8850 #define K8_NBCAP_DUAL_NODE BIT(1)
8851 #define K8_NBCAP_DCT_DUAL BIT(0)
8852
8853-/* MSRs */
8854+/*
8855+ * MSR Regs
8856+ */
8857+#define K8_MSR_MCGCTL 0x017b
8858 #define K8_MSR_MCGCTL_NBE BIT(4)
8859
8860 #define K8_MSR_MC4CTL 0x0410
8861@@ -486,6 +487,7 @@ struct amd64_pvt {
8862 /* Save old hw registers' values before we modified them */
8863 u32 nbctl_mcgctl_saved; /* When true, following 2 are valid */
8864 u32 old_nbctl;
8865+ unsigned long old_mcgctl; /* per core on this node */
8866
8867 /* MC Type Index value: socket F vs Family 10h */
8868 u32 mc_type_index;
8869@@ -493,7 +495,6 @@ struct amd64_pvt {
8870 /* misc settings */
8871 struct flags {
8872 unsigned long cf8_extcfg:1;
8873- unsigned long ecc_report:1;
8874 } flags;
8875 };
8876
8877diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
8878index adc10a2..77a9579 100644
8879--- a/drivers/edac/i5000_edac.c
8880+++ b/drivers/edac/i5000_edac.c
8881@@ -577,13 +577,7 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
8882 debugf0("\tUncorrected bits= 0x%x\n", ue_errors);
8883
8884 branch = EXTRACT_FBDCHAN_INDX(info->ferr_nf_fbd);
8885-
8886- /*
8887- * According with i5000 datasheet, bit 28 has no significance
8888- * for errors M4Err-M12Err and M17Err-M21Err, on FERR_NF_FBD
8889- */
8890- channel = branch & 2;
8891-
8892+ channel = branch;
8893 bank = NREC_BANK(info->nrecmema);
8894 rank = NREC_RANK(info->nrecmema);
8895 rdwr = NREC_RDWR(info->nrecmema);
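
The i5000_edac.c hunk restores `channel = branch` in place of the masked `channel = branch & 2`; the dropped comment explained that FERR_NF_FBD bit 28 carries no significance for several error codes, which is why only the upper bit of the two-bit branch index had been kept. A tiny sketch showing where the two expressions diverge:

#include <stdio.h>

int main(void)
{
    unsigned int branch;

    /* the two-bit branch index as extracted from FERR_NF_FBD */
    for (branch = 0; branch < 4; branch++)
        printf("branch=%u -> masked channel=%u, unmasked channel=%u\n",
               branch, branch & 2, branch);
    return 0;
}
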
8896diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
8897index ed635ae..e4864e8 100644
8898--- a/drivers/firewire/core-card.c
8899+++ b/drivers/firewire/core-card.c
8900@@ -57,9 +57,6 @@ static LIST_HEAD(card_list);
8901 static LIST_HEAD(descriptor_list);
8902 static int descriptor_count;
8903
8904-/* ROM header, bus info block, root dir header, capabilities = 7 quadlets */
8905-static size_t config_rom_length = 1 + 4 + 1 + 1;
8906-
8907 #define BIB_CRC(v) ((v) << 0)
8908 #define BIB_CRC_LENGTH(v) ((v) << 16)
8909 #define BIB_INFO_LENGTH(v) ((v) << 24)
8910@@ -75,7 +72,7 @@ static size_t config_rom_length = 1 + 4 + 1 + 1;
8911 #define BIB_CMC ((1) << 30)
8912 #define BIB_IMC ((1) << 31)
8913
8914-static u32 *generate_config_rom(struct fw_card *card)
8915+static u32 *generate_config_rom(struct fw_card *card, size_t *config_rom_length)
8916 {
8917 struct fw_descriptor *desc;
8918 static u32 config_rom[256];
8919@@ -134,7 +131,7 @@ static u32 *generate_config_rom(struct fw_card *card)
8920 for (i = 0; i < j; i += length + 1)
8921 length = fw_compute_block_crc(config_rom + i);
8922
8923- WARN_ON(j != config_rom_length);
8924+ *config_rom_length = j;
8925
8926 return config_rom;
8927 }
8928@@ -143,24 +140,17 @@ static void update_config_roms(void)
8929 {
8930 struct fw_card *card;
8931 u32 *config_rom;
8932+ size_t length;
8933
8934 list_for_each_entry (card, &card_list, link) {
8935- config_rom = generate_config_rom(card);
8936- card->driver->set_config_rom(card, config_rom,
8937- config_rom_length);
8938+ config_rom = generate_config_rom(card, &length);
8939+ card->driver->set_config_rom(card, config_rom, length);
8940 }
8941 }
8942
8943-static size_t required_space(struct fw_descriptor *desc)
8944-{
8945- /* descriptor + entry into root dir + optional immediate entry */
8946- return desc->length + 1 + (desc->immediate > 0 ? 1 : 0);
8947-}
8948-
8949 int fw_core_add_descriptor(struct fw_descriptor *desc)
8950 {
8951 size_t i;
8952- int ret;
8953
8954 /*
8955 * Check descriptor is valid; the length of all blocks in the
8956@@ -176,21 +166,15 @@ int fw_core_add_descriptor(struct fw_descriptor *desc)
8957
8958 mutex_lock(&card_mutex);
8959
8960- if (config_rom_length + required_space(desc) > 256) {
8961- ret = -EBUSY;
8962- } else {
8963- list_add_tail(&desc->link, &descriptor_list);
8964- config_rom_length += required_space(desc);
8965+ list_add_tail(&desc->link, &descriptor_list);
8966+ descriptor_count++;
8967+ if (desc->immediate > 0)
8968 descriptor_count++;
8969- if (desc->immediate > 0)
8970- descriptor_count++;
8971- update_config_roms();
8972- ret = 0;
8973- }
8974+ update_config_roms();
8975
8976 mutex_unlock(&card_mutex);
8977
8978- return ret;
8979+ return 0;
8980 }
8981 EXPORT_SYMBOL(fw_core_add_descriptor);
8982
8983@@ -199,7 +183,6 @@ void fw_core_remove_descriptor(struct fw_descriptor *desc)
8984 mutex_lock(&card_mutex);
8985
8986 list_del(&desc->link);
8987- config_rom_length -= required_space(desc);
8988 descriptor_count--;
8989 if (desc->immediate > 0)
8990 descriptor_count--;
8991@@ -453,6 +436,7 @@ int fw_card_add(struct fw_card *card,
8992 u32 max_receive, u32 link_speed, u64 guid)
8993 {
8994 u32 *config_rom;
8995+ size_t length;
8996 int ret;
8997
8998 card->max_receive = max_receive;
8999@@ -461,8 +445,8 @@ int fw_card_add(struct fw_card *card,
9000
9001 mutex_lock(&card_mutex);
9002
9003- config_rom = generate_config_rom(card);
9004- ret = card->driver->enable(card, config_rom, config_rom_length);
9005+ config_rom = generate_config_rom(card, &length);
9006+ ret = card->driver->enable(card, config_rom, length);
9007 if (ret == 0)
9008 list_add_tail(&card->link, &card_list);
9009
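
The core-card.c hunks remove the globally tracked `config_rom_length` together with `required_space()` and its 256-quadlet -EBUSY overflow check (fw_core_add_descriptor() now always returns 0); instead, `generate_config_rom()` reports the real length through an out-parameter, so every caller sees a size consistent with the buffer it was just handed. A compact userspace sketch of the out-parameter shape; contents and sizes below are placeholders:

#include <stddef.h>
#include <stdio.h>

static unsigned int rom[256];

/* Build the block and report its length through an out-parameter,
 * rather than maintaining a separate global length that can drift
 * out of sync with the generated data. */
static unsigned int *generate_rom(size_t *length)
{
    size_t j = 0;

    rom[j++] = 0x04040404;  /* header quadlet (placeholder) */
    rom[j++] = 0x31333934;  /* bus name (placeholder)       */
    *length = j;            /* caller gets the real size    */
    return rom;
}

int main(void)
{
    size_t len;
    unsigned int *r = generate_rom(&len);

    printf("rom at %p, %zu quadlets\n", (void *)r, len);
    return 0;
}
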
9010diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
9011index 720b39b..94260aa 100644
9012--- a/drivers/firewire/ohci.c
9013+++ b/drivers/firewire/ohci.c
9014@@ -2209,13 +2209,6 @@ static int ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
9015 page = payload >> PAGE_SHIFT;
9016 offset = payload & ~PAGE_MASK;
9017 rest = p->payload_length;
9018- /*
9019- * The controllers I've tested have not worked correctly when
9020- * second_req_count is zero. Rather than do something we know won't
9021- * work, return an error
9022- */
9023- if (rest == 0)
9024- return -EINVAL;
9025
9026 /* FIXME: make packet-per-buffer/dual-buffer a context option */
9027 while (rest > 0) {
9028@@ -2269,7 +2262,7 @@ static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
9029 unsigned long payload)
9030 {
9031 struct iso_context *ctx = container_of(base, struct iso_context, base);
9032- struct descriptor *d, *pd;
9033+ struct descriptor *d = NULL, *pd = NULL;
9034 struct fw_iso_packet *p = packet;
9035 dma_addr_t d_bus, page_bus;
9036 u32 z, header_z, rest;
9037@@ -2307,9 +2300,8 @@ static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
9038 d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d)));
9039
9040 rest = payload_per_buffer;
9041- pd = d;
9042 for (j = 1; j < z; j++) {
9043- pd++;
9044+ pd = d + j;
9045 pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
9046 DESCRIPTOR_INPUT_MORE);
9047
9048@@ -2412,7 +2404,6 @@ static void ohci_pmac_off(struct pci_dev *dev)
9049
9050 #define PCI_VENDOR_ID_AGERE PCI_VENDOR_ID_ATT
9051 #define PCI_DEVICE_ID_AGERE_FW643 0x5901
9052-#define PCI_DEVICE_ID_TI_TSB43AB23 0x8024
9053
9054 static int __devinit pci_probe(struct pci_dev *dev,
9055 const struct pci_device_id *ent)
9056@@ -2478,8 +2469,7 @@ static int __devinit pci_probe(struct pci_dev *dev,
9057 #if !defined(CONFIG_X86_32)
9058 /* dual-buffer mode is broken with descriptor addresses above 2G */
9059 if (dev->vendor == PCI_VENDOR_ID_TI &&
9060- (dev->device == PCI_DEVICE_ID_TI_TSB43AB22 ||
9061- dev->device == PCI_DEVICE_ID_TI_TSB43AB23))
9062+ dev->device == PCI_DEVICE_ID_TI_TSB43AB22)
9063 ohci->use_dualbuffer = false;
9064 #endif
9065
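
The ohci.c hunks drop the `rest == 0` -EINVAL guard in dual-buffer queueing, drop the TSB43AB23 entry from the dual-buffer blacklist, and switch the packet-per-buffer walk to NULL-initialized descriptor pointers indexed explicitly from the base (`pd = d + j`) instead of an incremented cursor. A sketch of that indexing shape over a simulated descriptor array:

#include <stdio.h>

struct desc { int control; };

int main(void)
{
    struct desc d[4] = { { 0 } };
    struct desc *pd = NULL;
    int z = 4, j;

    /* index explicitly from the base, as in the patched walk;
     * pd ends up pointing at the last descriptor touched */
    for (j = 1; j < z; j++) {
        pd = d + j;
        pd->control = j;
    }

    printf("last control = %d\n", pd ? pd->control : -1);
    return 0;
}
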
9066diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
9067index 3a2ccb0..938100f 100644
9068--- a/drivers/firmware/dmi_scan.c
9069+++ b/drivers/firmware/dmi_scan.c
9070@@ -429,7 +429,7 @@ static bool dmi_matches(const struct dmi_system_id *dmi)
9071 for (i = 0; i < ARRAY_SIZE(dmi->matches); i++) {
9072 int s = dmi->matches[i].slot;
9073 if (s == DMI_NONE)
9074- break;
9075+ continue;
9076 if (dmi_ident[s]
9077 && strstr(dmi_ident[s], dmi->matches[i].substr))
9078 continue;
9079@@ -440,15 +440,6 @@ static bool dmi_matches(const struct dmi_system_id *dmi)
9080 }
9081
9082 /**
9083- * dmi_is_end_of_table - check for end-of-table marker
9084- * @dmi: pointer to the dmi_system_id structure to check
9085- */
9086-static bool dmi_is_end_of_table(const struct dmi_system_id *dmi)
9087-{
9088- return dmi->matches[0].slot == DMI_NONE;
9089-}
9090-
9091-/**
9092 * dmi_check_system - check system DMI data
9093 * @list: array of dmi_system_id structures to match against
9094 * All non-null elements of the list must match
9095@@ -466,7 +457,7 @@ int dmi_check_system(const struct dmi_system_id *list)
9096 int count = 0;
9097 const struct dmi_system_id *d;
9098
9099- for (d = list; !dmi_is_end_of_table(d); d++)
9100+ for (d = list; d->ident; d++)
9101 if (dmi_matches(d)) {
9102 count++;
9103 if (d->callback && d->callback(d))
9104@@ -493,7 +484,7 @@ const struct dmi_system_id *dmi_first_match(const struct dmi_system_id *list)
9105 {
9106 const struct dmi_system_id *d;
9107
9108- for (d = list; !dmi_is_end_of_table(d); d++)
9109+ for (d = list; d->ident; d++)
9110 if (dmi_matches(d))
9111 return d;
9112
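
The dmi_scan.c hunks switch table termination back from the `dmi_is_end_of_table()` helper (which keyed off a DMI_NONE first slot) to a NULL `ident` sentinel, and inside `dmi_matches()` a DMI_NONE slot is again skipped with `continue` rather than ending the scan. A runnable sketch of sentinel-terminated table matching; the struct and entries are illustrative, not the kernel's dmi_system_id:

#include <stdio.h>
#include <string.h>

struct sysid {
    const char *ident;   /* NULL terminates the table */
    const char *substr;
};

static const struct sysid table[] = {
    { "BoardA", "Vendor X" },
    { "BoardB", "Vendor Y" },
    { NULL, NULL }           /* terminator */
};

int main(void)
{
    const char *vendor = "Vendor Y";

    /* walk until the NULL-ident sentinel, as in the restored loops */
    for (const struct sysid *d = table; d->ident; d++)
        if (strstr(vendor, d->substr))
            printf("matched %s\n", d->ident);
    return 0;
}
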
9113diff --git a/drivers/gpu/drm/ati_pcigart.c b/drivers/gpu/drm/ati_pcigart.c
9114index a1fce68..628eae3 100644
9115--- a/drivers/gpu/drm/ati_pcigart.c
9116+++ b/drivers/gpu/drm/ati_pcigart.c
9117@@ -39,7 +39,8 @@ static int drm_ati_alloc_pcigart_table(struct drm_device *dev,
9118 struct drm_ati_pcigart_info *gart_info)
9119 {
9120 gart_info->table_handle = drm_pci_alloc(dev, gart_info->table_size,
9121- PAGE_SIZE);
9122+ PAGE_SIZE,
9123+ gart_info->table_mask);
9124 if (gart_info->table_handle == NULL)
9125 return -ENOMEM;
9126
9127@@ -111,13 +112,6 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
9128 if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
9129 DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n");
9130
9131- if (pci_set_dma_mask(dev->pdev, gart_info->table_mask)) {
9132- DRM_ERROR("fail to set dma mask to 0x%Lx\n",
9133- gart_info->table_mask);
9134- ret = 1;
9135- goto done;
9136- }
9137-
9138 ret = drm_ati_alloc_pcigart_table(dev, gart_info);
9139 if (ret) {
9140 DRM_ERROR("cannot allocate PCI GART page!\n");
9141diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
9142index 8417cc4..3d09e30 100644
9143--- a/drivers/gpu/drm/drm_bufs.c
9144+++ b/drivers/gpu/drm/drm_bufs.c
9145@@ -326,7 +326,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
9146 * As we're limiting the address to 2^32-1 (or less),
9147 * casting it down to 32 bits is no problem, but we
9148 * need to point to a 64bit variable first. */
9149- dmah = drm_pci_alloc(dev, map->size, map->size);
9150+ dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
9151 if (!dmah) {
9152 kfree(map);
9153 return -ENOMEM;
9154@@ -885,7 +885,7 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
9155
9156 while (entry->buf_count < count) {
9157
9158- dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);
9159+ dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful);
9160
9161 if (!dmah) {
9162 /* Set count correctly so we free the proper amount. */
9163diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
9164index afed886..bbfd110 100644
9165--- a/drivers/gpu/drm/drm_crtc_helper.c
9166+++ b/drivers/gpu/drm/drm_crtc_helper.c
9167@@ -1020,9 +1020,6 @@ bool drm_helper_initial_config(struct drm_device *dev)
9168 {
9169 int count = 0;
9170
9171- /* disable all the possible outputs/crtcs before entering KMS mode */
9172- drm_helper_disable_unused_functions(dev);
9173-
9174 drm_fb_helper_parse_command_line(dev);
9175
9176 count = drm_helper_probe_connector_modes(dev,
9177diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
9178index 8bf3770..e9dbb48 100644
9179--- a/drivers/gpu/drm/drm_gem.c
9180+++ b/drivers/gpu/drm/drm_gem.c
9181@@ -142,6 +142,19 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
9182 if (IS_ERR(obj->filp))
9183 goto free;
9184
9185+ /* Basically we want to disable the OOM killer and handle ENOMEM
9186+ * ourselves by sacrificing pages from cached buffers.
9187+ * XXX shmem_file_[gs]et_gfp_mask()
9188+ */
9189+ mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping,
9190+ GFP_HIGHUSER |
9191+ __GFP_COLD |
9192+ __GFP_FS |
9193+ __GFP_RECLAIMABLE |
9194+ __GFP_NORETRY |
9195+ __GFP_NOWARN |
9196+ __GFP_NOMEMALLOC);
9197+
9198 kref_init(&obj->refcount);
9199 kref_init(&obj->handlecount);
9200 obj->size = size;
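
The drm_gem.c hunk sets a fail-fast allocation mask on the GEM object's shmem mapping at creation time: no hard retries, no warnings, no dipping into emergency reserves, so page allocation returns ENOMEM quickly and the driver can sacrifice its own cached buffers instead of waking the OOM killer. A sketch of the flag composition; the bit values here are illustrative, not the kernel's real gfp bits:

#include <stdio.h>

#define FLAG_HIGHUSER   (1u << 0)
#define FLAG_COLD       (1u << 1)
#define FLAG_NORETRY    (1u << 2)   /* fail fast instead of looping    */
#define FLAG_NOWARN     (1u << 3)   /* no allocation-failure warnings  */
#define FLAG_NOMEMALLOC (1u << 4)   /* keep out of emergency reserves  */

int main(void)
{
    unsigned int mask = FLAG_HIGHUSER | FLAG_COLD | FLAG_NORETRY |
                        FLAG_NOWARN | FLAG_NOMEMALLOC;

    printf("mask = 0x%x, fail-fast? %s\n",
           mask, (mask & FLAG_NORETRY) ? "yes" : "no");
    return 0;
}
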
9201diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
9202index 332d743..0a6f0b3 100644
9203--- a/drivers/gpu/drm/drm_irq.c
9204+++ b/drivers/gpu/drm/drm_irq.c
9205@@ -429,21 +429,15 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
9206
9207 spin_lock_irqsave(&dev->vbl_lock, irqflags);
9208 /* Going from 0->1 means we have to enable interrupts again */
9209- if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) {
9210- if (!dev->vblank_enabled[crtc]) {
9211- ret = dev->driver->enable_vblank(dev, crtc);
9212- DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
9213- if (ret)
9214- atomic_dec(&dev->vblank_refcount[crtc]);
9215- else {
9216- dev->vblank_enabled[crtc] = 1;
9217- drm_update_vblank_count(dev, crtc);
9218- }
9219- }
9220- } else {
9221- if (!dev->vblank_enabled[crtc]) {
9222+ if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1 &&
9223+ !dev->vblank_enabled[crtc]) {
9224+ ret = dev->driver->enable_vblank(dev, crtc);
9225+ DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
9226+ if (ret)
9227 atomic_dec(&dev->vblank_refcount[crtc]);
9228- ret = -EINVAL;
9229+ else {
9230+ dev->vblank_enabled[crtc] = 1;
9231+ drm_update_vblank_count(dev, crtc);
9232 }
9233 }
9234 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
9235@@ -470,18 +464,6 @@ void drm_vblank_put(struct drm_device *dev, int crtc)
9236 }
9237 EXPORT_SYMBOL(drm_vblank_put);
9238
9239-void drm_vblank_off(struct drm_device *dev, int crtc)
9240-{
9241- unsigned long irqflags;
9242-
9243- spin_lock_irqsave(&dev->vbl_lock, irqflags);
9244- DRM_WAKEUP(&dev->vbl_queue[crtc]);
9245- dev->vblank_enabled[crtc] = 0;
9246- dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
9247- spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
9248-}
9249-EXPORT_SYMBOL(drm_vblank_off);
9250-
9251 /**
9252 * drm_vblank_pre_modeset - account for vblanks across mode sets
9253 * @dev: DRM device
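
The drm_irq.c hunks flatten drm_vblank_get() so that only the 0-to-1 refcount transition enables the interrupt, rolling the count back if the enable fails, and they drop drm_vblank_off() along with the -EINVAL branch for a reference taken while the interrupt is disabled. A userspace sketch of the 0-to-1 pattern; the real function holds dev->vbl_lock around all of this, and enable_vblank() stands in for the driver callback:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int refcount;
static int vblank_enabled;

static int enable_vblank(void) { return 0; /* pretend success */ }

static int vblank_get(void)
{
    int ret = 0;

    /* going from 0 to 1 means we have to enable interrupts */
    if (atomic_fetch_add(&refcount, 1) + 1 == 1 && !vblank_enabled) {
        ret = enable_vblank();
        if (ret)
            atomic_fetch_sub(&refcount, 1);  /* undo on failure */
        else
            vblank_enabled = 1;
    }
    return ret;
}

int main(void)
{
    vblank_get();
    vblank_get();  /* second ref: no re-enable */
    printf("refs=%d enabled=%d\n", atomic_load(&refcount), vblank_enabled);
    return 0;
}
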
9254diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
9255index e68ebf9..577094f 100644
9256--- a/drivers/gpu/drm/drm_pci.c
9257+++ b/drivers/gpu/drm/drm_pci.c
9258@@ -47,7 +47,8 @@
9259 /**
9260 * \brief Allocate a PCI consistent memory block, for DMA.
9261 */
9262-drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align)
9263+drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align,
9264+ dma_addr_t maxaddr)
9265 {
9266 drm_dma_handle_t *dmah;
9267 #if 1
9268@@ -62,6 +63,11 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali
9269 if (align > size)
9270 return NULL;
9271
9272+ if (pci_set_dma_mask(dev->pdev, maxaddr) != 0) {
9273+ DRM_ERROR("Setting pci dma mask failed\n");
9274+ return NULL;
9275+ }
9276+
9277 dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
9278 if (!dmah)
9279 return NULL;
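
With this hunk, drm_pci_alloc() takes the DMA address ceiling as a fourth parameter and applies it itself via pci_set_dma_mask(), which is why the ati_pcigart, drm_bufs, and i915 callers above now pass a 0xffffffff ceiling instead of setting the mask separately. A userspace analogue of the shape; the struct and set_dma_mask() are stand-ins, and real consistent-DMA allocation is of course not aligned_alloc():

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct dev { uint64_t dma_mask; };

static int set_dma_mask(struct dev *d, uint64_t mask)
{
    d->dma_mask = mask;  /* stand-in for pci_set_dma_mask() */
    return 0;
}

static void *dma_alloc(struct dev *d, size_t size, size_t align,
                       uint64_t maxaddr)
{
    if (align > size)
        return NULL;
    /* apply the caller's ceiling before allocating */
    if (set_dma_mask(d, maxaddr) != 0)
        return NULL;
    return aligned_alloc(align ? align : sizeof(void *), size);
}

int main(void)
{
    struct dev d = { 0 };
    void *p = dma_alloc(&d, 4096, 4096, 0xffffffffu);

    printf("mask=0x%llx buf=%p\n", (unsigned long long)d.dma_mask, p);
    free(p);
    return 0;
}
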
9280diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
9281index 7e859d6..26bf055 100644
9282--- a/drivers/gpu/drm/i915/i915_debugfs.c
9283+++ b/drivers/gpu/drm/i915/i915_debugfs.c
9284@@ -288,7 +288,7 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
9285 list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
9286 obj = obj_priv->obj;
9287 if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
9288- ret = i915_gem_object_get_pages(obj, 0);
9289+ ret = i915_gem_object_get_pages(obj);
9290 if (ret) {
9291 DRM_ERROR("Failed to get pages: %d\n", ret);
9292 spin_unlock(&dev_priv->mm.active_list_lock);
9293@@ -384,7 +384,37 @@ out:
9294 return 0;
9295 }
9296
9297+static int i915_registers_info(struct seq_file *m, void *data) {
9298+ struct drm_info_node *node = (struct drm_info_node *) m->private;
9299+ struct drm_device *dev = node->minor->dev;
9300+ drm_i915_private_t *dev_priv = dev->dev_private;
9301+ uint32_t reg;
9302+
9303+#define DUMP_RANGE(start, end) \
9304+ for (reg=start; reg < end; reg += 4) \
9305+ seq_printf(m, "%08x\t%08x\n", reg, I915_READ(reg));
9306+
9307+ DUMP_RANGE(0x00000, 0x00fff); /* VGA registers */
9308+ DUMP_RANGE(0x02000, 0x02fff); /* instruction, memory, interrupt control registers */
9309+ DUMP_RANGE(0x03000, 0x031ff); /* FENCE and PPGTT control registers */
9310+ DUMP_RANGE(0x03200, 0x03fff); /* frame buffer compression registers */
9311+ DUMP_RANGE(0x05000, 0x05fff); /* I/O control registers */
9312+ DUMP_RANGE(0x06000, 0x06fff); /* clock control registers */
9313+ DUMP_RANGE(0x07000, 0x07fff); /* 3D internal debug registers */
9314+ DUMP_RANGE(0x07400, 0x088ff); /* GPE debug registers */
9315+ DUMP_RANGE(0x0a000, 0x0afff); /* display palette registers */
9316+ DUMP_RANGE(0x10000, 0x13fff); /* MMIO MCHBAR */
9317+ DUMP_RANGE(0x30000, 0x3ffff); /* overlay registers */
9318+ DUMP_RANGE(0x60000, 0x6ffff); /* display engine pipeline registers */
9319+ DUMP_RANGE(0x70000, 0x72fff); /* display and cursor registers */
9320+ DUMP_RANGE(0x73000, 0x73fff); /* performance counters */
9321+
9322+ return 0;
9323+}
9324+
9325+
9326 static struct drm_info_list i915_debugfs_list[] = {
9327+ {"i915_regs", i915_registers_info, 0},
9328 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
9329 {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
9330 {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
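
The i915_debugfs.c hunk adds an `i915_regs` debugfs node built around a DUMP_RANGE macro that walks a register range in 4-byte steps and prints offset/value pairs for each documented block. A standalone sketch of the idiom; read_reg() fakes I915_READ() with arbitrary values:

#include <stdio.h>

static unsigned int read_reg(unsigned int reg) { return reg ^ 0xdeadbeef; }

#define DUMP_RANGE(start, end)                                  \
    for (unsigned int reg = (start); reg < (end); reg += 4)    \
        printf("%08x\t%08x\n", reg, read_reg(reg))

int main(void)
{
    DUMP_RANGE(0x02000, 0x02010); /* a few instruction-control regs */
    return 0;
}
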
9331diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
9332index eaa1893..e5b138b 100644
9333--- a/drivers/gpu/drm/i915/i915_dma.c
9334+++ b/drivers/gpu/drm/i915/i915_dma.c
9335@@ -123,7 +123,7 @@ static int i915_init_phys_hws(struct drm_device *dev)
9336 drm_i915_private_t *dev_priv = dev->dev_private;
9337 /* Program Hardware Status Page */
9338 dev_priv->status_page_dmah =
9339- drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
9340+ drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
9341
9342 if (!dev_priv->status_page_dmah) {
9343 DRM_ERROR("Can not allocate hardware status page\n");
9344@@ -1111,8 +1111,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
9345 {
9346 struct drm_i915_private *dev_priv = dev->dev_private;
9347 struct drm_mm_node *compressed_fb, *compressed_llb;
9348- unsigned long cfb_base;
9349- unsigned long ll_base = 0;
9350+ unsigned long cfb_base, ll_base;
9351
9352 /* Leave 1M for line length buffer & misc. */
9353 compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0);
9354@@ -1252,8 +1251,6 @@ static int i915_load_modeset_init(struct drm_device *dev,
9355 if (ret)
9356 goto destroy_ringbuffer;
9357
9358- intel_modeset_init(dev);
9359-
9360 ret = drm_irq_install(dev);
9361 if (ret)
9362 goto destroy_ringbuffer;
9363@@ -1268,6 +1265,8 @@ static int i915_load_modeset_init(struct drm_device *dev,
9364
9365 I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
9366
9367+ intel_modeset_init(dev);
9368+
9369 drm_helper_initial_config(dev);
9370
9371 return 0;
9372diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
9373index f5d49a7..a725f65 100644
9374--- a/drivers/gpu/drm/i915/i915_drv.h
9375+++ b/drivers/gpu/drm/i915/i915_drv.h
9376@@ -467,15 +467,6 @@ typedef struct drm_i915_private {
9377 struct list_head flushing_list;
9378
9379 /**
9380- * List of objects currently pending a GPU write flush.
9381- *
9382- * All elements on this list will belong to either the
9383- * active_list or flushing_list, last_rendering_seqno can
9384- * be used to differentiate between the two elements.
9385- */
9386- struct list_head gpu_write_list;
9387-
9388- /**
9389 * LRU list of objects which are not in the ringbuffer and
9390 * are ready to unbind, but are still in the GTT.
9391 *
9392@@ -555,7 +546,6 @@ typedef struct drm_i915_private {
9393 struct timer_list idle_timer;
9394 bool busy;
9395 u16 orig_clock;
9396- struct drm_connector *int_lvds_connector;
9397 } drm_i915_private_t;
9398
9399 /** driver private structure attached to each drm_gem_object */
9400@@ -567,8 +557,6 @@ struct drm_i915_gem_object {
9401
9402 /** This object's place on the active/flushing/inactive lists */
9403 struct list_head list;
9404- /** This object's place on GPU write list */
9405- struct list_head gpu_write_list;
9406
9407 /** This object's place on the fenced object LRU */
9408 struct list_head fence_list;
9409@@ -825,17 +813,15 @@ void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
9410 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
9411 unsigned long end);
9412 int i915_gem_idle(struct drm_device *dev);
9413-int i915_lp_ring_sync(struct drm_device *dev);
9414 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
9415 int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
9416 int write);
9417-int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj);
9418 int i915_gem_attach_phys_object(struct drm_device *dev,
9419 struct drm_gem_object *obj, int id);
9420 void i915_gem_detach_phys_object(struct drm_device *dev,
9421 struct drm_gem_object *obj);
9422 void i915_gem_free_all_phys_object(struct drm_device *dev);
9423-int i915_gem_object_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
9424+int i915_gem_object_get_pages(struct drm_gem_object *obj);
9425 void i915_gem_object_put_pages(struct drm_gem_object *obj);
9426 void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
9427
9428@@ -971,7 +957,6 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
9429 #define IS_I85X(dev) ((dev)->pci_device == 0x3582)
9430 #define IS_I855(dev) ((dev)->pci_device == 0x3582)
9431 #define IS_I865G(dev) ((dev)->pci_device == 0x2572)
9432-#define IS_I8XX(dev) (IS_I830(dev) || IS_845G(dev) || IS_I85X(dev) || IS_I865G(dev))
9433
9434 #define IS_I915G(dev) ((dev)->pci_device == 0x2582 || (dev)->pci_device == 0x258a)
9435 #define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
9436@@ -1033,12 +1018,9 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
9437 */
9438 #define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \
9439 IS_I915GM(dev)))
9440-#define SUPPORTS_DIGITAL_OUTPUTS(dev) (IS_I9XX(dev) && !IS_IGD(dev))
9441 #define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IGDNG(dev))
9442 #define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IGDNG(dev))
9443 #define SUPPORTS_EDP(dev) (IS_IGDNG_M(dev))
9444-#define SUPPORTS_TV(dev) (IS_I9XX(dev) && IS_MOBILE(dev) && \
9445- !IS_IGDNG(dev) && !IS_IGD(dev))
9446 #define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev) || IS_I965G(dev))
9447 /* dsparb controlled by hw only */
9448 #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IGDNG(dev))
9449diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
9450index 04da731..abfc27b 100644
9451--- a/drivers/gpu/drm/i915/i915_gem.c
9452+++ b/drivers/gpu/drm/i915/i915_gem.c
9453@@ -277,7 +277,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
9454
9455 mutex_lock(&dev->struct_mutex);
9456
9457- ret = i915_gem_object_get_pages(obj, 0);
9458+ ret = i915_gem_object_get_pages(obj);
9459 if (ret != 0)
9460 goto fail_unlock;
9461
9462@@ -321,24 +321,40 @@ fail_unlock:
9463 return ret;
9464 }
9465
9466+static inline gfp_t
9467+i915_gem_object_get_page_gfp_mask (struct drm_gem_object *obj)
9468+{
9469+ return mapping_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping);
9470+}
9471+
9472+static inline void
9473+i915_gem_object_set_page_gfp_mask (struct drm_gem_object *obj, gfp_t gfp)
9474+{
9475+ mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, gfp);
9476+}
9477+
9478 static int
9479 i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
9480 {
9481 int ret;
9482
9483- ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);
9484+ ret = i915_gem_object_get_pages(obj);
9485
9486 /* If we've insufficient memory to map in the pages, attempt
9487 * to make some space by throwing out some old buffers.
9488 */
9489 if (ret == -ENOMEM) {
9490 struct drm_device *dev = obj->dev;
9491+ gfp_t gfp;
9492
9493 ret = i915_gem_evict_something(dev, obj->size);
9494 if (ret)
9495 return ret;
9496
9497- ret = i915_gem_object_get_pages(obj, 0);
9498+ gfp = i915_gem_object_get_page_gfp_mask(obj);
9499+ i915_gem_object_set_page_gfp_mask(obj, gfp & ~__GFP_NORETRY);
9500+ ret = i915_gem_object_get_pages(obj);
9501+ i915_gem_object_set_page_gfp_mask (obj, gfp);
9502 }
9503
9504 return ret;
9505@@ -774,7 +790,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
9506
9507 mutex_lock(&dev->struct_mutex);
9508
9509- ret = i915_gem_object_get_pages(obj, 0);
9510+ ret = i915_gem_object_get_pages(obj);
9511 if (ret != 0)
9512 goto fail_unlock;
9513
9514@@ -1272,7 +1288,6 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
9515 list->hash.key = list->file_offset_node->start;
9516 if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
9517 DRM_ERROR("failed to add to map hash\n");
9518- ret = -ENOMEM;
9519 goto out_free_mm;
9520 }
9521
9522@@ -1552,8 +1567,6 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
9523 else
9524 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
9525
9526- BUG_ON(!list_empty(&obj_priv->gpu_write_list));
9527-
9528 obj_priv->last_rendering_seqno = 0;
9529 if (obj_priv->active) {
9530 obj_priv->active = 0;
9531@@ -1624,8 +1637,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
9532 struct drm_i915_gem_object *obj_priv, *next;
9533
9534 list_for_each_entry_safe(obj_priv, next,
9535- &dev_priv->mm.gpu_write_list,
9536- gpu_write_list) {
9537+ &dev_priv->mm.flushing_list, list) {
9538 struct drm_gem_object *obj = obj_priv->obj;
9539
9540 if ((obj->write_domain & flush_domains) ==
9541@@ -1633,7 +1645,6 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
9542 uint32_t old_write_domain = obj->write_domain;
9543
9544 obj->write_domain = 0;
9545- list_del_init(&obj_priv->gpu_write_list);
9546 i915_gem_object_move_to_active(obj, seqno);
9547
9548 trace_i915_gem_object_change_domain(obj,
9549@@ -1809,8 +1820,12 @@ i915_gem_retire_work_handler(struct work_struct *work)
9550 mutex_unlock(&dev->struct_mutex);
9551 }
9552
9553+/**
9554+ * Waits for a sequence number to be signaled, and cleans up the
9555+ * request and object lists appropriately for that event.
9556+ */
9557 static int
9558-i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
9559+i915_wait_request(struct drm_device *dev, uint32_t seqno)
9560 {
9561 drm_i915_private_t *dev_priv = dev->dev_private;
9562 u32 ier;
9563@@ -1837,15 +1852,10 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
9564
9565 dev_priv->mm.waiting_gem_seqno = seqno;
9566 i915_user_irq_get(dev);
9567- if (interruptible)
9568- ret = wait_event_interruptible(dev_priv->irq_queue,
9569- i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
9570- atomic_read(&dev_priv->mm.wedged));
9571- else
9572- wait_event(dev_priv->irq_queue,
9573- i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
9574- atomic_read(&dev_priv->mm.wedged));
9575-
9576+ ret = wait_event_interruptible(dev_priv->irq_queue,
9577+ i915_seqno_passed(i915_get_gem_seqno(dev),
9578+ seqno) ||
9579+ atomic_read(&dev_priv->mm.wedged));
9580 i915_user_irq_put(dev);
9581 dev_priv->mm.waiting_gem_seqno = 0;
9582
9583@@ -1869,34 +1879,6 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
9584 return ret;
9585 }
9586
9587-/**
9588- * Waits for a sequence number to be signaled, and cleans up the
9589- * request and object lists appropriately for that event.
9590- */
9591-static int
9592-i915_wait_request(struct drm_device *dev, uint32_t seqno)
9593-{
9594- return i915_do_wait_request(dev, seqno, 1);
9595-}
9596-
9597-/**
9598- * Waits for the ring to finish up to the latest request. Usefull for waiting
9599- * for flip events, e.g for the overlay support. */
9600-int i915_lp_ring_sync(struct drm_device *dev)
9601-{
9602- uint32_t seqno;
9603- int ret;
9604-
9605- seqno = i915_add_request(dev, NULL, 0);
9606-
9607- if (seqno == 0)
9608- return -ENOMEM;
9609-
9610- ret = i915_do_wait_request(dev, seqno, 0);
9611- BUG_ON(ret == -ERESTARTSYS);
9612- return ret;
9613-}
9614-
9615 static void
9616 i915_gem_flush(struct drm_device *dev,
9617 uint32_t invalidate_domains,
9618@@ -1965,7 +1947,7 @@ i915_gem_flush(struct drm_device *dev,
9619 #endif
9620 BEGIN_LP_RING(2);
9621 OUT_RING(cmd);
9622- OUT_RING(MI_NOOP);
9623+ OUT_RING(0); /* noop */
9624 ADVANCE_LP_RING();
9625 }
9626 }
9627@@ -2027,6 +2009,9 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
9628 /* blow away mappings if mapped through GTT */
9629 i915_gem_release_mmap(obj);
9630
9631+ if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
9632+ i915_gem_clear_fence_reg(obj);
9633+
9634 /* Move the object to the CPU domain to ensure that
9635 * any possible CPU writes while it's not in the GTT
9636 * are flushed when we go to remap it. This will
9637@@ -2042,10 +2027,6 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
9638
9639 BUG_ON(obj_priv->active);
9640
9641- /* release the fence reg _after_ flushing */
9642- if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
9643- i915_gem_clear_fence_reg(obj);
9644-
9645 if (obj_priv->agp_mem != NULL) {
9646 drm_unbind_agp(obj_priv->agp_mem);
9647 drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
9648@@ -2106,8 +2087,8 @@ static int
9649 i915_gem_evict_everything(struct drm_device *dev)
9650 {
9651 drm_i915_private_t *dev_priv = dev->dev_private;
9652- int ret;
9653 uint32_t seqno;
9654+ int ret;
9655 bool lists_empty;
9656
9657 spin_lock(&dev_priv->mm.active_list_lock);
9658@@ -2129,8 +2110,6 @@ i915_gem_evict_everything(struct drm_device *dev)
9659 if (ret)
9660 return ret;
9661
9662- BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
9663-
9664 ret = i915_gem_evict_from_inactive_list(dev);
9665 if (ret)
9666 return ret;
9667@@ -2238,8 +2217,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
9668 }
9669
9670 int
9671-i915_gem_object_get_pages(struct drm_gem_object *obj,
9672- gfp_t gfpmask)
9673+i915_gem_object_get_pages(struct drm_gem_object *obj)
9674 {
9675 struct drm_i915_gem_object *obj_priv = obj->driver_private;
9676 int page_count, i;
9677@@ -2265,10 +2243,7 @@ i915_gem_object_get_pages(struct drm_gem_object *obj,
9678 inode = obj->filp->f_path.dentry->d_inode;
9679 mapping = inode->i_mapping;
9680 for (i = 0; i < page_count; i++) {
9681- page = read_cache_page_gfp(mapping, i,
9682- mapping_gfp_mask (mapping) |
9683- __GFP_COLD |
9684- gfpmask);
9685+ page = read_mapping_page(mapping, i, NULL);
9686 if (IS_ERR(page)) {
9687 ret = PTR_ERR(page);
9688 i915_gem_object_put_pages(obj);
9689@@ -2591,9 +2566,12 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
9690 drm_i915_private_t *dev_priv = dev->dev_private;
9691 struct drm_i915_gem_object *obj_priv = obj->driver_private;
9692 struct drm_mm_node *free_space;
9693- gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
9694+ bool retry_alloc = false;
9695 int ret;
9696
9697+ if (dev_priv->mm.suspended)
9698+ return -EBUSY;
9699+
9700 if (obj_priv->madv != I915_MADV_WILLNEED) {
9701 DRM_ERROR("Attempting to bind a purgeable object\n");
9702 return -EINVAL;
9703@@ -2635,7 +2613,15 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
9704 DRM_INFO("Binding object of size %zd at 0x%08x\n",
9705 obj->size, obj_priv->gtt_offset);
9706 #endif
9707- ret = i915_gem_object_get_pages(obj, gfpmask);
9708+ if (retry_alloc) {
9709+ i915_gem_object_set_page_gfp_mask (obj,
9710+ i915_gem_object_get_page_gfp_mask (obj) & ~__GFP_NORETRY);
9711+ }
9712+ ret = i915_gem_object_get_pages(obj);
9713+ if (retry_alloc) {
9714+ i915_gem_object_set_page_gfp_mask (obj,
9715+ i915_gem_object_get_page_gfp_mask (obj) | __GFP_NORETRY);
9716+ }
9717 if (ret) {
9718 drm_mm_put_block(obj_priv->gtt_space);
9719 obj_priv->gtt_space = NULL;
9720@@ -2645,9 +2631,9 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
9721 ret = i915_gem_evict_something(dev, obj->size);
9722 if (ret) {
9723 /* now try to shrink everyone else */
9724- if (gfpmask) {
9725- gfpmask = 0;
9726- goto search_free;
9727+ if (! retry_alloc) {
9728+ retry_alloc = true;
9729+ goto search_free;
9730 }
9731
9732 return ret;
9733@@ -2725,7 +2711,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
9734 old_write_domain = obj->write_domain;
9735 i915_gem_flush(dev, 0, obj->write_domain);
9736 seqno = i915_add_request(dev, NULL, obj->write_domain);
9737- BUG_ON(obj->write_domain);
9738+ obj->write_domain = 0;
9739 i915_gem_object_move_to_active(obj, seqno);
9740
9741 trace_i915_gem_object_change_domain(obj,
9742@@ -2825,57 +2811,6 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
9743 return 0;
9744 }
9745
9746-/*
9747- * Prepare buffer for display plane. Use uninterruptible for possible flush
9748- * wait, as in modesetting process we're not supposed to be interrupted.
9749- */
9750-int
9751-i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
9752-{
9753- struct drm_device *dev = obj->dev;
9754- struct drm_i915_gem_object *obj_priv = obj->driver_private;
9755- uint32_t old_write_domain, old_read_domains;
9756- int ret;
9757-
9758- /* Not valid to be called on unbound objects. */
9759- if (obj_priv->gtt_space == NULL)
9760- return -EINVAL;
9761-
9762- i915_gem_object_flush_gpu_write_domain(obj);
9763-
9764- /* Wait on any GPU rendering and flushing to occur. */
9765- if (obj_priv->active) {
9766-#if WATCH_BUF
9767- DRM_INFO("%s: object %p wait for seqno %08x\n",
9768- __func__, obj, obj_priv->last_rendering_seqno);
9769-#endif
9770- ret = i915_do_wait_request(dev, obj_priv->last_rendering_seqno, 0);
9771- if (ret != 0)
9772- return ret;
9773- }
9774-
9775- old_write_domain = obj->write_domain;
9776- old_read_domains = obj->read_domains;
9777-
9778- obj->read_domains &= I915_GEM_DOMAIN_GTT;
9779-
9780- i915_gem_object_flush_cpu_write_domain(obj);
9781-
9782- /* It should now be out of any other write domains, and we can update
9783- * the domain values for our changes.
9784- */
9785- BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
9786- obj->read_domains |= I915_GEM_DOMAIN_GTT;
9787- obj->write_domain = I915_GEM_DOMAIN_GTT;
9788- obj_priv->dirty = 1;
9789-
9790- trace_i915_gem_object_change_domain(obj,
9791- old_read_domains,
9792- old_write_domain);
9793-
9794- return 0;
9795-}
9796-
9797 /**
9798 * Moves a single object to the CPU read, and possibly write domain.
9799 *
9800@@ -3796,23 +3731,16 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
9801 i915_gem_flush(dev,
9802 dev->invalidate_domains,
9803 dev->flush_domains);
9804- if (dev->flush_domains & I915_GEM_GPU_DOMAINS)
9805+ if (dev->flush_domains)
9806 (void)i915_add_request(dev, file_priv,
9807 dev->flush_domains);
9808 }
9809
9810 for (i = 0; i < args->buffer_count; i++) {
9811 struct drm_gem_object *obj = object_list[i];
9812- struct drm_i915_gem_object *obj_priv = obj->driver_private;
9813 uint32_t old_write_domain = obj->write_domain;
9814
9815 obj->write_domain = obj->pending_write_domain;
9816- if (obj->write_domain)
9817- list_move_tail(&obj_priv->gpu_write_list,
9818- &dev_priv->mm.gpu_write_list);
9819- else
9820- list_del_init(&obj_priv->gpu_write_list);
9821-
9822 trace_i915_gem_object_change_domain(obj,
9823 obj->read_domains,
9824 old_write_domain);
9825@@ -4205,7 +4133,6 @@ int i915_gem_init_object(struct drm_gem_object *obj)
9826 obj_priv->obj = obj;
9827 obj_priv->fence_reg = I915_FENCE_REG_NONE;
9828 INIT_LIST_HEAD(&obj_priv->list);
9829- INIT_LIST_HEAD(&obj_priv->gpu_write_list);
9830 INIT_LIST_HEAD(&obj_priv->fence_list);
9831 obj_priv->madv = I915_MADV_WILLNEED;
9832
9833@@ -4657,7 +4584,6 @@ i915_gem_load(struct drm_device *dev)
9834 spin_lock_init(&dev_priv->mm.active_list_lock);
9835 INIT_LIST_HEAD(&dev_priv->mm.active_list);
9836 INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
9837- INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
9838 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
9839 INIT_LIST_HEAD(&dev_priv->mm.request_list);
9840 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
9841@@ -4712,7 +4638,7 @@ int i915_gem_init_phys_object(struct drm_device *dev,
9842
9843 phys_obj->id = id;
9844
9845- phys_obj->handle = drm_pci_alloc(dev, size, 0);
9846+ phys_obj->handle = drm_pci_alloc(dev, size, 0, 0xffffffff);
9847 if (!phys_obj->handle) {
9848 ret = -ENOMEM;
9849 goto kfree_obj;
9850@@ -4770,7 +4696,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
9851 if (!obj_priv->phys_obj)
9852 return;
9853
9854- ret = i915_gem_object_get_pages(obj, 0);
9855+ ret = i915_gem_object_get_pages(obj);
9856 if (ret)
9857 goto out;
9858
9859@@ -4828,7 +4754,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
9860 obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
9861 obj_priv->phys_obj->cur_obj = obj;
9862
9863- ret = i915_gem_object_get_pages(obj, 0);
9864+ ret = i915_gem_object_get_pages(obj);
9865 if (ret) {
9866 DRM_ERROR("failed to get page list\n");
9867 goto out;
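
Across i915_gem.c the patch removes the gpu_write_list and i915_lp_ring_sync() machinery, makes i915_wait_request() interruptible-only again, and reworks allocation retries: instead of threading a gfpmask into i915_gem_object_get_pages(), the bind path temporarily clears __GFP_NORETRY on the object's mapping for one patient retry after evicting. A userspace sketch of that retry flow; try_get_pages() and evict() are illustrative stand-ins:

#include <stdio.h>

static int attempts;

static int try_get_pages(int fail_fast)
{
    /* pretend the fail-fast attempt loses and the patient one wins */
    attempts++;
    return fail_fast ? -1 : 0;
}

static void evict(void) { /* drop cached buffers to free memory */ }

static int bind(void)
{
    int retry_alloc = 0;

search_free:
    if (try_get_pages(!retry_alloc) == 0)
        return 0;

    evict();
    if (!retry_alloc) {
        retry_alloc = 1;   /* one more try, this time without NORETRY */
        goto search_free;
    }
    return -1;
}

int main(void)
{
    printf("bind=%d after %d attempts\n", bind(), attempts);
    return 0;
}
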
9868diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
9869index 63f28ad..aa7fd82 100644
9870--- a/drivers/gpu/drm/i915/i915_irq.c
9871+++ b/drivers/gpu/drm/i915/i915_irq.c
9872@@ -255,6 +255,7 @@ irqreturn_t igdng_irq_handler(struct drm_device *dev)
9873 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
9874 int ret = IRQ_NONE;
9875 u32 de_iir, gt_iir, de_ier;
9876+ u32 new_de_iir, new_gt_iir;
9877 struct drm_i915_master_private *master_priv;
9878
9879 /* disable master interrupt before clearing iir */
9880@@ -265,31 +266,35 @@ irqreturn_t igdng_irq_handler(struct drm_device *dev)
9881 de_iir = I915_READ(DEIIR);
9882 gt_iir = I915_READ(GTIIR);
9883
9884- if (de_iir == 0 && gt_iir == 0)
9885- goto done;
9886+ for (;;) {
9887+ if (de_iir == 0 && gt_iir == 0)
9888+ break;
9889
9890- ret = IRQ_HANDLED;
9891+ ret = IRQ_HANDLED;
9892
9893- if (dev->primary->master) {
9894- master_priv = dev->primary->master->driver_priv;
9895- if (master_priv->sarea_priv)
9896- master_priv->sarea_priv->last_dispatch =
9897- READ_BREADCRUMB(dev_priv);
9898- }
9899+ I915_WRITE(DEIIR, de_iir);
9900+ new_de_iir = I915_READ(DEIIR);
9901+ I915_WRITE(GTIIR, gt_iir);
9902+ new_gt_iir = I915_READ(GTIIR);
9903
9904- if (gt_iir & GT_USER_INTERRUPT) {
9905- u32 seqno = i915_get_gem_seqno(dev);
9906- dev_priv->mm.irq_gem_seqno = seqno;
9907- trace_i915_gem_request_complete(dev, seqno);
9908- DRM_WAKEUP(&dev_priv->irq_queue);
9909- dev_priv->hangcheck_count = 0;
9910- mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
9911- }
9912+ if (dev->primary->master) {
9913+ master_priv = dev->primary->master->driver_priv;
9914+ if (master_priv->sarea_priv)
9915+ master_priv->sarea_priv->last_dispatch =
9916+ READ_BREADCRUMB(dev_priv);
9917+ }
9918+
9919+ if (gt_iir & GT_USER_INTERRUPT) {
9920+ u32 seqno = i915_get_gem_seqno(dev);
9921+ dev_priv->mm.irq_gem_seqno = seqno;
9922+ trace_i915_gem_request_complete(dev, seqno);
9923+ DRM_WAKEUP(&dev_priv->irq_queue);
9924+ }
9925
9926- I915_WRITE(GTIIR, gt_iir);
9927- I915_WRITE(DEIIR, de_iir);
9928+ de_iir = new_de_iir;
9929+ gt_iir = new_gt_iir;
9930+ }
9931
9932-done:
9933 I915_WRITE(DEIER, de_ier);
9934 (void)I915_READ(DEIER);
9935
9936@@ -1044,10 +1049,6 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
9937 (void) I915_READ(IER);
9938 }
9939
9940-/*
9941- * Must be called after intel_modeset_init or hotplug interrupts won't be
9942- * enabled correctly.
9943- */
9944 int i915_driver_irq_postinstall(struct drm_device *dev)
9945 {
9946 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
9947@@ -1070,23 +1071,19 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
9948 if (I915_HAS_HOTPLUG(dev)) {
9949 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
9950
9951- /* Note HDMI and DP share bits */
9952- if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
9953- hotplug_en |= HDMIB_HOTPLUG_INT_EN;
9954- if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
9955- hotplug_en |= HDMIC_HOTPLUG_INT_EN;
9956- if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
9957- hotplug_en |= HDMID_HOTPLUG_INT_EN;
9958- if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
9959- hotplug_en |= SDVOC_HOTPLUG_INT_EN;
9960- if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
9961- hotplug_en |= SDVOB_HOTPLUG_INT_EN;
9962- if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS)
9963- hotplug_en |= CRT_HOTPLUG_INT_EN;
9964- /* Ignore TV since it's buggy */
9965-
9966+ /* Leave other bits alone */
9967+ hotplug_en |= HOTPLUG_EN_MASK;
9968 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
9969
9970+ dev_priv->hotplug_supported_mask = CRT_HOTPLUG_INT_STATUS |
9971+ TV_HOTPLUG_INT_STATUS | SDVOC_HOTPLUG_INT_STATUS |
9972+ SDVOB_HOTPLUG_INT_STATUS;
9973+ if (IS_G4X(dev)) {
9974+ dev_priv->hotplug_supported_mask |=
9975+ HDMIB_HOTPLUG_INT_STATUS |
9976+ HDMIC_HOTPLUG_INT_STATUS |
9977+ HDMID_HOTPLUG_INT_STATUS;
9978+ }
9979 /* Enable in IER... */
9980 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
9981 /* and unmask in IMR */
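
The i915_irq.c hunks turn igdng_irq_handler() into an ack-and-reread loop: each pass writes the latched DEIIR/GTIIR bits back to acknowledge them, immediately re-reads the registers, handles the batch, and repeats until both read back zero, so interrupts arriving mid-handler are not lost. A sketch of the loop with simulated registers:

#include <stdio.h>

static unsigned int iir_latch = 0x5; /* pending interrupt bits */

static unsigned int read_iir(void) { return iir_latch; }
static void write_iir(unsigned int v) { iir_latch &= ~v; } /* write acks */

int main(void)
{
    unsigned int iir = read_iir();

    for (;;) {
        unsigned int new_iir;

        if (iir == 0)
            break;

        write_iir(iir);        /* acknowledge this batch          */
        new_iir = read_iir();  /* pick up anything newly latched  */

        printf("handled 0x%x\n", iir);
        iir = new_iir;
    }
    return 0;
}
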
9982diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
9983index cc9b49a..1687edf 100644
9984--- a/drivers/gpu/drm/i915/i915_reg.h
9985+++ b/drivers/gpu/drm/i915/i915_reg.h
9986@@ -329,7 +329,6 @@
9987 #define FBC_CTL_PERIODIC (1<<30)
9988 #define FBC_CTL_INTERVAL_SHIFT (16)
9989 #define FBC_CTL_UNCOMPRESSIBLE (1<<14)
9990-#define FBC_C3_IDLE (1<<13)
9991 #define FBC_CTL_STRIDE_SHIFT (5)
9992 #define FBC_CTL_FENCENO (1<<0)
9993 #define FBC_COMMAND 0x0320c
9994@@ -406,13 +405,6 @@
9995 # define GPIO_DATA_VAL_IN (1 << 12)
9996 # define GPIO_DATA_PULLUP_DISABLE (1 << 13)
9997
9998-#define GMBUS0 0x5100
9999-#define GMBUS1 0x5104
10000-#define GMBUS2 0x5108
10001-#define GMBUS3 0x510c
10002-#define GMBUS4 0x5110
10003-#define GMBUS5 0x5120
10004-
10005 /*
10006 * Clock control & power management
10007 */
10008@@ -871,6 +863,14 @@
10009 #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
10010 #define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */
10011 #define CRT_FORCE_HOTPLUG_MASK 0xfffffe1f
10012+#define HOTPLUG_EN_MASK (HDMIB_HOTPLUG_INT_EN | \
10013+ HDMIC_HOTPLUG_INT_EN | \
10014+ HDMID_HOTPLUG_INT_EN | \
10015+ SDVOB_HOTPLUG_INT_EN | \
10016+ SDVOC_HOTPLUG_INT_EN | \
10017+ TV_HOTPLUG_INT_EN | \
10018+ CRT_HOTPLUG_INT_EN)
10019+
10020
10021 #define PORT_HOTPLUG_STAT 0x61114
10022 #define HDMIB_HOTPLUG_INT_STATUS (1 << 29)
10023@@ -968,8 +968,6 @@
10024 #define LVDS_PORT_EN (1 << 31)
10025 /* Selects pipe B for LVDS data. Must be set on pre-965. */
10026 #define LVDS_PIPEB_SELECT (1 << 30)
10027-/* LVDS dithering flag on 965/g4x platform */
10028-#define LVDS_ENABLE_DITHER (1 << 25)
10029 /* Enable border for unscaled (or aspect-scaled) display */
10030 #define LVDS_BORDER_ENABLE (1 << 15)
10031 /*
10032@@ -1739,8 +1737,6 @@
10033
10034 /* Display & cursor control */
10035
10036-/* dithering flag on Ironlake */
10037-#define PIPE_ENABLE_DITHER (1 << 4)
10038 /* Pipe A */
10039 #define PIPEADSL 0x70000
10040 #define PIPEACONF 0x70008
10041@@ -2161,13 +2157,6 @@
10042 #define PCH_GPIOE 0xc5020
10043 #define PCH_GPIOF 0xc5024
10044
10045-#define PCH_GMBUS0 0xc5100
10046-#define PCH_GMBUS1 0xc5104
10047-#define PCH_GMBUS2 0xc5108
10048-#define PCH_GMBUS3 0xc510c
10049-#define PCH_GMBUS4 0xc5110
10050-#define PCH_GMBUS5 0xc5120
10051-
10052 #define PCH_DPLL_A 0xc6014
10053 #define PCH_DPLL_B 0xc6018
10054
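
The i915_reg.h hunk adds HOTPLUG_EN_MASK, a composite of all the per-connector hotplug-enable bits, which the interrupt-install hunk in i915_irq.c above ORs into PORT_HOTPLUG_EN wholesale instead of selecting bits from hotplug_supported_mask. A sketch of the composite-mask idiom; the bit values are made up, the real ones live in i915_reg.h:

#include <stdio.h>

#define HDMIB_EN (1u << 29)
#define HDMIC_EN (1u << 28)
#define CRT_EN   (1u << 9)

#define HOTPLUG_EN_MASK (HDMIB_EN | HDMIC_EN | CRT_EN)

int main(void)
{
    unsigned int hotplug_en = 0x1; /* pretend other bits were set */

    hotplug_en |= HOTPLUG_EN_MASK; /* leave other bits alone */
    printf("PORT_HOTPLUG_EN = 0x%08x\n", hotplug_en);
    return 0;
}
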
10055diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
10056index 7ad742f..6eec817 100644
10057--- a/drivers/gpu/drm/i915/i915_suspend.c
10058+++ b/drivers/gpu/drm/i915/i915_suspend.c
10059@@ -27,7 +27,7 @@
10060 #include "drmP.h"
10061 #include "drm.h"
10062 #include "i915_drm.h"
10063-#include "intel_drv.h"
10064+#include "i915_drv.h"
10065
10066 static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
10067 {
10068@@ -846,9 +846,6 @@ int i915_restore_state(struct drm_device *dev)
10069 for (i = 0; i < 3; i++)
10070 I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
10071
10072- /* I2C state */
10073- intel_i2c_reset_gmbus(dev);
10074-
10075 return 0;
10076 }
10077
10078diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
10079index 5e730e6..e505144 100644
10080--- a/drivers/gpu/drm/i915/intel_crt.c
10081+++ b/drivers/gpu/drm/i915/intel_crt.c
10082@@ -185,9 +185,6 @@ static bool intel_igdng_crt_detect_hotplug(struct drm_connector *connector)
10083 adpa = I915_READ(PCH_ADPA);
10084
10085 adpa &= ~ADPA_CRT_HOTPLUG_MASK;
10086- /* disable HPD first */
10087- I915_WRITE(PCH_ADPA, adpa);
10088- (void)I915_READ(PCH_ADPA);
10089
10090 adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 |
10091 ADPA_CRT_HOTPLUG_WARMUP_10MS |
10092@@ -579,6 +576,4 @@ void intel_crt_init(struct drm_device *dev)
10093 drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
10094
10095 drm_sysfs_connector_add(connector);
10096-
10097- dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
10098 }
10099diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
10100index b00a1aa..099f420 100644
10101--- a/drivers/gpu/drm/i915/intel_display.c
10102+++ b/drivers/gpu/drm/i915/intel_display.c
10103@@ -988,8 +988,6 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
10104
10105 /* enable it... */
10106 fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
10107- if (IS_I945GM(dev))
10108- fbc_ctl |= FBC_C3_IDLE; /* 945 needs special SR handling */
10109 fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
10110 fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
10111 if (obj_priv->tiling_mode != I915_TILING_NONE)
10112@@ -1253,7 +1251,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
10113 return ret;
10114 }
10115
10116- ret = i915_gem_object_set_to_display_plane(obj);
10117+ ret = i915_gem_object_set_to_gtt_domain(obj, 1);
10118 if (ret != 0) {
10119 i915_gem_object_unpin(obj);
10120 mutex_unlock(&dev->struct_mutex);
10121@@ -1475,10 +1473,6 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
10122 int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B;
10123 u32 temp;
10124 int tries = 5, j, n;
10125- u32 pipe_bpc;
10126-
10127- temp = I915_READ(pipeconf_reg);
10128- pipe_bpc = temp & PIPE_BPC_MASK;
10129
10130 /* XXX: When our outputs are all unaware of DPMS modes other than off
10131 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
10132@@ -1488,15 +1482,6 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
10133 case DRM_MODE_DPMS_STANDBY:
10134 case DRM_MODE_DPMS_SUSPEND:
10135 DRM_DEBUG("crtc %d dpms on\n", pipe);
10136-
10137- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
10138- temp = I915_READ(PCH_LVDS);
10139- if ((temp & LVDS_PORT_EN) == 0) {
10140- I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
10141- POSTING_READ(PCH_LVDS);
10142- }
10143- }
10144-
10145 if (HAS_eDP) {
10146 /* enable eDP PLL */
10147 igdng_enable_pll_edp(crtc);
10148@@ -1510,12 +1495,6 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
10149
10150 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
10151 temp = I915_READ(fdi_rx_reg);
10152- /*
10153- * make the BPC in FDI Rx be consistent with that in
10154- * pipeconf reg.
10155- */
10156- temp &= ~(0x7 << 16);
10157- temp |= (pipe_bpc << 11);
10158 I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE |
10159 FDI_SEL_PCDCLK |
10160 FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */
10161@@ -1656,12 +1635,6 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
10162
10163 /* enable PCH transcoder */
10164 temp = I915_READ(transconf_reg);
10165- /*
10166- * make the BPC in transcoder be consistent with
10167- * that in pipeconf reg.
10168- */
10169- temp &= ~PIPE_BPC_MASK;
10170- temp |= pipe_bpc;
10171 I915_WRITE(transconf_reg, temp | TRANS_ENABLE);
10172 I915_READ(transconf_reg);
10173
10174@@ -1693,6 +1666,8 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
10175 case DRM_MODE_DPMS_OFF:
10176 DRM_DEBUG("crtc %d dpms off\n", pipe);
10177
10178+ i915_disable_vga(dev);
10179+
10180 /* Disable display plane */
10181 temp = I915_READ(dspcntr_reg);
10182 if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
10183@@ -1702,8 +1677,6 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
10184 I915_READ(dspbase_reg);
10185 }
10186
10187- i915_disable_vga(dev);
10188-
10189 /* disable cpu pipe, disable after all planes disabled */
10190 temp = I915_READ(pipeconf_reg);
10191 if ((temp & PIPEACONF_ENABLE) != 0) {
10192@@ -1724,15 +1697,9 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
10193 } else
10194 DRM_DEBUG("crtc %d is disabled\n", pipe);
10195
10196- udelay(100);
10197-
10198- /* Disable PF */
10199- temp = I915_READ(pf_ctl_reg);
10200- if ((temp & PF_ENABLE) != 0) {
10201- I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE);
10202- I915_READ(pf_ctl_reg);
10203+ if (HAS_eDP) {
10204+ igdng_disable_pll_edp(crtc);
10205 }
10206- I915_WRITE(pf_win_size, 0);
10207
10208 /* disable CPU FDI tx and PCH FDI rx */
10209 temp = I915_READ(fdi_tx_reg);
10210@@ -1740,9 +1707,6 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
10211 I915_READ(fdi_tx_reg);
10212
10213 temp = I915_READ(fdi_rx_reg);
10214- /* BPC in FDI rx is consistent with that in pipeconf */
10215- temp &= ~(0x07 << 16);
10216- temp |= (pipe_bpc << 11);
10217 I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE);
10218 I915_READ(fdi_rx_reg);
10219
10220@@ -1761,13 +1725,6 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
10221
10222 udelay(100);
10223
10224- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
10225- temp = I915_READ(PCH_LVDS);
10226- I915_WRITE(PCH_LVDS, temp & ~LVDS_PORT_EN);
10227- I915_READ(PCH_LVDS);
10228- udelay(100);
10229- }
10230-
10231 /* disable PCH transcoder */
10232 temp = I915_READ(transconf_reg);
10233 if ((temp & TRANS_ENABLE) != 0) {
10234@@ -1786,13 +1743,6 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
10235 }
10236 }
10237 }
10238- temp = I915_READ(transconf_reg);
10239- /* BPC in transcoder is consistent with that in pipeconf */
10240- temp &= ~PIPE_BPC_MASK;
10241- temp |= pipe_bpc;
10242- I915_WRITE(transconf_reg, temp);
10243- I915_READ(transconf_reg);
10244- udelay(100);
10245
10246 /* disable PCH DPLL */
10247 temp = I915_READ(pch_dpll_reg);
10248@@ -1801,19 +1751,13 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
10249 I915_READ(pch_dpll_reg);
10250 }
10251
10252- if (HAS_eDP) {
10253- igdng_disable_pll_edp(crtc);
10254- }
10255-
10256 temp = I915_READ(fdi_rx_reg);
10257- temp &= ~FDI_SEL_PCDCLK;
10258- I915_WRITE(fdi_rx_reg, temp);
10259- I915_READ(fdi_rx_reg);
10260-
10261- temp = I915_READ(fdi_rx_reg);
10262- temp &= ~FDI_RX_PLL_ENABLE;
10263- I915_WRITE(fdi_rx_reg, temp);
10264- I915_READ(fdi_rx_reg);
10265+ if ((temp & FDI_RX_PLL_ENABLE) != 0) {
10266+ temp &= ~FDI_SEL_PCDCLK;
10267+ temp &= ~FDI_RX_PLL_ENABLE;
10268+ I915_WRITE(fdi_rx_reg, temp);
10269+ I915_READ(fdi_rx_reg);
10270+ }
10271
10272 /* Disable CPU FDI TX PLL */
10273 temp = I915_READ(fdi_tx_reg);
10274@@ -1823,8 +1767,16 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
10275 udelay(100);
10276 }
10277
10278+ /* Disable PF */
10279+ temp = I915_READ(pf_ctl_reg);
10280+ if ((temp & PF_ENABLE) != 0) {
10281+ I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE);
10282+ I915_READ(pf_ctl_reg);
10283+ }
10284+ I915_WRITE(pf_win_size, 0);
10285+
10286 /* Wait for the clocks to turn off. */
10287- udelay(100);
10288+ udelay(150);
10289 break;
10290 }
10291 }
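
The igdng_crtc_dpms() hunks above shuffle where the panel fitter, FDI PLLs, and eDP PLL are torn down, but every step uses the same register idiom: read the register, flip one flag, write it back, and read it again so the write is posted before any delay. A minimal sketch of that idiom using plain readl()/writel() on an ioremapped MMIO base (the helper name and arguments are illustrative, not the driver's API):

    /* Read-modify-write with a posting read, as used throughout the
     * DPMS paths above. 'regs' is an ioremapped MMIO base.
     */
    static void mmio_clear_flag(void __iomem *regs, u32 reg, u32 flag)
    {
            u32 temp = readl(regs + reg);

            if (temp & flag) {
                    writel(temp & ~flag, regs + reg);
                    (void)readl(regs + reg); /* posting read flushes the write */
            }
    }
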
10292@@ -1893,7 +1845,6 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
10293 intel_update_watermarks(dev);
10294 /* Give the overlay scaler a chance to disable if it's on this pipe */
10295 //intel_crtc_dpms_video(crtc, FALSE); TODO
10296- drm_vblank_off(dev, pipe);
10297
10298 if (dev_priv->cfb_plane == plane &&
10299 dev_priv->display.disable_fbc)
10300@@ -2540,10 +2491,6 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
10301 sr_entries = roundup(sr_entries / cacheline_size, 1);
10302 DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
10303 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
10304- } else {
10305- /* Turn off self refresh if both pipes are enabled */
10306- I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
10307- & ~FW_BLC_SELF_EN);
10308 }
10309
10310 DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n",
10311@@ -2562,43 +2509,15 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
10312 (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
10313 }
10314
10315-static void i965_update_wm(struct drm_device *dev, int planea_clock,
10316- int planeb_clock, int sr_hdisplay, int pixel_size)
10317+static void i965_update_wm(struct drm_device *dev, int unused, int unused2,
10318+ int unused3, int unused4)
10319 {
10320 struct drm_i915_private *dev_priv = dev->dev_private;
10321- unsigned long line_time_us;
10322- int sr_clock, sr_entries, srwm = 1;
10323-
10324- /* Calc sr entries for one plane configs */
10325- if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
10326- /* self-refresh has much higher latency */
10327- const static int sr_latency_ns = 12000;
10328-
10329- sr_clock = planea_clock ? planea_clock : planeb_clock;
10330- line_time_us = ((sr_hdisplay * 1000) / sr_clock);
10331-
10332- /* Use ns/us then divide to preserve precision */
10333- sr_entries = (((sr_latency_ns / line_time_us) + 1) *
10334- pixel_size * sr_hdisplay) / 1000;
10335- sr_entries = roundup(sr_entries / I915_FIFO_LINE_SIZE, 1);
10336- DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
10337- srwm = I945_FIFO_SIZE - sr_entries;
10338- if (srwm < 0)
10339- srwm = 1;
10340- srwm &= 0x3f;
10341- I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
10342- } else {
10343- /* Turn off self refresh if both pipes are enabled */
10344- I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
10345- & ~FW_BLC_SELF_EN);
10346- }
10347
10348- DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
10349- srwm);
10350+ DRM_DEBUG("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR 8\n");
10351
10352 /* 965 has limitations... */
10353- I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | (8 << 16) | (8 << 8) |
10354- (8 << 0));
10355+ I915_WRITE(DSPFW1, (8 << 16) | (8 << 8) | (8 << 0));
10356 I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
10357 }
10358
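
For reference, the branch deleted from i965_update_wm() computed the self-refresh watermark from the mode geometry; restated as a standalone function it reads as below. The FIFO size (127) and line size (64 bytes) are assumptions taken from the I945_FIFO_SIZE and I915_FIFO_LINE_SIZE constants the deleted code referenced.

    /* Self-refresh watermark, mirroring the deleted i965 branch. */
    static int i965_srwm(int sr_clock, int sr_hdisplay, int pixel_size)
    {
            const int sr_latency_ns = 12000;  /* self-refresh exit latency */
            int line_time_us, sr_entries, srwm;

            line_time_us = (sr_hdisplay * 1000) / sr_clock;
            if (line_time_us == 0)            /* guard added for the sketch */
                    return 1;

            /* use ns/us then divide to preserve precision */
            sr_entries = (((sr_latency_ns / line_time_us) + 1) *
                          pixel_size * sr_hdisplay) / 1000;
            sr_entries = sr_entries / 64;     /* FIFO lines of 64 bytes, assumed */

            srwm = 127 - sr_entries;          /* assumed I945_FIFO_SIZE */
            if (srwm < 0)
                    srwm = 1;
            return srwm & 0x3f;
    }
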
10359@@ -2659,10 +2578,6 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
10360 if (srwm < 0)
10361 srwm = 1;
10362 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f));
10363- } else {
10364- /* Turn off self refresh if both pipes are enabled */
10365- I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
10366- & ~FW_BLC_SELF_EN);
10367 }
10368
10369 DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
10370@@ -2939,18 +2854,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
10371
10372 /* determine panel color depth */
10373 temp = I915_READ(pipeconf_reg);
10374- temp &= ~PIPE_BPC_MASK;
10375- if (is_lvds) {
10376- int lvds_reg = I915_READ(PCH_LVDS);
10377- /* the BPC will be 6 if it is 18-bit LVDS panel */
10378- if ((lvds_reg & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP)
10379- temp |= PIPE_8BPC;
10380- else
10381- temp |= PIPE_6BPC;
10382- } else
10383- temp |= PIPE_8BPC;
10384- I915_WRITE(pipeconf_reg, temp);
10385- I915_READ(pipeconf_reg);
10386
10387 switch (temp & PIPE_BPC_MASK) {
10388 case PIPE_8BPC:
10389@@ -3178,20 +3081,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
10390 * appropriately here, but we need to look more thoroughly into how
10391 * panels behave in the two modes.
10392 */
10393- /* set the dithering flag */
10394- if (IS_I965G(dev)) {
10395- if (dev_priv->lvds_dither) {
10396- if (IS_IGDNG(dev))
10397- pipeconf |= PIPE_ENABLE_DITHER;
10398- else
10399- lvds |= LVDS_ENABLE_DITHER;
10400- } else {
10401- if (IS_IGDNG(dev))
10402- pipeconf &= ~PIPE_ENABLE_DITHER;
10403- else
10404- lvds &= ~LVDS_ENABLE_DITHER;
10405- }
10406- }
10407+
10408 I915_WRITE(lvds_reg, lvds);
10409 I915_READ(lvds_reg);
10410 }
10411@@ -3775,6 +3665,125 @@ static void intel_gpu_idle_timer(unsigned long arg)
10412 queue_work(dev_priv->wq, &dev_priv->idle_work);
10413 }
10414
10415+void intel_increase_renderclock(struct drm_device *dev, bool schedule)
10416+{
10417+ drm_i915_private_t *dev_priv = dev->dev_private;
10418+
10419+ if (IS_IGDNG(dev))
10420+ return;
10421+
10422+ if (!dev_priv->render_reclock_avail) {
10423+ DRM_DEBUG("not reclocking render clock\n");
10424+ return;
10425+ }
10426+
10427+ /* Restore render clock frequency to original value */
10428+ if (IS_G4X(dev) || IS_I9XX(dev))
10429+ pci_write_config_word(dev->pdev, GCFGC, dev_priv->orig_clock);
10430+ else if (IS_I85X(dev))
10431+ pci_write_config_word(dev->pdev, HPLLCC, dev_priv->orig_clock);
10432+ DRM_DEBUG("increasing render clock frequency\n");
10433+
10434+ /* Schedule downclock */
10435+ if (schedule)
10436+ mod_timer(&dev_priv->idle_timer, jiffies +
10437+ msecs_to_jiffies(GPU_IDLE_TIMEOUT));
10438+}
10439+
10440+void intel_decrease_renderclock(struct drm_device *dev)
10441+{
10442+ drm_i915_private_t *dev_priv = dev->dev_private;
10443+
10444+ if (IS_IGDNG(dev))
10445+ return;
10446+
10447+ if (!dev_priv->render_reclock_avail) {
10448+ DRM_DEBUG("not reclocking render clock\n");
10449+ return;
10450+ }
10451+
10452+ if (IS_G4X(dev)) {
10453+ u16 gcfgc;
10454+
10455+ /* Adjust render clock... */
10456+ pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
10457+
10458+ /* Down to minimum... */
10459+ gcfgc &= ~GM45_GC_RENDER_CLOCK_MASK;
10460+ gcfgc |= GM45_GC_RENDER_CLOCK_266_MHZ;
10461+
10462+ pci_write_config_word(dev->pdev, GCFGC, gcfgc);
10463+ } else if (IS_I965G(dev)) {
10464+ u16 gcfgc;
10465+
10466+ /* Adjust render clock... */
10467+ pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
10468+
10469+ /* Down to minimum... */
10470+ gcfgc &= ~I965_GC_RENDER_CLOCK_MASK;
10471+ gcfgc |= I965_GC_RENDER_CLOCK_267_MHZ;
10472+
10473+ pci_write_config_word(dev->pdev, GCFGC, gcfgc);
10474+ } else if (IS_I945G(dev) || IS_I945GM(dev)) {
10475+ u16 gcfgc;
10476+
10477+ /* Adjust render clock... */
10478+ pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
10479+
10480+ /* Down to minimum... */
10481+ gcfgc &= ~I945_GC_RENDER_CLOCK_MASK;
10482+ gcfgc |= I945_GC_RENDER_CLOCK_166_MHZ;
10483+
10484+ pci_write_config_word(dev->pdev, GCFGC, gcfgc);
10485+ } else if (IS_I915G(dev)) {
10486+ u16 gcfgc;
10487+
10488+ /* Adjust render clock... */
10489+ pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
10490+
10491+ /* Down to minimum... */
10492+ gcfgc &= ~I915_GC_RENDER_CLOCK_MASK;
10493+ gcfgc |= I915_GC_RENDER_CLOCK_166_MHZ;
10494+
10495+ pci_write_config_word(dev->pdev, GCFGC, gcfgc);
10496+ } else if (IS_I85X(dev)) {
10497+ u16 hpllcc;
10498+
10499+ /* Adjust render clock... */
10500+ pci_read_config_word(dev->pdev, HPLLCC, &hpllcc);
10501+
10502+ /* Up to maximum... */
10503+ hpllcc &= ~GC_CLOCK_CONTROL_MASK;
10504+ hpllcc |= GC_CLOCK_133_200;
10505+
10506+ pci_write_config_word(dev->pdev, HPLLCC, hpllcc);
10507+ }
10508+ DRM_DEBUG("decreasing render clock frequency\n");
10509+}
10510+
10511+/* Note that no increase function is needed for this - increase_renderclock()
10512+ * will also rewrite these bits
10513+ */
10514+void intel_decrease_displayclock(struct drm_device *dev)
10515+{
10516+ if (IS_IGDNG(dev))
10517+ return;
10518+
10519+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_I915G(dev) ||
10520+ IS_I915GM(dev)) {
10521+ u16 gcfgc;
10522+
10523+ /* Adjust render clock... */
10524+ pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
10525+
10526+ /* Down to minimum... */
10527+ gcfgc &= ~0xf0;
10528+ gcfgc |= 0x80;
10529+
10530+ pci_write_config_word(dev->pdev, GCFGC, gcfgc);
10531+ }
10532+}
10533+
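
Every branch of intel_decrease_renderclock() above is the same three-step PCI config-space read-modify-write, differing only in the mask and minimum-frequency value for that generation. Condensed into one helper (the struct and its field names are illustrative, not the driver's API):

    struct reclock_field {
            u16 mask;     /* clock-select field, e.g. I945_GC_RENDER_CLOCK_MASK */
            u16 min_val;  /* lowest setting, e.g. I945_GC_RENDER_CLOCK_166_MHZ */
    };

    static void set_min_render_clock(struct pci_dev *pdev, int reg,
                                     const struct reclock_field *f)
    {
            u16 word;

            pci_read_config_word(pdev, reg, &word);
            word &= ~f->mask;     /* clear the clock-select field */
            word |= f->min_val;   /* drop to the minimum frequency */
            pci_write_config_word(pdev, reg, word);
    }
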
10534 #define CRTC_IDLE_TIMEOUT 1000 /* ms */
10535
10536 static void intel_crtc_idle_timer(unsigned long arg)
10537@@ -3888,6 +3897,12 @@ static void intel_idle_update(struct work_struct *work)
10538
10539 mutex_lock(&dev->struct_mutex);
10540
10541+ /* GPU isn't processing, downclock it. */
10542+ if (!dev_priv->busy) {
10543+ intel_decrease_renderclock(dev);
10544+ intel_decrease_displayclock(dev);
10545+ }
10546+
10547 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10548 /* Skip inactive CRTCs */
10549 if (!crtc->fb)
10550@@ -3922,6 +3937,7 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
10551 return;
10552
10553 dev_priv->busy = true;
10554+ intel_increase_renderclock(dev, true);
10555
10556 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10557 if (!crtc->fb)
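
Taken together, these hunks form a simple two-state clock governor: intel_mark_busy() restores full clocks and (re)arms an idle timer, and when the timer fires with dev_priv->busy still clear, the work handler drops the render and display clocks. A sketch of the pattern with generic names (struct my_gpu and its two helpers are hypothetical):

    static void mark_busy(struct my_gpu *gpu)
    {
            gpu->busy = true;
            raise_clocks(gpu);                       /* hypothetical helper */
            mod_timer(&gpu->idle_timer,
                      jiffies + msecs_to_jiffies(GPU_IDLE_TIMEOUT));
    }

    static void idle_work_fn(struct work_struct *work)
    {
            struct my_gpu *gpu = container_of(work, struct my_gpu, idle_work);

            mutex_lock(&gpu->lock);
            if (!gpu->busy)
                    lower_clocks(gpu);               /* hypothetical helper */
            mutex_unlock(&gpu->lock);
    }
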
10558@@ -4102,51 +4118,37 @@ static void intel_setup_outputs(struct drm_device *dev)
10559 if (I915_READ(PCH_DP_D) & DP_DETECTED)
10560 intel_dp_init(dev, PCH_DP_D);
10561
10562- } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
10563+ } else if (IS_I9XX(dev)) {
10564 bool found = false;
10565
10566 if (I915_READ(SDVOB) & SDVO_DETECTED) {
10567- DRM_DEBUG_KMS("probing SDVOB\n");
10568 found = intel_sdvo_init(dev, SDVOB);
10569- if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
10570- DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
10571+ if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
10572 intel_hdmi_init(dev, SDVOB);
10573- }
10574
10575- if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
10576- DRM_DEBUG_KMS("probing DP_B\n");
10577+ if (!found && SUPPORTS_INTEGRATED_DP(dev))
10578 intel_dp_init(dev, DP_B);
10579- }
10580 }
10581
10582 /* Before G4X SDVOC doesn't have its own detect register */
10583
10584- if (I915_READ(SDVOB) & SDVO_DETECTED) {
10585- DRM_DEBUG_KMS("probing SDVOC\n");
10586+ if (I915_READ(SDVOB) & SDVO_DETECTED)
10587 found = intel_sdvo_init(dev, SDVOC);
10588- }
10589
10590 if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {
10591
10592- if (SUPPORTS_INTEGRATED_HDMI(dev)) {
10593- DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
10594+ if (SUPPORTS_INTEGRATED_HDMI(dev))
10595 intel_hdmi_init(dev, SDVOC);
10596- }
10597- if (SUPPORTS_INTEGRATED_DP(dev)) {
10598- DRM_DEBUG_KMS("probing DP_C\n");
10599+ if (SUPPORTS_INTEGRATED_DP(dev))
10600 intel_dp_init(dev, DP_C);
10601- }
10602 }
10603
10604- if (SUPPORTS_INTEGRATED_DP(dev) &&
10605- (I915_READ(DP_D) & DP_DETECTED)) {
10606- DRM_DEBUG_KMS("probing DP_D\n");
10607+ if (SUPPORTS_INTEGRATED_DP(dev) && (I915_READ(DP_D) & DP_DETECTED))
10608 intel_dp_init(dev, DP_D);
10609- }
10610- } else if (IS_I8XX(dev))
10611+ } else
10612 intel_dvo_init(dev);
10613
10614- if (SUPPORTS_TV(dev))
10615+ if (IS_I9XX(dev) && IS_MOBILE(dev) && !IS_IGDNG(dev))
10616 intel_tv_init(dev);
10617
10618 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
10619@@ -4440,6 +4442,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
10620 del_timer_sync(&intel_crtc->idle_timer);
10621 }
10622
10623+ intel_increase_renderclock(dev, false);
10624 del_timer_sync(&dev_priv->idle_timer);
10625
10626 mutex_unlock(&dev->struct_mutex);
10627diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
10628index d487771..d834475 100644
10629--- a/drivers/gpu/drm/i915/intel_dp.c
10630+++ b/drivers/gpu/drm/i915/intel_dp.c
10631@@ -1254,11 +1254,11 @@ intel_dp_init(struct drm_device *dev, int output_reg)
10632 else
10633 intel_output->type = INTEL_OUTPUT_DISPLAYPORT;
10634
10635- if (output_reg == DP_B || output_reg == PCH_DP_B)
10636+ if (output_reg == DP_B)
10637 intel_output->clone_mask = (1 << INTEL_DP_B_CLONE_BIT);
10638- else if (output_reg == DP_C || output_reg == PCH_DP_C)
10639+ else if (output_reg == DP_C)
10640 intel_output->clone_mask = (1 << INTEL_DP_C_CLONE_BIT);
10641- else if (output_reg == DP_D || output_reg == PCH_DP_D)
10642+ else if (output_reg == DP_D)
10643 intel_output->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
10644
10645 if (IS_eDP(intel_output)) {
10646@@ -1290,20 +1290,14 @@ intel_dp_init(struct drm_device *dev, int output_reg)
10647 break;
10648 case DP_B:
10649 case PCH_DP_B:
10650- dev_priv->hotplug_supported_mask |=
10651- HDMIB_HOTPLUG_INT_STATUS;
10652 name = "DPDDC-B";
10653 break;
10654 case DP_C:
10655 case PCH_DP_C:
10656- dev_priv->hotplug_supported_mask |=
10657- HDMIC_HOTPLUG_INT_STATUS;
10658 name = "DPDDC-C";
10659 break;
10660 case DP_D:
10661 case PCH_DP_D:
10662- dev_priv->hotplug_supported_mask |=
10663- HDMID_HOTPLUG_INT_STATUS;
10664 name = "DPDDC-D";
10665 break;
10666 }
10667diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
10668index 6c7c19f..ef61fe9 100644
10669--- a/drivers/gpu/drm/i915/intel_drv.h
10670+++ b/drivers/gpu/drm/i915/intel_drv.h
10671@@ -134,8 +134,6 @@ void intel_i2c_destroy(struct i2c_adapter *adapter);
10672 int intel_ddc_get_modes(struct intel_output *intel_output);
10673 extern bool intel_ddc_probe(struct intel_output *intel_output);
10674 void intel_i2c_quirk_set(struct drm_device *dev, bool enable);
10675-void intel_i2c_reset_gmbus(struct drm_device *dev);
10676-
10677 extern void intel_crt_init(struct drm_device *dev);
10678 extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
10679 extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
10680diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
10681index 1318ac2..2b0fe54 100644
10682--- a/drivers/gpu/drm/i915/intel_fb.c
10683+++ b/drivers/gpu/drm/i915/intel_fb.c
10684@@ -148,7 +148,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
10685
10686 mutex_lock(&dev->struct_mutex);
10687
10688- ret = i915_gem_object_pin(fbo, 64*1024);
10689+ ret = i915_gem_object_pin(fbo, PAGE_SIZE);
10690 if (ret) {
10691 DRM_ERROR("failed to pin fb: %d\n", ret);
10692 goto out_unref;
10693diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
10694index 85760bf..c33451a 100644
10695--- a/drivers/gpu/drm/i915/intel_hdmi.c
10696+++ b/drivers/gpu/drm/i915/intel_hdmi.c
10697@@ -254,26 +254,21 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
10698 if (sdvox_reg == SDVOB) {
10699 intel_output->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
10700 intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB");
10701- dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
10702 } else if (sdvox_reg == SDVOC) {
10703 intel_output->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
10704 intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC");
10705- dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
10706 } else if (sdvox_reg == HDMIB) {
10707 intel_output->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
10708 intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOE,
10709 "HDMIB");
10710- dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
10711 } else if (sdvox_reg == HDMIC) {
10712 intel_output->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT);
10713 intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOD,
10714 "HDMIC");
10715- dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
10716 } else if (sdvox_reg == HDMID) {
10717 intel_output->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
10718 intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOF,
10719 "HDMID");
10720- dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
10721 }
10722 if (!intel_output->ddc_bus)
10723 goto err_connector;
10724diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
10725index b94acc4..c7eab72 100644
10726--- a/drivers/gpu/drm/i915/intel_i2c.c
10727+++ b/drivers/gpu/drm/i915/intel_i2c.c
10728@@ -118,23 +118,6 @@ static void set_data(void *data, int state_high)
10729 udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
10730 }
10731
10732-/* Clears the GMBUS setup. Our driver doesn't make use of the GMBUS I2C
10733- * engine, but if the BIOS leaves it enabled, then that can break our use
10734- * of the bit-banging I2C interfaces. This is notably the case with the
10735- * Mac Mini in EFI mode.
10736- */
10737-void
10738-intel_i2c_reset_gmbus(struct drm_device *dev)
10739-{
10740- struct drm_i915_private *dev_priv = dev->dev_private;
10741-
10742- if (IS_IGDNG(dev)) {
10743- I915_WRITE(PCH_GMBUS0, 0);
10744- } else {
10745- I915_WRITE(GMBUS0, 0);
10746- }
10747-}
10748-
10749 /**
10750 * intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
10751 * @dev: DRM device
10752@@ -185,8 +168,6 @@ struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
10753 if(i2c_bit_add_bus(&chan->adapter))
10754 goto out_free;
10755
10756- intel_i2c_reset_gmbus(dev);
10757-
10758 /* JJJ: raise SCL and SDA? */
10759 intel_i2c_quirk_set(dev, true);
10760 set_data(chan, 1);
10761diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
10762index 952bb4e..05598ae 100644
10763--- a/drivers/gpu/drm/i915/intel_lvds.c
10764+++ b/drivers/gpu/drm/i915/intel_lvds.c
10765@@ -602,33 +602,12 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
10766 /* Some lid devices report incorrect lid status, assume they're connected */
10767 static const struct dmi_system_id bad_lid_status[] = {
10768 {
10769- .ident = "Compaq nx9020",
10770- .matches = {
10771- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
10772- DMI_MATCH(DMI_BOARD_NAME, "3084"),
10773- },
10774- },
10775- {
10776- .ident = "Samsung SX20S",
10777- .matches = {
10778- DMI_MATCH(DMI_SYS_VENDOR, "Samsung Electronics"),
10779- DMI_MATCH(DMI_BOARD_NAME, "SX20S"),
10780- },
10781- },
10782- {
10783 .ident = "Aspire One",
10784 .matches = {
10785 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
10786 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire one"),
10787 },
10788 },
10789- {
10790- .ident = "PC-81005",
10791- .matches = {
10792- DMI_MATCH(DMI_SYS_VENDOR, "MALATA"),
10793- DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"),
10794- },
10795- },
10796 { }
10797 };
10798
10799@@ -700,14 +679,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
10800 struct drm_i915_private *dev_priv =
10801 container_of(nb, struct drm_i915_private, lid_notifier);
10802 struct drm_device *dev = dev_priv->dev;
10803- struct drm_connector *connector = dev_priv->int_lvds_connector;
10804
10805- /*
10806- * check and update the status of LVDS connector after receiving
10807- * the LID nofication event.
10808- */
10809- if (connector)
10810- connector->status = connector->funcs->detect(connector);
10811 if (!acpi_lid_open()) {
10812 dev_priv->modeset_on_lid = 1;
10813 return NOTIFY_OK;
10814@@ -1113,8 +1085,6 @@ out:
10815 DRM_DEBUG("lid notifier registration failed\n");
10816 dev_priv->lid_notifier.notifier_call = NULL;
10817 }
10818- /* keep the LVDS connector */
10819- dev_priv->int_lvds_connector = connector;
10820 drm_sysfs_connector_add(connector);
10821 return;
10822
10823diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
10824index 3f5aaf1..083bec2 100644
10825--- a/drivers/gpu/drm/i915/intel_sdvo.c
10826+++ b/drivers/gpu/drm/i915/intel_sdvo.c
10827@@ -472,63 +472,14 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
10828 }
10829
10830 /**
10831- * Try to read the response after issuing the DDC switch command. But it
10832- * is noted that we must do the action of reading response and issuing DDC
10833- * switch command in one I2C transaction. Otherwise when we try to start
10834- * another I2C transaction after issuing the DDC bus switch, it will be
10835- * switched to the internal SDVO register.
10836+ * Don't check status code from this as it switches the bus back to the
10837+ * SDVO chips which defeats the purpose of doing a bus switch in the first
10838+ * place.
10839 */
10840 static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output,
10841 u8 target)
10842 {
10843- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
10844- u8 out_buf[2], cmd_buf[2], ret_value[2], ret;
10845- struct i2c_msg msgs[] = {
10846- {
10847- .addr = sdvo_priv->slave_addr >> 1,
10848- .flags = 0,
10849- .len = 2,
10850- .buf = out_buf,
10851- },
10852- /* the following two are to read the response */
10853- {
10854- .addr = sdvo_priv->slave_addr >> 1,
10855- .flags = 0,
10856- .len = 1,
10857- .buf = cmd_buf,
10858- },
10859- {
10860- .addr = sdvo_priv->slave_addr >> 1,
10861- .flags = I2C_M_RD,
10862- .len = 1,
10863- .buf = ret_value,
10864- },
10865- };
10866-
10867- intel_sdvo_debug_write(intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH,
10868- &target, 1);
10869- /* write the DDC switch command argument */
10870- intel_sdvo_write_byte(intel_output, SDVO_I2C_ARG_0, target);
10871-
10872- out_buf[0] = SDVO_I2C_OPCODE;
10873- out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH;
10874- cmd_buf[0] = SDVO_I2C_CMD_STATUS;
10875- cmd_buf[1] = 0;
10876- ret_value[0] = 0;
10877- ret_value[1] = 0;
10878-
10879- ret = i2c_transfer(intel_output->i2c_bus, msgs, 3);
10880- if (ret != 3) {
10881- /* failure in I2C transfer */
10882- DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
10883- return;
10884- }
10885- if (ret_value[0] != SDVO_CMD_STATUS_SUCCESS) {
10886- DRM_DEBUG_KMS("DDC switch command returns response %d\n",
10887- ret_value[0]);
10888- return;
10889- }
10890- return;
10891+ intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH, &target, 1);
10892 }
10893
10894 static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool target_0, bool target_1)
10895@@ -1638,32 +1589,6 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
10896 edid = drm_get_edid(&intel_output->base,
10897 intel_output->ddc_bus);
10898
10899- /* This is only applied to SDVO cards with multiple outputs */
10900- if (edid == NULL && intel_sdvo_multifunc_encoder(intel_output)) {
10901- uint8_t saved_ddc, temp_ddc;
10902- saved_ddc = sdvo_priv->ddc_bus;
10903- temp_ddc = sdvo_priv->ddc_bus >> 1;
10904- /*
10905- * Don't use the 1 as the argument of DDC bus switch to get
10906- * the EDID. It is used for SDVO SPD ROM.
10907- */
10908- while(temp_ddc > 1) {
10909- sdvo_priv->ddc_bus = temp_ddc;
10910- edid = drm_get_edid(&intel_output->base,
10911- intel_output->ddc_bus);
10912- if (edid) {
10913- /*
10914- * When we can get the EDID, maybe it is the
10915- * correct DDC bus. Update it.
10916- */
10917- sdvo_priv->ddc_bus = temp_ddc;
10918- break;
10919- }
10920- temp_ddc >>= 1;
10921- }
10922- if (edid == NULL)
10923- sdvo_priv->ddc_bus = saved_ddc;
10924- }
10925 /* when there is no edid and no monitor is connected with VGA
10926 * port, try to use the CRT ddc to read the EDID for DVI-connector
10927 */
10928@@ -2743,7 +2668,6 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
10929
10930 bool intel_sdvo_init(struct drm_device *dev, int output_device)
10931 {
10932- struct drm_i915_private *dev_priv = dev->dev_private;
10933 struct drm_connector *connector;
10934 struct intel_output *intel_output;
10935 struct intel_sdvo_priv *sdvo_priv;
10936@@ -2790,12 +2714,10 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
10937 intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS");
10938 sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
10939 "SDVOB/VGA DDC BUS");
10940- dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
10941 } else {
10942 intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS");
10943 sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
10944 "SDVOC/VGA DDC BUS");
10945- dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
10946 }
10947
10948 if (intel_output->ddc_bus == NULL)
10949diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
10950index ce026f0..9ca9179 100644
10951--- a/drivers/gpu/drm/i915/intel_tv.c
10952+++ b/drivers/gpu/drm/i915/intel_tv.c
10953@@ -1213,17 +1213,20 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
10954 tv_ctl |= TV_TRILEVEL_SYNC;
10955 if (tv_mode->pal_burst)
10956 tv_ctl |= TV_PAL_BURST;
10957-
10958 scctl1 = 0;
10959- if (tv_mode->dda1_inc)
10960+ /* dda1 implies valid video levels */
10961+ if (tv_mode->dda1_inc) {
10962 scctl1 |= TV_SC_DDA1_EN;
10963+ }
10964+
10965 if (tv_mode->dda2_inc)
10966 scctl1 |= TV_SC_DDA2_EN;
10967+
10968 if (tv_mode->dda3_inc)
10969 scctl1 |= TV_SC_DDA3_EN;
10970+
10971 scctl1 |= tv_mode->sc_reset;
10972- if (video_levels)
10973- scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT;
10974+ scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT;
10975 scctl1 |= tv_mode->dda1_inc << TV_SCDDA1_INC_SHIFT;
10976
10977 scctl2 = tv_mode->dda2_size << TV_SCDDA2_SIZE_SHIFT |
10978@@ -1801,8 +1804,6 @@ intel_tv_init(struct drm_device *dev)
10979 drm_connector_attach_property(connector,
10980 dev->mode_config.tv_bottom_margin_property,
10981 tv_priv->margin[TV_MARGIN_BOTTOM]);
10982-
10983- dev_priv->hotplug_supported_mask |= TV_HOTPLUG_INT_STATUS;
10984 out:
10985 drm_sysfs_connector_add(connector);
10986 }
10987diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
10988index fed2291..d67c425 100644
10989--- a/drivers/gpu/drm/radeon/atom.c
10990+++ b/drivers/gpu/drm/radeon/atom.c
10991@@ -607,7 +607,7 @@ static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
10992 uint8_t count = U8((*ptr)++);
10993 SDEBUG(" count: %d\n", count);
10994 if (arg == ATOM_UNIT_MICROSEC)
10995- udelay(count);
10996+ schedule_timeout_uninterruptible(usecs_to_jiffies(count));
10997 else
10998 schedule_timeout_uninterruptible(msecs_to_jiffies(count));
10999 }
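
The atom_op_delay() hunk trades a busy-wait for a sleeping delay even in the microsecond case. One consequence worth noting: usecs_to_jiffies() rounds up to at least one jiffy, so a 1 µs ATOM delay now sleeps for a whole tick (1-10 ms depending on HZ); that is tolerable here because the ATOM interpreter runs in process context. The resulting behaviour, as a helper:

    /* Sleeping delay; granularity is one jiffy, process context only. */
    static void atom_sleep(uint8_t count, bool microseconds)
    {
            unsigned long j = microseconds ? usecs_to_jiffies(count)
                                           : msecs_to_jiffies(count);

            schedule_timeout_uninterruptible(j);
    }
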
11000diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
11001index 19f93f2..c15287a 100644
11002--- a/drivers/gpu/drm/radeon/atombios_crtc.c
11003+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
11004@@ -241,7 +241,6 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
11005 {
11006 struct drm_device *dev = crtc->dev;
11007 struct radeon_device *rdev = dev->dev_private;
11008- struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
11009
11010 switch (mode) {
11011 case DRM_MODE_DPMS_ON:
11012@@ -249,21 +248,20 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
11013 if (ASIC_IS_DCE3(rdev))
11014 atombios_enable_crtc_memreq(crtc, 1);
11015 atombios_blank_crtc(crtc, 0);
11016- if (rdev->family < CHIP_R600)
11017- drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
11018- radeon_crtc_load_lut(crtc);
11019 break;
11020 case DRM_MODE_DPMS_STANDBY:
11021 case DRM_MODE_DPMS_SUSPEND:
11022 case DRM_MODE_DPMS_OFF:
11023- if (rdev->family < CHIP_R600)
11024- drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
11025 atombios_blank_crtc(crtc, 1);
11026 if (ASIC_IS_DCE3(rdev))
11027 atombios_enable_crtc_memreq(crtc, 0);
11028 atombios_enable_crtc(crtc, 0);
11029 break;
11030 }
11031+
11032+ if (mode != DRM_MODE_DPMS_OFF) {
11033+ radeon_crtc_load_lut(crtc);
11034+ }
11035 }
11036
11037 static void
11038diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
11039index 969502a..2ed88a8 100644
11040--- a/drivers/gpu/drm/radeon/radeon_atombios.c
11041+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
11042@@ -135,14 +135,6 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
11043 }
11044 }
11045
11046- /* HIS X1300 is DVI+VGA, not DVI+DVI */
11047- if ((dev->pdev->device == 0x7146) &&
11048- (dev->pdev->subsystem_vendor == 0x17af) &&
11049- (dev->pdev->subsystem_device == 0x2058)) {
11050- if (supported_device == ATOM_DEVICE_DFP1_SUPPORT)
11051- return false;
11052- }
11053-
11054 /* Funky macbooks */
11055 if ((dev->pdev->device == 0x71C5) &&
11056 (dev->pdev->subsystem_vendor == 0x106b) &&
11057diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
11058index 22ce4d6..8d0b7aa 100644
11059--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
11060+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
11061@@ -292,7 +292,8 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
11062 uint32_t mask;
11063
11064 if (radeon_crtc->crtc_id)
11065- mask = (RADEON_CRTC2_DISP_DIS |
11066+ mask = (RADEON_CRTC2_EN |
11067+ RADEON_CRTC2_DISP_DIS |
11068 RADEON_CRTC2_VSYNC_DIS |
11069 RADEON_CRTC2_HSYNC_DIS |
11070 RADEON_CRTC2_DISP_REQ_EN_B);
11071@@ -304,7 +305,7 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
11072 switch (mode) {
11073 case DRM_MODE_DPMS_ON:
11074 if (radeon_crtc->crtc_id)
11075- WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~(RADEON_CRTC2_EN | mask));
11076+ WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~mask);
11077 else {
11078 WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_EN, ~(RADEON_CRTC_EN |
11079 RADEON_CRTC_DISP_REQ_EN_B));
11080@@ -318,7 +319,7 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
11081 case DRM_MODE_DPMS_OFF:
11082 drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
11083 if (radeon_crtc->crtc_id)
11084- WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~(RADEON_CRTC2_EN | mask));
11085+ WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~mask);
11086 else {
11087 WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_DISP_REQ_EN_B, ~(RADEON_CRTC_EN |
11088 RADEON_CRTC_DISP_REQ_EN_B));
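
The radeon_crtc_dpms() change hinges on WREG32_P()'s masked-write semantics. Assuming the usual radeon.h definition, the third argument names the bits to preserve, so folding RADEON_CRTC2_EN into the local mask (and then passing ~mask) lets one call set the enable bit and clear the disable bits at once:

    /* Equivalent of WREG32_P(reg, val, keep): preserve the bits in
     * 'keep', replace everything else with 'val'. Assumed semantics.
     */
    static void wreg32_p(void __iomem *regs, u32 reg, u32 val, u32 keep)
    {
            u32 tmp = readl(regs + reg);

            tmp &= keep;            /* bits to leave alone */
            tmp |= (val & ~keep);   /* bits to install */
            writel(tmp, regs + reg);
    }
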
11089diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
11090index c8942ca..f8a465d 100644
11091--- a/drivers/gpu/drm/radeon/radeon_test.c
11092+++ b/drivers/gpu/drm/radeon/radeon_test.c
11093@@ -42,8 +42,8 @@ void radeon_test_moves(struct radeon_device *rdev)
11094 /* Number of tests =
11095 * (Total GTT - IB pool - writeback page - ring buffer) / test size
11096 */
11097- n = ((u32)(rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - RADEON_GPU_PAGE_SIZE -
11098- rdev->cp.ring_size)) / size;
11099+ n = (rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - RADEON_GPU_PAGE_SIZE -
11100+ rdev->cp.ring_size) / size;
11101
11102 gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
11103 if (!gtt_obj) {
11104diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
11105index 4444f48..5f117cd 100644
11106--- a/drivers/gpu/drm/radeon/rs600.c
11107+++ b/drivers/gpu/drm/radeon/rs600.c
11108@@ -301,7 +301,9 @@ int rs600_mc_wait_for_idle(struct radeon_device *rdev)
11109
11110 void rs600_gpu_init(struct radeon_device *rdev)
11111 {
11112+ /* FIXME: HDP same place on rs600 ? */
11113 r100_hdp_reset(rdev);
11114+ /* FIXME: is this correct ? */
11115 r420_pipes_init(rdev);
11116 /* Wait for mc idle */
11117 if (rs600_mc_wait_for_idle(rdev))
11118@@ -310,20 +312,9 @@ void rs600_gpu_init(struct radeon_device *rdev)
11119
11120 void rs600_vram_info(struct radeon_device *rdev)
11121 {
11122+ /* FIXME: to do, or are these values sane? */
11123 rdev->mc.vram_is_ddr = true;
11124 rdev->mc.vram_width = 128;
11125-
11126- rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
11127- rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
11128-
11129- rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
11130- rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
11131-
11132- if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
11133- rdev->mc.mc_vram_size = rdev->mc.aper_size;
11134-
11135- if (rdev->mc.real_vram_size > rdev->mc.aper_size)
11136- rdev->mc.real_vram_size = rdev->mc.aper_size;
11137 }
11138
11139 void rs600_bandwidth_update(struct radeon_device *rdev)
11140diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
11141index b12ff76..2754717 100644
11142--- a/drivers/gpu/drm/radeon/rs690.c
11143+++ b/drivers/gpu/drm/radeon/rs690.c
11144@@ -131,25 +131,24 @@ void rs690_pm_info(struct radeon_device *rdev)
11145
11146 void rs690_vram_info(struct radeon_device *rdev)
11147 {
11148+ uint32_t tmp;
11149 fixed20_12 a;
11150
11151 rs400_gart_adjust_size(rdev);
11152-
11153+ /* DDR for all cards after R300 & IGP */
11154 rdev->mc.vram_is_ddr = true;
11155- rdev->mc.vram_width = 128;
11156-
11157+ /* FIXME: is this correct for RS690/RS740 ? */
11158+ tmp = RREG32(RADEON_MEM_CNTL);
11159+ if (tmp & R300_MEM_NUM_CHANNELS_MASK) {
11160+ rdev->mc.vram_width = 128;
11161+ } else {
11162+ rdev->mc.vram_width = 64;
11163+ }
11164 rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
11165 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
11166
11167 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
11168 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
11169-
11170- if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
11171- rdev->mc.mc_vram_size = rdev->mc.aper_size;
11172-
11173- if (rdev->mc.real_vram_size > rdev->mc.aper_size)
11174- rdev->mc.real_vram_size = rdev->mc.aper_size;
11175-
11176 rs690_pm_info(rdev);
11177 /* FIXME: we should enforce default clock in case GPU is not in
11178 * default setup
11179diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
11180index 5b4d66d..4b96e7a 100644
11181--- a/drivers/hid/hid-apple.c
11182+++ b/drivers/hid/hid-apple.c
11183@@ -431,13 +431,6 @@ static const struct hid_device_id apple_devices[] = {
11184 .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
11185 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS),
11186 .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
11187- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
11188- .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
11189- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
11190- .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN |
11191- APPLE_ISO_KEYBOARD },
11192- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS),
11193- .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
11194 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY),
11195 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
11196 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY),
11197diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
11198index 9678354..7d05c4b 100644
11199--- a/drivers/hid/hid-core.c
11200+++ b/drivers/hid/hid-core.c
11201@@ -1287,9 +1287,6 @@ static const struct hid_device_id hid_blacklist[] = {
11202 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI) },
11203 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ISO) },
11204 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS) },
11205- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
11206- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
11207- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
11208 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
11209 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
11210 { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
11211diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
11212index e380e7b..adbef5d 100644
11213--- a/drivers/hid/hid-ids.h
11214+++ b/drivers/hid/hid-ids.h
11215@@ -88,9 +88,6 @@
11216 #define USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI 0x0236
11217 #define USB_DEVICE_ID_APPLE_WELLSPRING3_ISO 0x0237
11218 #define USB_DEVICE_ID_APPLE_WELLSPRING3_JIS 0x0238
11219-#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI 0x0239
11220-#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO 0x023a
11221-#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b
11222 #define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a
11223 #define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b
11224 #define USB_DEVICE_ID_APPLE_ATV_IRCONTROL 0x8241
11225diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
11226index 5d901f6..03bd703 100644
11227--- a/drivers/hid/usbhid/hid-core.c
11228+++ b/drivers/hid/usbhid/hid-core.c
11229@@ -998,8 +998,7 @@ static int usbhid_start(struct hid_device *hid)
11230 usbhid->urbctrl->transfer_dma = usbhid->ctrlbuf_dma;
11231 usbhid->urbctrl->transfer_flags |= (URB_NO_TRANSFER_DMA_MAP | URB_NO_SETUP_DMA_MAP);
11232
11233- if (!(hid->quirks & HID_QUIRK_NO_INIT_REPORTS))
11234- usbhid_init_reports(hid);
11235+ usbhid_init_reports(hid);
11236
11237 set_bit(HID_STARTED, &usbhid->iofl);
11238
11239diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
11240index 5713b93..0d9045a 100644
11241--- a/drivers/hid/usbhid/hid-quirks.c
11242+++ b/drivers/hid/usbhid/hid-quirks.c
11243@@ -280,7 +280,7 @@ u32 usbhid_lookup_quirk(const u16 idVendor, const u16 idProduct)
11244 if (idVendor == USB_VENDOR_ID_NCR &&
11245 idProduct >= USB_DEVICE_ID_NCR_FIRST &&
11246 idProduct <= USB_DEVICE_ID_NCR_LAST)
11247- return HID_QUIRK_NO_INIT_REPORTS;
11248+ return HID_QUIRK_NOGET;
11249
11250 down_read(&dquirks_rwsem);
11251 bl_entry = usbhid_exists_dquirk(idVendor, idProduct);
11252diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
11253index c1f7ea0..700e93a 100644
11254--- a/drivers/hwmon/Kconfig
11255+++ b/drivers/hwmon/Kconfig
11256@@ -374,7 +374,7 @@ config SENSORS_GL520SM
11257
11258 config SENSORS_CORETEMP
11259 tristate "Intel Core/Core2/Atom temperature sensor"
11260- depends on X86 && PCI && EXPERIMENTAL
11261+ depends on X86 && EXPERIMENTAL
11262 help
11263 If you say yes here you get support for the temperature
11264 sensor inside your CPU. Most of the family 6 CPUs
11265diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c
11266index 14f910d..1852f27 100644
11267--- a/drivers/hwmon/adt7462.c
11268+++ b/drivers/hwmon/adt7462.c
11269@@ -97,7 +97,7 @@ I2C_CLIENT_INSMOD_1(adt7462);
11270 #define ADT7462_PIN24_SHIFT 6
11271 #define ADT7462_PIN26_VOLT_INPUT 0x08
11272 #define ADT7462_PIN25_VOLT_INPUT 0x20
11273-#define ADT7462_PIN28_SHIFT 4 /* cfg3 */
11274+#define ADT7462_PIN28_SHIFT 6 /* cfg3 */
11275 #define ADT7462_PIN28_VOLT 0x5
11276
11277 #define ADT7462_REG_ALARM1 0xB8
11278@@ -182,7 +182,7 @@ I2C_CLIENT_INSMOD_1(adt7462);
11279 *
11280 * Some, but not all, of these voltages have low/high limits.
11281 */
11282-#define ADT7462_VOLT_COUNT 13
11283+#define ADT7462_VOLT_COUNT 12
11284
11285 #define ADT7462_VENDOR 0x41
11286 #define ADT7462_DEVICE 0x62
11287diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
11288index 2d7bcee..caef39c 100644
11289--- a/drivers/hwmon/coretemp.c
11290+++ b/drivers/hwmon/coretemp.c
11291@@ -33,7 +33,6 @@
11292 #include <linux/list.h>
11293 #include <linux/platform_device.h>
11294 #include <linux/cpu.h>
11295-#include <linux/pci.h>
11296 #include <asm/msr.h>
11297 #include <asm/processor.h>
11298
11299@@ -162,7 +161,6 @@ static int __devinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *
11300 int usemsr_ee = 1;
11301 int err;
11302 u32 eax, edx;
11303- struct pci_dev *host_bridge;
11304
11305 /* Early chips have no MSR for TjMax */
11306
11307@@ -170,21 +168,11 @@ static int __devinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *
11308 usemsr_ee = 0;
11309 }
11310
11311- /* Atom CPUs */
11312+ /* Atoms seem to have TjMax at 90C */
11313
11314 if (c->x86_model == 0x1c) {
11315 usemsr_ee = 0;
11316-
11317- host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
11318-
11319- if (host_bridge && host_bridge->vendor == PCI_VENDOR_ID_INTEL
11320- && (host_bridge->device == 0xa000 /* NM10 based nettop */
11321- || host_bridge->device == 0xa010)) /* NM10 based netbook */
11322- tjmax = 100000;
11323- else
11324- tjmax = 90000;
11325-
11326- pci_dev_put(host_bridge);
11327+ tjmax = 90000;
11328 }
11329
11330 if ((c->x86_model > 0xe) && (usemsr_ee)) {
11331diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c
11332index f600813..da1b1f9 100644
11333--- a/drivers/hwmon/fschmd.c
11334+++ b/drivers/hwmon/fschmd.c
11335@@ -767,7 +767,6 @@ leave:
11336 static int watchdog_open(struct inode *inode, struct file *filp)
11337 {
11338 struct fschmd_data *pos, *data = NULL;
11339- int watchdog_is_open;
11340
11341 /* We get called from drivers/char/misc.c with misc_mtx hold, and we
11342 call misc_register() from fschmd_probe() with watchdog_data_mutex
11343@@ -782,12 +781,10 @@ static int watchdog_open(struct inode *inode, struct file *filp)
11344 }
11345 }
11346 /* Note we can never not have found data, so we don't check for this */
11347- watchdog_is_open = test_and_set_bit(0, &data->watchdog_is_open);
11348- if (!watchdog_is_open)
11349- kref_get(&data->kref);
11350+ kref_get(&data->kref);
11351 mutex_unlock(&watchdog_data_mutex);
11352
11353- if (watchdog_is_open)
11354+ if (test_and_set_bit(0, &data->watchdog_is_open))
11355 return -EBUSY;
11356
11357 /* Start the watchdog */
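
The `-` side of the fschmd hunk is the race-safe ordering: the open bit is claimed, and the reference taken only for a genuinely new opener, while watchdog_data_mutex is still held, so a racing second open can no longer grab a kref it never puts. The idiom, with a hypothetical data struct carrying the bit and the kref:

    static int single_open_claim(struct example_data *data, struct mutex *lock)
    {
            int was_open;

            mutex_lock(lock);
            was_open = test_and_set_bit(0, &data->watchdog_is_open);
            if (!was_open)
                    kref_get(&data->kref);  /* first opener owns a reference */
            mutex_unlock(lock);

            return was_open ? -EBUSY : 0;
    }
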
11358diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
11359index 1508e0a..f7e7016 100644
11360--- a/drivers/hwmon/lm78.c
11361+++ b/drivers/hwmon/lm78.c
11362@@ -870,16 +870,17 @@ static struct lm78_data *lm78_update_device(struct device *dev)
11363 static int __init lm78_isa_found(unsigned short address)
11364 {
11365 int val, save, found = 0;
11366- int port;
11367-
11368- /* Some boards declare base+0 to base+7 as a PNP device, some base+4
11369- * to base+7 and some base+5 to base+6. So we better request each port
11370- * individually for the probing phase. */
11371- for (port = address; port < address + LM78_EXTENT; port++) {
11372- if (!request_region(port, 1, "lm78")) {
11373- pr_debug("lm78: Failed to request port 0x%x\n", port);
11374- goto release;
11375- }
11376+
11377+ /* We have to request the region in two parts because some
11378+ boards declare base+4 to base+7 as a PNP device */
11379+ if (!request_region(address, 4, "lm78")) {
11380+ pr_debug("lm78: Failed to request low part of region\n");
11381+ return 0;
11382+ }
11383+ if (!request_region(address + 4, 4, "lm78")) {
11384+ pr_debug("lm78: Failed to request high part of region\n");
11385+ release_region(address, 4);
11386+ return 0;
11387 }
11388
11389 #define REALLY_SLOW_IO
11390@@ -943,8 +944,8 @@ static int __init lm78_isa_found(unsigned short address)
11391 val & 0x80 ? "LM79" : "LM78", (int)address);
11392
11393 release:
11394- for (port--; port >= address; port--)
11395- release_region(port, 1);
11396+ release_region(address + 4, 4);
11397+ release_region(address, 4);
11398 return found;
11399 }
11400
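
Both strategies for claiming the LM78 probe range appear in this hunk: the two-part request on the incoming side (base..base+3 and base+4..base+7, for boards that declare base+4..base+7 as a PNP device) and the per-port loop on the outgoing side, which also copes with boards that declare base+5..base+6. The per-port variant, which unwinds cleanly on any failure:

    static int claim_probe_ports(unsigned short base, int extent,
                                 const char *name)
    {
            int port;

            for (port = base; port < base + extent; port++) {
                    if (!request_region(port, 1, name))
                            goto release;  /* some sub-range is already owned */
            }
            return 1;

    release:
            for (port--; port >= base; port--)
                    release_region(port, 1);
            return 0;
    }
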
11401diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
11402index 864a371..ebe38b6 100644
11403--- a/drivers/hwmon/sht15.c
11404+++ b/drivers/hwmon/sht15.c
11405@@ -305,7 +305,7 @@ static inline int sht15_calc_temp(struct sht15_data *data)
11406 int d1 = 0;
11407 int i;
11408
11409- for (i = 1; i < ARRAY_SIZE(temppoints); i++)
11410+ for (i = 1; i < ARRAY_SIZE(temppoints) - 1; i++)
11411 /* Find pointer to interpolate */
11412 if (data->supply_uV > temppoints[i - 1].vdd) {
11413 d1 = (data->supply_uV/1000 - temppoints[i - 1].vdd)
11414@@ -332,12 +332,12 @@ static inline int sht15_calc_humid(struct sht15_data *data)
11415
11416 const int c1 = -4;
11417 const int c2 = 40500; /* x 10 ^ -6 */
11418- const int c3 = -2800; /* x10 ^ -9 */
11419+ const int c3 = 2800; /* x10 ^ -9 */
11420
11421 RHlinear = c1*1000
11422 + c2 * data->val_humid/1000
11423 + (data->val_humid * data->val_humid * c3)/1000000;
11424- return (temp - 25000) * (10000 + 80 * data->val_humid)
11425+ return (temp - 25000) * (10000 + 800 * data->val_humid)
11426 / 1000000 + RHlinear;
11427 }
11428
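
The constants in sht15_calc_humid() are fixed-point scalings of the SHT1x datasheet coefficients. Assuming the datasheet values (c1 = -4, c2 = 0.0405, c3 = -2.8e-6 for the linearisation; t1 = 0.01 and t2 = 0.00008 for temperature compensation), the `-` side of this hunk (c3 = -2800 at the 1e-9 scale, 80 at the 1e-6 scale) is the datasheet-consistent one. A floating-point reference of the conversion:

    /* SHT1x humidity conversion per the datasheet (values assumed).
     * so_rh is the raw humidity reading, temp_c the temperature in C.
     */
    static double sht15_rh_true(double so_rh, double temp_c)
    {
            const double c1 = -4.0, c2 = 0.0405, c3 = -2.8e-6;
            const double t1 = 0.01, t2 = 0.00008;
            double rh_linear = c1 + c2 * so_rh + c3 * so_rh * so_rh;

            return (temp_c - 25.0) * (t1 + t2 * so_rh) + rh_linear;
    }
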
11429diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c
11430index f0b6883..d27ed1b 100644
11431--- a/drivers/hwmon/w83781d.c
11432+++ b/drivers/hwmon/w83781d.c
11433@@ -1818,17 +1818,17 @@ static int __init
11434 w83781d_isa_found(unsigned short address)
11435 {
11436 int val, save, found = 0;
11437- int port;
11438-
11439- /* Some boards declare base+0 to base+7 as a PNP device, some base+4
11440- * to base+7 and some base+5 to base+6. So we better request each port
11441- * individually for the probing phase. */
11442- for (port = address; port < address + W83781D_EXTENT; port++) {
11443- if (!request_region(port, 1, "w83781d")) {
11444- pr_debug("w83781d: Failed to request port 0x%x\n",
11445- port);
11446- goto release;
11447- }
11448+
11449+ /* We have to request the region in two parts because some
11450+ boards declare base+4 to base+7 as a PNP device */
11451+ if (!request_region(address, 4, "w83781d")) {
11452+ pr_debug("w83781d: Failed to request low part of region\n");
11453+ return 0;
11454+ }
11455+ if (!request_region(address + 4, 4, "w83781d")) {
11456+ pr_debug("w83781d: Failed to request high part of region\n");
11457+ release_region(address, 4);
11458+ return 0;
11459 }
11460
11461 #define REALLY_SLOW_IO
11462@@ -1902,8 +1902,8 @@ w83781d_isa_found(unsigned short address)
11463 val == 0x30 ? "W83782D" : "W83781D", (int)address);
11464
11465 release:
11466- for (port--; port >= address; port--)
11467- release_region(port, 1);
11468+ release_region(address + 4, 4);
11469+ release_region(address, 4);
11470 return found;
11471 }
11472
11473diff --git a/drivers/i2c/busses/i2c-pca-isa.c b/drivers/i2c/busses/i2c-pca-isa.c
11474index f7346a9..0ed68e2 100644
11475--- a/drivers/i2c/busses/i2c-pca-isa.c
11476+++ b/drivers/i2c/busses/i2c-pca-isa.c
11477@@ -75,7 +75,7 @@ static int pca_isa_waitforcompletion(void *pd)
11478 unsigned long timeout;
11479
11480 if (irq > -1) {
11481- ret = wait_event_timeout(pca_wait,
11482+ ret = wait_event_interruptible_timeout(pca_wait,
11483 pca_isa_readbyte(pd, I2C_PCA_CON)
11484 & I2C_PCA_CON_SI, pca_isa_ops.timeout);
11485 } else {
11486@@ -96,7 +96,7 @@ static void pca_isa_resetchip(void *pd)
11487 }
11488
11489 static irqreturn_t pca_handler(int this_irq, void *dev_id) {
11490- wake_up(&pca_wait);
11491+ wake_up_interruptible(&pca_wait);
11492 return IRQ_HANDLED;
11493 }
11494
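
The pca-isa change swaps the plain waitqueue pair for the interruptible one. The underlying pattern is a waitqueue completed from the interrupt handler; the interruptible variant additionally returns early when a signal is pending, which the caller must treat as "not completed". A minimal sketch (si_pending stands in for reading the controller's SI bit):

    static DECLARE_WAIT_QUEUE_HEAD(example_wait);
    static int si_pending;

    static irqreturn_t example_handler(int irq, void *dev_id)
    {
            si_pending = 1;
            wake_up_interruptible(&example_wait);
            return IRQ_HANDLED;
    }

    /* < 0: interrupted by a signal, 0: timed out, > 0: completed */
    static long wait_for_si(unsigned long timeout)
    {
            return wait_event_interruptible_timeout(example_wait,
                                                    si_pending, timeout);
    }
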
11495diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c
11496index 5b2213d..c4df9d4 100644
11497--- a/drivers/i2c/busses/i2c-pca-platform.c
11498+++ b/drivers/i2c/busses/i2c-pca-platform.c
11499@@ -84,7 +84,7 @@ static int i2c_pca_pf_waitforcompletion(void *pd)
11500 unsigned long timeout;
11501
11502 if (i2c->irq) {
11503- ret = wait_event_timeout(i2c->wait,
11504+ ret = wait_event_interruptible_timeout(i2c->wait,
11505 i2c->algo_data.read_byte(i2c, I2C_PCA_CON)
11506 & I2C_PCA_CON_SI, i2c->adap.timeout);
11507 } else {
11508@@ -122,7 +122,7 @@ static irqreturn_t i2c_pca_pf_handler(int this_irq, void *dev_id)
11509 if ((i2c->algo_data.read_byte(i2c, I2C_PCA_CON) & I2C_PCA_CON_SI) == 0)
11510 return IRQ_NONE;
11511
11512- wake_up(&i2c->wait);
11513+ wake_up_interruptible(&i2c->wait);
11514
11515 return IRQ_HANDLED;
11516 }
11517diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c
11518index e29b6d5..b1c050f 100644
11519--- a/drivers/i2c/busses/i2c-tiny-usb.c
11520+++ b/drivers/i2c/busses/i2c-tiny-usb.c
11521@@ -13,7 +13,6 @@
11522 #include <linux/kernel.h>
11523 #include <linux/errno.h>
11524 #include <linux/module.h>
11525-#include <linux/types.h>
11526
11527 /* include interfaces to usb layer */
11528 #include <linux/usb.h>
11529@@ -32,8 +31,8 @@
11530 #define CMD_I2C_IO_END (1<<1)
11531
11532 /* i2c bit delay, default is 10us -> 100kHz */
11533-static unsigned short delay = 10;
11534-module_param(delay, ushort, 0);
11535+static int delay = 10;
11536+module_param(delay, int, 0);
11537 MODULE_PARM_DESC(delay, "bit delay in microseconds, "
11538 "e.g. 10 for 100kHz (default is 100kHz)");
11539
11540@@ -110,7 +109,7 @@ static int usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
11541
11542 static u32 usb_func(struct i2c_adapter *adapter)
11543 {
11544- __le32 func;
11545+ u32 func;
11546
11547 /* get functionality from adapter */
11548 if (usb_read(adapter, CMD_GET_FUNC, 0, 0, &func, sizeof(func)) !=
11549@@ -119,7 +118,7 @@ static u32 usb_func(struct i2c_adapter *adapter)
11550 return 0;
11551 }
11552
11553- return le32_to_cpu(func);
11554+ return func;
11555 }
11556
11557 /* This is the actual algorithm we define */
11558@@ -217,7 +216,8 @@ static int i2c_tiny_usb_probe(struct usb_interface *interface,
11559 "i2c-tiny-usb at bus %03d device %03d",
11560 dev->usb_dev->bus->busnum, dev->usb_dev->devnum);
11561
11562- if (usb_write(&dev->adapter, CMD_SET_DELAY, delay, 0, NULL, 0) != 0) {
11563+ if (usb_write(&dev->adapter, CMD_SET_DELAY,
11564+ cpu_to_le16(delay), 0, NULL, 0) != 0) {
11565 dev_err(&dev->adapter.dev,
11566 "failure setting delay to %dus\n", delay);
11567 retval = -EIO;
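
Both i2c-tiny-usb changes in this file are byte-order fixes: the functionality word arrives little-endian on the wire (hence the __le32 storage and le32_to_cpu() on one side of the hunk), and the delay sent in the control request needs the same care on the way out. The inbound conversion step, sketched against the driver's usb_read() helper:

    static u32 get_func_example(struct i2c_adapter *adapter)
    {
            __le32 func;  /* storage matches the wire format */

            if (usb_read(adapter, CMD_GET_FUNC, 0, 0,
                         &func, sizeof(func)) != sizeof(func))
                    return 0;

            return le32_to_cpu(func);  /* host byte order from here on */
    }
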
11568diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
11569index 3bf7b0a..2965043 100644
11570--- a/drivers/i2c/i2c-core.c
11571+++ b/drivers/i2c/i2c-core.c
11572@@ -801,9 +801,6 @@ int i2c_del_adapter(struct i2c_adapter *adap)
11573 adap->dev.parent);
11574 #endif
11575
11576- /* device name is gone after device_unregister */
11577- dev_dbg(&adap->dev, "adapter [%s] unregistered\n", adap->name);
11578-
11579 /* clean up the sysfs representation */
11580 init_completion(&adap->dev_released);
11581 device_unregister(&adap->dev);
11582@@ -816,6 +813,8 @@ int i2c_del_adapter(struct i2c_adapter *adap)
11583 idr_remove(&i2c_adapter_idr, adap->nr);
11584 mutex_unlock(&core_lock);
11585
11586+ dev_dbg(&adap->dev, "adapter [%s] unregistered\n", adap->name);
11587+
11588 /* Clear the device structure in case this adapter is ever going to be
11589 added again */
11590 memset(&adap->dev, 0, sizeof(adap->dev));
11591diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
11592index 1ccfb40..9aec78d 100644
11593--- a/drivers/ide/slc90e66.c
11594+++ b/drivers/ide/slc90e66.c
11595@@ -91,7 +91,8 @@ static void slc90e66_set_dma_mode(ide_drive_t *drive, const u8 speed)
11596
11597 if (!(reg48 & u_flag))
11598 pci_write_config_word(dev, 0x48, reg48|u_flag);
11599- if ((reg4a & a_speed) != u_speed) {
11600+ /* FIXME: (reg4a & a_speed) ? */
11601+ if ((reg4a & u_speed) != u_speed) {
11602 pci_write_config_word(dev, 0x4a, reg4a & ~a_speed);
11603 pci_read_config_word(dev, 0x4a, &reg4a);
11604 pci_write_config_word(dev, 0x4a, reg4a|u_speed);
11605diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
11606index 100da85..b368406 100644
11607--- a/drivers/infiniband/hw/ipath/ipath_fs.c
11608+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
11609@@ -346,8 +346,10 @@ static int ipathfs_fill_super(struct super_block *sb, void *data,
11610 list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
11611 spin_unlock_irqrestore(&ipath_devs_lock, flags);
11612 ret = create_device_files(sb, dd);
11613- if (ret)
11614+ if (ret) {
11615+ deactivate_locked_super(sb);
11616 goto bail;
11617+ }
11618 spin_lock_irqsave(&ipath_devs_lock, flags);
11619 }
11620
11621diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
11622index df3eb8c..2bf5116 100644
11623--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
11624+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
11625@@ -884,7 +884,6 @@ struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neighbour,
11626
11627 neigh->neighbour = neighbour;
11628 neigh->dev = dev;
11629- memset(&neigh->dgid.raw, 0, sizeof (union ib_gid));
11630 *to_ipoib_neigh(neighbour) = neigh;
11631 skb_queue_head_init(&neigh->queue);
11632 ipoib_cm_set(neigh, NULL);
11633diff --git a/drivers/input/misc/winbond-cir.c b/drivers/input/misc/winbond-cir.c
11634index c8f5a9a..33309fe 100644
11635--- a/drivers/input/misc/winbond-cir.c
11636+++ b/drivers/input/misc/winbond-cir.c
11637@@ -768,7 +768,7 @@ wbcir_parse_rc6(struct device *dev, struct wbcir_data *data)
11638 return;
11639 }
11640
11641- dev_dbg(dev, "IR-RC6 ad 0x%02X cm 0x%02X cu 0x%04X "
11642+ dev_info(dev, "IR-RC6 ad 0x%02X cm 0x%02X cu 0x%04X "
11643 "toggle %u mode %u scan 0x%08X\n",
11644 address,
11645 command,
11646diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
11647index fc8823b..f361106 100644
11648--- a/drivers/input/mouse/alps.c
11649+++ b/drivers/input/mouse/alps.c
11650@@ -5,7 +5,6 @@
11651 * Copyright (c) 2003-2005 Peter Osterlund <petero2@telia.com>
11652 * Copyright (c) 2004 Dmitry Torokhov <dtor@mail.ru>
11653 * Copyright (c) 2005 Vojtech Pavlik <vojtech@suse.cz>
11654- * Copyright (c) 2009 Sebastian Kapfer <sebastian_kapfer@gmx.net>
11655 *
11656 * ALPS detection, tap switching and status querying info is taken from
11657 * tpconfig utility (by C. Scott Ananian and Bruce Kall).
11658@@ -36,8 +35,6 @@
11659 #define ALPS_OLDPROTO 0x10
11660 #define ALPS_PASS 0x20
11661 #define ALPS_FW_BK_2 0x40
11662-#define ALPS_PS2_INTERLEAVED 0x80 /* 3-byte PS/2 packet interleaved with
11663- 6-byte ALPS packet */
11664
11665 static const struct alps_model_info alps_model_data[] = {
10666 { { 0x32, 0x02, 0x14 }, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* Toshiba Satellite Pro M10 */
11667@@ -58,9 +55,7 @@ static const struct alps_model_info alps_model_data[] = {
11668 { { 0x20, 0x02, 0x0e }, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* XXX */
11669 { { 0x22, 0x02, 0x0a }, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT },
11670 { { 0x22, 0x02, 0x14 }, 0xff, 0xff, ALPS_PASS | ALPS_DUALPOINT }, /* Dell Latitude D600 */
11671- /* Dell Latitude E5500, E6400, E6500, Precision M4400 */
11672- { { 0x62, 0x02, 0x14 }, 0xcf, 0xcf,
11673- ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED },
11674+ { { 0x62, 0x02, 0x14 }, 0xcf, 0xcf, ALPS_PASS | ALPS_DUALPOINT }, /* Dell Latitude E6500 */
11675 { { 0x73, 0x02, 0x50 }, 0xcf, 0xcf, ALPS_FW_BK_1 }, /* Dell Vostro 1400 */
11676 };
11677
11678@@ -71,88 +66,20 @@ static const struct alps_model_info alps_model_data[] = {
11679 */
11680
11681 /*
11682- * PS/2 packet format
11683- *
11684- * byte 0: 0 0 YSGN XSGN 1 M R L
11685- * byte 1: X7 X6 X5 X4 X3 X2 X1 X0
11686- * byte 2: Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0
11687- *
11688- * Note that the device never signals overflow condition.
11689- *
11690- * ALPS absolute Mode - new format
11691+ * ALPS abolute Mode - new format
11692 *
11693 * byte 0: 1 ? ? ? 1 ? ? ?
11694 * byte 1: 0 x6 x5 x4 x3 x2 x1 x0
11695- * byte 2: 0 x10 x9 x8 x7 ? fin ges
11696+ * byte 2: 0 x10 x9 x8 x7 ? fin ges
11697 * byte 3: 0 y9 y8 y7 1 M R L
11698 * byte 4: 0 y6 y5 y4 y3 y2 y1 y0
11699 * byte 5: 0 z6 z5 z4 z3 z2 z1 z0
11700 *
11701- * Dualpoint device -- interleaved packet format
11702- *
11703- * byte 0: 1 1 0 0 1 1 1 1
11704- * byte 1: 0 x6 x5 x4 x3 x2 x1 x0
11705- * byte 2: 0 x10 x9 x8 x7 0 fin ges
11706- * byte 3: 0 0 YSGN XSGN 1 1 1 1
11707- * byte 4: X7 X6 X5 X4 X3 X2 X1 X0
11708- * byte 5: Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0
11709- * byte 6: 0 y9 y8 y7 1 m r l
11710- * byte 7: 0 y6 y5 y4 y3 y2 y1 y0
11711- * byte 8: 0 z6 z5 z4 z3 z2 z1 z0
11712- *
11713- * CAPITALS = stick, miniscules = touchpad
11714- *
11715 * ?'s can have different meanings on different models,
11716 * such as wheel rotation, extra buttons, stick buttons
11717 * on a dualpoint, etc.
11718 */
11719
11720-static bool alps_is_valid_first_byte(const struct alps_model_info *model,
11721- unsigned char data)
11722-{
11723- return (data & model->mask0) == model->byte0;
11724-}
11725-
11726-static void alps_report_buttons(struct psmouse *psmouse,
11727- struct input_dev *dev1, struct input_dev *dev2,
11728- int left, int right, int middle)
11729-{
11730- struct alps_data *priv = psmouse->private;
11731- const struct alps_model_info *model = priv->i;
11732-
11733- if (model->flags & ALPS_PS2_INTERLEAVED) {
11734- struct input_dev *dev;
11735-
11736- /*
11737- * If shared button has already been reported on the
11738- * other device (dev2) then this event should be also
11739- * sent through that device.
11740- */
11741- dev = test_bit(BTN_LEFT, dev2->key) ? dev2 : dev1;
11742- input_report_key(dev, BTN_LEFT, left);
11743-
11744- dev = test_bit(BTN_RIGHT, dev2->key) ? dev2 : dev1;
11745- input_report_key(dev, BTN_RIGHT, right);
11746-
11747- dev = test_bit(BTN_MIDDLE, dev2->key) ? dev2 : dev1;
11748- input_report_key(dev, BTN_MIDDLE, middle);
11749-
11750- /*
11751- * Sync the _other_ device now, we'll do the first
11752- * device later once we report the rest of the events.
11753- */
11754- input_sync(dev2);
11755- } else {
11756- /*
11757- * For devices with non-interleaved packets we know what
11758- * device buttons belong to so we can simply report them.
11759- */
11760- input_report_key(dev1, BTN_LEFT, left);
11761- input_report_key(dev1, BTN_RIGHT, right);
11762- input_report_key(dev1, BTN_MIDDLE, middle);
11763- }
11764-}
11765-
11766 static void alps_process_packet(struct psmouse *psmouse)
11767 {
11768 struct alps_data *priv = psmouse->private;
11769@@ -162,6 +89,18 @@ static void alps_process_packet(struct psmouse *psmouse)
11770 int x, y, z, ges, fin, left, right, middle;
11771 int back = 0, forward = 0;
11772
11773+ if ((packet[0] & 0xc8) == 0x08) { /* 3-byte PS/2 packet */
11774+ input_report_key(dev2, BTN_LEFT, packet[0] & 1);
11775+ input_report_key(dev2, BTN_RIGHT, packet[0] & 2);
11776+ input_report_key(dev2, BTN_MIDDLE, packet[0] & 4);
11777+ input_report_rel(dev2, REL_X,
11778+ packet[1] ? packet[1] - ((packet[0] << 4) & 0x100) : 0);
11779+ input_report_rel(dev2, REL_Y,
11780+ packet[2] ? ((packet[0] << 3) & 0x100) - packet[2] : 0);
11781+ input_sync(dev2);
11782+ return;
11783+ }
11784+
11785 if (priv->i->flags & ALPS_OLDPROTO) {
11786 left = packet[2] & 0x10;
11787 right = packet[2] & 0x08;
11788@@ -197,13 +136,18 @@ static void alps_process_packet(struct psmouse *psmouse)
11789 input_report_rel(dev2, REL_X, (x > 383 ? (x - 768) : x));
11790 input_report_rel(dev2, REL_Y, -(y > 255 ? (y - 512) : y));
11791
11792- alps_report_buttons(psmouse, dev2, dev, left, right, middle);
11793+ input_report_key(dev2, BTN_LEFT, left);
11794+ input_report_key(dev2, BTN_RIGHT, right);
11795+ input_report_key(dev2, BTN_MIDDLE, middle);
11796
11797+ input_sync(dev);
11798 input_sync(dev2);
11799 return;
11800 }
11801
11802- alps_report_buttons(psmouse, dev, dev2, left, right, middle);
11803+ input_report_key(dev, BTN_LEFT, left);
11804+ input_report_key(dev, BTN_RIGHT, right);
11805+ input_report_key(dev, BTN_MIDDLE, middle);
11806
11807 /* Convert hardware tap to a reasonable Z value */
11808 if (ges && !fin) z = 40;
11809@@ -244,168 +188,25 @@ static void alps_process_packet(struct psmouse *psmouse)
11810 input_sync(dev);
11811 }
11812
11813-static void alps_report_bare_ps2_packet(struct psmouse *psmouse,
11814- unsigned char packet[],
11815- bool report_buttons)
11816-{
11817- struct alps_data *priv = psmouse->private;
11818- struct input_dev *dev2 = priv->dev2;
11819-
11820- if (report_buttons)
11821- alps_report_buttons(psmouse, dev2, psmouse->dev,
11822- packet[0] & 1, packet[0] & 2, packet[0] & 4);
11823-
11824- input_report_rel(dev2, REL_X,
11825- packet[1] ? packet[1] - ((packet[0] << 4) & 0x100) : 0);
11826- input_report_rel(dev2, REL_Y,
11827- packet[2] ? ((packet[0] << 3) & 0x100) - packet[2] : 0);
11828-
11829- input_sync(dev2);
11830-}
11831-
11832-static psmouse_ret_t alps_handle_interleaved_ps2(struct psmouse *psmouse)
11833-{
11834- struct alps_data *priv = psmouse->private;
11835-
11836- if (psmouse->pktcnt < 6)
11837- return PSMOUSE_GOOD_DATA;
11838-
11839- if (psmouse->pktcnt == 6) {
11840- /*
11841- * Start a timer to flush the packet if it ends up last
11842-		 * 6-byte packet in the stream. The timer needs to fire before
11843-		 * the psmouse core times out itself. 20 ms should be enough
11844- * to decide if we are getting more data or not.
11845- */
11846- mod_timer(&priv->timer, jiffies + msecs_to_jiffies(20));
11847- return PSMOUSE_GOOD_DATA;
11848- }
11849-
11850- del_timer(&priv->timer);
11851-
11852- if (psmouse->packet[6] & 0x80) {
11853-
11854- /*
11855- * Highest bit is set - that means we either had
11856- * complete ALPS packet and this is start of the
11857- * next packet or we got garbage.
11858- */
11859-
11860- if (((psmouse->packet[3] |
11861- psmouse->packet[4] |
11862- psmouse->packet[5]) & 0x80) ||
11863- (!alps_is_valid_first_byte(priv->i, psmouse->packet[6]))) {
11864- dbg("refusing packet %x %x %x %x "
11865- "(suspected interleaved ps/2)\n",
11866- psmouse->packet[3], psmouse->packet[4],
11867- psmouse->packet[5], psmouse->packet[6]);
11868- return PSMOUSE_BAD_DATA;
11869- }
11870-
11871- alps_process_packet(psmouse);
11872-
11873- /* Continue with the next packet */
11874- psmouse->packet[0] = psmouse->packet[6];
11875- psmouse->pktcnt = 1;
11876-
11877- } else {
11878-
11879- /*
11880- * High bit is 0 - that means that we indeed got a PS/2
11881- * packet in the middle of ALPS packet.
11882- *
11883- * There is also possibility that we got 6-byte ALPS
11884- * packet followed by 3-byte packet from trackpoint. We
11885- * can not distinguish between these 2 scenarios but
11886-		 * because the latter is unlikely to happen in the course of
11887- * normal operation (user would need to press all
11888- * buttons on the pad and start moving trackpoint
11889- * without touching the pad surface) we assume former.
11890-		 * Even if we are wrong, the worst thing that would happen is that
11891-		 * the cursor would jump, but we should not get protocol
11892- * desynchronization.
11893- */
11894-
11895- alps_report_bare_ps2_packet(psmouse, &psmouse->packet[3],
11896- false);
11897-
11898- /*
11899- * Continue with the standard ALPS protocol handling,
11900- * but make sure we won't process it as an interleaved
11901- * packet again, which may happen if all buttons are
11902- * pressed. To avoid this let's reset the 4th bit which
11903- * is normally 1.
11904- */
11905- psmouse->packet[3] = psmouse->packet[6] & 0xf7;
11906- psmouse->pktcnt = 4;
11907- }
11908-
11909- return PSMOUSE_GOOD_DATA;
11910-}
11911-
11912-static void alps_flush_packet(unsigned long data)
11913-{
11914- struct psmouse *psmouse = (struct psmouse *)data;
11915-
11916- serio_pause_rx(psmouse->ps2dev.serio);
11917-
11918- if (psmouse->pktcnt == 6) {
11919-
11920- /*
11921-		 * We did not get any more data in a reasonable amount of time.
11922- * Validate the last 3 bytes and process as a standard
11923- * ALPS packet.
11924- */
11925- if ((psmouse->packet[3] |
11926- psmouse->packet[4] |
11927- psmouse->packet[5]) & 0x80) {
11928- dbg("refusing packet %x %x %x "
11929- "(suspected interleaved ps/2)\n",
11930- psmouse->packet[3], psmouse->packet[4],
11931- psmouse->packet[5]);
11932- } else {
11933- alps_process_packet(psmouse);
11934- }
11935- psmouse->pktcnt = 0;
11936- }
11937-
11938- serio_continue_rx(psmouse->ps2dev.serio);
11939-}
11940-
11941 static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
11942 {
11943 struct alps_data *priv = psmouse->private;
11944- const struct alps_model_info *model = priv->i;
11945
11946 if ((psmouse->packet[0] & 0xc8) == 0x08) { /* PS/2 packet */
11947 if (psmouse->pktcnt == 3) {
11948- alps_report_bare_ps2_packet(psmouse, psmouse->packet,
11949- true);
11950+ alps_process_packet(psmouse);
11951 return PSMOUSE_FULL_PACKET;
11952 }
11953 return PSMOUSE_GOOD_DATA;
11954 }
11955
11956- /* Check for PS/2 packet stuffed in the middle of ALPS packet. */
11957-
11958- if ((model->flags & ALPS_PS2_INTERLEAVED) &&
11959- psmouse->pktcnt >= 4 && (psmouse->packet[3] & 0x0f) == 0x0f) {
11960- return alps_handle_interleaved_ps2(psmouse);
11961- }
11962-
11963- if (!alps_is_valid_first_byte(model, psmouse->packet[0])) {
11964- dbg("refusing packet[0] = %x (mask0 = %x, byte0 = %x)\n",
11965- psmouse->packet[0], model->mask0, model->byte0);
11966+ if ((psmouse->packet[0] & priv->i->mask0) != priv->i->byte0)
11967 return PSMOUSE_BAD_DATA;
11968- }
11969
11970 /* Bytes 2 - 6 should have 0 in the highest bit */
11971 if (psmouse->pktcnt >= 2 && psmouse->pktcnt <= 6 &&
11972- (psmouse->packet[psmouse->pktcnt - 1] & 0x80)) {
11973- dbg("refusing packet[%i] = %x\n",
11974- psmouse->pktcnt - 1, psmouse->packet[psmouse->pktcnt - 1]);
11975+ (psmouse->packet[psmouse->pktcnt - 1] & 0x80))
11976 return PSMOUSE_BAD_DATA;
11977- }
11978
11979 if (psmouse->pktcnt == 6) {
11980 alps_process_packet(psmouse);
11981@@ -644,7 +445,6 @@ static void alps_disconnect(struct psmouse *psmouse)
11982 struct alps_data *priv = psmouse->private;
11983
11984 psmouse_reset(psmouse);
11985- del_timer_sync(&priv->timer);
11986 input_unregister_device(priv->dev2);
11987 kfree(priv);
11988 }
11989@@ -661,8 +461,6 @@ int alps_init(struct psmouse *psmouse)
11990 goto init_fail;
11991
11992 priv->dev2 = dev2;
11993- setup_timer(&priv->timer, alps_flush_packet, (unsigned long)psmouse);
11994-
11995 psmouse->private = priv;
11996
11997 if (alps_hw_init(psmouse, &version))
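
The 3-byte handling added to alps_process_packet() above decodes a standard PS/2 relative packet: bit 4 of byte 0 is the X sign and bit 5 the Y sign, so shifting byte 0 left by 4 (or by 3 for Y) lands the sign at bit 8 (0x100) and turns the 8-bit magnitude into a 9-bit signed delta. The same arithmetic as a self-contained user-space sketch (hypothetical helper, not part of the patch):

	#include <stdio.h>

	static void decode_ps2(const unsigned char p[3], int *dx, int *dy)
	{
		/* bit 4 of p[0], shifted to bit 8, sign-extends the X delta */
		*dx = p[1] ? p[1] - ((p[0] << 4) & 0x100) : 0;
		/* Y is inverted: PS/2 positive Y is up, REL_Y positive is down */
		*dy = p[2] ? ((p[0] << 3) & 0x100) - p[2] : 0;
	}

	int main(void)
	{
		unsigned char pkt[3] = { 0x18, 0xfe, 0x02 };	/* X sign bit set */
		int dx, dy;

		decode_ps2(pkt, &dx, &dy);
		printf("dx=%d dy=%d\n", dx, dy);	/* prints dx=-2 dy=-2 */
		return 0;
	}
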
11998diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
11999index 904ed8b..bc87936 100644
12000--- a/drivers/input/mouse/alps.h
12001+++ b/drivers/input/mouse/alps.h
12002@@ -23,7 +23,6 @@ struct alps_data {
12003 char phys[32]; /* Phys */
12004 const struct alps_model_info *i;/* Info */
12005 int prev_fin; /* Finger bit from previous packet */
12006- struct timer_list timer;
12007 };
12008
12009 #ifdef CONFIG_MOUSE_PS2_ALPS
12010diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
12011index 0876d82..07c5379 100644
12012--- a/drivers/input/mouse/psmouse-base.c
12013+++ b/drivers/input/mouse/psmouse-base.c
12014@@ -667,6 +667,19 @@ static int psmouse_extensions(struct psmouse *psmouse,
12015 max_proto = PSMOUSE_IMEX;
12016 }
12017
12018+/*
12019+ * Try Finger Sensing Pad
12020+ */
12021+ if (max_proto > PSMOUSE_IMEX) {
12022+ if (fsp_detect(psmouse, set_properties) == 0) {
12023+ if (!set_properties || fsp_init(psmouse) == 0)
12024+ return PSMOUSE_FSP;
12025+/*
12026+ * Init failed, try basic relative protocols
12027+ */
12028+ max_proto = PSMOUSE_IMEX;
12029+ }
12030+ }
12031
12032 if (max_proto > PSMOUSE_IMEX) {
12033 if (genius_detect(psmouse, set_properties) == 0)
12034@@ -683,21 +696,6 @@ static int psmouse_extensions(struct psmouse *psmouse,
12035 }
12036
12037 /*
12038- * Try Finger Sensing Pad. We do it here because its probe upsets
12039- * Trackpoint devices (causing TP_READ_ID command to time out).
12040- */
12041- if (max_proto > PSMOUSE_IMEX) {
12042- if (fsp_detect(psmouse, set_properties) == 0) {
12043- if (!set_properties || fsp_init(psmouse) == 0)
12044- return PSMOUSE_FSP;
12045-/*
12046- * Init failed, try basic relative protocols
12047- */
12048- max_proto = PSMOUSE_IMEX;
12049- }
12050- }
12051-
12052-/*
12053 * Reset to defaults in case the device got confused by extended
12054 * protocol probes. Note that we follow up with full reset because
12055 * some mice put themselves to sleep when they see PSMOUSE_RESET_DIS.
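
This hunk moves the Finger Sensing Pad probe ahead of the other extended probes; the comment deleted further down records why the newer code had placed it last (the FSP probe upsets Trackpoint devices). Every probe in psmouse_extensions() follows the same idiom, sketched here with hypothetical xyz_* names:

	if (max_proto > PSMOUSE_IMEX) {
		if (xyz_detect(psmouse, set_properties) == 0) {
			/* detected; try to switch the device over */
			if (!set_properties || xyz_init(psmouse) == 0)
				return PSMOUSE_XYZ;
			/* init failed: fall back to basic relative protocols */
			max_proto = PSMOUSE_IMEX;
		}
	}
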
12056diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
12057index 2a5982e..2bcf1ac 100644
12058--- a/drivers/input/serio/i8042-x86ia64io.h
12059+++ b/drivers/input/serio/i8042-x86ia64io.h
12060@@ -67,12 +67,10 @@ static inline void i8042_write_command(int val)
12061
12062 #include <linux/dmi.h>
12063
12064-static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
12065+static struct dmi_system_id __initdata i8042_dmi_noloop_table[] = {
12066 {
12067- /*
12068- * Arima-Rioworks HDAMB -
12069- * AUX LOOP command does not raise AUX IRQ
12070- */
12071+ /* AUX LOOP command does not raise AUX IRQ */
12072+ .ident = "Arima-Rioworks HDAMB",
12073 .matches = {
12074 DMI_MATCH(DMI_BOARD_VENDOR, "RIOWORKS"),
12075 DMI_MATCH(DMI_BOARD_NAME, "HDAMB"),
12076@@ -80,7 +78,7 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
12077 },
12078 },
12079 {
12080- /* ASUS G1S */
12081+ .ident = "ASUS G1S",
12082 .matches = {
12083 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc."),
12084 DMI_MATCH(DMI_BOARD_NAME, "G1S"),
12085@@ -88,7 +86,8 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
12086 },
12087 },
12088 {
12089- /* ASUS P65UP5 - AUX LOOP command does not raise AUX IRQ */
12090+ /* AUX LOOP command does not raise AUX IRQ */
12091+ .ident = "ASUS P65UP5",
12092 .matches = {
12093 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
12094 DMI_MATCH(DMI_BOARD_NAME, "P/I-P65UP5"),
12095@@ -96,6 +95,7 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
12096 },
12097 },
12098 {
12099+ .ident = "Compaq Proliant 8500",
12100 .matches = {
12101 DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
12102 DMI_MATCH(DMI_PRODUCT_NAME , "ProLiant"),
12103@@ -103,6 +103,7 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
12104 },
12105 },
12106 {
12107+ .ident = "Compaq Proliant DL760",
12108 .matches = {
12109 DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
12110 DMI_MATCH(DMI_PRODUCT_NAME , "ProLiant"),
12111@@ -110,7 +111,7 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
12112 },
12113 },
12114 {
12115- /* OQO Model 01 */
12116+ .ident = "OQO Model 01",
12117 .matches = {
12118 DMI_MATCH(DMI_SYS_VENDOR, "OQO"),
12119 DMI_MATCH(DMI_PRODUCT_NAME, "ZEPTO"),
12120@@ -118,7 +119,8 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
12121 },
12122 },
12123 {
12124- /* ULI EV4873 - AUX LOOP does not work properly */
12125+ /* AUX LOOP does not work properly */
12126+ .ident = "ULI EV4873",
12127 .matches = {
12128 DMI_MATCH(DMI_SYS_VENDOR, "ULI"),
12129 DMI_MATCH(DMI_PRODUCT_NAME, "EV4873"),
12130@@ -126,7 +128,7 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
12131 },
12132 },
12133 {
12134- /* Microsoft Virtual Machine */
12135+ .ident = "Microsoft Virtual Machine",
12136 .matches = {
12137 DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
12138 DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"),
12139@@ -134,7 +136,7 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
12140 },
12141 },
12142 {
12143- /* Medion MAM 2070 */
12144+ .ident = "Medion MAM 2070",
12145 .matches = {
12146 DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
12147 DMI_MATCH(DMI_PRODUCT_NAME, "MAM 2070"),
12148@@ -142,7 +144,7 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
12149 },
12150 },
12151 {
12152- /* Blue FB5601 */
12153+ .ident = "Blue FB5601",
12154 .matches = {
12155 DMI_MATCH(DMI_SYS_VENDOR, "blue"),
12156 DMI_MATCH(DMI_PRODUCT_NAME, "FB5601"),
12157@@ -150,7 +152,7 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
12158 },
12159 },
12160 {
12161- /* Gigabyte M912 */
12162+ .ident = "Gigabyte M912",
12163 .matches = {
12164 DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
12165 DMI_MATCH(DMI_PRODUCT_NAME, "M912"),
12166@@ -158,14 +160,7 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
12167 },
12168 },
12169 {
12170- /* Gigabyte M1022M netbook */
12171- .matches = {
12172- DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co.,Ltd."),
12173- DMI_MATCH(DMI_BOARD_NAME, "M1022E"),
12174- DMI_MATCH(DMI_BOARD_VERSION, "1.02"),
12175- },
12176- },
12177- {
12178+ .ident = "HP DV9700",
12179 .matches = {
12180 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
12181 DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv9700"),
12182@@ -182,72 +177,72 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
12183 * ... apparently some Toshibas don't like MUX mode either and
12184 * die horrible death on reboot.
12185 */
12186-static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
12187+static struct dmi_system_id __initdata i8042_dmi_nomux_table[] = {
12188 {
12189- /* Fujitsu Lifebook P7010/P7010D */
12190+ .ident = "Fujitsu Lifebook P7010/P7010D",
12191 .matches = {
12192 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
12193 DMI_MATCH(DMI_PRODUCT_NAME, "P7010"),
12194 },
12195 },
12196 {
12197- /* Fujitsu Lifebook P7010 */
12198+ .ident = "Fujitsu Lifebook P7010",
12199 .matches = {
12200 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
12201 DMI_MATCH(DMI_PRODUCT_NAME, "0000000000"),
12202 },
12203 },
12204 {
12205- /* Fujitsu Lifebook P5020D */
12206+ .ident = "Fujitsu Lifebook P5020D",
12207 .matches = {
12208 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
12209 DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook P Series"),
12210 },
12211 },
12212 {
12213- /* Fujitsu Lifebook S2000 */
12214+ .ident = "Fujitsu Lifebook S2000",
12215 .matches = {
12216 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
12217 DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook S Series"),
12218 },
12219 },
12220 {
12221- /* Fujitsu Lifebook S6230 */
12222+ .ident = "Fujitsu Lifebook S6230",
12223 .matches = {
12224 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
12225 DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook S6230"),
12226 },
12227 },
12228 {
12229- /* Fujitsu T70H */
12230+ .ident = "Fujitsu T70H",
12231 .matches = {
12232 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
12233 DMI_MATCH(DMI_PRODUCT_NAME, "FMVLT70H"),
12234 },
12235 },
12236 {
12237- /* Fujitsu-Siemens Lifebook T3010 */
12238+ .ident = "Fujitsu-Siemens Lifebook T3010",
12239 .matches = {
12240 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
12241 DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T3010"),
12242 },
12243 },
12244 {
12245- /* Fujitsu-Siemens Lifebook E4010 */
12246+ .ident = "Fujitsu-Siemens Lifebook E4010",
12247 .matches = {
12248 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
12249 DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E4010"),
12250 },
12251 },
12252 {
12253- /* Fujitsu-Siemens Amilo Pro 2010 */
12254+ .ident = "Fujitsu-Siemens Amilo Pro 2010",
12255 .matches = {
12256 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
12257 DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pro V2010"),
12258 },
12259 },
12260 {
12261- /* Fujitsu-Siemens Amilo Pro 2030 */
12262+ .ident = "Fujitsu-Siemens Amilo Pro 2030",
12263 .matches = {
12264 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
12265 DMI_MATCH(DMI_PRODUCT_NAME, "AMILO PRO V2030"),
12266@@ -258,7 +253,7 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
12267 * No data is coming from the touchscreen unless KBC
12268 * is in legacy mode.
12269 */
12270- /* Panasonic CF-29 */
12271+ .ident = "Panasonic CF-29",
12272 .matches = {
12273 DMI_MATCH(DMI_SYS_VENDOR, "Matsushita"),
12274 DMI_MATCH(DMI_PRODUCT_NAME, "CF-29"),
12275@@ -266,10 +261,10 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
12276 },
12277 {
12278 /*
12279- * HP Pavilion DV4017EA -
12280- * errors on MUX ports are reported without raising AUXDATA
12281+ * Errors on MUX ports are reported without raising AUXDATA
12282 * causing "spurious NAK" messages.
12283 */
12284+ .ident = "HP Pavilion DV4017EA",
12285 .matches = {
12286 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
12287 DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion dv4000 (EA032EA#ABF)"),
12288@@ -277,9 +272,9 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
12289 },
12290 {
12291 /*
12292- * HP Pavilion ZT1000 -
12293- * like DV4017EA does not raise AUXERR for errors on MUX ports.
12294+ * Like DV4017EA does not raise AUXERR for errors on MUX ports.
12295 */
12296+ .ident = "HP Pavilion ZT1000",
12297 .matches = {
12298 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
12299 DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Notebook PC"),
12300@@ -288,41 +283,44 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
12301 },
12302 {
12303 /*
12304- * HP Pavilion DV4270ca -
12305- * like DV4017EA does not raise AUXERR for errors on MUX ports.
12306+ * Like DV4017EA does not raise AUXERR for errors on MUX ports.
12307 */
12308+ .ident = "HP Pavilion DV4270ca",
12309 .matches = {
12310 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
12311 DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion dv4000 (EH476UA#ABL)"),
12312 },
12313 },
12314 {
12315+ .ident = "Toshiba P10",
12316 .matches = {
12317 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
12318 DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P10"),
12319 },
12320 },
12321 {
12322+ .ident = "Toshiba Equium A110",
12323 .matches = {
12324 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
12325 DMI_MATCH(DMI_PRODUCT_NAME, "EQUIUM A110"),
12326 },
12327 },
12328 {
12329+ .ident = "Alienware Sentia",
12330 .matches = {
12331 DMI_MATCH(DMI_SYS_VENDOR, "ALIENWARE"),
12332 DMI_MATCH(DMI_PRODUCT_NAME, "Sentia"),
12333 },
12334 },
12335 {
12336- /* Sharp Actius MM20 */
12337+ .ident = "Sharp Actius MM20",
12338 .matches = {
12339 DMI_MATCH(DMI_SYS_VENDOR, "SHARP"),
12340 DMI_MATCH(DMI_PRODUCT_NAME, "PC-MM20 Series"),
12341 },
12342 },
12343 {
12344- /* Sony Vaio FS-115b */
12345+ .ident = "Sony Vaio FS-115b",
12346 .matches = {
12347 DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
12348 DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FS115B"),
12349@@ -330,72 +328,73 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
12350 },
12351 {
12352 /*
12353- * Sony Vaio FZ-240E -
12354- * reset and GET ID commands issued via KBD port are
12355+ * Reset and GET ID commands issued via KBD port are
12356 * sometimes being delivered to AUX3.
12357 */
12358+ .ident = "Sony Vaio FZ-240E",
12359 .matches = {
12360 DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
12361 DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FZ240E"),
12362 },
12363 },
12364 {
12365- /* Amoi M636/A737 */
12366+ .ident = "Amoi M636/A737",
12367 .matches = {
12368 DMI_MATCH(DMI_SYS_VENDOR, "Amoi Electronics CO.,LTD."),
12369 DMI_MATCH(DMI_PRODUCT_NAME, "M636/A737 platform"),
12370 },
12371 },
12372 {
12373- /* Lenovo 3000 n100 */
12374+ .ident = "Lenovo 3000 n100",
12375 .matches = {
12376 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
12377 DMI_MATCH(DMI_PRODUCT_NAME, "076804U"),
12378 },
12379 },
12380 {
12381+ .ident = "Acer Aspire 1360",
12382 .matches = {
12383 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
12384 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1360"),
12385 },
12386 },
12387 {
12388- /* Gericom Bellagio */
12389+ .ident = "Gericom Bellagio",
12390 .matches = {
12391 DMI_MATCH(DMI_SYS_VENDOR, "Gericom"),
12392 DMI_MATCH(DMI_PRODUCT_NAME, "N34AS6"),
12393 },
12394 },
12395 {
12396- /* IBM 2656 */
12397+ .ident = "IBM 2656",
12398 .matches = {
12399 DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
12400 DMI_MATCH(DMI_PRODUCT_NAME, "2656"),
12401 },
12402 },
12403 {
12404- /* Dell XPS M1530 */
12405+ .ident = "Dell XPS M1530",
12406 .matches = {
12407 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
12408 DMI_MATCH(DMI_PRODUCT_NAME, "XPS M1530"),
12409 },
12410 },
12411 {
12412- /* Compal HEL80I */
12413+ .ident = "Compal HEL80I",
12414 .matches = {
12415 DMI_MATCH(DMI_SYS_VENDOR, "COMPAL"),
12416 DMI_MATCH(DMI_PRODUCT_NAME, "HEL80I"),
12417 },
12418 },
12419 {
12420- /* Dell Vostro 1510 */
12421+ .ident = "Dell Vostro 1510",
12422 .matches = {
12423 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
12424 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro1510"),
12425 },
12426 },
12427 {
12428- /* Acer Aspire 5536 */
12429+ .ident = "Acer Aspire 5536",
12430 .matches = {
12431 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
12432 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5536"),
12433@@ -405,65 +404,65 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
12434 { }
12435 };
12436
12437-static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
12438+static struct dmi_system_id __initdata i8042_dmi_reset_table[] = {
12439 {
12440- /* MSI Wind U-100 */
12441+ .ident = "MSI Wind U-100",
12442 .matches = {
12443 DMI_MATCH(DMI_BOARD_NAME, "U-100"),
12444 DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
12445 },
12446 },
12447 {
12448- /* LG Electronics X110 */
12449+ .ident = "LG Electronics X110",
12450 .matches = {
12451 DMI_MATCH(DMI_BOARD_NAME, "X110"),
12452 DMI_MATCH(DMI_BOARD_VENDOR, "LG Electronics Inc."),
12453 },
12454 },
12455 {
12456- /* Acer Aspire One 150 */
12457+ .ident = "Acer Aspire One 150",
12458 .matches = {
12459 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
12460 DMI_MATCH(DMI_PRODUCT_NAME, "AOA150"),
12461 },
12462 },
12463 {
12464- /* Advent 4211 */
12465+ .ident = "Advent 4211",
12466 .matches = {
12467 DMI_MATCH(DMI_SYS_VENDOR, "DIXONSXP"),
12468 DMI_MATCH(DMI_PRODUCT_NAME, "Advent 4211"),
12469 },
12470 },
12471 {
12472- /* Medion Akoya Mini E1210 */
12473+ .ident = "Medion Akoya Mini E1210",
12474 .matches = {
12475 DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
12476 DMI_MATCH(DMI_PRODUCT_NAME, "E1210"),
12477 },
12478 },
12479 {
12480- /* Mivvy M310 */
12481+ .ident = "Mivvy M310",
12482 .matches = {
12483 DMI_MATCH(DMI_SYS_VENDOR, "VIOOO"),
12484 DMI_MATCH(DMI_PRODUCT_NAME, "N10"),
12485 },
12486 },
12487 {
12488- /* Dell Vostro 1320 */
12489+ .ident = "Dell Vostro 1320",
12490 .matches = {
12491 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
12492 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1320"),
12493 },
12494 },
12495 {
12496- /* Dell Vostro 1520 */
12497+ .ident = "Dell Vostro 1520",
12498 .matches = {
12499 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
12500 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1520"),
12501 },
12502 },
12503 {
12504- /* Dell Vostro 1720 */
12505+ .ident = "Dell Vostro 1720",
12506 .matches = {
12507 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
12508 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1720"),
12509@@ -473,16 +472,16 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
12510 };
12511
12512 #ifdef CONFIG_PNP
12513-static const struct dmi_system_id __initconst i8042_dmi_nopnp_table[] = {
12514+static struct dmi_system_id __initdata i8042_dmi_nopnp_table[] = {
12515 {
12516- /* Intel MBO Desktop D845PESV */
12517+ .ident = "Intel MBO Desktop D845PESV",
12518 .matches = {
12519 DMI_MATCH(DMI_BOARD_NAME, "D845PESV"),
12520 DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
12521 },
12522 },
12523 {
12524- /* MSI Wind U-100 */
12525+ .ident = "MSI Wind U-100",
12526 .matches = {
12527 DMI_MATCH(DMI_BOARD_NAME, "U-100"),
12528 DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
12529@@ -491,23 +490,27 @@ static const struct dmi_system_id __initconst i8042_dmi_nopnp_table[] = {
12530 { }
12531 };
12532
12533-static const struct dmi_system_id __initconst i8042_dmi_laptop_table[] = {
12534+static struct dmi_system_id __initdata i8042_dmi_laptop_table[] = {
12535 {
12536+ .ident = "Portable",
12537 .matches = {
12538 DMI_MATCH(DMI_CHASSIS_TYPE, "8"), /* Portable */
12539 },
12540 },
12541 {
12542+ .ident = "Laptop",
12543 .matches = {
12544 DMI_MATCH(DMI_CHASSIS_TYPE, "9"), /* Laptop */
12545 },
12546 },
12547 {
12548+ .ident = "Notebook",
12549 .matches = {
12550 DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
12551 },
12552 },
12553 {
12554+ .ident = "Sub-Notebook",
12555 .matches = {
12556 DMI_MATCH(DMI_CHASSIS_TYPE, "14"), /* Sub-Notebook */
12557 },
12558@@ -522,65 +525,58 @@ static const struct dmi_system_id __initconst i8042_dmi_laptop_table[] = {
12559 * Originally, this was just confined to older laptops, but a few Acer laptops
12560 * have turned up in 2007 that also need this again.
12561 */
12562-static const struct dmi_system_id __initconst i8042_dmi_dritek_table[] = {
12563- {
12564- /* Acer Aspire 5610 */
12565- .matches = {
12566- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
12567- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5610"),
12568- },
12569- },
12570+static struct dmi_system_id __initdata i8042_dmi_dritek_table[] = {
12571 {
12572- /* Acer Aspire 5630 */
12573+ .ident = "Acer Aspire 5630",
12574 .matches = {
12575 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
12576 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5630"),
12577 },
12578 },
12579 {
12580- /* Acer Aspire 5650 */
12581+ .ident = "Acer Aspire 5650",
12582 .matches = {
12583 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
12584 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5650"),
12585 },
12586 },
12587 {
12588- /* Acer Aspire 5680 */
12589+ .ident = "Acer Aspire 5680",
12590 .matches = {
12591 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
12592 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5680"),
12593 },
12594 },
12595 {
12596- /* Acer Aspire 5720 */
12597+ .ident = "Acer Aspire 5720",
12598 .matches = {
12599 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
12600 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5720"),
12601 },
12602 },
12603 {
12604- /* Acer Aspire 9110 */
12605+ .ident = "Acer Aspire 9110",
12606 .matches = {
12607 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
12608 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 9110"),
12609 },
12610 },
12611 {
12612- /* Acer TravelMate 660 */
12613+ .ident = "Acer TravelMate 660",
12614 .matches = {
12615 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
12616 DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 660"),
12617 },
12618 },
12619 {
12620- /* Acer TravelMate 2490 */
12621+ .ident = "Acer TravelMate 2490",
12622 .matches = {
12623 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
12624 DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 2490"),
12625 },
12626 },
12627 {
12628- /* Acer TravelMate 4280 */
12629+ .ident = "Acer TravelMate 4280",
12630 .matches = {
12631 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
12632 DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4280"),
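
All of the quirk tables above share the same shape: an array of struct dmi_system_id terminated by an empty entry, consulted once at init time. A minimal sketch of how such a table is matched against the running machine's DMI/SMBIOS data via dmi_check_system() (names here are hypothetical):

	#include <linux/dmi.h>
	#include <linux/init.h>

	static struct dmi_system_id __initdata example_quirk_table[] = {
		{
			.ident = "Example Vendor Board",	/* hypothetical */
			.matches = {
				DMI_MATCH(DMI_BOARD_VENDOR, "EXAMPLE"),
				DMI_MATCH(DMI_BOARD_NAME, "BOARD-1"),
			},
		},
		{ }	/* terminating empty entry, as in the tables above */
	};

	static bool example_quirk;

	static int __init example_dmi_check(void)
	{
		/* dmi_check_system() returns how many entries match */
		example_quirk = dmi_check_system(example_quirk_table) > 0;
		return 0;
	}
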
12633diff --git a/drivers/lguest/segments.c b/drivers/lguest/segments.c
12634index ede4658..951c57b 100644
12635--- a/drivers/lguest/segments.c
12636+++ b/drivers/lguest/segments.c
12637@@ -179,10 +179,8 @@ void load_guest_gdt_entry(struct lg_cpu *cpu, u32 num, u32 lo, u32 hi)
12638 * We assume the Guest has the same number of GDT entries as the
12639 * Host, otherwise we'd have to dynamically allocate the Guest GDT.
12640 */
12641- if (num >= ARRAY_SIZE(cpu->arch.gdt)) {
12642+ if (num >= ARRAY_SIZE(cpu->arch.gdt))
12643 kill_guest(cpu, "too many gdt entries %i", num);
12644- return;
12645- }
12646
12647 /* Set it up, then fix it. */
12648 cpu->arch.gdt[num].a = lo;
12649diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c
12650index 386a797..556f0fe 100644
12651--- a/drivers/macintosh/therm_adt746x.c
12652+++ b/drivers/macintosh/therm_adt746x.c
12653@@ -79,7 +79,6 @@ struct thermostat {
12654 u8 limits[3];
12655 int last_speed[2];
12656 int last_var[2];
12657- int pwm_inv[2];
12658 };
12659
12660 static enum {ADT7460, ADT7467} therm_type;
12661@@ -230,23 +229,19 @@ static void write_fan_speed(struct thermostat *th, int speed, int fan)
12662
12663 if (speed >= 0) {
12664 manual = read_reg(th, MANUAL_MODE[fan]);
12665- manual &= ~INVERT_MASK;
12666 write_reg(th, MANUAL_MODE[fan],
12667- manual | MANUAL_MASK | th->pwm_inv[fan]);
12668+ (manual|MANUAL_MASK) & (~INVERT_MASK));
12669 write_reg(th, FAN_SPD_SET[fan], speed);
12670 } else {
12671 /* back to automatic */
12672 if(therm_type == ADT7460) {
12673 manual = read_reg(th,
12674 MANUAL_MODE[fan]) & (~MANUAL_MASK);
12675- manual &= ~INVERT_MASK;
12676- manual |= th->pwm_inv[fan];
12677+
12678 write_reg(th,
12679 MANUAL_MODE[fan], manual|REM_CONTROL[fan]);
12680 } else {
12681 manual = read_reg(th, MANUAL_MODE[fan]);
12682- manual &= ~INVERT_MASK;
12683- manual |= th->pwm_inv[fan];
12684 write_reg(th, MANUAL_MODE[fan], manual&(~AUTO_MASK));
12685 }
12686 }
12687@@ -423,10 +418,6 @@ static int probe_thermostat(struct i2c_client *client,
12688
12689 thermostat = th;
12690
12691- /* record invert bit status because fw can corrupt it after suspend */
12692- th->pwm_inv[0] = read_reg(th, MANUAL_MODE[0]) & INVERT_MASK;
12693- th->pwm_inv[1] = read_reg(th, MANUAL_MODE[1]) & INVERT_MASK;
12694-
12695 /* be sure to really write fan speed the first time */
12696 th->last_speed[0] = -2;
12697 th->last_speed[1] = -2;
12698diff --git a/drivers/macintosh/windfarm_smu_controls.c b/drivers/macintosh/windfarm_smu_controls.c
12699index 6c68b9e..961fa0e 100644
12700--- a/drivers/macintosh/windfarm_smu_controls.c
12701+++ b/drivers/macintosh/windfarm_smu_controls.c
12702@@ -202,8 +202,6 @@ static struct smu_fan_control *smu_fan_create(struct device_node *node,
12703 fct->ctrl.name = "cpu-front-fan-1";
12704 else if (!strcmp(l, "CPU A PUMP"))
12705 fct->ctrl.name = "cpu-pump-0";
12706- else if (!strcmp(l, "CPU B PUMP"))
12707- fct->ctrl.name = "cpu-pump-1";
12708 else if (!strcmp(l, "Slots Fan") || !strcmp(l, "Slots fan") ||
12709 !strcmp(l, "EXPANSION SLOTS INTAKE"))
12710 fct->ctrl.name = "slots-fan";
12711diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
12712index a5e5f2f..60e2b32 100644
12713--- a/drivers/md/bitmap.c
12714+++ b/drivers/md/bitmap.c
12715@@ -1078,31 +1078,23 @@ static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap,
12716 * out to disk
12717 */
12718
12719-void bitmap_daemon_work(mddev_t *mddev)
12720+void bitmap_daemon_work(struct bitmap *bitmap)
12721 {
12722- struct bitmap *bitmap;
12723 unsigned long j;
12724 unsigned long flags;
12725 struct page *page = NULL, *lastpage = NULL;
12726 int blocks;
12727 void *paddr;
12728
12729- /* Use a mutex to guard daemon_work against
12730- * bitmap_destroy.
12731- */
12732- mutex_lock(&mddev->bitmap_mutex);
12733- bitmap = mddev->bitmap;
12734- if (bitmap == NULL) {
12735- mutex_unlock(&mddev->bitmap_mutex);
12736+ if (bitmap == NULL)
12737 return;
12738- }
12739 if (time_before(jiffies, bitmap->daemon_lastrun + bitmap->daemon_sleep*HZ))
12740 goto done;
12741
12742 bitmap->daemon_lastrun = jiffies;
12743 if (bitmap->allclean) {
12744 bitmap->mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
12745- goto done;
12746+ return;
12747 }
12748 bitmap->allclean = 1;
12749
12750@@ -1211,7 +1203,6 @@ void bitmap_daemon_work(mddev_t *mddev)
12751 done:
12752 if (bitmap->allclean == 0)
12753 bitmap->mddev->thread->timeout = bitmap->daemon_sleep * HZ;
12754- mutex_unlock(&mddev->bitmap_mutex);
12755 }
12756
12757 static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap,
12758@@ -1550,9 +1541,9 @@ void bitmap_flush(mddev_t *mddev)
12759 */
12760 sleep = bitmap->daemon_sleep;
12761 bitmap->daemon_sleep = 0;
12762- bitmap_daemon_work(mddev);
12763- bitmap_daemon_work(mddev);
12764- bitmap_daemon_work(mddev);
12765+ bitmap_daemon_work(bitmap);
12766+ bitmap_daemon_work(bitmap);
12767+ bitmap_daemon_work(bitmap);
12768 bitmap->daemon_sleep = sleep;
12769 bitmap_update_sb(bitmap);
12770 }
12771@@ -1583,7 +1574,6 @@ static void bitmap_free(struct bitmap *bitmap)
12772 kfree(bp);
12773 kfree(bitmap);
12774 }
12775-
12776 void bitmap_destroy(mddev_t *mddev)
12777 {
12778 struct bitmap *bitmap = mddev->bitmap;
12779@@ -1591,9 +1581,7 @@ void bitmap_destroy(mddev_t *mddev)
12780 if (!bitmap) /* there was no bitmap */
12781 return;
12782
12783- mutex_lock(&mddev->bitmap_mutex);
12784 mddev->bitmap = NULL; /* disconnect from the md device */
12785- mutex_unlock(&mddev->bitmap_mutex);
12786 if (mddev->thread)
12787 mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
12788
12789diff --git a/drivers/md/bitmap.h b/drivers/md/bitmap.h
12790index 7e38d13..e989006 100644
12791--- a/drivers/md/bitmap.h
12792+++ b/drivers/md/bitmap.h
12793@@ -282,7 +282,7 @@ void bitmap_close_sync(struct bitmap *bitmap);
12794 void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector);
12795
12796 void bitmap_unplug(struct bitmap *bitmap);
12797-void bitmap_daemon_work(mddev_t *mddev);
12798+void bitmap_daemon_work(struct bitmap *bitmap);
12799 #endif
12800
12801 #endif
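
The interface change above is the heart of this hunk: passing struct bitmap * directly means the caller samples mddev->bitmap without a lock, so bitmap_destroy() can free the bitmap while the daemon still holds a stale pointer. The removed bitmap_mutex code closed exactly that race; reduced to its essentials it looks like this (sketch, using the names from the md code above):

	void bitmap_daemon_work(mddev_t *mddev)
	{
		struct bitmap *bitmap;

		mutex_lock(&mddev->bitmap_mutex);	/* vs. bitmap_destroy() */
		bitmap = mddev->bitmap;			/* re-read under the lock */
		if (!bitmap) {
			mutex_unlock(&mddev->bitmap_mutex);
			return;
		}
		/* ... scan and write out the dirty bitmap pages ... */
		mutex_unlock(&mddev->bitmap_mutex);
	}
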
12802diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
12803index 959d6d1..ed10381 100644
12804--- a/drivers/md/dm-crypt.c
12805+++ b/drivers/md/dm-crypt.c
12806@@ -1,7 +1,7 @@
12807 /*
12808 * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
12809 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
12810- * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved.
12811+ * Copyright (C) 2006-2008 Red Hat, Inc. All rights reserved.
12812 *
12813 * This file is released under the GPL.
12814 */
12815@@ -71,21 +71,10 @@ struct crypt_iv_operations {
12816 int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
12817 const char *opts);
12818 void (*dtr)(struct crypt_config *cc);
12819- int (*init)(struct crypt_config *cc);
12820- int (*wipe)(struct crypt_config *cc);
12821+ const char *(*status)(struct crypt_config *cc);
12822 int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector);
12823 };
12824
12825-struct iv_essiv_private {
12826- struct crypto_cipher *tfm;
12827- struct crypto_hash *hash_tfm;
12828- u8 *salt;
12829-};
12830-
12831-struct iv_benbi_private {
12832- int shift;
12833-};
12834-
12835 /*
12836 * Crypt: maps a linear range of a block device
12837 * and encrypts / decrypts at the same time.
12838@@ -113,8 +102,8 @@ struct crypt_config {
12839 struct crypt_iv_operations *iv_gen_ops;
12840 char *iv_mode;
12841 union {
12842- struct iv_essiv_private essiv;
12843- struct iv_benbi_private benbi;
12844+ struct crypto_cipher *essiv_tfm;
12845+ int benbi_shift;
12846 } iv_gen_private;
12847 sector_t iv_offset;
12848 unsigned int iv_size;
12849@@ -180,114 +169,88 @@ static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
12850 return 0;
12851 }
12852
12853-/* Initialise ESSIV - compute salt but no local memory allocations */
12854-static int crypt_iv_essiv_init(struct crypt_config *cc)
12855-{
12856- struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
12857- struct hash_desc desc;
12858- struct scatterlist sg;
12859- int err;
12860-
12861- sg_init_one(&sg, cc->key, cc->key_size);
12862- desc.tfm = essiv->hash_tfm;
12863- desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
12864-
12865- err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt);
12866- if (err)
12867- return err;
12868-
12869- return crypto_cipher_setkey(essiv->tfm, essiv->salt,
12870- crypto_hash_digestsize(essiv->hash_tfm));
12871-}
12872-
12873-/* Wipe salt and reset key derived from volume key */
12874-static int crypt_iv_essiv_wipe(struct crypt_config *cc)
12875-{
12876- struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
12877- unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);
12878-
12879- memset(essiv->salt, 0, salt_size);
12880-
12881- return crypto_cipher_setkey(essiv->tfm, essiv->salt, salt_size);
12882-}
12883-
12884-static void crypt_iv_essiv_dtr(struct crypt_config *cc)
12885-{
12886- struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
12887-
12888- crypto_free_cipher(essiv->tfm);
12889- essiv->tfm = NULL;
12890-
12891- crypto_free_hash(essiv->hash_tfm);
12892- essiv->hash_tfm = NULL;
12893-
12894- kzfree(essiv->salt);
12895- essiv->salt = NULL;
12896-}
12897-
12898 static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
12899 const char *opts)
12900 {
12901- struct crypto_cipher *essiv_tfm = NULL;
12902- struct crypto_hash *hash_tfm = NULL;
12903- u8 *salt = NULL;
12904+ struct crypto_cipher *essiv_tfm;
12905+ struct crypto_hash *hash_tfm;
12906+ struct hash_desc desc;
12907+ struct scatterlist sg;
12908+ unsigned int saltsize;
12909+ u8 *salt;
12910 int err;
12911
12912- if (!opts) {
12913+ if (opts == NULL) {
12914 ti->error = "Digest algorithm missing for ESSIV mode";
12915 return -EINVAL;
12916 }
12917
12918- /* Allocate hash algorithm */
12919+ /* Hash the cipher key with the given hash algorithm */
12920 hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
12921 if (IS_ERR(hash_tfm)) {
12922 ti->error = "Error initializing ESSIV hash";
12923- err = PTR_ERR(hash_tfm);
12924- goto bad;
12925+ return PTR_ERR(hash_tfm);
12926 }
12927
12928- salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL);
12929- if (!salt) {
12930+ saltsize = crypto_hash_digestsize(hash_tfm);
12931+ salt = kmalloc(saltsize, GFP_KERNEL);
12932+ if (salt == NULL) {
12933 ti->error = "Error kmallocing salt storage in ESSIV";
12934- err = -ENOMEM;
12935- goto bad;
12936+ crypto_free_hash(hash_tfm);
12937+ return -ENOMEM;
12938 }
12939
12940- /* Allocate essiv_tfm */
12941+ sg_init_one(&sg, cc->key, cc->key_size);
12942+ desc.tfm = hash_tfm;
12943+ desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
12944+ err = crypto_hash_digest(&desc, &sg, cc->key_size, salt);
12945+ crypto_free_hash(hash_tfm);
12946+
12947+ if (err) {
12948+ ti->error = "Error calculating hash in ESSIV";
12949+ kfree(salt);
12950+ return err;
12951+ }
12952+
12953+ /* Setup the essiv_tfm with the given salt */
12954 essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
12955 if (IS_ERR(essiv_tfm)) {
12956 ti->error = "Error allocating crypto tfm for ESSIV";
12957- err = PTR_ERR(essiv_tfm);
12958- goto bad;
12959+ kfree(salt);
12960+ return PTR_ERR(essiv_tfm);
12961 }
12962 if (crypto_cipher_blocksize(essiv_tfm) !=
12963 crypto_ablkcipher_ivsize(cc->tfm)) {
12964 ti->error = "Block size of ESSIV cipher does "
12965 "not match IV size of block cipher";
12966- err = -EINVAL;
12967- goto bad;
12968+ crypto_free_cipher(essiv_tfm);
12969+ kfree(salt);
12970+ return -EINVAL;
12971 }
12972+ err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
12973+ if (err) {
12974+ ti->error = "Failed to set key for ESSIV cipher";
12975+ crypto_free_cipher(essiv_tfm);
12976+ kfree(salt);
12977+ return err;
12978+ }
12979+ kfree(salt);
12980
12981- cc->iv_gen_private.essiv.salt = salt;
12982- cc->iv_gen_private.essiv.tfm = essiv_tfm;
12983- cc->iv_gen_private.essiv.hash_tfm = hash_tfm;
12984-
12985+ cc->iv_gen_private.essiv_tfm = essiv_tfm;
12986 return 0;
12987+}
12988
12989-bad:
12990- if (essiv_tfm && !IS_ERR(essiv_tfm))
12991- crypto_free_cipher(essiv_tfm);
12992- if (hash_tfm && !IS_ERR(hash_tfm))
12993- crypto_free_hash(hash_tfm);
12994- kfree(salt);
12995- return err;
12996+static void crypt_iv_essiv_dtr(struct crypt_config *cc)
12997+{
12998+ crypto_free_cipher(cc->iv_gen_private.essiv_tfm);
12999+ cc->iv_gen_private.essiv_tfm = NULL;
13000 }
13001
13002 static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
13003 {
13004 memset(iv, 0, cc->iv_size);
13005 *(u64 *)iv = cpu_to_le64(sector);
13006- crypto_cipher_encrypt_one(cc->iv_gen_private.essiv.tfm, iv, iv);
13007+ crypto_cipher_encrypt_one(cc->iv_gen_private.essiv_tfm, iv, iv);
13008 return 0;
13009 }
13010
13011@@ -310,7 +273,7 @@ static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
13012 return -EINVAL;
13013 }
13014
13015- cc->iv_gen_private.benbi.shift = 9 - log;
13016+ cc->iv_gen_private.benbi_shift = 9 - log;
13017
13018 return 0;
13019 }
13020@@ -325,7 +288,7 @@ static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
13021
13022 memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */
13023
13024- val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi.shift) + 1);
13025+ val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi_shift) + 1);
13026 put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));
13027
13028 return 0;
13029@@ -345,8 +308,6 @@ static struct crypt_iv_operations crypt_iv_plain_ops = {
13030 static struct crypt_iv_operations crypt_iv_essiv_ops = {
13031 .ctr = crypt_iv_essiv_ctr,
13032 .dtr = crypt_iv_essiv_dtr,
13033- .init = crypt_iv_essiv_init,
13034- .wipe = crypt_iv_essiv_wipe,
13035 .generator = crypt_iv_essiv_gen
13036 };
13037
13038@@ -1078,12 +1039,6 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
13039 cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0)
13040 goto bad_ivmode;
13041
13042- if (cc->iv_gen_ops && cc->iv_gen_ops->init &&
13043- cc->iv_gen_ops->init(cc) < 0) {
13044- ti->error = "Error initialising IV";
13045- goto bad_slab_pool;
13046- }
13047-
13048 cc->iv_size = crypto_ablkcipher_ivsize(tfm);
13049 if (cc->iv_size)
13050 /* at least a 64 bit sector number should fit in our buffer */
13051@@ -1323,7 +1278,6 @@ static void crypt_resume(struct dm_target *ti)
13052 static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
13053 {
13054 struct crypt_config *cc = ti->private;
13055- int ret = -EINVAL;
13056
13057 if (argc < 2)
13058 goto error;
13059@@ -1333,22 +1287,10 @@ static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
13060 DMWARN("not suspended during key manipulation.");
13061 return -EINVAL;
13062 }
13063- if (argc == 3 && !strnicmp(argv[1], MESG_STR("set"))) {
13064- ret = crypt_set_key(cc, argv[2]);
13065- if (ret)
13066- return ret;
13067- if (cc->iv_gen_ops && cc->iv_gen_ops->init)
13068- ret = cc->iv_gen_ops->init(cc);
13069- return ret;
13070- }
13071- if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe"))) {
13072- if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
13073- ret = cc->iv_gen_ops->wipe(cc);
13074- if (ret)
13075- return ret;
13076- }
13077+ if (argc == 3 && !strnicmp(argv[1], MESG_STR("set")))
13078+ return crypt_set_key(cc, argv[2]);
13079+ if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe")))
13080 return crypt_wipe_key(cc);
13081- }
13082 }
13083
13084 error:
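
Stripped of the allocation and error handling that the two versions shuffle around, the ESSIV scheme itself is compact: hash the volume key to get a salt, key a block cipher with the salt, and encrypt the little-endian sector number to produce the IV. The generator, which both versions share, reduces to this (sketch; essiv_tfm is assumed to be already keyed with the salt as in crypt_iv_essiv_ctr()):

	/* iv = E_salt(le64(sector)), where salt = H(volume key) */
	static int essiv_gen(struct crypto_cipher *essiv_tfm, u8 *iv,
			     unsigned int iv_size, sector_t sector)
	{
		memset(iv, 0, iv_size);
		*(u64 *)iv = cpu_to_le64(sector);
		crypto_cipher_encrypt_one(essiv_tfm, iv, iv);
		return 0;
	}
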
13085diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
13086index 2052159..7dbe652 100644
13087--- a/drivers/md/dm-exception-store.c
13088+++ b/drivers/md/dm-exception-store.c
13089@@ -216,8 +216,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
13090 type = get_type("N");
13091 else {
13092 ti->error = "Persistent flag is not P or N";
13093- r = -EINVAL;
13094- goto bad_type;
13095+ return -EINVAL;
13096 }
13097
13098 if (!type) {
13099diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
13100index d19854c..a679429 100644
13101--- a/drivers/md/dm-ioctl.c
13102+++ b/drivers/md/dm-ioctl.c
13103@@ -56,11 +56,6 @@ static void dm_hash_remove_all(int keep_open_devices);
13104 */
13105 static DECLARE_RWSEM(_hash_lock);
13106
13107-/*
13108- * Protects use of mdptr to obtain hash cell name and uuid from mapped device.
13109- */
13110-static DEFINE_MUTEX(dm_hash_cells_mutex);
13111-
13112 static void init_buckets(struct list_head *buckets)
13113 {
13114 unsigned int i;
13115@@ -211,9 +206,7 @@ static int dm_hash_insert(const char *name, const char *uuid, struct mapped_devi
13116 list_add(&cell->uuid_list, _uuid_buckets + hash_str(uuid));
13117 }
13118 dm_get(md);
13119- mutex_lock(&dm_hash_cells_mutex);
13120 dm_set_mdptr(md, cell);
13121- mutex_unlock(&dm_hash_cells_mutex);
13122 up_write(&_hash_lock);
13123
13124 return 0;
13125@@ -231,9 +224,7 @@ static void __hash_remove(struct hash_cell *hc)
13126 /* remove from the dev hash */
13127 list_del(&hc->uuid_list);
13128 list_del(&hc->name_list);
13129- mutex_lock(&dm_hash_cells_mutex);
13130 dm_set_mdptr(hc->md, NULL);
13131- mutex_unlock(&dm_hash_cells_mutex);
13132
13133 table = dm_get_table(hc->md);
13134 if (table) {
13135@@ -330,9 +321,7 @@ static int dm_hash_rename(uint32_t cookie, const char *old, const char *new)
13136 */
13137 list_del(&hc->name_list);
13138 old_name = hc->name;
13139- mutex_lock(&dm_hash_cells_mutex);
13140 hc->name = new_name;
13141- mutex_unlock(&dm_hash_cells_mutex);
13142 list_add(&hc->name_list, _name_buckets + hash_str(new_name));
13143
13144 /*
13145@@ -1593,7 +1582,8 @@ int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid)
13146 if (!md)
13147 return -ENXIO;
13148
13149- mutex_lock(&dm_hash_cells_mutex);
13150+ dm_get(md);
13151+ down_read(&_hash_lock);
13152 hc = dm_get_mdptr(md);
13153 if (!hc || hc->md != md) {
13154 r = -ENXIO;
13155@@ -1606,7 +1596,8 @@ int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid)
13156 strcpy(uuid, hc->uuid ? : "");
13157
13158 out:
13159- mutex_unlock(&dm_hash_cells_mutex);
13160+ up_read(&_hash_lock);
13161+ dm_put(md);
13162
13163 return r;
13164 }
13165diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
13166index f1c8cae..54abf9e 100644
13167--- a/drivers/md/dm-log-userspace-transfer.c
13168+++ b/drivers/md/dm-log-userspace-transfer.c
13169@@ -172,15 +172,11 @@ int dm_consult_userspace(const char *uuid, uint64_t luid, int request_type,
13170 {
13171 int r = 0;
13172 size_t dummy = 0;
13173- int overhead_size = sizeof(struct dm_ulog_request) + sizeof(struct cn_msg);
13174+ int overhead_size =
13175+ sizeof(struct dm_ulog_request *) + sizeof(struct cn_msg);
13176 struct dm_ulog_request *tfr = prealloced_ulog_tfr;
13177 struct receiving_pkg pkg;
13178
13179- /*
13180- * Given the space needed to hold the 'struct cn_msg' and
13181- * 'struct dm_ulog_request' - do we have enough payload
13182- * space remaining?
13183- */
13184 if (data_size > (DM_ULOG_PREALLOCED_SIZE - overhead_size)) {
13185 DMINFO("Size of tfr exceeds preallocated size");
13186 return -EINVAL;
13187@@ -195,7 +191,7 @@ resend:
13188 */
13189 mutex_lock(&dm_ulog_lock);
13190
13191- memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - sizeof(struct cn_msg));
13192+ memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - overhead_size);
13193 memcpy(tfr->uuid, uuid, DM_UUID_LEN);
13194 tfr->luid = luid;
13195 tfr->seq = dm_ulog_seq++;
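
One character on the '+' side of the overhead_size computation changes its meaning: sizeof(struct dm_ulog_request *) is the size of a pointer (8 bytes on x86-64), not of the structure, so the capacity check accepts payloads that do not actually fit in the preallocated transfer buffer. The '-' side is the later fix. In isolation:

	size_t wrong = sizeof(struct dm_ulog_request *);	/* pointer size: 8 */
	size_t right = sizeof(struct dm_ulog_request);		/* full struct size */
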
13196diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
13197index 8a4a9c8..3a3ba46 100644
13198--- a/drivers/md/dm-snap.c
13199+++ b/drivers/md/dm-snap.c
13200@@ -553,8 +553,6 @@ static int init_hash_tables(struct dm_snapshot *s)
13201 hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift;
13202 hash_size = min(hash_size, max_buckets);
13203
13204- if (hash_size < 64)
13205- hash_size = 64;
13206 hash_size = rounddown_pow_of_two(hash_size);
13207 if (init_exception_table(&s->complete, hash_size,
13208 DM_CHUNK_CONSECUTIVE_BITS))
13209@@ -1154,11 +1152,10 @@ static int snapshot_status(struct dm_target *ti, status_type_t type,
13210 unsigned sz = 0;
13211 struct dm_snapshot *snap = ti->private;
13212
13213+ down_write(&snap->lock);
13214+
13215 switch (type) {
13216 case STATUSTYPE_INFO:
13217-
13218- down_write(&snap->lock);
13219-
13220 if (!snap->valid)
13221 DMEMIT("Invalid");
13222 else {
13223@@ -1174,9 +1171,6 @@ static int snapshot_status(struct dm_target *ti, status_type_t type,
13224 else
13225 DMEMIT("Unknown");
13226 }
13227-
13228- up_write(&snap->lock);
13229-
13230 break;
13231
13232 case STATUSTYPE_TABLE:
13233@@ -1191,6 +1185,8 @@ static int snapshot_status(struct dm_target *ti, status_type_t type,
13234 break;
13235 }
13236
13237+ up_write(&snap->lock);
13238+
13239 return 0;
13240 }
13241
13242diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
13243index bd58703..e0efc1a 100644
13244--- a/drivers/md/dm-stripe.c
13245+++ b/drivers/md/dm-stripe.c
13246@@ -110,7 +110,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
13247 }
13248
13249 stripes = simple_strtoul(argv[0], &end, 10);
13250- if (!stripes || *end) {
13251+ if (*end) {
13252 ti->error = "Invalid stripe count";
13253 return -EINVAL;
13254 }
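
The dropped !stripes test matters: simple_strtoul() parses "0" successfully and leaves *end at the terminating NUL, so checking *end alone accepts a zero stripe count, which makes no sense for a striped target and can at best fail later. The stricter form being removed rejects both bad cases:

	stripes = simple_strtoul(argv[0], &end, 10);
	if (!stripes || *end) {		/* rejects both "0" and "12abc" */
		ti->error = "Invalid stripe count";
		return -EINVAL;
	}
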
13255diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
13256index e869128..1a6cb3c 100644
13257--- a/drivers/md/dm-table.c
13258+++ b/drivers/md/dm-table.c
13259@@ -499,15 +499,16 @@ int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
13260 return 0;
13261 }
13262
13263- if (bdev_stack_limits(limits, bdev, start) < 0)
13264- DMWARN("%s: adding target device %s caused an alignment inconsistency: "
13265+ if (blk_stack_limits(limits, &q->limits, start << 9) < 0)
13266+ DMWARN("%s: target device %s is misaligned: "
13267 "physical_block_size=%u, logical_block_size=%u, "
13268 "alignment_offset=%u, start=%llu",
13269 dm_device_name(ti->table->md), bdevname(bdev, b),
13270 q->limits.physical_block_size,
13271 q->limits.logical_block_size,
13272 q->limits.alignment_offset,
13273- (unsigned long long) start << SECTOR_SHIFT);
13274+ (unsigned long long) start << 9);
13275+
13276
13277 /*
13278 * Check if merge fn is supported.
13279@@ -1024,9 +1025,9 @@ combine_limits:
13280 * for the table.
13281 */
13282 if (blk_stack_limits(limits, &ti_limits, 0) < 0)
13283- DMWARN("%s: adding target device "
13284+ DMWARN("%s: target device "
13285 "(start sect %llu len %llu) "
13286- "caused an alignment inconsistency",
13287+ "is misaligned",
13288 dm_device_name(table->md),
13289 (unsigned long long) ti->begin,
13290 (unsigned long long) ti->len);
13291@@ -1078,6 +1079,15 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
13292 struct queue_limits *limits)
13293 {
13294 /*
13295+ * Each target device in the table has a data area that should normally
13296+ * be aligned such that the DM device's alignment_offset is 0.
13297+ * FIXME: Propagate alignment_offsets up the stack and warn of
13298+ * sub-optimal or inconsistent settings.
13299+ */
13300+ limits->alignment_offset = 0;
13301+ limits->misaligned = 0;
13302+
13303+ /*
13304 * Copy table's limits to the DM device's request_queue
13305 */
13306 q->limits = *limits;
13307diff --git a/drivers/md/dm-uevent.c b/drivers/md/dm-uevent.c
13308index c7c555a..6f65883 100644
13309--- a/drivers/md/dm-uevent.c
13310+++ b/drivers/md/dm-uevent.c
13311@@ -139,13 +139,14 @@ void dm_send_uevents(struct list_head *events, struct kobject *kobj)
13312 list_del_init(&event->elist);
13313
13314 /*
13315- * When a device is being removed this copy fails and we
13316- * discard these unsent events.
13317+ * Need to call dm_copy_name_and_uuid from here for now.
13318+			 * Context of previous var adds and locking used for
13319+			 * hash_cell are not compatible.
13320 */
13321 if (dm_copy_name_and_uuid(event->md, event->name,
13322 event->uuid)) {
13323- DMINFO("%s: skipping sending uevent for lost device",
13324- __func__);
13325+ DMERR("%s: dm_copy_name_and_uuid() failed",
13326+ __func__);
13327 goto uevent_free;
13328 }
13329
13330diff --git a/drivers/md/md.c b/drivers/md/md.c
13331index 08f7471..b182f86 100644
13332--- a/drivers/md/md.c
13333+++ b/drivers/md/md.c
13334@@ -282,9 +282,7 @@ static void mddev_put(mddev_t *mddev)
13335 if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
13336 return;
13337 if (!mddev->raid_disks && list_empty(&mddev->disks) &&
13338- mddev->ctime == 0 && !mddev->hold_active) {
13339- /* Array is not configured at all, and not held active,
13340- * so destroy it */
13341+ !mddev->hold_active) {
13342 list_del(&mddev->all_mddevs);
13343 if (mddev->gendisk) {
13344 /* we did a probe so need to clean up.
13345@@ -369,7 +367,6 @@ static mddev_t * mddev_find(dev_t unit)
13346
13347 mutex_init(&new->open_mutex);
13348 mutex_init(&new->reconfig_mutex);
13349- mutex_init(&new->bitmap_mutex);
13350 INIT_LIST_HEAD(&new->disks);
13351 INIT_LIST_HEAD(&new->all_mddevs);
13352 init_timer(&new->safemode_timer);
13353@@ -4173,7 +4170,7 @@ static int do_md_run(mddev_t * mddev)
13354 mddev->barriers_work = 1;
13355 mddev->ok_start_degraded = start_dirty_degraded;
13356
13357- if (start_readonly && mddev->ro == 0)
13358+ if (start_readonly)
13359 mddev->ro = 2; /* read-only, but switch on first write */
13360
13361 err = mddev->pers->run(mddev);
13362@@ -5073,10 +5070,6 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
13363 mddev->minor_version = info->minor_version;
13364 mddev->patch_version = info->patch_version;
13365 mddev->persistent = !info->not_persistent;
13366- /* ensure mddev_put doesn't delete this now that there
13367- * is some minimal configuration.
13368- */
13369- mddev->ctime = get_seconds();
13370 return 0;
13371 }
13372 mddev->major_version = MD_MAJOR_VERSION;
13373@@ -6636,7 +6629,7 @@ void md_check_recovery(mddev_t *mddev)
13374
13375
13376 if (mddev->bitmap)
13377- bitmap_daemon_work(mddev);
13378+ bitmap_daemon_work(mddev->bitmap);
13379
13380 if (mddev->ro)
13381 return;
13382diff --git a/drivers/md/md.h b/drivers/md/md.h
13383index 87430fe..f184b69 100644
13384--- a/drivers/md/md.h
13385+++ b/drivers/md/md.h
13386@@ -289,7 +289,6 @@ struct mddev_s
13387 * hot-adding a bitmap. It should
13388 * eventually be settable by sysfs.
13389 */
13390- struct mutex bitmap_mutex;
13391
13392 struct list_head all_mddevs;
13393 };
13394diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
13395index 431b9b2..d29215d 100644
13396--- a/drivers/md/raid5.c
13397+++ b/drivers/md/raid5.c
13398@@ -5432,11 +5432,11 @@ static int raid5_start_reshape(mddev_t *mddev)
13399 !test_bit(Faulty, &rdev->flags)) {
13400 if (raid5_add_disk(mddev, rdev) == 0) {
13401 char nm[20];
13402- if (rdev->raid_disk >= conf->previous_raid_disks) {
13403+ if (rdev->raid_disk >= conf->previous_raid_disks)
13404 set_bit(In_sync, &rdev->flags);
13405- added_devices++;
13406- } else
13407+ else
13408 rdev->recovery_offset = 0;
13409+ added_devices++;
13410 sprintf(nm, "rd%d", rdev->raid_disk);
13411 if (sysfs_create_link(&mddev->kobj,
13412 &rdev->kobj, nm))
13413@@ -5448,12 +5448,9 @@ static int raid5_start_reshape(mddev_t *mddev)
13414 break;
13415 }
13416
13417- /* When a reshape changes the number of devices, ->degraded
13418- * is measured against the large of the pre and post number of
13419- * devices.*/
13420 if (mddev->delta_disks > 0) {
13421 spin_lock_irqsave(&conf->device_lock, flags);
13422- mddev->degraded += (conf->raid_disks - conf->previous_raid_disks)
13423+ mddev->degraded = (conf->raid_disks - conf->previous_raid_disks)
13424 - added_devices;
13425 spin_unlock_irqrestore(&conf->device_lock, flags);
13426 }
13427diff --git a/drivers/media/common/tuners/mxl5007t.c b/drivers/media/common/tuners/mxl5007t.c
13428index 7eb1bf7..2d02698 100644
13429--- a/drivers/media/common/tuners/mxl5007t.c
13430+++ b/drivers/media/common/tuners/mxl5007t.c
13431@@ -196,7 +196,7 @@ static void copy_reg_bits(struct reg_pair_t *reg_pair1,
13432 i = j = 0;
13433
13434 while (reg_pair1[i].reg || reg_pair1[i].val) {
13435- while (reg_pair2[j].reg || reg_pair2[j].val) {
13436+ while (reg_pair2[j].reg || reg_pair2[j].reg) {
13437 if (reg_pair1[i].reg != reg_pair2[j].reg) {
13438 j++;
13439 continue;
13440diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c
13441index 9ddc579..c37790a 100644
13442--- a/drivers/media/dvb/dvb-core/dmxdev.c
13443+++ b/drivers/media/dvb/dvb-core/dmxdev.c
13444@@ -761,6 +761,7 @@ static int dvb_demux_open(struct inode *inode, struct file *file)
13445 dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192);
13446 dmxdevfilter->type = DMXDEV_TYPE_NONE;
13447 dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED);
13448+ INIT_LIST_HEAD(&dmxdevfilter->feed.ts);
13449 init_timer(&dmxdevfilter->timer);
13450
13451 dvbdev->users++;
13452@@ -886,7 +887,6 @@ static int dvb_dmxdev_pes_filter_set(struct dmxdev *dmxdev,
13453 dmxdevfilter->type = DMXDEV_TYPE_PES;
13454 memcpy(&dmxdevfilter->params, params,
13455 sizeof(struct dmx_pes_filter_params));
13456- INIT_LIST_HEAD(&dmxdevfilter->feed.ts);
13457
13458 dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET);
13459
13460diff --git a/drivers/media/dvb/siano/smsusb.c b/drivers/media/dvb/siano/smsusb.c
13461index 6b03dbf..8f88a58 100644
13462--- a/drivers/media/dvb/siano/smsusb.c
13463+++ b/drivers/media/dvb/siano/smsusb.c
13464@@ -533,18 +533,8 @@ struct usb_device_id smsusb_id_table[] = {
13465 .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
13466 { USB_DEVICE(0x2040, 0xb910),
13467 .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
13468- { USB_DEVICE(0x2040, 0xb980),
13469- .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
13470- { USB_DEVICE(0x2040, 0xb990),
13471- .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
13472 { USB_DEVICE(0x2040, 0xc000),
13473 .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
13474- { USB_DEVICE(0x2040, 0xc010),
13475- .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
13476- { USB_DEVICE(0x2040, 0xc080),
13477- .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
13478- { USB_DEVICE(0x2040, 0xc090),
13479- .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
13480 { } /* Terminating entry */
13481 };
13482
13483diff --git a/drivers/media/video/gspca/ov519.c b/drivers/media/video/gspca/ov519.c
13484index e165578..a5c190e 100644
13485--- a/drivers/media/video/gspca/ov519.c
13486+++ b/drivers/media/video/gspca/ov519.c
13487@@ -3364,7 +3364,6 @@ static const __devinitdata struct usb_device_id device_table[] = {
13488 {USB_DEVICE(0x041e, 0x4061), .driver_info = BRIDGE_OV519 },
13489 {USB_DEVICE(0x041e, 0x4064),
13490 .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
13491- {USB_DEVICE(0x041e, 0x4067), .driver_info = BRIDGE_OV519 },
13492 {USB_DEVICE(0x041e, 0x4068),
13493 .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
13494 {USB_DEVICE(0x045e, 0x028c), .driver_info = BRIDGE_OV519 },
13495diff --git a/drivers/media/video/gspca/sn9c20x.c b/drivers/media/video/gspca/sn9c20x.c
13496index e0a3b75..cdad3db 100644
13497--- a/drivers/media/video/gspca/sn9c20x.c
13498+++ b/drivers/media/video/gspca/sn9c20x.c
13499@@ -2319,7 +2319,7 @@ static void do_autogain(struct gspca_dev *gspca_dev, u16 avg_lum)
13500 }
13501 }
13502 if (avg_lum > MAX_AVG_LUM) {
13503- if (sd->gain >= 1) {
13504+ if (sd->gain - 1 >= 0) {
13505 sd->gain--;
13506 set_gain(gspca_dev);
13507 }
13508diff --git a/drivers/media/video/gspca/sunplus.c b/drivers/media/video/gspca/sunplus.c
13509index 28b4625..aa8f995 100644
13510--- a/drivers/media/video/gspca/sunplus.c
13511+++ b/drivers/media/video/gspca/sunplus.c
13512@@ -705,7 +705,7 @@ static void spca504B_SetSizeType(struct gspca_dev *gspca_dev)
13513 rc = spca504B_PollingDataReady(gspca_dev);
13514
13515 /* Init the cam width/height with values obtained at init? */
13516- reg_w_riv(dev, 0x31, 0x04, 0);
13517+ reg_w_riv(dev, 0x31, 0, 0x04);
13518 spca504B_WaitCmdStatus(gspca_dev);
13519 rc = spca504B_PollingDataReady(gspca_dev);
13520 break;
13521@@ -807,14 +807,14 @@ static void init_ctl_reg(struct gspca_dev *gspca_dev)
13522 default:
13523 /* case BRIDGE_SPCA533: */
13524 /* case BRIDGE_SPCA504B: */
13525- reg_w_riv(dev, 0, 0x21ad, 0x00); /* hue */
13526- reg_w_riv(dev, 0, 0x21ac, 0x01); /* sat/hue */
13527- reg_w_riv(dev, 0, 0x21a3, 0x00); /* gamma */
13528+ reg_w_riv(dev, 0, 0x00, 0x21ad); /* hue */
13529+ reg_w_riv(dev, 0, 0x01, 0x21ac); /* sat/hue */
13530+ reg_w_riv(dev, 0, 0x00, 0x21a3); /* gamma */
13531 break;
13532 case BRIDGE_SPCA536:
13533- reg_w_riv(dev, 0, 0x20f5, 0x40);
13534- reg_w_riv(dev, 0, 0x20f4, 0x01);
13535- reg_w_riv(dev, 0, 0x2089, 0x00);
13536+ reg_w_riv(dev, 0, 0x40, 0x20f5);
13537+ reg_w_riv(dev, 0, 0x01, 0x20f4);
13538+ reg_w_riv(dev, 0, 0x00, 0x2089);
13539 break;
13540 }
13541 if (pollreg)
13542@@ -888,11 +888,11 @@ static int sd_init(struct gspca_dev *gspca_dev)
13543 switch (sd->bridge) {
13544 case BRIDGE_SPCA504B:
13545 reg_w_riv(dev, 0x1d, 0x00, 0);
13546- reg_w_riv(dev, 0, 0x2306, 0x01);
13547- reg_w_riv(dev, 0, 0x0d04, 0x00);
13548- reg_w_riv(dev, 0, 0x2000, 0x00);
13549- reg_w_riv(dev, 0, 0x2301, 0x13);
13550- reg_w_riv(dev, 0, 0x2306, 0x00);
13551+ reg_w_riv(dev, 0, 0x01, 0x2306);
13552+ reg_w_riv(dev, 0, 0x00, 0x0d04);
13553+ reg_w_riv(dev, 0, 0x00, 0x2000);
13554+ reg_w_riv(dev, 0, 0x13, 0x2301);
13555+ reg_w_riv(dev, 0, 0x00, 0x2306);
13556 /* fall thru */
13557 case BRIDGE_SPCA533:
13558 spca504B_PollingDataReady(gspca_dev);
13559@@ -1011,7 +1011,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
13560 spca504B_WaitCmdStatus(gspca_dev);
13561 break;
13562 default:
13563- reg_w_riv(dev, 0x31, 0x04, 0);
13564+ reg_w_riv(dev, 0x31, 0, 0x04);
13565 spca504B_WaitCmdStatus(gspca_dev);
13566 spca504B_PollingDataReady(gspca_dev);
13567 break;
13568diff --git a/drivers/media/video/ov511.c b/drivers/media/video/ov511.c
13569index 2bed9e2..0bc2cf5 100644
13570--- a/drivers/media/video/ov511.c
13571+++ b/drivers/media/video/ov511.c
13572@@ -5878,7 +5878,7 @@ ov51x_probe(struct usb_interface *intf, const struct usb_device_id *id)
13573 goto error;
13574 }
13575
13576- mutex_unlock(&ov->lock);
13577+ mutex_lock(&ov->lock);
13578
13579 return 0;
13580
13581diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c
13582index 6781a07..0901322 100644
13583--- a/drivers/media/video/saa7134/saa7134-cards.c
13584+++ b/drivers/media/video/saa7134/saa7134-cards.c
13585@@ -5279,30 +5279,6 @@ struct saa7134_board saa7134_boards[] = {
13586 .amux = TV,
13587 },
13588 },
13589- [SAA7134_BOARD_ASUS_EUROPA_HYBRID] = {
13590- .name = "Asus Europa Hybrid OEM",
13591- .audio_clock = 0x00187de7,
13592- .tuner_type = TUNER_PHILIPS_TD1316,
13593- .radio_type = UNSET,
13594- .tuner_addr = 0x61,
13595- .radio_addr = ADDR_UNSET,
13596- .tda9887_conf = TDA9887_PRESENT | TDA9887_PORT1_ACTIVE,
13597- .mpeg = SAA7134_MPEG_DVB,
13598- .inputs = { {
13599- .name = name_tv,
13600- .vmux = 3,
13601- .amux = TV,
13602- .tv = 1,
13603- }, {
13604- .name = name_comp1,
13605- .vmux = 4,
13606- .amux = LINE2,
13607- }, {
13608- .name = name_svideo,
13609- .vmux = 8,
13610- .amux = LINE2,
13611- } },
13612- },
13613
13614 };
13615
13616@@ -6442,12 +6418,6 @@ struct pci_device_id saa7134_pci_tbl[] = {
13617 .subdevice = 0x2004,
13618 .driver_data = SAA7134_BOARD_ZOLID_HYBRID_PCI,
13619 }, {
13620- .vendor = PCI_VENDOR_ID_PHILIPS,
13621- .device = PCI_DEVICE_ID_PHILIPS_SAA7134,
13622- .subvendor = 0x1043,
13623- .subdevice = 0x4847,
13624- .driver_data = SAA7134_BOARD_ASUS_EUROPA_HYBRID,
13625- }, {
13626 /* --- boards without eeprom + subsystem ID --- */
13627 .vendor = PCI_VENDOR_ID_PHILIPS,
13628 .device = PCI_DEVICE_ID_PHILIPS_SAA7134,
13629@@ -7109,7 +7079,6 @@ int saa7134_board_init2(struct saa7134_dev *dev)
13630 /* break intentionally omitted */
13631 case SAA7134_BOARD_VIDEOMATE_DVBT_300:
13632 case SAA7134_BOARD_ASUS_EUROPA2_HYBRID:
13633- case SAA7134_BOARD_ASUS_EUROPA_HYBRID:
13634 {
13635
13636 /* The Philips EUROPA based hybrid boards have the tuner
13637diff --git a/drivers/media/video/saa7134/saa7134-dvb.c b/drivers/media/video/saa7134/saa7134-dvb.c
13638index b8a805c..a26e997 100644
13639--- a/drivers/media/video/saa7134/saa7134-dvb.c
13640+++ b/drivers/media/video/saa7134/saa7134-dvb.c
13641@@ -1116,7 +1116,6 @@ static int dvb_init(struct saa7134_dev *dev)
13642 break;
13643 case SAA7134_BOARD_PHILIPS_EUROPA:
13644 case SAA7134_BOARD_VIDEOMATE_DVBT_300:
13645- case SAA7134_BOARD_ASUS_EUROPA_HYBRID:
13646 fe0->dvb.frontend = dvb_attach(tda10046_attach,
13647 &philips_europa_config,
13648 &dev->i2c_adap);
13649diff --git a/drivers/media/video/saa7134/saa7134.h b/drivers/media/video/saa7134/saa7134.h
13650index 94e1a3b..f8697d4 100644
13651--- a/drivers/media/video/saa7134/saa7134.h
13652+++ b/drivers/media/video/saa7134/saa7134.h
13653@@ -297,7 +297,6 @@ struct saa7134_format {
13654 #define SAA7134_BOARD_BEHOLD_X7 171
13655 #define SAA7134_BOARD_ROVERMEDIA_LINK_PRO_FM 172
13656 #define SAA7134_BOARD_ZOLID_HYBRID_PCI 173
13657-#define SAA7134_BOARD_ASUS_EUROPA_HYBRID 174
13658
13659 #define SAA7134_MAXBOARDS 32
13660 #define SAA7134_INPUT_MAX 8
13661diff --git a/drivers/media/video/uvc/uvc_ctrl.c b/drivers/media/video/uvc/uvc_ctrl.c
13662index 4a293b4..1b89735 100644
13663--- a/drivers/media/video/uvc/uvc_ctrl.c
13664+++ b/drivers/media/video/uvc/uvc_ctrl.c
13665@@ -1405,7 +1405,7 @@ uvc_ctrl_prune_entity(struct uvc_device *dev, struct uvc_entity *entity)
13666 size = entity->processing.bControlSize;
13667
13668 for (i = 0; i < ARRAY_SIZE(blacklist); ++i) {
13669- if (!usb_match_one_id(dev->intf, &blacklist[i].id))
13670+ if (!usb_match_id(dev->intf, &blacklist[i].id))
13671 continue;
13672
13673 if (blacklist[i].index >= 8 * size ||
13674diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
13675index b6992b7..610e914 100644
13676--- a/drivers/message/fusion/mptbase.c
13677+++ b/drivers/message/fusion/mptbase.c
13678@@ -4330,8 +4330,6 @@ initChainBuffers(MPT_ADAPTER *ioc)
13679
13680 if (ioc->bus_type == SPI)
13681 num_chain *= MPT_SCSI_CAN_QUEUE;
13682- else if (ioc->bus_type == SAS)
13683- num_chain *= MPT_SAS_CAN_QUEUE;
13684 else
13685 num_chain *= MPT_FC_CAN_QUEUE;
13686
13687diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
13688index 6cea718..c295786 100644
13689--- a/drivers/message/fusion/mptscsih.c
13690+++ b/drivers/message/fusion/mptscsih.c
13691@@ -1720,7 +1720,7 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
13692 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "task abort: "
13693 "Command not in the active list! (sc=%p)\n", ioc->name,
13694 SCpnt));
13695- retval = SUCCESS;
13696+ retval = 0;
13697 goto out;
13698 }
13699
13700diff --git a/drivers/mfd/wm8350-core.c b/drivers/mfd/wm8350-core.c
13701index ca6b098..ba27c9d 100644
13702--- a/drivers/mfd/wm8350-core.c
13703+++ b/drivers/mfd/wm8350-core.c
13704@@ -134,7 +134,8 @@ static inline int is_reg_locked(struct wm8350 *wm8350, u8 reg)
13705 wm8350->reg_cache[WM8350_SECURITY] == WM8350_UNLOCK_KEY)
13706 return 0;
13707
13708- if ((reg >= WM8350_GPIO_FUNCTION_SELECT_1 &&
13709+ if ((reg == WM8350_GPIO_CONFIGURATION_I_O) ||
13710+ (reg >= WM8350_GPIO_FUNCTION_SELECT_1 &&
13711 reg <= WM8350_GPIO_FUNCTION_SELECT_4) ||
13712 (reg >= WM8350_BATTERY_CHARGER_CONTROL_1 &&
13713 reg <= WM8350_BATTERY_CHARGER_CONTROL_3))
13714diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
13715index 1eac626..e9eae4a 100644
13716--- a/drivers/misc/enclosure.c
13717+++ b/drivers/misc/enclosure.c
13718@@ -391,7 +391,6 @@ static const char *const enclosure_status [] = {
13719 [ENCLOSURE_STATUS_NOT_INSTALLED] = "not installed",
13720 [ENCLOSURE_STATUS_UNKNOWN] = "unknown",
13721 [ENCLOSURE_STATUS_UNAVAILABLE] = "unavailable",
13722- [ENCLOSURE_STATUS_MAX] = NULL,
13723 };
13724
13725 static const char *const enclosure_type [] = {
13726diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
13727index 1f552c6..85f0e8c 100644
13728--- a/drivers/mmc/card/block.c
13729+++ b/drivers/mmc/card/block.c
13730@@ -85,14 +85,7 @@ static void mmc_blk_put(struct mmc_blk_data *md)
13731 mutex_lock(&open_lock);
13732 md->usage--;
13733 if (md->usage == 0) {
13734- int devmaj = MAJOR(disk_devt(md->disk));
13735 int devidx = MINOR(disk_devt(md->disk)) >> MMC_SHIFT;
13736-
13737- if (!devmaj)
13738- devidx = md->disk->first_minor >> MMC_SHIFT;
13739-
13740- blk_cleanup_queue(md->queue.queue);
13741-
13742 __clear_bit(devidx, dev_use);
13743
13744 put_disk(md->disk);
13745@@ -620,7 +613,6 @@ static int mmc_blk_probe(struct mmc_card *card)
13746 return 0;
13747
13748 out:
13749- mmc_cleanup_queue(&md->queue);
13750 mmc_blk_put(md);
13751
13752 return err;
13753diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
13754index c5a7a85..49e5823 100644
13755--- a/drivers/mmc/card/queue.c
13756+++ b/drivers/mmc/card/queue.c
13757@@ -90,10 +90,9 @@ static void mmc_request(struct request_queue *q)
13758 struct request *req;
13759
13760 if (!mq) {
13761- while ((req = blk_fetch_request(q)) != NULL) {
13762- req->cmd_flags |= REQ_QUIET;
13763+ printk(KERN_ERR "MMC: killing requests for dead queue\n");
13764+ while ((req = blk_fetch_request(q)) != NULL)
13765 __blk_end_request_all(req, -EIO);
13766- }
13767 return;
13768 }
13769
13770@@ -224,18 +223,17 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
13771 struct request_queue *q = mq->queue;
13772 unsigned long flags;
13773
13774+ /* Mark that we should start throwing out stragglers */
13775+ spin_lock_irqsave(q->queue_lock, flags);
13776+ q->queuedata = NULL;
13777+ spin_unlock_irqrestore(q->queue_lock, flags);
13778+
13779 /* Make sure the queue isn't suspended, as that will deadlock */
13780 mmc_queue_resume(mq);
13781
13782 /* Then terminate our worker thread */
13783 kthread_stop(mq->thread);
13784
13785- /* Empty the queue */
13786- spin_lock_irqsave(q->queue_lock, flags);
13787- q->queuedata = NULL;
13788- blk_start_queue(q);
13789- spin_unlock_irqrestore(q->queue_lock, flags);
13790-
13791 if (mq->bounce_sg)
13792 kfree(mq->bounce_sg);
13793 mq->bounce_sg = NULL;
13794@@ -247,6 +245,8 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
13795 kfree(mq->bounce_buf);
13796 mq->bounce_buf = NULL;
13797
13798+ blk_cleanup_queue(mq->queue);
13799+
13800 mq->card = NULL;
13801 }
13802 EXPORT_SYMBOL(mmc_cleanup_queue);
13803diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
13804index 111ea41..f237ddb 100644
13805--- a/drivers/mtd/ubi/cdev.c
13806+++ b/drivers/mtd/ubi/cdev.c
13807@@ -853,6 +853,7 @@ static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
13808 break;
13809 }
13810
13811+ req.name[req.name_len] = '\0';
13812 err = verify_mkvol_req(ubi, &req);
13813 if (err)
13814 break;
13815diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
13816index 425bf5a..74fdc40 100644
13817--- a/drivers/mtd/ubi/upd.c
13818+++ b/drivers/mtd/ubi/upd.c
13819@@ -147,15 +147,12 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
13820 }
13821
13822 if (bytes == 0) {
13823- err = ubi_wl_flush(ubi);
13824- if (err)
13825- return err;
13826-
13827 err = clear_update_marker(ubi, vol, 0);
13828 if (err)
13829 return err;
13830- vol->updating = 0;
13831- return 0;
13832+ err = ubi_wl_flush(ubi);
13833+ if (!err)
13834+ vol->updating = 0;
13835 }
13836
13837 vol->upd_buf = vmalloc(ubi->leb_size);
13838@@ -365,16 +362,16 @@ int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol,
13839
13840 ubi_assert(vol->upd_received <= vol->upd_bytes);
13841 if (vol->upd_received == vol->upd_bytes) {
13842- err = ubi_wl_flush(ubi);
13843- if (err)
13844- return err;
13845 /* The update is finished, clear the update marker */
13846 err = clear_update_marker(ubi, vol, vol->upd_bytes);
13847 if (err)
13848 return err;
13849- vol->updating = 0;
13850- err = to_write;
13851- vfree(vol->upd_buf);
13852+ err = ubi_wl_flush(ubi);
13853+ if (err == 0) {
13854+ vol->updating = 0;
13855+ err = to_write;
13856+ vfree(vol->upd_buf);
13857+ }
13858 }
13859
13860 return err;
13861diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
13862index 4004402..1afc61e 100644
13863--- a/drivers/mtd/ubi/vtbl.c
13864+++ b/drivers/mtd/ubi/vtbl.c
13865@@ -566,7 +566,6 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
13866 vol->reserved_pebs = be32_to_cpu(vtbl[i].reserved_pebs);
13867 vol->alignment = be32_to_cpu(vtbl[i].alignment);
13868 vol->data_pad = be32_to_cpu(vtbl[i].data_pad);
13869- vol->upd_marker = vtbl[i].upd_marker;
13870 vol->vol_type = vtbl[i].vol_type == UBI_VID_DYNAMIC ?
13871 UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME;
13872 vol->name_len = be16_to_cpu(vtbl[i].name_len);
13873diff --git a/drivers/net/atl1c/atl1c.h b/drivers/net/atl1c/atl1c.h
13874index 790e55b..2a1120a 100644
13875--- a/drivers/net/atl1c/atl1c.h
13876+++ b/drivers/net/atl1c/atl1c.h
13877@@ -534,9 +534,6 @@ struct atl1c_adapter {
13878 #define __AT_TESTING 0x0001
13879 #define __AT_RESETTING 0x0002
13880 #define __AT_DOWN 0x0003
13881- u8 work_event;
13882-#define ATL1C_WORK_EVENT_RESET 0x01
13883-#define ATL1C_WORK_EVENT_LINK_CHANGE 0x02
13884 u32 msg_enable;
13885
13886 bool have_msi;
13887@@ -548,7 +545,8 @@ struct atl1c_adapter {
13888 spinlock_t tx_lock;
13889 atomic_t irq_sem;
13890
13891- struct work_struct common_task;
13892+ struct work_struct reset_task;
13893+ struct work_struct link_chg_task;
13894 struct timer_list watchdog_timer;
13895 struct timer_list phy_config_timer;
13896
13897diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
13898index be00ee9..1372e9a 100644
13899--- a/drivers/net/atl1c/atl1c_main.c
13900+++ b/drivers/net/atl1c/atl1c_main.c
13901@@ -198,12 +198,27 @@ static void atl1c_phy_config(unsigned long data)
13902
13903 void atl1c_reinit_locked(struct atl1c_adapter *adapter)
13904 {
13905+
13906 WARN_ON(in_interrupt());
13907 atl1c_down(adapter);
13908 atl1c_up(adapter);
13909 clear_bit(__AT_RESETTING, &adapter->flags);
13910 }
13911
13912+static void atl1c_reset_task(struct work_struct *work)
13913+{
13914+ struct atl1c_adapter *adapter;
13915+ struct net_device *netdev;
13916+
13917+ adapter = container_of(work, struct atl1c_adapter, reset_task);
13918+ netdev = adapter->netdev;
13919+
13920+ netif_device_detach(netdev);
13921+ atl1c_down(adapter);
13922+ atl1c_up(adapter);
13923+ netif_device_attach(netdev);
13924+}
13925+
13926 static void atl1c_check_link_status(struct atl1c_adapter *adapter)
13927 {
13928 struct atl1c_hw *hw = &adapter->hw;
13929@@ -260,6 +275,18 @@ static void atl1c_check_link_status(struct atl1c_adapter *adapter)
13930 }
13931 }
13932
13933+/*
13934+ * atl1c_link_chg_task - deal with a link change event out of interrupt context
13935+ * @work: work_struct embedded in the adapter structure
13936+ */
13937+static void atl1c_link_chg_task(struct work_struct *work)
13938+{
13939+ struct atl1c_adapter *adapter;
13940+
13941+ adapter = container_of(work, struct atl1c_adapter, link_chg_task);
13942+ atl1c_check_link_status(adapter);
13943+}
13944+
13945 static void atl1c_link_chg_event(struct atl1c_adapter *adapter)
13946 {
13947 struct net_device *netdev = adapter->netdev;
13948@@ -284,39 +311,19 @@ static void atl1c_link_chg_event(struct atl1c_adapter *adapter)
13949 adapter->link_speed = SPEED_0;
13950 }
13951 }
13952-
13953- adapter->work_event |= ATL1C_WORK_EVENT_LINK_CHANGE;
13954- schedule_work(&adapter->common_task);
13955-}
13956-
13957-static void atl1c_common_task(struct work_struct *work)
13958-{
13959- struct atl1c_adapter *adapter;
13960- struct net_device *netdev;
13961-
13962- adapter = container_of(work, struct atl1c_adapter, common_task);
13963- netdev = adapter->netdev;
13964-
13965- if (adapter->work_event & ATL1C_WORK_EVENT_RESET) {
13966- netif_device_detach(netdev);
13967- atl1c_down(adapter);
13968- atl1c_up(adapter);
13969- netif_device_attach(netdev);
13970- return;
13971- }
13972-
13973- if (adapter->work_event & ATL1C_WORK_EVENT_LINK_CHANGE)
13974- atl1c_check_link_status(adapter);
13975-
13976- return;
13977+ schedule_work(&adapter->link_chg_task);
13978 }
13979
13980-
13981 static void atl1c_del_timer(struct atl1c_adapter *adapter)
13982 {
13983 del_timer_sync(&adapter->phy_config_timer);
13984 }
13985
13986+static void atl1c_cancel_work(struct atl1c_adapter *adapter)
13987+{
13988+ cancel_work_sync(&adapter->reset_task);
13989+ cancel_work_sync(&adapter->link_chg_task);
13990+}
13991
13992 /*
13993 * atl1c_tx_timeout - Respond to a Tx Hang
13994@@ -327,8 +334,7 @@ static void atl1c_tx_timeout(struct net_device *netdev)
13995 struct atl1c_adapter *adapter = netdev_priv(netdev);
13996
13997 /* Do the reset outside of interrupt context */
13998- adapter->work_event |= ATL1C_WORK_EVENT_RESET;
13999- schedule_work(&adapter->common_task);
14000+ schedule_work(&adapter->reset_task);
14001 }
14002
14003 /*
14004@@ -1530,8 +1536,7 @@ static irqreturn_t atl1c_intr(int irq, void *data)
14005 /* reset MAC */
14006 hw->intr_mask &= ~ISR_ERROR;
14007 AT_WRITE_REG(hw, REG_IMR, hw->intr_mask);
14008- adapter->work_event |= ATL1C_WORK_EVENT_RESET;
14009- schedule_work(&adapter->common_task);
14010+ schedule_work(&adapter->reset_task);
14011 break;
14012 }
14013
14014@@ -2195,7 +2200,8 @@ void atl1c_down(struct atl1c_adapter *adapter)
14015 struct net_device *netdev = adapter->netdev;
14016
14017 atl1c_del_timer(adapter);
14018- adapter->work_event = 0; /* clear all event */
14019+ atl1c_cancel_work(adapter);
14020+
14021 /* signal that we're down so the interrupt handler does not
14022 * reschedule our watchdog timer */
14023 set_bit(__AT_DOWN, &adapter->flags);
14024@@ -2595,8 +2601,8 @@ static int __devinit atl1c_probe(struct pci_dev *pdev,
14025 adapter->hw.mac_addr[4], adapter->hw.mac_addr[5]);
14026
14027 atl1c_hw_set_mac_addr(&adapter->hw);
14028- INIT_WORK(&adapter->common_task, atl1c_common_task);
14029- adapter->work_event = 0;
14030+ INIT_WORK(&adapter->reset_task, atl1c_reset_task);
14031+ INIT_WORK(&adapter->link_chg_task, atl1c_link_chg_task);
14032 err = register_netdev(netdev);
14033 if (err) {
14034 dev_err(&pdev->dev, "register netdevice failed\n");
14035diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
14036index 1b5facf..955da73 100644
14037--- a/drivers/net/atl1e/atl1e_main.c
14038+++ b/drivers/net/atl1e/atl1e_main.c
14039@@ -1666,6 +1666,41 @@ static int atl1e_tso_csum(struct atl1e_adapter *adapter,
14040 }
14041 return 0;
14042 }
14043+
14044+ if (offload_type & SKB_GSO_TCPV6) {
14045+ real_len = (((unsigned char *)ipv6_hdr(skb) - skb->data)
14046+ + ntohs(ipv6_hdr(skb)->payload_len));
14047+ if (real_len < skb->len)
14048+ pskb_trim(skb, real_len);
14049+
14050+ /* check for a zero-byte payload */
14051+ hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
14052+ if (unlikely(skb->len == hdr_len)) {
14053+ /* only xsum need */
14054+ dev_warn(&pdev->dev,
14055+ "IPV6 tso with zero data??\n");
14056+ goto check_sum;
14057+ } else {
14058+ tcp_hdr(skb)->check = ~csum_ipv6_magic(
14059+ &ipv6_hdr(skb)->saddr,
14060+ &ipv6_hdr(skb)->daddr,
14061+ 0, IPPROTO_TCP, 0);
14062+ tpd->word3 |= 1 << TPD_IP_VERSION_SHIFT;
14063+ hdr_len >>= 1;
14064+ tpd->word3 |= (hdr_len & TPD_V6_IPHLLO_MASK) <<
14065+ TPD_V6_IPHLLO_SHIFT;
14066+ tpd->word3 |= ((hdr_len >> 3) &
14067+ TPD_V6_IPHLHI_MASK) <<
14068+ TPD_V6_IPHLHI_SHIFT;
14069+ tpd->word3 |= (tcp_hdrlen(skb) >> 2 &
14070+ TPD_TCPHDRLEN_MASK) <<
14071+ TPD_TCPHDRLEN_SHIFT;
14072+ tpd->word3 |= ((skb_shinfo(skb)->gso_size) &
14073+ TPD_MSS_MASK) << TPD_MSS_SHIFT;
14074+ tpd->word3 |= 1 << TPD_SEGMENT_EN_SHIFT;
14075+ }
14076+ }
14077+ return 0;
14078 }
14079
14080 check_sum:
14081@@ -2254,6 +2289,7 @@ static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
14082 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
14083 netdev->features |= NETIF_F_LLTX;
14084 netdev->features |= NETIF_F_TSO;
14085+ netdev->features |= NETIF_F_TSO6;
14086
14087 return 0;
14088 }
14089diff --git a/drivers/net/b44.c b/drivers/net/b44.c
14090index 4869adb..2a91323 100644
14091--- a/drivers/net/b44.c
14092+++ b/drivers/net/b44.c
14093@@ -1505,7 +1505,8 @@ static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
14094 for (k = 0; k< ethaddr_bytes; k++) {
14095 ppattern[offset + magicsync +
14096 (j * ETH_ALEN) + k] = macaddr[k];
14097- set_bit(len++, (unsigned long *) pmask);
14098+ len++;
14099+ set_bit(len, (unsigned long *) pmask);
14100 }
14101 }
14102 return len - 1;
14103diff --git a/drivers/net/bcm63xx_enet.c b/drivers/net/bcm63xx_enet.c
14104index d110c1b..ba29dc3 100644
14105--- a/drivers/net/bcm63xx_enet.c
14106+++ b/drivers/net/bcm63xx_enet.c
14107@@ -1248,15 +1248,9 @@ static void bcm_enet_get_drvinfo(struct net_device *netdev,
14108 drvinfo->n_stats = BCM_ENET_STATS_LEN;
14109 }
14110
14111-static int bcm_enet_get_sset_count(struct net_device *netdev,
14112- int string_set)
14113+static int bcm_enet_get_stats_count(struct net_device *netdev)
14114 {
14115- switch (string_set) {
14116- case ETH_SS_STATS:
14117- return BCM_ENET_STATS_LEN;
14118- default:
14119- return -EINVAL;
14120- }
14121+ return BCM_ENET_STATS_LEN;
14122 }
14123
14124 static void bcm_enet_get_strings(struct net_device *netdev,
14125@@ -1482,7 +1476,7 @@ static int bcm_enet_set_pauseparam(struct net_device *dev,
14126
14127 static struct ethtool_ops bcm_enet_ethtool_ops = {
14128 .get_strings = bcm_enet_get_strings,
14129- .get_sset_count = bcm_enet_get_sset_count,
14130+ .get_stats_count = bcm_enet_get_stats_count,
14131 .get_ethtool_stats = bcm_enet_get_ethtool_stats,
14132 .get_settings = bcm_enet_get_settings,
14133 .set_settings = bcm_enet_set_settings,
14134diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
14135index 511b922..3b79a22 100644
14136--- a/drivers/net/benet/be.h
14137+++ b/drivers/net/benet/be.h
14138@@ -35,31 +35,20 @@
14139 #define DRV_VER "2.101.205"
14140 #define DRV_NAME "be2net"
14141 #define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC"
14142-#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC"
14143 #define OC_NAME "Emulex OneConnect 10Gbps NIC"
14144-#define OC_NAME1 "Emulex OneConnect 10Gbps NIC (be3)"
14145 #define DRV_DESC BE_NAME "Driver"
14146
14147 #define BE_VENDOR_ID 0x19a2
14148 #define BE_DEVICE_ID1 0x211
14149-#define BE_DEVICE_ID2 0x221
14150 #define OC_DEVICE_ID1 0x700
14151 #define OC_DEVICE_ID2 0x701
14152-#define OC_DEVICE_ID3 0x710
14153
14154 static inline char *nic_name(struct pci_dev *pdev)
14155 {
14156- switch (pdev->device) {
14157- case OC_DEVICE_ID1:
14158- case OC_DEVICE_ID2:
14159+ if (pdev->device == OC_DEVICE_ID1 || pdev->device == OC_DEVICE_ID2)
14160 return OC_NAME;
14161- case OC_DEVICE_ID3:
14162- return OC_NAME1;
14163- case BE_DEVICE_ID2:
14164- return BE3_NAME;
14165- default:
14166+ else
14167 return BE_NAME;
14168- }
14169 }
14170
14171 /* Number of bytes of an RX frame that are copied to skb->data */
14172@@ -272,13 +261,8 @@ struct be_adapter {
14173 u32 cap;
14174 u32 rx_fc; /* Rx flow control */
14175 u32 tx_fc; /* Tx flow control */
14176- u8 generation; /* BladeEngine ASIC generation */
14177 };
14178
14179-/* BladeEngine Generation numbers */
14180-#define BE_GEN2 2
14181-#define BE_GEN3 3
14182-
14183 extern const struct ethtool_ops be_ethtool_ops;
14184
14185 #define drvr_stats(adapter) (&adapter->stats.drvr_stats)
14186diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
14187index ad33d55..e5f9676 100644
14188--- a/drivers/net/benet/be_cmds.h
14189+++ b/drivers/net/benet/be_cmds.h
14190@@ -154,8 +154,7 @@ struct be_cmd_req_hdr {
14191 u8 domain; /* dword 0 */
14192 u32 timeout; /* dword 1 */
14193 u32 request_length; /* dword 2 */
14194- u8 version; /* dword 3 */
14195- u8 rsvd[3]; /* dword 3 */
14196+ u32 rsvd; /* dword 3 */
14197 };
14198
14199 #define RESP_HDR_INFO_OPCODE_SHIFT 0 /* bits 0 - 7 */
14200diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
14201index ec983cb..876b357 100644
14202--- a/drivers/net/benet/be_main.c
14203+++ b/drivers/net/benet/be_main.c
14204@@ -31,10 +31,8 @@ MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
14205
14206 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
14207 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
14208- { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
14209 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
14210 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
14211- { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
14212 { 0 }
14213 };
14214 MODULE_DEVICE_TABLE(pci, be_dev_ids);
14215@@ -1944,7 +1942,6 @@ static void be_unmap_pci_bars(struct be_adapter *adapter)
14216 static int be_map_pci_bars(struct be_adapter *adapter)
14217 {
14218 u8 __iomem *addr;
14219- int pcicfg_reg;
14220
14221 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
14222 pci_resource_len(adapter->pdev, 2));
14223@@ -1958,13 +1955,8 @@ static int be_map_pci_bars(struct be_adapter *adapter)
14224 goto pci_map_err;
14225 adapter->db = addr;
14226
14227- if (adapter->generation == BE_GEN2)
14228- pcicfg_reg = 1;
14229- else
14230- pcicfg_reg = 0;
14231-
14232- addr = ioremap_nocache(pci_resource_start(adapter->pdev, pcicfg_reg),
14233- pci_resource_len(adapter->pdev, pcicfg_reg));
14234+ addr = ioremap_nocache(pci_resource_start(adapter->pdev, 1),
14235+ pci_resource_len(adapter->pdev, 1));
14236 if (addr == NULL)
14237 goto pci_map_err;
14238 adapter->pcicfg = addr;
14239@@ -2034,7 +2026,6 @@ static int be_stats_init(struct be_adapter *adapter)
14240 cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
14241 if (cmd->va == NULL)
14242 return -1;
14243- memset(cmd->va, 0, cmd->size);
14244 return 0;
14245 }
14246
14247@@ -2108,20 +2099,6 @@ static int __devinit be_probe(struct pci_dev *pdev,
14248 goto rel_reg;
14249 }
14250 adapter = netdev_priv(netdev);
14251-
14252- switch (pdev->device) {
14253- case BE_DEVICE_ID1:
14254- case OC_DEVICE_ID1:
14255- adapter->generation = BE_GEN2;
14256- break;
14257- case BE_DEVICE_ID2:
14258- case OC_DEVICE_ID2:
14259- adapter->generation = BE_GEN3;
14260- break;
14261- default:
14262- adapter->generation = 0;
14263- }
14264-
14265 adapter->pdev = pdev;
14266 pci_set_drvdata(pdev, adapter);
14267 adapter->netdev = netdev;
14268diff --git a/drivers/net/e100.c b/drivers/net/e100.c
14269index 0c53c92..d269a68 100644
14270--- a/drivers/net/e100.c
14271+++ b/drivers/net/e100.c
14272@@ -1817,7 +1817,6 @@ static int e100_alloc_cbs(struct nic *nic)
14273 &nic->cbs_dma_addr);
14274 if (!nic->cbs)
14275 return -ENOMEM;
14276- memset(nic->cbs, 0, count * sizeof(struct cb));
14277
14278 for (cb = nic->cbs, i = 0; i < count; cb++, i++) {
14279 cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
14280@@ -1826,6 +1825,7 @@ static int e100_alloc_cbs(struct nic *nic)
14281 cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
14282 cb->link = cpu_to_le32(nic->cbs_dma_addr +
14283 ((i+1) % count) * sizeof(struct cb));
14284+ cb->skb = NULL;
14285 }
14286
14287 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
14288diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
14289index 4a2ee85..42e2b7e 100644
14290--- a/drivers/net/e1000/e1000.h
14291+++ b/drivers/net/e1000/e1000.h
14292@@ -326,8 +326,6 @@ struct e1000_adapter {
14293 /* for ioport free */
14294 int bars;
14295 int need_ioport;
14296-
14297- bool discarding;
14298 };
14299
14300 enum e1000_state_t {
14301diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
14302index 1a23f16..bcd192c 100644
14303--- a/drivers/net/e1000/e1000_main.c
14304+++ b/drivers/net/e1000/e1000_main.c
14305@@ -1698,6 +1698,18 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
14306 rctl &= ~E1000_RCTL_SZ_4096;
14307 rctl |= E1000_RCTL_BSEX;
14308 switch (adapter->rx_buffer_len) {
14309+ case E1000_RXBUFFER_256:
14310+ rctl |= E1000_RCTL_SZ_256;
14311+ rctl &= ~E1000_RCTL_BSEX;
14312+ break;
14313+ case E1000_RXBUFFER_512:
14314+ rctl |= E1000_RCTL_SZ_512;
14315+ rctl &= ~E1000_RCTL_BSEX;
14316+ break;
14317+ case E1000_RXBUFFER_1024:
14318+ rctl |= E1000_RCTL_SZ_1024;
14319+ rctl &= ~E1000_RCTL_BSEX;
14320+ break;
14321 case E1000_RXBUFFER_2048:
14322 default:
14323 rctl |= E1000_RCTL_SZ_2048;
14324@@ -3142,7 +3154,13 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
14325 * however with the new *_jumbo_rx* routines, jumbo receives will use
14326 * fragmented skbs */
14327
14328- if (max_frame <= E1000_RXBUFFER_2048)
14329+ if (max_frame <= E1000_RXBUFFER_256)
14330+ adapter->rx_buffer_len = E1000_RXBUFFER_256;
14331+ else if (max_frame <= E1000_RXBUFFER_512)
14332+ adapter->rx_buffer_len = E1000_RXBUFFER_512;
14333+ else if (max_frame <= E1000_RXBUFFER_1024)
14334+ adapter->rx_buffer_len = E1000_RXBUFFER_1024;
14335+ else if (max_frame <= E1000_RXBUFFER_2048)
14336 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
14337 else
14338 #if (PAGE_SIZE >= E1000_RXBUFFER_16384)
14339@@ -3809,22 +3827,13 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
14340
14341 length = le16_to_cpu(rx_desc->length);
14342 /* !EOP means multiple descriptors were used to store a single
14343- * packet, if thats the case we need to toss it. In fact, we
14344- * to toss every packet with the EOP bit clear and the next
14345- * frame that _does_ have the EOP bit set, as it is by
14346- * definition only a frame fragment
14347- */
14348- if (unlikely(!(status & E1000_RXD_STAT_EOP)))
14349- adapter->discarding = true;
14350-
14351- if (adapter->discarding) {
14352+ * packet; also make sure the frame isn't just the CRC */
14353+ if (unlikely(!(status & E1000_RXD_STAT_EOP) || (length <= 4))) {
14354 /* All receives must fit into a single buffer */
14355 E1000_DBG("%s: Receive packet consumed multiple"
14356 " buffers\n", netdev->name);
14357 /* recycle */
14358 buffer_info->skb = skb;
14359- if (status & E1000_RXD_STAT_EOP)
14360- adapter->discarding = false;
14361 goto next_desc;
14362 }
14363
14364diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
14365index 47db9bd..3e187b0 100644
14366--- a/drivers/net/e1000e/e1000.h
14367+++ b/drivers/net/e1000e/e1000.h
14368@@ -417,7 +417,6 @@ struct e1000_info {
14369 /* CRC Stripping defines */
14370 #define FLAG2_CRC_STRIPPING (1 << 0)
14371 #define FLAG2_HAS_PHY_WAKEUP (1 << 1)
14372-#define FLAG2_IS_DISCARDING (1 << 2)
14373
14374 #define E1000_RX_DESC_PS(R, i) \
14375 (&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
14376diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
14377index 2154530..fad8f9e 100644
14378--- a/drivers/net/e1000e/netdev.c
14379+++ b/drivers/net/e1000e/netdev.c
14380@@ -482,24 +482,14 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
14381
14382 length = le16_to_cpu(rx_desc->length);
14383
14384- /*
14385- * !EOP means multiple descriptors were used to store a single
14386- * packet, if that's the case we need to toss it. In fact, we
14387- * need to toss every packet with the EOP bit clear and the
14388- * next frame that _does_ have the EOP bit set, as it is by
14389- * definition only a frame fragment
14390- */
14391- if (unlikely(!(status & E1000_RXD_STAT_EOP)))
14392- adapter->flags2 |= FLAG2_IS_DISCARDING;
14393-
14394- if (adapter->flags2 & FLAG2_IS_DISCARDING) {
14395+ /* !EOP means multiple descriptors were used to store a single
14396+ * packet; also make sure the frame isn't just the CRC */
14397+ if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) {
14398 /* All receives must fit into a single buffer */
14399 e_dbg("%s: Receive packet consumed multiple buffers\n",
14400 netdev->name);
14401 /* recycle */
14402 buffer_info->skb = skb;
14403- if (status & E1000_RXD_STAT_EOP)
14404- adapter->flags2 &= ~FLAG2_IS_DISCARDING;
14405 goto next_desc;
14406 }
14407
14408@@ -757,16 +747,10 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
14409 PCI_DMA_FROMDEVICE);
14410 buffer_info->dma = 0;
14411
14412- /* see !EOP comment in other rx routine */
14413- if (!(staterr & E1000_RXD_STAT_EOP))
14414- adapter->flags2 |= FLAG2_IS_DISCARDING;
14415-
14416- if (adapter->flags2 & FLAG2_IS_DISCARDING) {
14417+ if (!(staterr & E1000_RXD_STAT_EOP)) {
14418 e_dbg("%s: Packet Split buffers didn't pick up the "
14419 "full packet\n", netdev->name);
14420 dev_kfree_skb_irq(skb);
14421- if (staterr & E1000_RXD_STAT_EOP)
14422- adapter->flags2 &= ~FLAG2_IS_DISCARDING;
14423 goto next_desc;
14424 }
14425
14426@@ -1136,7 +1120,6 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
14427
14428 rx_ring->next_to_clean = 0;
14429 rx_ring->next_to_use = 0;
14430- adapter->flags2 &= ~FLAG2_IS_DISCARDING;
14431
14432 writel(0, adapter->hw.hw_addr + rx_ring->head);
14433 writel(0, adapter->hw.hw_addr + rx_ring->tail);
14434@@ -2347,6 +2330,18 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
14435 rctl &= ~E1000_RCTL_SZ_4096;
14436 rctl |= E1000_RCTL_BSEX;
14437 switch (adapter->rx_buffer_len) {
14438+ case 256:
14439+ rctl |= E1000_RCTL_SZ_256;
14440+ rctl &= ~E1000_RCTL_BSEX;
14441+ break;
14442+ case 512:
14443+ rctl |= E1000_RCTL_SZ_512;
14444+ rctl &= ~E1000_RCTL_BSEX;
14445+ break;
14446+ case 1024:
14447+ rctl |= E1000_RCTL_SZ_1024;
14448+ rctl &= ~E1000_RCTL_BSEX;
14449+ break;
14450 case 2048:
14451 default:
14452 rctl |= E1000_RCTL_SZ_2048;
14453@@ -4326,7 +4321,13 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
14454 * fragmented skbs
14455 */
14456
14457- if (max_frame <= 2048)
14458+ if (max_frame <= 256)
14459+ adapter->rx_buffer_len = 256;
14460+ else if (max_frame <= 512)
14461+ adapter->rx_buffer_len = 512;
14462+ else if (max_frame <= 1024)
14463+ adapter->rx_buffer_len = 1024;
14464+ else if (max_frame <= 2048)
14465 adapter->rx_buffer_len = 2048;
14466 else
14467 adapter->rx_buffer_len = 4096;
14468diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
14469index 35d896b..a2fc70a 100644
14470--- a/drivers/net/qlge/qlge_main.c
14471+++ b/drivers/net/qlge/qlge_main.c
14472@@ -3310,8 +3310,10 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
14473
14474 /* Initialize the port and set the max framesize. */
14475 status = qdev->nic_ops->port_initialize(qdev);
14476- if (status)
14477- QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
14478+ if (status) {
14479+ QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
14480+ return status;
14481+ }
14482
14483 /* Set up the MAC address and frame routing filter. */
14484 status = ql_cam_route_initialize(qdev);
14485@@ -3712,6 +3714,9 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p)
14486 struct sockaddr *addr = p;
14487 int status;
14488
14489+ if (netif_running(ndev))
14490+ return -EBUSY;
14491+
14492 if (!is_valid_ether_addr(addr->sa_data))
14493 return -EADDRNOTAVAIL;
14494 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
14495@@ -3863,7 +3868,8 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
14496 struct net_device *ndev, int cards_found)
14497 {
14498 struct ql_adapter *qdev = netdev_priv(ndev);
14499- int err = 0;
14500+ int pos, err = 0;
14501+ u16 val16;
14502
14503 memset((void *)qdev, 0, sizeof(*qdev));
14504 err = pci_enable_device(pdev);
14505@@ -3875,12 +3881,18 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
14506 qdev->ndev = ndev;
14507 qdev->pdev = pdev;
14508 pci_set_drvdata(pdev, ndev);
14509-
14510- /* Set PCIe read request size */
14511- err = pcie_set_readrq(pdev, 4096);
14512- if (err) {
14513- dev_err(&pdev->dev, "Set readrq failed.\n");
14514- goto err_out;
14515+ pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
14516+ if (pos <= 0) {
14517+ dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, "
14518+ "aborting.\n");
14519+ return pos;
14520+ } else {
14521+ pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
14522+ val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
14523+ val16 |= (PCI_EXP_DEVCTL_CERE |
14524+ PCI_EXP_DEVCTL_NFERE |
14525+ PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE);
14526+ pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16);
14527 }
14528
14529 err = pci_request_regions(pdev, DRV_NAME);
14530diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
14531index 32b1e1f..aec05f2 100644
14532--- a/drivers/net/qlge/qlge_mpi.c
14533+++ b/drivers/net/qlge/qlge_mpi.c
14534@@ -446,9 +446,6 @@ static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
14535 ql_aen_lost(qdev, mbcp);
14536 break;
14537
14538- case AEN_DCBX_CHG:
14539- /* Need to support AEN 8110 */
14540- break;
14541 default:
14542 QPRINTK(qdev, DRV, ERR,
14543 "Unsupported AE %.08x.\n", mbcp->mbox_out[0]);
14544diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
14545index d443ad7..489c4de 100644
14546--- a/drivers/net/sfc/tx.c
14547+++ b/drivers/net/sfc/tx.c
14548@@ -821,6 +821,8 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
14549 tx_queue->efx->type->txd_ring_mask];
14550 efx_tsoh_free(tx_queue, buffer);
14551 EFX_BUG_ON_PARANOID(buffer->skb);
14552+ buffer->len = 0;
14553+ buffer->continuation = true;
14554 if (buffer->unmap_len) {
14555 unmap_addr = (buffer->dma_addr + buffer->len -
14556 buffer->unmap_len);
14557@@ -834,8 +836,6 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
14558 PCI_DMA_TODEVICE);
14559 buffer->unmap_len = 0;
14560 }
14561- buffer->len = 0;
14562- buffer->continuation = true;
14563 }
14564 }
14565
14566diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
14567index f3600b3..6a10d7b 100644
14568--- a/drivers/net/sky2.c
14569+++ b/drivers/net/sky2.c
14570@@ -1806,8 +1806,7 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
14571 sky2->tx_cons = idx;
14572 smp_mb();
14573
14574- /* Wake unless it's detached, and called e.g. from sky2_down() */
14575- if (tx_avail(sky2) > MAX_SKB_TX_LE + 4 && netif_device_present(dev))
14576+ if (tx_avail(sky2) > MAX_SKB_TX_LE + 4)
14577 netif_wake_queue(dev);
14578 }
14579
14580diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
14581index e65ee4d..a36e2b5 100644
14582--- a/drivers/net/starfire.c
14583+++ b/drivers/net/starfire.c
14584@@ -1063,7 +1063,7 @@ static int netdev_open(struct net_device *dev)
14585 if (retval) {
14586 printk(KERN_ERR "starfire: Failed to load firmware \"%s\"\n",
14587 FIRMWARE_RX);
14588- goto out_init;
14589+ return retval;
14590 }
14591 if (fw_rx->size % 4) {
14592 printk(KERN_ERR "starfire: bogus length %zu in \"%s\"\n",
14593@@ -1108,9 +1108,6 @@ out_tx:
14594 release_firmware(fw_tx);
14595 out_rx:
14596 release_firmware(fw_rx);
14597-out_init:
14598- if (retval)
14599- netdev_close(dev);
14600 return retval;
14601 }
14602
14603diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
14604index f14d225..b091e20 100644
14605--- a/drivers/net/usb/rtl8150.c
14606+++ b/drivers/net/usb/rtl8150.c
14607@@ -324,7 +324,7 @@ static int rtl8150_set_mac_address(struct net_device *netdev, void *p)
14608 dbg("%02X:", netdev->dev_addr[i]);
14609 dbg("%02X\n", netdev->dev_addr[i]);
14610 /* Set the IDR registers. */
14611- set_registers(dev, IDR, netdev->addr_len, netdev->dev_addr);
14612+ set_registers(dev, IDR, sizeof(netdev->dev_addr), netdev->dev_addr);
14613 #ifdef EEPROM_WRITE
14614 {
14615 u8 cr;
14616diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c
14617index f141a4f..e974e58 100644
14618--- a/drivers/net/wireless/ath/ar9170/usb.c
14619+++ b/drivers/net/wireless/ath/ar9170/usb.c
14620@@ -68,10 +68,8 @@ static struct usb_device_id ar9170_usb_ids[] = {
14621 { USB_DEVICE(0x0cf3, 0x1002) },
14622 /* Cace Airpcap NX */
14623 { USB_DEVICE(0xcace, 0x0300) },
14624- /* D-Link DWA 160 A1 */
14625+ /* D-Link DWA 160A */
14626 { USB_DEVICE(0x07d1, 0x3c10) },
14627- /* D-Link DWA 160 A2 */
14628- { USB_DEVICE(0x07d1, 0x3a09) },
14629 /* Netgear WNDA3100 */
14630 { USB_DEVICE(0x0846, 0x9010) },
14631 /* Netgear WN111 v2 */
14632diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
14633index 8a82c75..95a8e23 100644
14634--- a/drivers/net/wireless/ath/ath5k/base.c
14635+++ b/drivers/net/wireless/ath/ath5k/base.c
14636@@ -2349,9 +2349,6 @@ ath5k_init(struct ath5k_softc *sc)
14637 */
14638 ath5k_stop_locked(sc);
14639
14640- /* Set PHY calibration interval */
14641- ah->ah_cal_intval = ath5k_calinterval;
14642-
14643 /*
14644 * The basic interface to setting the hardware in a good
14645 * state is ``reset''. On return the hardware is known to
14646@@ -2379,6 +2376,10 @@ ath5k_init(struct ath5k_softc *sc)
14647
14648 /* Set ack to be sent at low bit-rates */
14649 ath5k_hw_set_ack_bitrate_high(ah, false);
14650+
14651+ /* Set PHY calibration interval */
14652+ ah->ah_cal_intval = ath5k_calinterval;
14653+
14654 ret = 0;
14655 done:
14656 mmiowb();
14657diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
14658index 9a96550..644962a 100644
14659--- a/drivers/net/wireless/ath/ath5k/eeprom.c
14660+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
14661@@ -97,7 +97,6 @@ ath5k_eeprom_init_header(struct ath5k_hw *ah)
14662 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
14663 int ret;
14664 u16 val;
14665- u32 cksum, offset, eep_max = AR5K_EEPROM_INFO_MAX;
14666
14667 /*
14668 * Read values from EEPROM and store them in the capability structure
14669@@ -112,44 +111,20 @@ ath5k_eeprom_init_header(struct ath5k_hw *ah)
14670 if (ah->ah_ee_version < AR5K_EEPROM_VERSION_3_0)
14671 return 0;
14672
14673+#ifdef notyet
14674 /*
14675 * Validate the checksum of the EEPROM data. There are some
14676 * devices with invalid EEPROMs.
14677 */
14678- AR5K_EEPROM_READ(AR5K_EEPROM_SIZE_UPPER, val);
14679- if (val) {
14680- eep_max = (val & AR5K_EEPROM_SIZE_UPPER_MASK) <<
14681- AR5K_EEPROM_SIZE_ENDLOC_SHIFT;
14682- AR5K_EEPROM_READ(AR5K_EEPROM_SIZE_LOWER, val);
14683- eep_max = (eep_max | val) - AR5K_EEPROM_INFO_BASE;
14684-
14685- /*
14686- * Fail safe check to prevent stupid loops due
14687- * to busted EEPROMs. XXX: This value is likely too
14688- * big still, waiting on a better value.
14689- */
14690- if (eep_max > (3 * AR5K_EEPROM_INFO_MAX)) {
14691- ATH5K_ERR(ah->ah_sc, "Invalid max custom EEPROM size: "
14692- "%d (0x%04x) max expected: %d (0x%04x)\n",
14693- eep_max, eep_max,
14694- 3 * AR5K_EEPROM_INFO_MAX,
14695- 3 * AR5K_EEPROM_INFO_MAX);
14696- return -EIO;
14697- }
14698- }
14699-
14700- for (cksum = 0, offset = 0; offset < eep_max; offset++) {
14701+ for (cksum = 0, offset = 0; offset < AR5K_EEPROM_INFO_MAX; offset++) {
14702 AR5K_EEPROM_READ(AR5K_EEPROM_INFO(offset), val);
14703 cksum ^= val;
14704 }
14705 if (cksum != AR5K_EEPROM_INFO_CKSUM) {
14706- ATH5K_ERR(ah->ah_sc, "Invalid EEPROM "
14707- "checksum: 0x%04x eep_max: 0x%04x (%s)\n",
14708- cksum, eep_max,
14709- eep_max == AR5K_EEPROM_INFO_MAX ?
14710- "default size" : "custom size");
14711+ ATH5K_ERR(ah->ah_sc, "Invalid EEPROM checksum 0x%04x\n", cksum);
14712 return -EIO;
14713 }
14714+#endif
14715
14716 AR5K_EEPROM_READ_HDR(AR5K_EEPROM_ANT_GAIN(ah->ah_ee_version),
14717 ee_ant_gain);
14718diff --git a/drivers/net/wireless/ath/ath5k/eeprom.h b/drivers/net/wireless/ath/ath5k/eeprom.h
14719index 473a483..0123f35 100644
14720--- a/drivers/net/wireless/ath/ath5k/eeprom.h
14721+++ b/drivers/net/wireless/ath/ath5k/eeprom.h
14722@@ -37,14 +37,6 @@
14723 #define AR5K_EEPROM_RFKILL_POLARITY_S 1
14724
14725 #define AR5K_EEPROM_REG_DOMAIN 0x00bf /* EEPROM regdom */
14726-
14727-/* FLASH(EEPROM) Defines for AR531X chips */
14728-#define AR5K_EEPROM_SIZE_LOWER 0x1b /* size info -- lower */
14729-#define AR5K_EEPROM_SIZE_UPPER 0x1c /* size info -- upper */
14730-#define AR5K_EEPROM_SIZE_UPPER_MASK 0xfff0
14731-#define AR5K_EEPROM_SIZE_UPPER_SHIFT 4
14732-#define AR5K_EEPROM_SIZE_ENDLOC_SHIFT 12
14733-
14734 #define AR5K_EEPROM_CHECKSUM 0x00c0 /* EEPROM checksum */
14735 #define AR5K_EEPROM_INFO_BASE 0x00c0 /* EEPROM header */
14736 #define AR5K_EEPROM_INFO_MAX (0x400 - AR5K_EEPROM_INFO_BASE)
14737diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
14738index 9d67647..1a039f2 100644
14739--- a/drivers/net/wireless/ath/ath5k/phy.c
14740+++ b/drivers/net/wireless/ath/ath5k/phy.c
14741@@ -2954,6 +2954,8 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
14742 ATH5K_ERR(ah->ah_sc, "invalid tx power: %u\n", txpower);
14743 return -EINVAL;
14744 }
14745+ if (txpower == 0)
14746+ txpower = AR5K_TUNE_DEFAULT_TXPOWER;
14747
14748 /* Reset TX power values */
14749 memset(&ah->ah_txpower, 0, sizeof(ah->ah_txpower));
14750diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
14751index cdb90c5..1d59f10 100644
14752--- a/drivers/net/wireless/ath/ath9k/ath9k.h
14753+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
14754@@ -139,7 +139,6 @@ struct ath_buf {
14755 dma_addr_t bf_daddr; /* physical addr of desc */
14756 dma_addr_t bf_buf_addr; /* physical addr of data buffer */
14757 bool bf_stale;
14758- bool bf_isnullfunc;
14759 u16 bf_flags;
14760 struct ath_buf_state bf_state;
14761 dma_addr_t bf_dmacontext;
14762@@ -525,8 +524,6 @@ struct ath_led {
14763 #define SC_OP_BEACON_SYNC BIT(19)
14764 #define SC_OP_BTCOEX_ENABLED BIT(20)
14765 #define SC_OP_BT_PRIORITY_DETECTED BIT(21)
14766-#define SC_OP_NULLFUNC_COMPLETED BIT(22)
14767-#define SC_OP_PS_ENABLED BIT(23)
14768
14769 struct ath_bus_ops {
14770 void (*read_cachesize)(struct ath_softc *sc, int *csz);
14771diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
14772index 0905b38..ca7694c 100644
14773--- a/drivers/net/wireless/ath/ath9k/hw.c
14774+++ b/drivers/net/wireless/ath/ath9k/hw.c
14775@@ -880,11 +880,12 @@ static void ath9k_hw_init_mode_gain_regs(struct ath_hw *ah)
14776 }
14777 }
14778
14779-static void ath9k_hw_init_eeprom_fix(struct ath_hw *ah)
14780+static void ath9k_hw_init_11a_eeprom_fix(struct ath_hw *ah)
14781 {
14782 u32 i, j;
14783
14784- if (ah->hw_version.devid == AR9280_DEVID_PCI) {
14785+ if ((ah->hw_version.devid == AR9280_DEVID_PCI) &&
14786+ test_bit(ATH9K_MODE_11A, ah->caps.wireless_modes)) {
14787
14788 /* EEPROM Fixup */
14789 for (i = 0; i < ah->iniModes.ia_rows; i++) {
14790@@ -936,11 +937,6 @@ int ath9k_hw_init(struct ath_hw *ah)
14791 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "serialize_regmode is %d\n",
14792 ah->config.serialize_regmode);
14793
14794- if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
14795- ah->config.max_txtrig_level = MAX_TX_FIFO_THRESHOLD >> 1;
14796- else
14797- ah->config.max_txtrig_level = MAX_TX_FIFO_THRESHOLD;
14798-
14799 if (!ath9k_hw_macversion_supported(ah->hw_version.macVersion)) {
14800 DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
14801 "Mac Chip Rev 0x%02x.%x is not supported by "
14802@@ -979,7 +975,7 @@ int ath9k_hw_init(struct ath_hw *ah)
14803
14804 ath9k_hw_init_mode_gain_regs(ah);
14805 ath9k_hw_fill_cap_info(ah);
14806- ath9k_hw_init_eeprom_fix(ah);
14807+ ath9k_hw_init_11a_eeprom_fix(ah);
14808
14809 r = ath9k_hw_init_macaddr(ah);
14810 if (r) {
14811@@ -3674,11 +3670,7 @@ void ath9k_hw_fill_cap_info(struct ath_hw *ah)
14812 pCap->keycache_size = AR_KEYTABLE_SIZE;
14813
14814 pCap->hw_caps |= ATH9K_HW_CAP_FASTCC;
14815-
14816- if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
14817- pCap->tx_triglevel_max = MAX_TX_FIFO_THRESHOLD >> 1;
14818- else
14819- pCap->tx_triglevel_max = MAX_TX_FIFO_THRESHOLD;
14820+ pCap->tx_triglevel_max = MAX_TX_FIFO_THRESHOLD;
14821
14822 if (AR_SREV_9285_10_OR_LATER(ah))
14823 pCap->num_gpio_pins = AR9285_NUM_GPIO;
14824diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
14825index ff4383b..b892345 100644
14826--- a/drivers/net/wireless/ath/ath9k/hw.h
14827+++ b/drivers/net/wireless/ath/ath9k/hw.h
14828@@ -218,7 +218,6 @@ struct ath9k_ops_config {
14829 #define AR_SPUR_FEEQ_BOUND_HT20 10
14830 int spurmode;
14831 u16 spurchans[AR_EEPROM_MODAL_SPURS][2];
14832- u8 max_txtrig_level;
14833 };
14834
14835 enum ath9k_int {
14836@@ -408,7 +407,7 @@ struct ath9k_hw_version {
14837 * Using a de Bruijn sequence to look up 1's index in a 32 bit number
14838 * debruijn32 = 0000 0111 0111 1100 1011 0101 0011 0001
14839 */
14840-#define debruijn32 0x077CB531U
14841+#define debruijn32 0x077CB531UL
14842
14843 struct ath_gen_timer_configuration {
14844 u32 next_addr;
14845diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
14846index 110c16d..800bfab 100644
14847--- a/drivers/net/wireless/ath/ath9k/mac.c
14848+++ b/drivers/net/wireless/ath/ath9k/mac.c
14849@@ -70,7 +70,7 @@ bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
14850 u32 txcfg, curLevel, newLevel;
14851 enum ath9k_int omask;
14852
14853- if (ah->tx_trig_level >= ah->config.max_txtrig_level)
14854+ if (ah->tx_trig_level >= MAX_TX_FIFO_THRESHOLD)
14855 return false;
14856
14857 omask = ath9k_hw_set_interrupts(ah, ah->mask_reg & ~ATH9K_INT_GLOBAL);
14858@@ -79,7 +79,7 @@ bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
14859 curLevel = MS(txcfg, AR_FTRIG);
14860 newLevel = curLevel;
14861 if (bIncTrigLevel) {
14862- if (curLevel < ah->config.max_txtrig_level)
14863+ if (curLevel < MAX_TX_FIFO_THRESHOLD)
14864 newLevel++;
14865 } else if (curLevel > MIN_TX_FIFO_THRESHOLD)
14866 newLevel--;
14867@@ -155,7 +155,7 @@ bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
14868 wait = wait_time;
14869 while (ath9k_hw_numtxpending(ah, q)) {
14870 if ((--wait) == 0) {
14871- DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
14872+ DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
14873 "Failed to stop TX DMA in 100 "
14874 "msec after killing last frame\n");
14875 break;
14876@@ -222,8 +222,6 @@ int ath9k_hw_txprocdesc(struct ath_hw *ah, struct ath_desc *ds)
14877 ds->ds_txstat.ts_status = 0;
14878 ds->ds_txstat.ts_flags = 0;
14879
14880- if (ads->ds_txstatus1 & AR_FrmXmitOK)
14881- ds->ds_txstat.ts_status |= ATH9K_TX_ACKED;
14882 if (ads->ds_txstatus1 & AR_ExcessiveRetries)
14883 ds->ds_txstat.ts_status |= ATH9K_TXERR_XRETRY;
14884 if (ads->ds_txstatus1 & AR_Filtered)
14885diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
14886index 9720c4d..f56e77d 100644
14887--- a/drivers/net/wireless/ath/ath9k/mac.h
14888+++ b/drivers/net/wireless/ath/ath9k/mac.h
14889@@ -76,10 +76,6 @@
14890 #define ATH9K_TXERR_FIFO 0x04
14891 #define ATH9K_TXERR_XTXOP 0x08
14892 #define ATH9K_TXERR_TIMER_EXPIRED 0x10
14893-#define ATH9K_TX_ACKED 0x20
14894-#define ATH9K_TXERR_MASK \
14895- (ATH9K_TXERR_XRETRY | ATH9K_TXERR_FILT | ATH9K_TXERR_FIFO | \
14896- ATH9K_TXERR_XTXOP | ATH9K_TXERR_TIMER_EXPIRED)
14897
14898 #define ATH9K_TX_BA 0x01
14899 #define ATH9K_TX_PWRMGMT 0x02
14900diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
14901index 5864eaa..43d2be9 100644
14902--- a/drivers/net/wireless/ath/ath9k/main.c
14903+++ b/drivers/net/wireless/ath/ath9k/main.c
14904@@ -2147,9 +2147,6 @@ static void ath9k_stop(struct ieee80211_hw *hw)
14905 return; /* another wiphy still in use */
14906 }
14907
14908- /* Ensure HW is awake when we try to shut it down. */
14909- ath9k_ps_wakeup(sc);
14910-
14911 if (sc->sc_flags & SC_OP_BTCOEX_ENABLED) {
14912 ath9k_hw_btcoex_disable(sc->sc_ah);
14913 if (sc->btcoex_info.btcoex_scheme == ATH_BTCOEX_CFG_3WIRE)
14914@@ -2170,9 +2167,6 @@ static void ath9k_stop(struct ieee80211_hw *hw)
14915 /* disable HAL and put h/w to sleep */
14916 ath9k_hw_disable(sc->sc_ah);
14917 ath9k_hw_configpcipowersave(sc->sc_ah, 1, 1);
14918- ath9k_ps_restore(sc);
14919-
14920- /* Finally, put the chip in FULL SLEEP mode */
14921 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP);
14922
14923 sc->sc_flags |= SC_OP_INVALID;
14924@@ -2283,12 +2277,10 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
14925 if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
14926 (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) ||
14927 (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) {
14928- ath9k_ps_wakeup(sc);
14929 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
14930- ath9k_ps_restore(sc);
14931+ ath_beacon_return(sc, avp);
14932 }
14933
14934- ath_beacon_return(sc, avp);
14935 sc->sc_flags &= ~SC_OP_BEACONS;
14936
14937 for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
14938@@ -2335,7 +2327,6 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
14939
14940 if (changed & IEEE80211_CONF_CHANGE_PS) {
14941 if (conf->flags & IEEE80211_CONF_PS) {
14942- sc->sc_flags |= SC_OP_PS_ENABLED;
14943 if (!(ah->caps.hw_caps &
14944 ATH9K_HW_CAP_AUTOSLEEP)) {
14945 if ((sc->imask & ATH9K_INT_TIM_TIMER) == 0) {
14946@@ -2343,17 +2334,11 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
14947 ath9k_hw_set_interrupts(sc->sc_ah,
14948 sc->imask);
14949 }
14950- }
14951- sc->ps_enabled = true;
14952- if ((sc->sc_flags & SC_OP_NULLFUNC_COMPLETED)) {
14953- sc->sc_flags &= ~SC_OP_NULLFUNC_COMPLETED;
14954- sc->ps_enabled = true;
14955 ath9k_hw_setrxabort(sc->sc_ah, 1);
14956 }
14957+ sc->ps_enabled = true;
14958 } else {
14959 sc->ps_enabled = false;
14960- sc->sc_flags &= ~(SC_OP_PS_ENABLED |
14961- SC_OP_NULLFUNC_COMPLETED);
14962 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
14963 if (!(ah->caps.hw_caps &
14964 ATH9K_HW_CAP_AUTOSLEEP)) {
14965@@ -2732,21 +2717,15 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
14966 case IEEE80211_AMPDU_RX_STOP:
14967 break;
14968 case IEEE80211_AMPDU_TX_START:
14969- ath9k_ps_wakeup(sc);
14970 ath_tx_aggr_start(sc, sta, tid, ssn);
14971 ieee80211_start_tx_ba_cb_irqsafe(hw, sta->addr, tid);
14972- ath9k_ps_restore(sc);
14973 break;
14974 case IEEE80211_AMPDU_TX_STOP:
14975- ath9k_ps_wakeup(sc);
14976 ath_tx_aggr_stop(sc, sta, tid);
14977 ieee80211_stop_tx_ba_cb_irqsafe(hw, sta->addr, tid);
14978- ath9k_ps_restore(sc);
14979 break;
14980 case IEEE80211_AMPDU_TX_OPERATIONAL:
14981- ath9k_ps_wakeup(sc);
14982 ath_tx_aggr_resume(sc, sta, tid);
14983- ath9k_ps_restore(sc);
14984 break;
14985 default:
14986 DPRINTF(sc, ATH_DBG_FATAL, "Unknown AMPDU action\n");
14987diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
14988index c0d7e65..d83b77f 100644
14989--- a/drivers/net/wireless/ath/ath9k/reg.h
14990+++ b/drivers/net/wireless/ath/ath9k/reg.h
14991@@ -969,10 +969,10 @@ enum {
14992 #define AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_S 4
14993 #define AR_GPIO_INPUT_EN_VAL_RFSILENT_DEF 0x00000080
14994 #define AR_GPIO_INPUT_EN_VAL_RFSILENT_DEF_S 7
14995-#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB 0x00000400
14996-#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB_S 10
14997 #define AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB 0x00001000
14998 #define AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB_S 12
14999+#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB 0x00001000
15000+#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB_S 1
15001 #define AR_GPIO_INPUT_EN_VAL_RFSILENT_BB 0x00008000
15002 #define AR_GPIO_INPUT_EN_VAL_RFSILENT_BB_S 15
15003 #define AR_GPIO_RTC_RESET_OVERRIDE_ENABLE 0x00010000
15004diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
15005index 9009bac..42551a4 100644
15006--- a/drivers/net/wireless/ath/ath9k/xmit.c
15007+++ b/drivers/net/wireless/ath/ath9k/xmit.c
15008@@ -1076,10 +1076,10 @@ void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
15009 if (npend) {
15010 int r;
15011
15012- DPRINTF(sc, ATH_DBG_FATAL, "Unable to stop TxDMA. Reset HAL!\n");
15013+ DPRINTF(sc, ATH_DBG_XMIT, "Unable to stop TxDMA. Reset HAL!\n");
15014
15015 spin_lock_bh(&sc->sc_resetlock);
15016- r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false);
15017+ r = ath9k_hw_reset(ah, sc->sc_ah->curchan, true);
15018 if (r)
15019 DPRINTF(sc, ATH_DBG_FATAL,
15020 "Unable to reset hardware; reset status %d\n",
15021@@ -1563,7 +1563,7 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
15022
15023 bf->bf_frmlen = skb->len + FCS_LEN - (hdrlen & 3);
15024
15025- if (conf_is_ht(&sc->hw->conf))
15026+ if (conf_is_ht(&sc->hw->conf) && !is_pae(skb))
15027 bf->bf_state.bf_type |= BUF_HT;
15028
15029 bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq);
15030@@ -1592,13 +1592,6 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
15031 }
15032
15033 bf->bf_buf_addr = bf->bf_dmacontext;
15034-
15035- if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc)) {
15036- bf->bf_isnullfunc = true;
15037- sc->sc_flags &= ~SC_OP_NULLFUNC_COMPLETED;
15038- } else
15039- bf->bf_isnullfunc = false;
15040-
15041 return 0;
15042 }
15043
15044@@ -1648,7 +1641,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
15045 goto tx_done;
15046 }
15047
15048- if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && !is_pae(skb)) {
15049+ if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
15050 /*
15051 * Try aggregation if it's a unicast data frame
15052 * and the destination is HT capable.
15053@@ -1996,15 +1989,6 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
15054 if (ds == txq->axq_gatingds)
15055 txq->axq_gatingds = NULL;
15056
15057- if (bf->bf_isnullfunc &&
15058- (ds->ds_txstat.ts_status & ATH9K_TX_ACKED)) {
15059- if ((sc->sc_flags & SC_OP_PS_ENABLED)) {
15060- sc->ps_enabled = true;
15061- ath9k_hw_setrxabort(sc->sc_ah, 1);
15062- } else
15063- sc->sc_flags |= SC_OP_NULLFUNC_COMPLETED;
15064- }
15065-
15066 /*
15067 * Remove ath_buf's of the same transmit unit from txq,
15068 * however leave the last descriptor back as the holding
15069@@ -2020,7 +2004,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
15070 if (bf_isaggr(bf))
15071 txq->axq_aggr_depth--;
15072
15073- txok = !(ds->ds_txstat.ts_status & ATH9K_TXERR_MASK);
15074+ txok = (ds->ds_txstat.ts_status == 0);
15075 txq->axq_tx_inprogress = false;
15076 spin_unlock_bh(&txq->axq_lock);
15077
15078@@ -2081,9 +2065,7 @@ static void ath_tx_complete_poll_work(struct work_struct *work)
15079
15080 if (needreset) {
15081 DPRINTF(sc, ATH_DBG_RESET, "tx hung, resetting the chip\n");
15082- ath9k_ps_wakeup(sc);
15083 ath_reset(sc, false);
15084- ath9k_ps_restore(sc);
15085 }
15086
15087 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
15088diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
15089index 0e6b154..6607162 100644
15090--- a/drivers/net/wireless/b43/b43.h
15091+++ b/drivers/net/wireless/b43/b43.h
15092@@ -117,7 +117,6 @@
15093 #define B43_MMIO_TSF_2 0x636 /* core rev < 3 only */
15094 #define B43_MMIO_TSF_3 0x638 /* core rev < 3 only */
15095 #define B43_MMIO_RNG 0x65A
15096-#define B43_MMIO_IFSSLOT 0x684 /* Interframe slot time */
15097 #define B43_MMIO_IFSCTL 0x688 /* Interframe space control */
15098 #define B43_MMIO_IFSCTL_USE_EDCF 0x0004
15099 #define B43_MMIO_POWERUP_DELAY 0x6A8
15100diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
15101index 9ca253e..098dda1 100644
15102--- a/drivers/net/wireless/b43/main.c
15103+++ b/drivers/net/wireless/b43/main.c
15104@@ -628,17 +628,10 @@ static void b43_upload_card_macaddress(struct b43_wldev *dev)
15105 static void b43_set_slot_time(struct b43_wldev *dev, u16 slot_time)
15106 {
15107 /* slot_time is in usec. */
15108- /* This test used to exit for all but a G PHY. */
15109- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
15110+ if (dev->phy.type != B43_PHYTYPE_G)
15111 return;
15112- b43_write16(dev, B43_MMIO_IFSSLOT, 510 + slot_time);
15113- /* Shared memory location 0x0010 is the slot time and should be
15114- * set to slot_time; however, this register is initially 0 and changing
15115- * the value adversely affects the transmit rate for BCM4311
15116- * devices. Until this behavior is understood, delete this step
15117- *
15118- * b43_shm_write16(dev, B43_SHM_SHARED, 0x0010, slot_time);
15119- */
15120+ b43_write16(dev, 0x684, 510 + slot_time);
15121+ b43_shm_write16(dev, B43_SHM_SHARED, 0x0010, slot_time);
15122 }
15123
15124 static void b43_short_slot_timing_enable(struct b43_wldev *dev)
15125diff --git a/drivers/net/wireless/b43/rfkill.c b/drivers/net/wireless/b43/rfkill.c
15126index 78016ae..ffdce6f 100644
15127--- a/drivers/net/wireless/b43/rfkill.c
15128+++ b/drivers/net/wireless/b43/rfkill.c
15129@@ -33,14 +33,8 @@ bool b43_is_hw_radio_enabled(struct b43_wldev *dev)
15130 & B43_MMIO_RADIO_HWENABLED_HI_MASK))
15131 return 1;
15132 } else {
15133- /* To prevent CPU fault on PPC, do not read a register
15134- * unless the interface is started; however, on resume
15135- * for hibernation, this routine is entered early. When
15136- * that happens, unconditionally return TRUE.
15137- */
15138- if (b43_status(dev) < B43_STAT_STARTED)
15139- return 1;
15140- if (b43_read16(dev, B43_MMIO_RADIO_HWENABLED_LO)
15141+ if (b43_status(dev) >= B43_STAT_STARTED &&
15142+ b43_read16(dev, B43_MMIO_RADIO_HWENABLED_LO)
15143 & B43_MMIO_RADIO_HWENABLED_LO_MASK)
15144 return 1;
15145 }
15146diff --git a/drivers/net/wireless/b43legacy/rfkill.c b/drivers/net/wireless/b43legacy/rfkill.c
15147index d579df7..8783022 100644
15148--- a/drivers/net/wireless/b43legacy/rfkill.c
15149+++ b/drivers/net/wireless/b43legacy/rfkill.c
15150@@ -34,13 +34,6 @@ bool b43legacy_is_hw_radio_enabled(struct b43legacy_wldev *dev)
15151 & B43legacy_MMIO_RADIO_HWENABLED_HI_MASK))
15152 return 1;
15153 } else {
15154- /* To prevent CPU fault on PPC, do not read a register
15155- * unless the interface is started; however, on resume
15156- * for hibernation, this routine is entered early. When
15157- * that happens, unconditionally return TRUE.
15158- */
15159- if (b43legacy_status(dev) < B43legacy_STAT_STARTED)
15160- return 1;
15161 if (b43legacy_read16(dev, B43legacy_MMIO_RADIO_HWENABLED_LO)
15162 & B43legacy_MMIO_RADIO_HWENABLED_LO_MASK)
15163 return 1;
15164diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
15165index 43102bf..6e2fc0c 100644
15166--- a/drivers/net/wireless/ipw2x00/ipw2100.c
15167+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
15168@@ -6487,16 +6487,6 @@ static int ipw2100_resume(struct pci_dev *pci_dev)
15169 }
15170 #endif
15171
15172-static void ipw2100_shutdown(struct pci_dev *pci_dev)
15173-{
15174- struct ipw2100_priv *priv = pci_get_drvdata(pci_dev);
15175-
15176- /* Take down the device; powers it off, etc. */
15177- ipw2100_down(priv);
15178-
15179- pci_disable_device(pci_dev);
15180-}
15181-
15182 #define IPW2100_DEV_ID(x) { PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, x }
15183
15184 static struct pci_device_id ipw2100_pci_id_table[] __devinitdata = {
15185@@ -6560,7 +6550,6 @@ static struct pci_driver ipw2100_pci_driver = {
15186 .suspend = ipw2100_suspend,
15187 .resume = ipw2100_resume,
15188 #endif
15189- .shutdown = ipw2100_shutdown,
15190 };
15191
15192 /**
15193diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
15194index 9d60f6c..f059b49 100644
15195--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
15196+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
15197@@ -2895,7 +2895,6 @@ static struct iwl_cfg iwl3945_bg_cfg = {
15198 .mod_params = &iwl3945_mod_params,
15199 .use_isr_legacy = true,
15200 .ht_greenfield_support = false,
15201- .broken_powersave = true,
15202 };
15203
15204 static struct iwl_cfg iwl3945_abg_cfg = {
15205@@ -2910,7 +2909,6 @@ static struct iwl_cfg iwl3945_abg_cfg = {
15206 .mod_params = &iwl3945_mod_params,
15207 .use_isr_legacy = true,
15208 .ht_greenfield_support = false,
15209- .broken_powersave = true,
15210 };
15211
15212 struct pci_device_id iwl3945_hw_card_ids[] = {
15213diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
15214index 99331ed..6f703a0 100644
15215--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
15216+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
15217@@ -1337,7 +1337,7 @@ static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
15218 iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info);
15219
15220 /* calculate tx gain adjustment based on power supply voltage */
15221- voltage = le16_to_cpu(priv->calib_info->voltage);
15222+ voltage = priv->calib_info->voltage;
15223 init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage);
15224 voltage_compensation =
15225 iwl4965_get_voltage_compensation(voltage, init_voltage);
15226@@ -2087,7 +2087,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
15227 struct ieee80211_tx_info *info;
15228 struct iwl4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
15229 u32 status = le32_to_cpu(tx_resp->u.status);
15230- int tid = MAX_TID_COUNT - 1;
15231+ int tid = MAX_TID_COUNT;
15232 int sta_id;
15233 int freed;
15234 u8 *qc = NULL;
15235diff --git a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
15236index bc056e9..4ef6804 100644
15237--- a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
15238+++ b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
15239@@ -92,15 +92,11 @@
15240
15241 static inline s32 iwl_temp_calib_to_offset(struct iwl_priv *priv)
15242 {
15243- u16 temperature, voltage;
15244- __le16 *temp_calib =
15245- (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_TEMPERATURE);
15246-
15247- temperature = le16_to_cpu(temp_calib[0]);
15248- voltage = le16_to_cpu(temp_calib[1]);
15249-
15250- /* offset = temp - volt / coeff */
15251- return (s32)(temperature - voltage / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF);
15252+ u16 *temp_calib = (u16 *)iwl_eeprom_query_addr(priv,
15253+ EEPROM_5000_TEMPERATURE);
15254+ /* offset = temperature - voltage / coef */
15255+ s32 offset = (s32)(temp_calib[0] - temp_calib[1] / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF);
15256+ return offset;
15257 }
15258
15259 /* Fixed (non-configurable) rx data from phy */
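One observation on the iwl_temp_calib_to_offset hunk above: the replacement drops the explicit le16_to_cpu() decode and reads the calibration words as raw u16, which is equivalent on little-endian hosts only. The arithmetic itself is unchanged; a minimal standalone sketch (the coefficient value and calibration words here are hypothetical stand-ins, not the driver's):

    #include <stdint.h>

    /* offset = temperature - voltage / coeff, in integer arithmetic.
     * COEFF stands in for IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF. */
    #define COEFF 9 /* hypothetical value, for illustration only */

    static int32_t temp_calib_to_offset(uint16_t temperature, uint16_t voltage)
    {
        return (int32_t)(temperature - voltage / COEFF);
    }

For example, temperature = 300 and voltage = 90 would give 300 - 90/9 = 290 under these made-up numbers.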
15260diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
15261index 133df70..6e6f516 100644
15262--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
15263+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
15264@@ -460,15 +460,14 @@ static void iwl5000_set_ct_threshold(struct iwl_priv *priv)
15265 static int iwl5000_set_Xtal_calib(struct iwl_priv *priv)
15266 {
15267 struct iwl_calib_xtal_freq_cmd cmd;
15268- __le16 *xtal_calib =
15269- (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_XTAL);
15270+ u16 *xtal_calib = (u16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_XTAL);
15271
15272 cmd.hdr.op_code = IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD;
15273 cmd.hdr.first_group = 0;
15274 cmd.hdr.groups_num = 1;
15275 cmd.hdr.data_valid = 1;
15276- cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
15277- cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]);
15278+ cmd.cap_pin1 = (u8)xtal_calib[0];
15279+ cmd.cap_pin2 = (u8)xtal_calib[1];
15280 return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL],
15281 (u8 *)&cmd, sizeof(cmd));
15282 }
15283@@ -1666,7 +1665,6 @@ struct iwl_cfg iwl5300_agn_cfg = {
15284 .valid_rx_ant = ANT_ABC,
15285 .need_pll_cfg = true,
15286 .ht_greenfield_support = true,
15287- .use_rts_for_ht = true, /* use rts/cts protection */
15288 };
15289
15290 struct iwl_cfg iwl5100_bg_cfg = {
15291@@ -1718,7 +1716,6 @@ struct iwl_cfg iwl5100_agn_cfg = {
15292 .valid_rx_ant = ANT_AB,
15293 .need_pll_cfg = true,
15294 .ht_greenfield_support = true,
15295- .use_rts_for_ht = true, /* use rts/cts protection */
15296 };
15297
15298 struct iwl_cfg iwl5350_agn_cfg = {
15299@@ -1736,7 +1733,6 @@ struct iwl_cfg iwl5350_agn_cfg = {
15300 .valid_rx_ant = ANT_ABC,
15301 .need_pll_cfg = true,
15302 .ht_greenfield_support = true,
15303- .use_rts_for_ht = true, /* use rts/cts protection */
15304 };
15305
15306 struct iwl_cfg iwl5150_agn_cfg = {
15307@@ -1754,7 +1750,6 @@ struct iwl_cfg iwl5150_agn_cfg = {
15308 .valid_rx_ant = ANT_AB,
15309 .need_pll_cfg = true,
15310 .ht_greenfield_support = true,
15311- .use_rts_for_ht = true, /* use rts/cts protection */
15312 };
15313
15314 MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX));
15315diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
15316index 0eb2591..81726ee 100644
15317--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
15318+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
15319@@ -2808,7 +2808,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
15320 repeat_rate--;
15321 }
15322
15323- lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
15324+ lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_MAX;
15325 lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
15326 lq_cmd->agg_params.agg_time_limit =
15327 cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
15328diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
15329index 0cd4ec4..2dc9287 100644
15330--- a/drivers/net/wireless/iwlwifi/iwl-core.c
15331+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
15332@@ -2645,7 +2645,6 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
15333 if ((le16_to_cpu(priv->staging_rxon.channel) != ch))
15334 priv->staging_rxon.flags = 0;
15335
15336- iwl_set_rxon_ht(priv, ht_conf);
15337 iwl_set_rxon_channel(priv, conf->channel);
15338
15339 iwl_set_flags_for_band(priv, conf->channel->band);
15340diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
15341index cea2ee2..028d505 100644
15342--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
15343+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
15344@@ -703,7 +703,7 @@ extern void iwl_txq_ctx_stop(struct iwl_priv *priv);
15345 extern int iwl_queue_space(const struct iwl_queue *q);
15346 static inline int iwl_queue_used(const struct iwl_queue *q, int i)
15347 {
15348- return q->write_ptr >= q->read_ptr ?
15349+ return q->write_ptr > q->read_ptr ?
15350 (i >= q->read_ptr && i < q->write_ptr) :
15351 !(i < q->read_ptr && i >= q->write_ptr);
15352 }
15353@@ -1149,7 +1149,7 @@ struct iwl_priv {
15354 u32 last_beacon_time;
15355 u64 last_tsf;
15356
15357- /* eeprom -- this is in the card's little endian byte order */
15358+ /* eeprom */
15359 u8 *eeprom;
15360 int nvm_device_type;
15361 struct iwl_eeprom_calib_info *calib_info;
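The `>` versus `>=` change in iwl_queue_used above matters exactly in the empty case (write_ptr == read_ptr): with `>=` the contiguous branch runs and reports no slot in use, whereas with `>` the wrapped branch runs and reports every slot as used. A minimal standalone sketch of the test (the struct name is illustrative; the field names match the hunk):

    #include <stdbool.h>

    struct ring { int read_ptr, write_ptr; }; /* write_ptr == read_ptr: empty */

    static bool ring_slot_used(const struct ring *q, int i)
    {
        return q->write_ptr >= q->read_ptr
            ? (i >= q->read_ptr && i < q->write_ptr)   /* used region contiguous */
            : !(i < q->read_ptr && i >= q->write_ptr); /* used region wraps */
    }

With read_ptr = 6 and write_ptr = 2 in an 8-slot ring, slots 6, 7, 0 and 1 are reported used and slots 2 through 5 free.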
15362diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
15363index 18dc3a4..e14c995 100644
15364--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
15365+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
15366@@ -337,7 +337,7 @@ static int iwl_init_otp_access(struct iwl_priv *priv)
15367 return ret;
15368 }
15369
15370-static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, __le16 *eeprom_data)
15371+static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, u16 *eeprom_data)
15372 {
15373 int ret = 0;
15374 u32 r;
15375@@ -370,7 +370,7 @@ static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, __le16 *eeprom_dat
15376 CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK);
15377 IWL_ERR(priv, "Correctable OTP ECC error, continue read\n");
15378 }
15379- *eeprom_data = cpu_to_le16(r >> 16);
15380+ *eeprom_data = le16_to_cpu((__force __le16)(r >> 16));
15381 return 0;
15382 }
15383
15384@@ -379,8 +379,7 @@ static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, __le16 *eeprom_dat
15385 */
15386 static bool iwl_is_otp_empty(struct iwl_priv *priv)
15387 {
15388- u16 next_link_addr = 0;
15389- __le16 link_value;
15390+ u16 next_link_addr = 0, link_value;
15391 bool is_empty = false;
15392
15393 /* locate the beginning of OTP link list */
15394@@ -410,8 +409,7 @@ static bool iwl_is_otp_empty(struct iwl_priv *priv)
15395 static int iwl_find_otp_image(struct iwl_priv *priv,
15396 u16 *validblockaddr)
15397 {
15398- u16 next_link_addr = 0, valid_addr;
15399- __le16 link_value = 0;
15400+ u16 next_link_addr = 0, link_value = 0, valid_addr;
15401 int usedblocks = 0;
15402
15403 /* set addressing mode to absolute to traverse the link list */
15404@@ -431,7 +429,7 @@ static int iwl_find_otp_image(struct iwl_priv *priv,
15405 * check for more block on the link list
15406 */
15407 valid_addr = next_link_addr;
15408- next_link_addr = le16_to_cpu(link_value) * sizeof(u16);
15409+ next_link_addr = link_value * sizeof(u16);
15410 IWL_DEBUG_INFO(priv, "OTP blocks %d addr 0x%x\n",
15411 usedblocks, next_link_addr);
15412 if (iwl_read_otp_word(priv, next_link_addr, &link_value))
15413@@ -465,7 +463,7 @@ static int iwl_find_otp_image(struct iwl_priv *priv,
15414 */
15415 int iwl_eeprom_init(struct iwl_priv *priv)
15416 {
15417- __le16 *e;
15418+ u16 *e;
15419 u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
15420 int sz;
15421 int ret;
15422@@ -484,7 +482,7 @@ int iwl_eeprom_init(struct iwl_priv *priv)
15423 ret = -ENOMEM;
15424 goto alloc_err;
15425 }
15426- e = (__le16 *)priv->eeprom;
15427+ e = (u16 *)priv->eeprom;
15428
15429 ret = priv->cfg->ops->lib->eeprom_ops.verify_signature(priv);
15430 if (ret < 0) {
15431@@ -523,7 +521,7 @@ int iwl_eeprom_init(struct iwl_priv *priv)
15432 }
15433 for (addr = validblockaddr; addr < validblockaddr + sz;
15434 addr += sizeof(u16)) {
15435- __le16 eeprom_data;
15436+ u16 eeprom_data;
15437
15438 ret = iwl_read_otp_word(priv, addr, &eeprom_data);
15439 if (ret)
15440@@ -547,7 +545,7 @@ int iwl_eeprom_init(struct iwl_priv *priv)
15441 goto done;
15442 }
15443 r = _iwl_read_direct32(priv, CSR_EEPROM_REG);
15444- e[addr / 2] = cpu_to_le16(r >> 16);
15445+ e[addr / 2] = le16_to_cpu((__force __le16)(r >> 16));
15446 }
15447 }
15448 ret = 0;
15449@@ -711,8 +709,7 @@ static int iwl_mod_ht40_chan_info(struct iwl_priv *priv,
15450 ch_info->ht40_min_power = 0;
15451 ch_info->ht40_scan_power = eeprom_ch->max_power_avg;
15452 ch_info->ht40_flags = eeprom_ch->flags;
15453- if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
15454- ch_info->ht40_extension_channel &= ~clear_ht40_extension_channel;
15455+ ch_info->ht40_extension_channel &= ~clear_ht40_extension_channel;
15456
15457 return 0;
15458 }
15459diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
15460index fc93f12..80b9e45 100644
15461--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
15462+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
15463@@ -133,7 +133,7 @@ struct iwl_eeprom_channel {
15464 *
15465 */
15466 struct iwl_eeprom_enhanced_txpwr {
15467- __le16 common;
15468+ u16 reserved;
15469 s8 chain_a_max;
15470 s8 chain_b_max;
15471 s8 chain_c_max;
15472@@ -347,7 +347,7 @@ struct iwl_eeprom_calib_subband_info {
15473 struct iwl_eeprom_calib_info {
15474 u8 saturation_power24; /* half-dBm (e.g. "34" = 17 dBm) */
15475 u8 saturation_power52; /* half-dBm */
15476- __le16 voltage; /* signed */
15477+ s16 voltage; /* signed */
15478 struct iwl_eeprom_calib_subband_info
15479 band_info[EEPROM_TX_POWER_BANDS];
15480 } __attribute__ ((packed));
15481diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
15482index 5f26c93..d00a803 100644
15483--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
15484+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
15485@@ -562,9 +562,6 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
15486 txq = &priv->txq[txq_id];
15487 q = &txq->q;
15488
15489- if ((iwl_queue_space(q) < q->high_mark))
15490- goto drop;
15491-
15492 spin_lock_irqsave(&priv->lock, flags);
15493
15494 idx = get_cmd_index(q, q->write_ptr, 0);
15495@@ -3857,11 +3854,9 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
15496 /* Tell mac80211 our characteristics */
15497 hw->flags = IEEE80211_HW_SIGNAL_DBM |
15498 IEEE80211_HW_NOISE_DBM |
15499- IEEE80211_HW_SPECTRUM_MGMT;
15500-
15501- if (!priv->cfg->broken_powersave)
15502- hw->flags |= IEEE80211_HW_SUPPORTS_PS |
15503- IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
15504+ IEEE80211_HW_SPECTRUM_MGMT |
15505+ IEEE80211_HW_SUPPORTS_PS |
15506+ IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
15507
15508 hw->wiphy->interface_modes =
15509 BIT(NL80211_IFTYPE_STATION) |
15510diff --git a/drivers/net/wireless/iwmc3200wifi/iwm.h b/drivers/net/wireless/iwmc3200wifi/iwm.h
15511index 93c8989..1b02a4e 100644
15512--- a/drivers/net/wireless/iwmc3200wifi/iwm.h
15513+++ b/drivers/net/wireless/iwmc3200wifi/iwm.h
15514@@ -258,7 +258,7 @@ struct iwm_priv {
15515
15516 struct sk_buff_head rx_list;
15517 struct list_head rx_tickets;
15518- struct list_head rx_packets[IWM_RX_ID_HASH + 1];
15519+ struct list_head rx_packets[IWM_RX_ID_HASH];
15520 struct workqueue_struct *rx_wq;
15521 struct work_struct rx_worker;
15522
15523diff --git a/drivers/net/wireless/libertas/scan.c b/drivers/net/wireless/libertas/scan.c
15524index 06d66a1..6c95af3 100644
15525--- a/drivers/net/wireless/libertas/scan.c
15526+++ b/drivers/net/wireless/libertas/scan.c
15527@@ -399,8 +399,11 @@ int lbs_scan_networks(struct lbs_private *priv, int full_scan)
15528 chan_count = lbs_scan_create_channel_list(priv, chan_list);
15529
15530 netif_stop_queue(priv->dev);
15531- if (priv->mesh_dev)
15532+ netif_carrier_off(priv->dev);
15533+ if (priv->mesh_dev) {
15534 netif_stop_queue(priv->mesh_dev);
15535+ netif_carrier_off(priv->mesh_dev);
15536+ }
15537
15538 /* Prepare to continue an interrupted scan */
15539 lbs_deb_scan("chan_count %d, scan_channel %d\n",
15540@@ -464,13 +467,16 @@ out2:
15541 priv->scan_channel = 0;
15542
15543 out:
15544- if (priv->connect_status == LBS_CONNECTED && !priv->tx_pending_len)
15545- netif_wake_queue(priv->dev);
15546-
15547- if (priv->mesh_dev && (priv->mesh_connect_status == LBS_CONNECTED) &&
15548- !priv->tx_pending_len)
15549- netif_wake_queue(priv->mesh_dev);
15550-
15551+ if (priv->connect_status == LBS_CONNECTED) {
15552+ netif_carrier_on(priv->dev);
15553+ if (!priv->tx_pending_len)
15554+ netif_wake_queue(priv->dev);
15555+ }
15556+ if (priv->mesh_dev && (priv->mesh_connect_status == LBS_CONNECTED)) {
15557+ netif_carrier_on(priv->mesh_dev);
15558+ if (!priv->tx_pending_len)
15559+ netif_wake_queue(priv->mesh_dev);
15560+ }
15561 kfree(chan_list);
15562
15563 lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret);
15564diff --git a/drivers/net/wireless/libertas/wext.c b/drivers/net/wireless/libertas/wext.c
15565index 01c738b..be837a0 100644
15566--- a/drivers/net/wireless/libertas/wext.c
15567+++ b/drivers/net/wireless/libertas/wext.c
15568@@ -1953,8 +1953,10 @@ static int lbs_get_essid(struct net_device *dev, struct iw_request_info *info,
15569 if (priv->connect_status == LBS_CONNECTED) {
15570 memcpy(extra, priv->curbssparams.ssid,
15571 priv->curbssparams.ssid_len);
15572+ extra[priv->curbssparams.ssid_len] = '\0';
15573 } else {
15574 memset(extra, 0, 32);
15575+ extra[priv->curbssparams.ssid_len] = '\0';
15576 }
15577 /*
15578 * If none, we may want to get the one that was set
15579diff --git a/drivers/net/wireless/orinoco/wext.c b/drivers/net/wireless/orinoco/wext.c
15580index 31ca241..7698fdd 100644
15581--- a/drivers/net/wireless/orinoco/wext.c
15582+++ b/drivers/net/wireless/orinoco/wext.c
15583@@ -23,7 +23,7 @@
15584 #define MAX_RID_LEN 1024
15585
15586 /* Helper routine to record keys
15587- * It is called under orinoco_lock so it may not sleep */
15588+ * Do not call from interrupt context */
15589 static int orinoco_set_key(struct orinoco_private *priv, int index,
15590 enum orinoco_alg alg, const u8 *key, int key_len,
15591 const u8 *seq, int seq_len)
15592@@ -32,14 +32,14 @@ static int orinoco_set_key(struct orinoco_private *priv, int index,
15593 kzfree(priv->keys[index].seq);
15594
15595 if (key_len) {
15596- priv->keys[index].key = kzalloc(key_len, GFP_ATOMIC);
15597+ priv->keys[index].key = kzalloc(key_len, GFP_KERNEL);
15598 if (!priv->keys[index].key)
15599 goto nomem;
15600 } else
15601 priv->keys[index].key = NULL;
15602
15603 if (seq_len) {
15604- priv->keys[index].seq = kzalloc(seq_len, GFP_ATOMIC);
15605+ priv->keys[index].seq = kzalloc(seq_len, GFP_KERNEL);
15606 if (!priv->keys[index].seq)
15607 goto free_key;
15608 } else
15609diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
15610index 9a6ceb4..b20e3ea 100644
15611--- a/drivers/net/wireless/rt2x00/rt61pci.c
15612+++ b/drivers/net/wireless/rt2x00/rt61pci.c
15613@@ -2538,11 +2538,6 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
15614 unsigned int i;
15615
15616 /*
15617- * Disable powersaving as default.
15618- */
15619- rt2x00dev->hw->wiphy->ps_default = false;
15620-
15621- /*
15622 * Initialize all hw fields.
15623 */
15624 rt2x00dev->hw->flags =
15625diff --git a/drivers/net/wireless/rtl818x/rtl8187.h b/drivers/net/wireless/rtl818x/rtl8187.h
15626index 99406bf..bf9175a 100644
15627--- a/drivers/net/wireless/rtl818x/rtl8187.h
15628+++ b/drivers/net/wireless/rtl818x/rtl8187.h
15629@@ -23,7 +23,6 @@
15630 #define RTL8187_EEPROM_TXPWR_CHAN_1 0x16 /* 3 channels */
15631 #define RTL8187_EEPROM_TXPWR_CHAN_6 0x1B /* 2 channels */
15632 #define RTL8187_EEPROM_TXPWR_CHAN_4 0x3D /* 2 channels */
15633-#define RTL8187_EEPROM_SELECT_GPIO 0x3B
15634
15635 #define RTL8187_REQT_READ 0xC0
15636 #define RTL8187_REQT_WRITE 0x40
15637@@ -32,9 +31,6 @@
15638
15639 #define RTL8187_MAX_RX 0x9C4
15640
15641-#define RFKILL_MASK_8187_89_97 0x2
15642-#define RFKILL_MASK_8198 0x4
15643-
15644 struct rtl8187_rx_info {
15645 struct urb *urb;
15646 struct ieee80211_hw *dev;
15647@@ -127,7 +123,6 @@ struct rtl8187_priv {
15648 u8 noise;
15649 u8 slot_time;
15650 u8 aifsn[4];
15651- u8 rfkill_mask;
15652 struct {
15653 __le64 buf;
15654 struct sk_buff_head queue;
15655diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c
15656index 9921147..2017ccc 100644
15657--- a/drivers/net/wireless/rtl818x/rtl8187_dev.c
15658+++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c
15659@@ -65,7 +65,6 @@ static struct usb_device_id rtl8187_table[] __devinitdata = {
15660 /* Sitecom */
15661 {USB_DEVICE(0x0df6, 0x000d), .driver_info = DEVICE_RTL8187},
15662 {USB_DEVICE(0x0df6, 0x0028), .driver_info = DEVICE_RTL8187B},
15663- {USB_DEVICE(0x0df6, 0x0029), .driver_info = DEVICE_RTL8187B},
15664 /* Sphairon Access Systems GmbH */
15665 {USB_DEVICE(0x114B, 0x0150), .driver_info = DEVICE_RTL8187},
15666 /* Dick Smith Electronics */
15667@@ -1330,7 +1329,6 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
15668 struct ieee80211_channel *channel;
15669 const char *chip_name;
15670 u16 txpwr, reg;
15671- u16 product_id = le16_to_cpu(udev->descriptor.idProduct);
15672 int err, i;
15673
15674 dev = ieee80211_alloc_hw(sizeof(*priv), &rtl8187_ops);
15675@@ -1490,13 +1488,6 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
15676 (*channel++).hw_value = txpwr & 0xFF;
15677 (*channel++).hw_value = txpwr >> 8;
15678 }
15679- /* Handle the differing rfkill GPIO bit in different models */
15680- priv->rfkill_mask = RFKILL_MASK_8187_89_97;
15681- if (product_id == 0x8197 || product_id == 0x8198) {
15682- eeprom_93cx6_read(&eeprom, RTL8187_EEPROM_SELECT_GPIO, &reg);
15683- if (reg & 0xFF00)
15684- priv->rfkill_mask = RFKILL_MASK_8198;
15685- }
15686
15687 /*
15688 * XXX: Once this driver supports anything that requires
15689@@ -1525,9 +1516,9 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
15690 mutex_init(&priv->conf_mutex);
15691 skb_queue_head_init(&priv->b_tx_status.queue);
15692
15693- printk(KERN_INFO "%s: hwaddr %pM, %s V%d + %s, rfkill mask %d\n",
15694+ printk(KERN_INFO "%s: hwaddr %pM, %s V%d + %s\n",
15695 wiphy_name(dev->wiphy), dev->wiphy->perm_addr,
15696- chip_name, priv->asic_rev, priv->rf->name, priv->rfkill_mask);
15697+ chip_name, priv->asic_rev, priv->rf->name);
15698
15699 #ifdef CONFIG_RTL8187_LEDS
15700 eeprom_93cx6_read(&eeprom, 0x3F, &reg);
15701diff --git a/drivers/net/wireless/rtl818x/rtl8187_rfkill.c b/drivers/net/wireless/rtl818x/rtl8187_rfkill.c
15702index 03555e1..cad8037 100644
15703--- a/drivers/net/wireless/rtl818x/rtl8187_rfkill.c
15704+++ b/drivers/net/wireless/rtl818x/rtl8187_rfkill.c
15705@@ -25,10 +25,10 @@ static bool rtl8187_is_radio_enabled(struct rtl8187_priv *priv)
15706 u8 gpio;
15707
15708 gpio = rtl818x_ioread8(priv, &priv->map->GPIO0);
15709- rtl818x_iowrite8(priv, &priv->map->GPIO0, gpio & ~priv->rfkill_mask);
15710+ rtl818x_iowrite8(priv, &priv->map->GPIO0, gpio & ~0x02);
15711 gpio = rtl818x_ioread8(priv, &priv->map->GPIO1);
15712
15713- return gpio & priv->rfkill_mask;
15714+ return gpio & 0x02;
15715 }
15716
15717 void rtl8187_rfkill_init(struct ieee80211_hw *hw)
15718diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
15719index 5753036..b952ebc 100644
15720--- a/drivers/pci/dmar.c
15721+++ b/drivers/pci/dmar.c
15722@@ -582,8 +582,6 @@ int __init dmar_table_init(void)
15723 return 0;
15724 }
15725
15726-static int bios_warned;
15727-
15728 int __init check_zero_address(void)
15729 {
15730 struct acpi_table_dmar *dmar;
15731@@ -603,9 +601,6 @@ int __init check_zero_address(void)
15732 }
15733
15734 if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
15735- void __iomem *addr;
15736- u64 cap, ecap;
15737-
15738 drhd = (void *)entry_header;
15739 if (!drhd->address) {
15740 /* Promote an attitude of violence to a BIOS engineer today */
15741@@ -614,40 +609,17 @@ int __init check_zero_address(void)
15742 dmi_get_system_info(DMI_BIOS_VENDOR),
15743 dmi_get_system_info(DMI_BIOS_VERSION),
15744 dmi_get_system_info(DMI_PRODUCT_VERSION));
15745- bios_warned = 1;
15746- goto failed;
15747- }
15748-
15749- addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
15750- if (!addr ) {
15751- printk("IOMMU: can't validate: %llx\n", drhd->address);
15752- goto failed;
15753- }
15754- cap = dmar_readq(addr + DMAR_CAP_REG);
15755- ecap = dmar_readq(addr + DMAR_ECAP_REG);
15756- early_iounmap(addr, VTD_PAGE_SIZE);
15757- if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
15758- /* Promote an attitude of violence to a BIOS engineer today */
15759- WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
15760- "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
15761- drhd->address,
15762- dmi_get_system_info(DMI_BIOS_VENDOR),
15763- dmi_get_system_info(DMI_BIOS_VERSION),
15764- dmi_get_system_info(DMI_PRODUCT_VERSION));
15765- bios_warned = 1;
15766- goto failed;
15767+#ifdef CONFIG_DMAR
15768+ dmar_disabled = 1;
15769+#endif
15770+ return 0;
15771 }
15772+ break;
15773 }
15774
15775 entry_header = ((void *)entry_header + entry_header->length);
15776 }
15777 return 1;
15778-
15779-failed:
15780-#ifdef CONFIG_DMAR
15781- dmar_disabled = 1;
15782-#endif
15783- return 0;
15784 }
15785
15786 void __init detect_intel_iommu(void)
15787@@ -692,18 +664,6 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
15788 int agaw = 0;
15789 int msagaw = 0;
15790
15791- if (!drhd->reg_base_addr) {
15792- if (!bios_warned) {
15793- WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
15794- "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
15795- dmi_get_system_info(DMI_BIOS_VENDOR),
15796- dmi_get_system_info(DMI_BIOS_VERSION),
15797- dmi_get_system_info(DMI_PRODUCT_VERSION));
15798- bios_warned = 1;
15799- }
15800- return -EINVAL;
15801- }
15802-
15803 iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
15804 if (!iommu)
15805 return -ENOMEM;
15806@@ -720,16 +680,13 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
15807 iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
15808
15809 if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
15810- if (!bios_warned) {
15811- /* Promote an attitude of violence to a BIOS engineer today */
15812- WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
15813- "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
15814- drhd->reg_base_addr,
15815- dmi_get_system_info(DMI_BIOS_VENDOR),
15816- dmi_get_system_info(DMI_BIOS_VERSION),
15817- dmi_get_system_info(DMI_PRODUCT_VERSION));
15818- bios_warned = 1;
15819- }
15820+ /* Promote an attitude of violence to a BIOS engineer today */
15821+ WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
15822+ "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
15823+ drhd->reg_base_addr,
15824+ dmi_get_system_info(DMI_BIOS_VENDOR),
15825+ dmi_get_system_info(DMI_BIOS_VERSION),
15826+ dmi_get_system_info(DMI_PRODUCT_VERSION));
15827 goto err_unmap;
15828 }
15829
15830diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
15831index 2498602..1840a05 100644
15832--- a/drivers/pci/intel-iommu.c
15833+++ b/drivers/pci/intel-iommu.c
15834@@ -1523,15 +1523,12 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
15835
15836 /* Skip top levels of page tables for
15837 * iommu which has less agaw than default.
15838- * Unnecessary for PT mode.
15839 */
15840- if (translation != CONTEXT_TT_PASS_THROUGH) {
15841- for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
15842- pgd = phys_to_virt(dma_pte_addr(pgd));
15843- if (!dma_pte_present(pgd)) {
15844- spin_unlock_irqrestore(&iommu->lock, flags);
15845- return -ENOMEM;
15846- }
15847+ for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
15848+ pgd = phys_to_virt(dma_pte_addr(pgd));
15849+ if (!dma_pte_present(pgd)) {
15850+ spin_unlock_irqrestore(&iommu->lock, flags);
15851+ return -ENOMEM;
15852 }
15853 }
15854 }
15855@@ -1994,16 +1991,6 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev,
15856 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
15857 pci_name(pdev), start, end);
15858
15859- if (end < start) {
15860- WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
15861- "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
15862- dmi_get_system_info(DMI_BIOS_VENDOR),
15863- dmi_get_system_info(DMI_BIOS_VERSION),
15864- dmi_get_system_info(DMI_PRODUCT_VERSION));
15865- ret = -EIO;
15866- goto error;
15867- }
15868-
15869 if (end >> agaw_to_width(domain->agaw)) {
15870 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
15871 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
15872@@ -3241,9 +3228,6 @@ static int device_notifier(struct notifier_block *nb,
15873 struct pci_dev *pdev = to_pci_dev(dev);
15874 struct dmar_domain *domain;
15875
15876- if (iommu_no_mapping(dev))
15877- return 0;
15878-
15879 domain = find_domain(pdev);
15880 if (!domain)
15881 return 0;
15882diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
15883index 6477722..4e4c295 100644
15884--- a/drivers/pci/pci.c
15885+++ b/drivers/pci/pci.c
15886@@ -2723,11 +2723,6 @@ int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev)
15887 return 1;
15888 }
15889
15890-void __weak pci_fixup_cardbus(struct pci_bus *bus)
15891-{
15892-}
15893-EXPORT_SYMBOL(pci_fixup_cardbus);
15894-
15895 static int __init pci_setup(char *str)
15896 {
15897 while (str) {
15898diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c
15899index 0d91a8a..62d15f6 100644
15900--- a/drivers/pci/pcie/aer/aer_inject.c
15901+++ b/drivers/pci/pcie/aer/aer_inject.c
15902@@ -392,14 +392,8 @@ static int aer_inject(struct aer_error_inj *einj)
15903 if (ret)
15904 goto out_put;
15905
15906- if (find_aer_device(rpdev, &edev)) {
15907- if (!get_service_data(edev)) {
15908- printk(KERN_WARNING "AER service is not initialized\n");
15909- ret = -EINVAL;
15910- goto out_put;
15911- }
15912+ if (find_aer_device(rpdev, &edev))
15913 aer_irq(-1, edev);
15914- }
15915 else
15916 ret = -EINVAL;
15917 out_put:
15918diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
15919index dd58c6a..cb1a027 100644
15920--- a/drivers/pci/setup-bus.c
15921+++ b/drivers/pci/setup-bus.c
15922@@ -142,6 +142,7 @@ static void pci_setup_bridge(struct pci_bus *bus)
15923 struct pci_dev *bridge = bus->self;
15924 struct pci_bus_region region;
15925 u32 l, bu, lu, io_upper16;
15926+ int pref_mem64;
15927
15928 if (pci_is_enabled(bridge))
15929 return;
15930@@ -197,6 +198,7 @@ static void pci_setup_bridge(struct pci_bus *bus)
15931 pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, 0);
15932
15933 /* Set up PREF base/limit. */
15934+ pref_mem64 = 0;
15935 bu = lu = 0;
15936 pcibios_resource_to_bus(bridge, &region, bus->resource[2]);
15937 if (bus->resource[2]->flags & IORESOURCE_PREFETCH) {
15938@@ -204,6 +206,7 @@ static void pci_setup_bridge(struct pci_bus *bus)
15939 l = (region.start >> 16) & 0xfff0;
15940 l |= region.end & 0xfff00000;
15941 if (bus->resource[2]->flags & IORESOURCE_MEM_64) {
15942+ pref_mem64 = 1;
15943 bu = upper_32_bits(region.start);
15944 lu = upper_32_bits(region.end);
15945 width = 16;
15946@@ -218,9 +221,11 @@ static void pci_setup_bridge(struct pci_bus *bus)
15947 }
15948 pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l);
15949
15950- /* Set the upper 32 bits of PREF base & limit. */
15951- pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu);
15952- pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu);
15953+ if (pref_mem64) {
15954+ /* Set the upper 32 bits of PREF base & limit. */
15955+ pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu);
15956+ pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu);
15957+ }
15958
15959 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl);
15960 }
15961diff --git a/drivers/pcmcia/cardbus.c b/drivers/pcmcia/cardbus.c
15962index 5c26793..db77e1f 100644
15963--- a/drivers/pcmcia/cardbus.c
15964+++ b/drivers/pcmcia/cardbus.c
15965@@ -214,7 +214,7 @@ int __ref cb_alloc(struct pcmcia_socket * s)
15966 unsigned int max, pass;
15967
15968 s->functions = pci_scan_slot(bus, PCI_DEVFN(0, 0));
15969- pci_fixup_cardbus(bus);
15970+// pcibios_fixup_bus(bus);
15971
15972 max = bus->secondary;
15973 for (pass = 0; pass < 2; pass++)
15974diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
15975index 4d922e4..ab64522 100644
15976--- a/drivers/platform/x86/acerhdf.c
15977+++ b/drivers/platform/x86/acerhdf.c
15978@@ -52,7 +52,7 @@
15979 */
15980 #undef START_IN_KERNEL_MODE
15981
15982-#define DRV_VER "0.5.20"
15983+#define DRV_VER "0.5.18"
15984
15985 /*
15986 * According to the Atom N270 datasheet,
15987@@ -112,14 +112,12 @@ module_param_string(force_product, force_product, 16, 0);
15988 MODULE_PARM_DESC(force_product, "Force BIOS product and omit BIOS check");
15989
15990 /*
15991- * cmd_off: to switch the fan completely off
15992- * chk_off: to check if the fan is off
15993+ * cmd_off: to switch the fan completely off / to check if the fan is off
15994 * cmd_auto: to set the BIOS in control of the fan. The BIOS regulates then
15995 * the fan speed depending on the temperature
15996 */
15997 struct fancmd {
15998 u8 cmd_off;
15999- u8 chk_off;
16000 u8 cmd_auto;
16001 };
16002
16003@@ -136,41 +134,32 @@ struct bios_settings_t {
16004 /* Register addresses and values for different BIOS versions */
16005 static const struct bios_settings_t bios_tbl[] = {
16006 /* AOA110 */
16007- {"Acer", "AOA110", "v0.3109", 0x55, 0x58, {0x1f, 0x1f, 0x00} },
16008- {"Acer", "AOA110", "v0.3114", 0x55, 0x58, {0x1f, 0x1f, 0x00} },
16009- {"Acer", "AOA110", "v0.3301", 0x55, 0x58, {0xaf, 0xaf, 0x00} },
16010- {"Acer", "AOA110", "v0.3304", 0x55, 0x58, {0xaf, 0xaf, 0x00} },
16011- {"Acer", "AOA110", "v0.3305", 0x55, 0x58, {0xaf, 0xaf, 0x00} },
16012- {"Acer", "AOA110", "v0.3307", 0x55, 0x58, {0xaf, 0xaf, 0x00} },
16013- {"Acer", "AOA110", "v0.3308", 0x55, 0x58, {0x21, 0x21, 0x00} },
16014- {"Acer", "AOA110", "v0.3309", 0x55, 0x58, {0x21, 0x21, 0x00} },
16015- {"Acer", "AOA110", "v0.3310", 0x55, 0x58, {0x21, 0x21, 0x00} },
16016+ {"Acer", "AOA110", "v0.3109", 0x55, 0x58, {0x1f, 0x00} },
16017+ {"Acer", "AOA110", "v0.3114", 0x55, 0x58, {0x1f, 0x00} },
16018+ {"Acer", "AOA110", "v0.3301", 0x55, 0x58, {0xaf, 0x00} },
16019+ {"Acer", "AOA110", "v0.3304", 0x55, 0x58, {0xaf, 0x00} },
16020+ {"Acer", "AOA110", "v0.3305", 0x55, 0x58, {0xaf, 0x00} },
16021+ {"Acer", "AOA110", "v0.3307", 0x55, 0x58, {0xaf, 0x00} },
16022+ {"Acer", "AOA110", "v0.3308", 0x55, 0x58, {0x21, 0x00} },
16023+ {"Acer", "AOA110", "v0.3309", 0x55, 0x58, {0x21, 0x00} },
16024+ {"Acer", "AOA110", "v0.3310", 0x55, 0x58, {0x21, 0x00} },
16025 /* AOA150 */
16026- {"Acer", "AOA150", "v0.3114", 0x55, 0x58, {0x20, 0x20, 0x00} },
16027- {"Acer", "AOA150", "v0.3301", 0x55, 0x58, {0x20, 0x20, 0x00} },
16028- {"Acer", "AOA150", "v0.3304", 0x55, 0x58, {0x20, 0x20, 0x00} },
16029- {"Acer", "AOA150", "v0.3305", 0x55, 0x58, {0x20, 0x20, 0x00} },
16030- {"Acer", "AOA150", "v0.3307", 0x55, 0x58, {0x20, 0x20, 0x00} },
16031- {"Acer", "AOA150", "v0.3308", 0x55, 0x58, {0x20, 0x20, 0x00} },
16032- {"Acer", "AOA150", "v0.3309", 0x55, 0x58, {0x20, 0x20, 0x00} },
16033- {"Acer", "AOA150", "v0.3310", 0x55, 0x58, {0x20, 0x20, 0x00} },
16034- /* Acer 1410 */
16035- {"Acer", "Aspire 1410", "v0.3120", 0x55, 0x58, {0x9e, 0x9e, 0x00} },
16036+ {"Acer", "AOA150", "v0.3114", 0x55, 0x58, {0x20, 0x00} },
16037+ {"Acer", "AOA150", "v0.3301", 0x55, 0x58, {0x20, 0x00} },
16038+ {"Acer", "AOA150", "v0.3304", 0x55, 0x58, {0x20, 0x00} },
16039+ {"Acer", "AOA150", "v0.3305", 0x55, 0x58, {0x20, 0x00} },
16040+ {"Acer", "AOA150", "v0.3307", 0x55, 0x58, {0x20, 0x00} },
16041+ {"Acer", "AOA150", "v0.3308", 0x55, 0x58, {0x20, 0x00} },
16042+ {"Acer", "AOA150", "v0.3309", 0x55, 0x58, {0x20, 0x00} },
16043+ {"Acer", "AOA150", "v0.3310", 0x55, 0x58, {0x20, 0x00} },
16044 /* special BIOS / other */
16045- {"Gateway", "AOA110", "v0.3103", 0x55, 0x58, {0x21, 0x21, 0x00} },
16046- {"Gateway", "AOA150", "v0.3103", 0x55, 0x58, {0x20, 0x20, 0x00} },
16047- {"Gateway ", "LT31 ", "v1.3103 ", 0x55, 0x58,
16048- {0x10, 0x0f, 0x00} },
16049- {"Gateway ", "LT31 ", "v1.3201 ", 0x55, 0x58,
16050- {0x10, 0x0f, 0x00} },
16051- {"Gateway ", "LT31 ", "v1.3302 ", 0x55, 0x58,
16052- {0x10, 0x0f, 0x00} },
16053- {"Packard Bell", "DOA150", "v0.3104", 0x55, 0x58, {0x21, 0x21, 0x00} },
16054- {"Packard Bell", "DOA150", "v0.3105", 0x55, 0x58, {0x20, 0x20, 0x00} },
16055- {"Packard Bell", "AOA110", "v0.3105", 0x55, 0x58, {0x21, 0x21, 0x00} },
16056- {"Packard Bell", "AOA150", "v0.3105", 0x55, 0x58, {0x20, 0x20, 0x00} },
16057+ {"Gateway", "AOA110", "v0.3103", 0x55, 0x58, {0x21, 0x00} },
16058+ {"Gateway", "AOA150", "v0.3103", 0x55, 0x58, {0x20, 0x00} },
16059+ {"Packard Bell", "DOA150", "v0.3104", 0x55, 0x58, {0x21, 0x00} },
16060+ {"Packard Bell", "AOA110", "v0.3105", 0x55, 0x58, {0x21, 0x00} },
16061+ {"Packard Bell", "AOA150", "v0.3105", 0x55, 0x58, {0x20, 0x00} },
16062 /* pewpew-terminator */
16063- {"", "", "", 0, 0, {0, 0, 0} }
16064+ {"", "", "", 0, 0, {0, 0} }
16065 };
16066
16067 static const struct bios_settings_t *bios_cfg __read_mostly;
16068@@ -194,7 +183,7 @@ static int acerhdf_get_fanstate(int *state)
16069 if (ec_read(bios_cfg->fanreg, &fan))
16070 return -EINVAL;
16071
16072- if (fan != bios_cfg->cmd.chk_off)
16073+ if (fan != bios_cfg->cmd.cmd_off)
16074 *state = ACERHDF_FAN_AUTO;
16075 else
16076 *state = ACERHDF_FAN_OFF;
16077@@ -640,10 +629,9 @@ static void __exit acerhdf_exit(void)
16078 MODULE_LICENSE("GPL");
16079 MODULE_AUTHOR("Peter Feuerer");
16080 MODULE_DESCRIPTION("Aspire One temperature and fan driver");
16081-MODULE_ALIAS("dmi:*:*Acer*:pnAOA*:");
16082-MODULE_ALIAS("dmi:*:*Gateway*:pnAOA*:");
16083-MODULE_ALIAS("dmi:*:*Packard Bell*:pnAOA*:");
16084-MODULE_ALIAS("dmi:*:*Packard Bell*:pnDOA*:");
16085+MODULE_ALIAS("dmi:*:*Acer*:*:");
16086+MODULE_ALIAS("dmi:*:*Gateway*:*:");
16087+MODULE_ALIAS("dmi:*:*Packard Bell*:*:");
16088
16089 module_init(acerhdf_init);
16090 module_exit(acerhdf_exit);
16091diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
16092index 767cb61..b39d2bb 100644
16093--- a/drivers/platform/x86/asus-laptop.c
16094+++ b/drivers/platform/x86/asus-laptop.c
16095@@ -221,7 +221,6 @@ static struct asus_hotk *hotk;
16096 */
16097 static const struct acpi_device_id asus_device_ids[] = {
16098 {"ATK0100", 0},
16099- {"ATK0101", 0},
16100 {"", 0},
16101 };
16102 MODULE_DEVICE_TABLE(acpi, asus_device_ids);
16103@@ -294,11 +293,6 @@ struct key_entry {
16104 enum { KE_KEY, KE_END };
16105
16106 static struct key_entry asus_keymap[] = {
16107- {KE_KEY, 0x02, KEY_SCREENLOCK},
16108- {KE_KEY, 0x05, KEY_WLAN},
16109- {KE_KEY, 0x08, BTN_TOUCH},
16110- {KE_KEY, 0x17, KEY_ZOOM},
16111- {KE_KEY, 0x1f, KEY_BATTERY},
16112 {KE_KEY, 0x30, KEY_VOLUMEUP},
16113 {KE_KEY, 0x31, KEY_VOLUMEDOWN},
16114 {KE_KEY, 0x32, KEY_MUTE},
16115@@ -318,8 +312,6 @@ static struct key_entry asus_keymap[] = {
16116 {KE_KEY, 0x5F, KEY_WLAN},
16117 {KE_KEY, 0x60, KEY_SWITCHVIDEOMODE},
16118 {KE_KEY, 0x61, KEY_SWITCHVIDEOMODE},
16119- {KE_KEY, 0x62, KEY_SWITCHVIDEOMODE},
16120- {KE_KEY, 0x63, KEY_SWITCHVIDEOMODE},
16121 {KE_KEY, 0x6B, BTN_TOUCH}, /* Lock Mouse */
16122 {KE_KEY, 0x82, KEY_CAMERA},
16123 {KE_KEY, 0x8A, KEY_PROG1},
16124@@ -1291,8 +1283,8 @@ static int asus_hotk_add(struct acpi_device *device)
16125 hotk->ledd_status = 0xFFF;
16126
16127 /* Set initial values of light sensor and level */
16128- hotk->light_switch = 0; /* Default to light sensor disabled */
16129- hotk->light_level = 5; /* level 5 for sensor sensitivity */
16130+ hotk->light_switch = 1; /* Default to light sensor disabled */
16131+ hotk->light_level = 0; /* level 5 for sensor sensitivity */
16132
16133 if (ls_switch_handle)
16134 set_light_sens_switch(hotk->light_switch);
16135diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
16136index 6dec7cc..0f900cc 100644
16137--- a/drivers/platform/x86/dell-wmi.c
16138+++ b/drivers/platform/x86/dell-wmi.c
16139@@ -158,13 +158,8 @@ static void dell_wmi_notify(u32 value, void *context)
16140 struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
16141 static struct key_entry *key;
16142 union acpi_object *obj;
16143- acpi_status status;
16144
16145- status = wmi_get_event_data(value, &response);
16146- if (status != AE_OK) {
16147- printk(KERN_INFO "dell-wmi: bad event status 0x%x\n", status);
16148- return;
16149- }
16150+ wmi_get_event_data(value, &response);
16151
16152 obj = (union acpi_object *)response.pointer;
16153
16154@@ -185,7 +180,6 @@ static void dell_wmi_notify(u32 value, void *context)
16155 printk(KERN_INFO "dell-wmi: Unknown key %x pressed\n",
16156 buffer[1] & 0xFFFF);
16157 }
16158- kfree(obj);
16159 }
16160
16161 static int __init dell_wmi_input_setup(void)
16162diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
16163index deb53b5..c284217 100644
16164--- a/drivers/platform/x86/hp-wmi.c
16165+++ b/drivers/platform/x86/hp-wmi.c
16166@@ -334,13 +334,8 @@ static void hp_wmi_notify(u32 value, void *context)
16167 struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
16168 static struct key_entry *key;
16169 union acpi_object *obj;
16170- acpi_status status;
16171
16172- status = wmi_get_event_data(value, &response);
16173- if (status != AE_OK) {
16174- printk(KERN_INFO "hp-wmi: bad event status 0x%x\n", status);
16175- return;
16176- }
16177+ wmi_get_event_data(value, &response);
16178
16179 obj = (union acpi_object *)response.pointer;
16180
16181@@ -382,8 +377,6 @@ static void hp_wmi_notify(u32 value, void *context)
16182 eventcode);
16183 } else
16184 printk(KERN_INFO "HP WMI: Unknown response received\n");
16185-
16186- kfree(obj);
16187 }
16188
16189 static int __init hp_wmi_input_setup(void)
16190diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
16191index 1ee734c..a848c7e 100644
16192--- a/drivers/platform/x86/thinkpad_acpi.c
16193+++ b/drivers/platform/x86/thinkpad_acpi.c
16194@@ -3866,6 +3866,15 @@ enum {
16195
16196 #define TPACPI_RFK_BLUETOOTH_SW_NAME "tpacpi_bluetooth_sw"
16197
16198+static void bluetooth_suspend(pm_message_t state)
16199+{
16200+ /* Try to make sure radio will resume powered off */
16201+ if (!acpi_evalf(NULL, NULL, "\\BLTH", "vd",
16202+ TP_ACPI_BLTH_PWR_OFF_ON_RESUME))
16203+ vdbg_printk(TPACPI_DBG_RFKILL,
16204+ "bluetooth power down on resume request failed\n");
16205+}
16206+
16207 static int bluetooth_get_status(void)
16208 {
16209 int status;
16210@@ -3899,9 +3908,10 @@ static int bluetooth_set_status(enum tpacpi_rfkill_state state)
16211 #endif
16212
16213 /* We make sure to keep TP_ACPI_BLUETOOTH_RESUMECTRL off */
16214- status = TP_ACPI_BLUETOOTH_RESUMECTRL;
16215 if (state == TPACPI_RFK_RADIO_ON)
16216- status |= TP_ACPI_BLUETOOTH_RADIOSSW;
16217+ status = TP_ACPI_BLUETOOTH_RADIOSSW;
16218+ else
16219+ status = 0;
16220
16221 if (!acpi_evalf(hkey_handle, NULL, "SBDC", "vd", status))
16222 return -EIO;
16223@@ -4040,6 +4050,7 @@ static struct ibm_struct bluetooth_driver_data = {
16224 .read = bluetooth_read,
16225 .write = bluetooth_write,
16226 .exit = bluetooth_exit,
16227+ .suspend = bluetooth_suspend,
16228 .shutdown = bluetooth_shutdown,
16229 };
16230
16231@@ -4057,6 +4068,15 @@ enum {
16232
16233 #define TPACPI_RFK_WWAN_SW_NAME "tpacpi_wwan_sw"
16234
16235+static void wan_suspend(pm_message_t state)
16236+{
16237+ /* Try to make sure radio will resume powered off */
16238+ if (!acpi_evalf(NULL, NULL, "\\WGSV", "qvd",
16239+ TP_ACPI_WGSV_PWR_OFF_ON_RESUME))
16240+ vdbg_printk(TPACPI_DBG_RFKILL,
16241+ "WWAN power down on resume request failed\n");
16242+}
16243+
16244 static int wan_get_status(void)
16245 {
16246 int status;
16247@@ -4089,10 +4109,11 @@ static int wan_set_status(enum tpacpi_rfkill_state state)
16248 }
16249 #endif
16250
16251- /* We make sure to set TP_ACPI_WANCARD_RESUMECTRL */
16252- status = TP_ACPI_WANCARD_RESUMECTRL;
16253+ /* We make sure to keep TP_ACPI_WANCARD_RESUMECTRL off */
16254 if (state == TPACPI_RFK_RADIO_ON)
16255- status |= TP_ACPI_WANCARD_RADIOSSW;
16256+ status = TP_ACPI_WANCARD_RADIOSSW;
16257+ else
16258+ status = 0;
16259
16260 if (!acpi_evalf(hkey_handle, NULL, "SWAN", "vd", status))
16261 return -EIO;
16262@@ -4230,6 +4251,7 @@ static struct ibm_struct wan_driver_data = {
16263 .read = wan_read,
16264 .write = wan_write,
16265 .exit = wan_exit,
16266+ .suspend = wan_suspend,
16267 .shutdown = wan_shutdown,
16268 };
16269
16270@@ -6101,8 +6123,8 @@ static const struct tpacpi_quirk brightness_quirk_table[] __initconst = {
16271
16272 /* Models with Intel Extreme Graphics 2 */
16273 TPACPI_Q_IBM('1', 'U', TPACPI_BRGHT_Q_NOEC),
16274- TPACPI_Q_IBM('1', 'V', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
16275- TPACPI_Q_IBM('1', 'W', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
16276+ TPACPI_Q_IBM('1', 'V', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_NOEC),
16277+ TPACPI_Q_IBM('1', 'W', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_NOEC),
16278
16279 /* Models with Intel GMA900 */
16280 TPACPI_Q_IBM('7', '0', TPACPI_BRGHT_Q_NOEC), /* T43, R52 */
16281diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
16282index 87f4c97..177f8d7 100644
16283--- a/drivers/platform/x86/wmi.c
16284+++ b/drivers/platform/x86/wmi.c
16285@@ -510,8 +510,8 @@ EXPORT_SYMBOL_GPL(wmi_remove_notify_handler);
16286 /**
16287 * wmi_get_event_data - Get WMI data associated with an event
16288 *
16289- * @event: Event to find
16290- * @out: Buffer to hold event data. out->pointer should be freed with kfree()
16291+ * @event - Event to find
16292+ * &out - Buffer to hold event data
16293 *
16294 * Returns extra data associated with an event in WMI.
16295 */
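For reference, the kernel-doc hunk above also drops the note that out->pointer must be freed by the caller. The call pattern removed from the dell-wmi and hp-wmi notify handlers earlier in this patch looked roughly like this fragment (kernel context assumed, not a standalone program):

    struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
    union acpi_object *obj;
    acpi_status status;

    status = wmi_get_event_data(value, &response);
    if (status != AE_OK)
        return; /* bad event status */

    obj = (union acpi_object *)response.pointer;
    /* ... inspect obj->type and payload ... */
    kfree(obj); /* ACPI_ALLOCATE_BUFFER results are caller-owned */

Removing the kfree() from the handlers, as the dell-wmi.c and hp-wmi.c hunks in this patch do, would leak the event buffer on every WMI event.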
16296diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
16297index 1836053..efe568d 100644
16298--- a/drivers/regulator/core.c
16299+++ b/drivers/regulator/core.c
16300@@ -640,7 +640,7 @@ static int suspend_prepare(struct regulator_dev *rdev, suspend_state_t state)
16301 static void print_constraints(struct regulator_dev *rdev)
16302 {
16303 struct regulation_constraints *constraints = rdev->constraints;
16304- char buf[80] = "";
16305+ char buf[80];
16306 int count;
16307
16308 if (rdev->desc->type == REGULATOR_VOLTAGE) {
16309diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
16310index 43ed81e..768bd0e 100644
16311--- a/drivers/regulator/wm8350-regulator.c
16312+++ b/drivers/regulator/wm8350-regulator.c
16313@@ -1504,8 +1504,7 @@ int wm8350_register_led(struct wm8350 *wm8350, int lednum, int dcdc, int isink,
16314 led->isink_init.consumer_supplies = &led->isink_consumer;
16315 led->isink_init.constraints.min_uA = 0;
16316 led->isink_init.constraints.max_uA = pdata->max_uA;
16317- led->isink_init.constraints.valid_ops_mask
16318- = REGULATOR_CHANGE_CURRENT | REGULATOR_CHANGE_STATUS;
16319+ led->isink_init.constraints.valid_ops_mask = REGULATOR_CHANGE_CURRENT;
16320 led->isink_init.constraints.valid_modes_mask = REGULATOR_MODE_NORMAL;
16321 ret = wm8350_register_regulator(wm8350, isink, &led->isink_init);
16322 if (ret != 0) {
16323@@ -1518,7 +1517,6 @@ int wm8350_register_led(struct wm8350 *wm8350, int lednum, int dcdc, int isink,
16324 led->dcdc_init.num_consumer_supplies = 1;
16325 led->dcdc_init.consumer_supplies = &led->dcdc_consumer;
16326 led->dcdc_init.constraints.valid_modes_mask = REGULATOR_MODE_NORMAL;
16327- led->dcdc_init.constraints.valid_ops_mask = REGULATOR_CHANGE_STATUS;
16328 ret = wm8350_register_regulator(wm8350, dcdc, &led->dcdc_init);
16329 if (ret != 0) {
16330 platform_device_put(pdev);
16331diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
16332index 473e5f2..f7a4701 100644
16333--- a/drivers/rtc/rtc-cmos.c
16334+++ b/drivers/rtc/rtc-cmos.c
16335@@ -1099,9 +1099,9 @@ static int cmos_pnp_resume(struct pnp_dev *pnp)
16336 #define cmos_pnp_resume NULL
16337 #endif
16338
16339-static void cmos_pnp_shutdown(struct pnp_dev *pnp)
16340+static void cmos_pnp_shutdown(struct device *pdev)
16341 {
16342- if (system_state == SYSTEM_POWER_OFF && !cmos_poweroff(&pnp->dev))
16343+ if (system_state == SYSTEM_POWER_OFF && !cmos_poweroff(pdev))
16344 return;
16345
16346 cmos_do_shutdown();
16347@@ -1120,12 +1120,15 @@ static struct pnp_driver cmos_pnp_driver = {
16348 .id_table = rtc_ids,
16349 .probe = cmos_pnp_probe,
16350 .remove = __exit_p(cmos_pnp_remove),
16351- .shutdown = cmos_pnp_shutdown,
16352
16353 /* flag ensures resume() gets called, and stops syslog spam */
16354 .flags = PNP_DRIVER_RES_DO_NOT_CHANGE,
16355 .suspend = cmos_pnp_suspend,
16356 .resume = cmos_pnp_resume,
16357+ .driver = {
16358+ .name = (char *)driver_name,
16359+ .shutdown = cmos_pnp_shutdown,
16360+ }
16361 };
16362
16363 #endif /* CONFIG_PNP */
16364diff --git a/drivers/rtc/rtc-fm3130.c b/drivers/rtc/rtc-fm3130.c
16365index 812c667..3a7be11 100644
16366--- a/drivers/rtc/rtc-fm3130.c
16367+++ b/drivers/rtc/rtc-fm3130.c
16368@@ -376,22 +376,20 @@ static int __devinit fm3130_probe(struct i2c_client *client,
16369 }
16370
16371 /* Disabling calibration mode */
16372- if (fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_CAL) {
16373+ if (fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_CAL)
16374 i2c_smbus_write_byte_data(client, FM3130_RTC_CONTROL,
16375 fm3130->regs[FM3130_RTC_CONTROL] &
16376 ~(FM3130_RTC_CONTROL_BIT_CAL));
16377 dev_warn(&client->dev, "Disabling calibration mode!\n");
16378- }
16379
16380 /* Disabling read and write modes */
16381 if (fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_WRITE ||
16382- fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_READ) {
16383+ fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_READ)
16384 i2c_smbus_write_byte_data(client, FM3130_RTC_CONTROL,
16385 fm3130->regs[FM3130_RTC_CONTROL] &
16386 ~(FM3130_RTC_CONTROL_BIT_READ |
16387 FM3130_RTC_CONTROL_BIT_WRITE));
16388 dev_warn(&client->dev, "Disabling READ or WRITE mode!\n");
16389- }
16390
16391 /* oscillator off? turn it on, so clock can tick. */
16392 if (fm3130->regs[FM3130_CAL_CONTROL] & FM3130_CAL_CONTROL_BIT_nOSCEN)
16393diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
16394index d0ef15a..aaccc8e 100644
16395--- a/drivers/s390/block/dasd.c
16396+++ b/drivers/s390/block/dasd.c
16397@@ -994,9 +994,10 @@ static void dasd_handle_killed_request(struct ccw_device *cdev,
16398 return;
16399 cqr = (struct dasd_ccw_req *) intparm;
16400 if (cqr->status != DASD_CQR_IN_IO) {
16401- DBF_EVENT_DEVID(DBF_DEBUG, cdev,
16402- "invalid status in handle_killed_request: "
16403- "%02x", cqr->status);
16404+ DBF_EVENT(DBF_DEBUG,
16405+ "invalid status in handle_killed_request: "
16406+ "bus_id %s, status %02x",
16407+ dev_name(&cdev->dev), cqr->status);
16408 return;
16409 }
16410
16411@@ -1004,8 +1005,8 @@ static void dasd_handle_killed_request(struct ccw_device *cdev,
16412 if (device == NULL ||
16413 device != dasd_device_from_cdev_locked(cdev) ||
16414 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
16415- DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
16416- "invalid device in request");
16417+ DBF_DEV_EVENT(DBF_DEBUG, device, "invalid device in request: "
16418+ "bus_id %s", dev_name(&cdev->dev));
16419 return;
16420 }
16421
16422@@ -1044,13 +1045,12 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
16423 case -EIO:
16424 break;
16425 case -ETIMEDOUT:
16426- DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
16427- "request timed out\n", __func__);
16428+ DBF_EVENT(DBF_WARNING, "%s(%s): request timed out\n",
16429+ __func__, dev_name(&cdev->dev));
16430 break;
16431 default:
16432- DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
16433- "unknown error %ld\n", __func__,
16434- PTR_ERR(irb));
16435+ DBF_EVENT(DBF_WARNING, "%s(%s): unknown error %ld\n",
16436+ __func__, dev_name(&cdev->dev), PTR_ERR(irb));
16437 }
16438 dasd_handle_killed_request(cdev, intparm);
16439 return;
16440@@ -1078,8 +1078,8 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
16441 device = (struct dasd_device *) cqr->startdev;
16442 if (!device ||
16443 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
16444- DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
16445- "invalid device in request");
16446+ DBF_DEV_EVENT(DBF_DEBUG, device, "invalid device in request: "
16447+ "bus_id %s", dev_name(&cdev->dev));
16448 return;
16449 }
16450
16451@@ -2217,9 +2217,9 @@ int dasd_generic_probe(struct ccw_device *cdev,
16452 }
16453 ret = dasd_add_sysfs_files(cdev);
16454 if (ret) {
16455- DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
16456- "dasd_generic_probe: could not add "
16457- "sysfs entries");
16458+ DBF_EVENT(DBF_WARNING,
16459+ "dasd_generic_probe: could not add sysfs entries "
16460+ "for %s\n", dev_name(&cdev->dev));
16461 return ret;
16462 }
16463 cdev->handler = &dasd_int_handler;
16464diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
16465index 8174ec9..4e49b4a 100644
16466--- a/drivers/s390/block/dasd_diag.c
16467+++ b/drivers/s390/block/dasd_diag.c
16468@@ -145,15 +145,6 @@ dasd_diag_erp(struct dasd_device *device)
16469
16470 mdsk_term_io(device);
16471 rc = mdsk_init_io(device, device->block->bp_block, 0, NULL);
16472- if (rc == 4) {
16473- if (!(device->features & DASD_FEATURE_READONLY)) {
16474- dev_warn(&device->cdev->dev,
16475- "The access mode of a DIAG device changed"
16476- " to read-only");
16477- device->features |= DASD_FEATURE_READONLY;
16478- }
16479- rc = 0;
16480- }
16481 if (rc)
16482 dev_warn(&device->cdev->dev, "DIAG ERP failed with "
16483 "rc=%d\n", rc);
16484@@ -442,20 +433,16 @@ dasd_diag_check_device(struct dasd_device *device)
16485 for (sb = 512; sb < bsize; sb = sb << 1)
16486 block->s2b_shift++;
16487 rc = mdsk_init_io(device, block->bp_block, 0, NULL);
16488- if (rc && (rc != 4)) {
16489+ if (rc) {
16490 dev_warn(&device->cdev->dev, "DIAG initialization "
16491 "failed with rc=%d\n", rc);
16492 rc = -EIO;
16493 } else {
16494- if (rc == 4)
16495- device->features |= DASD_FEATURE_READONLY;
16496 dev_info(&device->cdev->dev,
16497- "New DASD with %ld byte/block, total size %ld KB%s\n",
16498+ "New DASD with %ld byte/block, total size %ld KB\n",
16499 (unsigned long) block->bp_block,
16500 (unsigned long) (block->blocks <<
16501- block->s2b_shift) >> 1,
16502- (rc == 4) ? ", read-only device" : "");
16503- rc = 0;
16504+ block->s2b_shift) >> 1);
16505 }
16506 out_label:
16507 free_page((long) label);
16508diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
16509index 678bb94..417b97c 100644
16510--- a/drivers/s390/block/dasd_eckd.c
16511+++ b/drivers/s390/block/dasd_eckd.c
16512@@ -88,9 +88,9 @@ dasd_eckd_probe (struct ccw_device *cdev)
16513 /* set ECKD specific ccw-device options */
16514 ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE);
16515 if (ret) {
16516- DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
16517- "dasd_eckd_probe: could not set "
16518- "ccw-device options");
16519+ DBF_EVENT(DBF_WARNING,
16520+ "dasd_eckd_probe: could not set ccw-device options "
16521+ "for %s\n", dev_name(&cdev->dev));
16522 return ret;
16523 }
16524 ret = dasd_generic_probe(cdev, &dasd_eckd_discipline);
16525@@ -885,15 +885,16 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
16526 rc = dasd_eckd_read_conf_lpm(device, &conf_data,
16527 &conf_len, lpm);
16528 if (rc && rc != -EOPNOTSUPP) { /* -EOPNOTSUPP is ok */
16529- DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
16530+ DBF_EVENT(DBF_WARNING,
16531 "Read configuration data returned "
16532- "error %d", rc);
16533+ "error %d for device: %s", rc,
16534+ dev_name(&device->cdev->dev));
16535 return rc;
16536 }
16537 if (conf_data == NULL) {
16538- DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
16539- "No configuration data "
16540- "retrieved");
16541+ DBF_EVENT(DBF_WARNING, "No configuration "
16542+ "data retrieved for device: %s",
16543+ dev_name(&device->cdev->dev));
16544 continue; /* no error */
16545 }
16546 /* save first valid configuration data */
16547@@ -940,8 +941,9 @@ static int dasd_eckd_read_features(struct dasd_device *device)
16548 sizeof(struct dasd_rssd_features)),
16549 device);
16550 if (IS_ERR(cqr)) {
16551- DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not "
16552- "allocate initialization request");
16553+ DBF_EVENT(DBF_WARNING, "Could not allocate initialization "
16554+ "request for device: %s",
16555+ dev_name(&device->cdev->dev));
16556 return PTR_ERR(cqr);
16557 }
16558 cqr->startdev = device;
16559@@ -1069,8 +1071,10 @@ static int dasd_eckd_validate_server(struct dasd_device *device)
16560 /* may be requested feature is not available on server,
16561 * therefore just report error and go ahead */
16562 private = (struct dasd_eckd_private *) device->private;
16563- DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "PSF-SSC for SSID %04x "
16564- "returned rc=%d", private->uid.ssid, rc);
16565+ DBF_EVENT(DBF_WARNING, "PSF-SSC on storage subsystem %s.%s.%04x "
16566+ "returned rc=%d for device: %s",
16567+ private->uid.vendor, private->uid.serial,
16568+ private->uid.ssid, rc, dev_name(&device->cdev->dev));
16569 /* RE-Read Configuration Data */
16570 return dasd_eckd_read_conf(device);
16571 }
16572@@ -1119,9 +1123,9 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
16573 if (private->uid.type == UA_BASE_DEVICE) {
16574 block = dasd_alloc_block();
16575 if (IS_ERR(block)) {
16576- DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
16577- "could not allocate dasd "
16578- "block structure");
16579+ DBF_EVENT(DBF_WARNING, "could not allocate dasd "
16580+ "block structure for device: %s",
16581+ dev_name(&device->cdev->dev));
16582 rc = PTR_ERR(block);
16583 goto out_err1;
16584 }
16585@@ -1149,8 +1153,9 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
16586 rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
16587 &private->rdc_data, 64);
16588 if (rc) {
16589- DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
16590- "Read device characteristic failed, rc=%d", rc);
16591+ DBF_EVENT(DBF_WARNING,
16592+ "Read device characteristics failed, rc=%d for "
16593+ "device: %s", rc, dev_name(&device->cdev->dev));
16594 goto out_err3;
16595 }
16596 /* find the valid cylinder size */
16597@@ -2975,7 +2980,7 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
16598 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
16599 " in req: %p CS: 0x%02X DS: 0x%02X CC: 0x%02X RC: %d\n",
16600 req, scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw),
16601- scsw_cc(&irb->scsw), req ? req->intrc : 0);
16602+ scsw_cc(&irb->scsw), req->intrc);
16603 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
16604 " device %s: Failing CCW: %p\n",
16605 dev_name(&device->cdev->dev),
16606@@ -3248,8 +3253,9 @@ int dasd_eckd_restore_device(struct dasd_device *device)
16607 rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
16608 &temp_rdc_data, 64);
16609 if (rc) {
16610- DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
16611- "Read device characteristic failed, rc=%d", rc);
16612+ DBF_EVENT(DBF_WARNING,
16613+ "Read device characteristics failed, rc=%d for "
16614+ "device: %s", rc, dev_name(&device->cdev->dev));
16615 goto out_err;
16616 }
16617 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
16618diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
16619index 227b4e9..f245377 100644
16620--- a/drivers/s390/block/dasd_fba.c
16621+++ b/drivers/s390/block/dasd_fba.c
16622@@ -141,8 +141,9 @@ dasd_fba_check_characteristics(struct dasd_device *device)
16623 }
16624 block = dasd_alloc_block();
16625 if (IS_ERR(block)) {
16626- DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s", "could not allocate "
16627- "dasd block structure");
16628+ DBF_EVENT(DBF_WARNING, "could not allocate dasd block "
16629+ "structure for device: %s",
16630+ dev_name(&device->cdev->dev));
16631 device->private = NULL;
16632 kfree(private);
16633 return PTR_ERR(block);
16634@@ -154,8 +155,9 @@ dasd_fba_check_characteristics(struct dasd_device *device)
16635 rc = dasd_generic_read_dev_chars(device, DASD_FBA_MAGIC,
16636 &private->rdc_data, 32);
16637 if (rc) {
16638- DBF_EVENT_DEVID(DBF_WARNING, cdev, "Read device "
16639- "characteristics returned error %d", rc);
16640+ DBF_EVENT(DBF_WARNING, "Read device characteristics returned "
16641+ "error %d for device: %s",
16642+ rc, dev_name(&device->cdev->dev));
16643 device->block = NULL;
16644 dasd_free_block(block);
16645 device->private = NULL;
16646diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
16647index b19f309..8afd9fa 100644
16648--- a/drivers/s390/block/dasd_int.h
16649+++ b/drivers/s390/block/dasd_int.h
16650@@ -108,16 +108,6 @@ do { \
16651 d_data); \
16652 } while(0)
16653
16654-#define DBF_EVENT_DEVID(d_level, d_cdev, d_str, d_data...) \
16655-do { \
16656- struct ccw_dev_id __dev_id; \
16657- ccw_device_get_id(d_cdev, &__dev_id); \
16658- debug_sprintf_event(dasd_debug_area, \
16659- d_level, \
16660- "0.%x.%04x " d_str "\n", \
16661- __dev_id.ssid, __dev_id.devno, d_data); \
16662-} while (0)
16663-
16664 #define DBF_EXC(d_level, d_str, d_data...)\
16665 do { \
16666 debug_sprintf_exception(dasd_debug_area, \
16667diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
16668index a5354b8..f756a1b 100644
16669--- a/drivers/s390/block/dasd_ioctl.c
16670+++ b/drivers/s390/block/dasd_ioctl.c
16671@@ -260,7 +260,7 @@ static int dasd_ioctl_information(struct dasd_block *block,
16672 struct ccw_dev_id dev_id;
16673
16674 base = block->base;
16675- if (!base->discipline || !base->discipline->fill_info)
16676+ if (!base->discipline->fill_info)
16677 return -EINVAL;
16678
16679 dasd_info = kzalloc(sizeof(struct dasd_information2_t), GFP_KERNEL);
16680@@ -303,7 +303,10 @@ static int dasd_ioctl_information(struct dasd_block *block,
16681 dasd_info->features |=
16682 ((base->features & DASD_FEATURE_READONLY) != 0);
16683
16684- memcpy(dasd_info->type, base->discipline->name, 4);
16685+ if (base->discipline)
16686+ memcpy(dasd_info->type, base->discipline->name, 4);
16687+ else
16688+ memcpy(dasd_info->type, "none", 4);
16689
16690 if (block->request_queue->request_fn) {
16691 struct list_head *l;
16692diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
16693index f9d7d38..654daa3 100644
16694--- a/drivers/s390/block/dasd_proc.c
16695+++ b/drivers/s390/block/dasd_proc.c
16696@@ -71,7 +71,7 @@ dasd_devices_show(struct seq_file *m, void *v)
16697 /* Print device number. */
16698 seq_printf(m, "%s", dev_name(&device->cdev->dev));
16699 /* Print discipline string. */
16700- if (device->discipline != NULL)
16701+ if (device != NULL && device->discipline != NULL)
16702 seq_printf(m, "(%s)", device->discipline->name);
16703 else
16704 seq_printf(m, "(none)");
16705@@ -91,7 +91,10 @@ dasd_devices_show(struct seq_file *m, void *v)
16706 substr = (device->features & DASD_FEATURE_READONLY) ? "(ro)" : " ";
16707 seq_printf(m, "%4s: ", substr);
16708 /* Print device status information. */
16709- switch (device->state) {
16710+ switch ((device != NULL) ? device->state : -1) {
16711+ case -1:
16712+ seq_printf(m, "unknown");
16713+ break;
16714 case DASD_STATE_NEW:
16715 seq_printf(m, "new");
16716 break;
16717diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
16718index 55f9973..2490b74 100644
16719--- a/drivers/s390/cio/device.c
16720+++ b/drivers/s390/cio/device.c
16721@@ -1292,7 +1292,7 @@ static int io_subchannel_probe(struct subchannel *sch)
16722 sch->private = kzalloc(sizeof(struct io_subchannel_private),
16723 GFP_KERNEL | GFP_DMA);
16724 if (!sch->private)
16725- goto out_schedule;
16726+ goto out_err;
16727 /*
16728 * First check if a fitting device may be found amongst the
16729 * disconnected devices or in the orphanage.
16730@@ -1317,7 +1317,7 @@ static int io_subchannel_probe(struct subchannel *sch)
16731 }
16732 cdev = io_subchannel_create_ccwdev(sch);
16733 if (IS_ERR(cdev))
16734- goto out_schedule;
16735+ goto out_err;
16736 rc = io_subchannel_recog(cdev, sch);
16737 if (rc) {
16738 spin_lock_irqsave(sch->lock, flags);
16739@@ -1325,7 +1325,9 @@ static int io_subchannel_probe(struct subchannel *sch)
16740 spin_unlock_irqrestore(sch->lock, flags);
16741 }
16742 return 0;
16743-
16744+out_err:
16745+ kfree(sch->private);
16746+ sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
16747 out_schedule:
16748 io_subchannel_schedule_removal(sch);
16749 return 0;
16750@@ -1339,14 +1341,13 @@ io_subchannel_remove (struct subchannel *sch)
16751
16752 cdev = sch_get_cdev(sch);
16753 if (!cdev)
16754- goto out_free;
16755+ return 0;
16756 /* Set ccw device to not operational and drop reference. */
16757 spin_lock_irqsave(cdev->ccwlock, flags);
16758 sch_set_cdev(sch, NULL);
16759 cdev->private->state = DEV_STATE_NOT_OPER;
16760 spin_unlock_irqrestore(cdev->ccwlock, flags);
16761 ccw_device_unregister(cdev);
16762-out_free:
16763 kfree(sch->private);
16764 sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
16765 return 0;
16766diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
16767index 13b703a..b9613d7 100644
16768--- a/drivers/s390/cio/device_fsm.c
16769+++ b/drivers/s390/cio/device_fsm.c
16770@@ -1080,14 +1080,14 @@ void ccw_device_trigger_reprobe(struct ccw_device *cdev)
16771 ccw_device_start_id(cdev, 0);
16772 }
16773
16774-static void ccw_device_disabled_irq(struct ccw_device *cdev,
16775- enum dev_event dev_event)
16776+static void
16777+ccw_device_offline_irq(struct ccw_device *cdev, enum dev_event dev_event)
16778 {
16779 struct subchannel *sch;
16780
16781 sch = to_subchannel(cdev->dev.parent);
16782 /*
16783- * An interrupt in a disabled state means a previous disable was not
16784+ * An interrupt in state offline means a previous disable was not
16785 * successful - should not happen, but we try to disable again.
16786 */
16787 cio_disable_subchannel(sch);
16788@@ -1150,12 +1150,25 @@ ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
16789 }
16790
16791 /*
16792+ * Bug operation action.
16793+ */
16794+static void
16795+ccw_device_bug(struct ccw_device *cdev, enum dev_event dev_event)
16796+{
16797+ CIO_MSG_EVENT(0, "Internal state [%i][%i] not handled for device "
16798+ "0.%x.%04x\n", cdev->private->state, dev_event,
16799+ cdev->private->dev_id.ssid,
16800+ cdev->private->dev_id.devno);
16801+ BUG();
16802+}
16803+
16804+/*
16805 * device statemachine
16806 */
16807 fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
16808 [DEV_STATE_NOT_OPER] = {
16809 [DEV_EVENT_NOTOPER] = ccw_device_nop,
16810- [DEV_EVENT_INTERRUPT] = ccw_device_disabled_irq,
16811+ [DEV_EVENT_INTERRUPT] = ccw_device_bug,
16812 [DEV_EVENT_TIMEOUT] = ccw_device_nop,
16813 [DEV_EVENT_VERIFY] = ccw_device_nop,
16814 },
16815@@ -1173,7 +1186,7 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
16816 },
16817 [DEV_STATE_OFFLINE] = {
16818 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
16819- [DEV_EVENT_INTERRUPT] = ccw_device_disabled_irq,
16820+ [DEV_EVENT_INTERRUPT] = ccw_device_offline_irq,
16821 [DEV_EVENT_TIMEOUT] = ccw_device_nop,
16822 [DEV_EVENT_VERIFY] = ccw_device_offline_verify,
16823 },
16824@@ -1230,7 +1243,7 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
16825 [DEV_STATE_DISCONNECTED] = {
16826 [DEV_EVENT_NOTOPER] = ccw_device_nop,
16827 [DEV_EVENT_INTERRUPT] = ccw_device_start_id,
16828- [DEV_EVENT_TIMEOUT] = ccw_device_nop,
16829+ [DEV_EVENT_TIMEOUT] = ccw_device_bug,
16830 [DEV_EVENT_VERIFY] = ccw_device_start_id,
16831 },
16832 [DEV_STATE_DISCONNECTED_SENSE_ID] = {
16833diff --git a/drivers/s390/crypto/zcrypt_pcicc.c b/drivers/s390/crypto/zcrypt_pcicc.c
16834index 7f1e3ba..f4b0c47 100644
16835--- a/drivers/s390/crypto/zcrypt_pcicc.c
16836+++ b/drivers/s390/crypto/zcrypt_pcicc.c
16837@@ -373,8 +373,6 @@ static int convert_type86(struct zcrypt_device *zdev,
16838 zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD;
16839 return -EAGAIN;
16840 }
16841- if (service_rc == 8 && service_rs == 72)
16842- return -EINVAL;
16843 zdev->online = 0;
16844 return -EAGAIN; /* repeat the request on a different device. */
16845 }
16846diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
16847index 1f9e923..5677b40 100644
16848--- a/drivers/s390/crypto/zcrypt_pcixcc.c
16849+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
16850@@ -462,8 +462,6 @@ static int convert_type86_ica(struct zcrypt_device *zdev,
16851 }
16852 if (service_rc == 12 && service_rs == 769)
16853 return -EINVAL;
16854- if (service_rc == 8 && service_rs == 72)
16855- return -EINVAL;
16856 zdev->online = 0;
16857 return -EAGAIN; /* repeat the request on a different device. */
16858 }
16859diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
16860index 395c04c..c84eadd 100644
16861--- a/drivers/s390/net/netiucv.c
16862+++ b/drivers/s390/net/netiucv.c
16863@@ -741,13 +741,13 @@ static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
16864 if (single_flag) {
16865 if ((skb = skb_dequeue(&conn->commit_queue))) {
16866 atomic_dec(&skb->users);
16867+ dev_kfree_skb_any(skb);
16868 if (privptr) {
16869 privptr->stats.tx_packets++;
16870 privptr->stats.tx_bytes +=
16871 (skb->len - NETIUCV_HDRLEN
16872- - NETIUCV_HDRLEN);
16873+ - NETIUCV_HDRLEN);
16874 }
16875- dev_kfree_skb_any(skb);
16876 }
16877 }
16878 conn->tx_buff->data = conn->tx_buff->head;
16879diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c
16880index bfec4fa..3ee1cbc 100644
16881--- a/drivers/scsi/device_handler/scsi_dh.c
16882+++ b/drivers/scsi/device_handler/scsi_dh.c
16883@@ -304,15 +304,18 @@ static int scsi_dh_notifier(struct notifier_block *nb,
16884 sdev = to_scsi_device(dev);
16885
16886 if (action == BUS_NOTIFY_ADD_DEVICE) {
16887- err = device_create_file(dev, &scsi_dh_state_attr);
16888- /* don't care about err */
16889 devinfo = device_handler_match(NULL, sdev);
16890- if (devinfo)
16891- err = scsi_dh_handler_attach(sdev, devinfo);
16892+ if (!devinfo)
16893+ goto out;
16894+
16895+ err = scsi_dh_handler_attach(sdev, devinfo);
16896+ if (!err)
16897+ err = device_create_file(dev, &scsi_dh_state_attr);
16898 } else if (action == BUS_NOTIFY_DEL_DEVICE) {
16899 device_remove_file(dev, &scsi_dh_state_attr);
16900 scsi_dh_handler_detach(sdev, NULL);
16901 }
16902+out:
16903 return err;
16904 }
16905
16906diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
16907index 70ab5d0..704b8e0 100644
16908--- a/drivers/scsi/fcoe/fcoe.c
16909+++ b/drivers/scsi/fcoe/fcoe.c
16910@@ -137,7 +137,7 @@ static struct scsi_host_template fcoe_shost_template = {
16911 .change_queue_depth = fc_change_queue_depth,
16912 .change_queue_type = fc_change_queue_type,
16913 .this_id = -1,
16914- .cmd_per_lun = 3,
16915+ .cmd_per_lun = 32,
16916 .can_queue = FCOE_MAX_OUTSTANDING_COMMANDS,
16917 .use_clustering = ENABLE_CLUSTERING,
16918 .sg_tablesize = SG_ALL,
16919@@ -160,7 +160,6 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe,
16920 {
16921 struct fcoe_ctlr *fip = &fcoe->ctlr;
16922 struct netdev_hw_addr *ha;
16923- struct net_device *real_dev;
16924 u8 flogi_maddr[ETH_ALEN];
16925
16926 fcoe->netdev = netdev;
16927@@ -174,12 +173,10 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe,
16928
16929 /* look for SAN MAC address, if multiple SAN MACs exist, only
16930 * use the first one for SPMA */
16931- real_dev = (netdev->priv_flags & IFF_802_1Q_VLAN) ?
16932- vlan_dev_real_dev(netdev) : netdev;
16933 rcu_read_lock();
16934- for_each_dev_addr(real_dev, ha) {
16935+ for_each_dev_addr(netdev, ha) {
16936 if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
16937- (is_valid_ether_addr(ha->addr))) {
16938+ (is_valid_ether_addr(fip->ctl_src_addr))) {
16939 memcpy(fip->ctl_src_addr, ha->addr, ETH_ALEN);
16940 fip->spma = 1;
16941 break;
16942@@ -667,7 +664,7 @@ static int fcoe_ddp_setup(struct fc_lport *lp, u16 xid,
16943 {
16944 struct net_device *n = fcoe_netdev(lp);
16945
16946- if (n->netdev_ops->ndo_fcoe_ddp_setup)
16947+ if (n->netdev_ops && n->netdev_ops->ndo_fcoe_ddp_setup)
16948 return n->netdev_ops->ndo_fcoe_ddp_setup(n, xid, sgl, sgc);
16949
16950 return 0;
16951@@ -684,7 +681,7 @@ static int fcoe_ddp_done(struct fc_lport *lp, u16 xid)
16952 {
16953 struct net_device *n = fcoe_netdev(lp);
16954
16955- if (n->netdev_ops->ndo_fcoe_ddp_done)
16956+ if (n->netdev_ops && n->netdev_ops->ndo_fcoe_ddp_done)
16957 return n->netdev_ops->ndo_fcoe_ddp_done(n, xid);
16958 return 0;
16959 }
16960@@ -1634,7 +1631,7 @@ static int fcoe_destroy(const char *buffer, struct kernel_param *kp)
16961 {
16962 struct fcoe_interface *fcoe;
16963 struct net_device *netdev;
16964- int rc = 0;
16965+ int rc;
16966
16967 mutex_lock(&fcoe_config_mutex);
16968 #ifdef CONFIG_FCOE_MODULE
16969diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
16970index 554626e..c968cc3 100644
16971--- a/drivers/scsi/hosts.c
16972+++ b/drivers/scsi/hosts.c
16973@@ -180,20 +180,14 @@ void scsi_remove_host(struct Scsi_Host *shost)
16974 EXPORT_SYMBOL(scsi_remove_host);
16975
16976 /**
16977- * scsi_add_host_with_dma - add a scsi host with dma device
16978+ * scsi_add_host - add a scsi host
16979 * @shost: scsi host pointer to add
16980 * @dev: a struct device of type scsi class
16981- * @dma_dev: dma device for the host
16982- *
16983- * Note: You rarely need to worry about this unless you're in a
16984- * virtualised host environments, so use the simpler scsi_add_host()
16985- * function instead.
16986 *
16987 * Return value:
16988 * 0 on success / != 0 for error
16989 **/
16990-int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
16991- struct device *dma_dev)
16992+int scsi_add_host(struct Scsi_Host *shost, struct device *dev)
16993 {
16994 struct scsi_host_template *sht = shost->hostt;
16995 int error = -EINVAL;
16996@@ -213,7 +207,6 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
16997
16998 if (!shost->shost_gendev.parent)
16999 shost->shost_gendev.parent = dev ? dev : &platform_bus;
17000- shost->dma_dev = dma_dev;
17001
17002 error = device_add(&shost->shost_gendev);
17003 if (error)
17004@@ -269,7 +262,7 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
17005 fail:
17006 return error;
17007 }
17008-EXPORT_SYMBOL(scsi_add_host_with_dma);
17009+EXPORT_SYMBOL(scsi_add_host);
17010
17011 static void scsi_host_dev_release(struct device *dev)
17012 {
17013diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
17014index c3ff9a6..76d294f 100644
17015--- a/drivers/scsi/ipr.c
17016+++ b/drivers/scsi/ipr.c
17017@@ -6516,7 +6516,6 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
17018 int rc;
17019
17020 ENTER;
17021- ioa_cfg->pdev->state_saved = true;
17022 rc = pci_restore_state(ioa_cfg->pdev);
17023
17024 if (rc != PCIBIOS_SUCCESSFUL) {
17025diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
17026index d4cb3f9..c48799e 100644
17027--- a/drivers/scsi/libfc/fc_disc.c
17028+++ b/drivers/scsi/libfc/fc_disc.c
17029@@ -371,7 +371,7 @@ static void fc_disc_gpn_ft_req(struct fc_disc *disc)
17030 disc, lport->e_d_tov))
17031 return;
17032 err:
17033- fc_disc_error(disc, NULL);
17034+ fc_disc_error(disc, fp);
17035 }
17036
17037 /**
17038diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c
17039index 9298458..5cfa687 100644
17040--- a/drivers/scsi/libfc/fc_elsct.c
17041+++ b/drivers/scsi/libfc/fc_elsct.c
17042@@ -53,10 +53,8 @@ static struct fc_seq *fc_elsct_send(struct fc_lport *lport,
17043 did = FC_FID_DIR_SERV;
17044 }
17045
17046- if (rc) {
17047- fc_frame_free(fp);
17048+ if (rc)
17049 return NULL;
17050- }
17051
17052 fc_fill_fc_hdr(fp, r_ctl, did, fc_host_port_id(lport->host), fh_type,
17053 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
17054diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
17055index 7a14402..59a4408 100644
17056--- a/drivers/scsi/libfc/fc_fcp.c
17057+++ b/drivers/scsi/libfc/fc_fcp.c
17058@@ -302,13 +302,10 @@ static void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp)
17059 if (!fsp)
17060 return;
17061
17062- if (fsp->xfer_ddp == FC_XID_UNKNOWN)
17063- return;
17064-
17065 lp = fsp->lp;
17066- if (lp->tt.ddp_done) {
17067+ if (fsp->xfer_ddp && lp->tt.ddp_done) {
17068 fsp->xfer_len = lp->tt.ddp_done(lp, fsp->xfer_ddp);
17069- fsp->xfer_ddp = FC_XID_UNKNOWN;
17070+ fsp->xfer_ddp = 0;
17071 }
17072 }
17073
17074@@ -575,8 +572,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
17075 tlen -= sg_bytes;
17076 remaining -= sg_bytes;
17077
17078- if ((skb_shinfo(fp_skb(fp))->nr_frags < FC_FRAME_SG_LEN) &&
17079- (tlen))
17080+ if (tlen)
17081 continue;
17082
17083 /*
17084@@ -1052,6 +1048,7 @@ static int fc_fcp_cmd_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
17085
17086 seq = lp->tt.exch_seq_send(lp, fp, resp, fc_fcp_pkt_destroy, fsp, 0);
17087 if (!seq) {
17088+ fc_frame_free(fp);
17089 rc = -1;
17090 goto unlock;
17091 }
17092@@ -1316,6 +1313,7 @@ static void fc_fcp_rec(struct fc_fcp_pkt *fsp)
17093 fc_fcp_pkt_hold(fsp); /* hold while REC outstanding */
17094 return;
17095 }
17096+ fc_frame_free(fp);
17097 retry:
17098 if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
17099 fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
17100@@ -1563,9 +1561,10 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
17101
17102 seq = lp->tt.exch_seq_send(lp, fp, fc_fcp_srr_resp, NULL,
17103 fsp, jiffies_to_msecs(FC_SCSI_REC_TOV));
17104- if (!seq)
17105+ if (!seq) {
17106+ fc_frame_free(fp);
17107 goto retry;
17108-
17109+ }
17110 fsp->recov_seq = seq;
17111 fsp->xfer_len = offset;
17112 fsp->xfer_contig_end = offset;
17113@@ -1709,7 +1708,6 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
17114 fsp->cmd = sc_cmd; /* save the cmd */
17115 fsp->lp = lp; /* save the softc ptr */
17116 fsp->rport = rport; /* set the remote port ptr */
17117- fsp->xfer_ddp = FC_XID_UNKNOWN;
17118 sc_cmd->scsi_done = done;
17119
17120 /*
17121@@ -1848,8 +1846,7 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
17122 * scsi status is good but transport level
17123 * underrun.
17124 */
17125- sc_cmd->result = (fsp->state & FC_SRB_RCV_STATUS ?
17126- DID_OK : DID_ERROR) << 16;
17127+ sc_cmd->result = DID_OK << 16;
17128 } else {
17129 /*
17130 * scsi got underrun, this is an error
17131@@ -2049,16 +2046,18 @@ EXPORT_SYMBOL(fc_eh_host_reset);
17132 int fc_slave_alloc(struct scsi_device *sdev)
17133 {
17134 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
17135+ int queue_depth;
17136
17137 if (!rport || fc_remote_port_chkready(rport))
17138 return -ENXIO;
17139
17140- if (sdev->tagged_supported)
17141- scsi_activate_tcq(sdev, FC_FCP_DFLT_QUEUE_DEPTH);
17142- else
17143- scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev),
17144- FC_FCP_DFLT_QUEUE_DEPTH);
17145-
17146+ if (sdev->tagged_supported) {
17147+ if (sdev->host->hostt->cmd_per_lun)
17148+ queue_depth = sdev->host->hostt->cmd_per_lun;
17149+ else
17150+ queue_depth = FC_FCP_DFLT_QUEUE_DEPTH;
17151+ scsi_activate_tcq(sdev, queue_depth);
17152+ }
17153 return 0;
17154 }
17155 EXPORT_SYMBOL(fc_slave_alloc);
17156diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
17157index 536492a..bd2f771 100644
17158--- a/drivers/scsi/libfc/fc_lport.c
17159+++ b/drivers/scsi/libfc/fc_lport.c
17160@@ -329,7 +329,7 @@ static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type)
17161 * @sp: current sequence in the RLIR exchange
17162 * @fp: RLIR request frame
17163 *
17164- * Locking Note: The lport lock is expected to be held before calling
17165+ * Locking Note: The lport lock is expected to be held before calling
17166 * this function.
17167 */
17168 static void fc_lport_recv_rlir_req(struct fc_seq *sp, struct fc_frame *fp,
17169@@ -348,7 +348,7 @@ static void fc_lport_recv_rlir_req(struct fc_seq *sp, struct fc_frame *fp,
17170 * @sp: current sequence in the ECHO exchange
17171 * @fp: ECHO request frame
17172 *
17173- * Locking Note: The lport lock is expected to be held before calling
17174+ * Locking Note: The lport lock is expected to be held before calling
17175 * this function.
17176 */
17177 static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp,
17178@@ -361,7 +361,7 @@ static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp,
17179 void *dp;
17180 u32 f_ctl;
17181
17182- FC_LPORT_DBG(lport, "Received ECHO request while in state %s\n",
17183+ FC_LPORT_DBG(lport, "Received ECHO request while in state %s\n",
17184 fc_lport_state(lport));
17185
17186 len = fr_len(in_fp) - sizeof(struct fc_frame_header);
17187@@ -374,7 +374,7 @@ static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp,
17188 if (fp) {
17189 dp = fc_frame_payload_get(fp, len);
17190 memcpy(dp, pp, len);
17191- *((__be32 *)dp) = htonl(ELS_LS_ACC << 24);
17192+ *((u32 *)dp) = htonl(ELS_LS_ACC << 24);
17193 sp = lport->tt.seq_start_next(sp);
17194 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
17195 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
17196@@ -385,12 +385,12 @@ static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp,
17197 }
17198
17199 /**
17200- * fc_lport_recv_rnid_req() - Handle received Request Node ID data request
17201- * @sp: The sequence in the RNID exchange
17202- * @fp: The RNID request frame
17203- * @lport: The local port recieving the RNID
17204+ * fc_lport_recv_rnid_req() - Handle received Request Node ID data request
17205+ * @lport: Fibre Channel local port receiving the RNID
17206+ * @sp: current sequence in the RNID exchange
17207+ * @fp: RNID request frame
17208 *
17209- * Locking Note: The lport lock is expected to be held before calling
17210+ * Locking Note: The lport lock is expected to be held before calling
17211 * this function.
17212 */
17213 static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp,
17214@@ -667,7 +667,7 @@ static void fc_lport_enter_ready(struct fc_lport *lport)
17215 * Accept it with the common service parameters indicating our N port.
17216 * Set up to do a PLOGI if we have the higher-number WWPN.
17217 *
17218- * Locking Note: The lport lock is expected to be held before calling
17219+ * Locking Note: The lport lock is expected to be held before calling
17220 * this function.
17221 */
17222 static void fc_lport_recv_flogi_req(struct fc_seq *sp_in,
17223@@ -1115,7 +1115,7 @@ static void fc_lport_enter_scr(struct fc_lport *lport)
17224
17225 if (!lport->tt.elsct_send(lport, FC_FID_FCTRL, fp, ELS_SCR,
17226 fc_lport_scr_resp, lport, lport->e_d_tov))
17227- fc_lport_error(lport, NULL);
17228+ fc_lport_error(lport, fp);
17229 }
17230
17231 /**
17232@@ -1186,7 +1186,7 @@ static void fc_lport_enter_rpn_id(struct fc_lport *lport)
17233 if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RPN_ID,
17234 fc_lport_rpn_id_resp,
17235 lport, lport->e_d_tov))
17236- fc_lport_error(lport, NULL);
17237+ fc_lport_error(lport, fp);
17238 }
17239
17240 static struct fc_rport_operations fc_lport_rport_ops = {
17241@@ -1237,12 +1237,9 @@ static void fc_lport_timeout(struct work_struct *work)
17242
17243 switch (lport->state) {
17244 case LPORT_ST_DISABLED:
17245- WARN_ON(1);
17246- break;
17247 case LPORT_ST_READY:
17248- WARN_ON(1);
17249- break;
17250 case LPORT_ST_RESET:
17251+ WARN_ON(1);
17252 break;
17253 case LPORT_ST_FLOGI:
17254 fc_lport_enter_flogi(lport);
17255@@ -1340,7 +1337,7 @@ static void fc_lport_enter_logo(struct fc_lport *lport)
17256
17257 if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_LOGO,
17258 fc_lport_logo_resp, lport, lport->e_d_tov))
17259- fc_lport_error(lport, NULL);
17260+ fc_lport_error(lport, fp);
17261 }
17262
17263 /**
17264@@ -1456,7 +1453,7 @@ void fc_lport_enter_flogi(struct fc_lport *lport)
17265
17266 if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_FLOGI,
17267 fc_lport_flogi_resp, lport, lport->e_d_tov))
17268- fc_lport_error(lport, NULL);
17269+ fc_lport_error(lport, fp);
17270 }
17271
17272 /* Configure a fc_lport */
17273diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
17274index ff558a6..03ea674 100644
17275--- a/drivers/scsi/libfc/fc_rport.c
17276+++ b/drivers/scsi/libfc/fc_rport.c
17277@@ -86,7 +86,6 @@ static const char *fc_rport_state_names[] = {
17278 [RPORT_ST_LOGO] = "LOGO",
17279 [RPORT_ST_ADISC] = "ADISC",
17280 [RPORT_ST_DELETE] = "Delete",
17281- [RPORT_ST_RESTART] = "Restart",
17282 };
17283
17284 /**
17285@@ -100,7 +99,8 @@ static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport,
17286 struct fc_rport_priv *rdata;
17287
17288 list_for_each_entry(rdata, &lport->disc.rports, peers)
17289- if (rdata->ids.port_id == port_id)
17290+ if (rdata->ids.port_id == port_id &&
17291+ rdata->rp_state != RPORT_ST_DELETE)
17292 return rdata;
17293 return NULL;
17294 }
17295@@ -235,7 +235,6 @@ static void fc_rport_work(struct work_struct *work)
17296 struct fc_rport_operations *rport_ops;
17297 struct fc_rport_identifiers ids;
17298 struct fc_rport *rport;
17299- int restart = 0;
17300
17301 mutex_lock(&rdata->rp_mutex);
17302 event = rdata->event;
17303@@ -288,20 +287,8 @@ static void fc_rport_work(struct work_struct *work)
17304 mutex_unlock(&rdata->rp_mutex);
17305
17306 if (port_id != FC_FID_DIR_SERV) {
17307- /*
17308- * We must drop rp_mutex before taking disc_mutex.
17309- * Re-evaluate state to allow for restart.
17310- * A transition to RESTART state must only happen
17311- * while disc_mutex is held and rdata is on the list.
17312- */
17313 mutex_lock(&lport->disc.disc_mutex);
17314- mutex_lock(&rdata->rp_mutex);
17315- if (rdata->rp_state == RPORT_ST_RESTART)
17316- restart = 1;
17317- else
17318- list_del(&rdata->peers);
17319- rdata->event = RPORT_EV_NONE;
17320- mutex_unlock(&rdata->rp_mutex);
17321+ list_del(&rdata->peers);
17322 mutex_unlock(&lport->disc.disc_mutex);
17323 }
17324
17325@@ -325,13 +312,7 @@ static void fc_rport_work(struct work_struct *work)
17326 mutex_unlock(&rdata->rp_mutex);
17327 fc_remote_port_delete(rport);
17328 }
17329- if (restart) {
17330- mutex_lock(&rdata->rp_mutex);
17331- FC_RPORT_DBG(rdata, "work restart\n");
17332- fc_rport_enter_plogi(rdata);
17333- mutex_unlock(&rdata->rp_mutex);
17334- } else
17335- kref_put(&rdata->kref, lport->tt.rport_destroy);
17336+ kref_put(&rdata->kref, lport->tt.rport_destroy);
17337 break;
17338
17339 default:
17340@@ -361,12 +342,6 @@ int fc_rport_login(struct fc_rport_priv *rdata)
17341 FC_RPORT_DBG(rdata, "ADISC port\n");
17342 fc_rport_enter_adisc(rdata);
17343 break;
17344- case RPORT_ST_RESTART:
17345- break;
17346- case RPORT_ST_DELETE:
17347- FC_RPORT_DBG(rdata, "Restart deleted port\n");
17348- fc_rport_state_enter(rdata, RPORT_ST_RESTART);
17349- break;
17350 default:
17351 FC_RPORT_DBG(rdata, "Login to port\n");
17352 fc_rport_enter_plogi(rdata);
17353@@ -422,21 +397,20 @@ int fc_rport_logoff(struct fc_rport_priv *rdata)
17354
17355 if (rdata->rp_state == RPORT_ST_DELETE) {
17356 FC_RPORT_DBG(rdata, "Port in Delete state, not removing\n");
17357+ mutex_unlock(&rdata->rp_mutex);
17358 goto out;
17359 }
17360
17361- if (rdata->rp_state == RPORT_ST_RESTART)
17362- FC_RPORT_DBG(rdata, "Port in Restart state, deleting\n");
17363- else
17364- fc_rport_enter_logo(rdata);
17365+ fc_rport_enter_logo(rdata);
17366
17367 /*
17368 * Change the state to Delete so that we discard
17369 * the response.
17370 */
17371 fc_rport_enter_delete(rdata, RPORT_EV_STOP);
17372-out:
17373 mutex_unlock(&rdata->rp_mutex);
17374+
17375+out:
17376 return 0;
17377 }
17378
17379@@ -492,7 +466,6 @@ static void fc_rport_timeout(struct work_struct *work)
17380 case RPORT_ST_READY:
17381 case RPORT_ST_INIT:
17382 case RPORT_ST_DELETE:
17383- case RPORT_ST_RESTART:
17384 break;
17385 }
17386
17387@@ -526,7 +499,6 @@ static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp)
17388 fc_rport_enter_logo(rdata);
17389 break;
17390 case RPORT_ST_DELETE:
17391- case RPORT_ST_RESTART:
17392 case RPORT_ST_READY:
17393 case RPORT_ST_INIT:
17394 break;
17395@@ -660,7 +632,7 @@ static void fc_rport_enter_plogi(struct fc_rport_priv *rdata)
17396
17397 if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PLOGI,
17398 fc_rport_plogi_resp, rdata, lport->e_d_tov))
17399- fc_rport_error_retry(rdata, NULL);
17400+ fc_rport_error_retry(rdata, fp);
17401 else
17402 kref_get(&rdata->kref);
17403 }
17404@@ -821,7 +793,7 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
17405
17406 if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PRLI,
17407 fc_rport_prli_resp, rdata, lport->e_d_tov))
17408- fc_rport_error_retry(rdata, NULL);
17409+ fc_rport_error_retry(rdata, fp);
17410 else
17411 kref_get(&rdata->kref);
17412 }
17413@@ -917,7 +889,7 @@ static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
17414
17415 if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_RTV,
17416 fc_rport_rtv_resp, rdata, lport->e_d_tov))
17417- fc_rport_error_retry(rdata, NULL);
17418+ fc_rport_error_retry(rdata, fp);
17419 else
17420 kref_get(&rdata->kref);
17421 }
17422@@ -947,7 +919,7 @@ static void fc_rport_enter_logo(struct fc_rport_priv *rdata)
17423
17424 if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_LOGO,
17425 fc_rport_logo_resp, rdata, lport->e_d_tov))
17426- fc_rport_error_retry(rdata, NULL);
17427+ fc_rport_error_retry(rdata, fp);
17428 else
17429 kref_get(&rdata->kref);
17430 }
17431@@ -1034,7 +1006,7 @@ static void fc_rport_enter_adisc(struct fc_rport_priv *rdata)
17432 }
17433 if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_ADISC,
17434 fc_rport_adisc_resp, rdata, lport->e_d_tov))
17435- fc_rport_error_retry(rdata, NULL);
17436+ fc_rport_error_retry(rdata, fp);
17437 else
17438 kref_get(&rdata->kref);
17439 }
17440@@ -1276,7 +1248,6 @@ static void fc_rport_recv_plogi_req(struct fc_lport *lport,
17441 }
17442 break;
17443 case RPORT_ST_PRLI:
17444- case RPORT_ST_RTV:
17445 case RPORT_ST_READY:
17446 case RPORT_ST_ADISC:
17447 FC_RPORT_DBG(rdata, "Received PLOGI in logged-in state %d "
17448@@ -1284,14 +1255,11 @@ static void fc_rport_recv_plogi_req(struct fc_lport *lport,
17449 /* XXX TBD - should reset */
17450 break;
17451 case RPORT_ST_DELETE:
17452- case RPORT_ST_LOGO:
17453- case RPORT_ST_RESTART:
17454- FC_RPORT_DBG(rdata, "Received PLOGI in state %s - send busy\n",
17455- fc_rport_state(rdata));
17456- mutex_unlock(&rdata->rp_mutex);
17457- rjt_data.reason = ELS_RJT_BUSY;
17458- rjt_data.explan = ELS_EXPL_NONE;
17459- goto reject;
17460+ default:
17461+ FC_RPORT_DBG(rdata, "Received PLOGI in unexpected state %d\n",
17462+ rdata->rp_state);
17463+ fc_frame_free(rx_fp);
17464+ goto out;
17465 }
17466
17467 /*
17468@@ -1434,7 +1402,7 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
17469 break;
17470 case FC_TYPE_FCP:
17471 fcp_parm = ntohl(rspp->spp_params);
17472- if (fcp_parm & FCP_SPPF_RETRY)
17473+ if (fcp_parm & FCP_SPPF_RETRY)
17474 rdata->flags |= FC_RP_FLAGS_RETRY;
17475 rdata->supported_classes = FC_COS_CLASS3;
17476 if (fcp_parm & FCP_SPPF_INIT_FCN)
17477@@ -1542,14 +1510,14 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport,
17478 FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
17479 fc_rport_state(rdata));
17480
17481- fc_rport_enter_delete(rdata, RPORT_EV_LOGO);
17482-
17483 /*
17484- * If the remote port was created due to discovery, set state
17485- * to log back in. It may have seen a stale RSCN about us.
17486+ * If the remote port was created due to discovery,
17487+ * log back in. It may have seen a stale RSCN about us.
17488 */
17489- if (rdata->disc_id)
17490- fc_rport_state_enter(rdata, RPORT_ST_RESTART);
17491+ if (rdata->rp_state != RPORT_ST_DELETE && rdata->disc_id)
17492+ fc_rport_enter_plogi(rdata);
17493+ else
17494+ fc_rport_enter_delete(rdata, RPORT_EV_LOGO);
17495 mutex_unlock(&rdata->rp_mutex);
17496 } else
17497 FC_RPORT_ID_DBG(lport, sid,
17498diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
17499index 549bc7d..562d8ce 100644
17500--- a/drivers/scsi/lpfc/lpfc_init.c
17501+++ b/drivers/scsi/lpfc/lpfc_init.c
17502@@ -2408,7 +2408,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
17503 vport->els_tmofunc.function = lpfc_els_timeout;
17504 vport->els_tmofunc.data = (unsigned long)vport;
17505
17506- error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
17507+ error = scsi_add_host(shost, dev);
17508 if (error)
17509 goto out_put_shost;
17510
17511@@ -4384,13 +4384,9 @@ lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
17512 pdev = phba->pcidev;
17513
17514 /* Set the device DMA mask size */
17515- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
17516- || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) {
17517- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
17518- || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) {
17519+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
17520+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
17521 return error;
17522- }
17523- }
17524
17525 /* Get the bus address of Bar0 and Bar2 and the number of bytes
17526 * required by each mapping.
17527@@ -5944,13 +5940,9 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
17528 pdev = phba->pcidev;
17529
17530 /* Set the device DMA mask size */
17531- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
17532- || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) {
17533- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
17534- || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) {
17535+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
17536+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
17537 return error;
17538- }
17539- }
17540
17541 /* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the
17542 * number of bytes required by each mapping. They are actually
17543diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
17544index 518712c..a39addc 100644
17545--- a/drivers/scsi/megaraid/megaraid_sas.c
17546+++ b/drivers/scsi/megaraid/megaraid_sas.c
17547@@ -3032,7 +3032,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
17548 int error = 0, i;
17549 void *sense = NULL;
17550 dma_addr_t sense_handle;
17551- unsigned long *sense_ptr;
17552+ u32 *sense_ptr;
17553
17554 memset(kbuff_arr, 0, sizeof(kbuff_arr));
17555
17556@@ -3109,7 +3109,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
17557 }
17558
17559 sense_ptr =
17560- (unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off);
17561+ (u32 *) ((unsigned long)cmd->frame + ioc->sense_off);
17562 *sense_ptr = sense_handle;
17563 }
17564
17565@@ -3140,8 +3140,8 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
17566 * sense_ptr points to the location that has the user
17567 * sense buffer address
17568 */
17569- sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw +
17570- ioc->sense_off);
17571+ sense_ptr = (u32 *) ((unsigned long)ioc->frame.raw +
17572+ ioc->sense_off);
17573
17574 if (copy_to_user((void __user *)((unsigned long)(*sense_ptr)),
17575 sense, ioc->sense_len)) {
17576@@ -3451,7 +3451,7 @@ out:
17577 return retval;
17578 }
17579
17580-static DRIVER_ATTR(poll_mode_io, S_IRUGO|S_IWUSR,
17581+static DRIVER_ATTR(poll_mode_io, S_IRUGO|S_IWUGO,
17582 megasas_sysfs_show_poll_mode_io,
17583 megasas_sysfs_set_poll_mode_io);
17584
17585diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
17586index 5af66db..ab47c46 100644
17587--- a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
17588+++ b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
17589@@ -348,14 +348,6 @@ typedef struct _MPI2_CONFIG_REPLY
17590 #define MPI2_MFGPAGE_DEVID_SAS2108_3 (0x0077)
17591 #define MPI2_MFGPAGE_DEVID_SAS2116_1 (0x0064)
17592 #define MPI2_MFGPAGE_DEVID_SAS2116_2 (0x0065)
17593-#define MPI2_MFGPAGE_DEVID_SAS2208_1 (0x0080)
17594-#define MPI2_MFGPAGE_DEVID_SAS2208_2 (0x0081)
17595-#define MPI2_MFGPAGE_DEVID_SAS2208_3 (0x0082)
17596-#define MPI2_MFGPAGE_DEVID_SAS2208_4 (0x0083)
17597-#define MPI2_MFGPAGE_DEVID_SAS2208_5 (0x0084)
17598-#define MPI2_MFGPAGE_DEVID_SAS2208_6 (0x0085)
17599-#define MPI2_MFGPAGE_DEVID_SAS2208_7 (0x0086)
17600-#define MPI2_MFGPAGE_DEVID_SAS2208_8 (0x0087)
17601
17602
17603 /* Manufacturing Page 0 */
17604diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
17605index 1743640..86ab32d 100644
17606--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
17607+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
17608@@ -196,28 +196,10 @@ static struct pci_device_id scsih_pci_table[] = {
17609 PCI_ANY_ID, PCI_ANY_ID },
17610 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3,
17611 PCI_ANY_ID, PCI_ANY_ID },
17612- /* Meteor ~ 2116 */
17613 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1,
17614 PCI_ANY_ID, PCI_ANY_ID },
17615 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2,
17616 PCI_ANY_ID, PCI_ANY_ID },
17617- /* Thunderbolt ~ 2208 */
17618- { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1,
17619- PCI_ANY_ID, PCI_ANY_ID },
17620- { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_2,
17621- PCI_ANY_ID, PCI_ANY_ID },
17622- { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_3,
17623- PCI_ANY_ID, PCI_ANY_ID },
17624- { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_4,
17625- PCI_ANY_ID, PCI_ANY_ID },
17626- { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_5,
17627- PCI_ANY_ID, PCI_ANY_ID },
17628- { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6,
17629- PCI_ANY_ID, PCI_ANY_ID },
17630- { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_7,
17631- PCI_ANY_ID, PCI_ANY_ID },
17632- { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_8,
17633- PCI_ANY_ID, PCI_ANY_ID },
17634 {0} /* Terminating entry */
17635 };
17636 MODULE_DEVICE_TABLE(pci, scsih_pci_table);
17637diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
17638index 21e2bc4..fbcb82a 100644
17639--- a/drivers/scsi/qla2xxx/qla_attr.c
17640+++ b/drivers/scsi/qla2xxx/qla_attr.c
17641@@ -1654,8 +1654,7 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
17642 fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
17643 }
17644
17645- if (scsi_add_host_with_dma(vha->host, &fc_vport->dev,
17646- &ha->pdev->dev)) {
17647+ if (scsi_add_host(vha->host, &fc_vport->dev)) {
17648 DEBUG15(printk("scsi(%ld): scsi_add_host failure for VP[%d].\n",
17649 vha->host_no, vha->vp_idx));
17650 goto vport_create_failed_2;
17651diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
17652index 06bbe0d..b79fca7 100644
17653--- a/drivers/scsi/qla2xxx/qla_os.c
17654+++ b/drivers/scsi/qla2xxx/qla_os.c
17655@@ -2016,13 +2016,13 @@ skip_dpc:
17656 DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n",
17657 base_vha->host_no, ha));
17658
17659+ base_vha->flags.init_done = 1;
17660+ base_vha->flags.online = 1;
17661+
17662 ret = scsi_add_host(host, &pdev->dev);
17663 if (ret)
17664 goto probe_failed;
17665
17666- base_vha->flags.init_done = 1;
17667- base_vha->flags.online = 1;
17668-
17669 ha->isp_ops->enable_intrs(ha);
17670
17671 scsi_scan_host(host);
17672diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
17673index 802e91c..93c2622 100644
17674--- a/drivers/scsi/scsi_devinfo.c
17675+++ b/drivers/scsi/scsi_devinfo.c
17676@@ -168,10 +168,11 @@ static struct {
17677 {"Generic", "USB SD Reader", "1.00", BLIST_FORCELUN | BLIST_INQUIRY_36},
17678 {"Generic", "USB Storage-SMC", "0180", BLIST_FORCELUN | BLIST_INQUIRY_36},
17679 {"Generic", "USB Storage-SMC", "0207", BLIST_FORCELUN | BLIST_INQUIRY_36},
17680- {"HITACHI", "DF400", "*", BLIST_REPORTLUN2},
17681- {"HITACHI", "DF500", "*", BLIST_REPORTLUN2},
17682- {"HITACHI", "DISK-SUBSYSTEM", "*", BLIST_REPORTLUN2},
17683- {"HITACHI", "OPEN-", "*", BLIST_REPORTLUN2},
17684+ {"HITACHI", "DF400", "*", BLIST_SPARSELUN},
17685+ {"HITACHI", "DF500", "*", BLIST_SPARSELUN},
17686+ {"HITACHI", "DF600", "*", BLIST_SPARSELUN},
17687+ {"HITACHI", "DISK-SUBSYSTEM", "*", BLIST_ATTACH_PQ3 | BLIST_SPARSELUN | BLIST_LARGELUN},
17688+ {"HITACHI", "OPEN-E", "*", BLIST_ATTACH_PQ3 | BLIST_SPARSELUN | BLIST_LARGELUN},
17689 {"HITACHI", "OP-C-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
17690 {"HITACHI", "3380-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
17691 {"HITACHI", "3390-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
17692diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
17693index bc9a881..5987da8 100644
17694--- a/drivers/scsi/scsi_lib.c
17695+++ b/drivers/scsi/scsi_lib.c
17696@@ -749,9 +749,9 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
17697 */
17698 req->next_rq->resid_len = scsi_in(cmd)->resid;
17699
17700- scsi_release_buffers(cmd);
17701 blk_end_request_all(req, 0);
17702
17703+ scsi_release_buffers(cmd);
17704 scsi_next_command(cmd);
17705 return;
17706 }
17707diff --git a/drivers/scsi/scsi_lib_dma.c b/drivers/scsi/scsi_lib_dma.c
17708index dcd1285..ac6855c 100644
17709--- a/drivers/scsi/scsi_lib_dma.c
17710+++ b/drivers/scsi/scsi_lib_dma.c
17711@@ -23,7 +23,7 @@ int scsi_dma_map(struct scsi_cmnd *cmd)
17712 int nseg = 0;
17713
17714 if (scsi_sg_count(cmd)) {
17715- struct device *dev = cmd->device->host->dma_dev;
17716+ struct device *dev = cmd->device->host->shost_gendev.parent;
17717
17718 nseg = dma_map_sg(dev, scsi_sglist(cmd), scsi_sg_count(cmd),
17719 cmd->sc_data_direction);
17720@@ -41,7 +41,7 @@ EXPORT_SYMBOL(scsi_dma_map);
17721 void scsi_dma_unmap(struct scsi_cmnd *cmd)
17722 {
17723 if (scsi_sg_count(cmd)) {
17724- struct device *dev = cmd->device->host->dma_dev;
17725+ struct device *dev = cmd->device->host->shost_gendev.parent;
17726
17727 dma_unmap_sg(dev, scsi_sglist(cmd), scsi_sg_count(cmd),
17728 cmd->sc_data_direction);
17729diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
17730index bf52dec..c6f70da 100644
17731--- a/drivers/scsi/scsi_transport_fc.c
17732+++ b/drivers/scsi/scsi_transport_fc.c
17733@@ -648,22 +648,11 @@ static __init int fc_transport_init(void)
17734 return error;
17735 error = transport_class_register(&fc_vport_class);
17736 if (error)
17737- goto unreg_host_class;
17738+ return error;
17739 error = transport_class_register(&fc_rport_class);
17740 if (error)
17741- goto unreg_vport_class;
17742- error = transport_class_register(&fc_transport_class);
17743- if (error)
17744- goto unreg_rport_class;
17745- return 0;
17746-
17747-unreg_rport_class:
17748- transport_class_unregister(&fc_rport_class);
17749-unreg_vport_class:
17750- transport_class_unregister(&fc_vport_class);
17751-unreg_host_class:
17752- transport_class_unregister(&fc_host_class);
17753- return error;
17754+ return error;
17755+ return transport_class_register(&fc_transport_class);
17756 }
17757
17758 static void __exit fc_transport_exit(void)
17759@@ -2395,7 +2384,6 @@ fc_rport_final_delete(struct work_struct *work)
17760 struct Scsi_Host *shost = rport_to_shost(rport);
17761 struct fc_internal *i = to_fc_internal(shost->transportt);
17762 unsigned long flags;
17763- int do_callback = 0;
17764
17765 /*
17766 * if a scan is pending, flush the SCSI Host work_q so that
17767@@ -2434,15 +2422,8 @@ fc_rport_final_delete(struct work_struct *work)
17768 * Avoid this call if we already called it when we preserved the
17769 * rport for the binding.
17770 */
17771- spin_lock_irqsave(shost->host_lock, flags);
17772 if (!(rport->flags & FC_RPORT_DEVLOSS_CALLBK_DONE) &&
17773- (i->f->dev_loss_tmo_callbk)) {
17774- rport->flags |= FC_RPORT_DEVLOSS_CALLBK_DONE;
17775- do_callback = 1;
17776- }
17777- spin_unlock_irqrestore(shost->host_lock, flags);
17778-
17779- if (do_callback)
17780+ (i->f->dev_loss_tmo_callbk))
17781 i->f->dev_loss_tmo_callbk(rport);
17782
17783 fc_bsg_remove(rport->rqst_q);
17784@@ -2989,7 +2970,6 @@ fc_timeout_deleted_rport(struct work_struct *work)
17785 struct fc_internal *i = to_fc_internal(shost->transportt);
17786 struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
17787 unsigned long flags;
17788- int do_callback = 0;
17789
17790 spin_lock_irqsave(shost->host_lock, flags);
17791
17792@@ -3055,6 +3035,7 @@ fc_timeout_deleted_rport(struct work_struct *work)
17793 rport->roles = FC_PORT_ROLE_UNKNOWN;
17794 rport->port_state = FC_PORTSTATE_NOTPRESENT;
17795 rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT;
17796+ rport->flags |= FC_RPORT_DEVLOSS_CALLBK_DONE;
17797
17798 /*
17799 * Pre-emptively kill I/O rather than waiting for the work queue
17800@@ -3064,40 +3045,32 @@ fc_timeout_deleted_rport(struct work_struct *work)
17801 spin_unlock_irqrestore(shost->host_lock, flags);
17802 fc_terminate_rport_io(rport);
17803
17804- spin_lock_irqsave(shost->host_lock, flags);
17805-
17806- if (rport->port_state == FC_PORTSTATE_NOTPRESENT) { /* still missing */
17807-
17808- /* remove the identifiers that aren't used in the consisting binding */
17809- switch (fc_host->tgtid_bind_type) {
17810- case FC_TGTID_BIND_BY_WWPN:
17811- rport->node_name = -1;
17812- rport->port_id = -1;
17813- break;
17814- case FC_TGTID_BIND_BY_WWNN:
17815- rport->port_name = -1;
17816- rport->port_id = -1;
17817- break;
17818- case FC_TGTID_BIND_BY_ID:
17819- rport->node_name = -1;
17820- rport->port_name = -1;
17821- break;
17822- case FC_TGTID_BIND_NONE: /* to keep compiler happy */
17823- break;
17824- }
17825-
17826- /*
17827- * As this only occurs if the remote port (scsi target)
17828- * went away and didn't come back - we'll remove
17829- * all attached scsi devices.
17830- */
17831- rport->flags |= FC_RPORT_DEVLOSS_CALLBK_DONE;
17832- fc_queue_work(shost, &rport->stgt_delete_work);
17833+ BUG_ON(rport->port_state != FC_PORTSTATE_NOTPRESENT);
17834
17835- do_callback = 1;
17836+ /* remove the identifiers that aren't used in the consisting binding */
17837+ switch (fc_host->tgtid_bind_type) {
17838+ case FC_TGTID_BIND_BY_WWPN:
17839+ rport->node_name = -1;
17840+ rport->port_id = -1;
17841+ break;
17842+ case FC_TGTID_BIND_BY_WWNN:
17843+ rport->port_name = -1;
17844+ rport->port_id = -1;
17845+ break;
17846+ case FC_TGTID_BIND_BY_ID:
17847+ rport->node_name = -1;
17848+ rport->port_name = -1;
17849+ break;
17850+ case FC_TGTID_BIND_NONE: /* to keep compiler happy */
17851+ break;
17852 }
17853
17854- spin_unlock_irqrestore(shost->host_lock, flags);
17855+ /*
17856+ * As this only occurs if the remote port (scsi target)
17857+ * went away and didn't come back - we'll remove
17858+ * all attached scsi devices.
17859+ */
17860+ fc_queue_work(shost, &rport->stgt_delete_work);
17861
17862 /*
17863 * Notify the driver that the rport is now dead. The LLDD will
17864@@ -3105,7 +3078,7 @@ fc_timeout_deleted_rport(struct work_struct *work)
17865 *
17866 * Note: we set the CALLBK_DONE flag above to correspond
17867 */
17868- if (do_callback && i->f->dev_loss_tmo_callbk)
17869+ if (i->f->dev_loss_tmo_callbk)
17870 i->f->dev_loss_tmo_callbk(rport);
17871 }
17872
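[annotation] Two patterns are swapped out of the fc_transport hunks above. The removed fc_transport_init() uses the usual goto-unwind idiom, so a late transport_class_register() failure unregisters everything that already succeeded, while the restored code returns early and leaves the earlier registrations in place on failure; the removed dev_loss paths likewise set FC_RPORT_DEVLOSS_CALLBK_DONE under host_lock and defer the LLDD callback until the lock is dropped. A generic sketch of the unwind idiom, with hypothetical register_a/b/c helpers:

	/* Sketch of goto-based unwinding: each label undoes exactly the
	 * registrations that had already succeeded when the failure hit. */
	static int example_init(void)
	{
		int error;

		error = register_a();
		if (error)
			return error;
		error = register_b();
		if (error)
			goto unreg_a;
		error = register_c();
		if (error)
			goto unreg_b;
		return 0;

	unreg_b:
		unregister_b();
	unreg_a:
		unregister_a();
		return error;
	}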
17873diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
17874index de2f8c4..ad897df 100644
17875--- a/drivers/scsi/scsi_transport_iscsi.c
17876+++ b/drivers/scsi/scsi_transport_iscsi.c
17877@@ -627,10 +627,8 @@ static void __iscsi_block_session(struct work_struct *work)
17878 spin_unlock_irqrestore(&session->lock, flags);
17879 scsi_target_block(&session->dev);
17880 ISCSI_DBG_TRANS_SESSION(session, "Completed SCSI target blocking\n");
17881- if (session->recovery_tmo >= 0)
17882- queue_delayed_work(iscsi_eh_timer_workq,
17883- &session->recovery_work,
17884- session->recovery_tmo * HZ);
17885+ queue_delayed_work(iscsi_eh_timer_workq, &session->recovery_work,
17886+ session->recovery_tmo * HZ);
17887 }
17888
17889 void iscsi_block_session(struct iscsi_cls_session *session)
17890@@ -1350,7 +1348,8 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
17891 switch (ev->u.set_param.param) {
17892 case ISCSI_PARAM_SESS_RECOVERY_TMO:
17893 sscanf(data, "%d", &value);
17894- session->recovery_tmo = value;
17895+ if (value != 0)
17896+ session->recovery_tmo = value;
17897 break;
17898 default:
17899 err = transport->set_param(conn, ev->u.set_param.param,
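[annotation] In the iscsi hunks above, the removed side treats a negative recovery_tmo as "leave the session blocked indefinitely", so __iscsi_block_session() only queues the delayed work for non-negative values and iscsi_set_param() stores any value as-is; the restored side always queues and instead filters out zero in the setter. The guarded scheduling, sketched with the names from the hunk:

	/* Sketch: only arm the recovery timer when a non-negative timeout is
	 * configured; recovery_tmo < 0 means no timer is armed at all. */
	if (session->recovery_tmo >= 0)
		queue_delayed_work(iscsi_eh_timer_workq, &session->recovery_work,
				   session->recovery_tmo * HZ);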
17900diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
17901index 5081f97..12d58a7 100644
17902--- a/drivers/scsi/st.c
17903+++ b/drivers/scsi/st.c
17904@@ -552,15 +552,13 @@ st_do_scsi(struct st_request * SRpnt, struct scsi_tape * STp, unsigned char *cmd
17905 SRpnt->waiting = waiting;
17906
17907 if (STp->buffer->do_dio) {
17908- mdata->page_order = 0;
17909 mdata->nr_entries = STp->buffer->sg_segs;
17910 mdata->pages = STp->buffer->mapped_pages;
17911 } else {
17912- mdata->page_order = STp->buffer->reserved_page_order;
17913 mdata->nr_entries =
17914 DIV_ROUND_UP(bytes, PAGE_SIZE << mdata->page_order);
17915- mdata->pages = STp->buffer->reserved_pages;
17916- mdata->offset = 0;
17917+ STp->buffer->map_data.pages = STp->buffer->reserved_pages;
17918+ STp->buffer->map_data.offset = 0;
17919 }
17920
17921 memcpy(SRpnt->cmd, cmd, sizeof(SRpnt->cmd));
17922@@ -3720,7 +3718,7 @@ static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dm
17923 priority |= __GFP_ZERO;
17924
17925 if (STbuffer->frp_segs) {
17926- order = STbuffer->reserved_page_order;
17927+ order = STbuffer->map_data.page_order;
17928 b_size = PAGE_SIZE << order;
17929 } else {
17930 for (b_size = PAGE_SIZE, order = 0;
17931@@ -3753,7 +3751,7 @@ static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dm
17932 segs++;
17933 }
17934 STbuffer->b_data = page_address(STbuffer->reserved_pages[0]);
17935- STbuffer->reserved_page_order = order;
17936+ STbuffer->map_data.page_order = order;
17937
17938 return 1;
17939 }
17940@@ -3766,7 +3764,7 @@ static void clear_buffer(struct st_buffer * st_bp)
17941
17942 for (i=0; i < st_bp->frp_segs; i++)
17943 memset(page_address(st_bp->reserved_pages[i]), 0,
17944- PAGE_SIZE << st_bp->reserved_page_order);
17945+ PAGE_SIZE << st_bp->map_data.page_order);
17946 st_bp->cleared = 1;
17947 }
17948
17949@@ -3774,7 +3772,7 @@ static void clear_buffer(struct st_buffer * st_bp)
17950 /* Release the extra buffer */
17951 static void normalize_buffer(struct st_buffer * STbuffer)
17952 {
17953- int i, order = STbuffer->reserved_page_order;
17954+ int i, order = STbuffer->map_data.page_order;
17955
17956 for (i = 0; i < STbuffer->frp_segs; i++) {
17957 __free_pages(STbuffer->reserved_pages[i], order);
17958@@ -3782,7 +3780,7 @@ static void normalize_buffer(struct st_buffer * STbuffer)
17959 }
17960 STbuffer->frp_segs = 0;
17961 STbuffer->sg_segs = 0;
17962- STbuffer->reserved_page_order = 0;
17963+ STbuffer->map_data.page_order = 0;
17964 STbuffer->map_data.offset = 0;
17965 }
17966
17967@@ -3792,7 +3790,7 @@ static void normalize_buffer(struct st_buffer * STbuffer)
17968 static int append_to_buffer(const char __user *ubp, struct st_buffer * st_bp, int do_count)
17969 {
17970 int i, cnt, res, offset;
17971- int length = PAGE_SIZE << st_bp->reserved_page_order;
17972+ int length = PAGE_SIZE << st_bp->map_data.page_order;
17973
17974 for (i = 0, offset = st_bp->buffer_bytes;
17975 i < st_bp->frp_segs && offset >= length; i++)
17976@@ -3824,7 +3822,7 @@ static int append_to_buffer(const char __user *ubp, struct st_buffer * st_bp, in
17977 static int from_buffer(struct st_buffer * st_bp, char __user *ubp, int do_count)
17978 {
17979 int i, cnt, res, offset;
17980- int length = PAGE_SIZE << st_bp->reserved_page_order;
17981+ int length = PAGE_SIZE << st_bp->map_data.page_order;
17982
17983 for (i = 0, offset = st_bp->read_pointer;
17984 i < st_bp->frp_segs && offset >= length; i++)
17985@@ -3857,7 +3855,7 @@ static void move_buffer_data(struct st_buffer * st_bp, int offset)
17986 {
17987 int src_seg, dst_seg, src_offset = 0, dst_offset;
17988 int count, total;
17989- int length = PAGE_SIZE << st_bp->reserved_page_order;
17990+ int length = PAGE_SIZE << st_bp->map_data.page_order;
17991
17992 if (offset == 0)
17993 return;
17994@@ -4579,6 +4577,7 @@ static int sgl_map_user_pages(struct st_buffer *STbp,
17995 }
17996
17997 mdata->offset = uaddr & ~PAGE_MASK;
17998+ mdata->page_order = 0;
17999 STbp->mapped_pages = pages;
18000
18001 return nr_pages;
18002diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h
18003index f91a67c..544dc6b 100644
18004--- a/drivers/scsi/st.h
18005+++ b/drivers/scsi/st.h
18006@@ -46,7 +46,6 @@ struct st_buffer {
18007 struct st_request *last_SRpnt;
18008 struct st_cmdstatus cmdstat;
18009 struct page **reserved_pages;
18010- int reserved_page_order;
18011 struct page **mapped_pages;
18012 struct rq_map_data map_data;
18013 unsigned char *b_data;
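[annotation] All of the st.c/st.h churn above is one change: where the tape driver keeps the allocation order of its reserved buffer pages. The removed side gives struct st_buffer a dedicated reserved_page_order and explicitly zeroes the rq_map_data order for direct I/O; the restored side reuses map_data.page_order for both purposes. Either way the quantity means the same thing, as this sketch of the size arithmetic (taken from the PAGE_SIZE << order expressions in the hunks) shows:

	/* Sketch: each reserved segment is a compound allocation of 2^order
	 * pages, so nr_segs segments hold this many bytes in total. */
	static inline size_t st_reserved_bytes(int nr_segs, int order)
	{
		return (size_t)nr_segs * (PAGE_SIZE << order);
	}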
18014diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
18015index 5ed1b82..737b4c9 100644
18016--- a/drivers/serial/8250.c
18017+++ b/drivers/serial/8250.c
18018@@ -83,9 +83,6 @@ static unsigned int skip_txen_test; /* force skip of txen test at init time */
18019
18020 #define PASS_LIMIT 256
18021
18022-#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
18023-
18024-
18025 /*
18026 * We default to IRQ0 for the "no irq" hack. Some
18027 * machine types want others as well - they're free
18028@@ -1342,12 +1339,14 @@ static void serial8250_start_tx(struct uart_port *port)
18029 serial_out(up, UART_IER, up->ier);
18030
18031 if (up->bugs & UART_BUG_TXEN) {
18032- unsigned char lsr;
18033+ unsigned char lsr, iir;
18034 lsr = serial_in(up, UART_LSR);
18035 up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
18036+ iir = serial_in(up, UART_IIR) & 0x0f;
18037 if ((up->port.type == PORT_RM9000) ?
18038- (lsr & UART_LSR_THRE) :
18039- (lsr & UART_LSR_TEMT))
18040+ (lsr & UART_LSR_THRE &&
18041+ (iir == UART_IIR_NO_INT || iir == UART_IIR_THRI)) :
18042+ (lsr & UART_LSR_TEMT && iir & UART_IIR_NO_INT))
18043 transmit_chars(up);
18044 }
18045 }
18046@@ -1795,7 +1794,7 @@ static unsigned int serial8250_tx_empty(struct uart_port *port)
18047 up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
18048 spin_unlock_irqrestore(&up->port.lock, flags);
18049
18050- return (lsr & BOTH_EMPTY) == BOTH_EMPTY ? TIOCSER_TEMT : 0;
18051+ return lsr & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
18052 }
18053
18054 static unsigned int serial8250_get_mctrl(struct uart_port *port)
18055@@ -1853,6 +1852,8 @@ static void serial8250_break_ctl(struct uart_port *port, int break_state)
18056 spin_unlock_irqrestore(&up->port.lock, flags);
18057 }
18058
18059+#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
18060+
18061 /*
18062 * Wait for transmitter & holding register to empty
18063 */
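[annotation] The 8250 hunks swap two things: in the UART_BUG_TXEN workaround the restored code consults UART_IIR alongside UART_LSR before transmitting, where the removed code tested LSR alone; and tx_empty() goes from requiring both bits of BOTH_EMPTY to testing UART_LSR_TEMT only, with the macro moving next to its one remaining user. The stricter emptiness test from the removed side, as a sketch:

	#include <linux/serial_reg.h>

	#define BOTH_EMPTY	(UART_LSR_TEMT | UART_LSR_THRE)

	/* Sketch: transmitter fully idle only when the holding register
	 * (THRE) and the shift register (TEMT) are both empty. */
	static inline int tx_fully_idle(unsigned char lsr)
	{
		return (lsr & BOTH_EMPTY) == BOTH_EMPTY;
	}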
18064diff --git a/drivers/serial/8250_pnp.c b/drivers/serial/8250_pnp.c
18065index deac67e..d71dfe3 100644
18066--- a/drivers/serial/8250_pnp.c
18067+++ b/drivers/serial/8250_pnp.c
18068@@ -328,7 +328,15 @@ static const struct pnp_device_id pnp_dev_table[] = {
18069 /* U.S. Robotics 56K Voice INT PnP*/
18070 { "USR9190", 0 },
18071 /* Wacom tablets */
18072- { "WACFXXX", 0 },
18073+ { "WACF004", 0 },
18074+ { "WACF005", 0 },
18075+ { "WACF006", 0 },
18076+ { "WACF007", 0 },
18077+ { "WACF008", 0 },
18078+ { "WACF009", 0 },
18079+ { "WACF00A", 0 },
18080+ { "WACF00B", 0 },
18081+ { "WACF00C", 0 },
18082 /* Compaq touchscreen */
18083 { "FPI2002", 0 },
18084 /* Fujitsu Stylistic touchscreens */
18085@@ -346,8 +354,6 @@ static const struct pnp_device_id pnp_dev_table[] = {
18086 { "FUJ02E5", 0 },
18087 /* Fujitsu P-series tablet PC device */
18088 { "FUJ02E6", 0 },
18089- /* Fujitsu Wacom 2FGT Tablet PC device */
18090- { "FUJ02E7", 0 },
18091 /*
18092 * LG C1 EXPRESS DUAL (C1-PB11A3) touch screen (actually a FUJ02E6 in
18093 * disguise)
18094diff --git a/drivers/serial/uartlite.c b/drivers/serial/uartlite.c
18095index ab2ab3c..377f271 100644
18096--- a/drivers/serial/uartlite.c
18097+++ b/drivers/serial/uartlite.c
18098@@ -394,7 +394,7 @@ static void ulite_console_write(struct console *co, const char *s,
18099 spin_unlock_irqrestore(&port->lock, flags);
18100 }
18101
18102-static int __devinit ulite_console_setup(struct console *co, char *options)
18103+static int __init ulite_console_setup(struct console *co, char *options)
18104 {
18105 struct uart_port *port;
18106 int baud = 9600;
18107diff --git a/drivers/ssb/sprom.c b/drivers/ssb/sprom.c
18108index eb70843..8943015 100644
18109--- a/drivers/ssb/sprom.c
18110+++ b/drivers/ssb/sprom.c
18111@@ -13,8 +13,6 @@
18112
18113 #include "ssb_private.h"
18114
18115-#include <linux/ctype.h>
18116-
18117
18118 static const struct ssb_sprom *fallback_sprom;
18119
18120@@ -35,27 +33,17 @@ static int sprom2hex(const u16 *sprom, char *buf, size_t buf_len,
18121 static int hex2sprom(u16 *sprom, const char *dump, size_t len,
18122 size_t sprom_size_words)
18123 {
18124- char c, tmp[5] = { 0 };
18125- int err, cnt = 0;
18126+ char tmp[5] = { 0 };
18127+ int cnt = 0;
18128 unsigned long parsed;
18129
18130- /* Strip whitespace at the end. */
18131- while (len) {
18132- c = dump[len - 1];
18133- if (!isspace(c) && c != '\0')
18134- break;
18135- len--;
18136- }
18137- /* Length must match exactly. */
18138- if (len != sprom_size_words * 4)
18139+ if (len < sprom_size_words * 2)
18140 return -EINVAL;
18141
18142 while (cnt < sprom_size_words) {
18143 memcpy(tmp, dump, 4);
18144 dump += 4;
18145- err = strict_strtoul(tmp, 16, &parsed);
18146- if (err)
18147- return err;
18148+ parsed = simple_strtoul(tmp, NULL, 16);
18149 sprom[cnt++] = swab16((u16)parsed);
18150 }
18151
18152diff --git a/drivers/staging/asus_oled/asus_oled.c b/drivers/staging/asus_oled/asus_oled.c
18153index 43c57b7..f4c2657 100644
18154--- a/drivers/staging/asus_oled/asus_oled.c
18155+++ b/drivers/staging/asus_oled/asus_oled.c
18156@@ -194,11 +194,9 @@ static ssize_t set_enabled(struct device *dev, struct device_attribute *attr,
18157 {
18158 struct usb_interface *intf = to_usb_interface(dev);
18159 struct asus_oled_dev *odev = usb_get_intfdata(intf);
18160- unsigned long value;
18161- if (strict_strtoul(buf, 10, &value))
18162- return -EINVAL;
18163+ int temp = strict_strtoul(buf, 10, NULL);
18164
18165- enable_oled(odev, value);
18166+ enable_oled(odev, temp);
18167
18168 return count;
18169 }
18170@@ -209,12 +207,10 @@ static ssize_t class_set_enabled(struct device *device,
18171 {
18172 struct asus_oled_dev *odev =
18173 (struct asus_oled_dev *) dev_get_drvdata(device);
18174- unsigned long value;
18175
18176- if (strict_strtoul(buf, 10, &value))
18177- return -EINVAL;
18178+ int temp = strict_strtoul(buf, 10, NULL);
18179
18180- enable_oled(odev, value);
18181+ enable_oled(odev, temp);
18182
18183 return count;
18184 }
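[annotation] The asus_oled hunks deserve a second look: strict_strtoul() returns an error code and writes the parsed number through its third argument, so the removed lines validate the input and pass the parsed value to enable_oled(), whereas the restored `int temp = strict_strtoul(buf, 10, NULL);` form passes the error code itself (and a NULL result pointer) to enable_oled(). The checked pattern, as on the removed side:

	/* Sketch of checked parsing with the pre-kstrtoul API. */
	unsigned long value;

	if (strict_strtoul(buf, 10, &value))	/* nonzero return = parse error */
		return -EINVAL;
	enable_oled(odev, value);
	return count;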
18185diff --git a/drivers/staging/hv/Hv.c b/drivers/staging/hv/Hv.c
18186index c2809f2..c5b6613 100644
18187--- a/drivers/staging/hv/Hv.c
18188+++ b/drivers/staging/hv/Hv.c
18189@@ -386,7 +386,7 @@ u16 HvSignalEvent(void)
18190 * retrieve the initialized message and event pages. Otherwise, we create and
18191 * initialize the message and event pages.
18192 */
18193-void HvSynicInit(void *irqarg)
18194+int HvSynicInit(u32 irqVector)
18195 {
18196 u64 version;
18197 union hv_synic_simp simp;
18198@@ -394,14 +394,13 @@ void HvSynicInit(void *irqarg)
18199 union hv_synic_sint sharedSint;
18200 union hv_synic_scontrol sctrl;
18201 u64 guestID;
18202- u32 irqVector = *((u32 *)(irqarg));
18203- int cpu = smp_processor_id();
18204+ int ret = 0;
18205
18206 DPRINT_ENTER(VMBUS);
18207
18208 if (!gHvContext.HypercallPage) {
18209 DPRINT_EXIT(VMBUS);
18210- return;
18211+ return ret;
18212 }
18213
18214 /* Check the version */
18215@@ -426,27 +425,27 @@ void HvSynicInit(void *irqarg)
18216 */
18217 rdmsrl(HV_X64_MSR_GUEST_OS_ID, guestID);
18218 if (guestID == HV_LINUX_GUEST_ID) {
18219- gHvContext.synICMessagePage[cpu] =
18220+ gHvContext.synICMessagePage[0] =
18221 phys_to_virt(simp.BaseSimpGpa << PAGE_SHIFT);
18222- gHvContext.synICEventPage[cpu] =
18223+ gHvContext.synICEventPage[0] =
18224 phys_to_virt(siefp.BaseSiefpGpa << PAGE_SHIFT);
18225 } else {
18226 DPRINT_ERR(VMBUS, "unknown guest id!!");
18227 goto Cleanup;
18228 }
18229 DPRINT_DBG(VMBUS, "MAPPED: Simp: %p, Sifep: %p",
18230- gHvContext.synICMessagePage[cpu],
18231- gHvContext.synICEventPage[cpu]);
18232+ gHvContext.synICMessagePage[0],
18233+ gHvContext.synICEventPage[0]);
18234 } else {
18235- gHvContext.synICMessagePage[cpu] = (void *)get_zeroed_page(GFP_ATOMIC);
18236- if (gHvContext.synICMessagePage[cpu] == NULL) {
18237+ gHvContext.synICMessagePage[0] = osd_PageAlloc(1);
18238+ if (gHvContext.synICMessagePage[0] == NULL) {
18239 DPRINT_ERR(VMBUS,
18240 "unable to allocate SYNIC message page!!");
18241 goto Cleanup;
18242 }
18243
18244- gHvContext.synICEventPage[cpu] = (void *)get_zeroed_page(GFP_ATOMIC);
18245- if (gHvContext.synICEventPage[cpu] == NULL) {
18246+ gHvContext.synICEventPage[0] = osd_PageAlloc(1);
18247+ if (gHvContext.synICEventPage[0] == NULL) {
18248 DPRINT_ERR(VMBUS,
18249 "unable to allocate SYNIC event page!!");
18250 goto Cleanup;
18251@@ -455,7 +454,7 @@ void HvSynicInit(void *irqarg)
18252 /* Setup the Synic's message page */
18253 rdmsrl(HV_X64_MSR_SIMP, simp.AsUINT64);
18254 simp.SimpEnabled = 1;
18255- simp.BaseSimpGpa = virt_to_phys(gHvContext.synICMessagePage[cpu])
18256+ simp.BaseSimpGpa = virt_to_phys(gHvContext.synICMessagePage[0])
18257 >> PAGE_SHIFT;
18258
18259 DPRINT_DBG(VMBUS, "HV_X64_MSR_SIMP msr set to: %llx",
18260@@ -466,7 +465,7 @@ void HvSynicInit(void *irqarg)
18261 /* Setup the Synic's event page */
18262 rdmsrl(HV_X64_MSR_SIEFP, siefp.AsUINT64);
18263 siefp.SiefpEnabled = 1;
18264- siefp.BaseSiefpGpa = virt_to_phys(gHvContext.synICEventPage[cpu])
18265+ siefp.BaseSiefpGpa = virt_to_phys(gHvContext.synICEventPage[0])
18266 >> PAGE_SHIFT;
18267
18268 DPRINT_DBG(VMBUS, "HV_X64_MSR_SIEFP msr set to: %llx",
18269@@ -502,30 +501,32 @@ void HvSynicInit(void *irqarg)
18270
18271 DPRINT_EXIT(VMBUS);
18272
18273- return;
18274+ return ret;
18275
18276 Cleanup:
18277+ ret = -1;
18278+
18279 if (gHvContext.GuestId == HV_LINUX_GUEST_ID) {
18280- if (gHvContext.synICEventPage[cpu])
18281- osd_PageFree(gHvContext.synICEventPage[cpu], 1);
18282+ if (gHvContext.synICEventPage[0])
18283+ osd_PageFree(gHvContext.synICEventPage[0], 1);
18284
18285- if (gHvContext.synICMessagePage[cpu])
18286- osd_PageFree(gHvContext.synICMessagePage[cpu], 1);
18287+ if (gHvContext.synICMessagePage[0])
18288+ osd_PageFree(gHvContext.synICMessagePage[0], 1);
18289 }
18290
18291 DPRINT_EXIT(VMBUS);
18292- return;
18293+
18294+ return ret;
18295 }
18296
18297 /**
18298 * HvSynicCleanup - Cleanup routine for HvSynicInit().
18299 */
18300-void HvSynicCleanup(void *arg)
18301+void HvSynicCleanup(void)
18302 {
18303 union hv_synic_sint sharedSint;
18304 union hv_synic_simp simp;
18305 union hv_synic_siefp siefp;
18306- int cpu = smp_processor_id();
18307
18308 DPRINT_ENTER(VMBUS);
18309
18310@@ -538,7 +539,6 @@ void HvSynicCleanup(void *arg)
18311
18312 sharedSint.Masked = 1;
18313
18314- /* Need to correctly cleanup in the case of SMP!!! */
18315 /* Disable the interrupt */
18316 wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, sharedSint.AsUINT64);
18317
18318@@ -560,8 +560,8 @@ void HvSynicCleanup(void *arg)
18319
18320 wrmsrl(HV_X64_MSR_SIEFP, siefp.AsUINT64);
18321
18322- osd_PageFree(gHvContext.synICMessagePage[cpu], 1);
18323- osd_PageFree(gHvContext.synICEventPage[cpu], 1);
18324+ osd_PageFree(gHvContext.synICMessagePage[0], 1);
18325+ osd_PageFree(gHvContext.synICEventPage[0], 1);
18326 }
18327
18328 DPRINT_EXIT(VMBUS);
18329diff --git a/drivers/staging/hv/Hv.h b/drivers/staging/hv/Hv.h
18330index fce4b5c..5379e4b 100644
18331--- a/drivers/staging/hv/Hv.h
18332+++ b/drivers/staging/hv/Hv.h
18333@@ -93,7 +93,7 @@ static const struct hv_guid VMBUS_SERVICE_ID = {
18334 },
18335 };
18336
18337-#define MAX_NUM_CPUS 32
18338+#define MAX_NUM_CPUS 1
18339
18340
18341 struct hv_input_signal_event_buffer {
18342@@ -137,8 +137,8 @@ extern u16 HvPostMessage(union hv_connection_id connectionId,
18343
18344 extern u16 HvSignalEvent(void);
18345
18346-extern void HvSynicInit(void *irqarg);
18347+extern int HvSynicInit(u32 irqVector);
18348
18349-extern void HvSynicCleanup(void *arg);
18350+extern void HvSynicCleanup(void);
18351
18352 #endif /* __HV_H__ */
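[annotation] The Hv.c/Hv.h hunks above all follow from one decision: the removed side makes SynIC setup per-CPU (MAX_NUM_CPUS 32, pages indexed by smp_processor_id(), dispatch via on_each_cpu()), while the restored side pins everything to CPU 0 and calls init/cleanup directly. A sketch of the per-CPU dispatch shape, assuming the void* callback signature on_each_cpu() requires:

	#include <linux/smp.h>

	/* Sketch: on_each_cpu() runs the callback on every online CPU with
	 * interrupts disabled, so per-CPU state is indexed in the callback. */
	static void synic_setup_cpu(void *irqarg)
	{
		u32 irqvector = *(u32 *)irqarg;
		int cpu = smp_processor_id();

		/* ... program this CPU's SynIC message/event pages here,
		 * using `cpu` as the array index and `irqvector` for SINT ... */
	}

	/* caller: on_each_cpu(synic_setup_cpu, &irqvector, 1); */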
18353diff --git a/drivers/staging/hv/Vmbus.c b/drivers/staging/hv/Vmbus.c
18354index 35a023e..a4dd06f 100644
18355--- a/drivers/staging/hv/Vmbus.c
18356+++ b/drivers/staging/hv/Vmbus.c
18357@@ -129,7 +129,7 @@ static int VmbusOnDeviceAdd(struct hv_device *dev, void *AdditionalInfo)
18358
18359 /* strcpy(dev->name, "vmbus"); */
18360 /* SynIC setup... */
18361- on_each_cpu(HvSynicInit, (void *)irqvector, 1);
18362+ ret = HvSynicInit(*irqvector);
18363
18364 /* Connect to VMBus in the root partition */
18365 ret = VmbusConnect();
18366@@ -150,7 +150,7 @@ static int VmbusOnDeviceRemove(struct hv_device *dev)
18367 DPRINT_ENTER(VMBUS);
18368 VmbusChannelReleaseUnattachedChannels();
18369 VmbusDisconnect();
18370- on_each_cpu(HvSynicCleanup, NULL, 1);
18371+ HvSynicCleanup();
18372 DPRINT_EXIT(VMBUS);
18373
18374 return ret;
18375@@ -173,8 +173,7 @@ static void VmbusOnCleanup(struct hv_driver *drv)
18376 */
18377 static void VmbusOnMsgDPC(struct hv_driver *drv)
18378 {
18379- int cpu = smp_processor_id();
18380- void *page_addr = gHvContext.synICMessagePage[cpu];
18381+ void *page_addr = gHvContext.synICMessagePage[0];
18382 struct hv_message *msg = (struct hv_message *)page_addr +
18383 VMBUS_MESSAGE_SINT;
18384 struct hv_message *copied;
18385@@ -231,12 +230,11 @@ static void VmbusOnEventDPC(struct hv_driver *drv)
18386 static int VmbusOnISR(struct hv_driver *drv)
18387 {
18388 int ret = 0;
18389- int cpu = smp_processor_id();
18390 void *page_addr;
18391 struct hv_message *msg;
18392 union hv_synic_event_flags *event;
18393
18394- page_addr = gHvContext.synICMessagePage[cpu];
18395+ page_addr = gHvContext.synICMessagePage[0];
18396 msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
18397
18398 DPRINT_ENTER(VMBUS);
18399@@ -250,7 +248,7 @@ static int VmbusOnISR(struct hv_driver *drv)
18400 }
18401
18402 /* TODO: Check if there are events to be process */
18403- page_addr = gHvContext.synICEventPage[cpu];
18404+ page_addr = gHvContext.synICEventPage[0];
18405 event = (union hv_synic_event_flags *)page_addr + VMBUS_MESSAGE_SINT;
18406
18407 /* Since we are a child, we only need to check bit 0 */
18408diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211.h b/drivers/staging/rtl8187se/ieee80211/ieee80211.h
18409index 0d490c1..3222c22 100644
18410--- a/drivers/staging/rtl8187se/ieee80211/ieee80211.h
18411+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211.h
18412@@ -1318,13 +1318,13 @@ extern int ieee80211_encrypt_fragment(
18413 struct sk_buff *frag,
18414 int hdr_len);
18415
18416-extern int ieee80211_rtl_xmit(struct sk_buff *skb,
18417+extern int ieee80211_xmit(struct sk_buff *skb,
18418 struct net_device *dev);
18419 extern void ieee80211_txb_free(struct ieee80211_txb *);
18420
18421
18422 /* ieee80211_rx.c */
18423-extern int ieee80211_rtl_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
18424+extern int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
18425 struct ieee80211_rx_stats *rx_stats);
18426 extern void ieee80211_rx_mgt(struct ieee80211_device *ieee,
18427 struct ieee80211_hdr_4addr *header,
18428@@ -1376,8 +1376,8 @@ extern void ieee80211_stop_protocol(struct ieee80211_device *ieee);
18429 extern void ieee80211_softmac_start_protocol(struct ieee80211_device *ieee);
18430 extern void ieee80211_softmac_stop_protocol(struct ieee80211_device *ieee);
18431 extern void ieee80211_reset_queue(struct ieee80211_device *ieee);
18432-extern void ieee80211_rtl_wake_queue(struct ieee80211_device *ieee);
18433-extern void ieee80211_rtl_stop_queue(struct ieee80211_device *ieee);
18434+extern void ieee80211_wake_queue(struct ieee80211_device *ieee);
18435+extern void ieee80211_stop_queue(struct ieee80211_device *ieee);
18436 extern struct sk_buff *ieee80211_get_beacon(struct ieee80211_device *ieee);
18437 extern void ieee80211_start_send_beacons(struct ieee80211_device *ieee);
18438 extern void ieee80211_stop_send_beacons(struct ieee80211_device *ieee);
18439@@ -1385,7 +1385,7 @@ extern int ieee80211_wpa_supplicant_ioctl(struct ieee80211_device *ieee, struct
18440 extern void notify_wx_assoc_event(struct ieee80211_device *ieee);
18441 extern void ieee80211_ps_tx_ack(struct ieee80211_device *ieee, short success);
18442 extern void SendDisassociation(struct ieee80211_device *ieee,u8* asSta,u8 asRsn);
18443-extern void ieee80211_rtl_start_scan(struct ieee80211_device *ieee);
18444+extern void ieee80211_start_scan(struct ieee80211_device *ieee);
18445
18446 //Add for RF power on power off by lizhaoming 080512
18447 extern void SendDisassociation(struct ieee80211_device *ieee,
18448diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c
18449index 7ad305b..5e2e79b 100644
18450--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c
18451+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c
18452@@ -470,7 +470,7 @@ drop:
18453 /* All received frames are sent to this function. @skb contains the frame in
18454 * IEEE 802.11 format, i.e., in the format it was sent over air.
18455 * This function is called only as a tasklet (software IRQ). */
18456-int ieee80211_rtl_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
18457+int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
18458 struct ieee80211_rx_stats *rx_stats)
18459 {
18460 struct net_device *dev = ieee->dev;
18461diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
18462index a2fa9a9..334e4c7 100644
18463--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
18464+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
18465@@ -689,7 +689,7 @@ void ieee80211_stop_scan(struct ieee80211_device *ieee)
18466 }
18467
18468 /* called with ieee->lock held */
18469-void ieee80211_rtl_start_scan(struct ieee80211_device *ieee)
18470+void ieee80211_start_scan(struct ieee80211_device *ieee)
18471 {
18472 if(IS_DOT11D_ENABLE(ieee) )
18473 {
18474@@ -1196,7 +1196,7 @@ void ieee80211_associate_step1(struct ieee80211_device *ieee)
18475 }
18476 }
18477
18478-void ieee80211_rtl_auth_challenge(struct ieee80211_device *ieee, u8 *challenge, int chlen)
18479+void ieee80211_auth_challenge(struct ieee80211_device *ieee, u8 *challenge, int chlen)
18480 {
18481 u8 *c;
18482 struct sk_buff *skb;
18483@@ -1898,7 +1898,7 @@ associate_complete:
18484
18485 ieee80211_associate_step2(ieee);
18486 }else{
18487- ieee80211_rtl_auth_challenge(ieee, challenge, chlen);
18488+ ieee80211_auth_challenge(ieee, challenge, chlen);
18489 }
18490 }else{
18491 ieee->softmac_stats.rx_auth_rs_err++;
18492@@ -2047,7 +2047,7 @@ void ieee80211_reset_queue(struct ieee80211_device *ieee)
18493
18494 }
18495
18496-void ieee80211_rtl_wake_queue(struct ieee80211_device *ieee)
18497+void ieee80211_wake_queue(struct ieee80211_device *ieee)
18498 {
18499
18500 unsigned long flags;
18501@@ -2089,7 +2089,7 @@ exit :
18502 }
18503
18504
18505-void ieee80211_rtl_stop_queue(struct ieee80211_device *ieee)
18506+void ieee80211_stop_queue(struct ieee80211_device *ieee)
18507 {
18508 //unsigned long flags;
18509 //spin_lock_irqsave(&ieee->lock,flags);
18510@@ -2301,7 +2301,7 @@ void ieee80211_start_bss(struct ieee80211_device *ieee)
18511 //#else
18512 if (ieee->state == IEEE80211_NOLINK){
18513 ieee->actscanning = true;
18514- ieee80211_rtl_start_scan(ieee);
18515+ ieee80211_start_scan(ieee);
18516 }
18517 //#endif
18518 spin_unlock_irqrestore(&ieee->lock, flags);
18519@@ -2357,7 +2357,7 @@ void ieee80211_associate_retry_wq(struct work_struct *work)
18520 if(ieee->state == IEEE80211_NOLINK){
18521 ieee->beinretry = false;
18522 ieee->actscanning = true;
18523- ieee80211_rtl_start_scan(ieee);
18524+ ieee80211_start_scan(ieee);
18525 }
18526 //YJ,add,080828, notify os here
18527 if(ieee->state == IEEE80211_NOLINK)
18528diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c
18529index c7996ea..e2945db 100644
18530--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c
18531+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c
18532@@ -305,7 +305,7 @@ ieee80211_classify(struct sk_buff *skb, struct ieee80211_network *network)
18533 }
18534
18535 /* SKBs are added to the ieee->tx_queue. */
18536-int ieee80211_rtl_xmit(struct sk_buff *skb,
18537+int ieee80211_xmit(struct sk_buff *skb,
18538 struct net_device *dev)
18539 {
18540 struct ieee80211_device *ieee = netdev_priv(dev);
18541diff --git a/drivers/staging/rtl8187se/r8180_core.c b/drivers/staging/rtl8187se/r8180_core.c
18542index 3f19143..53e654d 100644
18543--- a/drivers/staging/rtl8187se/r8180_core.c
18544+++ b/drivers/staging/rtl8187se/r8180_core.c
18545@@ -1830,7 +1830,7 @@ void rtl8180_rx(struct net_device *dev)
18546 if(priv->rx_skb->len > 4)
18547 skb_trim(priv->rx_skb,priv->rx_skb->len-4);
18548 #ifndef RX_DONT_PASS_UL
18549- if(!ieee80211_rtl_rx(priv->ieee80211,
18550+ if(!ieee80211_rx(priv->ieee80211,
18551 priv->rx_skb, &stats)){
18552 #endif // RX_DONT_PASS_UL
18553
18554@@ -1936,11 +1936,11 @@ rate)
18555 if (!check_nic_enought_desc(dev, priority)){
18556 DMESGW("Error: no descriptor left by previous TX (avail %d) ",
18557 get_curr_tx_free_desc(dev, priority));
18558- ieee80211_rtl_stop_queue(priv->ieee80211);
18559+ ieee80211_stop_queue(priv->ieee80211);
18560 }
18561 rtl8180_tx(dev, skb->data, skb->len, priority, morefrag,0,rate);
18562 if (!check_nic_enought_desc(dev, priority))
18563- ieee80211_rtl_stop_queue(priv->ieee80211);
18564+ ieee80211_stop_queue(priv->ieee80211);
18565
18566 spin_unlock_irqrestore(&priv->tx_lock,flags);
18567 }
18568@@ -3846,7 +3846,7 @@ static const struct net_device_ops rtl8180_netdev_ops = {
18569 .ndo_set_mac_address = r8180_set_mac_adr,
18570 .ndo_validate_addr = eth_validate_addr,
18571 .ndo_change_mtu = eth_change_mtu,
18572- .ndo_start_xmit = ieee80211_rtl_xmit,
18573+ .ndo_start_xmit = ieee80211_xmit,
18574 };
18575
18576 static int __devinit rtl8180_pci_probe(struct pci_dev *pdev,
18577@@ -4066,7 +4066,7 @@ void rtl8180_try_wake_queue(struct net_device *dev, int pri)
18578 spin_unlock_irqrestore(&priv->tx_lock,flags);
18579
18580 if(enough_desc)
18581- ieee80211_rtl_wake_queue(priv->ieee80211);
18582+ ieee80211_wake_queue(priv->ieee80211);
18583 }
18584
18585 void rtl8180_tx_isr(struct net_device *dev, int pri,short error)
18586diff --git a/drivers/staging/rtl8187se/r8180_wx.c b/drivers/staging/rtl8187se/r8180_wx.c
18587index 637ee8e..766892e 100644
18588--- a/drivers/staging/rtl8187se/r8180_wx.c
18589+++ b/drivers/staging/rtl8187se/r8180_wx.c
18590@@ -377,7 +377,7 @@ static int r8180_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
18591 // queue_work(priv->ieee80211->wq, &priv->ieee80211->wx_sync_scan_wq);
18592 //printk("start scan============================>\n");
18593 ieee80211_softmac_ips_scan_syncro(priv->ieee80211);
18594-//ieee80211_rtl_start_scan(priv->ieee80211);
18595+//ieee80211_start_scan(priv->ieee80211);
18596 /* intentionally forget to up sem */
18597 // up(&priv->ieee80211->wx_sem);
18598 ret = 0;
18599diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
18600index d9461c9..2473cf0 100644
18601--- a/drivers/usb/class/usbtmc.c
18602+++ b/drivers/usb/class/usbtmc.c
18603@@ -562,16 +562,10 @@ static ssize_t usbtmc_write(struct file *filp, const char __user *buf,
18604 n_bytes = roundup(12 + this_part, 4);
18605 memset(buffer + 12 + this_part, 0, n_bytes - (12 + this_part));
18606
18607- do {
18608- retval = usb_bulk_msg(data->usb_dev,
18609- usb_sndbulkpipe(data->usb_dev,
18610- data->bulk_out),
18611- buffer, n_bytes,
18612- &actual, USBTMC_TIMEOUT);
18613- if (retval != 0)
18614- break;
18615- n_bytes -= actual;
18616- } while (n_bytes);
18617+ retval = usb_bulk_msg(data->usb_dev,
18618+ usb_sndbulkpipe(data->usb_dev,
18619+ data->bulk_out),
18620+ buffer, n_bytes, &actual, USBTMC_TIMEOUT);
18621
18622 data->bTag_last_write = data->bTag;
18623 data->bTag++;
18624diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
18625index 355dffc..96f1171 100644
18626--- a/drivers/usb/core/devices.c
18627+++ b/drivers/usb/core/devices.c
18628@@ -494,7 +494,7 @@ static ssize_t usb_device_dump(char __user **buffer, size_t *nbytes,
18629 return 0;
18630 /* allocate 2^1 pages = 8K (on i386);
18631 * should be more than enough for one device */
18632- pages_start = (char *)__get_free_pages(GFP_NOIO, 1);
18633+ pages_start = (char *)__get_free_pages(GFP_KERNEL, 1);
18634 if (!pages_start)
18635 return -ENOMEM;
18636
18637diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
18638index 24120db..181f78c 100644
18639--- a/drivers/usb/core/devio.c
18640+++ b/drivers/usb/core/devio.c
18641@@ -1312,9 +1312,9 @@ static int processcompl(struct async *as, void __user * __user *arg)
18642 void __user *addr = as->userurb;
18643 unsigned int i;
18644
18645- if (as->userbuffer && urb->actual_length)
18646+ if (as->userbuffer)
18647 if (copy_to_user(as->userbuffer, urb->transfer_buffer,
18648- urb->actual_length))
18649+ urb->transfer_buffer_length))
18650 goto err_out;
18651 if (put_user(as->status, &userurb->status))
18652 goto err_out;
18653@@ -1334,11 +1334,14 @@ static int processcompl(struct async *as, void __user * __user *arg)
18654 }
18655 }
18656
18657+ free_async(as);
18658+
18659 if (put_user(addr, (void __user * __user *)arg))
18660 return -EFAULT;
18661 return 0;
18662
18663 err_out:
18664+ free_async(as);
18665 return -EFAULT;
18666 }
18667
18668@@ -1368,11 +1371,8 @@ static struct async *reap_as(struct dev_state *ps)
18669 static int proc_reapurb(struct dev_state *ps, void __user *arg)
18670 {
18671 struct async *as = reap_as(ps);
18672- if (as) {
18673- int retval = processcompl(as, (void __user * __user *)arg);
18674- free_async(as);
18675- return retval;
18676- }
18677+ if (as)
18678+ return processcompl(as, (void __user * __user *)arg);
18679 if (signal_pending(current))
18680 return -EINTR;
18681 return -EIO;
18682@@ -1380,16 +1380,11 @@ static int proc_reapurb(struct dev_state *ps, void __user *arg)
18683
18684 static int proc_reapurbnonblock(struct dev_state *ps, void __user *arg)
18685 {
18686- int retval;
18687 struct async *as;
18688
18689- as = async_getcompleted(ps);
18690- retval = -EAGAIN;
18691- if (as) {
18692- retval = processcompl(as, (void __user * __user *)arg);
18693- free_async(as);
18694- }
18695- return retval;
18696+ if (!(as = async_getcompleted(ps)))
18697+ return -EAGAIN;
18698+ return processcompl(as, (void __user * __user *)arg);
18699 }
18700
18701 #ifdef CONFIG_COMPAT
18702@@ -1440,9 +1435,9 @@ static int processcompl_compat(struct async *as, void __user * __user *arg)
18703 void __user *addr = as->userurb;
18704 unsigned int i;
18705
18706- if (as->userbuffer && urb->actual_length)
18707+ if (as->userbuffer)
18708 if (copy_to_user(as->userbuffer, urb->transfer_buffer,
18709- urb->actual_length))
18710+ urb->transfer_buffer_length))
18711 return -EFAULT;
18712 if (put_user(as->status, &userurb->status))
18713 return -EFAULT;
18714@@ -1462,6 +1457,7 @@ static int processcompl_compat(struct async *as, void __user * __user *arg)
18715 }
18716 }
18717
18718+ free_async(as);
18719 if (put_user(ptr_to_compat(addr), (u32 __user *)arg))
18720 return -EFAULT;
18721 return 0;
18722@@ -1470,11 +1466,8 @@ static int processcompl_compat(struct async *as, void __user * __user *arg)
18723 static int proc_reapurb_compat(struct dev_state *ps, void __user *arg)
18724 {
18725 struct async *as = reap_as(ps);
18726- if (as) {
18727- int retval = processcompl_compat(as, (void __user * __user *)arg);
18728- free_async(as);
18729- return retval;
18730- }
18731+ if (as)
18732+ return processcompl_compat(as, (void __user * __user *)arg);
18733 if (signal_pending(current))
18734 return -EINTR;
18735 return -EIO;
18736@@ -1482,16 +1475,11 @@ static int proc_reapurb_compat(struct dev_state *ps, void __user *arg)
18737
18738 static int proc_reapurbnonblock_compat(struct dev_state *ps, void __user *arg)
18739 {
18740- int retval;
18741 struct async *as;
18742
18743- retval = -EAGAIN;
18744- as = async_getcompleted(ps);
18745- if (as) {
18746- retval = processcompl_compat(as, (void __user * __user *)arg);
18747- free_async(as);
18748- }
18749- return retval;
18750+ if (!(as = async_getcompleted(ps)))
18751+ return -EAGAIN;
18752+ return processcompl_compat(as, (void __user * __user *)arg);
18753 }
18754
18755 #endif
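[annotation] Two independent usbfs changes are interleaved in the devio.c hunks. First, the removed side copies only urb->actual_length bytes back to userspace (and skips the copy when nothing was transferred); copying the full transfer_buffer_length, as the restored code does, can hand userspace stale buffer contents beyond what the device returned. Second, ownership of the completed struct async moves: the removed side has the reap callers free it after processcompl(), the restored side frees it inside. The bounded copy, as a sketch:

	/* Sketch: copy back no more than the device actually produced. */
	if (as->userbuffer && urb->actual_length) {
		if (copy_to_user(as->userbuffer, urb->transfer_buffer,
				 urb->actual_length))
			goto err_out;
	}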
18756diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
18757index 1a7d54b..0f857e6 100644
18758--- a/drivers/usb/core/hub.c
18759+++ b/drivers/usb/core/hub.c
18760@@ -1612,12 +1612,12 @@ static inline void announce_device(struct usb_device *udev) { }
18761 #endif
18762
18763 /**
18764- * usb_enumerate_device_otg - FIXME (usbcore-internal)
18765+ * usb_configure_device_otg - FIXME (usbcore-internal)
18766 * @udev: newly addressed device (in ADDRESS state)
18767 *
18768- * Finish enumeration for On-The-Go devices
18769+ * Do configuration for On-The-Go devices
18770 */
18771-static int usb_enumerate_device_otg(struct usb_device *udev)
18772+static int usb_configure_device_otg(struct usb_device *udev)
18773 {
18774 int err = 0;
18775
18776@@ -1688,7 +1688,7 @@ fail:
18777
18778
18779 /**
18780- * usb_enumerate_device - Read device configs/intfs/otg (usbcore-internal)
18781+ * usb_configure_device - Detect and probe device intfs/otg (usbcore-internal)
18782 * @udev: newly addressed device (in ADDRESS state)
18783 *
18784 * This is only called by usb_new_device() and usb_authorize_device()
18785@@ -1699,7 +1699,7 @@ fail:
18786 * the string descriptors, as they will be errored out by the device
18787 * until it has been authorized.
18788 */
18789-static int usb_enumerate_device(struct usb_device *udev)
18790+static int usb_configure_device(struct usb_device *udev)
18791 {
18792 int err;
18793
18794@@ -1723,7 +1723,7 @@ static int usb_enumerate_device(struct usb_device *udev)
18795 udev->descriptor.iManufacturer);
18796 udev->serial = usb_cache_string(udev, udev->descriptor.iSerialNumber);
18797 }
18798- err = usb_enumerate_device_otg(udev);
18799+ err = usb_configure_device_otg(udev);
18800 fail:
18801 return err;
18802 }
18803@@ -1733,8 +1733,8 @@ fail:
18804 * usb_new_device - perform initial device setup (usbcore-internal)
18805 * @udev: newly addressed device (in ADDRESS state)
18806 *
18807- * This is called with devices which have been detected but not fully
18808- * enumerated. The device descriptor is available, but not descriptors
18809+ * This is called with devices which have been enumerated, but not yet
18810+ * configured. The device descriptor is available, but not descriptors
18811 * for any device configuration. The caller must have locked either
18812 * the parent hub (if udev is a normal device) or else the
18813 * usb_bus_list_lock (if udev is a root hub). The parent's pointer to
18814@@ -1757,8 +1757,8 @@ int usb_new_device(struct usb_device *udev)
18815 if (udev->parent)
18816 usb_autoresume_device(udev->parent);
18817
18818- usb_detect_quirks(udev);
18819- err = usb_enumerate_device(udev); /* Read descriptors */
18820+ usb_detect_quirks(udev); /* Determine quirks */
18821+ err = usb_configure_device(udev); /* detect & probe dev/intfs */
18822 if (err < 0)
18823 goto fail;
18824 dev_dbg(&udev->dev, "udev %d, busnum %d, minor = %d\n",
18825@@ -1803,23 +1803,21 @@ fail:
18826 */
18827 int usb_deauthorize_device(struct usb_device *usb_dev)
18828 {
18829+ unsigned cnt;
18830 usb_lock_device(usb_dev);
18831 if (usb_dev->authorized == 0)
18832 goto out_unauthorized;
18833-
18834 usb_dev->authorized = 0;
18835 usb_set_configuration(usb_dev, -1);
18836-
18837- kfree(usb_dev->product);
18838 usb_dev->product = kstrdup("n/a (unauthorized)", GFP_KERNEL);
18839- kfree(usb_dev->manufacturer);
18840 usb_dev->manufacturer = kstrdup("n/a (unauthorized)", GFP_KERNEL);
18841- kfree(usb_dev->serial);
18842 usb_dev->serial = kstrdup("n/a (unauthorized)", GFP_KERNEL);
18843-
18844- usb_destroy_configuration(usb_dev);
18845+ kfree(usb_dev->config);
18846+ usb_dev->config = NULL;
18847+ for (cnt = 0; cnt < usb_dev->descriptor.bNumConfigurations; cnt++)
18848+ kfree(usb_dev->rawdescriptors[cnt]);
18849 usb_dev->descriptor.bNumConfigurations = 0;
18850-
18851+ kfree(usb_dev->rawdescriptors);
18852 out_unauthorized:
18853 usb_unlock_device(usb_dev);
18854 return 0;
18855@@ -1829,11 +1827,15 @@ out_unauthorized:
18856 int usb_authorize_device(struct usb_device *usb_dev)
18857 {
18858 int result = 0, c;
18859-
18860 usb_lock_device(usb_dev);
18861 if (usb_dev->authorized == 1)
18862 goto out_authorized;
18863-
18864+ kfree(usb_dev->product);
18865+ usb_dev->product = NULL;
18866+ kfree(usb_dev->manufacturer);
18867+ usb_dev->manufacturer = NULL;
18868+ kfree(usb_dev->serial);
18869+ usb_dev->serial = NULL;
18870 result = usb_autoresume_device(usb_dev);
18871 if (result < 0) {
18872 dev_err(&usb_dev->dev,
18873@@ -1846,18 +1848,10 @@ int usb_authorize_device(struct usb_device *usb_dev)
18874 "authorization: %d\n", result);
18875 goto error_device_descriptor;
18876 }
18877-
18878- kfree(usb_dev->product);
18879- usb_dev->product = NULL;
18880- kfree(usb_dev->manufacturer);
18881- usb_dev->manufacturer = NULL;
18882- kfree(usb_dev->serial);
18883- usb_dev->serial = NULL;
18884-
18885 usb_dev->authorized = 1;
18886- result = usb_enumerate_device(usb_dev);
18887+ result = usb_configure_device(usb_dev);
18888 if (result < 0)
18889- goto error_enumerate;
18890+ goto error_configure;
18891 /* Choose and set the configuration. This registers the interfaces
18892 * with the driver core and lets interface drivers bind to them.
18893 */
18894@@ -1872,10 +1866,8 @@ int usb_authorize_device(struct usb_device *usb_dev)
18895 }
18896 }
18897 dev_info(&usb_dev->dev, "authorized to connect\n");
18898-
18899-error_enumerate:
18900+error_configure:
18901 error_device_descriptor:
18902- usb_autosuspend_device(usb_dev);
18903 error_autoresume:
18904 out_authorized:
18905 usb_unlock_device(usb_dev); // complements locktree
18906@@ -3286,9 +3278,6 @@ static void hub_events(void)
18907 USB_PORT_FEAT_C_SUSPEND);
18908 udev = hdev->children[i-1];
18909 if (udev) {
18910- /* TRSMRCY = 10 msec */
18911- msleep(10);
18912-
18913 usb_lock_device(udev);
18914 ret = remote_wakeup(hdev->
18915 children[i-1]);
18916diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
18917index 980a8d2..da718e8 100644
18918--- a/drivers/usb/core/message.c
18919+++ b/drivers/usb/core/message.c
18920@@ -911,11 +911,11 @@ char *usb_cache_string(struct usb_device *udev, int index)
18921 if (index <= 0)
18922 return NULL;
18923
18924- buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
18925+ buf = kmalloc(MAX_USB_STRING_SIZE, GFP_KERNEL);
18926 if (buf) {
18927 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
18928 if (len > 0) {
18929- smallbuf = kmalloc(++len, GFP_NOIO);
18930+ smallbuf = kmalloc(++len, GFP_KERNEL);
18931 if (!smallbuf)
18932 return buf;
18933 memcpy(smallbuf, buf, len);
18934@@ -1682,7 +1682,7 @@ int usb_set_configuration(struct usb_device *dev, int configuration)
18935 if (cp) {
18936 nintf = cp->desc.bNumInterfaces;
18937 new_interfaces = kmalloc(nintf * sizeof(*new_interfaces),
18938- GFP_NOIO);
18939+ GFP_KERNEL);
18940 if (!new_interfaces) {
18941 dev_err(&dev->dev, "Out of memory\n");
18942 return -ENOMEM;
18943@@ -1691,7 +1691,7 @@ int usb_set_configuration(struct usb_device *dev, int configuration)
18944 for (; n < nintf; ++n) {
18945 new_interfaces[n] = kzalloc(
18946 sizeof(struct usb_interface),
18947- GFP_NOIO);
18948+ GFP_KERNEL);
18949 if (!new_interfaces[n]) {
18950 dev_err(&dev->dev, "Out of memory\n");
18951 ret = -ENOMEM;
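[annotation] The GFP_NOIO/GFP_KERNEL swaps in message.c (and in devices.c earlier) are about allocation context, not size: the removed lines use GFP_NOIO because these paths can run while a USB storage device is being reset or resumed, and a GFP_KERNEL allocation may block on page reclaim that issues block I/O to that very device. GFP_NOIO forbids the recursion; a sketch:

	/* Sketch: allocating on a path that may be reached during USB
	 * reset/resume of a storage device -- reclaim must not issue I/O. */
	buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
	if (!buf)
		return NULL;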
18952diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
18953index fcdcad4..7ec3041 100644
18954--- a/drivers/usb/core/sysfs.c
18955+++ b/drivers/usb/core/sysfs.c
18956@@ -82,13 +82,9 @@ static ssize_t show_##name(struct device *dev, \
18957 struct device_attribute *attr, char *buf) \
18958 { \
18959 struct usb_device *udev; \
18960- int retval; \
18961 \
18962 udev = to_usb_device(dev); \
18963- usb_lock_device(udev); \
18964- retval = sprintf(buf, "%s\n", udev->name); \
18965- usb_unlock_device(udev); \
18966- return retval; \
18967+ return sprintf(buf, "%s\n", udev->name); \
18968 } \
18969 static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL);
18970
18971@@ -115,12 +111,6 @@ show_speed(struct device *dev, struct device_attribute *attr, char *buf)
18972 case USB_SPEED_HIGH:
18973 speed = "480";
18974 break;
18975- case USB_SPEED_VARIABLE:
18976- speed = "480";
18977- break;
18978- case USB_SPEED_SUPER:
18979- speed = "5000";
18980- break;
18981 default:
18982 speed = "unknown";
18983 }
18984diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
18985index 52e5e31..b1b85ab 100644
18986--- a/drivers/usb/core/usb.c
18987+++ b/drivers/usb/core/usb.c
18988@@ -132,7 +132,7 @@ EXPORT_SYMBOL_GPL(usb_altnum_to_altsetting);
18989
18990 struct find_interface_arg {
18991 int minor;
18992- struct device_driver *drv;
18993+ struct usb_interface *interface;
18994 };
18995
18996 static int __find_interface(struct device *dev, void *data)
18997@@ -143,10 +143,12 @@ static int __find_interface(struct device *dev, void *data)
18998 if (!is_usb_interface(dev))
18999 return 0;
19000
19001- if (dev->driver != arg->drv)
19002- return 0;
19003 intf = to_usb_interface(dev);
19004- return intf->minor == arg->minor;
19005+ if (intf->minor != -1 && intf->minor == arg->minor) {
19006+ arg->interface = intf;
19007+ return 1;
19008+ }
19009+ return 0;
19010 }
19011
19012 /**
19013@@ -154,24 +156,21 @@ static int __find_interface(struct device *dev, void *data)
19014 * @drv: the driver whose current configuration is considered
19015 * @minor: the minor number of the desired device
19016 *
19017- * This walks the bus device list and returns a pointer to the interface
19018- * with the matching minor and driver. Note, this only works for devices
19019- * that share the USB major number.
19020+ * This walks the driver device list and returns a pointer to the interface
19021+ * with the matching minor. Note, this only works for devices that share the
19022+ * USB major number.
19023 */
19024 struct usb_interface *usb_find_interface(struct usb_driver *drv, int minor)
19025 {
19026 struct find_interface_arg argb;
19027- struct device *dev;
19028+ int retval;
19029
19030 argb.minor = minor;
19031- argb.drv = &drv->drvwrap.driver;
19032-
19033- dev = bus_find_device(&usb_bus_type, NULL, &argb, __find_interface);
19034-
19035- /* Drop reference count from bus_find_device */
19036- put_device(dev);
19037-
19038- return dev ? to_usb_interface(dev) : NULL;
19039+ argb.interface = NULL;
19040+ /* eat the error, it will be in argb.interface */
19041+ retval = driver_for_each_device(&drv->drvwrap.driver, NULL, &argb,
19042+ __find_interface);
19043+ return argb.interface;
19044 }
19045 EXPORT_SYMBOL_GPL(usb_find_interface);
19046
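[annotation] The usb_find_interface() rewrite above swaps iteration strategies: the removed side walks the whole USB bus with bus_find_device(), which hands back a referenced struct device that must be dropped with put_device(), while the restored side walks only the driver's bound devices with driver_for_each_device() and returns the match through the callback argument. The reference-handling half, mirroring the removed lines:

	/* Sketch: bus_find_device() takes a reference on the device it
	 * returns; put_device(NULL) is a no-op, so the drop is unconditional. */
	struct device *dev;

	dev = bus_find_device(&usb_bus_type, NULL, &argb, __find_interface);
	put_device(dev);
	return dev ? to_usb_interface(dev) : NULL;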
19047diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
19048index e18c677..f5f5601 100644
19049--- a/drivers/usb/host/ehci-hcd.c
19050+++ b/drivers/usb/host/ehci-hcd.c
19051@@ -785,10 +785,9 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
19052
19053 /* start 20 msec resume signaling from this port,
19054 * and make khubd collect PORT_STAT_C_SUSPEND to
19055- * stop that signaling. Use 5 ms extra for safety,
19056- * like usb_port_resume() does.
19057+ * stop that signaling.
19058 */
19059- ehci->reset_done[i] = jiffies + msecs_to_jiffies(25);
19060+ ehci->reset_done [i] = jiffies + msecs_to_jiffies (20);
19061 ehci_dbg (ehci, "port %d remote wakeup\n", i + 1);
19062 mod_timer(&hcd->rh_timer, ehci->reset_done[i]);
19063 }
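[annotation] The 25 ms vs 20 ms constants here (and in the uhci-hub hunk further below) encode the same timing: the host must drive resume signaling for at least 20 ms before khubd may sample the port, and the removed side adds 5 ms of slack, which its comment says matches usb_port_resume(). As a sketch:

	/* Sketch: arm the root-hub poll for when resume signaling is due to
	 * finish -- the 20 ms minimum plus 5 ms of safety margin. */
	ehci->reset_done[i] = jiffies + msecs_to_jiffies(25);
	mod_timer(&hcd->rh_timer, ehci->reset_done[i]);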
19064diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
19065index 698f461..1b6f1c0 100644
19066--- a/drivers/usb/host/ehci-hub.c
19067+++ b/drivers/usb/host/ehci-hub.c
19068@@ -120,26 +120,9 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
19069 del_timer_sync(&ehci->watchdog);
19070 del_timer_sync(&ehci->iaa_watchdog);
19071
19072+ port = HCS_N_PORTS (ehci->hcs_params);
19073 spin_lock_irq (&ehci->lock);
19074
19075- /* Once the controller is stopped, port resumes that are already
19076- * in progress won't complete. Hence if remote wakeup is enabled
19077- * for the root hub and any ports are in the middle of a resume or
19078- * remote wakeup, we must fail the suspend.
19079- */
19080- if (hcd->self.root_hub->do_remote_wakeup) {
19081- port = HCS_N_PORTS(ehci->hcs_params);
19082- while (port--) {
19083- if (ehci->reset_done[port] != 0) {
19084- spin_unlock_irq(&ehci->lock);
19085- ehci_dbg(ehci, "suspend failed because "
19086- "port %d is resuming\n",
19087- port + 1);
19088- return -EBUSY;
19089- }
19090- }
19091- }
19092-
19093 /* stop schedules, clean any completed work */
19094 if (HC_IS_RUNNING(hcd->state)) {
19095 ehci_quiesce (ehci);
19096@@ -155,7 +138,6 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
19097 */
19098 ehci->bus_suspended = 0;
19099 ehci->owned_ports = 0;
19100- port = HCS_N_PORTS(ehci->hcs_params);
19101 while (port--) {
19102 u32 __iomem *reg = &ehci->regs->port_status [port];
19103 u32 t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
19104diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
19105index c0d4b39..139a2cc 100644
19106--- a/drivers/usb/host/ehci-q.c
19107+++ b/drivers/usb/host/ehci-q.c
19108@@ -827,10 +827,9 @@ qh_make (
19109 * But interval 1 scheduling is simpler, and
19110 * includes high bandwidth.
19111 */
19112- urb->interval = 1;
19113- } else if (qh->period > ehci->periodic_size) {
19114- qh->period = ehci->periodic_size;
19115- urb->interval = qh->period << 3;
19116+ dbg ("intr period %d uframes, NYET!",
19117+ urb->interval);
19118+ goto done;
19119 }
19120 } else {
19121 int think_time;
19122@@ -853,10 +852,6 @@ qh_make (
19123 usb_calc_bus_time (urb->dev->speed,
19124 is_input, 0, max_packet (maxp)));
19125 qh->period = urb->interval;
19126- if (qh->period > ehci->periodic_size) {
19127- qh->period = ehci->periodic_size;
19128- urb->interval = qh->period;
19129- }
19130 }
19131 }
19132
19133diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
19134index 9260c74..e33d362 100644
19135--- a/drivers/usb/host/r8a66597-hcd.c
19136+++ b/drivers/usb/host/r8a66597-hcd.c
19137@@ -35,9 +35,7 @@
19138 #include <linux/usb.h>
19139 #include <linux/platform_device.h>
19140 #include <linux/io.h>
19141-#include <linux/mm.h>
19142 #include <linux/irq.h>
19143-#include <asm/cacheflush.h>
19144
19145 #include "../core/hcd.h"
19146 #include "r8a66597.h"
19147@@ -218,17 +216,8 @@ static void disable_controller(struct r8a66597 *r8a66597)
19148 {
19149 int port;
19150
19151- /* disable interrupts */
19152 r8a66597_write(r8a66597, 0, INTENB0);
19153- r8a66597_write(r8a66597, 0, INTENB1);
19154- r8a66597_write(r8a66597, 0, BRDYENB);
19155- r8a66597_write(r8a66597, 0, BEMPENB);
19156- r8a66597_write(r8a66597, 0, NRDYENB);
19157-
19158- /* clear status */
19159- r8a66597_write(r8a66597, 0, BRDYSTS);
19160- r8a66597_write(r8a66597, 0, NRDYSTS);
19161- r8a66597_write(r8a66597, 0, BEMPSTS);
19162+ r8a66597_write(r8a66597, 0, INTSTS0);
19163
19164 for (port = 0; port < r8a66597->max_root_hub; port++)
19165 r8a66597_disable_port(r8a66597, port);
19166@@ -822,26 +811,6 @@ static void enable_r8a66597_pipe(struct r8a66597 *r8a66597, struct urb *urb,
19167 enable_r8a66597_pipe_dma(r8a66597, dev, pipe, urb);
19168 }
19169
19170-static void r8a66597_urb_done(struct r8a66597 *r8a66597, struct urb *urb,
19171- int status)
19172-__releases(r8a66597->lock)
19173-__acquires(r8a66597->lock)
19174-{
19175- if (usb_pipein(urb->pipe) && usb_pipetype(urb->pipe) != PIPE_CONTROL) {
19176- void *ptr;
19177-
19178- for (ptr = urb->transfer_buffer;
19179- ptr < urb->transfer_buffer + urb->transfer_buffer_length;
19180- ptr += PAGE_SIZE)
19181- flush_dcache_page(virt_to_page(ptr));
19182- }
19183-
19184- usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), urb);
19185- spin_unlock(&r8a66597->lock);
19186- usb_hcd_giveback_urb(r8a66597_to_hcd(r8a66597), urb, status);
19187- spin_lock(&r8a66597->lock);
19188-}
19189-
19190 /* this function must be called with interrupt disabled */
19191 static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address)
19192 {
19193@@ -862,9 +831,15 @@ static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address)
19194 list_del(&td->queue);
19195 kfree(td);
19196
19197- if (urb)
19198- r8a66597_urb_done(r8a66597, urb, -ENODEV);
19199+ if (urb) {
19200+ usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597),
19201+ urb);
19202
19203+ spin_unlock(&r8a66597->lock);
19204+ usb_hcd_giveback_urb(r8a66597_to_hcd(r8a66597), urb,
19205+ -ENODEV);
19206+ spin_lock(&r8a66597->lock);
19207+ }
19208 break;
19209 }
19210 }
19211@@ -1301,7 +1276,10 @@ __releases(r8a66597->lock) __acquires(r8a66597->lock)
19212 if (usb_pipeisoc(urb->pipe))
19213 urb->start_frame = r8a66597_get_frame(hcd);
19214
19215- r8a66597_urb_done(r8a66597, urb, status);
19216+ usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), urb);
19217+ spin_unlock(&r8a66597->lock);
19218+ usb_hcd_giveback_urb(hcd, urb, status);
19219+ spin_lock(&r8a66597->lock);
19220 }
19221
19222 if (restart) {
19223@@ -2492,12 +2470,6 @@ static int __devinit r8a66597_probe(struct platform_device *pdev)
19224 r8a66597->rh_timer.data = (unsigned long)r8a66597;
19225 r8a66597->reg = (unsigned long)reg;
19226
19227- /* make sure no interrupts are pending */
19228- ret = r8a66597_clock_enable(r8a66597);
19229- if (ret < 0)
19230- goto clean_up3;
19231- disable_controller(r8a66597);
19232-
19233 for (i = 0; i < R8A66597_MAX_NUM_PIPE; i++) {
19234 INIT_LIST_HEAD(&r8a66597->pipe_queue[i]);
19235 init_timer(&r8a66597->td_timer[i]);
19236diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
19237index 99cd00f..5cd0e48 100644
19238--- a/drivers/usb/host/uhci-hcd.c
19239+++ b/drivers/usb/host/uhci-hcd.c
19240@@ -749,20 +749,7 @@ static int uhci_rh_suspend(struct usb_hcd *hcd)
19241 spin_lock_irq(&uhci->lock);
19242 if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags))
19243 rc = -ESHUTDOWN;
19244- else if (uhci->dead)
19245- ; /* Dead controllers tell no tales */
19246-
19247- /* Once the controller is stopped, port resumes that are already
19248- * in progress won't complete. Hence if remote wakeup is enabled
19249- * for the root hub and any ports are in the middle of a resume or
19250- * remote wakeup, we must fail the suspend.
19251- */
19252- else if (hcd->self.root_hub->do_remote_wakeup &&
19253- uhci->resuming_ports) {
19254- dev_dbg(uhci_dev(uhci), "suspend failed because a port "
19255- "is resuming\n");
19256- rc = -EBUSY;
19257- } else
19258+ else if (!uhci->dead)
19259 suspend_rh(uhci, UHCI_RH_SUSPENDED);
19260 spin_unlock_irq(&uhci->lock);
19261 return rc;
19262diff --git a/drivers/usb/host/uhci-hub.c b/drivers/usb/host/uhci-hub.c
19263index 8270055..885b585 100644
19264--- a/drivers/usb/host/uhci-hub.c
19265+++ b/drivers/usb/host/uhci-hub.c
19266@@ -167,7 +167,7 @@ static void uhci_check_ports(struct uhci_hcd *uhci)
19267 /* Port received a wakeup request */
19268 set_bit(port, &uhci->resuming_ports);
19269 uhci->ports_timeout = jiffies +
19270- msecs_to_jiffies(25);
19271+ msecs_to_jiffies(20);
19272
19273 /* Make sure we see the port again
19274 * after the resuming period is over. */
19275diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
19276index 62ff5e7..1d8e39a 100644
19277--- a/drivers/usb/misc/appledisplay.c
19278+++ b/drivers/usb/misc/appledisplay.c
19279@@ -72,8 +72,8 @@ struct appledisplay {
19280 struct usb_device *udev; /* usb device */
19281 struct urb *urb; /* usb request block */
19282 struct backlight_device *bd; /* backlight device */
19283- u8 *urbdata; /* interrupt URB data buffer */
19284- u8 *msgdata; /* control message data buffer */
19285+ char *urbdata; /* interrupt URB data buffer */
19286+ char *msgdata; /* control message data buffer */
19287
19288 struct delayed_work work;
19289 int button_pressed;
19290diff --git a/drivers/usb/misc/emi62.c b/drivers/usb/misc/emi62.c
19291index 59860b3..602ee05 100644
19292--- a/drivers/usb/misc/emi62.c
19293+++ b/drivers/usb/misc/emi62.c
19294@@ -167,7 +167,7 @@ static int emi62_load_firmware (struct usb_device *dev)
19295 err("%s - error loading firmware: error = %d", __func__, err);
19296 goto wraperr;
19297 }
19298- } while (rec);
19299+ } while (i > 0);
19300
19301 /* Assert reset (stop the CPU in the EMI) */
19302 err = emi62_set_reset(dev,1);
19303diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
19304index 067e5a9..522efb3 100644
19305--- a/drivers/usb/musb/musb_gadget_ep0.c
19306+++ b/drivers/usb/musb/musb_gadget_ep0.c
19307@@ -199,6 +199,7 @@ service_in_request(struct musb *musb, const struct usb_ctrlrequest *ctrlrequest)
19308 static void musb_g_ep0_giveback(struct musb *musb, struct usb_request *req)
19309 {
19310 musb_g_giveback(&musb->endpoints[0].ep_in, req, 0);
19311+ musb->ep0_state = MUSB_EP0_STAGE_SETUP;
19312 }
19313
19314 /*
19315@@ -647,7 +648,7 @@ irqreturn_t musb_g_ep0_irq(struct musb *musb)
19316 musb->ep0_state = MUSB_EP0_STAGE_STATUSIN;
19317 break;
19318 default:
19319- ERR("SetupEnd came in a wrong ep0stage %s\n",
19320+ ERR("SetupEnd came in a wrong ep0stage %s",
19321 decode_ep0stage(musb->ep0_state));
19322 }
19323 csr = musb_readw(regs, MUSB_CSR0);
19324@@ -770,18 +771,12 @@ setup:
19325 handled = service_zero_data_request(
19326 musb, &setup);
19327
19328- /*
19329- * We're expecting no data in any case, so
19330- * always set the DATAEND bit -- doing this
19331- * here helps avoid SetupEnd interrupt coming
19332- * in the idle stage when we're stalling...
19333- */
19334- musb->ackpend |= MUSB_CSR0_P_DATAEND;
19335-
19336 /* status stage might be immediate */
19337- if (handled > 0)
19338+ if (handled > 0) {
19339+ musb->ackpend |= MUSB_CSR0_P_DATAEND;
19340 musb->ep0_state =
19341 MUSB_EP0_STAGE_STATUSIN;
19342+ }
19343 break;
19344
19345 /* sequence #1 (IN to host), includes GET_STATUS
19346diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
19347index 13a1b39..ebcc6d0 100644
19348--- a/drivers/usb/serial/ftdi_sio.c
19349+++ b/drivers/usb/serial/ftdi_sio.c
19350@@ -598,20 +598,6 @@ static struct usb_device_id id_table_combined [] = {
19351 { USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) },
19352 { USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) },
19353 { USB_DEVICE(BANDB_VID, BANDB_USO9ML2_PID) },
19354- { USB_DEVICE(BANDB_VID, BANDB_USOPTL4_PID) },
19355- { USB_DEVICE(BANDB_VID, BANDB_USPTL4_PID) },
19356- { USB_DEVICE(BANDB_VID, BANDB_USO9ML2DR_2_PID) },
19357- { USB_DEVICE(BANDB_VID, BANDB_USO9ML2DR_PID) },
19358- { USB_DEVICE(BANDB_VID, BANDB_USOPTL4DR2_PID) },
19359- { USB_DEVICE(BANDB_VID, BANDB_USOPTL4DR_PID) },
19360- { USB_DEVICE(BANDB_VID, BANDB_485USB9F_2W_PID) },
19361- { USB_DEVICE(BANDB_VID, BANDB_485USB9F_4W_PID) },
19362- { USB_DEVICE(BANDB_VID, BANDB_232USB9M_PID) },
19363- { USB_DEVICE(BANDB_VID, BANDB_485USBTB_2W_PID) },
19364- { USB_DEVICE(BANDB_VID, BANDB_485USBTB_4W_PID) },
19365- { USB_DEVICE(BANDB_VID, BANDB_TTL5USB9M_PID) },
19366- { USB_DEVICE(BANDB_VID, BANDB_TTL3USB9M_PID) },
19367- { USB_DEVICE(BANDB_VID, BANDB_ZZ_PROG1_USB_PID) },
19368 { USB_DEVICE(FTDI_VID, EVER_ECO_PRO_CDS) },
19369 { USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_1_PID) },
19370 { USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_2_PID) },
19371diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
19372index 4586a24..6f31e0d 100644
19373--- a/drivers/usb/serial/ftdi_sio.h
19374+++ b/drivers/usb/serial/ftdi_sio.h
19375@@ -662,20 +662,6 @@
19376 #define BANDB_USOTL4_PID 0xAC01 /* USOTL4 Isolated RS-485 Converter */
19377 #define BANDB_USTL4_PID 0xAC02 /* USTL4 RS-485 Converter */
19378 #define BANDB_USO9ML2_PID 0xAC03 /* USO9ML2 Isolated RS-232 Converter */
19379-#define BANDB_USOPTL4_PID 0xAC11
19380-#define BANDB_USPTL4_PID 0xAC12
19381-#define BANDB_USO9ML2DR_2_PID 0xAC16
19382-#define BANDB_USO9ML2DR_PID 0xAC17
19383-#define BANDB_USOPTL4DR2_PID 0xAC18 /* USOPTL4R-2 2-port Isolated RS-232 Converter */
19384-#define BANDB_USOPTL4DR_PID 0xAC19
19385-#define BANDB_485USB9F_2W_PID 0xAC25
19386-#define BANDB_485USB9F_4W_PID 0xAC26
19387-#define BANDB_232USB9M_PID 0xAC27
19388-#define BANDB_485USBTB_2W_PID 0xAC33
19389-#define BANDB_485USBTB_4W_PID 0xAC34
19390-#define BANDB_TTL5USB9M_PID 0xAC49
19391-#define BANDB_TTL3USB9M_PID 0xAC50
19392-#define BANDB_ZZ_PROG1_USB_PID 0xBA02
19393
19394 /*
19395 * RM Michaelides CANview USB (http://www.rmcan.com)
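
The ftdi_sio hunks (the id_table_combined entries and the matching PID block in the header) are pure match data, but the idiom behind them is worth spelling out: each USB_DEVICE() entry expands to a vendor/product match, the table must end with an empty terminating entry, and MODULE_DEVICE_TABLE() exports it so userspace hotplug can autoload the module. A self-contained sketch with deliberately fake IDs:

#include <linux/module.h>
#include <linux/usb.h>

/* Hypothetical IDs for illustration only -- not real hardware. */
#define EXAMPLE_VID     0x1234
#define EXAMPLE_PID     0x5678

static struct usb_device_id example_id_table[] = {
        { USB_DEVICE(EXAMPLE_VID, EXAMPLE_PID) },
        { }                             /* terminating entry */
};
MODULE_DEVICE_TABLE(usb, example_id_table);
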
19396diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
19397index e0fb294..bbe005c 100644
19398--- a/drivers/usb/serial/generic.c
19399+++ b/drivers/usb/serial/generic.c
19400@@ -489,8 +489,6 @@ void usb_serial_generic_write_bulk_callback(struct urb *urb)
19401 dbg("%s - port %d", __func__, port->number);
19402
19403 if (port->serial->type->max_in_flight_urbs) {
19404- kfree(urb->transfer_buffer);
19405-
19406 spin_lock_irqsave(&port->lock, flags);
19407 --port->urbs_in_flight;
19408 port->tx_bytes_flight -= urb->transfer_buffer_length;
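
The generic.c hunk is a single kfree(), but it encodes a buffer-ownership rule: in max_in_flight_urbs mode the write path kmallocs a fresh transfer buffer per URB, and the write-bulk completion callback is the one place that buffer dies, before the in-flight accounting is unwound under port->lock. A sketch of the callback side (illustrative, not the driver's code):

/* Sketch: the completion handler owns the submit-time buffer. */
static void example_write_bulk_callback(struct urb *urb)
{
        struct usb_serial_port *port = urb->context;
        unsigned long flags;

        kfree(urb->transfer_buffer);    /* kmalloc'ed at write() time */

        spin_lock_irqsave(&port->lock, flags);
        --port->urbs_in_flight;
        port->tx_bytes_flight -= urb->transfer_buffer_length;
        spin_unlock_irqrestore(&port->lock, flags);
}
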
19409diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
19410index 485fa9c..f11abf5 100644
19411--- a/drivers/usb/serial/mos7840.c
19412+++ b/drivers/usb/serial/mos7840.c
19413@@ -121,14 +121,8 @@
19414 * moschip_id_table_combined
19415 */
19416 #define USB_VENDOR_ID_BANDB 0x0856
19417-#define BANDB_DEVICE_ID_USO9ML2_2 0xAC22
19418-#define BANDB_DEVICE_ID_USO9ML2_4 0xAC24
19419-#define BANDB_DEVICE_ID_US9ML2_2 0xAC29
19420-#define BANDB_DEVICE_ID_US9ML2_4 0xAC30
19421-#define BANDB_DEVICE_ID_USPTL4_2 0xAC31
19422-#define BANDB_DEVICE_ID_USPTL4_4 0xAC32
19423-#define BANDB_DEVICE_ID_USOPTL4_2 0xAC42
19424 #define BANDB_DEVICE_ID_USOPTL4_4 0xAC44
19425+#define BANDB_DEVICE_ID_USOPTL4_2 0xAC42
19426
19427 /* This driver also supports
19428 * ATEN UC2324 device using Moschip MCS7840
19429@@ -183,14 +177,8 @@
19430 static struct usb_device_id moschip_port_id_table[] = {
19431 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
19432 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
19433- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)},
19434- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)},
19435- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)},
19436- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)},
19437- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)},
19438- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)},
19439- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
19440 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
19441+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
19442 {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
19443 {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
19444 {} /* terminating entry */
19445@@ -199,14 +187,8 @@ static struct usb_device_id moschip_port_id_table[] = {
19446 static __devinitdata struct usb_device_id moschip_id_table_combined[] = {
19447 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
19448 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
19449- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)},
19450- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)},
19451- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)},
19452- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)},
19453- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)},
19454- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)},
19455- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
19456 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
19457+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
19458 {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
19459 {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
19460 {} /* terminating entry */
19461diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
19462index be3dff1..0577e4b 100644
19463--- a/drivers/usb/serial/option.c
19464+++ b/drivers/usb/serial/option.c
19465@@ -340,10 +340,6 @@ static int option_resume(struct usb_serial *serial);
19466 #define FOUR_G_SYSTEMS_VENDOR_ID 0x1c9e
19467 #define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603
19468
19469-/* Haier products */
19470-#define HAIER_VENDOR_ID 0x201e
19471-#define HAIER_PRODUCT_CE100 0x2009
19472-
19473 static struct usb_device_id option_ids[] = {
19474 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
19475 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
19476@@ -584,48 +580,12 @@ static struct usb_device_id option_ids[] = {
19477 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0086, 0xff, 0xff, 0xff) },
19478 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, 0xff, 0xff) },
19479 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
19480- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0104, 0xff, 0xff, 0xff) },
19481- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0106, 0xff, 0xff, 0xff) },
19482- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0108, 0xff, 0xff, 0xff) },
19483- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0113, 0xff, 0xff, 0xff) },
19484- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0117, 0xff, 0xff, 0xff) },
19485- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0118, 0xff, 0xff, 0xff) },
19486- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0121, 0xff, 0xff, 0xff) },
19487- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0122, 0xff, 0xff, 0xff) },
19488- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0123, 0xff, 0xff, 0xff) },
19489- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0124, 0xff, 0xff, 0xff) },
19490- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0125, 0xff, 0xff, 0xff) },
19491- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0126, 0xff, 0xff, 0xff) },
19492- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0128, 0xff, 0xff, 0xff) },
19493- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0142, 0xff, 0xff, 0xff) },
19494- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0143, 0xff, 0xff, 0xff) },
19495- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0144, 0xff, 0xff, 0xff) },
19496- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0145, 0xff, 0xff, 0xff) },
19497- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0146, 0xff, 0xff, 0xff) },
19498- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0147, 0xff, 0xff, 0xff) },
19499- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0148, 0xff, 0xff, 0xff) },
19500- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0149, 0xff, 0xff, 0xff) },
19501- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0150, 0xff, 0xff, 0xff) },
19502- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0151, 0xff, 0xff, 0xff) },
19503- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0152, 0xff, 0xff, 0xff) },
19504- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0153, 0xff, 0xff, 0xff) },
19505- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0154, 0xff, 0xff, 0xff) },
19506- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0155, 0xff, 0xff, 0xff) },
19507- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0156, 0xff, 0xff, 0xff) },
19508- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0157, 0xff, 0xff, 0xff) },
19509- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0158, 0xff, 0xff, 0xff) },
19510- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0159, 0xff, 0xff, 0xff) },
19511- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0160, 0xff, 0xff, 0xff) },
19512- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) },
19513- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) },
19514 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, /* ZTE CDMA products */
19515 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0027, 0xff, 0xff, 0xff) },
19516 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) },
19517 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0060, 0xff, 0xff, 0xff) },
19518 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) },
19519 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) },
19520- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff) },
19521- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff) },
19522 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) },
19523 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) },
19524 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
19525@@ -639,13 +599,11 @@ static struct usb_device_id option_ids[] = {
19526 { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_G450) },
19527 { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_HSDPA_MINICARD ) }, /* Toshiba 3G HSDPA == Novatel Expedite EU870D MiniCard */
19528 { USB_DEVICE(ALINK_VENDOR_ID, 0x9000) },
19529- { USB_DEVICE(ALINK_VENDOR_ID, 0xce16) },
19530 { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
19531 { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S) },
19532 { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
19533 { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
19534 { USB_DEVICE(FOUR_G_SYSTEMS_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14) },
19535- { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
19536 { } /* Terminating entry */
19537 };
19538 MODULE_DEVICE_TABLE(usb, option_ids);
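
Unlike the plain USB_DEVICE() matches, the ZTE entries above use USB_DEVICE_AND_INTERFACE_INFO(), which also matches on interface class/subclass/protocol. That matters for composite modems: one VID:PID exposes storage and serial interfaces, and binding is restricted to the vendor-specific (0xff/0xff/0xff) ones. A sketch entry with a made-up product ID (0x19d2 is the ZTE vendor ID used throughout the table):

static struct usb_device_id example_ids[] = {
        /* Hypothetical PID 0x9999: bind vendor-specific interfaces only. */
        { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0x9999,
                                        0xff, 0xff, 0xff) },
        { }                             /* terminating entry */
};
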
19539diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
19540index cc313d1..589f6b4 100644
19541--- a/drivers/usb/storage/transport.c
19542+++ b/drivers/usb/storage/transport.c
19543@@ -666,11 +666,10 @@ void usb_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
19544 * to wait for at least one CHECK_CONDITION to determine
19545 * SANE_SENSE support
19546 */
19547- if (unlikely((srb->cmnd[0] == ATA_16 || srb->cmnd[0] == ATA_12) &&
19548+ if ((srb->cmnd[0] == ATA_16 || srb->cmnd[0] == ATA_12) &&
19549 result == USB_STOR_TRANSPORT_GOOD &&
19550 !(us->fflags & US_FL_SANE_SENSE) &&
19551- !(us->fflags & US_FL_BAD_SENSE) &&
19552- !(srb->cmnd[2] & 0x20))) {
19553+ !(srb->cmnd[2] & 0x20)) {
19554 US_DEBUGP("-- SAT supported, increasing auto-sense\n");
19555 us->fflags |= US_FL_SANE_SENSE;
19556 }
19557@@ -719,12 +718,6 @@ Retry_Sense:
19558 if (test_bit(US_FLIDX_TIMED_OUT, &us->dflags)) {
19559 US_DEBUGP("-- auto-sense aborted\n");
19560 srb->result = DID_ABORT << 16;
19561-
19562- /* If SANE_SENSE caused this problem, disable it */
19563- if (sense_size != US_SENSE_SIZE) {
19564- us->fflags &= ~US_FL_SANE_SENSE;
19565- us->fflags |= US_FL_BAD_SENSE;
19566- }
19567 goto Handle_Errors;
19568 }
19569
19570@@ -734,11 +727,10 @@ Retry_Sense:
19571 * (small) sense request. This fixes some USB GSM modems
19572 */
19573 if (temp_result == USB_STOR_TRANSPORT_FAILED &&
19574- sense_size != US_SENSE_SIZE) {
19575+ (us->fflags & US_FL_SANE_SENSE) &&
19576+ sense_size != US_SENSE_SIZE) {
19577 US_DEBUGP("-- auto-sense failure, retry small sense\n");
19578 sense_size = US_SENSE_SIZE;
19579- us->fflags &= ~US_FL_SANE_SENSE;
19580- us->fflags |= US_FL_BAD_SENSE;
19581 goto Retry_Sense;
19582 }
19583
19584@@ -762,7 +754,6 @@ Retry_Sense:
19585 */
19586 if (srb->sense_buffer[7] > (US_SENSE_SIZE - 8) &&
19587 !(us->fflags & US_FL_SANE_SENSE) &&
19588- !(us->fflags & US_FL_BAD_SENSE) &&
19589 (srb->sense_buffer[0] & 0x7C) == 0x70) {
19590 US_DEBUGP("-- SANE_SENSE support enabled\n");
19591 us->fflags |= US_FL_SANE_SENSE;
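
The transport.c hunks revolve around one heuristic: SAT-capable devices can return more than the legacy 18 bytes of sense data (US_FL_SANE_SENSE), but some devices choke on the larger request, so a failed auto-sense is retried once at US_SENSE_SIZE -- and in the fuller variant the device is branded US_FL_BAD_SENSE so the big request is never attempted again. A condensed sketch of the shrink-and-retry, with issue_auto_sense() as a hypothetical stand-in for the REQUEST SENSE submission and the initial size an assumption:

static int auto_sense_with_fallback(struct us_data *us, struct scsi_cmnd *srb)
{
        int sense_size = 96;            /* "large" first try; assumed size */
        int result;

retry:
        result = issue_auto_sense(us, srb, sense_size); /* hypothetical */
        if (result == USB_STOR_TRANSPORT_FAILED &&
            sense_size != US_SENSE_SIZE) {
                sense_size = US_SENSE_SIZE;     /* legacy 18-byte request */
                goto retry;
        }
        return result;
}
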
19592diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
19593index c932f90..d4f034e 100644
19594--- a/drivers/usb/storage/unusual_devs.h
19595+++ b/drivers/usb/storage/unusual_devs.h
19596@@ -818,13 +818,6 @@ UNUSUAL_DEV( 0x066f, 0x8000, 0x0001, 0x0001,
19597 US_SC_DEVICE, US_PR_DEVICE, NULL,
19598 US_FL_FIX_CAPACITY ),
19599
19600-/* Reported by Daniel Kukula <daniel.kuku@gmail.com> */
19601-UNUSUAL_DEV( 0x067b, 0x1063, 0x0100, 0x0100,
19602- "Prolific Technology, Inc.",
19603- "Prolific Storage Gadget",
19604- US_SC_DEVICE, US_PR_DEVICE, NULL,
19605- US_FL_BAD_SENSE ),
19606-
19607 /* Reported by Rogerio Brito <rbrito@ime.usp.br> */
19608 UNUSUAL_DEV( 0x067b, 0x2317, 0x0001, 0x001,
19609 "Prolific Technology, Inc.",
19610@@ -1807,6 +1800,13 @@ UNUSUAL_DEV( 0x2735, 0x100b, 0x0000, 0x9999,
19611 US_SC_DEVICE, US_PR_DEVICE, NULL,
19612 US_FL_GO_SLOW ),
19613
19614+/* Reported by Rohan Hart <rohan.hart17@gmail.com> */
19615+UNUSUAL_DEV( 0x2770, 0x915d, 0x0010, 0x0010,
19616+ "INTOVA",
19617+ "Pixtreme",
19618+ US_SC_DEVICE, US_PR_DEVICE, NULL,
19619+ US_FL_FIX_CAPACITY ),
19620+
19621 /* Reported by Frederic Marchal <frederic.marchal@wowcompany.com>
19622 * Mio Moov 330
19623 */
19624diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
19625index 33197fa..8060b85 100644
19626--- a/drivers/usb/storage/usb.c
19627+++ b/drivers/usb/storage/usb.c
19628@@ -228,7 +228,6 @@ void fill_inquiry_response(struct us_data *us, unsigned char *data,
19629 if (data_len<36) // You lose.
19630 return;
19631
19632- memset(data+8, ' ', 28);
19633 if(data[0]&0x20) { /* USB device currently not connected. Return
19634 peripheral qualifier 001b ("...however, the
19635 physical device is not currently connected
19636@@ -238,15 +237,15 @@ void fill_inquiry_response(struct us_data *us, unsigned char *data,
19637 device, it may return zeros or ASCII spaces
19638 (20h) in those fields until the data is
19639 available from the device."). */
19640+ memset(data+8,0,28);
19641 } else {
19642 u16 bcdDevice = le16_to_cpu(us->pusb_dev->descriptor.bcdDevice);
19643- int n;
19644-
19645- n = strlen(us->unusual_dev->vendorName);
19646- memcpy(data+8, us->unusual_dev->vendorName, min(8, n));
19647- n = strlen(us->unusual_dev->productName);
19648- memcpy(data+16, us->unusual_dev->productName, min(16, n));
19649-
19650+ memcpy(data+8, us->unusual_dev->vendorName,
19651+ strlen(us->unusual_dev->vendorName) > 8 ? 8 :
19652+ strlen(us->unusual_dev->vendorName));
19653+ memcpy(data+16, us->unusual_dev->productName,
19654+ strlen(us->unusual_dev->productName) > 16 ? 16 :
19655+ strlen(us->unusual_dev->productName));
19656 data[32] = 0x30 + ((bcdDevice>>12) & 0x0F);
19657 data[33] = 0x30 + ((bcdDevice>>8) & 0x0F);
19658 data[34] = 0x30 + ((bcdDevice>>4) & 0x0F);
19659@@ -430,8 +429,7 @@ static void adjust_quirks(struct us_data *us)
19660 u16 vid = le16_to_cpu(us->pusb_dev->descriptor.idVendor);
19661 u16 pid = le16_to_cpu(us->pusb_dev->descriptor.idProduct);
19662 unsigned f = 0;
19663- unsigned int mask = (US_FL_SANE_SENSE | US_FL_BAD_SENSE |
19664- US_FL_FIX_CAPACITY |
19665+ unsigned int mask = (US_FL_SANE_SENSE | US_FL_FIX_CAPACITY |
19666 US_FL_CAPACITY_HEURISTICS | US_FL_IGNORE_DEVICE |
19667 US_FL_NOT_LOCKABLE | US_FL_MAX_SECTORS_64 |
19668 US_FL_CAPACITY_OK | US_FL_IGNORE_RESIDUE |
19669@@ -461,9 +459,6 @@ static void adjust_quirks(struct us_data *us)
19670 case 'a':
19671 f |= US_FL_SANE_SENSE;
19672 break;
19673- case 'b':
19674- f |= US_FL_BAD_SENSE;
19675- break;
19676 case 'c':
19677 f |= US_FL_FIX_CAPACITY;
19678 break;
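
The fill_inquiry_response() hunk is about fixed-width SCSI string fields: INQUIRY vendor identification is exactly 8 bytes and product identification 16, space-padded and never NUL-terminated. Both variants in the hunk clamp the copy to the field width; the memset() establishes the padding first. A self-contained sketch of the field fill:

#include <linux/kernel.h>       /* min() */
#include <linux/string.h>

/* Fill a fixed-width, space-padded SCSI INQUIRY string field. */
static void fill_scsi_field(unsigned char *dst, const char *src, int width)
{
        int n = strlen(src);

        memset(dst, ' ', width);        /* SCSI pads with ASCII spaces */
        memcpy(dst, src, min(n, width));
}
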
19679diff --git a/drivers/video/imxfb.c b/drivers/video/imxfb.c
19680index b4b6dec..66358fa 100644
19681--- a/drivers/video/imxfb.c
19682+++ b/drivers/video/imxfb.c
19683@@ -593,8 +593,7 @@ static int imxfb_activate_var(struct fb_var_screeninfo *var, struct fb_info *inf
19684 */
19685 static int imxfb_suspend(struct platform_device *dev, pm_message_t state)
19686 {
19687- struct fb_info *info = platform_get_drvdata(dev);
19688- struct imxfb_info *fbi = info->par;
19689+ struct imxfb_info *fbi = platform_get_drvdata(dev);
19690
19691 pr_debug("%s\n", __func__);
19692
19693@@ -604,8 +603,7 @@ static int imxfb_suspend(struct platform_device *dev, pm_message_t state)
19694
19695 static int imxfb_resume(struct platform_device *dev)
19696 {
19697- struct fb_info *info = platform_get_drvdata(dev);
19698- struct imxfb_info *fbi = info->par;
19699+ struct imxfb_info *fbi = platform_get_drvdata(dev);
19700
19701 pr_debug("%s\n", __func__);
19702
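
The imxfb hunks are two sides of one contract: platform_get_drvdata() hands back exactly the pointer that probe stored with platform_set_drvdata(), so suspend/resume must agree with probe about its type. With the fb_info variant the driver-private state sits one ->par hop away. A sketch of that pairing, assuming probe stored the struct fb_info:

/* Probe side (assumed): platform_set_drvdata(pdev, info); */
static int example_fb_suspend(struct platform_device *pdev, pm_message_t msg)
{
        struct fb_info *info = platform_get_drvdata(pdev);
        struct imxfb_info *fbi = info->par;     /* driver-private state */

        /* ... quiesce the controller via fbi ... */
        return 0;
}
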
19703diff --git a/drivers/video/matrox/g450_pll.c b/drivers/video/matrox/g450_pll.c
19704index c15f8a5..09f6e04 100644
19705--- a/drivers/video/matrox/g450_pll.c
19706+++ b/drivers/video/matrox/g450_pll.c
19707@@ -368,8 +368,7 @@ static int __g450_setclk(struct matrox_fb_info *minfo, unsigned int fout,
19708 M1064_XDVICLKCTRL_C1DVICLKEN |
19709 M1064_XDVICLKCTRL_DVILOOPCTL |
19710 M1064_XDVICLKCTRL_P1LOOPBWDTCTL;
19711- /* Setting this breaks PC systems so don't do it */
19712- /* matroxfb_DAC_out(minfo, M1064_XDVICLKCTRL, tmp); */
19713+ matroxfb_DAC_out(minfo, M1064_XDVICLKCTRL, tmp);
19714 matroxfb_DAC_out(minfo, M1064_XPWRCTRL,
19715 xpwrctrl);
19716
19717diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c
19718index 772ba3f..054ef29 100644
19719--- a/drivers/video/mx3fb.c
19720+++ b/drivers/video/mx3fb.c
19721@@ -324,11 +324,8 @@ static void sdc_enable_channel(struct mx3fb_info *mx3_fbi)
19722 unsigned long flags;
19723 dma_cookie_t cookie;
19724
19725- if (mx3_fbi->txd)
19726- dev_dbg(mx3fb->dev, "mx3fbi %p, desc %p, sg %p\n", mx3_fbi,
19727- to_tx_desc(mx3_fbi->txd), to_tx_desc(mx3_fbi->txd)->sg);
19728- else
19729- dev_dbg(mx3fb->dev, "mx3fbi %p, txd = NULL\n", mx3_fbi);
19730+ dev_dbg(mx3fb->dev, "mx3fbi %p, desc %p, sg %p\n", mx3_fbi,
19731+ to_tx_desc(mx3_fbi->txd), to_tx_desc(mx3_fbi->txd)->sg);
19732
19733 /* This enables the channel */
19734 if (mx3_fbi->cookie < 0) {
19735@@ -649,7 +646,6 @@ static int sdc_set_global_alpha(struct mx3fb_data *mx3fb, bool enable, uint8_t a
19736
19737 static void sdc_set_brightness(struct mx3fb_data *mx3fb, uint8_t value)
19738 {
19739- dev_dbg(mx3fb->dev, "%s: value = %d\n", __func__, value);
19740 /* This might be board-specific */
19741 mx3fb_write_reg(mx3fb, 0x03000000UL | value << 16, SDC_PWM_CTRL);
19742 return;
19743@@ -1490,12 +1486,12 @@ static int mx3fb_probe(struct platform_device *pdev)
19744 goto ersdc0;
19745 }
19746
19747- mx3fb->backlight_level = 255;
19748-
19749 ret = init_fb_chan(mx3fb, to_idmac_chan(chan));
19750 if (ret < 0)
19751 goto eisdc0;
19752
19753+ mx3fb->backlight_level = 255;
19754+
19755 return 0;
19756
19757 eisdc0:
19758diff --git a/drivers/video/s3c-fb.c b/drivers/video/s3c-fb.c
19759index 53cb722..adf9632 100644
19760--- a/drivers/video/s3c-fb.c
19761+++ b/drivers/video/s3c-fb.c
19762@@ -211,23 +211,21 @@ static int s3c_fb_check_var(struct fb_var_screeninfo *var,
19763
19764 /**
19765 * s3c_fb_calc_pixclk() - calculate the divider to create the pixel clock.
19766+ * @id: window id.
19767 * @sfb: The hardware state.
19768 * @pixclock: The pixel clock wanted, in picoseconds.
19769 *
19770 * Given the specified pixel clock, work out the necessary divider to get
19771 * close to the output frequency.
19772 */
19773-static int s3c_fb_calc_pixclk(struct s3c_fb *sfb, unsigned int pixclk)
19774+static int s3c_fb_calc_pixclk(unsigned char id, struct s3c_fb *sfb, unsigned int pixclk)
19775 {
19776+ struct s3c_fb_pd_win *win = sfb->pdata->win[id];
19777 unsigned long clk = clk_get_rate(sfb->bus_clk);
19778- unsigned long long tmp;
19779 unsigned int result;
19780
19781- tmp = (unsigned long long)clk;
19782- tmp *= pixclk;
19783-
19784- do_div(tmp, 1000000000UL);
19785- result = (unsigned int)tmp / 1000;
19786+ pixclk *= win->win_mode.refresh;
19787+ result = clk / pixclk;
19788
19789 dev_dbg(sfb->dev, "pixclk=%u, clk=%lu, div=%d (%lu)\n",
19790 pixclk, clk, result, clk / result);
19791@@ -303,7 +301,7 @@ static int s3c_fb_set_par(struct fb_info *info)
19792 /* use window 0 as the basis for the lcd output timings */
19793
19794 if (win_no == 0) {
19795- clkdiv = s3c_fb_calc_pixclk(sfb, var->pixclock);
19796+ clkdiv = s3c_fb_calc_pixclk(win_no, sfb, var->pixclock);
19797
19798 data = sfb->pdata->vidcon0;
19799 data &= ~(VIDCON0_CLKVAL_F_MASK | VIDCON0_CLKDIR);
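
The s3c_fb_calc_pixclk() hunk swaps out the picosecond-based divider math, and the removed arithmetic deserves a note: fb_var_screeninfo.pixclock is a period in picoseconds, so the divider is div = bus_clk_hz * pixclock_ps / 10^12. For example, a 133 MHz bus clock and pixclock = 37037 ps (roughly a 27 MHz pixel clock) gives 133e6 * 37037 / 1e12, about 4.9. The product needs a 64-bit intermediate, and the divide is split in two because do_div() only accepts a 32-bit divisor and 10^12 does not fit. A sketch of the computation:

#include <asm/div64.h>

/* div = clk_hz * pixclock_ps / 10^12, split for do_div()'s u32 divisor. */
static unsigned int pixclock_to_divider(unsigned long clk_hz, u32 pixclock_ps)
{
        unsigned long long tmp = (unsigned long long)clk_hz * pixclock_ps;

        do_div(tmp, 1000000000UL);              /* divide by 10^9 ... */
        return (unsigned int)tmp / 1000;        /* ... then by 10^3 */
}
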
19800diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
19801index 4bdb7f1..6a51edd 100644
19802--- a/drivers/watchdog/iTCO_wdt.c
19803+++ b/drivers/watchdog/iTCO_wdt.c
19804@@ -1,5 +1,5 @@
19805 /*
19806- * intel TCO Watchdog Driver
19807+ * intel TCO Watchdog Driver (Used in i82801 and i63xxESB chipsets)
19808 *
19809 * (c) Copyright 2006-2009 Wim Van Sebroeck <wim@iguana.be>.
19810 *
19811@@ -14,24 +14,47 @@
19812 *
19813 * The TCO watchdog is implemented in the following I/O controller hubs:
19814 * (See the intel documentation on http://developer.intel.com.)
19815- * document number 290655-003, 290677-014: 82801AA (ICH), 82801AB (ICHO)
19816- * document number 290687-002, 298242-027: 82801BA (ICH2)
19817- * document number 290733-003, 290739-013: 82801CA (ICH3-S)
19818- * document number 290716-001, 290718-007: 82801CAM (ICH3-M)
19819- * document number 290744-001, 290745-025: 82801DB (ICH4)
19820- * document number 252337-001, 252663-008: 82801DBM (ICH4-M)
19821- * document number 273599-001, 273645-002: 82801E (C-ICH)
19822- * document number 252516-001, 252517-028: 82801EB (ICH5), 82801ER (ICH5R)
19823- * document number 300641-004, 300884-013: 6300ESB
19824- * document number 301473-002, 301474-026: 82801F (ICH6)
19825- * document number 313082-001, 313075-006: 631xESB, 632xESB
19826- * document number 307013-003, 307014-024: 82801G (ICH7)
19827- * document number 313056-003, 313057-017: 82801H (ICH8)
19828- * document number 316972-004, 316973-012: 82801I (ICH9)
19829- * document number 319973-002, 319974-002: 82801J (ICH10)
19830- * document number 322169-001, 322170-003: 5 Series, 3400 Series (PCH)
19831- * document number 320066-003, 320257-008: EP80597 (IICH)
19832- * document number TBD : Cougar Point (CPT)
19833+ * 82801AA (ICH) : document number 290655-003, 290677-014,
19834+ * 82801AB (ICHO) : document number 290655-003, 290677-014,
19835+ * 82801BA (ICH2) : document number 290687-002, 298242-027,
19836+ * 82801BAM (ICH2-M) : document number 290687-002, 298242-027,
19837+ * 82801CA (ICH3-S) : document number 290733-003, 290739-013,
19838+ * 82801CAM (ICH3-M) : document number 290716-001, 290718-007,
19839+ * 82801DB (ICH4) : document number 290744-001, 290745-025,
19840+ * 82801DBM (ICH4-M) : document number 252337-001, 252663-008,
19841+ * 82801E (C-ICH) : document number 273599-001, 273645-002,
19842+ * 82801EB (ICH5) : document number 252516-001, 252517-028,
19843+ * 82801ER (ICH5R) : document number 252516-001, 252517-028,
19844+ * 6300ESB (6300ESB) : document number 300641-004, 300884-013,
19845+ * 82801FB (ICH6) : document number 301473-002, 301474-026,
19846+ * 82801FR (ICH6R) : document number 301473-002, 301474-026,
19847+ * 82801FBM (ICH6-M) : document number 301473-002, 301474-026,
19848+ * 82801FW (ICH6W) : document number 301473-001, 301474-026,
19849+ * 82801FRW (ICH6RW) : document number 301473-001, 301474-026,
19850+ * 631xESB (631xESB) : document number 313082-001, 313075-006,
19851+ * 632xESB (632xESB) : document number 313082-001, 313075-006,
19852+ * 82801GB (ICH7) : document number 307013-003, 307014-024,
19853+ * 82801GR (ICH7R) : document number 307013-003, 307014-024,
19854+ * 82801GDH (ICH7DH) : document number 307013-003, 307014-024,
19855+ * 82801GBM (ICH7-M) : document number 307013-003, 307014-024,
19856+ * 82801GHM (ICH7-M DH) : document number 307013-003, 307014-024,
19857+ * 82801GU (ICH7-U) : document number 307013-003, 307014-024,
19858+ * 82801HB (ICH8) : document number 313056-003, 313057-017,
19859+ * 82801HR (ICH8R) : document number 313056-003, 313057-017,
19860+ * 82801HBM (ICH8M) : document number 313056-003, 313057-017,
19861+ * 82801HH (ICH8DH) : document number 313056-003, 313057-017,
19862+ * 82801HO (ICH8DO) : document number 313056-003, 313057-017,
19863+ * 82801HEM (ICH8M-E) : document number 313056-003, 313057-017,
19864+ * 82801IB (ICH9) : document number 316972-004, 316973-012,
19865+ * 82801IR (ICH9R) : document number 316972-004, 316973-012,
19866+ * 82801IH (ICH9DH) : document number 316972-004, 316973-012,
19867+ * 82801IO (ICH9DO) : document number 316972-004, 316973-012,
19868+ * 82801IBM (ICH9M) : document number 316972-004, 316973-012,
19869+ * 82801IEM (ICH9M-E) : document number 316972-004, 316973-012,
19870+ * 82801JIB (ICH10) : document number 319973-002, 319974-002,
19871+ * 82801JIR (ICH10R) : document number 319973-002, 319974-002,
19872+ * 82801JD (ICH10D) : document number 319973-002, 319974-002,
19873+ * 82801JDO (ICH10DO) : document number 319973-002, 319974-002
19874 */
19875
19876 /*
19877@@ -99,24 +122,6 @@ enum iTCO_chipsets {
19878 TCO_ICH10R, /* ICH10R */
19879 TCO_ICH10D, /* ICH10D */
19880 TCO_ICH10DO, /* ICH10DO */
19881- TCO_PCH, /* PCH Desktop Full Featured */
19882- TCO_PCHM, /* PCH Mobile Full Featured */
19883- TCO_P55, /* P55 */
19884- TCO_PM55, /* PM55 */
19885- TCO_H55, /* H55 */
19886- TCO_QM57, /* QM57 */
19887- TCO_H57, /* H57 */
19888- TCO_HM55, /* HM55 */
19889- TCO_Q57, /* Q57 */
19890- TCO_HM57, /* HM57 */
19891- TCO_PCHMSFF, /* PCH Mobile SFF Full Featured */
19892- TCO_QS57, /* QS57 */
19893- TCO_3400, /* 3400 */
19894- TCO_3420, /* 3420 */
19895- TCO_3450, /* 3450 */
19896- TCO_EP80579, /* EP80579 */
19897- TCO_CPTD, /* CPT Desktop */
19898- TCO_CPTM, /* CPT Mobile */
19899 };
19900
19901 static struct {
19902@@ -157,24 +162,6 @@ static struct {
19903 {"ICH10R", 2},
19904 {"ICH10D", 2},
19905 {"ICH10DO", 2},
19906- {"PCH Desktop Full Featured", 2},
19907- {"PCH Mobile Full Featured", 2},
19908- {"P55", 2},
19909- {"PM55", 2},
19910- {"H55", 2},
19911- {"QM57", 2},
19912- {"H57", 2},
19913- {"HM55", 2},
19914- {"Q57", 2},
19915- {"HM57", 2},
19916- {"PCH Mobile SFF Full Featured", 2},
19917- {"QS57", 2},
19918- {"3400", 2},
19919- {"3420", 2},
19920- {"3450", 2},
19921- {"EP80579", 2},
19922- {"CPT Desktop", 2},
19923- {"CPT Mobile", 2},
19924 {NULL, 0}
19925 };
19926
19927@@ -243,24 +230,6 @@ static struct pci_device_id iTCO_wdt_pci_tbl[] = {
19928 { ITCO_PCI_DEVICE(0x3a16, TCO_ICH10R)},
19929 { ITCO_PCI_DEVICE(0x3a1a, TCO_ICH10D)},
19930 { ITCO_PCI_DEVICE(0x3a14, TCO_ICH10DO)},
19931- { ITCO_PCI_DEVICE(0x3b00, TCO_PCH)},
19932- { ITCO_PCI_DEVICE(0x3b01, TCO_PCHM)},
19933- { ITCO_PCI_DEVICE(0x3b02, TCO_P55)},
19934- { ITCO_PCI_DEVICE(0x3b03, TCO_PM55)},
19935- { ITCO_PCI_DEVICE(0x3b06, TCO_H55)},
19936- { ITCO_PCI_DEVICE(0x3b07, TCO_QM57)},
19937- { ITCO_PCI_DEVICE(0x3b08, TCO_H57)},
19938- { ITCO_PCI_DEVICE(0x3b09, TCO_HM55)},
19939- { ITCO_PCI_DEVICE(0x3b0a, TCO_Q57)},
19940- { ITCO_PCI_DEVICE(0x3b0b, TCO_HM57)},
19941- { ITCO_PCI_DEVICE(0x3b0d, TCO_PCHMSFF)},
19942- { ITCO_PCI_DEVICE(0x3b0f, TCO_QS57)},
19943- { ITCO_PCI_DEVICE(0x3b12, TCO_3400)},
19944- { ITCO_PCI_DEVICE(0x3b14, TCO_3420)},
19945- { ITCO_PCI_DEVICE(0x3b16, TCO_3450)},
19946- { ITCO_PCI_DEVICE(0x5031, TCO_EP80579)},
19947- { ITCO_PCI_DEVICE(0x1c42, TCO_CPTD)},
19948- { ITCO_PCI_DEVICE(0x1c43, TCO_CPTM)},
19949 { 0, }, /* End of list */
19950 };
19951 MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl);
19952diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
19953index 4204336..d31505b 100644
19954--- a/drivers/xen/balloon.c
19955+++ b/drivers/xen/balloon.c
19956@@ -66,6 +66,8 @@ struct balloon_stats {
19957 /* We aim for 'current allocation' == 'target allocation'. */
19958 unsigned long current_pages;
19959 unsigned long target_pages;
19960+ /* We may hit the hard limit in Xen. If we do then we remember it. */
19961+ unsigned long hard_limit;
19962 /*
19963 * Drivers may alter the memory reservation independently, but they
19964 * must inform the balloon driver so we avoid hitting the hard limit.
19965@@ -134,8 +136,6 @@ static void balloon_append(struct page *page)
19966 list_add(&page->lru, &ballooned_pages);
19967 balloon_stats.balloon_low++;
19968 }
19969-
19970- totalram_pages--;
19971 }
19972
19973 /* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
19974@@ -156,8 +156,6 @@ static struct page *balloon_retrieve(void)
19975 else
19976 balloon_stats.balloon_low--;
19977
19978- totalram_pages++;
19979-
19980 return page;
19981 }
19982
19983@@ -183,7 +181,7 @@ static void balloon_alarm(unsigned long unused)
19984
19985 static unsigned long current_target(void)
19986 {
19987- unsigned long target = balloon_stats.target_pages;
19988+ unsigned long target = min(balloon_stats.target_pages, balloon_stats.hard_limit);
19989
19990 target = min(target,
19991 balloon_stats.current_pages +
19992@@ -219,10 +217,23 @@ static int increase_reservation(unsigned long nr_pages)
19993 set_xen_guest_handle(reservation.extent_start, frame_list);
19994 reservation.nr_extents = nr_pages;
19995 rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
19996- if (rc < 0)
19997+ if (rc < nr_pages) {
19998+ if (rc > 0) {
19999+ int ret;
20000+
20001+ /* We hit the Xen hard limit: reprobe. */
20002+ reservation.nr_extents = rc;
20003+ ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
20004+ &reservation);
20005+ BUG_ON(ret != rc);
20006+ }
20007+ if (rc >= 0)
20008+ balloon_stats.hard_limit = (balloon_stats.current_pages + rc -
20009+ balloon_stats.driver_pages);
20010 goto out;
20011+ }
20012
20013- for (i = 0; i < rc; i++) {
20014+ for (i = 0; i < nr_pages; i++) {
20015 page = balloon_retrieve();
20016 BUG_ON(page == NULL);
20017
20018@@ -248,12 +259,13 @@ static int increase_reservation(unsigned long nr_pages)
20019 __free_page(page);
20020 }
20021
20022- balloon_stats.current_pages += rc;
20023+ balloon_stats.current_pages += nr_pages;
20024+ totalram_pages = balloon_stats.current_pages;
20025
20026 out:
20027 spin_unlock_irqrestore(&balloon_lock, flags);
20028
20029- return rc < 0 ? rc : rc != nr_pages;
20030+ return 0;
20031 }
20032
20033 static int decrease_reservation(unsigned long nr_pages)
20034@@ -311,6 +323,7 @@ static int decrease_reservation(unsigned long nr_pages)
20035 BUG_ON(ret != nr_pages);
20036
20037 balloon_stats.current_pages -= nr_pages;
20038+ totalram_pages = balloon_stats.current_pages;
20039
20040 spin_unlock_irqrestore(&balloon_lock, flags);
20041
20042@@ -354,6 +367,7 @@ static void balloon_process(struct work_struct *work)
20043 static void balloon_set_new_target(unsigned long target)
20044 {
20045 /* No need for lock. Not read-modify-write updates. */
20046+ balloon_stats.hard_limit = ~0UL;
20047 balloon_stats.target_pages = target;
20048 schedule_work(&balloon_worker);
20049 }
20050@@ -408,10 +422,12 @@ static int __init balloon_init(void)
20051 pr_info("xen_balloon: Initialising balloon driver.\n");
20052
20053 balloon_stats.current_pages = min(xen_start_info->nr_pages, max_pfn);
20054+ totalram_pages = balloon_stats.current_pages;
20055 balloon_stats.target_pages = balloon_stats.current_pages;
20056 balloon_stats.balloon_low = 0;
20057 balloon_stats.balloon_high = 0;
20058 balloon_stats.driver_pages = 0UL;
20059+ balloon_stats.hard_limit = ~0UL;
20060
20061 init_timer(&balloon_timer);
20062 balloon_timer.data = 0;
20063@@ -456,6 +472,9 @@ module_exit(balloon_exit);
20064 BALLOON_SHOW(current_kb, "%lu\n", PAGES2KB(balloon_stats.current_pages));
20065 BALLOON_SHOW(low_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_low));
20066 BALLOON_SHOW(high_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_high));
20067+BALLOON_SHOW(hard_limit_kb,
20068+ (balloon_stats.hard_limit!=~0UL) ? "%lu\n" : "???\n",
20069+ (balloon_stats.hard_limit!=~0UL) ? PAGES2KB(balloon_stats.hard_limit) : 0);
20070 BALLOON_SHOW(driver_kb, "%lu\n", PAGES2KB(balloon_stats.driver_pages));
20071
20072 static ssize_t show_target_kb(struct sys_device *dev, struct sysdev_attribute *attr,
20073@@ -525,6 +544,7 @@ static struct attribute *balloon_info_attrs[] = {
20074 &attr_current_kb.attr,
20075 &attr_low_kb.attr,
20076 &attr_high_kb.attr,
20077+ &attr_hard_limit_kb.attr,
20078 &attr_driver_kb.attr,
20079 NULL
20080 };
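
The balloon hunks add a self-calibrating ceiling: XENMEM_populate_physmap returns how many extents were actually granted, so a short count means the domain hit its Xen-imposed limit; the partial grant is handed straight back via XENMEM_decrease_reservation and the ceiling is recorded in hard_limit. The only consumer is then the clamp in current_target(), sketched below -- hard_limit starts at ~0UL, so the clamp is a no-op until a shortfall is observed:

/* Sketch: requests above the recorded ceiling are silently truncated. */
static unsigned long clamped_target(const struct balloon_stats *stats)
{
        return min(stats->target_pages, stats->hard_limit);
}
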
20081diff --git a/drivers/xen/events.c b/drivers/xen/events.c
20082index ce602dd..2f57276 100644
20083--- a/drivers/xen/events.c
20084+++ b/drivers/xen/events.c
20085@@ -474,9 +474,6 @@ static void unbind_from_irq(unsigned int irq)
20086 bind_evtchn_to_cpu(evtchn, 0);
20087
20088 evtchn_to_irq[evtchn] = -1;
20089- }
20090-
20091- if (irq_info[irq].type != IRQT_UNBOUND) {
20092 irq_info[irq] = mk_unbound_info();
20093
20094 dynamic_irq_cleanup(irq);
20095diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
20096index 5d42d55..10d03d7 100644
20097--- a/drivers/xen/manage.c
20098+++ b/drivers/xen/manage.c
20099@@ -43,6 +43,7 @@ static int xen_suspend(void *data)
20100 if (err) {
20101 printk(KERN_ERR "xen_suspend: sysdev_suspend failed: %d\n",
20102 err);
20103+ dpm_resume_noirq(PMSG_RESUME);
20104 return err;
20105 }
20106
20107@@ -68,6 +69,7 @@ static int xen_suspend(void *data)
20108 }
20109
20110 sysdev_resume();
20111+ dpm_resume_noirq(PMSG_RESUME);
20112
20113 return 0;
20114 }
20115@@ -79,12 +81,6 @@ static void do_suspend(void)
20116
20117 shutting_down = SHUTDOWN_SUSPEND;
20118
20119- err = stop_machine_create();
20120- if (err) {
20121- printk(KERN_ERR "xen suspend: failed to setup stop_machine %d\n", err);
20122- goto out;
20123- }
20124-
20125 #ifdef CONFIG_PREEMPT
20126 /* If the kernel is preemptible, we need to freeze all the processes
20127 to prevent them from being in the middle of a pagetable update
20128@@ -92,14 +88,14 @@ static void do_suspend(void)
20129 err = freeze_processes();
20130 if (err) {
20131 printk(KERN_ERR "xen suspend: freeze failed %d\n", err);
20132- goto out_destroy_sm;
20133+ return;
20134 }
20135 #endif
20136
20137 err = dpm_suspend_start(PMSG_SUSPEND);
20138 if (err) {
20139 printk(KERN_ERR "xen suspend: dpm_suspend_start %d\n", err);
20140- goto out_thaw;
20141+ goto out;
20142 }
20143
20144 printk(KERN_DEBUG "suspending xenstore...\n");
20145@@ -108,39 +104,32 @@ static void do_suspend(void)
20146 err = dpm_suspend_noirq(PMSG_SUSPEND);
20147 if (err) {
20148 printk(KERN_ERR "dpm_suspend_noirq failed: %d\n", err);
20149- goto out_resume;
20150+ goto resume_devices;
20151 }
20152
20153 err = stop_machine(xen_suspend, &cancelled, cpumask_of(0));
20154-
20155- dpm_resume_noirq(PMSG_RESUME);
20156-
20157 if (err) {
20158 printk(KERN_ERR "failed to start xen_suspend: %d\n", err);
20159- cancelled = 1;
20160+ goto out;
20161 }
20162
20163-out_resume:
20164 if (!cancelled) {
20165 xen_arch_resume();
20166 xs_resume();
20167 } else
20168 xs_suspend_cancel();
20169
20170+ dpm_resume_noirq(PMSG_RESUME);
20171+
20172+resume_devices:
20173 dpm_resume_end(PMSG_RESUME);
20174
20175 /* Make sure timer events get retriggered on all CPUs */
20176 clock_was_set();
20177-
20178-out_thaw:
20179+out:
20180 #ifdef CONFIG_PREEMPT
20181 thaw_processes();
20182-
20183-out_destroy_sm:
20184 #endif
20185- stop_machine_destroy();
20186-
20187-out:
20188 shutting_down = SHUTDOWN_INVALID;
20189 }
20190 #endif /* CONFIG_PM_SLEEP */
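
The manage.c hunks are mostly about unwind order, and both versions follow the standard kernel goto-ladder discipline: each setup step gets a label, and a failure jumps to the label that undoes only the steps already completed, in reverse order. A stripped-down sketch of the do_suspend() flow with that ladder (the stop_machine body is elided; names are ours):

static int example_suspend_flow(void)
{
        int err;

        err = freeze_processes();
        if (err)
                return err;

        err = dpm_suspend_start(PMSG_SUSPEND);
        if (err)
                goto out_thaw;

        err = dpm_suspend_noirq(PMSG_SUSPEND);
        if (err)
                goto out_resume;

        /* ... stop_machine(xen_suspend, ...) runs here ... */

        dpm_resume_noirq(PMSG_RESUME);
out_resume:
        dpm_resume_end(PMSG_RESUME);
out_thaw:
        thaw_processes();
        return err;
}
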
20191diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
20192index 649fcdf..d42e25d 100644
20193--- a/drivers/xen/xenbus/xenbus_probe.c
20194+++ b/drivers/xen/xenbus/xenbus_probe.c
20195@@ -454,21 +454,21 @@ static ssize_t xendev_show_nodename(struct device *dev,
20196 {
20197 return sprintf(buf, "%s\n", to_xenbus_device(dev)->nodename);
20198 }
20199-static DEVICE_ATTR(nodename, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_nodename, NULL);
20200+DEVICE_ATTR(nodename, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_nodename, NULL);
20201
20202 static ssize_t xendev_show_devtype(struct device *dev,
20203 struct device_attribute *attr, char *buf)
20204 {
20205 return sprintf(buf, "%s\n", to_xenbus_device(dev)->devicetype);
20206 }
20207-static DEVICE_ATTR(devtype, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_devtype, NULL);
20208+DEVICE_ATTR(devtype, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_devtype, NULL);
20209
20210 static ssize_t xendev_show_modalias(struct device *dev,
20211 struct device_attribute *attr, char *buf)
20212 {
20213 return sprintf(buf, "xen:%s\n", to_xenbus_device(dev)->devicetype);
20214 }
20215-static DEVICE_ATTR(modalias, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_modalias, NULL);
20216+DEVICE_ATTR(modalias, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_modalias, NULL);
20217
20218 int xenbus_probe_node(struct xen_bus_type *bus,
20219 const char *type,
20220@@ -843,7 +843,7 @@ postcore_initcall(xenbus_probe_init);
20221
20222 MODULE_LICENSE("GPL");
20223
20224-static int is_device_connecting(struct device *dev, void *data)
20225+static int is_disconnected_device(struct device *dev, void *data)
20226 {
20227 struct xenbus_device *xendev = to_xenbus_device(dev);
20228 struct device_driver *drv = data;
20229@@ -861,15 +861,14 @@ static int is_device_connecting(struct device *dev, void *data)
20230 return 0;
20231
20232 xendrv = to_xenbus_driver(dev->driver);
20233- return (xendev->state < XenbusStateConnected ||
20234- (xendev->state == XenbusStateConnected &&
20235- xendrv->is_ready && !xendrv->is_ready(xendev)));
20236+ return (xendev->state != XenbusStateConnected ||
20237+ (xendrv->is_ready && !xendrv->is_ready(xendev)));
20238 }
20239
20240-static int exists_connecting_device(struct device_driver *drv)
20241+static int exists_disconnected_device(struct device_driver *drv)
20242 {
20243 return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
20244- is_device_connecting);
20245+ is_disconnected_device);
20246 }
20247
20248 static int print_device_status(struct device *dev, void *data)
20249@@ -885,13 +884,10 @@ static int print_device_status(struct device *dev, void *data)
20250 /* Information only: is this too noisy? */
20251 printk(KERN_INFO "XENBUS: Device with no driver: %s\n",
20252 xendev->nodename);
20253- } else if (xendev->state < XenbusStateConnected) {
20254- enum xenbus_state rstate = XenbusStateUnknown;
20255- if (xendev->otherend)
20256- rstate = xenbus_read_driver_state(xendev->otherend);
20257+ } else if (xendev->state != XenbusStateConnected) {
20258 printk(KERN_WARNING "XENBUS: Timeout connecting "
20259- "to device: %s (local state %d, remote state %d)\n",
20260- xendev->nodename, xendev->state, rstate);
20261+ "to device: %s (state %d)\n",
20262+ xendev->nodename, xendev->state);
20263 }
20264
20265 return 0;
20266@@ -901,7 +897,7 @@ static int print_device_status(struct device *dev, void *data)
20267 static int ready_to_wait_for_devices;
20268
20269 /*
20270- * On a 5-minute timeout, wait for all devices currently configured. We need
20271+ * On a 10 second timeout, wait for all devices currently configured. We need
20272 * to do this to guarantee that the filesystems and / or network devices
20273 * needed for boot are available, before we can allow the boot to proceed.
20274 *
20275@@ -916,30 +912,18 @@ static int ready_to_wait_for_devices;
20276 */
20277 static void wait_for_devices(struct xenbus_driver *xendrv)
20278 {
20279- unsigned long start = jiffies;
20280+ unsigned long timeout = jiffies + 10*HZ;
20281 struct device_driver *drv = xendrv ? &xendrv->driver : NULL;
20282- unsigned int seconds_waited = 0;
20283
20284 if (!ready_to_wait_for_devices || !xen_domain())
20285 return;
20286
20287- while (exists_connecting_device(drv)) {
20288- if (time_after(jiffies, start + (seconds_waited+5)*HZ)) {
20289- if (!seconds_waited)
20290- printk(KERN_WARNING "XENBUS: Waiting for "
20291- "devices to initialise: ");
20292- seconds_waited += 5;
20293- printk("%us...", 300 - seconds_waited);
20294- if (seconds_waited == 300)
20295- break;
20296- }
20297-
20298+ while (exists_disconnected_device(drv)) {
20299+ if (time_after(jiffies, timeout))
20300+ break;
20301 schedule_timeout_interruptible(HZ/10);
20302 }
20303
20304- if (seconds_waited)
20305- printk("\n");
20306-
20307 bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
20308 print_device_status);
20309 }
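
Both versions of wait_for_devices() are bounded polls, and the one subtlety worth isolating is the deadline test: jiffies wraps, so deadlines must be compared with time_after() rather than a plain '>'. A sketch of the 10-second variant, with done() standing in for exists_disconnected_device():

/* Sketch: poll at ~10 Hz until done() or a wrap-safe 10 s deadline. */
static void wait_with_deadline(bool (*done)(void))
{
        unsigned long deadline = jiffies + 10 * HZ;

        while (!done()) {
                if (time_after(jiffies, deadline))
                        break;
                schedule_timeout_interruptible(HZ / 10);
        }
}
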
20310diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
20311index 69357c0..14a8644 100644
20312--- a/fs/9p/vfs_super.c
20313+++ b/fs/9p/vfs_super.c
20314@@ -188,8 +188,7 @@ static void v9fs_kill_super(struct super_block *s)
20315
20316 P9_DPRINTK(P9_DEBUG_VFS, " %p\n", s);
20317
20318- if (s->s_root)
20319- v9fs_dentry_release(s->s_root); /* clunk root */
20320+ v9fs_dentry_release(s->s_root); /* clunk root */
20321
20322 kill_anon_super(s);
20323
20324diff --git a/fs/affs/affs.h b/fs/affs/affs.h
20325index 0e40caa..e511dc6 100644
20326--- a/fs/affs/affs.h
20327+++ b/fs/affs/affs.h
20328@@ -106,8 +106,8 @@ struct affs_sb_info {
20329 u32 s_last_bmap;
20330 struct buffer_head *s_bmap_bh;
20331 char *s_prefix; /* Prefix for volumes and assigns. */
20332+ int s_prefix_len; /* Length of prefix. */
20333 char s_volume[32]; /* Volume prefix for absolute symlinks. */
20334- spinlock_t symlink_lock; /* protects the previous two */
20335 };
20336
20337 #define SF_INTL 0x0001 /* International filesystem. */
20338diff --git a/fs/affs/namei.c b/fs/affs/namei.c
20339index d70bbba..960d336 100644
20340--- a/fs/affs/namei.c
20341+++ b/fs/affs/namei.c
20342@@ -341,13 +341,10 @@ affs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
20343 p = (char *)AFFS_HEAD(bh)->table;
20344 lc = '/';
20345 if (*symname == '/') {
20346- struct affs_sb_info *sbi = AFFS_SB(sb);
20347 while (*symname == '/')
20348 symname++;
20349- spin_lock(&sbi->symlink_lock);
20350- while (sbi->s_volume[i]) /* Cannot overflow */
20351- *p++ = sbi->s_volume[i++];
20352- spin_unlock(&sbi->symlink_lock);
20353+ while (AFFS_SB(sb)->s_volume[i]) /* Cannot overflow */
20354+ *p++ = AFFS_SB(sb)->s_volume[i++];
20355 }
20356 while (i < maxlen && (c = *symname++)) {
20357 if (c == '.' && lc == '/' && *symname == '.' && symname[1] == '/') {
20358diff --git a/fs/affs/super.c b/fs/affs/super.c
20359index d41e967..104fdcb 100644
20360--- a/fs/affs/super.c
20361+++ b/fs/affs/super.c
20362@@ -203,7 +203,7 @@ parse_options(char *options, uid_t *uid, gid_t *gid, int *mode, int *reserved, s
20363 switch (token) {
20364 case Opt_bs:
20365 if (match_int(&args[0], &n))
20366- return 0;
20367+ return -EINVAL;
20368 if (n != 512 && n != 1024 && n != 2048
20369 && n != 4096) {
20370 printk ("AFFS: Invalid blocksize (512, 1024, 2048, 4096 allowed)\n");
20371@@ -213,7 +213,7 @@ parse_options(char *options, uid_t *uid, gid_t *gid, int *mode, int *reserved, s
20372 break;
20373 case Opt_mode:
20374 if (match_octal(&args[0], &option))
20375- return 0;
20376+ return 1;
20377 *mode = option & 0777;
20378 *mount_opts |= SF_SETMODE;
20379 break;
20380@@ -221,6 +221,8 @@ parse_options(char *options, uid_t *uid, gid_t *gid, int *mode, int *reserved, s
20381 *mount_opts |= SF_MUFS;
20382 break;
20383 case Opt_prefix:
20384+ /* Free any previous prefix */
20385+ kfree(*prefix);
20386 *prefix = match_strdup(&args[0]);
20387 if (!*prefix)
20388 return 0;
20389@@ -231,21 +233,21 @@ parse_options(char *options, uid_t *uid, gid_t *gid, int *mode, int *reserved, s
20390 break;
20391 case Opt_reserved:
20392 if (match_int(&args[0], reserved))
20393- return 0;
20394+ return 1;
20395 break;
20396 case Opt_root:
20397 if (match_int(&args[0], root))
20398- return 0;
20399+ return 1;
20400 break;
20401 case Opt_setgid:
20402 if (match_int(&args[0], &option))
20403- return 0;
20404+ return 1;
20405 *gid = option;
20406 *mount_opts |= SF_SETGID;
20407 break;
20408 case Opt_setuid:
20409 if (match_int(&args[0], &option))
20410- return 0;
20411+ return -EINVAL;
20412 *uid = option;
20413 *mount_opts |= SF_SETUID;
20414 break;
20415@@ -309,14 +311,11 @@ static int affs_fill_super(struct super_block *sb, void *data, int silent)
20416 return -ENOMEM;
20417 sb->s_fs_info = sbi;
20418 mutex_init(&sbi->s_bmlock);
20419- spin_lock_init(&sbi->symlink_lock);
20420
20421 if (!parse_options(data,&uid,&gid,&i,&reserved,&root_block,
20422 &blocksize,&sbi->s_prefix,
20423 sbi->s_volume, &mount_flags)) {
20424 printk(KERN_ERR "AFFS: Error parsing options\n");
20425- kfree(sbi->s_prefix);
20426- kfree(sbi);
20427 return -EINVAL;
20428 }
20429 /* N.B. after this point s_prefix must be released */
20430@@ -517,18 +516,14 @@ affs_remount(struct super_block *sb, int *flags, char *data)
20431 unsigned long mount_flags;
20432 int res = 0;
20433 char *new_opts = kstrdup(data, GFP_KERNEL);
20434- char volume[32];
20435- char *prefix = NULL;
20436
20437 pr_debug("AFFS: remount(flags=0x%x,opts=\"%s\")\n",*flags,data);
20438
20439 *flags |= MS_NODIRATIME;
20440
20441- memcpy(volume, sbi->s_volume, 32);
20442 if (!parse_options(data, &uid, &gid, &mode, &reserved, &root_block,
20443- &blocksize, &prefix, volume,
20444+ &blocksize, &sbi->s_prefix, sbi->s_volume,
20445 &mount_flags)) {
20446- kfree(prefix);
20447 kfree(new_opts);
20448 return -EINVAL;
20449 }
20450@@ -539,14 +534,6 @@ affs_remount(struct super_block *sb, int *flags, char *data)
20451 sbi->s_mode = mode;
20452 sbi->s_uid = uid;
20453 sbi->s_gid = gid;
20454- /* protect against readers */
20455- spin_lock(&sbi->symlink_lock);
20456- if (prefix) {
20457- kfree(sbi->s_prefix);
20458- sbi->s_prefix = prefix;
20459- }
20460- memcpy(sbi->s_volume, volume, 32);
20461- spin_unlock(&sbi->symlink_lock);
20462
20463 if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) {
20464 unlock_kernel();
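
The affs hunks all guard the same two fields: s_prefix and s_volume are read by symlink resolution, so the remount path builds the new prefix and volume copies off to the side and commits them under symlink_lock. Two details make the pattern work: the commit is just a pointer assignment plus a fixed-size memcpy, cheap enough for a spinlock, and kfree() never sleeps, so freeing the old prefix under the lock is legal. The commit step, condensed into a sketch (names as in the hunks):

/* Sketch: publish a prepared (prefix, volume) pair under the lock. */
static void commit_symlink_opts(struct affs_sb_info *sbi,
                                char *prefix, const char volume[32])
{
        spin_lock(&sbi->symlink_lock);
        if (prefix) {
                kfree(sbi->s_prefix);
                sbi->s_prefix = prefix;         /* ownership moves to sbi */
        }
        memcpy(sbi->s_volume, volume, 32);
        spin_unlock(&sbi->symlink_lock);
}
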
20465diff --git a/fs/affs/symlink.c b/fs/affs/symlink.c
20466index ee00f08..4178253 100644
20467--- a/fs/affs/symlink.c
20468+++ b/fs/affs/symlink.c
20469@@ -20,6 +20,7 @@ static int affs_symlink_readpage(struct file *file, struct page *page)
20470 int i, j;
20471 char c;
20472 char lc;
20473+ char *pf;
20474
20475 pr_debug("AFFS: follow_link(ino=%lu)\n",inode->i_ino);
20476
20477@@ -31,15 +32,11 @@ static int affs_symlink_readpage(struct file *file, struct page *page)
20478 j = 0;
20479 lf = (struct slink_front *)bh->b_data;
20480 lc = 0;
20481+ pf = AFFS_SB(inode->i_sb)->s_prefix ? AFFS_SB(inode->i_sb)->s_prefix : "/";
20482
20483 if (strchr(lf->symname,':')) { /* Handle assign or volume name */
20484- struct affs_sb_info *sbi = AFFS_SB(inode->i_sb);
20485- char *pf;
20486- spin_lock(&sbi->symlink_lock);
20487- pf = sbi->s_prefix ? sbi->s_prefix : "/";
20488 while (i < 1023 && (c = pf[i]))
20489 link[i++] = c;
20490- spin_unlock(&sbi->symlink_lock);
20491 while (i < 1023 && lf->symname[j] != ':')
20492 link[i++] = lf->symname[j++];
20493 if (i < 1023)
20494diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
20495index 34ddda8..33baf27 100644
20496--- a/fs/befs/linuxvfs.c
20497+++ b/fs/befs/linuxvfs.c
20498@@ -873,7 +873,6 @@ befs_fill_super(struct super_block *sb, void *data, int silent)
20499 brelse(bh);
20500
20501 unacquire_priv_sbp:
20502- kfree(befs_sb->mount_opts.iocharset);
20503 kfree(sb->s_fs_info);
20504
20505 unacquire_none:
20506diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
20507index 8f3d9fd..6f60336 100644
20508--- a/fs/bfs/inode.c
20509+++ b/fs/bfs/inode.c
20510@@ -353,35 +353,35 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
20511 struct inode *inode;
20512 unsigned i, imap_len;
20513 struct bfs_sb_info *info;
20514- int ret = -EINVAL;
20515+ long ret = -EINVAL;
20516 unsigned long i_sblock, i_eblock, i_eoff, s_size;
20517
20518 info = kzalloc(sizeof(*info), GFP_KERNEL);
20519 if (!info)
20520 return -ENOMEM;
20521- mutex_init(&info->bfs_lock);
20522 s->s_fs_info = info;
20523
20524 sb_set_blocksize(s, BFS_BSIZE);
20525
20526- info->si_sbh = sb_bread(s, 0);
20527- if (!info->si_sbh)
20528+ bh = sb_bread(s, 0);
20529+ if(!bh)
20530 goto out;
20531- bfs_sb = (struct bfs_super_block *)info->si_sbh->b_data;
20532+ bfs_sb = (struct bfs_super_block *)bh->b_data;
20533 if (le32_to_cpu(bfs_sb->s_magic) != BFS_MAGIC) {
20534 if (!silent)
20535 printf("No BFS filesystem on %s (magic=%08x)\n",
20536 s->s_id, le32_to_cpu(bfs_sb->s_magic));
20537- goto out1;
20538+ goto out;
20539 }
20540 if (BFS_UNCLEAN(bfs_sb, s) && !silent)
20541 printf("%s is unclean, continuing\n", s->s_id);
20542
20543 s->s_magic = BFS_MAGIC;
20544+ info->si_sbh = bh;
20545
20546 if (le32_to_cpu(bfs_sb->s_start) > le32_to_cpu(bfs_sb->s_end)) {
20547 printf("Superblock is corrupted\n");
20548- goto out1;
20549+ goto out;
20550 }
20551
20552 info->si_lasti = (le32_to_cpu(bfs_sb->s_start) - BFS_BSIZE) /
20553@@ -390,7 +390,7 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
20554 imap_len = (info->si_lasti / 8) + 1;
20555 info->si_imap = kzalloc(imap_len, GFP_KERNEL);
20556 if (!info->si_imap)
20557- goto out1;
20558+ goto out;
20559 for (i = 0; i < BFS_ROOT_INO; i++)
20560 set_bit(i, info->si_imap);
20561
20562@@ -398,13 +398,15 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
20563 inode = bfs_iget(s, BFS_ROOT_INO);
20564 if (IS_ERR(inode)) {
20565 ret = PTR_ERR(inode);
20566- goto out2;
20567+ kfree(info->si_imap);
20568+ goto out;
20569 }
20570 s->s_root = d_alloc_root(inode);
20571 if (!s->s_root) {
20572 iput(inode);
20573 ret = -ENOMEM;
20574- goto out2;
20575+ kfree(info->si_imap);
20576+ goto out;
20577 }
20578
20579 info->si_blocks = (le32_to_cpu(bfs_sb->s_end) + 1) >> BFS_BSIZE_BITS;
20580@@ -417,8 +419,10 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
20581 bh = sb_bread(s, info->si_blocks - 1);
20582 if (!bh) {
20583 printf("Last block not available: %lu\n", info->si_blocks - 1);
20584+ iput(inode);
20585 ret = -EIO;
20586- goto out3;
20587+ kfree(info->si_imap);
20588+ goto out;
20589 }
20590 brelse(bh);
20591
20592@@ -455,8 +459,11 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
20593 printf("Inode 0x%08x corrupted\n", i);
20594
20595 brelse(bh);
20596- ret = -EIO;
20597- goto out3;
20598+ s->s_root = NULL;
20599+ kfree(info->si_imap);
20600+ kfree(info);
20601+ s->s_fs_info = NULL;
20602+ return -EIO;
20603 }
20604
20605 if (!di->i_ino) {
20606@@ -476,17 +483,11 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
20607 s->s_dirt = 1;
20608 }
20609 dump_imap("read_super", s);
20610+ mutex_init(&info->bfs_lock);
20611 return 0;
20612
20613-out3:
20614- dput(s->s_root);
20615- s->s_root = NULL;
20616-out2:
20617- kfree(info->si_imap);
20618-out1:
20619- brelse(info->si_sbh);
20620 out:
20621- mutex_destroy(&info->bfs_lock);
20622+ brelse(bh);
20623 kfree(info);
20624 s->s_fs_info = NULL;
20625 return ret;
20626diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
20627index 0133b5a..b639dcf 100644
20628--- a/fs/binfmt_aout.c
20629+++ b/fs/binfmt_aout.c
20630@@ -263,7 +263,6 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
20631 #else
20632 set_personality(PER_LINUX);
20633 #endif
20634- setup_new_exec(bprm);
20635
20636 current->mm->end_code = ex.a_text +
20637 (current->mm->start_code = N_TXTADDR(ex));
20638diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
20639index 1ed37ba..b9b3bb5 100644
20640--- a/fs/binfmt_elf.c
20641+++ b/fs/binfmt_elf.c
20642@@ -662,6 +662,27 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
20643 if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
20644 goto out_free_interp;
20645
20646+ /*
20647+ * The early SET_PERSONALITY here is so that the lookup
20648+ * for the interpreter happens in the namespace of the
20649+ * to-be-execed image. SET_PERSONALITY can select an
20650+ * alternate root.
20651+ *
20652+ * However, SET_PERSONALITY is NOT allowed to switch
20653+ * this task into the new images's memory mapping
20654+ * policy - that is, TASK_SIZE must still evaluate to
20655+ * that which is appropriate to the execing application.
20656+ * This is because exit_mmap() needs to have TASK_SIZE
20657+ * evaluate to the size of the old image.
20658+ *
20659+ * So if (say) a 64-bit application is execing a 32-bit
20660+ * application it is the architecture's responsibility
20661+ * to defer changing the value of TASK_SIZE until the
20662+ * switch really is going to happen - do this in
20663+ * flush_thread(). - akpm
20664+ */
20665+ SET_PERSONALITY(loc->elf_ex);
20666+
20667 interpreter = open_exec(elf_interpreter);
20668 retval = PTR_ERR(interpreter);
20669 if (IS_ERR(interpreter))
20670@@ -709,6 +730,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
20671 /* Verify the interpreter has a valid arch */
20672 if (!elf_check_arch(&loc->interp_elf_ex))
20673 goto out_free_dentry;
20674+ } else {
20675+ /* Executables without an interpreter also need a personality */
20676+ SET_PERSONALITY(loc->elf_ex);
20677 }
20678
20679 /* Flush all traces of the currently running executable */
20680@@ -728,8 +752,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
20681
20682 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
20683 current->flags |= PF_RANDOMIZE;
20684-
20685- setup_new_exec(bprm);
20686+ arch_pick_mmap_layout(current->mm);
20687
20688 /* Do this so that we can load the interpreter, if need be. We will
20689 change some of these later */
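
SET_PERSONALITY in the hunks above runs early so that the interpreter lookup already sees the new image's personality. The resulting setting is observable from userspace through personality(2); a small Linux-only probe, offered purely as illustration:

    #include <stdio.h>
    #include <sys/personality.h>

    int main(void)
    {
        /* personality(0xffffffff) reads the current personality
         * word without changing it. */
        int cur = personality(0xffffffff);

        printf("personality word: 0x%x\n", (unsigned)cur);
        printf("READ_IMPLIES_EXEC: %s\n",
               (cur & READ_IMPLIES_EXEC) ? "yes" : "no");
        printf("ADDR_NO_RANDOMIZE: %s\n",
               (cur & ADDR_NO_RANDOMIZE) ? "yes" : "no");
        return 0;
    }
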
20690diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
20691index e7a0bb4..38502c6 100644
20692--- a/fs/binfmt_elf_fdpic.c
20693+++ b/fs/binfmt_elf_fdpic.c
20694@@ -171,9 +171,6 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm,
20695 #ifdef ELF_FDPIC_PLAT_INIT
20696 unsigned long dynaddr;
20697 #endif
20698-#ifndef CONFIG_MMU
20699- unsigned long stack_prot;
20700-#endif
20701 struct file *interpreter = NULL; /* to shut gcc up */
20702 char *interpreter_name = NULL;
20703 int executable_stack;
20704@@ -319,11 +316,6 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm,
20705 * defunct, deceased, etc. after this point we have to exit via
20706 * error_kill */
20707 set_personality(PER_LINUX_FDPIC);
20708- if (elf_read_implies_exec(&exec_params.hdr, executable_stack))
20709- current->personality |= READ_IMPLIES_EXEC;
20710-
20711- setup_new_exec(bprm);
20712-
20713 set_binfmt(&elf_fdpic_format);
20714
20715 current->mm->start_code = 0;
20716@@ -385,13 +377,9 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm,
20717 if (stack_size < PAGE_SIZE * 2)
20718 stack_size = PAGE_SIZE * 2;
20719
20720- stack_prot = PROT_READ | PROT_WRITE;
20721- if (executable_stack == EXSTACK_ENABLE_X ||
20722- (executable_stack == EXSTACK_DEFAULT && VM_STACK_FLAGS & VM_EXEC))
20723- stack_prot |= PROT_EXEC;
20724-
20725 down_write(&current->mm->mmap_sem);
20726- current->mm->start_brk = do_mmap(NULL, 0, stack_size, stack_prot,
20727+ current->mm->start_brk = do_mmap(NULL, 0, stack_size,
20728+ PROT_READ | PROT_WRITE | PROT_EXEC,
20729 MAP_PRIVATE | MAP_ANONYMOUS | MAP_GROWSDOWN,
20730 0);
20731
20732diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
20733index ca88c46..a279665 100644
20734--- a/fs/binfmt_flat.c
20735+++ b/fs/binfmt_flat.c
20736@@ -519,7 +519,6 @@ static int load_flat_file(struct linux_binprm * bprm,
20737
20738 /* OK, This is the point of no return */
20739 set_personality(PER_LINUX_32BIT);
20740- setup_new_exec(bprm);
20741 }
20742
20743 /*
20744diff --git a/fs/binfmt_som.c b/fs/binfmt_som.c
20745index 35cf002..eff74b9 100644
20746--- a/fs/binfmt_som.c
20747+++ b/fs/binfmt_som.c
20748@@ -227,7 +227,6 @@ load_som_binary(struct linux_binprm * bprm, struct pt_regs * regs)
20749 /* OK, This is the point of no return */
20750 current->flags &= ~PF_FORKNOEXEC;
20751 current->personality = PER_HPUX;
20752- setup_new_exec(bprm);
20753
20754 /* Set the task size for HP-UX processes such that
20755 * the gateway page is outside the address space.
20756diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
20757index a16f29e..49a34e7 100644
20758--- a/fs/bio-integrity.c
20759+++ b/fs/bio-integrity.c
20760@@ -61,7 +61,7 @@ static inline unsigned int vecs_to_idx(unsigned int nr)
20761
20762 static inline int use_bip_pool(unsigned int idx)
20763 {
20764- if (idx == BIOVEC_MAX_IDX)
20765+ if (idx == BIOVEC_NR_POOLS)
20766 return 1;
20767
20768 return 0;
20769@@ -95,7 +95,6 @@ struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *bio,
20770
20771 /* Use mempool if lower order alloc failed or max vecs were requested */
20772 if (bip == NULL) {
20773- idx = BIOVEC_MAX_IDX; /* so we free the payload properly later */
20774 bip = mempool_alloc(bs->bio_integrity_pool, gfp_mask);
20775
20776 if (unlikely(bip == NULL)) {
20777diff --git a/fs/bio.c b/fs/bio.c
20778index e0c9e71..12da5db 100644
20779--- a/fs/bio.c
20780+++ b/fs/bio.c
20781@@ -542,18 +542,13 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
20782
20783 if (page == prev->bv_page &&
20784 offset == prev->bv_offset + prev->bv_len) {
20785- unsigned int prev_bv_len = prev->bv_len;
20786 prev->bv_len += len;
20787
20788 if (q->merge_bvec_fn) {
20789 struct bvec_merge_data bvm = {
20790- /* prev_bvec is already charged in
20791- bi_size, discharge it in order to
20792- simulate merging updated prev_bvec
20793- as new bvec. */
20794 .bi_bdev = bio->bi_bdev,
20795 .bi_sector = bio->bi_sector,
20796- .bi_size = bio->bi_size - prev_bv_len,
20797+ .bi_size = bio->bi_size,
20798 .bi_rw = bio->bi_rw,
20799 };
20800
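
The comment deleted in this hunk is pure bookkeeping: when a page merely extends the previous bvec, the merge callback must be offered bi_size with the previous bvec discharged, or that bvec is counted twice. The same arithmetic with toy numbers (hypothetical sizes, not kernel code):

    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical request: three segments already queued. */
        unsigned int bi_size = 12288;     /* bytes charged so far */
        unsigned int prev_bv_len = 4096;  /* length of the last bvec */
        unsigned int len = 4096;          /* bytes being appended */

        /* The appended bytes grow the last bvec in place... */
        unsigned int merged_bvec_len = prev_bv_len + len;

        /* ...so the merge check must discharge prev_bv_len first,
         * otherwise the last bvec would be counted twice. */
        unsigned int offered_size = bi_size - prev_bv_len;

        printf("offer %u bytes plus a %u-byte bvec (total %u)\n",
               offered_size, merged_bvec_len,
               offered_size + merged_bvec_len);
        return 0;
    }
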
20801diff --git a/fs/block_dev.c b/fs/block_dev.c
20802index 34e2d20..8bed055 100644
20803--- a/fs/block_dev.c
20804+++ b/fs/block_dev.c
20805@@ -246,8 +246,7 @@ struct super_block *freeze_bdev(struct block_device *bdev)
20806 if (!sb)
20807 goto out;
20808 if (sb->s_flags & MS_RDONLY) {
20809- sb->s_frozen = SB_FREEZE_TRANS;
20810- up_write(&sb->s_umount);
20811+ deactivate_locked_super(sb);
20812 mutex_unlock(&bdev->bd_fsfreeze_mutex);
20813 return sb;
20814 }
20815@@ -308,7 +307,7 @@ int thaw_bdev(struct block_device *bdev, struct super_block *sb)
20816 BUG_ON(sb->s_bdev != bdev);
20817 down_write(&sb->s_umount);
20818 if (sb->s_flags & MS_RDONLY)
20819- goto out_unfrozen;
20820+ goto out_deactivate;
20821
20822 if (sb->s_op->unfreeze_fs) {
20823 error = sb->s_op->unfreeze_fs(sb);
20824@@ -322,11 +321,11 @@ int thaw_bdev(struct block_device *bdev, struct super_block *sb)
20825 }
20826 }
20827
20828-out_unfrozen:
20829 sb->s_frozen = SB_UNFROZEN;
20830 smp_wmb();
20831 wake_up(&sb->s_wait_unfrozen);
20832
20833+out_deactivate:
20834 if (sb)
20835 deactivate_locked_super(sb);
20836 out_unlock:
20837diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
20838index 3bbcaa7..63ea83f 100644
20839--- a/fs/cifs/connect.c
20840+++ b/fs/cifs/connect.c
20841@@ -2287,12 +2287,12 @@ int
20842 cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
20843 char *mount_data_global, const char *devname)
20844 {
20845- int rc;
20846+ int rc = 0;
20847 int xid;
20848 struct smb_vol *volume_info;
20849- struct cifsSesInfo *pSesInfo;
20850- struct cifsTconInfo *tcon;
20851- struct TCP_Server_Info *srvTcp;
20852+ struct cifsSesInfo *pSesInfo = NULL;
20853+ struct cifsTconInfo *tcon = NULL;
20854+ struct TCP_Server_Info *srvTcp = NULL;
20855 char *full_path;
20856 char *mount_data = mount_data_global;
20857 #ifdef CONFIG_CIFS_DFS_UPCALL
20858@@ -2301,10 +2301,6 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
20859 int referral_walks_count = 0;
20860 try_mount_again:
20861 #endif
20862- rc = 0;
20863- tcon = NULL;
20864- pSesInfo = NULL;
20865- srvTcp = NULL;
20866 full_path = NULL;
20867
20868 xid = GetXid();
20869@@ -2601,7 +2597,6 @@ remote_path_check:
20870
20871 cleanup_volume_info(&volume_info);
20872 referral_walks_count++;
20873- FreeXid(xid);
20874 goto try_mount_again;
20875 }
20876 #else /* No DFS support, return error on mount */
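
One side of this hunk initializes rc, pSesInfo, tcon, and srvTcp at their declarations and drops the per-pass reinitialization (and a FreeXid call) under the try_mount_again label. The general hazard in goto-retry loops is state that is either stale or leaked across passes; a hedged sketch of the shape (hypothetical names):

    #include <stdio.h>

    /* Hypothetical connect step that fails twice, then succeeds. */
    static int try_connect(int attempt)
    {
        return attempt < 2 ? -1 : 0;
    }

    int main(void)
    {
        int rc = 0;           /* initialized at declaration... */
        void *session = NULL; /* ...so a goto that jumps below the
                               * declarations still sees sane values */
        int attempt = 0;

    try_again:
        rc = try_connect(attempt);
        if (rc && attempt < 3) {
            attempt++;
            /* anything acquired in the failed pass must be
             * released BEFORE the goto, or it leaks per retry */
            goto try_again;
        }

        printf("rc=%d after %d attempts (session=%p)\n",
               rc, attempt, session);
        return rc;
    }
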
20877diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
20878index f5618f8..f84062f 100644
20879--- a/fs/cifs/readdir.c
20880+++ b/fs/cifs/readdir.c
20881@@ -666,7 +666,6 @@ static int cifs_get_name_from_search_buf(struct qstr *pqst,
20882 min(len, max_len), nlt,
20883 cifs_sb->mnt_cifs_flags &
20884 CIFS_MOUNT_MAP_SPECIAL_CHR);
20885- pqst->len -= nls_nullsize(nlt);
20886 } else {
20887 pqst->name = filename;
20888 pqst->len = len;
20889diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
20890index 39c6ee8..d22438e 100644
20891--- a/fs/debugfs/inode.c
20892+++ b/fs/debugfs/inode.c
20893@@ -32,9 +32,7 @@ static struct vfsmount *debugfs_mount;
20894 static int debugfs_mount_count;
20895 static bool debugfs_registered;
20896
20897-static struct inode *debugfs_get_inode(struct super_block *sb, int mode, dev_t dev,
20898- void *data, const struct file_operations *fops)
20899-
20900+static struct inode *debugfs_get_inode(struct super_block *sb, int mode, dev_t dev)
20901 {
20902 struct inode *inode = new_inode(sb);
20903
20904@@ -46,18 +44,14 @@ static struct inode *debugfs_get_inode(struct super_block *sb, int mode, dev_t d
20905 init_special_inode(inode, mode, dev);
20906 break;
20907 case S_IFREG:
20908- inode->i_fop = fops ? fops : &debugfs_file_operations;
20909- inode->i_private = data;
20910+ inode->i_fop = &debugfs_file_operations;
20911 break;
20912 case S_IFLNK:
20913 inode->i_op = &debugfs_link_operations;
20914- inode->i_fop = fops;
20915- inode->i_private = data;
20916 break;
20917 case S_IFDIR:
20918 inode->i_op = &simple_dir_inode_operations;
20919- inode->i_fop = fops ? fops : &simple_dir_operations;
20920- inode->i_private = data;
20921+ inode->i_fop = &simple_dir_operations;
20922
20923 /* directory inodes start off with i_nlink == 2
20924 * (for "." entry) */
20925@@ -70,8 +64,7 @@ static struct inode *debugfs_get_inode(struct super_block *sb, int mode, dev_t d
20926
20927 /* SMP-safe */
20928 static int debugfs_mknod(struct inode *dir, struct dentry *dentry,
20929- int mode, dev_t dev, void *data,
20930- const struct file_operations *fops)
20931+ int mode, dev_t dev)
20932 {
20933 struct inode *inode;
20934 int error = -EPERM;
20935@@ -79,7 +72,7 @@ static int debugfs_mknod(struct inode *dir, struct dentry *dentry,
20936 if (dentry->d_inode)
20937 return -EEXIST;
20938
20939- inode = debugfs_get_inode(dir->i_sb, mode, dev, data, fops);
20940+ inode = debugfs_get_inode(dir->i_sb, mode, dev);
20941 if (inode) {
20942 d_instantiate(dentry, inode);
20943 dget(dentry);
20944@@ -88,13 +81,12 @@ static int debugfs_mknod(struct inode *dir, struct dentry *dentry,
20945 return error;
20946 }
20947
20948-static int debugfs_mkdir(struct inode *dir, struct dentry *dentry, int mode,
20949- void *data, const struct file_operations *fops)
20950+static int debugfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
20951 {
20952 int res;
20953
20954 mode = (mode & (S_IRWXUGO | S_ISVTX)) | S_IFDIR;
20955- res = debugfs_mknod(dir, dentry, mode, 0, data, fops);
20956+ res = debugfs_mknod(dir, dentry, mode, 0);
20957 if (!res) {
20958 inc_nlink(dir);
20959 fsnotify_mkdir(dir, dentry);
20960@@ -102,20 +94,18 @@ static int debugfs_mkdir(struct inode *dir, struct dentry *dentry, int mode,
20961 return res;
20962 }
20963
20964-static int debugfs_link(struct inode *dir, struct dentry *dentry, int mode,
20965- void *data, const struct file_operations *fops)
20966+static int debugfs_link(struct inode *dir, struct dentry *dentry, int mode)
20967 {
20968 mode = (mode & S_IALLUGO) | S_IFLNK;
20969- return debugfs_mknod(dir, dentry, mode, 0, data, fops);
20970+ return debugfs_mknod(dir, dentry, mode, 0);
20971 }
20972
20973-static int debugfs_create(struct inode *dir, struct dentry *dentry, int mode,
20974- void *data, const struct file_operations *fops)
20975+static int debugfs_create(struct inode *dir, struct dentry *dentry, int mode)
20976 {
20977 int res;
20978
20979 mode = (mode & S_IALLUGO) | S_IFREG;
20980- res = debugfs_mknod(dir, dentry, mode, 0, data, fops);
20981+ res = debugfs_mknod(dir, dentry, mode, 0);
20982 if (!res)
20983 fsnotify_create(dir, dentry);
20984 return res;
20985@@ -149,9 +139,7 @@ static struct file_system_type debug_fs_type = {
20986
20987 static int debugfs_create_by_name(const char *name, mode_t mode,
20988 struct dentry *parent,
20989- struct dentry **dentry,
20990- void *data,
20991- const struct file_operations *fops)
20992+ struct dentry **dentry)
20993 {
20994 int error = 0;
20995
20996@@ -176,16 +164,13 @@ static int debugfs_create_by_name(const char *name, mode_t mode,
20997 if (!IS_ERR(*dentry)) {
20998 switch (mode & S_IFMT) {
20999 case S_IFDIR:
21000- error = debugfs_mkdir(parent->d_inode, *dentry, mode,
21001- data, fops);
21002+ error = debugfs_mkdir(parent->d_inode, *dentry, mode);
21003 break;
21004 case S_IFLNK:
21005- error = debugfs_link(parent->d_inode, *dentry, mode,
21006- data, fops);
21007+ error = debugfs_link(parent->d_inode, *dentry, mode);
21008 break;
21009 default:
21010- error = debugfs_create(parent->d_inode, *dentry, mode,
21011- data, fops);
21012+ error = debugfs_create(parent->d_inode, *dentry, mode);
21013 break;
21014 }
21015 dput(*dentry);
21016@@ -236,13 +221,19 @@ struct dentry *debugfs_create_file(const char *name, mode_t mode,
21017 if (error)
21018 goto exit;
21019
21020- error = debugfs_create_by_name(name, mode, parent, &dentry,
21021- data, fops);
21022+ error = debugfs_create_by_name(name, mode, parent, &dentry);
21023 if (error) {
21024 dentry = NULL;
21025 simple_release_fs(&debugfs_mount, &debugfs_mount_count);
21026 goto exit;
21027 }
21028+
21029+ if (dentry->d_inode) {
21030+ if (data)
21031+ dentry->d_inode->i_private = data;
21032+ if (fops)
21033+ dentry->d_inode->i_fop = fops;
21034+ }
21035 exit:
21036 return dentry;
21037 }
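
One side of this hunk threads data and fops through debugfs_mknod() so the inode is complete when d_instantiate() publishes it; the other assigns dentry->d_inode->i_private and i_fop after creation. A toy sketch of why initialize-then-publish is the safer ordering (hypothetical types, not the debugfs API):

    #include <stdio.h>
    #include <stddef.h>

    struct node {
        const char *name;
        void *private_data; /* payload a concurrent reader may use */
    };

    /* Fully initialize before the node becomes reachable: a reader
     * that finds the node can never observe private_data == NULL. */
    static struct node make_node(const char *name, void *data)
    {
        struct node n = { .name = name, .private_data = data };
        return n; /* "publish" happens after this returns */
    }

    int main(void)
    {
        int payload = 42;

        /* safe ordering: initialize, then publish */
        struct node a = make_node("safe", &payload);

        /* racier ordering: publish first, patch the payload in later */
        struct node b = make_node("racy", NULL);
        b.private_data = &payload; /* window where readers saw NULL */

        printf("%s=%p %s=%p\n", a.name, a.private_data,
               b.name, b.private_data);
        return 0;
    }
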
21038diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
21039index 8882ecc..d5f8c96 100644
21040--- a/fs/devpts/inode.c
21041+++ b/fs/devpts/inode.c
21042@@ -517,23 +517,11 @@ int devpts_pty_new(struct inode *ptmx_inode, struct tty_struct *tty)
21043
21044 struct tty_struct *devpts_get_tty(struct inode *pts_inode, int number)
21045 {
21046- struct dentry *dentry;
21047- struct tty_struct *tty;
21048-
21049 BUG_ON(pts_inode->i_rdev == MKDEV(TTYAUX_MAJOR, PTMX_MINOR));
21050
21051- /* Ensure dentry has not been deleted by devpts_pty_kill() */
21052- dentry = d_find_alias(pts_inode);
21053- if (!dentry)
21054- return NULL;
21055-
21056- tty = NULL;
21057 if (pts_inode->i_sb->s_magic == DEVPTS_SUPER_MAGIC)
21058- tty = (struct tty_struct *)pts_inode->i_private;
21059-
21060- dput(dentry);
21061-
21062- return tty;
21063+ return (struct tty_struct *)pts_inode->i_private;
21064+ return NULL;
21065 }
21066
21067 void devpts_pty_kill(struct tty_struct *tty)
21068diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
21069index 7cb0a59..fbb6e5e 100644
21070--- a/fs/ecryptfs/crypto.c
21071+++ b/fs/ecryptfs/crypto.c
21072@@ -1748,7 +1748,7 @@ ecryptfs_process_key_cipher(struct crypto_blkcipher **key_tfm,
21073 char *cipher_name, size_t *key_size)
21074 {
21075 char dummy_key[ECRYPTFS_MAX_KEY_BYTES];
21076- char *full_alg_name = NULL;
21077+ char *full_alg_name;
21078 int rc;
21079
21080 *key_tfm = NULL;
21081@@ -1763,6 +1763,7 @@ ecryptfs_process_key_cipher(struct crypto_blkcipher **key_tfm,
21082 if (rc)
21083 goto out;
21084 *key_tfm = crypto_alloc_blkcipher(full_alg_name, 0, CRYPTO_ALG_ASYNC);
21085+ kfree(full_alg_name);
21086 if (IS_ERR(*key_tfm)) {
21087 rc = PTR_ERR(*key_tfm);
21088 printk(KERN_ERR "Unable to allocate crypto cipher with name "
21089@@ -1785,7 +1786,6 @@ ecryptfs_process_key_cipher(struct crypto_blkcipher **key_tfm,
21090 goto out;
21091 }
21092 out:
21093- kfree(full_alg_name);
21094 return rc;
21095 }
21096
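
The two placements of kfree(full_alg_name) above trade a single cleanup at the out label (which needs the pointer NULL-initialized) against freeing at the point of last use (which needs no cleanup on later error paths). Both styles, in a minimal userspace sketch (hypothetical helper):

    #include <stdlib.h>
    #include <string.h>
    #include <stdio.h>

    static char *build_alg_name(const char *cipher)
    {
        char *s = malloc(strlen(cipher) + 8);
        if (s)
            sprintf(s, "cbc(%s)", cipher);
        return s;
    }

    /* Style A: free at the point of last use. */
    static int style_a(const char *cipher)
    {
        char *name = build_alg_name(cipher);
        if (!name)
            return -1;
        printf("allocating tfm for %s\n", name);
        free(name); /* name is dead from here on; later error
                     * paths need no cleanup for it */
        return 0;
    }

    /* Style B: one exit path; the pointer must start NULL so the
     * free() is safe even when allocation was never reached. */
    static int style_b(const char *cipher)
    {
        char *name = NULL;
        int rc = -1;

        name = build_alg_name(cipher);
        if (!name)
            goto out;
        printf("allocating tfm for %s\n", name);
        rc = 0;
    out:
        free(name); /* free(NULL) is a no-op */
        return rc;
    }

    int main(void)
    {
        return style_a("aes") || style_b("aes");
    }
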
21097diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
21098index 1744f17..9e94405 100644
21099--- a/fs/ecryptfs/file.c
21100+++ b/fs/ecryptfs/file.c
21101@@ -191,6 +191,13 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
21102 | ECRYPTFS_ENCRYPTED);
21103 }
21104 mutex_unlock(&crypt_stat->cs_mutex);
21105+ if ((ecryptfs_inode_to_private(inode)->lower_file->f_flags & O_RDONLY)
21106+ && !(file->f_flags & O_RDONLY)) {
21107+ rc = -EPERM;
21108+ printk(KERN_WARNING "%s: Lower persistent file is RO; eCryptfs "
21109+ "file must hence be opened RO\n", __func__);
21110+ goto out;
21111+ }
21112 if (!ecryptfs_inode_to_private(inode)->lower_file) {
21113 rc = ecryptfs_init_persistent_file(ecryptfs_dentry);
21114 if (rc) {
21115@@ -201,13 +208,6 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
21116 goto out;
21117 }
21118 }
21119- if ((ecryptfs_inode_to_private(inode)->lower_file->f_flags & O_RDONLY)
21120- && !(file->f_flags & O_RDONLY)) {
21121- rc = -EPERM;
21122- printk(KERN_WARNING "%s: Lower persistent file is RO; eCryptfs "
21123- "file must hence be opened RO\n", __func__);
21124- goto out;
21125- }
21126 ecryptfs_set_file_lower(
21127 file, ecryptfs_inode_to_private(inode)->lower_file);
21128 if (S_ISDIR(ecryptfs_dentry->d_inode->i_mode)) {
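
The two ecryptfs_open() hunks above move the read-only check relative to the lower_file initialization; note that the check dereferences lower_file, so it is only safe once the NULL case has been handled. The shape of that ordering constraint, as a hedged sketch (hypothetical types; O_RDONLY_BIT is a stand-in, since the real O_RDONLY is 0 and cannot be tested with a mask):

    #include <stdio.h>
    #include <stddef.h>

    struct lower { int f_flags; };
    struct priv  { struct lower *lower_file; };

    #define O_RDONLY_BIT 1

    static int open_upper(struct priv *p, int upper_flags)
    {
        /* The unsafe ordering would test p->lower_file->f_flags
         * here, before knowing p->lower_file is non-NULL. */
        if (!p->lower_file) {
            static struct lower l = { .f_flags = O_RDONLY_BIT };
            printf("initializing lower file first\n");
            p->lower_file = &l;
        }

        /* Safe now: lower_file is guaranteed non-NULL. */
        if ((p->lower_file->f_flags & O_RDONLY_BIT) &&
            !(upper_flags & O_RDONLY_BIT)) {
            printf("lower file is RO; refuse RW open\n");
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        struct priv p = { .lower_file = NULL };
        return open_upper(&p, 0) == -1 ? 0 : 1;
    }
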
21129diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
21130index 728f07e..056fed6 100644
21131--- a/fs/ecryptfs/inode.c
21132+++ b/fs/ecryptfs/inode.c
21133@@ -971,21 +971,6 @@ out:
21134 return rc;
21135 }
21136
21137-int ecryptfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
21138- struct kstat *stat)
21139-{
21140- struct kstat lower_stat;
21141- int rc;
21142-
21143- rc = vfs_getattr(ecryptfs_dentry_to_lower_mnt(dentry),
21144- ecryptfs_dentry_to_lower(dentry), &lower_stat);
21145- if (!rc) {
21146- generic_fillattr(dentry->d_inode, stat);
21147- stat->blocks = lower_stat.blocks;
21148- }
21149- return rc;
21150-}
21151-
21152 int
21153 ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value,
21154 size_t size, int flags)
21155@@ -1115,7 +1100,6 @@ const struct inode_operations ecryptfs_dir_iops = {
21156 const struct inode_operations ecryptfs_main_iops = {
21157 .permission = ecryptfs_permission,
21158 .setattr = ecryptfs_setattr,
21159- .getattr = ecryptfs_getattr,
21160 .setxattr = ecryptfs_setxattr,
21161 .getxattr = ecryptfs_getxattr,
21162 .listxattr = ecryptfs_listxattr,
21163diff --git a/fs/exec.c b/fs/exec.c
21164index da36c20..606cf96 100644
664index ba112bd..606cf96 100644
21165--- a/fs/exec.c
21166+++ b/fs/exec.c
21167@@ -19,7 +19,7 @@
@@ -21191,100 +691,7 @@ index da36c20..606cf96 100644
21191 }
21192
21193 EXPORT_SYMBOL(__register_binfmt);
21194@@ -572,9 +574,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
694@@ -1006,7 +1008,7 @@ int flush_old_exec(struct linux_binprm * bprm)
21195 struct vm_area_struct *prev = NULL;
21196 unsigned long vm_flags;
21197 unsigned long stack_base;
21198- unsigned long stack_size;
21199- unsigned long stack_expand;
21200- unsigned long rlim_stack;
21201
21202 #ifdef CONFIG_STACK_GROWSUP
21203 /* Limit stack size to 1GB */
21204@@ -631,24 +630,10 @@ int setup_arg_pages(struct linux_binprm *bprm,
21205 goto out_unlock;
21206 }
21207
21208- stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
21209- stack_size = vma->vm_end - vma->vm_start;
21210- /*
21211- * Align this down to a page boundary as expand_stack
21212- * will align it up.
21213- */
21214- rlim_stack = rlimit(RLIMIT_STACK) & PAGE_MASK;
21215- rlim_stack = min(rlim_stack, stack_size);
21216 #ifdef CONFIG_STACK_GROWSUP
21217- if (stack_size + stack_expand > rlim_stack)
21218- stack_base = vma->vm_start + rlim_stack;
21219- else
21220- stack_base = vma->vm_end + stack_expand;
21221+ stack_base = vma->vm_end + EXTRA_STACK_VM_PAGES * PAGE_SIZE;
21222 #else
21223- if (stack_size + stack_expand > rlim_stack)
21224- stack_base = vma->vm_end - rlim_stack;
21225- else
21226- stack_base = vma->vm_start - stack_expand;
21227+ stack_base = vma->vm_start - EXTRA_STACK_VM_PAGES * PAGE_SIZE;
21228 #endif
21229 ret = expand_stack(vma, stack_base);
21230 if (ret)
21231@@ -948,7 +933,9 @@ void set_task_comm(struct task_struct *tsk, char *buf)
21232
21233 int flush_old_exec(struct linux_binprm * bprm)
21234 {
21235- int retval;
21236+ char * name;
21237+ int i, ch, retval;
21238+ char tcomm[sizeof(current->comm)];
21239
21240 /*
21241 * Make sure we have a private signal table and that
21242@@ -969,25 +956,6 @@ int flush_old_exec(struct linux_binprm * bprm)
21243
21244 bprm->mm = NULL; /* We're using it now */
21245
21246- current->flags &= ~PF_RANDOMIZE;
21247- flush_thread();
21248- current->personality &= ~bprm->per_clear;
21249-
21250- return 0;
21251-
21252-out:
21253- return retval;
21254-}
21255-EXPORT_SYMBOL(flush_old_exec);
21256-
21257-void setup_new_exec(struct linux_binprm * bprm)
21258-{
21259- int i, ch;
21260- char * name;
21261- char tcomm[sizeof(current->comm)];
21262-
21263- arch_pick_mmap_layout(current->mm);
21264-
21265 /* This is the point of no return */
21266 current->sas_ss_sp = current->sas_ss_size = 0;
21267
21268@@ -1009,6 +977,9 @@ void setup_new_exec(struct linux_binprm * bprm)
21269 tcomm[i] = '\0';
21270 set_task_comm(current, tcomm);
21271
21272+ current->flags &= ~PF_RANDOMIZE;
21273+ flush_thread();
21274+
21275 /* Set the new mm task size. We have to do that late because it may
21276 * depend on TIF_32BIT which is only updated in flush_thread() on
21277 * some architectures like powerpc
21278@@ -1024,6 +995,8 @@ void setup_new_exec(struct linux_binprm * bprm)
21279 set_dumpable(current->mm, suid_dumpable);
21280 }
21281
21282+ current->personality &= ~bprm->per_clear;
21283+
21284+ /*
21285+ * Flush performance counters when crossing a
21286+ * security domain:
21287@@ -1035,11 +1008,17 @@ void setup_new_exec(struct linux_binprm * bprm)
21288 group */
21289
21290 current->self_exec_id++;
@@ -21292,19 +699,8 @@ index da36c20..606cf96 100644
21292+
21293 flush_signal_handlers(current, 0);
21294 flush_old_files(current->files);
21295+
21296+ return 0;
21297+
21298+out:
21299+ return retval;
21300 }
21301-EXPORT_SYMBOL(setup_new_exec);
21302+
21303+EXPORT_SYMBOL(flush_old_exec);
21304
21305 /*
703@@ -1102,8 +1104,8 @@ int check_unsafe_exec(struct linux_binprm *bprm)
21306 * Prepare credentials and lock ->cred_guard_mutex.
21307@@ -1125,8 +1104,8 @@ int check_unsafe_exec(struct linux_binprm *bprm)
21308 return res;
21309 }
21310
@@ -21315,7 +711,7 @@ index da36c20..606cf96 100644
21315 * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
21316 *
21317 * This may be called multiple times for binary chains (scripts for example).
21318@@ -1341,6 +1320,7 @@ int do_execve(char * filename,
714@@ -1318,6 +1320,7 @@ int do_execve(char * filename,
21319 goto out_unmark;
21320
21321 sched_exec();
@@ -21323,1959 +719,6 @@ index da36c20..606cf96 100644
21323
21324 bprm->file = file;
21325 bprm->filename = filename;
21326diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
21327index 6f7df0f..6c10f74 100644
21328--- a/fs/exofs/inode.c
21329+++ b/fs/exofs/inode.c
21330@@ -731,28 +731,13 @@ static int exofs_write_begin_export(struct file *file,
21331 fsdata);
21332 }
21333
21334-static int exofs_write_end(struct file *file, struct address_space *mapping,
21335- loff_t pos, unsigned len, unsigned copied,
21336- struct page *page, void *fsdata)
21337-{
21338- struct inode *inode = mapping->host;
21339- /* According to comment in simple_write_end i_mutex is held */
21340- loff_t i_size = inode->i_size;
21341- int ret;
21342-
21343- ret = simple_write_end(file, mapping,pos, len, copied, page, fsdata);
21344- if (i_size != inode->i_size)
21345- mark_inode_dirty(inode);
21346- return ret;
21347-}
21348-
21349 const struct address_space_operations exofs_aops = {
21350 .readpage = exofs_readpage,
21351 .readpages = exofs_readpages,
21352 .writepage = exofs_writepage,
21353 .writepages = exofs_writepages,
21354 .write_begin = exofs_write_begin_export,
21355- .write_end = exofs_write_end,
21356+ .write_end = simple_write_end,
21357 };
21358
21359 /******************************************************************************
21360diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
21361index f9d6937..354ed3b 100644
21362--- a/fs/ext3/inode.c
21363+++ b/fs/ext3/inode.c
21364@@ -1151,16 +1151,6 @@ static int do_journal_get_write_access(handle_t *handle,
21365 return ext3_journal_get_write_access(handle, bh);
21366 }
21367
21368-/*
21369- * Truncate blocks that were not used by write. We have to truncate the
21370- * pagecache as well so that corresponding buffers get properly unmapped.
21371- */
21372-static void ext3_truncate_failed_write(struct inode *inode)
21373-{
21374- truncate_inode_pages(inode->i_mapping, inode->i_size);
21375- ext3_truncate(inode);
21376-}
21377-
21378 static int ext3_write_begin(struct file *file, struct address_space *mapping,
21379 loff_t pos, unsigned len, unsigned flags,
21380 struct page **pagep, void **fsdata)
21381@@ -1219,7 +1209,7 @@ write_begin_failed:
21382 unlock_page(page);
21383 page_cache_release(page);
21384 if (pos + len > inode->i_size)
21385- ext3_truncate_failed_write(inode);
21386+ ext3_truncate(inode);
21387 }
21388 if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
21389 goto retry;
21390@@ -1314,7 +1304,7 @@ static int ext3_ordered_write_end(struct file *file,
21391 page_cache_release(page);
21392
21393 if (pos + len > inode->i_size)
21394- ext3_truncate_failed_write(inode);
21395+ ext3_truncate(inode);
21396 return ret ? ret : copied;
21397 }
21398
21399@@ -1340,7 +1330,7 @@ static int ext3_writeback_write_end(struct file *file,
21400 page_cache_release(page);
21401
21402 if (pos + len > inode->i_size)
21403- ext3_truncate_failed_write(inode);
21404+ ext3_truncate(inode);
21405 return ret ? ret : copied;
21406 }
21407
21408@@ -1393,7 +1383,7 @@ static int ext3_journalled_write_end(struct file *file,
21409 page_cache_release(page);
21410
21411 if (pos + len > inode->i_size)
21412- ext3_truncate_failed_write(inode);
21413+ ext3_truncate(inode);
21414 return ret ? ret : copied;
21415 }
21416
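
ext3_truncate_failed_write(), removed above, pairs ext3_truncate() with a pagecache truncate so that buffers beyond i_size do not survive a failed write. The closest userspace analogue of "roll back a failed append" is ftruncate back to the old length; a loose sketch, not the kernel mechanism:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/stat.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("/tmp/undo.txt", O_RDWR | O_CREAT | O_APPEND, 0644);
        if (fd < 0) {
            perror("open");
            return 1;
        }

        struct stat st;
        fstat(fd, &st);
        off_t old_size = st.st_size;

        /* Pretend an append failed halfway through... */
        if (write(fd, "partial", 7) < 0)
            perror("write");

        /* ...and roll the file back to its pre-write length. */
        if (ftruncate(fd, old_size) != 0) {
            perror("ftruncate");
            return 1;
        }

        close(fd);
        return 0;
    }
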
21417diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
21418index f3032c9..1d04189 100644
21419--- a/fs/ext4/balloc.c
21420+++ b/fs/ext4/balloc.c
21421@@ -761,13 +761,7 @@ static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb,
21422 static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb,
21423 ext4_group_t group)
21424 {
21425- if (!ext4_bg_has_super(sb, group))
21426- return 0;
21427-
21428- if (EXT4_HAS_INCOMPAT_FEATURE(sb,EXT4_FEATURE_INCOMPAT_META_BG))
21429- return le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
21430- else
21431- return EXT4_SB(sb)->s_gdb_count;
21432+ return ext4_bg_has_super(sb, group) ? EXT4_SB(sb)->s_gdb_count : 0;
21433 }
21434
21435 /**
21436diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c
21437index dc79b75..50784ef 100644
21438--- a/fs/ext4/block_validity.c
21439+++ b/fs/ext4/block_validity.c
21440@@ -160,7 +160,7 @@ int ext4_setup_system_zone(struct super_block *sb)
21441 if (ext4_bg_has_super(sb, i) &&
21442 ((i < 5) || ((i % flex_size) == 0)))
21443 add_system_zone(sbi, ext4_group_first_block_no(sb, i),
21444- ext4_bg_num_gdb(sb, i) + 1);
21445+ sbi->s_gdb_count + 1);
21446 gdp = ext4_get_group_desc(sb, i, NULL);
21447 ret = add_system_zone(sbi, ext4_block_bitmap(sb, gdp), 1);
21448 if (ret)
21449diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
21450index d0a2afb..8825515 100644
21451--- a/fs/ext4/ext4.h
21452+++ b/fs/ext4/ext4.h
21453@@ -698,22 +698,11 @@ struct ext4_inode_info {
21454 __u16 i_extra_isize;
21455
21456 spinlock_t i_block_reservation_lock;
21457-#ifdef CONFIG_QUOTA
21458- /* quota space reservation, managed internally by quota code */
21459- qsize_t i_reserved_quota;
21460-#endif
21461
21462 /* completed async DIOs that might need unwritten extents handling */
21463 struct list_head i_aio_dio_complete_list;
21464 /* current io_end structure for async DIO write*/
21465 ext4_io_end_t *cur_aio_dio;
21466-
21467- /*
21468- * Transactions that contain inode's metadata needed to complete
21469- * fsync and fdatasync, respectively.
21470- */
21471- tid_t i_sync_tid;
21472- tid_t i_datasync_tid;
21473 };
21474
21475 /*
21476@@ -761,7 +750,6 @@ struct ext4_inode_info {
21477 #define EXT4_MOUNT_DELALLOC 0x8000000 /* Delalloc support */
21478 #define EXT4_MOUNT_DATA_ERR_ABORT 0x10000000 /* Abort on file data write */
21479 #define EXT4_MOUNT_BLOCK_VALIDITY 0x20000000 /* Block validity checking */
21480-#define EXT4_MOUNT_DISCARD 0x40000000 /* Issue DISCARD requests */
21481
21482 #define clear_opt(o, opt) o &= ~EXT4_MOUNT_##opt
21483 #define set_opt(o, opt) o |= EXT4_MOUNT_##opt
21484@@ -1436,7 +1424,7 @@ extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks);
21485 extern int ext4_block_truncate_page(handle_t *handle,
21486 struct address_space *mapping, loff_t from);
21487 extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
21488-extern qsize_t *ext4_get_reserved_space(struct inode *inode);
21489+extern qsize_t ext4_get_reserved_space(struct inode *inode);
21490 extern int flush_aio_dio_completed_IO(struct inode *inode);
21491 /* ioctl.c */
21492 extern long ext4_ioctl(struct file *, unsigned int, unsigned long);
21493diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
21494index 1892a77..a286598 100644
21495--- a/fs/ext4/ext4_jbd2.h
21496+++ b/fs/ext4/ext4_jbd2.h
21497@@ -49,7 +49,7 @@
21498
21499 #define EXT4_DATA_TRANS_BLOCKS(sb) (EXT4_SINGLEDATA_TRANS_BLOCKS(sb) + \
21500 EXT4_XATTR_TRANS_BLOCKS - 2 + \
21501- EXT4_MAXQUOTAS_TRANS_BLOCKS(sb))
21502+ 2*EXT4_QUOTA_TRANS_BLOCKS(sb))
21503
21504 /*
21505 * Define the number of metadata blocks we need to account to modify data.
21506@@ -57,7 +57,7 @@
21507 * This include super block, inode block, quota blocks and xattr blocks
21508 */
21509 #define EXT4_META_TRANS_BLOCKS(sb) (EXT4_XATTR_TRANS_BLOCKS + \
21510- EXT4_MAXQUOTAS_TRANS_BLOCKS(sb))
21511+ 2*EXT4_QUOTA_TRANS_BLOCKS(sb))
21512
21513 /* Delete operations potentially hit one directory's namespace plus an
21514 * entire inode, plus arbitrary amounts of bitmap/indirection data. Be
21515@@ -92,7 +92,6 @@
21516 * but inode, sb and group updates are done only once */
21517 #define EXT4_QUOTA_INIT_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_INIT_ALLOC*\
21518 (EXT4_SINGLEDATA_TRANS_BLOCKS(sb)-3)+3+DQUOT_INIT_REWRITE) : 0)
21519-
21520 #define EXT4_QUOTA_DEL_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_DEL_ALLOC*\
21521 (EXT4_SINGLEDATA_TRANS_BLOCKS(sb)-3)+3+DQUOT_DEL_REWRITE) : 0)
21522 #else
21523@@ -100,9 +99,6 @@
21524 #define EXT4_QUOTA_INIT_BLOCKS(sb) 0
21525 #define EXT4_QUOTA_DEL_BLOCKS(sb) 0
21526 #endif
21527-#define EXT4_MAXQUOTAS_TRANS_BLOCKS(sb) (MAXQUOTAS*EXT4_QUOTA_TRANS_BLOCKS(sb))
21528-#define EXT4_MAXQUOTAS_INIT_BLOCKS(sb) (MAXQUOTAS*EXT4_QUOTA_INIT_BLOCKS(sb))
21529-#define EXT4_MAXQUOTAS_DEL_BLOCKS(sb) (MAXQUOTAS*EXT4_QUOTA_DEL_BLOCKS(sb))
21530
21531 int
21532 ext4_mark_iloc_dirty(handle_t *handle,
21533@@ -258,19 +254,6 @@ static inline int ext4_jbd2_file_inode(handle_t *handle, struct inode *inode)
21534 return 0;
21535 }
21536
21537-static inline void ext4_update_inode_fsync_trans(handle_t *handle,
21538- struct inode *inode,
21539- int datasync)
21540-{
21541- struct ext4_inode_info *ei = EXT4_I(inode);
21542-
21543- if (ext4_handle_valid(handle)) {
21544- ei->i_sync_tid = handle->h_transaction->t_tid;
21545- if (datasync)
21546- ei->i_datasync_tid = handle->h_transaction->t_tid;
21547- }
21548-}
21549-
21550 /* super.c */
21551 int ext4_force_commit(struct super_block *sb);
21552
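
The macro churn above is plain arithmetic: worst-case journal credits for quota updates scale with the number of quota types, and 2 * EXT4_QUOTA_TRANS_BLOCKS(sb) equals EXT4_MAXQUOTAS_TRANS_BLOCKS(sb) exactly when MAXQUOTAS is 2 (user and group). With toy numbers (hypothetical credit values, not real ext4 constants):

    #include <stdio.h>

    int main(void)
    {
        int quota_trans_blocks = 5; /* hypothetical per-type credit */
        int maxquotas = 2;          /* user + group */

        int old_style = 2 * quota_trans_blocks;
        int new_style = maxquotas * quota_trans_blocks;

        /* identical while maxquotas == 2; the MAXQUOTAS form
         * tracks any future quota types automatically */
        printf("old=%d new=%d\n", old_style, new_style);
        return 0;
    }
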
21553diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
21554index 8b8bae4..715264b 100644
21555--- a/fs/ext4/extents.c
21556+++ b/fs/ext4/extents.c
21557@@ -1761,9 +1761,7 @@ int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
21558 while (block < last && block != EXT_MAX_BLOCK) {
21559 num = last - block;
21560 /* find extent for this block */
21561- down_read(&EXT4_I(inode)->i_data_sem);
21562 path = ext4_ext_find_extent(inode, block, path);
21563- up_read(&EXT4_I(inode)->i_data_sem);
21564 if (IS_ERR(path)) {
21565 err = PTR_ERR(path);
21566 path = NULL;
21567@@ -2076,7 +2074,7 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
21568 ext_debug("free last %u blocks starting %llu\n", num, start);
21569 for (i = 0; i < num; i++) {
21570 bh = sb_find_get_block(inode->i_sb, start + i);
21571- ext4_forget(handle, metadata, inode, bh, start + i);
21572+ ext4_forget(handle, 0, inode, bh, start + i);
21573 }
21574 ext4_free_blocks(handle, inode, start, num, metadata);
21575 } else if (from == le32_to_cpu(ex->ee_block)
21576@@ -2169,7 +2167,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
21577 correct_index = 1;
21578 credits += (ext_depth(inode)) + 1;
21579 }
21580- credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
21581+ credits += 2 * EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb);
21582
21583 err = ext4_ext_truncate_extend_restart(handle, inode, credits);
21584 if (err)
21585@@ -3066,8 +3064,6 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
21586 if (flags == EXT4_GET_BLOCKS_DIO_CONVERT_EXT) {
21587 ret = ext4_convert_unwritten_extents_dio(handle, inode,
21588 path);
21589- if (ret >= 0)
21590- ext4_update_inode_fsync_trans(handle, inode, 1);
21591 goto out2;
21592 }
21593 /* buffered IO case */
21594@@ -3095,8 +3091,6 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
21595 ret = ext4_ext_convert_to_initialized(handle, inode,
21596 path, iblock,
21597 max_blocks);
21598- if (ret >= 0)
21599- ext4_update_inode_fsync_trans(handle, inode, 1);
21600 out:
21601 if (ret <= 0) {
21602 err = ret;
21603@@ -3335,16 +3329,10 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
21604 allocated = ext4_ext_get_actual_len(&newex);
21605 set_buffer_new(bh_result);
21606
21607- /*
21608- * Cache the extent and update transaction to commit on fdatasync only
21609- * when it is _not_ an uninitialized extent.
21610- */
21611- if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) {
21612+ /* Cache only when it is _not_ an uninitialized extent */
21613+ if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0)
21614 ext4_ext_put_in_cache(inode, iblock, allocated, newblock,
21615 EXT4_EXT_CACHE_EXTENT);
21616- ext4_update_inode_fsync_trans(handle, inode, 1);
21617- } else
21618- ext4_update_inode_fsync_trans(handle, inode, 0);
21619 out:
21620 if (allocated > max_blocks)
21621 allocated = max_blocks;
21622@@ -3732,8 +3720,10 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
21623 * Walk the extent tree gathering extent information.
21624 * ext4_ext_fiemap_cb will push extents back to user.
21625 */
21626+ down_read(&EXT4_I(inode)->i_data_sem);
21627 error = ext4_ext_walk_space(inode, start_blk, len_blks,
21628 ext4_ext_fiemap_cb, fieinfo);
21629+ up_read(&EXT4_I(inode)->i_data_sem);
21630 }
21631
21632 return error;
21633diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
21634index d6049e4..2b15312 100644
21635--- a/fs/ext4/fsync.c
21636+++ b/fs/ext4/fsync.c
21637@@ -51,30 +51,25 @@
21638 int ext4_sync_file(struct file *file, struct dentry *dentry, int datasync)
21639 {
21640 struct inode *inode = dentry->d_inode;
21641- struct ext4_inode_info *ei = EXT4_I(inode);
21642 journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
21643- int ret;
21644- tid_t commit_tid;
21645+ int err, ret = 0;
21646
21647 J_ASSERT(ext4_journal_current_handle() == NULL);
21648
21649 trace_ext4_sync_file(file, dentry, datasync);
21650
21651- if (inode->i_sb->s_flags & MS_RDONLY)
21652- return 0;
21653-
21654 ret = flush_aio_dio_completed_IO(inode);
21655 if (ret < 0)
21656- return ret;
21657-
21658- if (!journal)
21659- return simple_fsync(file, dentry, datasync);
21660-
21661+ goto out;
21662 /*
21663- * data=writeback,ordered:
21664+ * data=writeback:
21665 * The caller's filemap_fdatawrite()/wait will sync the data.
21666- * Metadata is in the journal, we wait for proper transaction to
21667- * commit here.
21668+ * sync_inode() will sync the metadata
21669+ *
21670+ * data=ordered:
21671+ * The caller's filemap_fdatawrite() will write the data and
21672+ * sync_inode() will write the inode if it is dirty. Then the caller's
21673+ * filemap_fdatawait() will wait on the pages.
21674 *
21675 * data=journal:
21676 * filemap_fdatawrite won't do anything (the buffers are clean).
21677@@ -84,13 +79,32 @@ int ext4_sync_file(struct file *file, struct dentry *dentry, int datasync)
21678 * (they were dirtied by commit). But that's OK - the blocks are
21679 * safe in-journal, which is all fsync() needs to ensure.
21680 */
21681- if (ext4_should_journal_data(inode))
21682- return ext4_force_commit(inode->i_sb);
21683+ if (ext4_should_journal_data(inode)) {
21684+ ret = ext4_force_commit(inode->i_sb);
21685+ goto out;
21686+ }
21687
21688- commit_tid = datasync ? ei->i_datasync_tid : ei->i_sync_tid;
21689- if (jbd2_log_start_commit(journal, commit_tid))
21690- jbd2_log_wait_commit(journal, commit_tid);
21691- else if (journal->j_flags & JBD2_BARRIER)
21692+ if (!journal)
21693+ ret = sync_mapping_buffers(inode->i_mapping);
21694+
21695+ if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
21696+ goto out;
21697+
21698+ /*
21699+ * The VFS has written the file data. If the inode is unaltered
21700+ * then we need not start a commit.
21701+ */
21702+ if (inode->i_state & (I_DIRTY_SYNC|I_DIRTY_DATASYNC)) {
21703+ struct writeback_control wbc = {
21704+ .sync_mode = WB_SYNC_ALL,
21705+ .nr_to_write = 0, /* sys_fsync did this */
21706+ };
21707+ err = sync_inode(inode, &wbc);
21708+ if (ret == 0)
21709+ ret = err;
21710+ }
21711+out:
21712+ if (journal && (journal->j_flags & JBD2_BARRIER))
21713 blkdev_issue_flush(inode->i_sb->s_bdev, NULL);
21714 return ret;
21715 }
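
The hunk above reworks how ext4_sync_file() decides when to push metadata and whether to wait on a journal commit. The userspace contract is unchanged either way: fsync(2) covers data plus inode metadata, fdatasync(2) may skip metadata that does not matter for retrieving the data. A minimal durable write, for reference:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("/tmp/durable.txt",
                      O_WRONLY | O_CREAT | O_TRUNC, 0644);
        if (fd < 0) {
            perror("open");
            return 1;
        }

        const char msg[] = "committed\n";
        if (write(fd, msg, strlen(msg)) != (ssize_t)strlen(msg)) {
            perror("write");
            return 1;
        }

        /* fdatasync skips pure-metadata flushes (e.g. mtime);
         * fsync would also commit the inode's metadata. */
        if (fdatasync(fd) != 0) {
            perror("fdatasync");
            return 1;
        }

        close(fd);
        return 0;
    }
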
21716diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
21717index e233879..2c8caa5 100644
21718--- a/fs/ext4/inode.c
21719+++ b/fs/ext4/inode.c
21720@@ -1021,12 +1021,10 @@ static int ext4_ind_get_blocks(handle_t *handle, struct inode *inode,
21721 if (!err)
21722 err = ext4_splice_branch(handle, inode, iblock,
21723 partial, indirect_blks, count);
21724- if (err)
21725+ else
21726 goto cleanup;
21727
21728 set_buffer_new(bh_result);
21729-
21730- ext4_update_inode_fsync_trans(handle, inode, 1);
21731 got_it:
21732 map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
21733 if (count > blocks_to_boundary)
21734@@ -1045,12 +1043,17 @@ out:
21735 return err;
21736 }
21737
21738-#ifdef CONFIG_QUOTA
21739-qsize_t *ext4_get_reserved_space(struct inode *inode)
21740+qsize_t ext4_get_reserved_space(struct inode *inode)
21741 {
21742- return &EXT4_I(inode)->i_reserved_quota;
21743+ unsigned long long total;
21744+
21745+ spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
21746+ total = EXT4_I(inode)->i_reserved_data_blocks +
21747+ EXT4_I(inode)->i_reserved_meta_blocks;
21748+ spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
21749+
21750+ return total;
21751 }
21752-#endif
21753 /*
21754 * Calculate the number of metadata blocks need to reserve
21755 * to allocate @blocks for non extent file based file
21756@@ -1531,16 +1534,6 @@ static int do_journal_get_write_access(handle_t *handle,
21757 return ext4_journal_get_write_access(handle, bh);
21758 }
21759
21760-/*
21761- * Truncate blocks that were not used by write. We have to truncate the
21762- * pagecache as well so that corresponding buffers get properly unmapped.
21763- */
21764-static void ext4_truncate_failed_write(struct inode *inode)
21765-{
21766- truncate_inode_pages(inode->i_mapping, inode->i_size);
21767- ext4_truncate(inode);
21768-}
21769-
21770 static int ext4_write_begin(struct file *file, struct address_space *mapping,
21771 loff_t pos, unsigned len, unsigned flags,
21772 struct page **pagep, void **fsdata)
21773@@ -1606,7 +1599,7 @@ retry:
21774
21775 ext4_journal_stop(handle);
21776 if (pos + len > inode->i_size) {
21777- ext4_truncate_failed_write(inode);
21778+ ext4_truncate(inode);
21779 /*
21780 * If truncate failed early the inode might
21781 * still be on the orphan list; we need to
21782@@ -1716,7 +1709,7 @@ static int ext4_ordered_write_end(struct file *file,
21783 ret = ret2;
21784
21785 if (pos + len > inode->i_size) {
21786- ext4_truncate_failed_write(inode);
21787+ ext4_truncate(inode);
21788 /*
21789 * If truncate failed early the inode might still be
21790 * on the orphan list; we need to make sure the inode
21791@@ -1758,7 +1751,7 @@ static int ext4_writeback_write_end(struct file *file,
21792 ret = ret2;
21793
21794 if (pos + len > inode->i_size) {
21795- ext4_truncate_failed_write(inode);
21796+ ext4_truncate(inode);
21797 /*
21798 * If truncate failed early the inode might still be
21799 * on the orphan list; we need to make sure the inode
21800@@ -1821,7 +1814,7 @@ static int ext4_journalled_write_end(struct file *file,
21801 if (!ret)
21802 ret = ret2;
21803 if (pos + len > inode->i_size) {
21804- ext4_truncate_failed_write(inode);
21805+ ext4_truncate(inode);
21806 /*
21807 * If truncate failed early the inode might still be
21808 * on the orphan list; we need to make sure the inode
21809@@ -1853,17 +1846,19 @@ repeat:
21810
21811 md_needed = mdblocks - EXT4_I(inode)->i_reserved_meta_blocks;
21812 total = md_needed + nrblocks;
21813- spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
21814
21815 /*
21816 * Make quota reservation here to prevent quota overflow
21817 * later. Real quota accounting is done at pages writeout
21818 * time.
21819 */
21820- if (vfs_dq_reserve_block(inode, total))
21821+ if (vfs_dq_reserve_block(inode, total)) {
21822+ spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
21823 return -EDQUOT;
21824+ }
21825
21826 if (ext4_claim_free_blocks(sbi, total)) {
21827+ spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
21828 vfs_dq_release_reservation_block(inode, total);
21829 if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
21830 yield();
21831@@ -1871,11 +1866,10 @@ repeat:
21832 }
21833 return -ENOSPC;
21834 }
21835- spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
21836 EXT4_I(inode)->i_reserved_data_blocks += nrblocks;
21837- EXT4_I(inode)->i_reserved_meta_blocks += md_needed;
21838- spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
21839+ EXT4_I(inode)->i_reserved_meta_blocks = mdblocks;
21840
21841+ spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
21842 return 0; /* success */
21843 }
21844
21845@@ -2794,7 +2788,7 @@ static int ext4_da_writepages_trans_blocks(struct inode *inode)
21846 * number of contiguous block. So we will limit
21847 * number of contiguous block to a sane value
21848 */
21849- if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) &&
21850+ if (!(inode->i_flags & EXT4_EXTENTS_FL) &&
21851 (max_blocks > EXT4_MAX_TRANS_DATA))
21852 max_blocks = EXT4_MAX_TRANS_DATA;
21853
21854@@ -3097,7 +3091,7 @@ retry:
21855 * i_size_read because we hold i_mutex.
21856 */
21857 if (pos + len > inode->i_size)
21858- ext4_truncate_failed_write(inode);
21859+ ext4_truncate(inode);
21860 }
21861
21862 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
21863@@ -4126,8 +4120,6 @@ static void ext4_clear_blocks(handle_t *handle, struct inode *inode,
21864 __le32 *last)
21865 {
21866 __le32 *p;
21867- int is_metadata = S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode);
21868-
21869 if (try_to_extend_transaction(handle, inode)) {
21870 if (bh) {
21871 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
21872@@ -4158,11 +4150,11 @@ static void ext4_clear_blocks(handle_t *handle, struct inode *inode,
21873
21874 *p = 0;
21875 tbh = sb_find_get_block(inode->i_sb, nr);
21876- ext4_forget(handle, is_metadata, inode, tbh, nr);
21877+ ext4_forget(handle, 0, inode, tbh, nr);
21878 }
21879 }
21880
21881- ext4_free_blocks(handle, inode, block_to_free, count, is_metadata);
21882+ ext4_free_blocks(handle, inode, block_to_free, count, 0);
21883 }
21884
21885 /**
21886@@ -4789,8 +4781,8 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
21887 struct ext4_iloc iloc;
21888 struct ext4_inode *raw_inode;
21889 struct ext4_inode_info *ei;
21890+ struct buffer_head *bh;
21891 struct inode *inode;
21892- journal_t *journal = EXT4_SB(sb)->s_journal;
21893 long ret;
21894 int block;
21895
21896@@ -4801,11 +4793,11 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
21897 return inode;
21898
21899 ei = EXT4_I(inode);
21900- iloc.bh = 0;
21901
21902 ret = __ext4_get_inode_loc(inode, &iloc, 0);
21903 if (ret < 0)
21904 goto bad_inode;
21905+ bh = iloc.bh;
21906 raw_inode = ext4_raw_inode(&iloc);
21907 inode->i_mode = le16_to_cpu(raw_inode->i_mode);
21908 inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
21909@@ -4828,6 +4820,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
21910 if (inode->i_mode == 0 ||
21911 !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) {
21912 /* this inode is deleted */
21913+ brelse(bh);
21914 ret = -ESTALE;
21915 goto bad_inode;
21916 }
21917@@ -4844,9 +4837,6 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
21918 ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
21919 inode->i_size = ext4_isize(raw_inode);
21920 ei->i_disksize = inode->i_size;
21921-#ifdef CONFIG_QUOTA
21922- ei->i_reserved_quota = 0;
21923-#endif
21924 inode->i_generation = le32_to_cpu(raw_inode->i_generation);
21925 ei->i_block_group = iloc.block_group;
21926 ei->i_last_alloc_group = ~0;
21927@@ -4858,35 +4848,11 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
21928 ei->i_data[block] = raw_inode->i_block[block];
21929 INIT_LIST_HEAD(&ei->i_orphan);
21930
21931- /*
21932- * Set transaction id's of transactions that have to be committed
21933- * to finish f[data]sync. We set them to currently running transaction
21934- * as we cannot be sure that the inode or some of its metadata isn't
21935- * part of the transaction - the inode could have been reclaimed and
21936- * now it is reread from disk.
21937- */
21938- if (journal) {
21939- transaction_t *transaction;
21940- tid_t tid;
21941-
21942- spin_lock(&journal->j_state_lock);
21943- if (journal->j_running_transaction)
21944- transaction = journal->j_running_transaction;
21945- else
21946- transaction = journal->j_committing_transaction;
21947- if (transaction)
21948- tid = transaction->t_tid;
21949- else
21950- tid = journal->j_commit_sequence;
21951- spin_unlock(&journal->j_state_lock);
21952- ei->i_sync_tid = tid;
21953- ei->i_datasync_tid = tid;
21954- }
21955-
21956 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
21957 ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
21958 if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
21959 EXT4_INODE_SIZE(inode->i_sb)) {
21960+ brelse(bh);
21961 ret = -EIO;
21962 goto bad_inode;
21963 }
21964@@ -4918,7 +4884,10 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
21965
21966 ret = 0;
21967 if (ei->i_file_acl &&
21968- !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
21969+ ((ei->i_file_acl <
21970+ (le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block) +
21971+ EXT4_SB(sb)->s_gdb_count)) ||
21972+ (ei->i_file_acl >= ext4_blocks_count(EXT4_SB(sb)->s_es)))) {
21973 ext4_error(sb, __func__,
21974 "bad extended attribute block %llu in inode #%lu",
21975 ei->i_file_acl, inode->i_ino);
21976@@ -4936,8 +4905,10 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
21977 /* Validate block references which are part of inode */
21978 ret = ext4_check_inode_blockref(inode);
21979 }
21980- if (ret)
21981+ if (ret) {
21982+ brelse(bh);
21983 goto bad_inode;
21984+ }
21985
21986 if (S_ISREG(inode->i_mode)) {
21987 inode->i_op = &ext4_file_inode_operations;
21988@@ -4965,6 +4936,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
21989 init_special_inode(inode, inode->i_mode,
21990 new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
21991 } else {
21992+ brelse(bh);
21993 ret = -EIO;
21994 ext4_error(inode->i_sb, __func__,
21995 "bogus i_mode (%o) for inode=%lu",
21996@@ -4977,7 +4949,6 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
21997 return inode;
21998
21999 bad_inode:
22000- brelse(iloc.bh);
22001 iget_failed(inode);
22002 return ERR_PTR(ret);
22003 }
22004@@ -5137,7 +5108,6 @@ static int ext4_do_update_inode(handle_t *handle,
22005 err = rc;
22006 ei->i_state &= ~EXT4_STATE_NEW;
22007
22008- ext4_update_inode_fsync_trans(handle, inode, 0);
22009 out_brelse:
22010 brelse(bh);
22011 ext4_std_error(inode->i_sb, err);
22012@@ -5257,8 +5227,8 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
22013
22014 /* (user+group)*(old+new) structure, inode write (sb,
22015 * inode block, ? - but truncate inode update has it) */
22016- handle = ext4_journal_start(inode, (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)+
22017- EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb))+3);
22018+ handle = ext4_journal_start(inode, 2*(EXT4_QUOTA_INIT_BLOCKS(inode->i_sb)+
22019+ EXT4_QUOTA_DEL_BLOCKS(inode->i_sb))+3);
22020 if (IS_ERR(handle)) {
22021 error = PTR_ERR(handle);
22022 goto err_out;
22023diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
22024index b63d193..c1cdf61 100644
22025--- a/fs/ext4/ioctl.c
22026+++ b/fs/ext4/ioctl.c
22027@@ -221,38 +221,31 @@ setversion_out:
22028 struct file *donor_filp;
22029 int err;
22030
22031- if (!(filp->f_mode & FMODE_READ) ||
22032- !(filp->f_mode & FMODE_WRITE))
22033- return -EBADF;
22034-
22035 if (copy_from_user(&me,
22036 (struct move_extent __user *)arg, sizeof(me)))
22037 return -EFAULT;
22038- me.moved_len = 0;
22039
22040 donor_filp = fget(me.donor_fd);
22041 if (!donor_filp)
22042 return -EBADF;
22043
22044- if (!(donor_filp->f_mode & FMODE_WRITE)) {
22045- err = -EBADF;
22046- goto mext_out;
22047+ if (!capable(CAP_DAC_OVERRIDE)) {
22048+ if ((current->real_cred->fsuid != inode->i_uid) ||
22049+ !(inode->i_mode & S_IRUSR) ||
22050+ !(donor_filp->f_dentry->d_inode->i_mode &
22051+ S_IRUSR)) {
22052+ fput(donor_filp);
22053+ return -EACCES;
22054+ }
22055 }
22056
22057- err = mnt_want_write(filp->f_path.mnt);
22058- if (err)
22059- goto mext_out;
22060-
22061 err = ext4_move_extents(filp, donor_filp, me.orig_start,
22062 me.donor_start, me.len, &me.moved_len);
22063- mnt_drop_write(filp->f_path.mnt);
22064- if (me.moved_len > 0)
22065- file_remove_suid(donor_filp);
22066+ fput(donor_filp);
22067
22068 if (copy_to_user((struct move_extent *)arg, &me, sizeof(me)))
22069- err = -EFAULT;
22070-mext_out:
22071- fput(donor_filp);
22072+ return -EFAULT;
22073+
22074 return err;
22075 }
22076
22077diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
22078index 7d71148..bba1282 100644
22079--- a/fs/ext4/mballoc.c
22080+++ b/fs/ext4/mballoc.c
22081@@ -2529,6 +2529,7 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn)
22082 struct ext4_group_info *db;
22083 int err, count = 0, count2 = 0;
22084 struct ext4_free_data *entry;
22085+ ext4_fsblk_t discard_block;
22086 struct list_head *l, *ltmp;
22087
22088 list_for_each_safe(l, ltmp, &txn->t_private_list) {
22089@@ -2558,19 +2559,13 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn)
22090 page_cache_release(e4b.bd_bitmap_page);
22091 }
22092 ext4_unlock_group(sb, entry->group);
22093- if (test_opt(sb, DISCARD)) {
22094- ext4_fsblk_t discard_block;
22095- struct ext4_super_block *es = EXT4_SB(sb)->s_es;
22096-
22097- discard_block = (ext4_fsblk_t)entry->group *
22098- EXT4_BLOCKS_PER_GROUP(sb)
22099- + entry->start_blk
22100- + le32_to_cpu(es->s_first_data_block);
22101- trace_ext4_discard_blocks(sb,
22102- (unsigned long long)discard_block,
22103- entry->count);
22104- sb_issue_discard(sb, discard_block, entry->count);
22105- }
22106+ discard_block = (ext4_fsblk_t) entry->group * EXT4_BLOCKS_PER_GROUP(sb)
22107+ + entry->start_blk
22108+ + le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
22109+ trace_ext4_discard_blocks(sb, (unsigned long long)discard_block,
22110+ entry->count);
22111+ sb_issue_discard(sb, discard_block, entry->count);
22112+
22113 kmem_cache_free(ext4_free_ext_cachep, entry);
22114 ext4_mb_release_desc(&e4b);
22115 }
22116@@ -3011,24 +3006,6 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
22117 }
22118
22119 /*
22120- * Called on failure; free up any blocks from the inode PA for this
22121- * context. We don't need this for MB_GROUP_PA because we only change
22122- * pa_free in ext4_mb_release_context(), but on failure, we've already
22123- * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed.
22124- */
22125-static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
22126-{
22127- struct ext4_prealloc_space *pa = ac->ac_pa;
22128- int len;
22129-
22130- if (pa && pa->pa_type == MB_INODE_PA) {
22131- len = ac->ac_b_ex.fe_len;
22132- pa->pa_free += len;
22133- }
22134-
22135-}
22136-
22137-/*
22138 * use blocks preallocated to inode
22139 */
22140 static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
22141@@ -4313,7 +4290,6 @@ repeat:
22142 ac->ac_status = AC_STATUS_CONTINUE;
22143 goto repeat;
22144 } else if (*errp) {
22145- ext4_discard_allocated_blocks(ac);
22146 ac->ac_b_ex.fe_len = 0;
22147 ar->len = 0;
22148 ext4_mb_show_ac(ac);
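
Both sides of the release_blocks_on_commit() hunk compute the same absolute location for the discard: a group-relative block number plus the group's base plus s_first_data_block. The arithmetic with hypothetical geometry:

    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical filesystem geometry, not read from a real fs. */
        unsigned long long blocks_per_group = 32768;
        unsigned long long first_data_block = 1; /* 1 for 1KiB blocks */
        unsigned long long group = 3;
        unsigned long long start_blk = 120;      /* group-relative */
        unsigned long long count = 8;

        unsigned long long discard_block =
            group * blocks_per_group + start_blk + first_data_block;

        printf("discard %llu blocks starting at block %llu\n",
               count, discard_block);
        return 0;
    }
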
22149diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
22150index 8646149..a93d5b8 100644
22151--- a/fs/ext4/migrate.c
22152+++ b/fs/ext4/migrate.c
22153@@ -238,7 +238,7 @@ static int extend_credit_for_blkdel(handle_t *handle, struct inode *inode)
22154 * So allocate a credit of 3. We may update
22155 * quota (user and group).
22156 */
22157- needed = 3 + EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
22158+ needed = 3 + 2*EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb);
22159
22160 if (ext4_journal_extend(handle, needed) != 0)
22161 retval = ext4_journal_restart(handle, needed);
22162@@ -477,7 +477,7 @@ int ext4_ext_migrate(struct inode *inode)
22163 handle = ext4_journal_start(inode,
22164 EXT4_DATA_TRANS_BLOCKS(inode->i_sb) +
22165 EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
22166- EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)
22167+ 2 * EXT4_QUOTA_INIT_BLOCKS(inode->i_sb)
22168 + 1);
22169 if (IS_ERR(handle)) {
22170 retval = PTR_ERR(handle);
22171diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
22172index f5b03a1..25b6b14 100644
22173--- a/fs/ext4/move_extent.c
22174+++ b/fs/ext4/move_extent.c
22175@@ -77,14 +77,12 @@ static int
22176 mext_next_extent(struct inode *inode, struct ext4_ext_path *path,
22177 struct ext4_extent **extent)
22178 {
22179- struct ext4_extent_header *eh;
22180 int ppos, leaf_ppos = path->p_depth;
22181
22182 ppos = leaf_ppos;
22183 if (EXT_LAST_EXTENT(path[ppos].p_hdr) > path[ppos].p_ext) {
22184 /* leaf block */
22185 *extent = ++path[ppos].p_ext;
22186- path[ppos].p_block = ext_pblock(path[ppos].p_ext);
22187 return 0;
22188 }
22189
22190@@ -121,18 +119,9 @@ mext_next_extent(struct inode *inode, struct ext4_ext_path *path,
22191 ext_block_hdr(path[cur_ppos+1].p_bh);
22192 }
22193
22194- path[leaf_ppos].p_ext = *extent = NULL;
22195-
22196- eh = path[leaf_ppos].p_hdr;
22197- if (le16_to_cpu(eh->eh_entries) == 0)
22198- /* empty leaf is found */
22199- return -ENODATA;
22200-
22201 /* leaf block */
22202 path[leaf_ppos].p_ext = *extent =
22203 EXT_FIRST_EXTENT(path[leaf_ppos].p_hdr);
22204- path[leaf_ppos].p_block =
22205- ext_pblock(path[leaf_ppos].p_ext);
22206 return 0;
22207 }
22208 }
22209@@ -166,15 +155,40 @@ mext_check_null_inode(struct inode *inode1, struct inode *inode2,
22210 }
22211
22212 /**
22213- * double_down_write_data_sem - Acquire two inodes' write lock of i_data_sem
22214+ * mext_double_down_read - Acquire two inodes' read semaphore
22215+ *
22216+ * @orig_inode: original inode structure
22217+ * @donor_inode: donor inode structure
22218+ * Acquire read semaphore of the two inodes (orig and donor) by i_ino order.
22219+ */
22220+static void
22221+mext_double_down_read(struct inode *orig_inode, struct inode *donor_inode)
22222+{
22223+ struct inode *first = orig_inode, *second = donor_inode;
22224+
22225+ /*
22226+ * Use the inode number to provide the stable locking order instead
22227+ * of its address, because the C language doesn't guarantee you can
22228+ * compare pointers that don't come from the same array.
22229+ */
22230+ if (donor_inode->i_ino < orig_inode->i_ino) {
22231+ first = donor_inode;
22232+ second = orig_inode;
22233+ }
22234+
22235+ down_read(&EXT4_I(first)->i_data_sem);
22236+ down_read(&EXT4_I(second)->i_data_sem);
22237+}
22238+
22239+/**
22240+ * mext_double_down_write - Acquire two inodes' write semaphore
22241 *
22242 * @orig_inode: original inode structure
22243 * @donor_inode: donor inode structure
22244- * Acquire write lock of i_data_sem of the two inodes (orig and donor) by
22245- * i_ino order.
22246+ * Acquire write semaphore of the two inodes (orig and donor) by i_ino order.
22247 */
22248 static void
22249-double_down_write_data_sem(struct inode *orig_inode, struct inode *donor_inode)
22250+mext_double_down_write(struct inode *orig_inode, struct inode *donor_inode)
22251 {
22252 struct inode *first = orig_inode, *second = donor_inode;
22253
22254@@ -189,18 +203,32 @@ double_down_write_data_sem(struct inode *orig_inode, struct inode *donor_inode)
22255 }
22256
22257 down_write(&EXT4_I(first)->i_data_sem);
22258- down_write_nested(&EXT4_I(second)->i_data_sem, SINGLE_DEPTH_NESTING);
22259+ down_write(&EXT4_I(second)->i_data_sem);
22260 }
22261
22262 /**
22263- * double_up_write_data_sem - Release two inodes' write lock of i_data_sem
22264+ * mext_double_up_read - Release two inodes' read semaphore
22265 *
22266 * @orig_inode: original inode structure to be released its lock first
22267 * @donor_inode: donor inode structure to be released its lock second
22268- * Release write lock of i_data_sem of two inodes (orig and donor).
22269+ * Release read semaphore of two inodes (orig and donor).
22270 */
22271 static void
22272-double_up_write_data_sem(struct inode *orig_inode, struct inode *donor_inode)
22273+mext_double_up_read(struct inode *orig_inode, struct inode *donor_inode)
22274+{
22275+ up_read(&EXT4_I(orig_inode)->i_data_sem);
22276+ up_read(&EXT4_I(donor_inode)->i_data_sem);
22277+}
22278+
22279+/**
22280+ * mext_double_up_write - Release two inodes' write semaphore
22281+ *
22282+ * @orig_inode: original inode structure to be released its lock first
22283+ * @donor_inode: donor inode structure to be released its lock second
22284+ * Release write semaphore of two inodes (orig and donor).
22285+ */
22286+static void
22287+mext_double_up_write(struct inode *orig_inode, struct inode *donor_inode)
22288 {
22289 up_write(&EXT4_I(orig_inode)->i_data_sem);
22290 up_write(&EXT4_I(donor_inode)->i_data_sem);
22291@@ -633,7 +661,6 @@ mext_calc_swap_extents(struct ext4_extent *tmp_dext,
22292 * @donor_inode: donor inode
22293 * @from: block offset of orig_inode
22294 * @count: block count to be replaced
22295- * @err: pointer to save return value
22296 *
22297 * Replace original inode extents and donor inode extents page by page.
22298 * We implement this replacement in the following three steps:
22299@@ -644,33 +671,33 @@ mext_calc_swap_extents(struct ext4_extent *tmp_dext,
22300 * 3. Change the block information of donor inode to point at the saved
22301 * original inode blocks in the dummy extents.
22302 *
22303- * Return replaced block count.
22304+ * Return 0 on success, or a negative error value on failure.
22305 */
22306 static int
22307 mext_replace_branches(handle_t *handle, struct inode *orig_inode,
22308 struct inode *donor_inode, ext4_lblk_t from,
22309- ext4_lblk_t count, int *err)
22310+ ext4_lblk_t count)
22311 {
22312 struct ext4_ext_path *orig_path = NULL;
22313 struct ext4_ext_path *donor_path = NULL;
22314 struct ext4_extent *oext, *dext;
22315 struct ext4_extent tmp_dext, tmp_oext;
22316 ext4_lblk_t orig_off = from, donor_off = from;
22317+ int err = 0;
22318 int depth;
22319 int replaced_count = 0;
22320 int dext_alen;
22321
22322- /* Protect extent trees against block allocations via delalloc */
22323- double_down_write_data_sem(orig_inode, donor_inode);
22324+ mext_double_down_write(orig_inode, donor_inode);
22325
22326 /* Get the original extent for the block "orig_off" */
22327- *err = get_ext_path(orig_inode, orig_off, &orig_path);
22328- if (*err)
22329+ err = get_ext_path(orig_inode, orig_off, &orig_path);
22330+ if (err)
22331 goto out;
22332
22333 /* Get the donor extent for the head */
22334- *err = get_ext_path(donor_inode, donor_off, &donor_path);
22335- if (*err)
22336+ err = get_ext_path(donor_inode, donor_off, &donor_path);
22337+ if (err)
22338 goto out;
22339 depth = ext_depth(orig_inode);
22340 oext = orig_path[depth].p_ext;
22341@@ -680,9 +707,9 @@ mext_replace_branches(handle_t *handle, struct inode *orig_inode,
22342 dext = donor_path[depth].p_ext;
22343 tmp_dext = *dext;
22344
22345- *err = mext_calc_swap_extents(&tmp_dext, &tmp_oext, orig_off,
22346+ err = mext_calc_swap_extents(&tmp_dext, &tmp_oext, orig_off,
22347 donor_off, count);
22348- if (*err)
22349+ if (err)
22350 goto out;
22351
22352 /* Loop for the donor extents */
22353@@ -691,7 +718,7 @@ mext_replace_branches(handle_t *handle, struct inode *orig_inode,
22354 if (!dext) {
22355 ext4_error(donor_inode->i_sb, __func__,
22356 "The extent for donor must be found");
22357- *err = -EIO;
22358+ err = -EIO;
22359 goto out;
22360 } else if (donor_off != le32_to_cpu(tmp_dext.ee_block)) {
22361 ext4_error(donor_inode->i_sb, __func__,
22362@@ -699,20 +726,20 @@ mext_replace_branches(handle_t *handle, struct inode *orig_inode,
22363 "extent(%u) should be equal",
22364 donor_off,
22365 le32_to_cpu(tmp_dext.ee_block));
22366- *err = -EIO;
22367+ err = -EIO;
22368 goto out;
22369 }
22370
22371 /* Set donor extent to orig extent */
22372- *err = mext_leaf_block(handle, orig_inode,
22373+ err = mext_leaf_block(handle, orig_inode,
22374 orig_path, &tmp_dext, &orig_off);
22375- if (*err)
22376+ if (err < 0)
22377 goto out;
22378
22379 /* Set orig extent to donor extent */
22380- *err = mext_leaf_block(handle, donor_inode,
22381+ err = mext_leaf_block(handle, donor_inode,
22382 donor_path, &tmp_oext, &donor_off);
22383- if (*err)
22384+ if (err < 0)
22385 goto out;
22386
22387 dext_alen = ext4_ext_get_actual_len(&tmp_dext);
22388@@ -726,25 +753,35 @@ mext_replace_branches(handle_t *handle, struct inode *orig_inode,
22389
22390 if (orig_path)
22391 ext4_ext_drop_refs(orig_path);
22392- *err = get_ext_path(orig_inode, orig_off, &orig_path);
22393- if (*err)
22394+ err = get_ext_path(orig_inode, orig_off, &orig_path);
22395+ if (err)
22396 goto out;
22397 depth = ext_depth(orig_inode);
22398 oext = orig_path[depth].p_ext;
22399+ if (le32_to_cpu(oext->ee_block) +
22400+ ext4_ext_get_actual_len(oext) <= orig_off) {
22401+ err = 0;
22402+ goto out;
22403+ }
22404 tmp_oext = *oext;
22405
22406 if (donor_path)
22407 ext4_ext_drop_refs(donor_path);
22408- *err = get_ext_path(donor_inode, donor_off, &donor_path);
22409- if (*err)
22410+ err = get_ext_path(donor_inode, donor_off, &donor_path);
22411+ if (err)
22412 goto out;
22413 depth = ext_depth(donor_inode);
22414 dext = donor_path[depth].p_ext;
22415+ if (le32_to_cpu(dext->ee_block) +
22416+ ext4_ext_get_actual_len(dext) <= donor_off) {
22417+ err = 0;
22418+ goto out;
22419+ }
22420 tmp_dext = *dext;
22421
22422- *err = mext_calc_swap_extents(&tmp_dext, &tmp_oext, orig_off,
22423+ err = mext_calc_swap_extents(&tmp_dext, &tmp_oext, orig_off,
22424 donor_off, count - replaced_count);
22425- if (*err)
22426+ if (err)
22427 goto out;
22428 }
22429
22430@@ -758,12 +795,8 @@ out:
22431 kfree(donor_path);
22432 }
22433
22434- ext4_ext_invalidate_cache(orig_inode);
22435- ext4_ext_invalidate_cache(donor_inode);
22436-
22437- double_up_write_data_sem(orig_inode, donor_inode);
22438-
22439- return replaced_count;
22440+ mext_double_up_write(orig_inode, donor_inode);
22441+ return err;
22442 }
22443
22444 /**
22445@@ -775,17 +808,16 @@ out:
22446 * @data_offset_in_page: block index where data swapping starts
22447 * @block_len_in_page: the number of blocks to be swapped
22448 * @uninit: orig extent is uninitialized or not
22449- * @err: pointer to save return value
22450 *
22451 * Save the data in original inode blocks and replace original inode extents
22452 * with donor inode extents by calling mext_replace_branches().
22453- * Finally, write out the saved data in new original inode blocks. Return
22454- * replaced block count.
22455+ * Finally, write out the saved data in new original inode blocks. Return 0
22456+ * on success, or a negative error value on failure.
22457 */
22458 static int
22459 move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
22460 pgoff_t orig_page_offset, int data_offset_in_page,
22461- int block_len_in_page, int uninit, int *err)
22462+ int block_len_in_page, int uninit)
22463 {
22464 struct inode *orig_inode = o_filp->f_dentry->d_inode;
22465 struct address_space *mapping = orig_inode->i_mapping;
22466@@ -797,11 +829,9 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
22467 long long offs = orig_page_offset << PAGE_CACHE_SHIFT;
22468 unsigned long blocksize = orig_inode->i_sb->s_blocksize;
22469 unsigned int w_flags = 0;
22470- unsigned int tmp_data_size, data_size, replaced_size;
22471+ unsigned int tmp_data_len, data_len;
22472 void *fsdata;
22473- int i, jblocks;
22474- int err2 = 0;
22475- int replaced_count = 0;
22476+ int ret, i, jblocks;
22477 int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits;
22478
22479 /*
22480@@ -811,8 +841,8 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
22481 jblocks = ext4_writepage_trans_blocks(orig_inode) * 2;
22482 handle = ext4_journal_start(orig_inode, jblocks);
22483 if (IS_ERR(handle)) {
22484- *err = PTR_ERR(handle);
22485- return 0;
22486+ ret = PTR_ERR(handle);
22487+ return ret;
22488 }
22489
22490 if (segment_eq(get_fs(), KERNEL_DS))
22491@@ -828,36 +858,39 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
22492 * Just swap data blocks between orig and donor.
22493 */
22494 if (uninit) {
22495- replaced_count = mext_replace_branches(handle, orig_inode,
22496- donor_inode, orig_blk_offset,
22497- block_len_in_page, err);
22498+ ret = mext_replace_branches(handle, orig_inode,
22499+ donor_inode, orig_blk_offset,
22500+ block_len_in_page);
22501+
22502+ /* Clear the inode cache not to refer to the old data */
22503+ ext4_ext_invalidate_cache(orig_inode);
22504+ ext4_ext_invalidate_cache(donor_inode);
22505 goto out2;
22506 }
22507
22508 offs = (long long)orig_blk_offset << orig_inode->i_blkbits;
22509
22510- /* Calculate data_size */
22511+ /* Calculate data_len */
22512 if ((orig_blk_offset + block_len_in_page - 1) ==
22513 ((orig_inode->i_size - 1) >> orig_inode->i_blkbits)) {
22514 /* Replace the last block */
22515- tmp_data_size = orig_inode->i_size & (blocksize - 1);
22516+ tmp_data_len = orig_inode->i_size & (blocksize - 1);
22517 /*
22518- * If data_size equal zero, it shows data_size is multiples of
22519+ * If data_len equal zero, it shows data_len is multiples of
22520 * blocksize. So we set appropriate value.
22521 */
22522- if (tmp_data_size == 0)
22523- tmp_data_size = blocksize;
22524+ if (tmp_data_len == 0)
22525+ tmp_data_len = blocksize;
22526
22527- data_size = tmp_data_size +
22528+ data_len = tmp_data_len +
22529 ((block_len_in_page - 1) << orig_inode->i_blkbits);
22530- } else
22531- data_size = block_len_in_page << orig_inode->i_blkbits;
22532-
22533- replaced_size = data_size;
22534+ } else {
22535+ data_len = block_len_in_page << orig_inode->i_blkbits;
22536+ }
22537
22538- *err = a_ops->write_begin(o_filp, mapping, offs, data_size, w_flags,
22539+ ret = a_ops->write_begin(o_filp, mapping, offs, data_len, w_flags,
22540 &page, &fsdata);
22541- if (unlikely(*err < 0))
22542+ if (unlikely(ret < 0))
22543 goto out;
22544
22545 if (!PageUptodate(page)) {
22546@@ -878,17 +911,14 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
22547 /* Release old bh and drop refs */
22548 try_to_release_page(page, 0);
22549
22550- replaced_count = mext_replace_branches(handle, orig_inode, donor_inode,
22551- orig_blk_offset, block_len_in_page,
22552- &err2);
22553- if (err2) {
22554- if (replaced_count) {
22555- block_len_in_page = replaced_count;
22556- replaced_size =
22557- block_len_in_page << orig_inode->i_blkbits;
22558- } else
22559- goto out;
22560- }
22561+ ret = mext_replace_branches(handle, orig_inode, donor_inode,
22562+ orig_blk_offset, block_len_in_page);
22563+ if (ret < 0)
22564+ goto out;
22565+
22566+ /* Clear the inode cache not to refer to the old data */
22567+ ext4_ext_invalidate_cache(orig_inode);
22568+ ext4_ext_invalidate_cache(donor_inode);
22569
22570 if (!page_has_buffers(page))
22571 create_empty_buffers(page, 1 << orig_inode->i_blkbits, 0);
22572@@ -898,16 +928,16 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
22573 bh = bh->b_this_page;
22574
22575 for (i = 0; i < block_len_in_page; i++) {
22576- *err = ext4_get_block(orig_inode,
22577+ ret = ext4_get_block(orig_inode,
22578 (sector_t)(orig_blk_offset + i), bh, 0);
22579- if (*err < 0)
22580+ if (ret < 0)
22581 goto out;
22582
22583 if (bh->b_this_page != NULL)
22584 bh = bh->b_this_page;
22585 }
22586
22587- *err = a_ops->write_end(o_filp, mapping, offs, data_size, replaced_size,
22588+ ret = a_ops->write_end(o_filp, mapping, offs, data_len, data_len,
22589 page, fsdata);
22590 page = NULL;
22591
22592@@ -921,10 +951,7 @@ out:
22593 out2:
22594 ext4_journal_stop(handle);
22595
22596- if (err2)
22597- *err = err2;
22598-
22599- return replaced_count;
22600+ return ret < 0 ? ret : 0;
22601 }
22602
22603 /**
22604@@ -935,6 +962,7 @@ out2:
22605 * @orig_start: logical start offset in block for orig
22606 * @donor_start: logical start offset in block for donor
22607 * @len: the number of blocks to be moved
22608+ * @moved_len: moved block length
22609 *
22610 * Check the arguments of ext4_move_extents() whether the files can be
22611 * exchanged with each other.
22612@@ -942,8 +970,8 @@ out2:
22613 */
22614 static int
22615 mext_check_arguments(struct inode *orig_inode,
22616- struct inode *donor_inode, __u64 orig_start,
22617- __u64 donor_start, __u64 *len)
22618+ struct inode *donor_inode, __u64 orig_start,
22619+ __u64 donor_start, __u64 *len, __u64 moved_len)
22620 {
22621 ext4_lblk_t orig_blocks, donor_blocks;
22622 unsigned int blkbits = orig_inode->i_blkbits;
22623@@ -957,13 +985,6 @@ mext_check_arguments(struct inode *orig_inode,
22624 return -EINVAL;
22625 }
22626
22627- if (donor_inode->i_mode & (S_ISUID|S_ISGID)) {
22628- ext4_debug("ext4 move extent: suid or sgid is set"
22629- " to donor file [ino:orig %lu, donor %lu]\n",
22630- orig_inode->i_ino, donor_inode->i_ino);
22631- return -EINVAL;
22632- }
22633-
22634 /* Ext4 move extent does not support swapfile */
22635 if (IS_SWAPFILE(orig_inode) || IS_SWAPFILE(donor_inode)) {
22636 ext4_debug("ext4 move extent: The argument files should "
22637@@ -1004,6 +1025,13 @@ mext_check_arguments(struct inode *orig_inode,
22638 return -EINVAL;
22639 }
22640
22641+ if (moved_len) {
22642+ ext4_debug("ext4 move extent: moved_len should be 0 "
22643+ "[ino:orig %lu, donor %lu]\n", orig_inode->i_ino,
22644+ donor_inode->i_ino);
22645+ return -EINVAL;
22646+ }
22647+
22648 if ((orig_start > EXT_MAX_BLOCK) ||
22649 (donor_start > EXT_MAX_BLOCK) ||
22650 (*len > EXT_MAX_BLOCK) ||
22651@@ -1204,16 +1232,16 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
22652 return -EINVAL;
22653 }
22654
22655- /* Protect orig and donor inodes against a truncate */
22656+ /* protect orig and donor against a truncate */
22657 ret1 = mext_inode_double_lock(orig_inode, donor_inode);
22658 if (ret1 < 0)
22659 return ret1;
22660
22661- /* Protect extent tree against block allocations via delalloc */
22662- double_down_write_data_sem(orig_inode, donor_inode);
22663+ mext_double_down_read(orig_inode, donor_inode);
22664 /* Check the filesystem environment whether move_extent can be done */
22665 ret1 = mext_check_arguments(orig_inode, donor_inode, orig_start,
22666- donor_start, &len);
22667+ donor_start, &len, *moved_len);
22668+ mext_double_up_read(orig_inode, donor_inode);
22669 if (ret1)
22670 goto out;
22671
22672@@ -1327,39 +1355,36 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
22673 seq_start = le32_to_cpu(ext_cur->ee_block);
22674 rest_blocks = seq_blocks;
22675
22676- /*
22677- * Up semaphore to avoid following problems:
22678- * a. transaction deadlock among ext4_journal_start,
22679- * ->write_begin via pagefault, and jbd2_journal_commit
22680- * b. racing with ->readpage, ->write_begin, and ext4_get_block
22681- * in move_extent_per_page
22682- */
22683- double_up_write_data_sem(orig_inode, donor_inode);
22684+ /* Discard preallocations of two inodes */
22685+ down_write(&EXT4_I(orig_inode)->i_data_sem);
22686+ ext4_discard_preallocations(orig_inode);
22687+ up_write(&EXT4_I(orig_inode)->i_data_sem);
22688+
22689+ down_write(&EXT4_I(donor_inode)->i_data_sem);
22690+ ext4_discard_preallocations(donor_inode);
22691+ up_write(&EXT4_I(donor_inode)->i_data_sem);
22692
22693 while (orig_page_offset <= seq_end_page) {
22694
22695 /* Swap original branches with new branches */
22696- block_len_in_page = move_extent_per_page(
22697- o_filp, donor_inode,
22698+ ret1 = move_extent_per_page(o_filp, donor_inode,
22699 orig_page_offset,
22700 data_offset_in_page,
22701- block_len_in_page, uninit,
22702- &ret1);
22703-
22704+ block_len_in_page, uninit);
22705+ if (ret1 < 0)
22706+ goto out;
22707+ orig_page_offset++;
22708 /* Count how many blocks we have exchanged */
22709 *moved_len += block_len_in_page;
22710- if (ret1 < 0)
22711- break;
22712 if (*moved_len > len) {
22713 ext4_error(orig_inode->i_sb, __func__,
22714 "We replaced blocks too much! "
22715 "sum of replaced: %llu requested: %llu",
22716 *moved_len, len);
22717 ret1 = -EIO;
22718- break;
22719+ goto out;
22720 }
22721
22722- orig_page_offset++;
22723 data_offset_in_page = 0;
22724 rest_blocks -= block_len_in_page;
22725 if (rest_blocks > blocks_per_page)
22726@@ -1368,10 +1393,6 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
22727 block_len_in_page = rest_blocks;
22728 }
22729
22730- double_down_write_data_sem(orig_inode, donor_inode);
22731- if (ret1 < 0)
22732- break;
22733-
22734 /* Decrease buffer counter */
22735 if (holecheck_path)
22736 ext4_ext_drop_refs(holecheck_path);
22737@@ -1393,11 +1414,6 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
22738
22739 }
22740 out:
22741- if (*moved_len) {
22742- ext4_discard_preallocations(orig_inode);
22743- ext4_discard_preallocations(donor_inode);
22744- }
22745-
22746 if (orig_path) {
22747 ext4_ext_drop_refs(orig_path);
22748 kfree(orig_path);
22749@@ -1406,7 +1422,7 @@ out:
22750 ext4_ext_drop_refs(holecheck_path);
22751 kfree(holecheck_path);
22752 }
22753- double_up_write_data_sem(orig_inode, donor_inode);
22754+
22755 ret2 = mext_inode_double_unlock(orig_inode, donor_inode);
22756
22757 if (ret1)
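
The helpers introduced above (mext_double_down_read/_write) and the one they replace (double_down_write_data_sem) all take the two i_data_sem locks in ascending i_ino order, for the reason spelled out in the comment: a single global acquisition order rules out ABBA deadlocks, and the inode number is a portable ordering key where comparing the two pointers would not be. The discipline is not ext4-specific; here is a minimal userspace sketch of the same idea, with a hypothetical resource struct and plain pthread mutexes standing in for i_data_sem:

	#include <pthread.h>

	struct resource {
		unsigned long id;		/* stable identity, like i_ino */
		pthread_mutex_t lock;
	};

	/*
	 * Lock two resources in ascending id order; any two threads that
	 * lock the same pair then acquire in the same order and cannot
	 * deadlock against each other.
	 */
	static void double_lock(struct resource *a, struct resource *b)
	{
		struct resource *first = a, *second = b;

		if (b->id < a->id) {
			first = b;
			second = a;
		}
		pthread_mutex_lock(&first->lock);
		pthread_mutex_lock(&second->lock);
	}

	static void double_unlock(struct resource *a, struct resource *b)
	{
		pthread_mutex_unlock(&a->lock);
		pthread_mutex_unlock(&b->lock);
	}

Release order does not matter for deadlock avoidance, which is why the mext_double_up_* helpers above unlock in argument order without sorting.
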
22758diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
22759index 17a17e1..6d2c1b8 100644
22760--- a/fs/ext4/namei.c
22761+++ b/fs/ext4/namei.c
22762@@ -1292,6 +1292,9 @@ errout:
22763 * add_dirent_to_buf will attempt search the directory block for
22764 * space. It will return -ENOSPC if no space is available, and -EIO
22765 * and -EEXIST if directory entry already exists.
22766+ *
22767+ * NOTE! bh is NOT released in the case where ENOSPC is returned. In
22768+ * all other cases bh is released.
22769 */
22770 static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
22771 struct inode *inode, struct ext4_dir_entry_2 *de,
22772@@ -1312,10 +1315,14 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
22773 top = bh->b_data + blocksize - reclen;
22774 while ((char *) de <= top) {
22775 if (!ext4_check_dir_entry("ext4_add_entry", dir, de,
22776- bh, offset))
22777+ bh, offset)) {
22778+ brelse(bh);
22779 return -EIO;
22780- if (ext4_match(namelen, name, de))
22781+ }
22782+ if (ext4_match(namelen, name, de)) {
22783+ brelse(bh);
22784 return -EEXIST;
22785+ }
22786 nlen = EXT4_DIR_REC_LEN(de->name_len);
22787 rlen = ext4_rec_len_from_disk(de->rec_len, blocksize);
22788 if ((de->inode? rlen - nlen: rlen) >= reclen)
22789@@ -1330,6 +1337,7 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
22790 err = ext4_journal_get_write_access(handle, bh);
22791 if (err) {
22792 ext4_std_error(dir->i_sb, err);
22793+ brelse(bh);
22794 return err;
22795 }
22796
22797@@ -1369,6 +1377,7 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
22798 err = ext4_handle_dirty_metadata(handle, dir, bh);
22799 if (err)
22800 ext4_std_error(dir->i_sb, err);
22801+ brelse(bh);
22802 return 0;
22803 }
22804
22805@@ -1462,9 +1471,7 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
22806 if (!(de))
22807 return retval;
22808
22809- retval = add_dirent_to_buf(handle, dentry, inode, de, bh);
22810- brelse(bh);
22811- return retval;
22812+ return add_dirent_to_buf(handle, dentry, inode, de, bh);
22813 }
22814
22815 /*
22816@@ -1507,10 +1514,8 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
22817 if(!bh)
22818 return retval;
22819 retval = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
22820- if (retval != -ENOSPC) {
22821- brelse(bh);
22822+ if (retval != -ENOSPC)
22823 return retval;
22824- }
22825
22826 if (blocks == 1 && !dx_fallback &&
22827 EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX))
22828@@ -1523,9 +1528,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
22829 de = (struct ext4_dir_entry_2 *) bh->b_data;
22830 de->inode = 0;
22831 de->rec_len = ext4_rec_len_to_disk(blocksize, blocksize);
22832- retval = add_dirent_to_buf(handle, dentry, inode, de, bh);
22833- brelse(bh);
22834- return retval;
22835+ return add_dirent_to_buf(handle, dentry, inode, de, bh);
22836 }
22837
22838 /*
22839@@ -1558,8 +1561,10 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
22840 goto journal_error;
22841
22842 err = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
22843- if (err != -ENOSPC)
22844+ if (err != -ENOSPC) {
22845+ bh = NULL;
22846 goto cleanup;
22847+ }
22848
22849 /* Block full, should compress but for now just split */
22850 dxtrace(printk(KERN_DEBUG "using %u of %u node entries\n",
22851@@ -1652,6 +1657,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
22852 if (!de)
22853 goto cleanup;
22854 err = add_dirent_to_buf(handle, dentry, inode, de, bh);
22855+ bh = NULL;
22856 goto cleanup;
22857
22858 journal_error:
22859@@ -1769,7 +1775,7 @@ static int ext4_create(struct inode *dir, struct dentry *dentry, int mode,
22860 retry:
22861 handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
22862 EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
22863- EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb));
22864+ 2*EXT4_QUOTA_INIT_BLOCKS(dir->i_sb));
22865 if (IS_ERR(handle))
22866 return PTR_ERR(handle);
22867
22868@@ -1803,7 +1809,7 @@ static int ext4_mknod(struct inode *dir, struct dentry *dentry,
22869 retry:
22870 handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
22871 EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
22872- EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb));
22873+ 2*EXT4_QUOTA_INIT_BLOCKS(dir->i_sb));
22874 if (IS_ERR(handle))
22875 return PTR_ERR(handle);
22876
22877@@ -1840,7 +1846,7 @@ static int ext4_mkdir(struct inode *dir, struct dentry *dentry, int mode)
22878 retry:
22879 handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
22880 EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
22881- EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb));
22882+ 2*EXT4_QUOTA_INIT_BLOCKS(dir->i_sb));
22883 if (IS_ERR(handle))
22884 return PTR_ERR(handle);
22885
22886@@ -2253,7 +2259,7 @@ static int ext4_symlink(struct inode *dir,
22887 retry:
22888 handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
22889 EXT4_INDEX_EXTRA_TRANS_BLOCKS + 5 +
22890- EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb));
22891+ 2*EXT4_QUOTA_INIT_BLOCKS(dir->i_sb));
22892 if (IS_ERR(handle))
22893 return PTR_ERR(handle);
22894
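
The namei.c hunks above change the contract of add_dirent_to_buf() so that it releases the buffer_head itself on every return path except -ENOSPC (per the added NOTE), which lets the callers drop their own brelse() calls. Stated generally: the callee consumes the caller's reference except for one distinguished status, where the caller keeps it for reuse. A reduced sketch with a hypothetical refcounted buffer, put_buf() standing in for brelse():

	#include <stdlib.h>
	#include <errno.h>

	struct buf {
		int refs;
		/* ... payload ... */
	};

	static void put_buf(struct buf *b)
	{
		if (--b->refs == 0)
			free(b);
	}

	/*
	 * Mirrors the patched add_dirent_to_buf() contract: the callee
	 * drops the caller's reference on every return except -ENOSPC,
	 * where the caller keeps the buffer so it can retry with it.
	 */
	static int try_add(struct buf *b, int have_space)
	{
		if (!have_space)
			return -ENOSPC;		/* caller still owns b */
		/* ... consume b ... */
		put_buf(b);
		return 0;
	}

A caller then reads like ext4_add_entry() above: call try_add(), return immediately for any status other than -ENOSPC, and fall through to the retry path still holding the buffer.
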
22895diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
22896index 3b2c554..3cfc343 100644
22897--- a/fs/ext4/resize.c
22898+++ b/fs/ext4/resize.c
22899@@ -247,7 +247,7 @@ static int setup_new_group_blocks(struct super_block *sb,
22900 goto exit_bh;
22901
22902 if (IS_ERR(gdb = bclean(handle, sb, block))) {
22903- err = PTR_ERR(gdb);
22904+ err = PTR_ERR(bh);
22905 goto exit_bh;
22906 }
22907 ext4_handle_dirty_metadata(handle, NULL, gdb);
22908diff --git a/fs/ext4/super.c b/fs/ext4/super.c
22909index 92943f2..d4ca92a 100644
22910--- a/fs/ext4/super.c
22911+++ b/fs/ext4/super.c
22912@@ -603,6 +603,10 @@ static void ext4_put_super(struct super_block *sb)
22913 if (sb->s_dirt)
22914 ext4_commit_super(sb, 1);
22915
22916+ ext4_release_system_zone(sb);
22917+ ext4_mb_release(sb);
22918+ ext4_ext_release(sb);
22919+ ext4_xattr_put_super(sb);
22920 if (sbi->s_journal) {
22921 err = jbd2_journal_destroy(sbi->s_journal);
22922 sbi->s_journal = NULL;
22923@@ -610,12 +614,6 @@ static void ext4_put_super(struct super_block *sb)
22924 ext4_abort(sb, __func__,
22925 "Couldn't clean up the journal");
22926 }
22927-
22928- ext4_release_system_zone(sb);
22929- ext4_mb_release(sb);
22930- ext4_ext_release(sb);
22931- ext4_xattr_put_super(sb);
22932-
22933 if (!(sb->s_flags & MS_RDONLY)) {
22934 EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
22935 es->s_state = cpu_to_le16(sbi->s_mount_state);
22936@@ -704,13 +702,8 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
22937 ei->i_allocated_meta_blocks = 0;
22938 ei->i_delalloc_reserved_flag = 0;
22939 spin_lock_init(&(ei->i_block_reservation_lock));
22940-#ifdef CONFIG_QUOTA
22941- ei->i_reserved_quota = 0;
22942-#endif
22943 INIT_LIST_HEAD(&ei->i_aio_dio_complete_list);
22944 ei->cur_aio_dio = NULL;
22945- ei->i_sync_tid = 0;
22946- ei->i_datasync_tid = 0;
22947
22948 return &ei->vfs_inode;
22949 }
22950@@ -906,12 +899,6 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
22951 if (test_opt(sb, NO_AUTO_DA_ALLOC))
22952 seq_puts(seq, ",noauto_da_alloc");
22953
22954- if (test_opt(sb, DISCARD))
22955- seq_puts(seq, ",discard");
22956-
22957- if (test_opt(sb, NOLOAD))
22958- seq_puts(seq, ",norecovery");
22959-
22960 ext4_show_quota_options(seq, sb);
22961
22962 return 0;
22963@@ -1004,9 +991,7 @@ static const struct dquot_operations ext4_quota_operations = {
22964 .reserve_space = dquot_reserve_space,
22965 .claim_space = dquot_claim_space,
22966 .release_rsv = dquot_release_reserved_space,
22967-#ifdef CONFIG_QUOTA
22968 .get_reserved_space = ext4_get_reserved_space,
22969-#endif
22970 .alloc_inode = dquot_alloc_inode,
22971 .free_space = dquot_free_space,
22972 .free_inode = dquot_free_inode,
22973@@ -1094,8 +1079,7 @@ enum {
22974 Opt_usrquota, Opt_grpquota, Opt_i_version,
22975 Opt_stripe, Opt_delalloc, Opt_nodelalloc,
22976 Opt_block_validity, Opt_noblock_validity,
22977- Opt_inode_readahead_blks, Opt_journal_ioprio,
22978- Opt_discard, Opt_nodiscard,
22979+ Opt_inode_readahead_blks, Opt_journal_ioprio
22980 };
22981
22982 static const match_table_t tokens = {
22983@@ -1120,7 +1104,6 @@ static const match_table_t tokens = {
22984 {Opt_acl, "acl"},
22985 {Opt_noacl, "noacl"},
22986 {Opt_noload, "noload"},
22987- {Opt_noload, "norecovery"},
22988 {Opt_nobh, "nobh"},
22989 {Opt_bh, "bh"},
22990 {Opt_commit, "commit=%u"},
22991@@ -1161,8 +1144,6 @@ static const match_table_t tokens = {
22992 {Opt_auto_da_alloc, "auto_da_alloc=%u"},
22993 {Opt_auto_da_alloc, "auto_da_alloc"},
22994 {Opt_noauto_da_alloc, "noauto_da_alloc"},
22995- {Opt_discard, "discard"},
22996- {Opt_nodiscard, "nodiscard"},
22997 {Opt_err, NULL},
22998 };
22999
23000@@ -1584,12 +1565,6 @@ set_qf_format:
23001 else
23002 set_opt(sbi->s_mount_opt,NO_AUTO_DA_ALLOC);
23003 break;
23004- case Opt_discard:
23005- set_opt(sbi->s_mount_opt, DISCARD);
23006- break;
23007- case Opt_nodiscard:
23008- clear_opt(sbi->s_mount_opt, DISCARD);
23009- break;
23010 default:
23011 ext4_msg(sb, KERN_ERR,
23012 "Unrecognized mount option \"%s\" "
23013@@ -1698,14 +1673,14 @@ static int ext4_fill_flex_info(struct super_block *sb)
23014 size_t size;
23015 int i;
23016
23017- sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
23018- groups_per_flex = 1 << sbi->s_log_groups_per_flex;
23019-
23020- if (groups_per_flex < 2) {
23021+ if (!sbi->s_es->s_log_groups_per_flex) {
23022 sbi->s_log_groups_per_flex = 0;
23023 return 1;
23024 }
23025
23026+ sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
23027+ groups_per_flex = 1 << sbi->s_log_groups_per_flex;
23028+
23029 /* We allocate both existing and potentially added groups */
23030 flex_group_count = ((sbi->s_groups_count + groups_per_flex - 1) +
23031 ((le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) + 1) <<
23032@@ -3693,11 +3668,13 @@ static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
23033 buf->f_blocks = ext4_blocks_count(es) - sbi->s_overhead_last;
23034 buf->f_bfree = percpu_counter_sum_positive(&sbi->s_freeblocks_counter) -
23035 percpu_counter_sum_positive(&sbi->s_dirtyblocks_counter);
23036+ ext4_free_blocks_count_set(es, buf->f_bfree);
23037 buf->f_bavail = buf->f_bfree - ext4_r_blocks_count(es);
23038 if (buf->f_bfree < ext4_r_blocks_count(es))
23039 buf->f_bavail = 0;
23040 buf->f_files = le32_to_cpu(es->s_inodes_count);
23041 buf->f_ffree = percpu_counter_sum_positive(&sbi->s_freeinodes_counter);
23042+ es->s_free_inodes_count = cpu_to_le32(buf->f_ffree);
23043 buf->f_namelen = EXT4_NAME_LEN;
23044 fsid = le64_to_cpup((void *)es->s_uuid) ^
23045 le64_to_cpup((void *)es->s_uuid + sizeof(u64));
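
The ext4_statfs() hunk above computes f_bavail as the free-block count minus the root-reserved blocks and clamps it to zero when free space falls below the reservation, so the unsigned subtraction cannot wrap around. The guard in isolation (names are illustrative):

	#include <stdint.h>

	/*
	 * Blocks available to unprivileged users: free minus reserved,
	 * clamped so the unsigned subtraction cannot underflow.
	 */
	static uint64_t blocks_avail(uint64_t bfree, uint64_t reserved)
	{
		return bfree < reserved ? 0 : bfree - reserved;
	}
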
23046diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
23047index 0257019..fed5b01 100644
23048--- a/fs/ext4/xattr.c
23049+++ b/fs/ext4/xattr.c
23050@@ -988,10 +988,6 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
23051 if (error)
23052 goto cleanup;
23053
23054- error = ext4_journal_get_write_access(handle, is.iloc.bh);
23055- if (error)
23056- goto cleanup;
23057-
23058 if (EXT4_I(inode)->i_state & EXT4_STATE_NEW) {
23059 struct ext4_inode *raw_inode = ext4_raw_inode(&is.iloc);
23060 memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
23061@@ -1017,6 +1013,9 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
23062 if (flags & XATTR_CREATE)
23063 goto cleanup;
23064 }
23065+ error = ext4_journal_get_write_access(handle, is.iloc.bh);
23066+ if (error)
23067+ goto cleanup;
23068 if (!value) {
23069 if (!is.s.not_found)
23070 error = ext4_xattr_ibody_set(handle, inode, &i, &is);
23071diff --git a/fs/fcntl.c b/fs/fcntl.c
23072index 97e01dc..2cf93ec 100644
23073--- a/fs/fcntl.c
23074+++ b/fs/fcntl.c
23075@@ -618,90 +618,60 @@ static DEFINE_RWLOCK(fasync_lock);
23076 static struct kmem_cache *fasync_cache __read_mostly;
23077
23078 /*
23079- * Remove a fasync entry. If successfully removed, return
23080- * positive and clear the FASYNC flag. If no entry exists,
23081- * do nothing and return 0.
23082- *
23083- * NOTE! It is very important that the FASYNC flag always
23084- * match the state "is the filp on a fasync list".
23085- *
23086- * We always take the 'filp->f_lock', in since fasync_lock
23087- * needs to be irq-safe.
23088+ * fasync_helper() is used by almost all character device drivers
23089+ * to set up the fasync queue. It returns negative on error, 0 if it did
23090+ * no changes and positive if it added/deleted the entry.
23091 */
23092-static int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
23093+int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fapp)
23094 {
23095 struct fasync_struct *fa, **fp;
23096+ struct fasync_struct *new = NULL;
23097 int result = 0;
23098
23099- spin_lock(&filp->f_lock);
23100- write_lock_irq(&fasync_lock);
23101- for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
23102- if (fa->fa_file != filp)
23103- continue;
23104- *fp = fa->fa_next;
23105- kmem_cache_free(fasync_cache, fa);
23106- filp->f_flags &= ~FASYNC;
23107- result = 1;
23108- break;
23109+ if (on) {
23110+ new = kmem_cache_alloc(fasync_cache, GFP_KERNEL);
23111+ if (!new)
23112+ return -ENOMEM;
23113 }
23114- write_unlock_irq(&fasync_lock);
23115- spin_unlock(&filp->f_lock);
23116- return result;
23117-}
23118-
23119-/*
23120- * Add a fasync entry. Return negative on error, positive if
23121- * added, and zero if did nothing but change an existing one.
23122- *
23123- * NOTE! It is very important that the FASYNC flag always
23124- * match the state "is the filp on a fasync list".
23125- */
23126-static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fapp)
23127-{
23128- struct fasync_struct *new, *fa, **fp;
23129- int result = 0;
23130-
23131- new = kmem_cache_alloc(fasync_cache, GFP_KERNEL);
23132- if (!new)
23133- return -ENOMEM;
23134
23135+ /*
23136+ * We need to take f_lock first since it's not an IRQ-safe
23137+ * lock.
23138+ */
23139 spin_lock(&filp->f_lock);
23140 write_lock_irq(&fasync_lock);
23141 for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
23142- if (fa->fa_file != filp)
23143- continue;
23144- fa->fa_fd = fd;
23145- kmem_cache_free(fasync_cache, new);
23146- goto out;
23147+ if (fa->fa_file == filp) {
23148+ if(on) {
23149+ fa->fa_fd = fd;
23150+ kmem_cache_free(fasync_cache, new);
23151+ } else {
23152+ *fp = fa->fa_next;
23153+ kmem_cache_free(fasync_cache, fa);
23154+ result = 1;
23155+ }
23156+ goto out;
23157+ }
23158 }
23159
23160- new->magic = FASYNC_MAGIC;
23161- new->fa_file = filp;
23162- new->fa_fd = fd;
23163- new->fa_next = *fapp;
23164- *fapp = new;
23165- result = 1;
23166- filp->f_flags |= FASYNC;
23167-
23168+ if (on) {
23169+ new->magic = FASYNC_MAGIC;
23170+ new->fa_file = filp;
23171+ new->fa_fd = fd;
23172+ new->fa_next = *fapp;
23173+ *fapp = new;
23174+ result = 1;
23175+ }
23176 out:
23177+ if (on)
23178+ filp->f_flags |= FASYNC;
23179+ else
23180+ filp->f_flags &= ~FASYNC;
23181 write_unlock_irq(&fasync_lock);
23182 spin_unlock(&filp->f_lock);
23183 return result;
23184 }
23185
23186-/*
23187- * fasync_helper() is used by almost all character device drivers
23188- * to set up the fasync queue, and for regular files by the file
23189- * lease code. It returns negative on error, 0 if it did no changes
23190- * and positive if it added/deleted the entry.
23191- */
23192-int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fapp)
23193-{
23194- if (!on)
23195- return fasync_remove_entry(filp, fapp);
23196- return fasync_add_entry(fd, filp, fapp);
23197-}
23198-
23199 EXPORT_SYMBOL(fasync_helper);
23200
23201 void __kill_fasync(struct fasync_struct *fa, int sig, int band)
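
The merged fasync_helper() above allocates its fasync_struct with GFP_KERNEL before taking filp->f_lock and fasync_lock, since the allocation may sleep and sleeping is not allowed once a spinlock is held; if the entry turns out to exist already, the preallocation is simply freed. The same pattern in a self-contained userspace sketch — a hypothetical keyed list guarded by a pthread spinlock, assumed to be initialized elsewhere with pthread_spin_init():

	#include <pthread.h>
	#include <stdlib.h>

	struct node {
		int key;
		struct node *next;
	};

	static struct node *head;
	static pthread_spinlock_t list_lock;	/* initialized in setup code */

	/*
	 * Add key if on, remove it if not -- the shape of fasync_helper()
	 * above. The node is allocated *before* the lock is taken, because
	 * allocation may block; an unused preallocation is freed after.
	 */
	static int update_entry(int key, int on)
	{
		struct node *new = NULL, *n, **np;
		int result = 0;

		if (on) {
			new = malloc(sizeof(*new));
			if (!new)
				return -1;
		}

		pthread_spin_lock(&list_lock);
		for (np = &head; (n = *np) != NULL; np = &n->next) {
			if (n->key != key)
				continue;
			if (on) {
				free(new);	/* already present, keep old node */
			} else {
				*np = n->next;	/* unlink and free */
				free(n);
				result = 1;
			}
			goto out;
		}
		if (on) {
			new->key = key;
			new->next = head;
			head = new;
			result = 1;
		}
	out:
		pthread_spin_unlock(&list_lock);
		return result;
	}
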
23202diff --git a/fs/fuse/file.c b/fs/fuse/file.c
23203index a9f5e13..c18913a 100644
23204--- a/fs/fuse/file.c
23205+++ b/fs/fuse/file.c
23206@@ -828,9 +828,6 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
23207 if (!page)
23208 break;
23209
23210- if (mapping_writably_mapped(mapping))
23211- flush_dcache_page(page);
23212-
23213 pagefault_disable();
23214 tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
23215 pagefault_enable();
23216diff --git a/fs/hfs/catalog.c b/fs/hfs/catalog.c
23217index 424b033..6d98f11 100644
23218--- a/fs/hfs/catalog.c
23219+++ b/fs/hfs/catalog.c
23220@@ -289,10 +289,6 @@ int hfs_cat_move(u32 cnid, struct inode *src_dir, struct qstr *src_name,
23221 err = hfs_brec_find(&src_fd);
23222 if (err)
23223 goto out;
23224- if (src_fd.entrylength > sizeof(entry) || src_fd.entrylength < 0) {
23225- err = -EIO;
23226- goto out;
23227- }
23228
23229 hfs_bnode_read(src_fd.bnode, &entry, src_fd.entryoffset,
23230 src_fd.entrylength);
23231diff --git a/fs/hfs/dir.c b/fs/hfs/dir.c
23232index 2b3b861..7c69b98 100644
23233--- a/fs/hfs/dir.c
23234+++ b/fs/hfs/dir.c
23235@@ -79,11 +79,6 @@ static int hfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
23236 filp->f_pos++;
23237 /* fall through */
23238 case 1:
23239- if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) {
23240- err = -EIO;
23241- goto out;
23242- }
23243-
23244 hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength);
23245 if (entry.type != HFS_CDR_THD) {
23246 printk(KERN_ERR "hfs: bad catalog folder thread\n");
23247@@ -114,12 +109,6 @@ static int hfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
23248 err = -EIO;
23249 goto out;
23250 }
23251-
23252- if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) {
23253- err = -EIO;
23254- goto out;
23255- }
23256-
23257 hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength);
23258 type = entry.type;
23259 len = hfs_mac2asc(sb, strbuf, &fd.key->cat.CName);
23260diff --git a/fs/hfs/super.c b/fs/hfs/super.c
23261index 5ed7252..f7fcbe4 100644
23262--- a/fs/hfs/super.c
23263+++ b/fs/hfs/super.c
23264@@ -409,13 +409,8 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
23265 /* try to get the root inode */
23266 hfs_find_init(HFS_SB(sb)->cat_tree, &fd);
23267 res = hfs_cat_find_brec(sb, HFS_ROOT_CNID, &fd);
23268- if (!res) {
23269- if (fd.entrylength > sizeof(rec) || fd.entrylength < 0) {
23270- res = -EIO;
23271- goto bail;
23272- }
23273+ if (!res)
23274 hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, fd.entrylength);
23275- }
23276 if (res) {
23277 hfs_find_exit(&fd);
23278 goto bail_no_root;
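
The hfs hunks above add and remove range checks on fd.entrylength before hfs_bnode_read() copies that many bytes into a fixed-size buffer; one side of this diff treats the on-disk length as untrusted, the other does not. The rule being applied — validate a length field read from storage against the destination before copying — in a stand-alone sketch (read_record() and its parameters are hypothetical):

	#include <string.h>
	#include <errno.h>

	/*
	 * len comes from on-disk metadata and is untrusted: reject it
	 * before copying into the fixed-size destination, instead of
	 * letting a corrupt filesystem overflow the buffer.
	 */
	static int read_record(void *dst, size_t dstlen,
			       const void *src, int len)
	{
		if (len < 0 || (size_t)len > dstlen)
			return -EIO;		/* corrupt length field */
		memcpy(dst, src, (size_t)len);
		return 0;
	}
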
23279diff --git a/fs/inode.c b/fs/inode.c
23280index 4d8e3be..de80bc2 100644
23281--- a/fs/inode.c
@@ -23289,1313 +732,6 @@ index 4d8e3be..de80bc2 100644
23289 }
23290 EXPORT_SYMBOL(inode_init_once);
23291
23292diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
23293index 8896c1d..d4cfd6d 100644
23294--- a/fs/jbd2/commit.c
23295+++ b/fs/jbd2/commit.c
23296@@ -636,10 +636,6 @@ void jbd2_journal_commit_transaction(journal_t *journal)
23297 JBUFFER_TRACE(jh, "ph3: write metadata");
23298 flags = jbd2_journal_write_metadata_buffer(commit_transaction,
23299 jh, &new_jh, blocknr);
23300- if (flags < 0) {
23301- jbd2_journal_abort(journal, flags);
23302- continue;
23303- }
23304 set_bit(BH_JWrite, &jh2bh(new_jh)->b_state);
23305 wbuf[bufs++] = jh2bh(new_jh);
23306
23307diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
23308index b7ca3a9..fed8538 100644
23309--- a/fs/jbd2/journal.c
23310+++ b/fs/jbd2/journal.c
23311@@ -78,7 +78,6 @@ EXPORT_SYMBOL(jbd2_journal_errno);
23312 EXPORT_SYMBOL(jbd2_journal_ack_err);
23313 EXPORT_SYMBOL(jbd2_journal_clear_err);
23314 EXPORT_SYMBOL(jbd2_log_wait_commit);
23315-EXPORT_SYMBOL(jbd2_log_start_commit);
23316 EXPORT_SYMBOL(jbd2_journal_start_commit);
23317 EXPORT_SYMBOL(jbd2_journal_force_commit_nested);
23318 EXPORT_SYMBOL(jbd2_journal_wipe);
23319@@ -359,10 +358,6 @@ repeat:
23320
23321 jbd_unlock_bh_state(bh_in);
23322 tmp = jbd2_alloc(bh_in->b_size, GFP_NOFS);
23323- if (!tmp) {
23324- jbd2_journal_put_journal_head(new_jh);
23325- return -ENOMEM;
23326- }
23327 jbd_lock_bh_state(bh_in);
23328 if (jh_in->b_frozen_data) {
23329 jbd2_free(tmp, bh_in->b_size);
23330@@ -1253,13 +1248,6 @@ int jbd2_journal_load(journal_t *journal)
23331 if (jbd2_journal_recover(journal))
23332 goto recovery_error;
23333
23334- if (journal->j_failed_commit) {
23335- printk(KERN_ERR "JBD2: journal transaction %u on %s "
23336- "is corrupt.\n", journal->j_failed_commit,
23337- journal->j_devname);
23338- return -EIO;
23339- }
23340-
23341 /* OK, we've finished with the dynamic journal bits:
23342 * reinitialise the dynamic contents of the superblock in memory
23343 * and reset them on disk. */
23344diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c
23345index 3b6f2fa..090c556 100644
23346--- a/fs/jffs2/gc.c
23347+++ b/fs/jffs2/gc.c
23348@@ -700,8 +700,7 @@ static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_
23349 struct jffs2_raw_inode ri;
23350 struct jffs2_node_frag *last_frag;
23351 union jffs2_device_node dev;
23352- char *mdata = NULL;
23353- int mdatalen = 0;
23354+ char *mdata = NULL, mdatalen = 0;
23355 uint32_t alloclen, ilen;
23356 int ret;
23357
23358diff --git a/fs/namei.c b/fs/namei.c
23359index a2b3c28..d11f404 100644
23360--- a/fs/namei.c
23361+++ b/fs/namei.c
23362@@ -234,7 +234,6 @@ int generic_permission(struct inode *inode, int mask,
23363 /*
23364 * Searching includes executable on directories, else just read.
23365 */
23366- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
23367 if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
23368 if (capable(CAP_DAC_READ_SEARCH))
23369 return 0;
23370diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
23371index 0d28982..e1d415e 100644
23372--- a/fs/nfs/direct.c
23373+++ b/fs/nfs/direct.c
23374@@ -342,7 +342,6 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq,
23375 data->res.fattr = &data->fattr;
23376 data->res.eof = 0;
23377 data->res.count = bytes;
23378- nfs_fattr_init(&data->fattr);
23379 msg.rpc_argp = &data->args;
23380 msg.rpc_resp = &data->res;
23381
23382@@ -576,7 +575,6 @@ static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
23383 data->res.count = 0;
23384 data->res.fattr = &data->fattr;
23385 data->res.verf = &data->verf;
23386- nfs_fattr_init(&data->fattr);
23387
23388 NFS_PROTO(data->inode)->commit_setup(data, &msg);
23389
23390@@ -768,7 +766,6 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq,
23391 data->res.fattr = &data->fattr;
23392 data->res.count = bytes;
23393 data->res.verf = &data->verf;
23394- nfs_fattr_init(&data->fattr);
23395
23396 task_setup_data.task = &data->task;
23397 task_setup_data.callback_data = data;
23398diff --git a/fs/nfs/file.c b/fs/nfs/file.c
23399index 393d40f..f5fdd39 100644
23400--- a/fs/nfs/file.c
23401+++ b/fs/nfs/file.c
23402@@ -486,8 +486,6 @@ static int nfs_release_page(struct page *page, gfp_t gfp)
23403 {
23404 dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page);
23405
23406- if (gfp & __GFP_WAIT)
23407- nfs_wb_page(page->mapping->host, page);
23408 /* If PagePrivate() is set, then the page is not freeable */
23409 if (PagePrivate(page))
23410 return 0;
23411diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
23412index 237874f..fa58800 100644
23413--- a/fs/nfs/fscache.c
23414+++ b/fs/nfs/fscache.c
23415@@ -354,11 +354,12 @@ void nfs_fscache_reset_inode_cookie(struct inode *inode)
23416 */
23417 int nfs_fscache_release_page(struct page *page, gfp_t gfp)
23418 {
23419- if (PageFsCache(page)) {
23420- struct nfs_inode *nfsi = NFS_I(page->mapping->host);
23421- struct fscache_cookie *cookie = nfsi->fscache;
23422+ struct nfs_inode *nfsi = NFS_I(page->mapping->host);
23423+ struct fscache_cookie *cookie = nfsi->fscache;
23424
23425- BUG_ON(!cookie);
23426+ BUG_ON(!cookie);
23427+
23428+ if (PageFsCache(page)) {
23429 dfprintk(FSCACHE, "NFS: fscache releasepage (0x%p/0x%p/0x%p)\n",
23430 cookie, page, nfsi);
23431
23432diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c
23433index 59047f8..0adefc4 100644
23434--- a/fs/nfs/mount_clnt.c
23435+++ b/fs/nfs/mount_clnt.c
23436@@ -120,7 +120,7 @@ static struct {
23437 { .status = MNT3ERR_INVAL, .errno = -EINVAL, },
23438 { .status = MNT3ERR_NAMETOOLONG, .errno = -ENAMETOOLONG, },
23439 { .status = MNT3ERR_NOTSUPP, .errno = -ENOTSUPP, },
23440- { .status = MNT3ERR_SERVERFAULT, .errno = -EREMOTEIO, },
23441+ { .status = MNT3ERR_SERVERFAULT, .errno = -ESERVERFAULT, },
23442 };
23443
23444 struct mountres {
23445diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
23446index 7bc2da8..5e078b2 100644
23447--- a/fs/nfs/nfs2xdr.c
23448+++ b/fs/nfs/nfs2xdr.c
23449@@ -699,7 +699,7 @@ static struct {
23450 { NFSERR_BAD_COOKIE, -EBADCOOKIE },
23451 { NFSERR_NOTSUPP, -ENOTSUPP },
23452 { NFSERR_TOOSMALL, -ETOOSMALL },
23453- { NFSERR_SERVERFAULT, -EREMOTEIO },
23454+ { NFSERR_SERVERFAULT, -ESERVERFAULT },
23455 { NFSERR_BADTYPE, -EBADTYPE },
23456 { NFSERR_JUKEBOX, -EJUKEBOX },
23457 { -1, -EIO }
23458diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
23459index b4a6b1a..6ea07a3 100644
23460--- a/fs/nfs/nfs4_fs.h
23461+++ b/fs/nfs/nfs4_fs.h
23462@@ -141,7 +141,6 @@ enum {
23463 NFS_O_RDWR_STATE, /* OPEN stateid has read/write state */
23464 NFS_STATE_RECLAIM_REBOOT, /* OPEN stateid server rebooted */
23465 NFS_STATE_RECLAIM_NOGRACE, /* OPEN stateid needs to recover state */
23466- NFS_STATE_POSIX_LOCKS, /* Posix locks are supported */
23467 };
23468
23469 struct nfs4_state {
23470diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
23471index 6c20059..741a562 100644
23472--- a/fs/nfs/nfs4proc.c
23473+++ b/fs/nfs/nfs4proc.c
23474@@ -1573,8 +1573,6 @@ static int _nfs4_do_open(struct inode *dir, struct path *path, fmode_t fmode, in
23475 status = PTR_ERR(state);
23476 if (IS_ERR(state))
23477 goto err_opendata_put;
23478- if ((opendata->o_res.rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) != 0)
23479- set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
23480 nfs4_opendata_put(opendata);
23481 nfs4_put_state_owner(sp);
23482 *res = state;
23483@@ -3978,22 +3976,6 @@ static const struct rpc_call_ops nfs4_lock_ops = {
23484 .rpc_release = nfs4_lock_release,
23485 };
23486
23487-static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
23488-{
23489- struct nfs_client *clp = server->nfs_client;
23490- struct nfs4_state *state = lsp->ls_state;
23491-
23492- switch (error) {
23493- case -NFS4ERR_ADMIN_REVOKED:
23494- case -NFS4ERR_BAD_STATEID:
23495- case -NFS4ERR_EXPIRED:
23496- if (new_lock_owner != 0 ||
23497- (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
23498- nfs4_state_mark_reclaim_nograce(clp, state);
23499- lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
23500- };
23501-}
23502-
23503 static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int reclaim)
23504 {
23505 struct nfs4_lockdata *data;
23506@@ -4029,9 +4011,6 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f
23507 ret = nfs4_wait_for_completion_rpc_task(task);
23508 if (ret == 0) {
23509 ret = data->rpc_status;
23510- if (ret)
23511- nfs4_handle_setlk_error(data->server, data->lsp,
23512- data->arg.new_lock_owner, ret);
23513 } else
23514 data->cancelled = 1;
23515 rpc_put_task(task);
23516@@ -4081,11 +4060,8 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock
23517 {
23518 struct nfs_inode *nfsi = NFS_I(state->inode);
23519 unsigned char fl_flags = request->fl_flags;
23520- int status = -ENOLCK;
23521+ int status;
23522
23523- if ((fl_flags & FL_POSIX) &&
23524- !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
23525- goto out;
23526 /* Is this a delegated open? */
23527 status = nfs4_set_lock_state(state, request);
23528 if (status != 0)
23529diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
23530index a4cd1b7..20b4e30 100644
23531--- a/fs/nfs/nfs4xdr.c
23532+++ b/fs/nfs/nfs4xdr.c
23533@@ -4554,7 +4554,7 @@ static int decode_sequence(struct xdr_stream *xdr,
23534 * If the server returns different values for sessionID, slotID or
23535 * sequence number, the server is looney tunes.
23536 */
23537- status = -EREMOTEIO;
23538+ status = -ESERVERFAULT;
23539
23540 if (memcmp(id.data, res->sr_session->sess_id.data,
23541 NFS4_MAX_SESSIONID_LEN)) {
23542@@ -5678,7 +5678,7 @@ static struct {
23543 { NFS4ERR_BAD_COOKIE, -EBADCOOKIE },
23544 { NFS4ERR_NOTSUPP, -ENOTSUPP },
23545 { NFS4ERR_TOOSMALL, -ETOOSMALL },
23546- { NFS4ERR_SERVERFAULT, -EREMOTEIO },
23547+ { NFS4ERR_SERVERFAULT, -ESERVERFAULT },
23548 { NFS4ERR_BADTYPE, -EBADTYPE },
23549 { NFS4ERR_LOCKED, -EAGAIN },
23550 { NFS4ERR_SYMLINK, -ELOOP },
23551@@ -5705,7 +5705,7 @@ nfs4_stat_to_errno(int stat)
23552 }
23553 if (stat <= 10000 || stat > 10100) {
23554 /* The server is looney tunes. */
23555- return -EREMOTEIO;
23556+ return -ESERVERFAULT;
23557 }
23558 /* If we cannot translate the error, the recovery routines should
23559 * handle it.
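
Several hunks above flip entries in wire-status-to-errno tables (for instance NFS4ERR_SERVERFAULT moving between -EREMOTEIO and -ESERVERFAULT). The tables share one idiom: a linear scan over a sentinel-terminated array, with the sentinel entry doubling as the fallback mapping. A reduced sketch with made-up status codes:

	#include <errno.h>

	static const struct {
		int status;		/* wire value; these are made up */
		int err;		/* local errno */
	} errtbl[] = {
		{ 1001, -EACCES },
		{ 1002, -ENOENT },
		{ -1,   -EIO }		/* sentinel doubles as fallback */
	};

	static int map_status(int status)
	{
		int i;

		for (i = 0; errtbl[i].status != -1; i++)
			if (errtbl[i].status == status)
				return errtbl[i].err;
		return errtbl[i].err;	/* fell off the table: -EIO */
	}
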
23560diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
23561index a12c45b..e297593 100644
23562--- a/fs/nfs/pagelist.c
23563+++ b/fs/nfs/pagelist.c
23564@@ -176,12 +176,6 @@ void nfs_release_request(struct nfs_page *req)
23565 kref_put(&req->wb_kref, nfs_free_request);
23566 }
23567
23568-static int nfs_wait_bit_uninterruptible(void *word)
23569-{
23570- io_schedule();
23571- return 0;
23572-}
23573-
23574 /**
23575 * nfs_wait_on_request - Wait for a request to complete.
23576 * @req: request to wait upon.
23577@@ -192,9 +186,14 @@ static int nfs_wait_bit_uninterruptible(void *word)
23578 int
23579 nfs_wait_on_request(struct nfs_page *req)
23580 {
23581- return wait_on_bit(&req->wb_flags, PG_BUSY,
23582- nfs_wait_bit_uninterruptible,
23583- TASK_UNINTERRUPTIBLE);
23584+ int ret = 0;
23585+
23586+ if (!test_bit(PG_BUSY, &req->wb_flags))
23587+ goto out;
23588+ ret = out_of_line_wait_on_bit(&req->wb_flags, PG_BUSY,
23589+ nfs_wait_bit_killable, TASK_KILLABLE);
23590+out:
23591+ return ret;
23592 }
23593
23594 /**
23595diff --git a/fs/nfs/super.c b/fs/nfs/super.c
23596index 4bf23f6..90be551 100644
23597--- a/fs/nfs/super.c
23598+++ b/fs/nfs/super.c
23599@@ -241,7 +241,6 @@ static int nfs_show_stats(struct seq_file *, struct vfsmount *);
23600 static int nfs_get_sb(struct file_system_type *, int, const char *, void *, struct vfsmount *);
23601 static int nfs_xdev_get_sb(struct file_system_type *fs_type,
23602 int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt);
23603-static void nfs_put_super(struct super_block *);
23604 static void nfs_kill_super(struct super_block *);
23605 static int nfs_remount(struct super_block *sb, int *flags, char *raw_data);
23606
23607@@ -265,7 +264,6 @@ static const struct super_operations nfs_sops = {
23608 .alloc_inode = nfs_alloc_inode,
23609 .destroy_inode = nfs_destroy_inode,
23610 .write_inode = nfs_write_inode,
23611- .put_super = nfs_put_super,
23612 .statfs = nfs_statfs,
23613 .clear_inode = nfs_clear_inode,
23614 .umount_begin = nfs_umount_begin,
23615@@ -335,7 +333,6 @@ static const struct super_operations nfs4_sops = {
23616 .alloc_inode = nfs_alloc_inode,
23617 .destroy_inode = nfs_destroy_inode,
23618 .write_inode = nfs_write_inode,
23619- .put_super = nfs_put_super,
23620 .statfs = nfs_statfs,
23621 .clear_inode = nfs4_clear_inode,
23622 .umount_begin = nfs_umount_begin,
23623@@ -737,6 +734,8 @@ static struct nfs_parsed_mount_data *nfs_alloc_parsed_mount_data(unsigned int ve
23624
23625 data = kzalloc(sizeof(*data), GFP_KERNEL);
23626 if (data) {
23627+ data->rsize = NFS_MAX_FILE_IO_SIZE;
23628+ data->wsize = NFS_MAX_FILE_IO_SIZE;
23629 data->acregmin = NFS_DEF_ACREGMIN;
23630 data->acregmax = NFS_DEF_ACREGMAX;
23631 data->acdirmin = NFS_DEF_ACDIRMIN;
23632@@ -2199,17 +2198,6 @@ error_splat_super:
23633 }
23634
23635 /*
23636- * Ensure that we unregister the bdi before kill_anon_super
23637- * releases the device name
23638- */
23639-static void nfs_put_super(struct super_block *s)
23640-{
23641- struct nfs_server *server = NFS_SB(s);
23642-
23643- bdi_unregister(&server->backing_dev_info);
23644-}
23645-
23646-/*
23647 * Destroy an NFS2/3 superblock
23648 */
23649 static void nfs_kill_super(struct super_block *s)
23650@@ -2217,6 +2205,7 @@ static void nfs_kill_super(struct super_block *s)
23651 struct nfs_server *server = NFS_SB(s);
23652
23653 kill_anon_super(s);
23654+ bdi_unregister(&server->backing_dev_info);
23655 nfs_fscache_release_super_cookie(s);
23656 nfs_free_server(server);
23657 }
23658diff --git a/fs/nfs/write.c b/fs/nfs/write.c
23659index cf6c06f..53eb26c 100644
23660--- a/fs/nfs/write.c
23661+++ b/fs/nfs/write.c
23662@@ -1542,7 +1542,6 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
23663 break;
23664 }
23665 ret = nfs_wait_on_request(req);
23666- nfs_release_request(req);
23667 if (ret < 0)
23668 goto out;
23669 }
23670@@ -1613,16 +1612,15 @@ int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
23671 if (ret)
23672 goto out_unlock;
23673 page_cache_get(newpage);
23674- spin_lock(&mapping->host->i_lock);
23675 req->wb_page = newpage;
23676 SetPagePrivate(newpage);
23677- set_page_private(newpage, (unsigned long)req);
23678+ set_page_private(newpage, page_private(page));
23679 ClearPagePrivate(page);
23680 set_page_private(page, 0);
23681- spin_unlock(&mapping->host->i_lock);
23682 page_cache_release(page);
23683 out_unlock:
23684 nfs_clear_page_tag_locked(req);
23685+ nfs_release_request(req);
23686 out:
23687 return ret;
23688 }
23689diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c
23690index 6d9c6aa..725d02f 100644
23691--- a/fs/nfsd/nfs4acl.c
23692+++ b/fs/nfsd/nfs4acl.c
23693@@ -389,7 +389,7 @@ sort_pacl(struct posix_acl *pacl)
23694 sort_pacl_range(pacl, 1, i-1);
23695
23696 BUG_ON(pacl->a_entries[i].e_tag != ACL_GROUP_OBJ);
23697- j = ++i;
23698+ j = i++;
23699 while (pacl->a_entries[j].e_tag == ACL_GROUP)
23700 j++;
23701 sort_pacl_range(pacl, i, j-1);
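
The one-character change in sort_pacl() above, between j = ++i and j = i++, matters because the two forms leave j different even though both leave i incremented: pre-increment hands j the new value of i, post-increment the old one. In miniature:

	#include <assert.h>

	int main(void)
	{
		int i = 3, j;

		j = ++i;	/* pre-increment:  i becomes 4, then j = 4 */
		assert(i == 4 && j == 4);

		i = 3;
		j = i++;	/* post-increment: j = 3, then i becomes 4 */
		assert(i == 4 && j == 3);
		return 0;
	}
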
23702diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
23703index 570dd1c..a293f02 100644
23704--- a/fs/nfsd/vfs.c
23705+++ b/fs/nfsd/vfs.c
23706@@ -774,9 +774,12 @@ static inline int nfsd_dosync(struct file *filp, struct dentry *dp,
23707 int (*fsync) (struct file *, struct dentry *, int);
23708 int err;
23709
23710- err = filemap_write_and_wait(inode->i_mapping);
23711+ err = filemap_fdatawrite(inode->i_mapping);
23712 if (err == 0 && fop && (fsync = fop->fsync))
23713 err = fsync(filp, dp, 0);
23714+ if (err == 0)
23715+ err = filemap_fdatawait(inode->i_mapping);
23716+
23717 return err;
23718 }
23719
23720diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c
23721index 1afb0a1..c9ee67b 100644
23722--- a/fs/notify/inotify/inotify_fsnotify.c
23723+++ b/fs/notify/inotify/inotify_fsnotify.c
23724@@ -121,7 +121,7 @@ static int idr_callback(int id, void *p, void *data)
23725 if (warned)
23726 return 0;
23727
23728- warned = true;
23729+ warned = false;
23730 entry = p;
23731 ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
23732
23733diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
23734index ca44337..dcd2040 100644
23735--- a/fs/notify/inotify/inotify_user.c
23736+++ b/fs/notify/inotify/inotify_user.c
23737@@ -558,7 +558,7 @@ retry:
23738
23739 spin_lock(&group->inotify_data.idr_lock);
23740 ret = idr_get_new_above(&group->inotify_data.idr, &tmp_ientry->fsn_entry,
23741- group->inotify_data.last_wd+1,
23742+ group->inotify_data.last_wd,
23743 &tmp_ientry->wd);
23744 spin_unlock(&group->inotify_data.idr_lock);
23745 if (ret) {
23746@@ -638,7 +638,7 @@ static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsign
23747
23748 spin_lock_init(&group->inotify_data.idr_lock);
23749 idr_init(&group->inotify_data.idr);
23750- group->inotify_data.last_wd = 0;
23751+ group->inotify_data.last_wd = 1;
23752 group->inotify_data.user = user;
23753 group->inotify_data.fa = NULL;
23754
23755diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
23756index 49cfd5f..038a602 100644
23757--- a/fs/partitions/efi.c
23758+++ b/fs/partitions/efi.c
23759@@ -1,9 +1,7 @@
23760 /************************************************************
23761 * EFI GUID Partition Table handling
23762- *
23763- * http://www.uefi.org/specs/
23764- * http://www.intel.com/technology/efi/
23765- *
23766+ * Per Intel EFI Specification v1.02
23767+ * http://developer.intel.com/technology/efi/efi.htm
23768 * efi.[ch] by Matt Domsch <Matt_Domsch@dell.com>
23769 * Copyright 2000,2001,2002,2004 Dell Inc.
23770 *
23771@@ -94,7 +92,6 @@
23772 *
23773 ************************************************************/
23774 #include <linux/crc32.h>
23775-#include <linux/math64.h>
23776 #include "check.h"
23777 #include "efi.h"
23778
23779@@ -144,8 +141,7 @@ last_lba(struct block_device *bdev)
23780 {
23781 if (!bdev || !bdev->bd_inode)
23782 return 0;
23783- return div_u64(bdev->bd_inode->i_size,
23784- bdev_logical_block_size(bdev)) - 1ULL;
23785+ return (bdev->bd_inode->i_size >> 9) - 1ULL;
23786 }
23787
23788 static inline int
23789@@ -192,7 +188,6 @@ static size_t
23790 read_lba(struct block_device *bdev, u64 lba, u8 * buffer, size_t count)
23791 {
23792 size_t totalreadcount = 0;
23793- sector_t n = lba * (bdev_logical_block_size(bdev) / 512);
23794
23795 if (!bdev || !buffer || lba > last_lba(bdev))
23796 return 0;
23797@@ -200,7 +195,7 @@ read_lba(struct block_device *bdev, u64 lba, u8 * buffer, size_t count)
23798 while (count) {
23799 int copied = 512;
23800 Sector sect;
23801- unsigned char *data = read_dev_sector(bdev, n++, &sect);
23802+ unsigned char *data = read_dev_sector(bdev, lba++, &sect);
23803 if (!data)
23804 break;
23805 if (copied > count)
23806@@ -262,16 +257,15 @@ static gpt_header *
23807 alloc_read_gpt_header(struct block_device *bdev, u64 lba)
23808 {
23809 gpt_header *gpt;
23810- unsigned ssz = bdev_logical_block_size(bdev);
23811-
23812 if (!bdev)
23813 return NULL;
23814
23815- gpt = kzalloc(ssz, GFP_KERNEL);
23816+ gpt = kzalloc(sizeof (gpt_header), GFP_KERNEL);
23817 if (!gpt)
23818 return NULL;
23819
23820- if (read_lba(bdev, lba, (u8 *) gpt, ssz) < ssz) {
23821+ if (read_lba(bdev, lba, (u8 *) gpt,
23822+ sizeof (gpt_header)) < sizeof (gpt_header)) {
23823 kfree(gpt);
23824 gpt=NULL;
23825 return NULL;
23826@@ -607,7 +601,6 @@ efi_partition(struct parsed_partitions *state, struct block_device *bdev)
23827 gpt_header *gpt = NULL;
23828 gpt_entry *ptes = NULL;
23829 u32 i;
23830- unsigned ssz = bdev_logical_block_size(bdev) / 512;
23831
23832 if (!find_valid_gpt(bdev, &gpt, &ptes) || !gpt || !ptes) {
23833 kfree(gpt);
23834@@ -618,14 +611,13 @@ efi_partition(struct parsed_partitions *state, struct block_device *bdev)
23835 pr_debug("GUID Partition Table is valid! Yea!\n");
23836
23837 for (i = 0; i < le32_to_cpu(gpt->num_partition_entries) && i < state->limit-1; i++) {
23838- u64 start = le64_to_cpu(ptes[i].starting_lba);
23839- u64 size = le64_to_cpu(ptes[i].ending_lba) -
23840- le64_to_cpu(ptes[i].starting_lba) + 1ULL;
23841-
23842 if (!is_pte_valid(&ptes[i], last_lba(bdev)))
23843 continue;
23844
23845- put_partition(state, i+1, start * ssz, size * ssz);
23846+ put_partition(state, i+1, le64_to_cpu(ptes[i].starting_lba),
23847+ (le64_to_cpu(ptes[i].ending_lba) -
23848+ le64_to_cpu(ptes[i].starting_lba) +
23849+ 1ULL));
23850
23851 /* If this is a RAID volume, tell md */
23852 if (!efi_guidcmp(ptes[i].partition_type_guid,
23853diff --git a/fs/partitions/efi.h b/fs/partitions/efi.h
23854index 6998b58..2cc89d0 100644
23855--- a/fs/partitions/efi.h
23856+++ b/fs/partitions/efi.h
23857@@ -37,6 +37,7 @@
23858 #define EFI_PMBR_OSTYPE_EFI 0xEF
23859 #define EFI_PMBR_OSTYPE_EFI_GPT 0xEE
23860
23861+#define GPT_BLOCK_SIZE 512
23862 #define GPT_HEADER_SIGNATURE 0x5452415020494645ULL
23863 #define GPT_HEADER_REVISION_V1 0x00010000
23864 #define GPT_PRIMARY_PARTITION_TABLE_LBA 1
23865@@ -78,12 +79,7 @@ typedef struct _gpt_header {
23866 __le32 num_partition_entries;
23867 __le32 sizeof_partition_entry;
23868 __le32 partition_entry_array_crc32;
23869-
23870- /* The rest of the logical block is reserved by UEFI and must be zero.
23871- * EFI standard handles this by:
23872- *
23873- * uint8_t reserved2[ BlockSize - 92 ];
23874- */
23875+ u8 reserved2[GPT_BLOCK_SIZE - 92];
23876 } __attribute__ ((packed)) gpt_header;
23877
23878 typedef struct _gpt_entry_attributes {
23879diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
23880index 2534987..39b49c4 100644
23881--- a/fs/quota/dquot.c
23882+++ b/fs/quota/dquot.c
23883@@ -1388,70 +1388,6 @@ void vfs_dq_drop(struct inode *inode)
23884 EXPORT_SYMBOL(vfs_dq_drop);
23885
23886 /*
23887- * inode_reserved_space is managed internally by quota, and protected by
23888- * i_lock similar to i_blocks+i_bytes.
23889- */
23890-static qsize_t *inode_reserved_space(struct inode * inode)
23891-{
23892- /* Filesystem must explicitly define it's own method in order to use
23893- * quota reservation interface */
23894- BUG_ON(!inode->i_sb->dq_op->get_reserved_space);
23895- return inode->i_sb->dq_op->get_reserved_space(inode);
23896-}
23897-
23898-static void inode_add_rsv_space(struct inode *inode, qsize_t number)
23899-{
23900- spin_lock(&inode->i_lock);
23901- *inode_reserved_space(inode) += number;
23902- spin_unlock(&inode->i_lock);
23903-}
23904-
23905-
23906-static void inode_claim_rsv_space(struct inode *inode, qsize_t number)
23907-{
23908- spin_lock(&inode->i_lock);
23909- *inode_reserved_space(inode) -= number;
23910- __inode_add_bytes(inode, number);
23911- spin_unlock(&inode->i_lock);
23912-}
23913-
23914-static void inode_sub_rsv_space(struct inode *inode, qsize_t number)
23915-{
23916- spin_lock(&inode->i_lock);
23917- *inode_reserved_space(inode) -= number;
23918- spin_unlock(&inode->i_lock);
23919-}
23920-
23921-static qsize_t inode_get_rsv_space(struct inode *inode)
23922-{
23923- qsize_t ret;
23924-
23925- if (!inode->i_sb->dq_op->get_reserved_space)
23926- return 0;
23927- spin_lock(&inode->i_lock);
23928- ret = *inode_reserved_space(inode);
23929- spin_unlock(&inode->i_lock);
23930- return ret;
23931-}
23932-
23933-static void inode_incr_space(struct inode *inode, qsize_t number,
23934- int reserve)
23935-{
23936- if (reserve)
23937- inode_add_rsv_space(inode, number);
23938- else
23939- inode_add_bytes(inode, number);
23940-}
23941-
23942-static void inode_decr_space(struct inode *inode, qsize_t number, int reserve)
23943-{
23944- if (reserve)
23945- inode_sub_rsv_space(inode, number);
23946- else
23947- inode_sub_bytes(inode, number);
23948-}
23949-
23950-/*
23951 * Following four functions update i_blocks+i_bytes fields and
23952 * quota information (together with appropriate checks)
23953 * NOTE: We absolutely rely on the fact that caller dirties
23954@@ -1469,21 +1405,6 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number,
23955 int cnt, ret = QUOTA_OK;
23956 char warntype[MAXQUOTAS];
23957
23958- /*
23959- * First test before acquiring mutex - solves deadlocks when we
23960- * re-enter the quota code and are already holding the mutex
23961- */
23962- if (IS_NOQUOTA(inode)) {
23963- inode_incr_space(inode, number, reserve);
23964- goto out;
23965- }
23966-
23967- down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
23968- if (IS_NOQUOTA(inode)) {
23969- inode_incr_space(inode, number, reserve);
23970- goto out_unlock;
23971- }
23972-
23973 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
23974 warntype[cnt] = QUOTA_NL_NOWARN;
23975
23976@@ -1494,8 +1415,7 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number,
23977 if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt)
23978 == NO_QUOTA) {
23979 ret = NO_QUOTA;
23980- spin_unlock(&dq_data_lock);
23981- goto out_flush_warn;
23982+ goto out_unlock;
23983 }
23984 }
23985 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
23986@@ -1506,32 +1426,64 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number,
23987 else
23988 dquot_incr_space(inode->i_dquot[cnt], number);
23989 }
23990- inode_incr_space(inode, number, reserve);
23991+ if (!reserve)
23992+ inode_add_bytes(inode, number);
23993+out_unlock:
23994 spin_unlock(&dq_data_lock);
23995+ flush_warnings(inode->i_dquot, warntype);
23996+ return ret;
23997+}
23998+
23999+int dquot_alloc_space(struct inode *inode, qsize_t number, int warn)
24000+{
24001+ int cnt, ret = QUOTA_OK;
24002+
24003+ /*
24004+ * First test before acquiring mutex - solves deadlocks when we
24005+ * re-enter the quota code and are already holding the mutex
24006+ */
24007+ if (IS_NOQUOTA(inode)) {
24008+ inode_add_bytes(inode, number);
24009+ goto out;
24010+ }
24011+
24012+ down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
24013+ if (IS_NOQUOTA(inode)) {
24014+ inode_add_bytes(inode, number);
24015+ goto out_unlock;
24016+ }
24017+
24018+ ret = __dquot_alloc_space(inode, number, warn, 0);
24019+ if (ret == NO_QUOTA)
24020+ goto out_unlock;
24021
24022- if (reserve)
24023- goto out_flush_warn;
24024 /* Dirtify all the dquots - this can block when journalling */
24025 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
24026 if (inode->i_dquot[cnt])
24027 mark_dquot_dirty(inode->i_dquot[cnt]);
24028-out_flush_warn:
24029- flush_warnings(inode->i_dquot, warntype);
24030 out_unlock:
24031 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
24032 out:
24033 return ret;
24034 }
24035-
24036-int dquot_alloc_space(struct inode *inode, qsize_t number, int warn)
24037-{
24038- return __dquot_alloc_space(inode, number, warn, 0);
24039-}
24040 EXPORT_SYMBOL(dquot_alloc_space);
24041
24042 int dquot_reserve_space(struct inode *inode, qsize_t number, int warn)
24043 {
24044- return __dquot_alloc_space(inode, number, warn, 1);
24045+ int ret = QUOTA_OK;
24046+
24047+ if (IS_NOQUOTA(inode))
24048+ goto out;
24049+
24050+ down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
24051+ if (IS_NOQUOTA(inode))
24052+ goto out_unlock;
24053+
24054+ ret = __dquot_alloc_space(inode, number, warn, 1);
24055+out_unlock:
24056+ up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
24057+out:
24058+ return ret;
24059 }
24060 EXPORT_SYMBOL(dquot_reserve_space);
24061
24062@@ -1588,14 +1540,14 @@ int dquot_claim_space(struct inode *inode, qsize_t number)
24063 int ret = QUOTA_OK;
24064
24065 if (IS_NOQUOTA(inode)) {
24066- inode_claim_rsv_space(inode, number);
24067+ inode_add_bytes(inode, number);
24068 goto out;
24069 }
24070
24071 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
24072 if (IS_NOQUOTA(inode)) {
24073 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
24074- inode_claim_rsv_space(inode, number);
24075+ inode_add_bytes(inode, number);
24076 goto out;
24077 }
24078
24079@@ -1607,7 +1559,7 @@ int dquot_claim_space(struct inode *inode, qsize_t number)
24080 number);
24081 }
24082 /* Update inode bytes */
24083- inode_claim_rsv_space(inode, number);
24084+ inode_add_bytes(inode, number);
24085 spin_unlock(&dq_data_lock);
24086 /* Dirtify all the dquots - this can block when journalling */
24087 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
24088@@ -1620,9 +1572,38 @@ out:
24089 EXPORT_SYMBOL(dquot_claim_space);
24090
24091 /*
24092+ * Release reserved quota space
24093+ */
24094+void dquot_release_reserved_space(struct inode *inode, qsize_t number)
24095+{
24096+ int cnt;
24097+
24098+ if (IS_NOQUOTA(inode))
24099+ goto out;
24100+
24101+ down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
24102+ if (IS_NOQUOTA(inode))
24103+ goto out_unlock;
24104+
24105+ spin_lock(&dq_data_lock);
24106+ /* Release reserved dquots */
24107+ for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
24108+ if (inode->i_dquot[cnt])
24109+ dquot_free_reserved_space(inode->i_dquot[cnt], number);
24110+ }
24111+ spin_unlock(&dq_data_lock);
24112+
24113+out_unlock:
24114+ up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
24115+out:
24116+ return;
24117+}
24118+EXPORT_SYMBOL(dquot_release_reserved_space);
24119+
24120+/*
24121 * This operation can block, but only after everything is updated
24122 */
24123-int __dquot_free_space(struct inode *inode, qsize_t number, int reserve)
24124+int dquot_free_space(struct inode *inode, qsize_t number)
24125 {
24126 unsigned int cnt;
24127 char warntype[MAXQUOTAS];
24128@@ -1631,7 +1612,7 @@ int __dquot_free_space(struct inode *inode, qsize_t number, int reserve)
24129 * re-enter the quota code and are already holding the mutex */
24130 if (IS_NOQUOTA(inode)) {
24131 out_sub:
24132- inode_decr_space(inode, number, reserve);
24133+ inode_sub_bytes(inode, number);
24134 return QUOTA_OK;
24135 }
24136
24137@@ -1646,43 +1627,21 @@ out_sub:
24138 if (!inode->i_dquot[cnt])
24139 continue;
24140 warntype[cnt] = info_bdq_free(inode->i_dquot[cnt], number);
24141- if (reserve)
24142- dquot_free_reserved_space(inode->i_dquot[cnt], number);
24143- else
24144- dquot_decr_space(inode->i_dquot[cnt], number);
24145+ dquot_decr_space(inode->i_dquot[cnt], number);
24146 }
24147- inode_decr_space(inode, number, reserve);
24148+ inode_sub_bytes(inode, number);
24149 spin_unlock(&dq_data_lock);
24150-
24151- if (reserve)
24152- goto out_unlock;
24153 /* Dirtify all the dquots - this can block when journalling */
24154 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
24155 if (inode->i_dquot[cnt])
24156 mark_dquot_dirty(inode->i_dquot[cnt]);
24157-out_unlock:
24158 flush_warnings(inode->i_dquot, warntype);
24159 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
24160 return QUOTA_OK;
24161 }
24162-
24163-int dquot_free_space(struct inode *inode, qsize_t number)
24164-{
24165- return __dquot_free_space(inode, number, 0);
24166-}
24167 EXPORT_SYMBOL(dquot_free_space);
24168
24169 /*
24170- * Release reserved quota space
24171- */
24172-void dquot_release_reserved_space(struct inode *inode, qsize_t number)
24173-{
24174- __dquot_free_space(inode, number, 1);
24175-
24176-}
24177-EXPORT_SYMBOL(dquot_release_reserved_space);
24178-
24179-/*
24180 * This operation can block, but only after everything is updated
24181 */
24182 int dquot_free_inode(const struct inode *inode, qsize_t number)
24183@@ -1720,6 +1679,19 @@ int dquot_free_inode(const struct inode *inode, qsize_t number)
24184 EXPORT_SYMBOL(dquot_free_inode);
24185
24186 /*
24187+ * call back function, get reserved quota space from underlying fs
24188+ */
24189+qsize_t dquot_get_reserved_space(struct inode *inode)
24190+{
24191+ qsize_t reserved_space = 0;
24192+
24193+ if (sb_any_quota_active(inode->i_sb) &&
24194+ inode->i_sb->dq_op->get_reserved_space)
24195+ reserved_space = inode->i_sb->dq_op->get_reserved_space(inode);
24196+ return reserved_space;
24197+}
24198+
24199+/*
24200 * Transfer the number of inode and blocks from one diskquota to an other.
24201 *
24202 * This operation can block, but only after everything is updated
24203@@ -1762,7 +1734,7 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
24204 }
24205 spin_lock(&dq_data_lock);
24206 cur_space = inode_get_bytes(inode);
24207- rsv_space = inode_get_rsv_space(inode);
24208+ rsv_space = dquot_get_reserved_space(inode);
24209 space = cur_space + rsv_space;
24210 /* Build the transfer_from list and check the limits */
24211 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
24212diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
24213index d240c15..a14d6cd 100644
24214--- a/fs/reiserfs/inode.c
24215+++ b/fs/reiserfs/inode.c
24216@@ -2531,12 +2531,6 @@ static int reiserfs_writepage(struct page *page, struct writeback_control *wbc)
24217 return reiserfs_write_full_page(page, wbc);
24218 }
24219
24220-static void reiserfs_truncate_failed_write(struct inode *inode)
24221-{
24222- truncate_inode_pages(inode->i_mapping, inode->i_size);
24223- reiserfs_truncate_file(inode, 0);
24224-}
24225-
24226 static int reiserfs_write_begin(struct file *file,
24227 struct address_space *mapping,
24228 loff_t pos, unsigned len, unsigned flags,
24229@@ -2603,8 +2597,6 @@ static int reiserfs_write_begin(struct file *file,
24230 if (ret) {
24231 unlock_page(page);
24232 page_cache_release(page);
24233- /* Truncate allocated blocks */
24234- reiserfs_truncate_failed_write(inode);
24235 }
24236 return ret;
24237 }
24238@@ -2697,7 +2689,8 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
24239 ** transaction tracking stuff when the size changes. So, we have
24240 ** to do the i_size updates here.
24241 */
24242- if (pos + copied > inode->i_size) {
24243+ pos += copied;
24244+ if (pos > inode->i_size) {
24245 struct reiserfs_transaction_handle myth;
24246 reiserfs_write_lock(inode->i_sb);
24247 /* If the file have grown beyond the border where it
24248@@ -2715,7 +2708,7 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
24249 goto journal_error;
24250 }
24251 reiserfs_update_inode_transaction(inode);
24252- inode->i_size = pos + copied;
24253+ inode->i_size = pos;
24254 /*
24255 * this will just nest into our transaction. It's important
24256 * to use mark_inode_dirty so the inode gets pushed around on the
24257@@ -2742,10 +2735,6 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
24258 out:
24259 unlock_page(page);
24260 page_cache_release(page);
24261-
24262- if (pos + len > inode->i_size)
24263- reiserfs_truncate_failed_write(inode);
24264-
24265 return ret == 0 ? copied : ret;
24266
24267 journal_error:
24268diff --git a/fs/romfs/super.c b/fs/romfs/super.c
24269index 42d2135..c117fa8 100644
24270--- a/fs/romfs/super.c
24271+++ b/fs/romfs/super.c
24272@@ -544,7 +544,6 @@ error:
24273 error_rsb_inval:
24274 ret = -EINVAL;
24275 error_rsb:
24276- kfree(rsb);
24277 return ret;
24278 }
24279
24280diff --git a/fs/stat.c b/fs/stat.c
24281index c4ecd52..075694e 100644
24282--- a/fs/stat.c
24283+++ b/fs/stat.c
24284@@ -401,9 +401,9 @@ SYSCALL_DEFINE4(fstatat64, int, dfd, char __user *, filename,
24285 }
24286 #endif /* __ARCH_WANT_STAT64 */
24287
24288-/* Caller is here responsible for sufficient locking (ie. inode->i_lock) */
24289-void __inode_add_bytes(struct inode *inode, loff_t bytes)
24290+void inode_add_bytes(struct inode *inode, loff_t bytes)
24291 {
24292+ spin_lock(&inode->i_lock);
24293 inode->i_blocks += bytes >> 9;
24294 bytes &= 511;
24295 inode->i_bytes += bytes;
24296@@ -411,12 +411,6 @@ void __inode_add_bytes(struct inode *inode, loff_t bytes)
24297 inode->i_blocks++;
24298 inode->i_bytes -= 512;
24299 }
24300-}
24301-
24302-void inode_add_bytes(struct inode *inode, loff_t bytes)
24303-{
24304- spin_lock(&inode->i_lock);
24305- __inode_add_bytes(inode, bytes);
24306 spin_unlock(&inode->i_lock);
24307 }
24308
24309diff --git a/fs/super.c b/fs/super.c
24310index aff046b..19eb70b 100644
24311--- a/fs/super.c
24312+++ b/fs/super.c
24313@@ -901,9 +901,8 @@ int get_sb_single(struct file_system_type *fs_type,
24314 return error;
24315 }
24316 s->s_flags |= MS_ACTIVE;
24317- } else {
24318- do_remount_sb(s, flags, data, 0);
24319 }
24320+ do_remount_sb(s, flags, data, 0);
24321 simple_set_mnt(mnt, s);
24322 return 0;
24323 }
24324diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c
24325index 02a022a..e28cecf 100644
24326--- a/fs/sysfs/inode.c
24327+++ b/fs/sysfs/inode.c
24328@@ -94,29 +94,30 @@ int sysfs_setattr(struct dentry * dentry, struct iattr * iattr)
24329 if (!sd_attrs)
24330 return -ENOMEM;
24331 sd->s_iattr = sd_attrs;
24332- }
24333- /* attributes were changed at least once in past */
24334- iattrs = &sd_attrs->ia_iattr;
24335-
24336- if (ia_valid & ATTR_UID)
24337- iattrs->ia_uid = iattr->ia_uid;
24338- if (ia_valid & ATTR_GID)
24339- iattrs->ia_gid = iattr->ia_gid;
24340- if (ia_valid & ATTR_ATIME)
24341- iattrs->ia_atime = timespec_trunc(iattr->ia_atime,
24342- inode->i_sb->s_time_gran);
24343- if (ia_valid & ATTR_MTIME)
24344- iattrs->ia_mtime = timespec_trunc(iattr->ia_mtime,
24345- inode->i_sb->s_time_gran);
24346- if (ia_valid & ATTR_CTIME)
24347- iattrs->ia_ctime = timespec_trunc(iattr->ia_ctime,
24348- inode->i_sb->s_time_gran);
24349- if (ia_valid & ATTR_MODE) {
24350- umode_t mode = iattr->ia_mode;
24351-
24352- if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
24353- mode &= ~S_ISGID;
24354- iattrs->ia_mode = sd->s_mode = mode;
24355+ } else {
24356+ /* attributes were changed at least once in past */
24357+ iattrs = &sd_attrs->ia_iattr;
24358+
24359+ if (ia_valid & ATTR_UID)
24360+ iattrs->ia_uid = iattr->ia_uid;
24361+ if (ia_valid & ATTR_GID)
24362+ iattrs->ia_gid = iattr->ia_gid;
24363+ if (ia_valid & ATTR_ATIME)
24364+ iattrs->ia_atime = timespec_trunc(iattr->ia_atime,
24365+ inode->i_sb->s_time_gran);
24366+ if (ia_valid & ATTR_MTIME)
24367+ iattrs->ia_mtime = timespec_trunc(iattr->ia_mtime,
24368+ inode->i_sb->s_time_gran);
24369+ if (ia_valid & ATTR_CTIME)
24370+ iattrs->ia_ctime = timespec_trunc(iattr->ia_ctime,
24371+ inode->i_sb->s_time_gran);
24372+ if (ia_valid & ATTR_MODE) {
24373+ umode_t mode = iattr->ia_mode;
24374+
24375+ if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
24376+ mode &= ~S_ISGID;
24377+ iattrs->ia_mode = sd->s_mode = mode;
24378+ }
24379 }
24380 return error;
24381 }
24382diff --git a/fs/udf/super.c b/fs/udf/super.c
24383index 1e4543c..9d1b8c2 100644
24384--- a/fs/udf/super.c
24385+++ b/fs/udf/super.c
24386@@ -1078,39 +1078,21 @@ static int udf_fill_partdesc_info(struct super_block *sb,
24387 return 0;
24388 }
24389
24390-static void udf_find_vat_block(struct super_block *sb, int p_index,
24391- int type1_index, sector_t start_block)
24392-{
24393- struct udf_sb_info *sbi = UDF_SB(sb);
24394- struct udf_part_map *map = &sbi->s_partmaps[p_index];
24395- sector_t vat_block;
24396- struct kernel_lb_addr ino;
24397-
24398- /*
24399- * VAT file entry is in the last recorded block. Some broken disks have
24400- * it a few blocks before so try a bit harder...
24401- */
24402- ino.partitionReferenceNum = type1_index;
24403- for (vat_block = start_block;
24404- vat_block >= map->s_partition_root &&
24405- vat_block >= start_block - 3 &&
24406- !sbi->s_vat_inode; vat_block--) {
24407- ino.logicalBlockNum = vat_block - map->s_partition_root;
24408- sbi->s_vat_inode = udf_iget(sb, &ino);
24409- }
24410-}
24411-
24412 static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
24413 {
24414 struct udf_sb_info *sbi = UDF_SB(sb);
24415 struct udf_part_map *map = &sbi->s_partmaps[p_index];
24416+ struct kernel_lb_addr ino;
24417 struct buffer_head *bh = NULL;
24418 struct udf_inode_info *vati;
24419 uint32_t pos;
24420 struct virtualAllocationTable20 *vat20;
24421 sector_t blocks = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits;
24422
24423- udf_find_vat_block(sb, p_index, type1_index, sbi->s_last_block);
24424+ /* VAT file entry is in the last recorded block */
24425+ ino.partitionReferenceNum = type1_index;
24426+ ino.logicalBlockNum = sbi->s_last_block - map->s_partition_root;
24427+ sbi->s_vat_inode = udf_iget(sb, &ino);
24428 if (!sbi->s_vat_inode &&
24429 sbi->s_last_block != blocks - 1) {
24430 printk(KERN_NOTICE "UDF-fs: Failed to read VAT inode from the"
24431@@ -1118,7 +1100,9 @@ static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
24432 "block of the device (%lu).\n",
24433 (unsigned long)sbi->s_last_block,
24434 (unsigned long)blocks - 1);
24435- udf_find_vat_block(sb, p_index, type1_index, blocks - 1);
24436+ ino.partitionReferenceNum = type1_index;
24437+ ino.logicalBlockNum = blocks - 1 - map->s_partition_root;
24438+ sbi->s_vat_inode = udf_iget(sb, &ino);
24439 }
24440 if (!sbi->s_vat_inode)
24441 return 1;
24442diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
24443index 0946997..9d7febd 100644
24444--- a/include/acpi/platform/aclinux.h
24445+++ b/include/acpi/platform/aclinux.h
24446@@ -152,7 +152,7 @@ static inline void *acpi_os_acquire_object(acpi_cache_t * cache)
24447 #include <linux/hardirq.h>
24448 #define ACPI_PREEMPTION_POINT() \
24449 do { \
24450- if (!in_atomic_preempt_off() && !irqs_disabled()) \
24451+ if (!in_atomic_preempt_off()) \
24452 cond_resched(); \
24453 } while (0)
24454
24455diff --git a/include/drm/drmP.h b/include/drm/drmP.h
24456index 7ad3faa..c8e64bb 100644
24457--- a/include/drm/drmP.h
24458+++ b/include/drm/drmP.h
24459@@ -1295,7 +1295,6 @@ extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
24460 extern void drm_handle_vblank(struct drm_device *dev, int crtc);
24461 extern int drm_vblank_get(struct drm_device *dev, int crtc);
24462 extern void drm_vblank_put(struct drm_device *dev, int crtc);
24463-extern void drm_vblank_off(struct drm_device *dev, int crtc);
24464 extern void drm_vblank_cleanup(struct drm_device *dev);
24465 /* Modesetting support */
24466 extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc);
24467@@ -1402,7 +1401,7 @@ extern int drm_ati_pcigart_cleanup(struct drm_device *dev,
24468 struct drm_ati_pcigart_info * gart_info);
24469
24470 extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size,
24471- size_t align);
24472+ size_t align, dma_addr_t maxaddr);
24473 extern void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
24474 extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
24475
24476diff --git a/include/drm/drm_os_linux.h b/include/drm/drm_os_linux.h
24477index 3933691..26641e9 100644
24478--- a/include/drm/drm_os_linux.h
24479+++ b/include/drm/drm_os_linux.h
24480@@ -123,5 +123,5 @@ do { \
24481 remove_wait_queue(&(queue), &entry); \
24482 } while (0)
24483
24484-#define DRM_WAKEUP( queue ) wake_up( queue )
24485+#define DRM_WAKEUP( queue ) wake_up_interruptible( queue )
24486 #define DRM_INIT_WAITQUEUE( queue ) init_waitqueue_head( queue )
24487diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
24488index b199170..6983a7c 100644
24489--- a/include/drm/ttm/ttm_memory.h
24490+++ b/include/drm/ttm/ttm_memory.h
24491@@ -33,7 +33,6 @@
24492 #include <linux/wait.h>
24493 #include <linux/errno.h>
24494 #include <linux/kobject.h>
24495-#include <linux/mm.h>
24496
24497 /**
24498 * struct ttm_mem_shrink - callback to shrink TTM memory usage.
24499diff --git a/include/linux/acpi.h b/include/linux/acpi.h
24500index c010b94..dfcd920 100644
24501--- a/include/linux/acpi.h
24502+++ b/include/linux/acpi.h
24503@@ -253,13 +253,6 @@ void __init acpi_old_suspend_ordering(void);
24504 void __init acpi_s4_no_nvs(void);
24505 #endif /* CONFIG_PM_SLEEP */
24506
24507-struct acpi_osc_context {
24508- char *uuid_str; /* uuid string */
24509- int rev;
24510- struct acpi_buffer cap; /* arg2/arg3 */
24511- struct acpi_buffer ret; /* free by caller if success */
24512-};
24513-
24514 #define OSC_QUERY_TYPE 0
24515 #define OSC_SUPPORT_TYPE 1
24516 #define OSC_CONTROL_TYPE 2
24517@@ -272,15 +265,6 @@ struct acpi_osc_context {
24518 #define OSC_INVALID_REVISION_ERROR 8
24519 #define OSC_CAPABILITIES_MASK_ERROR 16
24520
24521-acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
24522-
24523-/* platform-wide _OSC bits */
24524-#define OSC_SB_PAD_SUPPORT 1
24525-#define OSC_SB_PPC_OST_SUPPORT 2
24526-#define OSC_SB_PR3_SUPPORT 4
24527-#define OSC_SB_CPUHP_OST_SUPPORT 8
24528-#define OSC_SB_APEI_SUPPORT 16
24529-
24530 /* _OSC DW1 Definition (OS Support Fields) */
24531 #define OSC_EXT_PCI_CONFIG_SUPPORT 1
24532 #define OSC_ACTIVE_STATE_PWR_SUPPORT 2
24533diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
24534index 340f441..aece486 100644
24535--- a/include/linux/binfmts.h
24536+++ b/include/linux/binfmts.h
24537@@ -101,7 +101,6 @@ extern int prepare_binprm(struct linux_binprm *);
24538 extern int __must_check remove_arg_zero(struct linux_binprm *);
24539 extern int search_binary_handler(struct linux_binprm *,struct pt_regs *);
24540 extern int flush_old_exec(struct linux_binprm * bprm);
24541-extern void setup_new_exec(struct linux_binprm * bprm);
24542
24543 extern int suid_dumpable;
24544 #define SUID_DUMP_DISABLE 0 /* No setuid dumping */
24545diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
24546index 912b8ff..221cecd 100644
24547--- a/include/linux/blkdev.h
24548+++ b/include/linux/blkdev.h
24549@@ -942,8 +942,6 @@ extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
24550 extern void blk_set_default_limits(struct queue_limits *lim);
24551 extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
24552 sector_t offset);
24553-extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
24554- sector_t offset);
24555 extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
24556 sector_t offset);
24557 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
24558@@ -1116,18 +1114,11 @@ static inline int queue_alignment_offset(struct request_queue *q)
24559 return q->limits.alignment_offset;
24560 }
24561
24562-static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t offset)
24563-{
24564- unsigned int granularity = max(lim->physical_block_size, lim->io_min);
24565-
24566- offset &= granularity - 1;
24567- return (granularity + lim->alignment_offset - offset) & (granularity - 1);
24568-}
24569-
24570 static inline int queue_sector_alignment_offset(struct request_queue *q,
24571 sector_t sector)
24572 {
24573- return queue_limit_alignment_offset(&q->limits, sector << 9);
24574+ return ((sector << 9) - q->limits.alignment_offset)
24575+ & (q->limits.io_min - 1);
24576 }
24577
24578 static inline int bdev_alignment_offset(struct block_device *bdev)
24579diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
24580index 64b1a4c..83d2fbd 100644
24581--- a/include/linux/clocksource.h
24582+++ b/include/linux/clocksource.h
24583@@ -151,7 +151,6 @@ extern u64 timecounter_cyc2time(struct timecounter *tc,
24584 * subtraction of non 64 bit counters
24585 * @mult: cycle to nanosecond multiplier
24586 * @shift: cycle to nanosecond divisor (power of two)
24587- * @max_idle_ns: max idle time permitted by the clocksource (nsecs)
24588 * @flags: flags describing special properties
24589 * @vread: vsyscall based read
24590 * @resume: resume function for the clocksource, if necessary
24591@@ -169,7 +168,6 @@ struct clocksource {
24592 cycle_t mask;
24593 u32 mult;
24594 u32 shift;
24595- u64 max_idle_ns;
24596 unsigned long flags;
24597 cycle_t (*vread)(void);
24598 void (*resume)(void);
24599diff --git a/include/linux/completion.h b/include/linux/completion.h
24600index 4a6b604..258bec1 100644
24601--- a/include/linux/completion.h
@@ -24608,98 +744,8 @@ index 4a6b604..258bec1 100644
24608
24609 /**
24610 * INIT_COMPLETION: - reinitialize a completion structure
24611diff --git a/include/linux/connector.h b/include/linux/connector.h
24612index ecb61c4..3a14615 100644
24613--- a/include/linux/connector.h
24614+++ b/include/linux/connector.h
24615@@ -24,6 +24,9 @@
24616
24617 #include <linux/types.h>
24618
24619+#define CN_IDX_CONNECTOR 0xffffffff
24620+#define CN_VAL_CONNECTOR 0xffffffff
24621+
24622 /*
24623 * Process Events connector unique ids -- used for message routing
24624 */
24625@@ -70,6 +73,30 @@ struct cn_msg {
24626 __u8 data[0];
24627 };
24628
24629+/*
24630+ * Notify structure - requests notification about
24631+ * registering/unregistering idx/val in range [first, first+range].
24632+ */
24633+struct cn_notify_req {
24634+ __u32 first;
24635+ __u32 range;
24636+};
24637+
24638+/*
24639+ * Main notification control message
24640+ * *_notify_num - number of appropriate cn_notify_req structures after
24641+ * this struct.
24642+ * group - notification receiver's idx.
24643+ * len - total length of the attached data.
24644+ */
24645+struct cn_ctl_msg {
24646+ __u32 idx_notify_num;
24647+ __u32 val_notify_num;
24648+ __u32 group;
24649+ __u32 len;
24650+ __u8 data[0];
24651+};
24652+
24653 #ifdef __KERNEL__
24654
24655 #include <asm/atomic.h>
24656@@ -122,6 +149,11 @@ struct cn_callback_entry {
24657 u32 seq, group;
24658 };
24659
24660+struct cn_ctl_entry {
24661+ struct list_head notify_entry;
24662+ struct cn_ctl_msg *msg;
24663+};
24664+
24665 struct cn_dev {
24666 struct cb_id id;
24667
24668diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
24669index d77b547..789cf5f 100644
24670--- a/include/linux/cpumask.h
24671+++ b/include/linux/cpumask.h
24672@@ -84,7 +84,6 @@ extern const struct cpumask *const cpu_active_mask;
24673 #define num_online_cpus() cpumask_weight(cpu_online_mask)
24674 #define num_possible_cpus() cpumask_weight(cpu_possible_mask)
24675 #define num_present_cpus() cpumask_weight(cpu_present_mask)
24676-#define num_active_cpus() cpumask_weight(cpu_active_mask)
24677 #define cpu_online(cpu) cpumask_test_cpu((cpu), cpu_online_mask)
24678 #define cpu_possible(cpu) cpumask_test_cpu((cpu), cpu_possible_mask)
24679 #define cpu_present(cpu) cpumask_test_cpu((cpu), cpu_present_mask)
24680@@ -93,7 +92,6 @@ extern const struct cpumask *const cpu_active_mask;
24681 #define num_online_cpus() 1
24682 #define num_possible_cpus() 1
24683 #define num_present_cpus() 1
24684-#define num_active_cpus() 1
24685 #define cpu_online(cpu) ((cpu) == 0)
24686 #define cpu_possible(cpu) ((cpu) == 0)
24687 #define cpu_present(cpu) ((cpu) == 0)
24688diff --git a/include/linux/enclosure.h b/include/linux/enclosure.h
24689index 9a33c5f..90d1c21 100644
24690--- a/include/linux/enclosure.h
24691+++ b/include/linux/enclosure.h
24692@@ -42,8 +42,6 @@ enum enclosure_status {
24693 ENCLOSURE_STATUS_NOT_INSTALLED,
24694 ENCLOSURE_STATUS_UNKNOWN,
24695 ENCLOSURE_STATUS_UNAVAILABLE,
24696- /* last element for counting purposes */
24697- ENCLOSURE_STATUS_MAX
24698 };
24699
24700 /* SFF-8485 activity light settings */
24701diff --git a/include/linux/fs.h b/include/linux/fs.h
24702index 98ea200..5c7e0ff 100644
748index 2620a8c..5c7e0ff 100644
24703--- a/include/linux/fs.h
24704+++ b/include/linux/fs.h
24705@@ -15,8 +15,8 @@
@@ -24780,28 +826,8 @@ index 98ea200..5c7e0ff 100644
24780 /* fs/dcache.c -- generic fs support functions */
24781 extern int is_subdir(struct dentry *, struct dentry *);
24782 extern ino_t find_inode_number(struct dentry *, struct qstr *);
24783@@ -2314,7 +2317,6 @@ extern const struct inode_operations page_symlink_inode_operations;
24784 extern int generic_readlink(struct dentry *, char __user *, int);
24785 extern void generic_fillattr(struct inode *, struct kstat *);
24786 extern int vfs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
24787-void __inode_add_bytes(struct inode *inode, loff_t bytes);
24788 void inode_add_bytes(struct inode *inode, loff_t bytes);
24789 void inode_sub_bytes(struct inode *inode, loff_t bytes);
24790 loff_t inode_get_bytes(struct inode *inode);
24791diff --git a/include/linux/hid.h b/include/linux/hid.h
24792index 8709365..10f6284 100644
24793--- a/include/linux/hid.h
24794+++ b/include/linux/hid.h
24795@@ -312,7 +312,6 @@ struct hid_item {
24796 #define HID_QUIRK_MULTI_INPUT 0x00000040
24797 #define HID_QUIRK_SKIP_OUTPUT_REPORTS 0x00010000
24798 #define HID_QUIRK_FULLSPEED_INTERVAL 0x10000000
24799-#define HID_QUIRK_NO_INIT_REPORTS 0x20000000
24800
24801 /*
24802 * This is the global environment of the parser. This information is
24803diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
24804index 9bace4b..b984b94 100644
830index ff037f0..b984b94 100644
24805--- a/include/linux/hrtimer.h
24806+++ b/include/linux/hrtimer.h
24807@@ -166,6 +166,7 @@ struct hrtimer_clock_base {
@@ -24850,235 +876,8 @@ index 9bace4b..b984b94 100644
24850 extern int hrtimer_cancel(struct hrtimer *timer);
24851 extern int hrtimer_try_to_cancel(struct hrtimer *timer);
24852
24853@@ -446,7 +471,7 @@ extern void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
24854
24855 static inline void timer_stats_account_hrtimer(struct hrtimer *timer)
24856 {
24857- if (likely(!timer_stats_active))
24858+ if (likely(!timer->start_site))
24859 return;
24860 timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
24861 timer->function, timer->start_comm, 0);
24862@@ -457,6 +482,8 @@ extern void __timer_stats_hrtimer_set_start_info(struct hrtimer *timer,
24863
24864 static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer)
24865 {
24866+ if (likely(!timer_stats_active))
24867+ return;
24868 __timer_stats_hrtimer_set_start_info(timer, __builtin_return_address(0));
24869 }
24870
24871diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
24872index 9cd0bcf..ad27c7d 100644
24873--- a/include/linux/inetdevice.h
24874+++ b/include/linux/inetdevice.h
24875@@ -83,7 +83,6 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
24876 #define IN_DEV_FORWARD(in_dev) IN_DEV_CONF_GET((in_dev), FORWARDING)
24877 #define IN_DEV_MFORWARD(in_dev) IN_DEV_ANDCONF((in_dev), MC_FORWARDING)
24878 #define IN_DEV_RPFILTER(in_dev) IN_DEV_MAXCONF((in_dev), RP_FILTER)
24879-#define IN_DEV_SRC_VMARK(in_dev) IN_DEV_ORCONF((in_dev), SRC_VMARK)
24880 #define IN_DEV_SOURCE_ROUTE(in_dev) IN_DEV_ANDCONF((in_dev), \
24881 ACCEPT_SOURCE_ROUTE)
24882 #define IN_DEV_BOOTP_RELAY(in_dev) IN_DEV_ANDCONF((in_dev), BOOTP_RELAY)
24883diff --git a/include/linux/kvm.h b/include/linux/kvm.h
24884index 0eadd71..f8f8900 100644
24885--- a/include/linux/kvm.h
24886+++ b/include/linux/kvm.h
24887@@ -116,11 +116,6 @@ struct kvm_run {
24888 __u64 cr8;
24889 __u64 apic_base;
24890
24891-#ifdef __KVM_S390
24892- /* the processor status word for s390 */
24893- __u64 psw_mask; /* psw upper half */
24894- __u64 psw_addr; /* psw lower half */
24895-#endif
24896 union {
24897 /* KVM_EXIT_UNKNOWN */
24898 struct {
24899@@ -172,6 +167,8 @@ struct kvm_run {
24900 /* KVM_EXIT_S390_SIEIC */
24901 struct {
24902 __u8 icptcode;
24903+ __u64 mask; /* psw upper half */
24904+ __u64 addr; /* psw lower half */
24905 __u16 ipa;
24906 __u32 ipb;
24907 } s390_sieic;
24908@@ -439,7 +436,6 @@ struct kvm_ioeventfd {
24909 #endif
24910 #define KVM_CAP_IOEVENTFD 36
24911 #define KVM_CAP_SET_IDENTITY_MAP_ADDR 37
24912-#define KVM_CAP_ADJUST_CLOCK 39
24913
24914 #ifdef KVM_CAP_IRQ_ROUTING
24915
24916@@ -478,7 +474,6 @@ struct kvm_irq_routing {
24917 };
24918
24919 #endif
24920-#define KVM_CAP_S390_PSW 42
24921
24922 #ifdef KVM_CAP_MCE
24923 /* x86 MCE */
24924@@ -502,12 +497,6 @@ struct kvm_irqfd {
24925 __u8 pad[20];
24926 };
24927
24928-struct kvm_clock_data {
24929- __u64 clock;
24930- __u32 flags;
24931- __u32 pad[9];
24932-};
24933-
24934 /*
24935 * ioctls for VM fds
24936 */
24937@@ -557,8 +546,6 @@ struct kvm_clock_data {
24938 #define KVM_CREATE_PIT2 _IOW(KVMIO, 0x77, struct kvm_pit_config)
24939 #define KVM_SET_BOOT_CPU_ID _IO(KVMIO, 0x78)
24940 #define KVM_IOEVENTFD _IOW(KVMIO, 0x79, struct kvm_ioeventfd)
24941-#define KVM_SET_CLOCK _IOW(KVMIO, 0x7b, struct kvm_clock_data)
24942-#define KVM_GET_CLOCK _IOR(KVMIO, 0x7c, struct kvm_clock_data)
24943
24944 /*
24945 * ioctls for vcpu fds
24946diff --git a/include/linux/libata.h b/include/linux/libata.h
24947index b0f6d97..8769864 100644
24948--- a/include/linux/libata.h
24949+++ b/include/linux/libata.h
24950@@ -354,9 +354,6 @@ enum {
24951 /* max tries if error condition is still set after ->error_handler */
24952 ATA_EH_MAX_TRIES = 5,
24953
24954- /* sometimes resuming a link requires several retries */
24955- ATA_LINK_RESUME_TRIES = 5,
24956-
24957 /* how hard are we gonna try to probe/recover devices */
24958 ATA_PROBE_MAX_TRIES = 3,
24959 ATA_EH_DEV_TRIES = 3,
24960diff --git a/include/linux/mfd/wm8350/pmic.h b/include/linux/mfd/wm8350/pmic.h
24961index e786fe9..be3264e 100644
24962--- a/include/linux/mfd/wm8350/pmic.h
24963+++ b/include/linux/mfd/wm8350/pmic.h
24964@@ -666,20 +666,20 @@
24965 #define WM8350_ISINK_FLASH_DUR_64MS (1 << 8)
24966 #define WM8350_ISINK_FLASH_DUR_96MS (2 << 8)
24967 #define WM8350_ISINK_FLASH_DUR_1024MS (3 << 8)
24968-#define WM8350_ISINK_FLASH_ON_INSTANT (0 << 0)
24969-#define WM8350_ISINK_FLASH_ON_0_25S (1 << 0)
24970-#define WM8350_ISINK_FLASH_ON_0_50S (2 << 0)
24971-#define WM8350_ISINK_FLASH_ON_1_00S (3 << 0)
24972-#define WM8350_ISINK_FLASH_ON_1_95S (1 << 0)
24973-#define WM8350_ISINK_FLASH_ON_3_91S (2 << 0)
24974-#define WM8350_ISINK_FLASH_ON_7_80S (3 << 0)
24975-#define WM8350_ISINK_FLASH_OFF_INSTANT (0 << 4)
24976-#define WM8350_ISINK_FLASH_OFF_0_25S (1 << 4)
24977-#define WM8350_ISINK_FLASH_OFF_0_50S (2 << 4)
24978-#define WM8350_ISINK_FLASH_OFF_1_00S (3 << 4)
24979-#define WM8350_ISINK_FLASH_OFF_1_95S (1 << 4)
24980-#define WM8350_ISINK_FLASH_OFF_3_91S (2 << 4)
24981-#define WM8350_ISINK_FLASH_OFF_7_80S (3 << 4)
24982+#define WM8350_ISINK_FLASH_ON_INSTANT (0 << 4)
24983+#define WM8350_ISINK_FLASH_ON_0_25S (1 << 4)
24984+#define WM8350_ISINK_FLASH_ON_0_50S (2 << 4)
24985+#define WM8350_ISINK_FLASH_ON_1_00S (3 << 4)
24986+#define WM8350_ISINK_FLASH_ON_1_95S (1 << 4)
24987+#define WM8350_ISINK_FLASH_ON_3_91S (2 << 4)
24988+#define WM8350_ISINK_FLASH_ON_7_80S (3 << 4)
24989+#define WM8350_ISINK_FLASH_OFF_INSTANT (0 << 0)
24990+#define WM8350_ISINK_FLASH_OFF_0_25S (1 << 0)
24991+#define WM8350_ISINK_FLASH_OFF_0_50S (2 << 0)
24992+#define WM8350_ISINK_FLASH_OFF_1_00S (3 << 0)
24993+#define WM8350_ISINK_FLASH_OFF_1_95S (1 << 0)
24994+#define WM8350_ISINK_FLASH_OFF_3_91S (2 << 0)
24995+#define WM8350_ISINK_FLASH_OFF_7_80S (3 << 0)
24996
24997 /*
24998 * Regulator Interrupts.
24999diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
25000index 3c62ed4..ed5d750 100644
25001--- a/include/linux/pagemap.h
25002+++ b/include/linux/pagemap.h
25003@@ -253,8 +253,6 @@ extern struct page * read_cache_page_async(struct address_space *mapping,
25004 extern struct page * read_cache_page(struct address_space *mapping,
25005 pgoff_t index, filler_t *filler,
25006 void *data);
25007-extern struct page * read_cache_page_gfp(struct address_space *mapping,
25008- pgoff_t index, gfp_t gfp_mask);
25009 extern int read_cache_pages(struct address_space *mapping,
25010 struct list_head *pages, filler_t *filler, void *data);
25011
25012diff --git a/include/linux/pci.h b/include/linux/pci.h
25013index 2547515..f5c7cd3 100644
25014--- a/include/linux/pci.h
25015+++ b/include/linux/pci.h
25016@@ -564,9 +564,6 @@ void pcibios_align_resource(void *, struct resource *, resource_size_t,
25017 resource_size_t);
25018 void pcibios_update_irq(struct pci_dev *, int irq);
25019
25020-/* Weak but can be overriden by arch */
25021-void pci_fixup_cardbus(struct pci_bus *);
25022-
25023 /* Generic PCI functions used internally */
25024
25025 extern struct pci_bus *pci_find_bus(int domain, int busnr);
25026diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
25027index 1b7f2a7..84cf1f3 100644
25028--- a/include/linux/pci_ids.h
25029+++ b/include/linux/pci_ids.h
25030@@ -2290,20 +2290,6 @@
25031 #define PCI_DEVICE_ID_MPC8536 0x0051
25032 #define PCI_DEVICE_ID_P2020E 0x0070
25033 #define PCI_DEVICE_ID_P2020 0x0071
25034-#define PCI_DEVICE_ID_P2010E 0x0078
25035-#define PCI_DEVICE_ID_P2010 0x0079
25036-#define PCI_DEVICE_ID_P1020E 0x0100
25037-#define PCI_DEVICE_ID_P1020 0x0101
25038-#define PCI_DEVICE_ID_P1011E 0x0108
25039-#define PCI_DEVICE_ID_P1011 0x0109
25040-#define PCI_DEVICE_ID_P1022E 0x0110
25041-#define PCI_DEVICE_ID_P1022 0x0111
25042-#define PCI_DEVICE_ID_P1013E 0x0118
25043-#define PCI_DEVICE_ID_P1013 0x0119
25044-#define PCI_DEVICE_ID_P4080E 0x0400
25045-#define PCI_DEVICE_ID_P4080 0x0401
25046-#define PCI_DEVICE_ID_P4040E 0x0408
25047-#define PCI_DEVICE_ID_P4040 0x0409
25048 #define PCI_DEVICE_ID_MPC8641 0x7010
25049 #define PCI_DEVICE_ID_MPC8641D 0x7011
25050 #define PCI_DEVICE_ID_MPC8610 0x7018
25051diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
25052index 81c9689..9e70126 100644
25053--- a/include/linux/perf_event.h
25054+++ b/include/linux/perf_event.h
25055@@ -219,7 +219,7 @@ struct perf_event_attr {
25056 #define PERF_EVENT_IOC_DISABLE _IO ('$', 1)
25057 #define PERF_EVENT_IOC_REFRESH _IO ('$', 2)
25058 #define PERF_EVENT_IOC_RESET _IO ('$', 3)
25059-#define PERF_EVENT_IOC_PERIOD _IOW('$', 4, __u64)
25060+#define PERF_EVENT_IOC_PERIOD _IOW('$', 4, u64)
25061 #define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5)
25062
25063 enum perf_event_ioc_flags {
25064diff --git a/include/linux/quota.h b/include/linux/quota.h
25065index 8fd8efc..78c4889 100644
25066--- a/include/linux/quota.h
25067+++ b/include/linux/quota.h
25068@@ -313,9 +313,8 @@ struct dquot_operations {
25069 int (*claim_space) (struct inode *, qsize_t);
25070 /* release rsved quota for delayed alloc */
25071 void (*release_rsv) (struct inode *, qsize_t);
25072- /* get reserved quota for delayed alloc, value returned is managed by
25073- * quota code only */
25074- qsize_t *(*get_reserved_space) (struct inode *);
25075+ /* get reserved quota for delayed alloc */
25076+ qsize_t (*get_reserved_space) (struct inode *);
25077 };
25078
25079 /* Operations handling requests from userspace */
25080diff --git a/include/linux/sched.h b/include/linux/sched.h
25081index e48311e..7248141 100644
880index 75e6e60..7248141 100644
25082--- a/include/linux/sched.h
25083+++ b/include/linux/sched.h
25084@@ -38,6 +38,7 @@
@@ -25118,15 +917,6 @@ index e48311e..7248141 100644
25118 * p->real_parent->pid)
25119 */
25120 struct task_struct *real_parent; /* real parent process */
25121@@ -1354,7 +1358,7 @@ struct task_struct {
25122 char comm[TASK_COMM_LEN]; /* executable name excluding path
25123 - access with [gs]et_task_comm (which lock
25124 it with task_lock())
25125- - initialized normally by setup_new_exec */
25126+ - initialized normally by flush_old_exec */
25127 /* file system info */
25128 int link_count, total_link_count;
25129 #ifdef CONFIG_SYSVIPC
25130@@ -1505,6 +1509,13 @@ struct task_struct {
25131 int make_it_fail;
25132 #endif
@@ -25150,91 +940,6 @@ index e48311e..7248141 100644
25150
25151 extern void block_all_signals(int (*notifier)(void *priv), void *priv,
25152 sigset_t *mask);
25153@@ -2086,18 +2097,11 @@ static inline int is_si_special(const struct siginfo *info)
25154 return info <= SEND_SIG_FORCED;
25155 }
25156
25157-/*
25158- * True if we are on the alternate signal stack.
25159- */
25160+/* True if we are on the alternate signal stack. */
25161+
25162 static inline int on_sig_stack(unsigned long sp)
25163 {
25164-#ifdef CONFIG_STACK_GROWSUP
25165- return sp >= current->sas_ss_sp &&
25166- sp - current->sas_ss_sp < current->sas_ss_size;
25167-#else
25168- return sp > current->sas_ss_sp &&
25169- sp - current->sas_ss_sp <= current->sas_ss_size;
25170-#endif
25171+ return (sp - current->sas_ss_sp < current->sas_ss_size);
25172 }
25173
25174 static inline int sas_ss_flags(unsigned long sp)
25175@@ -2583,28 +2587,6 @@ static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
25176
25177 #define TASK_STATE_TO_CHAR_STR "RSDTtZX"
25178
25179-static inline unsigned long task_rlimit(const struct task_struct *tsk,
25180- unsigned int limit)
25181-{
25182- return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
25183-}
25184-
25185-static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
25186- unsigned int limit)
25187-{
25188- return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
25189-}
25190-
25191-static inline unsigned long rlimit(unsigned int limit)
25192-{
25193- return task_rlimit(current, limit);
25194-}
25195-
25196-static inline unsigned long rlimit_max(unsigned int limit)
25197-{
25198- return task_rlimit_max(current, limit);
25199-}
25200-
25201 #endif /* __KERNEL__ */
25202
25203 #endif
25204diff --git a/include/linux/security.h b/include/linux/security.h
25205index d40d23f..239e40d 100644
25206--- a/include/linux/security.h
25207+++ b/include/linux/security.h
25208@@ -95,13 +95,8 @@ struct seq_file;
25209 extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
25210 extern int cap_netlink_recv(struct sk_buff *skb, int cap);
25211
25212-#ifdef CONFIG_MMU
25213 extern unsigned long mmap_min_addr;
25214 extern unsigned long dac_mmap_min_addr;
25215-#else
25216-#define dac_mmap_min_addr 0UL
25217-#endif
25218-
25219 /*
25220 * Values used in the task_security_ops calls
25221 */
25222@@ -126,7 +121,6 @@ struct request_sock;
25223 #define LSM_UNSAFE_PTRACE 2
25224 #define LSM_UNSAFE_PTRACE_CAP 4
25225
25226-#ifdef CONFIG_MMU
25227 /*
25228 * If a hint addr is less than mmap_min_addr change hint to be as
25229 * low as possible but still greater than mmap_min_addr
25230@@ -141,7 +135,6 @@ static inline unsigned long round_hint_to_min(unsigned long hint)
25231 }
25232 extern int mmap_min_addr_handler(struct ctl_table *table, int write,
25233 void __user *buffer, size_t *lenp, loff_t *ppos);
25234-#endif
25235
25236 #ifdef CONFIG_SECURITY
25237
25238diff --git a/include/linux/smp.h b/include/linux/smp.h
25239index 39c64ba..76bb3e4 100644
25240--- a/include/linux/smp.h
@@ -25251,31 +956,6 @@ index 39c64ba..76bb3e4 100644
25251 * Generic and arch helpers
25252 */
25253 #ifdef CONFIG_USE_GENERIC_SMP_HELPERS
25254diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
25255index 93515c6..a990ace 100644
25256--- a/include/linux/syscalls.h
25257+++ b/include/linux/syscalls.h
25258@@ -879,8 +879,4 @@ int kernel_execve(const char *filename, char *const argv[], char *const envp[]);
25259 asmlinkage long sys_perf_event_open(
25260 struct perf_event_attr __user *attr_uptr,
25261 pid_t pid, int cpu, int group_fd, unsigned long flags);
25262-
25263-asmlinkage long sys_mmap_pgoff(unsigned long addr, unsigned long len,
25264- unsigned long prot, unsigned long flags,
25265- unsigned long fd, unsigned long pgoff);
25266 #endif
25267diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
25268index 0eb6942..1e4743e 100644
25269--- a/include/linux/sysctl.h
25270+++ b/include/linux/sysctl.h
25271@@ -490,7 +490,6 @@ enum
25272 NET_IPV4_CONF_PROMOTE_SECONDARIES=20,
25273 NET_IPV4_CONF_ARP_ACCEPT=21,
25274 NET_IPV4_CONF_ARP_NOTIFY=22,
25275- NET_IPV4_CONF_SRC_VMARK=24,
25276 __NET_IPV4_CONF_MAX
25277 };
25278
25279diff --git a/include/linux/tick.h b/include/linux/tick.h
25280index 0482229..4f9ba05 100644
25281--- a/include/linux/tick.h
@@ -25292,49 +972,6 @@ index 0482229..4f9ba05 100644
25292 extern int tick_init_highres(void);
25293 extern int tick_program_event(ktime_t expires, int force);
25294 extern void tick_setup_sched_timer(void);
25295diff --git a/include/linux/time.h b/include/linux/time.h
25296index 6e026e4..fe04e5e 100644
25297--- a/include/linux/time.h
25298+++ b/include/linux/time.h
25299@@ -148,7 +148,6 @@ extern void monotonic_to_bootbased(struct timespec *ts);
25300
25301 extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
25302 extern int timekeeping_valid_for_hres(void);
25303-extern u64 timekeeping_max_deferment(void);
25304 extern void update_wall_time(void);
25305 extern void update_xtime_cache(u64 nsec);
25306 extern void timekeeping_leap_insert(int leapsecond);
25307diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
25308index a4b947e..3d15fb9 100644
25309--- a/include/linux/usb_usual.h
25310+++ b/include/linux/usb_usual.h
25311@@ -56,9 +56,7 @@
25312 US_FLAG(SANE_SENSE, 0x00008000) \
25313 /* Sane Sense (> 18 bytes) */ \
25314 US_FLAG(CAPACITY_OK, 0x00010000) \
25315- /* READ CAPACITY response is correct */ \
25316- US_FLAG(BAD_SENSE, 0x00020000) \
25317- /* Bad Sense (never more than 18 bytes) */
25318+ /* READ CAPACITY response is correct */
25319
25320 #define US_FLAG(name, value) US_FL_##name = value ,
25321 enum { US_DO_ALL_FLAGS };
25322diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
25323index 3c123c3..227c2a5 100644
25324--- a/include/linux/vmalloc.h
25325+++ b/include/linux/vmalloc.h
25326@@ -115,11 +115,9 @@ extern rwlock_t vmlist_lock;
25327 extern struct vm_struct *vmlist;
25328 extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
25329
25330-#ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA
25331 struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
25332 const size_t *sizes, int nr_vms,
25333 size_t align, gfp_t gfp_mask);
25334-#endif
25335
25336 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
25337
25338diff --git a/include/litmus/bheap.h b/include/litmus/bheap.h
25339new file mode 100644
25340index 0000000..cf4864a
@@ -26374,10 +2011,10 @@ index 0000000..5b94d1a
26374+#endif
26375diff --git a/include/litmus/sched_plugin.h b/include/litmus/sched_plugin.h
26376new file mode 100644
26377index 0000000..2d856d5
2014index 0000000..9c1c9f2
26378--- /dev/null
26379+++ b/include/litmus/sched_plugin.h
26380@@ -0,0 +1,159 @@
2017@@ -0,0 +1,162 @@
26381+/*
26382+ * Definition of the scheduler plugin interface.
26383+ *
@@ -26513,6 +2150,9 @@ index 0000000..2d856d5
26513+
26514+extern struct sched_plugin *litmus;
26515+
2153+/* cluster size: cache_index = 2 L2, cache_index = 3 L3 */
2154+extern int cluster_cache_index;
2155+
26516+int register_sched_plugin(struct sched_plugin* plugin);
26517+struct sched_plugin* find_sched_plugin(const char* name);
26518+int print_sched_plugins(char* buf, int max);
@@ -26926,441 +2566,6 @@ index 0000000..f0618e7
26926+__SYSCALL(__NR_null_call, sys_null_call)
26927+
26928+#define NR_litmus_syscalls 14
26929diff --git a/include/net/ip.h b/include/net/ip.h
26930index 69db943..2f47e54 100644
26931--- a/include/net/ip.h
26932+++ b/include/net/ip.h
26933@@ -342,7 +342,6 @@ enum ip_defrag_users
26934 IP_DEFRAG_CALL_RA_CHAIN,
26935 IP_DEFRAG_CONNTRACK_IN,
26936 IP_DEFRAG_CONNTRACK_OUT,
26937- IP_DEFRAG_CONNTRACK_BRIDGE_IN,
26938 IP_DEFRAG_VS_IN,
26939 IP_DEFRAG_VS_OUT,
26940 IP_DEFRAG_VS_FWD
26941diff --git a/include/net/ipv6.h b/include/net/ipv6.h
26942index 639bbf0..8c31d8a 100644
26943--- a/include/net/ipv6.h
26944+++ b/include/net/ipv6.h
26945@@ -354,16 +354,8 @@ static inline int ipv6_prefix_equal(const struct in6_addr *a1,
26946
26947 struct inet_frag_queue;
26948
26949-enum ip6_defrag_users {
26950- IP6_DEFRAG_LOCAL_DELIVER,
26951- IP6_DEFRAG_CONNTRACK_IN,
26952- IP6_DEFRAG_CONNTRACK_OUT,
26953- IP6_DEFRAG_CONNTRACK_BRIDGE_IN,
26954-};
26955-
26956 struct ip6_create_arg {
26957 __be32 id;
26958- u32 user;
26959 struct in6_addr *src;
26960 struct in6_addr *dst;
26961 };
26962diff --git a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
26963index 1ee717e..abc55ad 100644
26964--- a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
26965+++ b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
26966@@ -9,7 +9,7 @@ extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6;
26967
26968 extern int nf_ct_frag6_init(void);
26969 extern void nf_ct_frag6_cleanup(void);
26970-extern struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user);
26971+extern struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb);
26972 extern void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb,
26973 struct net_device *in,
26974 struct net_device *out,
26975diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
26976index 63d4498..ba1ba0c 100644
26977--- a/include/net/netns/conntrack.h
26978+++ b/include/net/netns/conntrack.h
26979@@ -11,8 +11,6 @@ struct nf_conntrack_ecache;
26980 struct netns_ct {
26981 atomic_t count;
26982 unsigned int expect_count;
26983- unsigned int htable_size;
26984- struct kmem_cache *nf_conntrack_cachep;
26985 struct hlist_nulls_head *hash;
26986 struct hlist_head *expect_hash;
26987 struct hlist_nulls_head unconfirmed;
26988@@ -30,6 +28,5 @@ struct netns_ct {
26989 #endif
26990 int hash_vmalloc;
26991 int expect_vmalloc;
26992- char *slabname;
26993 };
26994 #endif
26995diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
26996index 9a4b8b7..2eb3814 100644
26997--- a/include/net/netns/ipv4.h
26998+++ b/include/net/netns/ipv4.h
26999@@ -40,7 +40,6 @@ struct netns_ipv4 {
27000 struct xt_table *iptable_security;
27001 struct xt_table *nat_table;
27002 struct hlist_head *nat_bysource;
27003- unsigned int nat_htable_size;
27004 int nat_vmalloced;
27005 #endif
27006
27007diff --git a/include/net/netrom.h b/include/net/netrom.h
27008index ab170a6..15696b1 100644
27009--- a/include/net/netrom.h
27010+++ b/include/net/netrom.h
27011@@ -132,8 +132,6 @@ static __inline__ void nr_node_put(struct nr_node *nr_node)
27012 static __inline__ void nr_neigh_put(struct nr_neigh *nr_neigh)
27013 {
27014 if (atomic_dec_and_test(&nr_neigh->refcount)) {
27015- if (nr_neigh->ax25)
27016- ax25_cb_put(nr_neigh->ax25);
27017 kfree(nr_neigh->digipeat);
27018 kfree(nr_neigh);
27019 }
27020diff --git a/include/net/tcp.h b/include/net/tcp.h
27021index 842ac4d..03a49c7 100644
27022--- a/include/net/tcp.h
27023+++ b/include/net/tcp.h
27024@@ -1263,20 +1263,14 @@ static inline struct sk_buff *tcp_write_queue_prev(struct sock *sk, struct sk_bu
27026 * TCP connection after "boundary" unsuccessful, exponentially backed-off
27026 * retransmissions with an initial RTO of TCP_RTO_MIN.
27027 */
27028-static inline bool retransmits_timed_out(struct sock *sk,
27029+static inline bool retransmits_timed_out(const struct sock *sk,
27030 unsigned int boundary)
27031 {
27032 unsigned int timeout, linear_backoff_thresh;
27033- unsigned int start_ts;
27034
27035 if (!inet_csk(sk)->icsk_retransmits)
27036 return false;
27037
27038- if (unlikely(!tcp_sk(sk)->retrans_stamp))
27039- start_ts = TCP_SKB_CB(tcp_write_queue_head(sk))->when;
27040- else
27041- start_ts = tcp_sk(sk)->retrans_stamp;
27042-
27043 linear_backoff_thresh = ilog2(TCP_RTO_MAX/TCP_RTO_MIN);
27044
27045 if (boundary <= linear_backoff_thresh)
27046@@ -1285,7 +1279,7 @@ static inline bool retransmits_timed_out(struct sock *sk,
27047 timeout = ((2 << linear_backoff_thresh) - 1) * TCP_RTO_MIN +
27048 (boundary - linear_backoff_thresh) * TCP_RTO_MAX;
27049
27050- return (tcp_time_stamp - start_ts) >= timeout;
27051+ return (tcp_time_stamp - tcp_sk(sk)->retrans_stamp) >= timeout;
27052 }
27053
27054 static inline struct sk_buff *tcp_send_head(struct sock *sk)
27055diff --git a/include/scsi/fc_frame.h b/include/scsi/fc_frame.h
27056index 148126d..c35d238 100644
27057--- a/include/scsi/fc_frame.h
27058+++ b/include/scsi/fc_frame.h
27059@@ -37,9 +37,6 @@
27060 #define FC_FRAME_HEADROOM 32 /* headroom for VLAN + FCoE headers */
27061 #define FC_FRAME_TAILROOM 8 /* trailer space for FCoE */
27062
27063-/* Max number of skb frags allowed, reserving one for fcoe_crc_eof page */
27064-#define FC_FRAME_SG_LEN (MAX_SKB_FRAGS - 1)
27065-
27066 #define fp_skb(fp) (&((fp)->skb))
27067 #define fr_hdr(fp) ((fp)->skb.data)
27068 #define fr_len(fp) ((fp)->skb.len)
27069diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
27070index 09a124b..65dc9aa 100644
27071--- a/include/scsi/libfc.h
27072+++ b/include/scsi/libfc.h
27073@@ -145,7 +145,6 @@ enum fc_rport_state {
27074 RPORT_ST_LOGO, /* port logout sent */
27075 RPORT_ST_ADISC, /* Discover Address sent */
27076 RPORT_ST_DELETE, /* port being deleted */
27077- RPORT_ST_RESTART, /* remote port being deleted and will restart */
27078 };
27079
27080 /**
27081diff --git a/include/scsi/osd_protocol.h b/include/scsi/osd_protocol.h
27082index 6856612..2cc8e8b 100644
27083--- a/include/scsi/osd_protocol.h
27084+++ b/include/scsi/osd_protocol.h
27085@@ -17,7 +17,6 @@
27086 #define __OSD_PROTOCOL_H__
27087
27088 #include <linux/types.h>
27089-#include <linux/kernel.h>
27090 #include <asm/unaligned.h>
27091 #include <scsi/scsi.h>
27092
27093diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
27094index 0b4baba..47941fc 100644
27095--- a/include/scsi/scsi_host.h
27096+++ b/include/scsi/scsi_host.h
27097@@ -677,12 +677,6 @@ struct Scsi_Host {
27098 void *shost_data;
27099
27100 /*
27101- * Points to the physical bus device we'd use to do DMA
27102- * Needed just in case we have virtual hosts.
27103- */
27104- struct device *dma_dev;
27105-
27106- /*
27107 * We should ensure that this is aligned, both for better performance
27108 * and also because some compilers (m68k) don't automatically force
27109 * alignment to a long boundary.
27110@@ -726,9 +720,7 @@ extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *);
27111 extern void scsi_flush_work(struct Scsi_Host *);
27112
27113 extern struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *, int);
27114-extern int __must_check scsi_add_host_with_dma(struct Scsi_Host *,
27115- struct device *,
27116- struct device *);
27117+extern int __must_check scsi_add_host(struct Scsi_Host *, struct device *);
27118 extern void scsi_scan_host(struct Scsi_Host *);
27119 extern void scsi_rescan_device(struct device *);
27120 extern void scsi_remove_host(struct Scsi_Host *);
27121@@ -739,12 +731,6 @@ extern const char *scsi_host_state_name(enum scsi_host_state);
27122
27123 extern u64 scsi_calculate_bounce_limit(struct Scsi_Host *);
27124
27125-static inline int __must_check scsi_add_host(struct Scsi_Host *host,
27126- struct device *dev)
27127-{
27128- return scsi_add_host_with_dma(host, dev, dev);
27129-}
27130-
27131 static inline struct device *scsi_get_device(struct Scsi_Host *shost)
27132 {
27133 return shost->shost_gendev.parent;
27134diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
27135index dacb8ef..cc0d966 100644
27136--- a/include/trace/ftrace.h
27137+++ b/include/trace/ftrace.h
27138@@ -159,7 +159,7 @@
27139 #undef __get_str
27140
27141 #undef TP_printk
27142-#define TP_printk(fmt, args...) "\"%s\", %s\n", fmt, __stringify(args)
27143+#define TP_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args)
27144
27145 #undef TP_fast_assign
27146 #define TP_fast_assign(args...) args
27147diff --git a/kernel/acct.c b/kernel/acct.c
27148index a6605ca..9a4715a 100644
27149--- a/kernel/acct.c
27150+++ b/kernel/acct.c
27151@@ -536,8 +536,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
27152 do_div(elapsed, AHZ);
27153 ac.ac_btime = get_seconds() - elapsed;
27154 /* we really need to bite the bullet and change layout */
27155- ac.ac_uid = orig_cred->uid;
27156- ac.ac_gid = orig_cred->gid;
27157+ current_uid_gid(&ac.ac_uid, &ac.ac_gid);
27158 #if ACCT_VERSION==2
27159 ac.ac_ahz = AHZ;
27160 #endif
27161diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
27162index 4b05bd9..2451dc6 100644
27163--- a/kernel/audit_tree.c
27164+++ b/kernel/audit_tree.c
27165@@ -277,7 +277,7 @@ static void untag_chunk(struct node *p)
27166 owner->root = NULL;
27167 }
27168
27169- for (i = j = 0; j <= size; i++, j++) {
27170+ for (i = j = 0; i < size; i++, j++) {
27171 struct audit_tree *s;
27172 if (&chunk->owners[j] == p) {
27173 list_del_init(&p->list);
27174@@ -290,7 +290,7 @@ static void untag_chunk(struct node *p)
27175 if (!s) /* result of earlier fallback */
27176 continue;
27177 get_tree(s);
27178- list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
27179+ list_replace_init(&chunk->owners[i].list, &new->owners[j].list);
27180 }
27181
27182 list_replace_rcu(&chunk->hash, &new->hash);
27183@@ -373,17 +373,15 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
27184 for (n = 0; n < old->count; n++) {
27185 if (old->owners[n].owner == tree) {
27186 spin_unlock(&hash_lock);
27187- put_inotify_watch(&old->watch);
27188+ put_inotify_watch(watch);
27189 return 0;
27190 }
27191 }
27192 spin_unlock(&hash_lock);
27193
27194 chunk = alloc_chunk(old->count + 1);
27195- if (!chunk) {
27196- put_inotify_watch(&old->watch);
27197+ if (!chunk)
27198 return -ENOMEM;
27199- }
27200
27201 mutex_lock(&inode->inotify_mutex);
27202 if (inotify_clone_watch(&old->watch, &chunk->watch) < 0) {
27203@@ -427,8 +425,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
27204 spin_unlock(&hash_lock);
27205 inotify_evict_watch(&old->watch);
27206 mutex_unlock(&inode->inotify_mutex);
27207- put_inotify_watch(&old->watch); /* pair to inotify_find_watch */
27208- put_inotify_watch(&old->watch); /* and kill it */
27209+ put_inotify_watch(&old->watch);
27210 return 0;
27211 }
27212
27213diff --git a/kernel/cgroup.c b/kernel/cgroup.c
27214index 1fbcc74..0249f4b 100644
27215--- a/kernel/cgroup.c
27216+++ b/kernel/cgroup.c
27217@@ -2468,6 +2468,7 @@ static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
27218 /* make sure l doesn't vanish out from under us */
27219 down_write(&l->mutex);
27220 mutex_unlock(&cgrp->pidlist_mutex);
27221+ l->use_count++;
27222 return l;
27223 }
27224 }
27225diff --git a/kernel/cpu.c b/kernel/cpu.c
27226index 291ac58..6ba0f1e 100644
27227--- a/kernel/cpu.c
27228+++ b/kernel/cpu.c
27229@@ -212,8 +212,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
27230 err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
27231 hcpu, -1, &nr_calls);
27232 if (err == NOTIFY_BAD) {
27233- set_cpu_active(cpu, true);
27234-
27235 nr_calls--;
27236 __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
27237 hcpu, nr_calls, NULL);
27238@@ -225,11 +223,11 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
27239
27240 /* Ensure that we are not runnable on dying cpu */
27241 cpumask_copy(old_allowed, &current->cpus_allowed);
27242- set_cpus_allowed_ptr(current, cpu_active_mask);
27243+ set_cpus_allowed_ptr(current,
27244+ cpumask_of(cpumask_any_but(cpu_online_mask, cpu)));
27245
27246 err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
27247 if (err) {
27248- set_cpu_active(cpu, true);
27249 /* CPU didn't die: tell everyone. Can't complain. */
27250 if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
27251 hcpu) == NOTIFY_BAD)
27252@@ -294,6 +292,9 @@ int __ref cpu_down(unsigned int cpu)
27253
27254 err = _cpu_down(cpu, 0);
27255
27256+ if (cpu_online(cpu))
27257+ set_cpu_active(cpu, true);
27258+
27259 out:
27260 cpu_maps_update_done();
27261 stop_machine_destroy();
27262@@ -386,23 +387,15 @@ int disable_nonboot_cpus(void)
27263 * with the userspace trying to use the CPU hotplug at the same time
27264 */
27265 cpumask_clear(frozen_cpus);
27266-
27267- for_each_online_cpu(cpu) {
27268- if (cpu == first_cpu)
27269- continue;
27270- set_cpu_active(cpu, false);
27271- }
27272-
27273- synchronize_sched();
27274-
27275 printk("Disabling non-boot CPUs ...\n");
27276 for_each_online_cpu(cpu) {
27277 if (cpu == first_cpu)
27278 continue;
27279 error = _cpu_down(cpu, 1);
27280- if (!error)
27281+ if (!error) {
27282 cpumask_set_cpu(cpu, frozen_cpus);
27283- else {
27284+ printk("CPU%d is down\n", cpu);
27285+ } else {
27286 printk(KERN_ERR "Error taking CPU%d down: %d\n",
27287 cpu, error);
27288 break;
27289diff --git a/kernel/cpuset.c b/kernel/cpuset.c
27290index 39e5121..b5cb469 100644
27291--- a/kernel/cpuset.c
27292+++ b/kernel/cpuset.c
27293@@ -873,7 +873,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
27294 if (retval < 0)
27295 return retval;
27296
27297- if (!cpumask_subset(trialcs->cpus_allowed, cpu_active_mask))
27298+ if (!cpumask_subset(trialcs->cpus_allowed, cpu_online_mask))
27299 return -EINVAL;
27300 }
27301 retval = validate_change(cs, trialcs);
27302@@ -2011,7 +2011,7 @@ static void scan_for_empty_cpusets(struct cpuset *root)
27303 }
27304
27305 /* Continue past cpusets with all cpus, mems online */
27306- if (cpumask_subset(cp->cpus_allowed, cpu_active_mask) &&
27307+ if (cpumask_subset(cp->cpus_allowed, cpu_online_mask) &&
27308 nodes_subset(cp->mems_allowed, node_states[N_HIGH_MEMORY]))
27309 continue;
27310
27311@@ -2020,7 +2020,7 @@ static void scan_for_empty_cpusets(struct cpuset *root)
27312 /* Remove offline cpus and mems from this cpuset. */
27313 mutex_lock(&callback_mutex);
27314 cpumask_and(cp->cpus_allowed, cp->cpus_allowed,
27315- cpu_active_mask);
27316+ cpu_online_mask);
27317 nodes_and(cp->mems_allowed, cp->mems_allowed,
27318 node_states[N_HIGH_MEMORY]);
27319 mutex_unlock(&callback_mutex);
27320@@ -2058,10 +2058,8 @@ static int cpuset_track_online_cpus(struct notifier_block *unused_nb,
27321 switch (phase) {
27322 case CPU_ONLINE:
27323 case CPU_ONLINE_FROZEN:
27324- case CPU_DOWN_PREPARE:
27325- case CPU_DOWN_PREPARE_FROZEN:
27326- case CPU_DOWN_FAILED:
27327- case CPU_DOWN_FAILED_FROZEN:
27328+ case CPU_DEAD:
27329+ case CPU_DEAD_FROZEN:
27330 break;
27331
27332 default:
27333@@ -2070,7 +2068,7 @@ static int cpuset_track_online_cpus(struct notifier_block *unused_nb,
27334
27335 cgroup_lock();
27336 mutex_lock(&callback_mutex);
27337- cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
27338+ cpumask_copy(top_cpuset.cpus_allowed, cpu_online_mask);
27339 mutex_unlock(&callback_mutex);
27340 scan_for_empty_cpusets(&top_cpuset);
27341 ndoms = generate_sched_domains(&doms, &attr);
27342@@ -2117,7 +2115,7 @@ static int cpuset_track_online_nodes(struct notifier_block *self,
27343
27344 void __init cpuset_init_smp(void)
27345 {
27346- cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
27347+ cpumask_copy(top_cpuset.cpus_allowed, cpu_online_mask);
27348 top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
27349
27350 hotcpu_notifier(cpuset_track_online_cpus, 0);
27351diff --git a/kernel/cred.c b/kernel/cred.c
27352index 1ed8ca1..dd76cfe 100644
27353--- a/kernel/cred.c
27354+++ b/kernel/cred.c
27355@@ -224,7 +224,7 @@ struct cred *cred_alloc_blank(void)
27356 #ifdef CONFIG_KEYS
27357 new->tgcred = kzalloc(sizeof(*new->tgcred), GFP_KERNEL);
27358 if (!new->tgcred) {
27359- kmem_cache_free(cred_jar, new);
27360+ kfree(new);
27361 return NULL;
27362 }
27363 atomic_set(&new->tgcred->usage, 1);
27364 2569 diff --git a/kernel/exit.c b/kernel/exit.c
27365 2570 index f7864ac..3da0425 100644
27366 2571 --- a/kernel/exit.c
@@ -27415,186 +2620,6 @@ index 166b8c4..9fad346 100644
27415 2620 err = prop_local_init_single(&tsk->dirties);
27416 2621 if (err)
27417 2622 goto out;
27418diff --git a/kernel/futex.c b/kernel/futex.c
27419index 1ad4fa6..fb65e82 100644
27420--- a/kernel/futex.c
27421+++ b/kernel/futex.c
27422@@ -203,6 +203,8 @@ static void drop_futex_key_refs(union futex_key *key)
27423 * @uaddr: virtual address of the futex
27424 * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
27425 * @key: address where result is stored.
27426+ * @rw: mapping needs to be read/write (values: VERIFY_READ,
27427+ * VERIFY_WRITE)
27428 *
27429 * Returns a negative error code or 0
27430 * The key words are stored in *key on success.
27431@@ -214,7 +216,7 @@ static void drop_futex_key_refs(union futex_key *key)
27432 * lock_page() might sleep, the caller should not hold a spinlock.
27433 */
27434 static int
27435-get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
27436+get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
27437 {
27438 unsigned long address = (unsigned long)uaddr;
27439 struct mm_struct *mm = current->mm;
27440@@ -237,7 +239,7 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
27441 * but access_ok() should be faster than find_vma()
27442 */
27443 if (!fshared) {
27444- if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
27445+ if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
27446 return -EFAULT;
27447 key->private.mm = mm;
27448 key->private.address = address;
27449@@ -246,7 +248,7 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
27450 }
27451
27452 again:
27453- err = get_user_pages_fast(address, 1, 1, &page);
27454+ err = get_user_pages_fast(address, 1, rw == VERIFY_WRITE, &page);
27455 if (err < 0)
27456 return err;
27457
27458@@ -302,14 +304,8 @@ void put_futex_key(int fshared, union futex_key *key)
27459 */
27460 static int fault_in_user_writeable(u32 __user *uaddr)
27461 {
27462- struct mm_struct *mm = current->mm;
27463- int ret;
27464-
27465- down_read(&mm->mmap_sem);
27466- ret = get_user_pages(current, mm, (unsigned long)uaddr,
27467- 1, 1, 0, NULL, NULL);
27468- up_read(&mm->mmap_sem);
27469-
27470+ int ret = get_user_pages(current, current->mm, (unsigned long)uaddr,
27471+ 1, 1, 0, NULL, NULL);
27472 return ret < 0 ? ret : 0;
27473 }
27474
27475@@ -530,25 +526,8 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
27476 return -EINVAL;
27477
27478 WARN_ON(!atomic_read(&pi_state->refcount));
27479-
27480- /*
27481- * When pi_state->owner is NULL then the owner died
27482- * and another waiter is on the fly. pi_state->owner
27483- * is fixed up by the task which acquires
27484- * pi_state->rt_mutex.
27485- *
27486- * We do not check for pid == 0 which can happen when
27487- * the owner died and robust_list_exit() cleared the
27488- * TID.
27489- */
27490- if (pid && pi_state->owner) {
27491- /*
27492- * Bail out if user space manipulated the
27493- * futex value.
27494- */
27495- if (pid != task_pid_vnr(pi_state->owner))
27496- return -EINVAL;
27497- }
27498+ WARN_ON(pid && pi_state->owner &&
27499+ pi_state->owner->pid != pid);
27500
27501 atomic_inc(&pi_state->refcount);
27502 *ps = pi_state;
27503@@ -775,13 +754,6 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
27504 if (!pi_state)
27505 return -EINVAL;
27506
27507- /*
27508- * If current does not own the pi_state then the futex is
27509- * inconsistent and user space fiddled with the futex value.
27510- */
27511- if (pi_state->owner != current)
27512- return -EINVAL;
27513-
27514 spin_lock(&pi_state->pi_mutex.wait_lock);
27515 new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
27516
27517@@ -889,7 +861,7 @@ static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset)
27518 if (!bitset)
27519 return -EINVAL;
27520
27521- ret = get_futex_key(uaddr, fshared, &key);
27522+ ret = get_futex_key(uaddr, fshared, &key, VERIFY_READ);
27523 if (unlikely(ret != 0))
27524 goto out;
27525
27526@@ -935,10 +907,10 @@ futex_wake_op(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
27527 int ret, op_ret;
27528
27529 retry:
27530- ret = get_futex_key(uaddr1, fshared, &key1);
27531+ ret = get_futex_key(uaddr1, fshared, &key1, VERIFY_READ);
27532 if (unlikely(ret != 0))
27533 goto out;
27534- ret = get_futex_key(uaddr2, fshared, &key2);
27535+ ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE);
27536 if (unlikely(ret != 0))
27537 goto out_put_key1;
27538
27539@@ -1197,10 +1169,11 @@ retry:
27540 pi_state = NULL;
27541 }
27542
27543- ret = get_futex_key(uaddr1, fshared, &key1);
27544+ ret = get_futex_key(uaddr1, fshared, &key1, VERIFY_READ);
27545 if (unlikely(ret != 0))
27546 goto out;
27547- ret = get_futex_key(uaddr2, fshared, &key2);
27548+ ret = get_futex_key(uaddr2, fshared, &key2,
27549+ requeue_pi ? VERIFY_WRITE : VERIFY_READ);
27550 if (unlikely(ret != 0))
27551 goto out_put_key1;
27552
27553@@ -1759,7 +1732,7 @@ static int futex_wait_setup(u32 __user *uaddr, u32 val, int fshared,
27554 */
27555 retry:
27556 q->key = FUTEX_KEY_INIT;
27557- ret = get_futex_key(uaddr, fshared, &q->key);
27558+ ret = get_futex_key(uaddr, fshared, &q->key, VERIFY_READ);
27559 if (unlikely(ret != 0))
27560 return ret;
27561
27562@@ -1925,7 +1898,7 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
27563 q.requeue_pi_key = NULL;
27564 retry:
27565 q.key = FUTEX_KEY_INIT;
27566- ret = get_futex_key(uaddr, fshared, &q.key);
27567+ ret = get_futex_key(uaddr, fshared, &q.key, VERIFY_WRITE);
27568 if (unlikely(ret != 0))
27569 goto out;
27570
27571@@ -1995,7 +1968,7 @@ retry_private:
27572 /* Unqueue and drop the lock */
27573 unqueue_me_pi(&q);
27574
27575- goto out_put_key;
27576+ goto out;
27577
27578 out_unlock_put_key:
27579 queue_unlock(&q, hb);
27580@@ -2044,7 +2017,7 @@ retry:
27581 if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current))
27582 return -EPERM;
27583
27584- ret = get_futex_key(uaddr, fshared, &key);
27585+ ret = get_futex_key(uaddr, fshared, &key, VERIFY_WRITE);
27586 if (unlikely(ret != 0))
27587 goto out;
27588
27589@@ -2236,7 +2209,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
27590 rt_waiter.task = NULL;
27591
27592 key2 = FUTEX_KEY_INIT;
27593- ret = get_futex_key(uaddr2, fshared, &key2);
27594+ ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE);
27595 if (unlikely(ret != 0))
27596 goto out;
27597
27598 2623 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
27599 2624 index 3e1c36e..7b19403 100644
27600 2625 --- a/kernel/hrtimer.c
@@ -27702,282 +2727,6 @@ index 3e1c36e..7b19403 100644
27702 2727 }
27703 2728
27704 2729 #ifdef CONFIG_HOTPLUG_CPU
27705diff --git a/kernel/module.c b/kernel/module.c
27706index dfa33e8..5842a71 100644
27707--- a/kernel/module.c
27708+++ b/kernel/module.c
27709@@ -1030,23 +1030,11 @@ static int try_to_force_load(struct module *mod, const char *reason)
27710 }
27711
27712 #ifdef CONFIG_MODVERSIONS
27713-/* If the arch applies (non-zero) relocations to kernel kcrctab, unapply it. */
27714-static unsigned long maybe_relocated(unsigned long crc,
27715- const struct module *crc_owner)
27716-{
27717-#ifdef ARCH_RELOCATES_KCRCTAB
27718- if (crc_owner == NULL)
27719- return crc - (unsigned long)reloc_start;
27720-#endif
27721- return crc;
27722-}
27723-
27724 static int check_version(Elf_Shdr *sechdrs,
27725 unsigned int versindex,
27726 const char *symname,
27727 struct module *mod,
27728- const unsigned long *crc,
27729- const struct module *crc_owner)
27730+ const unsigned long *crc)
27731 {
27732 unsigned int i, num_versions;
27733 struct modversion_info *versions;
27734@@ -1067,10 +1055,10 @@ static int check_version(Elf_Shdr *sechdrs,
27735 if (strcmp(versions[i].name, symname) != 0)
27736 continue;
27737
27738- if (versions[i].crc == maybe_relocated(*crc, crc_owner))
27739+ if (versions[i].crc == *crc)
27740 return 1;
27741 DEBUGP("Found checksum %lX vs module %lX\n",
27742- maybe_relocated(*crc, crc_owner), versions[i].crc);
27743+ *crc, versions[i].crc);
27744 goto bad_version;
27745 }
27746
27747@@ -1093,8 +1081,7 @@ static inline int check_modstruct_version(Elf_Shdr *sechdrs,
27748 if (!find_symbol(MODULE_SYMBOL_PREFIX "module_layout", NULL,
27749 &crc, true, false))
27750 BUG();
27751- return check_version(sechdrs, versindex, "module_layout", mod, crc,
27752- NULL);
27753+ return check_version(sechdrs, versindex, "module_layout", mod, crc);
27754 }
27755
27756 /* First part is kernel version, which we ignore if module has crcs. */
27757@@ -1112,8 +1099,7 @@ static inline int check_version(Elf_Shdr *sechdrs,
27758 unsigned int versindex,
27759 const char *symname,
27760 struct module *mod,
27761- const unsigned long *crc,
27762- const struct module *crc_owner)
27763+ const unsigned long *crc)
27764 {
27765 return 1;
27766 }
27767@@ -1148,8 +1134,8 @@ static const struct kernel_symbol *resolve_symbol(Elf_Shdr *sechdrs,
27768 /* use_module can fail due to OOM,
27769 or module initialization or unloading */
27770 if (sym) {
27771- if (!check_version(sechdrs, versindex, name, mod, crc, owner)
27772- || !use_module(mod, owner))
27773+ if (!check_version(sechdrs, versindex, name, mod, crc) ||
27774+ !use_module(mod, owner))
27775 sym = NULL;
27776 }
27777 return sym;
27778@@ -1160,12 +1146,6 @@ static const struct kernel_symbol *resolve_symbol(Elf_Shdr *sechdrs,
27779 * J. Corbet <corbet@lwn.net>
27780 */
27781 #if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
27782-
27783-static inline bool sect_empty(const Elf_Shdr *sect)
27784-{
27785- return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
27786-}
27787-
27788 struct module_sect_attr
27789 {
27790 struct module_attribute mattr;
27791@@ -1207,7 +1187,8 @@ static void add_sect_attrs(struct module *mod, unsigned int nsect,
27792
27793 /* Count loaded sections and allocate structures */
27794 for (i = 0; i < nsect; i++)
27795- if (!sect_empty(&sechdrs[i]))
27796+ if (sechdrs[i].sh_flags & SHF_ALLOC
27797+ && sechdrs[i].sh_size)
27798 nloaded++;
27799 size[0] = ALIGN(sizeof(*sect_attrs)
27800 + nloaded * sizeof(sect_attrs->attrs[0]),
27801@@ -1225,7 +1206,9 @@ static void add_sect_attrs(struct module *mod, unsigned int nsect,
27802 sattr = &sect_attrs->attrs[0];
27803 gattr = &sect_attrs->grp.attrs[0];
27804 for (i = 0; i < nsect; i++) {
27805- if (sect_empty(&sechdrs[i]))
27806+ if (! (sechdrs[i].sh_flags & SHF_ALLOC))
27807+ continue;
27808+ if (!sechdrs[i].sh_size)
27809 continue;
27810 sattr->address = sechdrs[i].sh_addr;
27811 sattr->name = kstrdup(secstrings + sechdrs[i].sh_name,
27812@@ -1309,7 +1292,7 @@ static void add_notes_attrs(struct module *mod, unsigned int nsect,
27813 /* Count notes sections and allocate structures. */
27814 notes = 0;
27815 for (i = 0; i < nsect; i++)
27816- if (!sect_empty(&sechdrs[i]) &&
27817+ if ((sechdrs[i].sh_flags & SHF_ALLOC) &&
27818 (sechdrs[i].sh_type == SHT_NOTE))
27819 ++notes;
27820
27821@@ -1325,7 +1308,7 @@ static void add_notes_attrs(struct module *mod, unsigned int nsect,
27822 notes_attrs->notes = notes;
27823 nattr = &notes_attrs->attrs[0];
27824 for (loaded = i = 0; i < nsect; ++i) {
27825- if (sect_empty(&sechdrs[i]))
27826+ if (!(sechdrs[i].sh_flags & SHF_ALLOC))
27827 continue;
27828 if (sechdrs[i].sh_type == SHT_NOTE) {
27829 nattr->attr.name = mod->sect_attrs->attrs[loaded].name;
27830diff --git a/kernel/perf_event.c b/kernel/perf_event.c
27831index 413d101..7f29643 100644
27832--- a/kernel/perf_event.c
27833+++ b/kernel/perf_event.c
27834@@ -1359,9 +1359,6 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
27835 if (event->state != PERF_EVENT_STATE_ACTIVE)
27836 continue;
27837
27838- if (event->cpu != -1 && event->cpu != smp_processor_id())
27839- continue;
27840-
27841 hwc = &event->hw;
27842
27843 interrupts = hwc->interrupts;
27844@@ -1586,7 +1583,7 @@ static struct perf_event_context *find_get_context(pid_t pid, int cpu)
27845 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
27846 return ERR_PTR(-EACCES);
27847
27848- if (cpu < 0 || cpu >= nr_cpumask_bits)
27849+ if (cpu < 0 || cpu > num_possible_cpus())
27850 return ERR_PTR(-EINVAL);
27851
27852 /*
27853@@ -2177,7 +2174,6 @@ static void perf_mmap_data_free(struct perf_mmap_data *data)
27854 perf_mmap_free_page((unsigned long)data->user_page);
27855 for (i = 0; i < data->nr_pages; i++)
27856 perf_mmap_free_page((unsigned long)data->data_pages[i]);
27857- kfree(data);
27858 }
27859
27860 #else
27861@@ -2218,7 +2214,6 @@ static void perf_mmap_data_free_work(struct work_struct *work)
27862 perf_mmap_unmark_page(base + (i * PAGE_SIZE));
27863
27864 vfree(base);
27865- kfree(data);
27866 }
27867
27868 static void perf_mmap_data_free(struct perf_mmap_data *data)
27869@@ -2324,6 +2319,7 @@ static void perf_mmap_data_free_rcu(struct rcu_head *rcu_head)
27870
27871 data = container_of(rcu_head, struct perf_mmap_data, rcu_head);
27872 perf_mmap_data_free(data);
27873+ kfree(data);
27874 }
27875
27876 static void perf_mmap_data_release(struct perf_event *event)
27877@@ -3229,12 +3225,6 @@ static void perf_event_task_output(struct perf_event *event,
27878
27879 static int perf_event_task_match(struct perf_event *event)
27880 {
27881- if (event->state != PERF_EVENT_STATE_ACTIVE)
27882- return 0;
27883-
27884- if (event->cpu != -1 && event->cpu != smp_processor_id())
27885- return 0;
27886-
27887 if (event->attr.comm || event->attr.mmap || event->attr.task)
27888 return 1;
27889
27890@@ -3264,13 +3254,13 @@ static void perf_event_task_event(struct perf_task_event *task_event)
27891
27892 cpuctx = &get_cpu_var(perf_cpu_context);
27893 perf_event_task_ctx(&cpuctx->ctx, task_event);
27894+ put_cpu_var(perf_cpu_context);
27895
27896 rcu_read_lock();
27897 if (!ctx)
27898 ctx = rcu_dereference(task_event->task->perf_event_ctxp);
27899 if (ctx)
27900 perf_event_task_ctx(ctx, task_event);
27901- put_cpu_var(perf_cpu_context);
27902 rcu_read_unlock();
27903 }
27904
27905@@ -3347,12 +3337,6 @@ static void perf_event_comm_output(struct perf_event *event,
27906
27907 static int perf_event_comm_match(struct perf_event *event)
27908 {
27909- if (event->state != PERF_EVENT_STATE_ACTIVE)
27910- return 0;
27911-
27912- if (event->cpu != -1 && event->cpu != smp_processor_id())
27913- return 0;
27914-
27915 if (event->attr.comm)
27916 return 1;
27917
27918@@ -3393,6 +3377,7 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
27919
27920 cpuctx = &get_cpu_var(perf_cpu_context);
27921 perf_event_comm_ctx(&cpuctx->ctx, comm_event);
27922+ put_cpu_var(perf_cpu_context);
27923
27924 rcu_read_lock();
27925 /*
27926@@ -3402,7 +3387,6 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
27927 ctx = rcu_dereference(current->perf_event_ctxp);
27928 if (ctx)
27929 perf_event_comm_ctx(ctx, comm_event);
27930- put_cpu_var(perf_cpu_context);
27931 rcu_read_unlock();
27932 }
27933
27934@@ -3477,12 +3461,6 @@ static void perf_event_mmap_output(struct perf_event *event,
27935 static int perf_event_mmap_match(struct perf_event *event,
27936 struct perf_mmap_event *mmap_event)
27937 {
27938- if (event->state != PERF_EVENT_STATE_ACTIVE)
27939- return 0;
27940-
27941- if (event->cpu != -1 && event->cpu != smp_processor_id())
27942- return 0;
27943-
27944 if (event->attr.mmap)
27945 return 1;
27946
27947@@ -3560,6 +3538,7 @@ got_name:
27948
27949 cpuctx = &get_cpu_var(perf_cpu_context);
27950 perf_event_mmap_ctx(&cpuctx->ctx, mmap_event);
27951+ put_cpu_var(perf_cpu_context);
27952
27953 rcu_read_lock();
27954 /*
27955@@ -3569,7 +3548,6 @@ got_name:
27956 ctx = rcu_dereference(current->perf_event_ctxp);
27957 if (ctx)
27958 perf_event_mmap_ctx(ctx, mmap_event);
27959- put_cpu_var(perf_cpu_context);
27960 rcu_read_unlock();
27961
27962 kfree(buf);
27963@@ -3832,9 +3810,6 @@ static int perf_swevent_match(struct perf_event *event,
27964 enum perf_type_id type,
27965 u32 event_id, struct pt_regs *regs)
27966 {
27967- if (event->cpu != -1 && event->cpu != smp_processor_id())
27968- return 0;
27969-
27970 if (!perf_swevent_is_counting(event))
27971 return 0;
27972
27973@@ -3974,7 +3949,6 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
27974 event->pmu->read(event);
27975
27976 data.addr = 0;
27977- data.period = event->hw.last_period;
27978 regs = get_irq_regs();
27979 /*
27980 * In case we exclude kernel IPs or are somehow not in interrupt
27981 2730 diff --git a/kernel/printk.c b/kernel/printk.c
27982 2731 index f38b07f..6712a25 100644
27983 2732 --- a/kernel/printk.c
@@ -28024,437 +2773,8 @@ index f38b07f..6712a25 100644
28024 2773 __raw_get_cpu_var(printk_pending) = 1;
28025 2774 }
28026 2775
28027diff --git a/kernel/rcutree.c b/kernel/rcutree.c
28028index 683c4f3..f3077c0 100644
28029--- a/kernel/rcutree.c
28030+++ b/kernel/rcutree.c
28031@@ -176,29 +176,9 @@ static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
28032 return &rsp->node[0];
28033 }
28034
28035-/*
28036- * Record the specified "completed" value, which is later used to validate
28037- * dynticks counter manipulations and CPU-offline checks. Specify
28038- * "rsp->completed - 1" to unconditionally invalidate any future dynticks
28039- * manipulations and CPU-offline checks. Such invalidation is useful at
28040- * the beginning of a grace period.
28041- */
28042-static void dyntick_record_completed(struct rcu_state *rsp, long comp)
28043-{
28044- rsp->dynticks_completed = comp;
28045-}
28046-
28047 #ifdef CONFIG_SMP
28048
28049 /*
28050- * Recall the previously recorded value of the completion for dynticks.
28051- */
28052-static long dyntick_recall_completed(struct rcu_state *rsp)
28053-{
28054- return rsp->dynticks_completed;
28055-}
28056-
28057-/*
28058 * If the specified CPU is offline, tell the caller that it is in
28059 * a quiescent state. Otherwise, whack it with a reschedule IPI.
28060 * Grace periods can end up waiting on an offline CPU when that
28061@@ -355,9 +335,28 @@ void rcu_irq_exit(void)
28062 set_need_resched();
28063 }
28064
28065+/*
28066+ * Record the specified "completed" value, which is later used to validate
28067+ * dynticks counter manipulations. Specify "rsp->completed - 1" to
28068+ * unconditionally invalidate any future dynticks manipulations (which is
28069+ * useful at the beginning of a grace period).
28070+ */
28071+static void dyntick_record_completed(struct rcu_state *rsp, long comp)
28072+{
28073+ rsp->dynticks_completed = comp;
28074+}
28075+
28076 #ifdef CONFIG_SMP
28077
28078 /*
28079+ * Recall the previously recorded value of the completion for dynticks.
28080+ */
28081+static long dyntick_recall_completed(struct rcu_state *rsp)
28082+{
28083+ return rsp->dynticks_completed;
28084+}
28085+
28086+/*
28087 * Snapshot the specified CPU's dynticks counter so that we can later
28088 * credit them with an implicit quiescent state. Return 1 if this CPU
28089 * is in dynticks idle mode, which is an extended quiescent state.
28090@@ -420,8 +419,24 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
28091
28092 #else /* #ifdef CONFIG_NO_HZ */
28093
28094+static void dyntick_record_completed(struct rcu_state *rsp, long comp)
28095+{
28096+}
28097+
28098 #ifdef CONFIG_SMP
28099
28100+/*
28101+ * If there are no dynticks, then the only way that a CPU can passively
28102+ * be in a quiescent state is to be offline. Unlike dynticks idle, which
28103+ * is a point in time during the prior (already finished) grace period,
28104+ * an offline CPU is always in a quiescent state, and thus can be
28105+ * unconditionally applied. So just return the current value of completed.
28106+ */
28107+static long dyntick_recall_completed(struct rcu_state *rsp)
28108+{
28109+ return rsp->completed;
28110+}
28111+
28112 static int dyntick_save_progress_counter(struct rcu_data *rdp)
28113 {
28114 return 0;
28115@@ -538,33 +553,13 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
28116 /*
28117 * Update CPU-local rcu_data state to record the newly noticed grace period.
28118 * This is used both when we started the grace period and when we notice
28119- * that someone else started the grace period. The caller must hold the
28120- * ->lock of the leaf rcu_node structure corresponding to the current CPU,
28121- * and must have irqs disabled.
28122+ * that someone else started the grace period.
28123 */
28124-static void __note_new_gpnum(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
28125-{
28126- if (rdp->gpnum != rnp->gpnum) {
28127- rdp->qs_pending = 1;
28128- rdp->passed_quiesc = 0;
28129- rdp->gpnum = rnp->gpnum;
28130- }
28131-}
28132-
28133 static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp)
28134 {
28135- unsigned long flags;
28136- struct rcu_node *rnp;
28137-
28138- local_irq_save(flags);
28139- rnp = rdp->mynode;
28140- if (rdp->gpnum == ACCESS_ONCE(rnp->gpnum) || /* outside lock. */
28141- !spin_trylock(&rnp->lock)) { /* irqs already off, retry later. */
28142- local_irq_restore(flags);
28143- return;
28144- }
28145- __note_new_gpnum(rsp, rnp, rdp);
28146- spin_unlock_irqrestore(&rnp->lock, flags);
28147+ rdp->qs_pending = 1;
28148+ rdp->passed_quiesc = 0;
28149+ rdp->gpnum = rsp->gpnum;
28150 }
28151
28152 /*
28153@@ -588,79 +583,6 @@ check_for_new_grace_period(struct rcu_state *rsp, struct rcu_data *rdp)
28154 }
28155
28156 /*
28157- * Advance this CPU's callbacks, but only if the current grace period
28158- * has ended. This may be called only from the CPU to whom the rdp
28159- * belongs. In addition, the corresponding leaf rcu_node structure's
28160- * ->lock must be held by the caller, with irqs disabled.
28161- */
28162-static void
28163-__rcu_process_gp_end(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
28164-{
28165- /* Did another grace period end? */
28166- if (rdp->completed != rnp->completed) {
28167-
28168- /* Advance callbacks. No harm if list empty. */
28169- rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[RCU_WAIT_TAIL];
28170- rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_READY_TAIL];
28171- rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
28172-
28173- /* Remember that we saw this grace-period completion. */
28174- rdp->completed = rnp->completed;
28175- }
28176-}
28177-
28178-/*
28179- * Advance this CPU's callbacks, but only if the current grace period
28180- * has ended. This may be called only from the CPU to whom the rdp
28181- * belongs.
28182- */
28183-static void
28184-rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp)
28185-{
28186- unsigned long flags;
28187- struct rcu_node *rnp;
28188-
28189- local_irq_save(flags);
28190- rnp = rdp->mynode;
28191- if (rdp->completed == ACCESS_ONCE(rnp->completed) || /* outside lock. */
28192- !spin_trylock(&rnp->lock)) { /* irqs already off, retry later. */
28193- local_irq_restore(flags);
28194- return;
28195- }
28196- __rcu_process_gp_end(rsp, rnp, rdp);
28197- spin_unlock_irqrestore(&rnp->lock, flags);
28198-}
28199-
28200-/*
28201- * Do per-CPU grace-period initialization for running CPU. The caller
28202- * must hold the lock of the leaf rcu_node structure corresponding to
28203- * this CPU.
28204- */
28205-static void
28206-rcu_start_gp_per_cpu(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
28207-{
28208- /* Prior grace period ended, so advance callbacks for current CPU. */
28209- __rcu_process_gp_end(rsp, rnp, rdp);
28210-
28211- /*
28212- * Because this CPU just now started the new grace period, we know
28213- * that all of its callbacks will be covered by this upcoming grace
28214- * period, even the ones that were registered arbitrarily recently.
28215- * Therefore, advance all outstanding callbacks to RCU_WAIT_TAIL.
28216- *
28217- * Other CPUs cannot be sure exactly when the grace period started.
28218- * Therefore, their recently registered callbacks must pass through
28219- * an additional RCU_NEXT_READY stage, so that they will be handled
28220- * by the next RCU grace period.
28221- */
28222- rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
28223- rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
28224-
28225- /* Set state so that this CPU will detect the next quiescent state. */
28226- __note_new_gpnum(rsp, rnp, rdp);
28227-}
28228-
28229-/*
28230 * Start a new RCU grace period if warranted, re-initializing the hierarchy
28231 * in preparation for detecting the next grace period. The caller must hold
28232 * the root node's ->lock, which is released before return. Hard irqs must
28233@@ -685,15 +607,28 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
28234 rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
28235 record_gp_stall_check_time(rsp);
28236 dyntick_record_completed(rsp, rsp->completed - 1);
28237+ note_new_gpnum(rsp, rdp);
28238+
28239+ /*
28240+ * Because this CPU just now started the new grace period, we know
28241+ * that all of its callbacks will be covered by this upcoming grace
28242+ * period, even the ones that were registered arbitrarily recently.
28243+ * Therefore, advance all outstanding callbacks to RCU_WAIT_TAIL.
28244+ *
28245+ * Other CPUs cannot be sure exactly when the grace period started.
28246+ * Therefore, their recently registered callbacks must pass through
28247+ * an additional RCU_NEXT_READY stage, so that they will be handled
28248+ * by the next RCU grace period.
28249+ */
28250+ rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
28251+ rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
28252
28253 /* Special-case the common single-level case. */
28254 if (NUM_RCU_NODES == 1) {
28255 rcu_preempt_check_blocked_tasks(rnp);
28256 rnp->qsmask = rnp->qsmaskinit;
28257 rnp->gpnum = rsp->gpnum;
28258- rnp->completed = rsp->completed;
28259 rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */
28260- rcu_start_gp_per_cpu(rsp, rnp, rdp);
28261 spin_unlock_irqrestore(&rnp->lock, flags);
28262 return;
28263 }
28264@@ -726,9 +661,6 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
28265 rcu_preempt_check_blocked_tasks(rnp);
28266 rnp->qsmask = rnp->qsmaskinit;
28267 rnp->gpnum = rsp->gpnum;
28268- rnp->completed = rsp->completed;
28269- if (rnp == rdp->mynode)
28270- rcu_start_gp_per_cpu(rsp, rnp, rdp);
28271 spin_unlock(&rnp->lock); /* irqs remain disabled. */
28272 }
28273
28274@@ -740,6 +672,34 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
28275 }
28276
28277 /*
28278+ * Advance this CPU's callbacks, but only if the current grace period
28279+ * has ended. This may be called only from the CPU to whom the rdp
28280+ * belongs.
28281+ */
28282+static void
28283+rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp)
28284+{
28285+ long completed_snap;
28286+ unsigned long flags;
28287+
28288+ local_irq_save(flags);
28289+ completed_snap = ACCESS_ONCE(rsp->completed); /* outside of lock. */
28290+
28291+ /* Did another grace period end? */
28292+ if (rdp->completed != completed_snap) {
28293+
28294+ /* Advance callbacks. No harm if list empty. */
28295+ rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[RCU_WAIT_TAIL];
28296+ rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_READY_TAIL];
28297+ rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
28298+
28299+ /* Remember that we saw this grace-period completion. */
28300+ rdp->completed = completed_snap;
28301+ }
28302+ local_irq_restore(flags);
28303+}
28304+
28305+/*
28306 * Clean up after the prior grace period and let rcu_start_gp() start up
28307 * the next grace period if one is needed. Note that the caller must
28308 * hold rnp->lock, as required by rcu_start_gp(), which will release it.
28309@@ -750,6 +710,7 @@ static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags)
28310 WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
28311 rsp->completed = rsp->gpnum;
28312 rsp->signaled = RCU_GP_IDLE;
28313+ rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]);
28314 rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */
28315 }
28316
28317@@ -1183,7 +1144,6 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
28318 long lastcomp;
28319 struct rcu_node *rnp = rcu_get_root(rsp);
28320 u8 signaled;
28321- u8 forcenow;
28322
28323 if (!rcu_gp_in_progress(rsp))
28324 return; /* No grace period in progress, nothing to force. */
28325@@ -1220,23 +1180,16 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
28326 if (rcu_process_dyntick(rsp, lastcomp,
28327 dyntick_save_progress_counter))
28328 goto unlock_ret;
28329- /* fall into next case. */
28330-
28331- case RCU_SAVE_COMPLETED:
28332
28333 /* Update state, record completion counter. */
28334- forcenow = 0;
28335 spin_lock(&rnp->lock);
28336 if (lastcomp == rsp->completed &&
28337- rsp->signaled == signaled) {
28338+ rsp->signaled == RCU_SAVE_DYNTICK) {
28339 rsp->signaled = RCU_FORCE_QS;
28340 dyntick_record_completed(rsp, lastcomp);
28341- forcenow = signaled == RCU_SAVE_COMPLETED;
28342 }
28343 spin_unlock(&rnp->lock);
28344- if (!forcenow)
28345- break;
28346- /* fall into next case. */
28347+ break;
28348
28349 case RCU_FORCE_QS:
28350
28351@@ -1591,16 +1544,21 @@ static void __cpuinit
28352 rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
28353 {
28354 unsigned long flags;
28355+ long lastcomp;
28356 unsigned long mask;
28357 struct rcu_data *rdp = rsp->rda[cpu];
28358 struct rcu_node *rnp = rcu_get_root(rsp);
28359
28360 /* Set up local state, ensuring consistent view of global state. */
28361 spin_lock_irqsave(&rnp->lock, flags);
28362+ lastcomp = rsp->completed;
28363+ rdp->completed = lastcomp;
28364+ rdp->gpnum = lastcomp;
28365 rdp->passed_quiesc = 0; /* We could be racing with new GP, */
28366 rdp->qs_pending = 1; /* so set up to respond to current GP. */
28367 rdp->beenonline = 1; /* We have now been online. */
28368 rdp->preemptable = preemptable;
28369+ rdp->passed_quiesc_completed = lastcomp - 1;
28370 rdp->qlen_last_fqs_check = 0;
28371 rdp->n_force_qs_snap = rsp->n_force_qs;
28372 rdp->blimit = blimit;
28373@@ -1622,11 +1580,6 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
28374 spin_lock(&rnp->lock); /* irqs already disabled. */
28375 rnp->qsmaskinit |= mask;
28376 mask = rnp->grpmask;
28377- if (rnp == rdp->mynode) {
28378- rdp->gpnum = rnp->completed; /* if GP in progress... */
28379- rdp->completed = rnp->completed;
28380- rdp->passed_quiesc_completed = rnp->completed - 1;
28381- }
28382 spin_unlock(&rnp->lock); /* irqs already disabled. */
28383 rnp = rnp->parent;
28384 } while (rnp != NULL && !(rnp->qsmaskinit & mask));
28385diff --git a/kernel/rcutree.h b/kernel/rcutree.h
28386index ddb79ec..1899023 100644
28387--- a/kernel/rcutree.h
28388+++ b/kernel/rcutree.h
28389@@ -84,9 +84,6 @@ struct rcu_node {
28390 long gpnum; /* Current grace period for this node. */
28391 /* This will either be equal to or one */
28392 /* behind the root rcu_node's gpnum. */
28393- long completed; /* Last grace period completed for this node. */
28394- /* This will either be equal to or one */
28395- /* behind the root rcu_node's gpnum. */
28396 unsigned long qsmask; /* CPUs or groups that need to switch in */
28397 /* order for current grace period to proceed.*/
28398 /* In leaf rcu_node, each bit corresponds to */
28399@@ -207,12 +204,11 @@ struct rcu_data {
28400 #define RCU_GP_IDLE 0 /* No grace period in progress. */
28401 #define RCU_GP_INIT 1 /* Grace period being initialized. */
28402 #define RCU_SAVE_DYNTICK 2 /* Need to scan dyntick state. */
28403-#define RCU_SAVE_COMPLETED 3 /* Need to save rsp->completed. */
28404-#define RCU_FORCE_QS 4 /* Need to force quiescent state. */
28405+#define RCU_FORCE_QS 3 /* Need to force quiescent state. */
28406 #ifdef CONFIG_NO_HZ
28407 #define RCU_SIGNAL_INIT RCU_SAVE_DYNTICK
28408 #else /* #ifdef CONFIG_NO_HZ */
28409-#define RCU_SIGNAL_INIT RCU_SAVE_COMPLETED
28410+#define RCU_SIGNAL_INIT RCU_FORCE_QS
28411 #endif /* #else #ifdef CONFIG_NO_HZ */
28412
28413 #define RCU_JIFFIES_TILL_FORCE_QS 3 /* for rsp->jiffies_force_qs */
28414@@ -278,8 +274,9 @@ struct rcu_state {
28415 unsigned long jiffies_stall; /* Time at which to check */
28416 /* for CPU stalls. */
28417 #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
28418+#ifdef CONFIG_NO_HZ
28419 long dynticks_completed; /* Value of completed @ snap. */
28420- /* Protected by fqslock. */
28421+#endif /* #ifdef CONFIG_NO_HZ */
28422 };
28423
28424 #ifdef RCU_TREE_NONCORE
28425@@ -301,7 +298,7 @@ DECLARE_PER_CPU(struct rcu_data, rcu_preempt_data);
28426 #else /* #ifdef RCU_TREE_NONCORE */
28427
28428 /* Forward declarations for rcutree_plugin.h */
28429-static void rcu_bootup_announce(void);
28430+static inline void rcu_bootup_announce(void);
28431 long rcu_batches_completed(void);
28432 static void rcu_preempt_note_context_switch(int cpu);
28433 static int rcu_preempted_readers(struct rcu_node *rnp);
28434diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
28435index c03edf7..ef2a58c 100644
28436--- a/kernel/rcutree_plugin.h
28437+++ b/kernel/rcutree_plugin.h
28438@@ -33,7 +33,7 @@ DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
28439 /*
28440 * Tell them what RCU they are running.
28441 */
28442-static void rcu_bootup_announce(void)
28443+static inline void rcu_bootup_announce(void)
28444 {
28445 printk(KERN_INFO
28446 "Experimental preemptable hierarchical RCU implementation.\n");
28447@@ -481,7 +481,7 @@ void exit_rcu(void)
28448 /*
28449 * Tell them what RCU they are running.
28450 */
28451-static void rcu_bootup_announce(void)
28452+static inline void rcu_bootup_announce(void)
28453 {
28454 printk(KERN_INFO "Hierarchical RCU implementation.\n");
28455 }
28456 2776 diff --git a/kernel/sched.c b/kernel/sched.c
28457 index 60d74cc..1701eae 100644
2777 index 3c11ae0..adb5e92 100644
28458 2778 --- a/kernel/sched.c
28459 2779 +++ b/kernel/sched.c
28460 2780 @@ -77,6 +77,9 @@
@@ -28488,30 +2808,7 @@ index 60d74cc..1701eae 100644
28488 2808
28489 2809 #ifdef CONFIG_FAIR_GROUP_SCHED
28490 2810 /* list of leaf cfs_rq on this cpu: */
28491 @@ -591,8 +601,6 @@ struct rq {
2811 @@ -1815,11 +1825,12 @@ static void calc_load_account_active(struct rq *this_rq);
28492
28493 u64 rt_avg;
28494 u64 age_stamp;
28495- u64 idle_stamp;
28496- u64 avg_idle;
28497 #endif
28498
28499 /* calc_load related fields */
28500@@ -816,7 +824,6 @@ const_debug unsigned int sysctl_sched_nr_migrate = 32;
28501 * default: 0.25ms
28502 */
28503 unsigned int sysctl_sched_shares_ratelimit = 250000;
28504-unsigned int normalized_sysctl_sched_shares_ratelimit = 250000;
28505
28506 /*
28507 * Inject some fuzziness into changing the per-cpu group shares
28508@@ -1813,17 +1820,17 @@ static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
28509 #endif
28510
28511 static void calc_load_account_active(struct rq *this_rq);
28512-static void update_sysctl(void);
28513
28514 #include "sched_stats.h"
28515 2812 #include "sched_idletask.c"
28516 2813 #include "sched_fair.c"
28517 2814 #include "sched_rt.c"
@@ -28525,27 +2822,7 @@ index 60d74cc..1701eae 100644
28525 2822 #define for_each_class(class) \
28526 2823 for (class = sched_class_highest; class; class = class->next)
28527 2824
28528@@ -2038,9 +2045,6 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd) 2825@@ -2343,6 +2354,9 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
28529 {
28530 s64 delta;
28531
28532- if (p->sched_class != &fair_sched_class)
28533- return 0;
28534-
28535 /*
28536 * Buddy candidates are cache hot:
28537 */
28538@@ -2049,6 +2053,9 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
28539 &p->se == cfs_rq_of(&p->se)->last))
28540 return 1;
28541
28542+ if (p->sched_class != &fair_sched_class)
28543+ return 0;
28544+
28545 if (sysctl_sched_migration_cost == -1)
28546 return 1;
28547 if (sysctl_sched_migration_cost == 0)
28548 @@ -2347,6 +2354,9 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
2825 @@ -2343,6 +2354,9 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
28549 2826 unsigned long flags;
28550 2827 struct rq *rq, *orig_rq;
28551 2828
@@ -28555,7 +2832,7 @@ index 60d74cc..1701eae 100644
28555 if (!sched_feat(SYNC_WAKEUPS)) 2832 if (!sched_feat(SYNC_WAKEUPS))
28556 wake_flags &= ~WF_SYNC; 2833 wake_flags &= ~WF_SYNC;
28557 2834
28558@@ -2365,7 +2375,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, 2835@@ -2361,7 +2375,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
28559 orig_cpu = cpu; 2836 orig_cpu = cpu;
28560 2837
28561 #ifdef CONFIG_SMP 2838 #ifdef CONFIG_SMP
@@ -28564,21 +2841,8 @@ index 60d74cc..1701eae 100644
28564 2841 goto out_activate;
28565 2842
28566 2843 /*
28567 @@ -2444,19 +2454,10 @@ out_running:
2844 @@ -2442,6 +2456,8 @@ out_running:
28568 #ifdef CONFIG_SMP
28569 if (p->sched_class->task_wake_up)
28570 2845 p->sched_class->task_wake_up(rq, p);
28571-
28572- if (unlikely(rq->idle_stamp)) {
28573- u64 delta = rq->clock - rq->idle_stamp;
28574- u64 max = 2*sysctl_sched_migration_cost;
28575-
28576- if (delta > max)
28577- rq->avg_idle = max;
28578- else
28579- update_avg(&rq->avg_idle, delta);
28580- rq->idle_stamp = 0;
28581- }
28582 2846 #endif
28583 2847 out:
28584 2848 + if (is_realtime(p))
28585 2849 + sched_trace_task_resume(p);
28586 2850 task_rq_unlock(rq, &flags);
28587 2851 put_cpu();
28588 2852
28589 @@ -2765,6 +2766,8 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
2853 @@ -2750,6 +2766,8 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
28590 2854 */
28591 2855 prev_state = prev->state;
28592 2856 finish_arch_switch(prev);
28593 2857 + litmus->finish_switch(prev);
28594 2858 + prev->rt_param.stack_in_use = NO_CPU;
28595 2859 perf_event_task_sched_in(current, cpu_of(rq));
28596 2860 finish_lock_switch(rq, prev);
28597 2861
28599 { 2863 {
28600 if (prev->sched_class->pre_schedule) 2864 if (prev->sched_class->pre_schedule)
28601 prev->sched_class->pre_schedule(rq, prev); 2865 prev->sched_class->pre_schedule(rq, prev);
@@ -28611,69 +2875,7 @@ index 60d74cc..1701eae 100644
28611 } 2875 }
28612 2876
28613 /* rq->lock is NOT held, but preemption is disabled */ 2877 /* rq->lock is NOT held, but preemption is disabled */
28614@@ -3179,6 +3191,10 @@ static void pull_task(struct rq *src_rq, struct task_struct *p, 2878@@ -5232,18 +5259,26 @@ void scheduler_tick(void)
28615 deactivate_task(src_rq, p, 0);
28616 set_task_cpu(p, this_cpu);
28617 activate_task(this_rq, p, 0);
28618+ /*
28619+ * Note that idle threads have a prio of MAX_PRIO, for this test
28620+ * to be always true for them.
28621+ */
28622 check_preempt_curr(this_rq, p, 0);
28623 }
28624
28625@@ -4137,7 +4153,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
28626 unsigned long flags;
28627 struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
28628
28629- cpumask_copy(cpus, cpu_active_mask);
28630+ cpumask_setall(cpus);
28631
28632 /*
28633 * When power savings policy is enabled for the parent domain, idle
28634@@ -4300,7 +4316,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
28635 int all_pinned = 0;
28636 struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
28637
28638- cpumask_copy(cpus, cpu_active_mask);
28639+ cpumask_setall(cpus);
28640
28641 /*
28642 * When power savings policy is enabled for the parent domain, idle
28643@@ -4440,11 +4456,6 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
28644 int pulled_task = 0;
28645 unsigned long next_balance = jiffies + HZ;
28646
28647- this_rq->idle_stamp = this_rq->clock;
28648-
28649- if (this_rq->avg_idle < sysctl_sched_migration_cost)
28650- return;
28651-
28652 for_each_domain(this_cpu, sd) {
28653 unsigned long interval;
28654
28655@@ -4459,10 +4470,8 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
28656 interval = msecs_to_jiffies(sd->balance_interval);
28657 if (time_after(next_balance, sd->last_balance + interval))
28658 next_balance = sd->last_balance + interval;
28659- if (pulled_task) {
28660- this_rq->idle_stamp = 0;
28661+ if (pulled_task)
28662 break;
28663- }
28664 }
28665 if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
28666 /*
28667@@ -4697,7 +4706,7 @@ int select_nohz_load_balancer(int stop_tick)
28668 cpumask_set_cpu(cpu, nohz.cpu_mask);
28669
28670 /* time for ilb owner also to sleep */
28671- if (cpumask_weight(nohz.cpu_mask) == num_active_cpus()) {
28672+ if (cpumask_weight(nohz.cpu_mask) == num_online_cpus()) {
28673 if (atomic_read(&nohz.load_balancer) == cpu)
28674 atomic_set(&nohz.load_balancer, -1);
28675 return 0;
28676@@ -5250,18 +5259,26 @@ void scheduler_tick(void)
28677 2879
28678 sched_clock_tick(); 2880 sched_clock_tick();
28679 2881
@@ -28701,7 +2903,7 @@ index 60d74cc..1701eae 100644
28701 } 2903 }
28702 2904
28703 notrace unsigned long get_parent_ip(unsigned long addr) 2905 notrace unsigned long get_parent_ip(unsigned long addr)
28704 @@ -5404,12 +5421,20 @@ pick_next_task(struct rq *rq)
2906 @@ -5386,12 +5421,20 @@ pick_next_task(struct rq *rq)
28705 2907 /*
28706 2908 * Optimization: we know that if all tasks are in
28707 2909 * the fair class we can call that function directly:
@@ -28724,7 +2926,7 @@ index 60d74cc..1701eae 100644
28724 2926
28725 class = sched_class_highest; 2927 class = sched_class_highest;
28726 for ( ; ; ) { 2928 for ( ; ; ) {
28727 @@ -5444,6 +5469,8 @@ need_resched:
2929 @@ -5426,6 +5469,8 @@ need_resched:
28728 2930
28729 2931 release_kernel_lock(prev);
28730 2932 need_resched_nonpreemptible:
@@ -28733,7 +2935,7 @@ index 60d74cc..1701eae 100644
28733 2935
28734 schedule_debug(prev); 2936 schedule_debug(prev);
28735 2937
28736 @@ -5478,24 +5505,40 @@ need_resched_nonpreemptible:
2938 @@ -5460,24 +5505,36 @@ need_resched_nonpreemptible:
28737 2939 rq->curr = next;
28738 2940 ++*switch_count;
28739 2941
@@ -28753,21 +2955,17 @@ index 60d74cc..1701eae 100644
28753 spin_unlock_irq(&rq->lock); 2955 spin_unlock_irq(&rq->lock);
28754+ } 2956+ }
28755+ 2957+
28756+ TS_SCHED2_START(current);
28757+ sched_trace_task_switch_to(current); 2958+ sched_trace_task_switch_to(current);
28758 2959
28759 post_schedule(rq); 2960 post_schedule(rq);
28760 2961
28761- if (unlikely(reacquire_kernel_lock(current) < 0)) 2962- if (unlikely(reacquire_kernel_lock(current) < 0))
28762+ if (unlikely(reacquire_kernel_lock(current) < 0)) { 2963+ if (unlikely(reacquire_kernel_lock(current) < 0)) {
28763+ TS_SCHED2_END(current);
28764 goto need_resched_nonpreemptible; 2964 goto need_resched_nonpreemptible;
28765+ } 2965+ }
28766 2966
28767 preempt_enable_no_resched(); 2967 preempt_enable_no_resched();
28768+ 2968+
28769+ TS_SCHED2_END(current);
28770+
28771 if (need_resched()) 2969 if (need_resched())
28772 goto need_resched; 2970 goto need_resched;
28773+ 2971+
@@ -28776,7 +2974,7 @@ index 60d74cc..1701eae 100644
28776 } 2974 }
28777 EXPORT_SYMBOL(schedule); 2975 EXPORT_SYMBOL(schedule);
28778 2976
28779 @@ -5772,6 +5815,17 @@ void complete_all(struct completion *x)
2977 @@ -5754,6 +5811,17 @@ void complete_all(struct completion *x)
28780 2978 }
28781 2979 EXPORT_SYMBOL(complete_all);
28782 2980
@@ -28794,7 +2992,7 @@ index 60d74cc..1701eae 100644
28794 static inline long __sched 2992 static inline long __sched
28795 do_wait_for_common(struct completion *x, long timeout, int state) 2993 do_wait_for_common(struct completion *x, long timeout, int state)
28796 { 2994 {
28797 @@ -6203,6 +6257,9 @@ __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
2995 @@ -6185,6 +6253,9 @@ __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
28798 2996 case SCHED_RR:
28799 2997 p->sched_class = &rt_sched_class;
28800 2998 break;
@@ -28804,7 +3002,7 @@ index 60d74cc..1701eae 100644
28804 } 3002 }
28805 3003
28806 p->rt_priority = prio; 3004 p->rt_priority = prio;
28807 @@ -6250,7 +6307,7 @@ recheck:
3005 @@ -6232,7 +6303,7 @@ recheck:
28808 3006
28809 3007 if (policy != SCHED_FIFO && policy != SCHED_RR &&
28810 3008 policy != SCHED_NORMAL && policy != SCHED_BATCH &&
@@ -28813,7 +3011,7 @@ index 60d74cc..1701eae 100644
28813 return -EINVAL; 3011 return -EINVAL;
28814 } 3012 }
28815 3013
28816@@ -6265,6 +6322,8 @@ recheck: 3014@@ -6247,6 +6318,8 @@ recheck:
28817 return -EINVAL; 3015 return -EINVAL;
28818 if (rt_policy(policy) != (param->sched_priority != 0)) 3016 if (rt_policy(policy) != (param->sched_priority != 0))
28819 return -EINVAL; 3017 return -EINVAL;
@@ -28822,7 +3020,7 @@ index 60d74cc..1701eae 100644
28822 3020
28823 /* 3021 /*
28824 * Allow unprivileged RT tasks to decrease priority: 3022 * Allow unprivileged RT tasks to decrease priority:
28825@@ -6319,6 +6378,12 @@ recheck: 3023@@ -6301,6 +6374,12 @@ recheck:
28826 return retval; 3024 return retval;
28827 } 3025 }
28828 3026
@@ -28835,7 +3033,7 @@ index 60d74cc..1701eae 100644
28835 /* 3033 /*
28836 * make sure no PI-waiters arrive (or leave) while we are 3034 * make sure no PI-waiters arrive (or leave) while we are
28837 * changing the priority of the task: 3035 * changing the priority of the task:
28838@@ -6346,9 +6411,18 @@ recheck: 3036@@ -6328,9 +6407,18 @@ recheck:
28839 3037
28840 p->sched_reset_on_fork = reset_on_fork; 3038 p->sched_reset_on_fork = reset_on_fork;
28841 3039
@@ -28854,7 +3052,7 @@ index 60d74cc..1701eae 100644
28854 if (running) 3052 if (running)
28855 p->sched_class->set_curr_task(rq); 3053 p->sched_class->set_curr_task(rq);
28856 if (on_rq) { 3054 if (on_rq) {
28857@@ -6518,10 +6592,11 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) 3055@@ -6500,10 +6588,11 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
28858 read_lock(&tasklist_lock); 3056 read_lock(&tasklist_lock);
28859 3057
28860 p = find_process_by_pid(pid); 3058 p = find_process_by_pid(pid);
@@ -28868,341 +3066,11 @@ index 60d74cc..1701eae 100644
28868 } 3066 }
28869 3067
28870 /* 3068 /*
28871@@ -6980,6 +7055,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
28872 __sched_fork(idle);
28873 idle->se.exec_start = sched_clock();
28874
28875+ idle->prio = idle->normal_prio = MAX_PRIO;
28876 cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
28877 __set_task_cpu(idle, cpu);
28878
28879@@ -7020,23 +7096,22 @@ cpumask_var_t nohz_cpu_mask;
28880 *
28881 * This idea comes from the SD scheduler of Con Kolivas:
28882 */
28883-static void update_sysctl(void)
28884+static inline void sched_init_granularity(void)
28885 {
28886- unsigned int cpus = min(num_online_cpus(), 8U);
28887- unsigned int factor = 1 + ilog2(cpus);
28888+ unsigned int factor = 1 + ilog2(num_online_cpus());
28889+ const unsigned long limit = 200000000;
28890
28891-#define SET_SYSCTL(name) \
28892- (sysctl_##name = (factor) * normalized_sysctl_##name)
28893- SET_SYSCTL(sched_min_granularity);
28894- SET_SYSCTL(sched_latency);
28895- SET_SYSCTL(sched_wakeup_granularity);
28896- SET_SYSCTL(sched_shares_ratelimit);
28897-#undef SET_SYSCTL
28898-}
28899+ sysctl_sched_min_granularity *= factor;
28900+ if (sysctl_sched_min_granularity > limit)
28901+ sysctl_sched_min_granularity = limit;
28902
28903-static inline void sched_init_granularity(void)
28904-{
28905- update_sysctl();
28906+ sysctl_sched_latency *= factor;
28907+ if (sysctl_sched_latency > limit)
28908+ sysctl_sched_latency = limit;
28909+
28910+ sysctl_sched_wakeup_granularity *= factor;
28911+
28912+ sysctl_sched_shares_ratelimit *= factor;
28913 }
28914
28915 #ifdef CONFIG_SMP
28916@@ -7073,7 +7148,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
28917 int ret = 0;
28918
28919 rq = task_rq_lock(p, &flags);
28920- if (!cpumask_intersects(new_mask, cpu_active_mask)) {
28921+ if (!cpumask_intersects(new_mask, cpu_online_mask)) {
28922 ret = -EINVAL;
28923 goto out;
28924 }
28925@@ -7095,7 +7170,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
28926 if (cpumask_test_cpu(task_cpu(p), new_mask))
28927 goto out;
28928
28929- if (migrate_task(p, cpumask_any_and(cpu_active_mask, new_mask), &req)) {
28930+ if (migrate_task(p, cpumask_any_and(cpu_online_mask, new_mask), &req)) {
28931 /* Need help from migration thread: drop lock and wait. */
28932 struct task_struct *mt = rq->migration_thread;
28933
28934@@ -7249,19 +7324,19 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
28935
28936 again:
28937 /* Look for allowed, online CPU in same node. */
28938- for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
28939+ for_each_cpu_and(dest_cpu, nodemask, cpu_online_mask)
28940 if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
28941 goto move;
28942
28943 /* Any allowed, online CPU? */
28944- dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
28945+ dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_online_mask);
28946 if (dest_cpu < nr_cpu_ids)
28947 goto move;
28948
28949 /* No more Mr. Nice Guy. */
28950 if (dest_cpu >= nr_cpu_ids) {
28951 cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
28952- dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
28953+ dest_cpu = cpumask_any_and(cpu_online_mask, &p->cpus_allowed);
28954
28955 /*
28956 * Don't tell them about moving exiting tasks or
28957@@ -7290,7 +7365,7 @@ move:
28958 */
28959 static void migrate_nr_uninterruptible(struct rq *rq_src)
28960 {
28961- struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask));
28962+ struct rq *rq_dest = cpu_rq(cpumask_any(cpu_online_mask));
28963 unsigned long flags;
28964
28965 local_irq_save(flags);
28966@@ -7544,7 +7619,7 @@ static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
28967 static struct ctl_table_header *sd_sysctl_header;
28968 static void register_sched_domain_sysctl(void)
28969 {
28970- int i, cpu_num = num_possible_cpus();
28971+ int i, cpu_num = num_online_cpus();
28972 struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
28973 char buf[32];
28974
28975@@ -7554,7 +7629,7 @@ static void register_sched_domain_sysctl(void)
28976 if (entry == NULL)
28977 return;
28978
28979- for_each_possible_cpu(i) {
28980+ for_each_online_cpu(i) {
28981 snprintf(buf, 32, "cpu%d", i);
28982 entry->procname = kstrdup(buf, GFP_KERNEL);
28983 entry->mode = 0555;
28984@@ -7684,6 +7759,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
28985 spin_lock_irq(&rq->lock);
28986 update_rq_clock(rq);
28987 deactivate_task(rq, rq->idle, 0);
28988+ rq->idle->static_prio = MAX_PRIO;
28989 __setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
28990 rq->idle->sched_class = &idle_sched_class;
28991 migrate_dead_tasks(cpu);
28992@@ -7922,8 +7998,6 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
28993
28994 static void free_rootdomain(struct root_domain *rd)
28995 {
28996- synchronize_sched();
28997-
28998 cpupri_cleanup(&rd->cpupri);
28999
29000 free_cpumask_var(rd->rto_mask);
29001@@ -8064,7 +8138,6 @@ static cpumask_var_t cpu_isolated_map;
29002 /* Setup the mask of cpus configured for isolated domains */
29003 static int __init isolated_cpu_setup(char *str)
29004 {
29005- alloc_bootmem_cpumask_var(&cpu_isolated_map);
29006 cpulist_parse(str, cpu_isolated_map);
29007 return 1;
29008 }
29009@@ -9042,7 +9115,7 @@ match1:
29010 if (doms_new == NULL) {
29011 ndoms_cur = 0;
29012 doms_new = fallback_doms;
29013- cpumask_andnot(&doms_new[0], cpu_active_mask, cpu_isolated_map);
29014+ cpumask_andnot(&doms_new[0], cpu_online_mask, cpu_isolated_map);
29015 WARN_ON_ONCE(dattr_new);
29016 }
29017
29018@@ -9173,10 +9246,8 @@ static int update_sched_domains(struct notifier_block *nfb,
29019 switch (action) {
29020 case CPU_ONLINE:
29021 case CPU_ONLINE_FROZEN:
29022- case CPU_DOWN_PREPARE:
29023- case CPU_DOWN_PREPARE_FROZEN:
29024- case CPU_DOWN_FAILED:
29025- case CPU_DOWN_FAILED_FROZEN:
29026+ case CPU_DEAD:
29027+ case CPU_DEAD_FROZEN:
29028 partition_sched_domains(1, NULL, NULL);
29029 return NOTIFY_OK;
29030
29031@@ -9223,7 +9294,7 @@ void __init sched_init_smp(void)
29032 #endif
29033 get_online_cpus();
29034 mutex_lock(&sched_domains_mutex);
29035- arch_init_sched_domains(cpu_active_mask);
29036+ arch_init_sched_domains(cpu_online_mask);
29037 cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
29038 if (cpumask_empty(non_isolated_cpus))
29039 cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
29040@@ -9544,8 +9615,6 @@ void __init sched_init(void)
29041 rq->cpu = i;
29042 rq->online = 0;
29043 rq->migration_thread = NULL;
29044- rq->idle_stamp = 0;
29045- rq->avg_idle = 2*sysctl_sched_migration_cost;
29046 INIT_LIST_HEAD(&rq->migration_queue);
29047 rq_attach_root(rq, &def_root_domain);
29048 #endif
29049@@ -9595,9 +9664,7 @@ void __init sched_init(void)
29050 zalloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT);
29051 alloc_cpumask_var(&nohz.ilb_grp_nohz_mask, GFP_NOWAIT);
29052 #endif
29053- /* May be allocated at isolcpus cmdline parse time */
29054- if (cpu_isolated_map == NULL)
29055- zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
29056+ zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
29057 #endif /* SMP */
29058
29059 perf_event_init();
29060diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
29061index 5b49613..479ce56 100644
29062--- a/kernel/sched_clock.c
29063+++ b/kernel/sched_clock.c
29064@@ -236,18 +236,6 @@ void sched_clock_idle_wakeup_event(u64 delta_ns)
29065 }
29066 EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
29067
29068-unsigned long long cpu_clock(int cpu)
29069-{
29070- unsigned long long clock;
29071- unsigned long flags;
29072-
29073- local_irq_save(flags);
29074- clock = sched_clock_cpu(cpu);
29075- local_irq_restore(flags);
29076-
29077- return clock;
29078-}
29079-
29080 #else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
29081
29082 void sched_clock_init(void)
29083@@ -263,12 +251,17 @@ u64 sched_clock_cpu(int cpu)
29084 return sched_clock();
29085 }
29086
29087+#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
29088
29089 unsigned long long cpu_clock(int cpu)
29090 {
29091- return sched_clock_cpu(cpu);
29092-}
29093+ unsigned long long clock;
29094+ unsigned long flags;
29095
29096-#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
29097+ local_irq_save(flags);
29098+ clock = sched_clock_cpu(cpu);
29099+ local_irq_restore(flags);
29100
29101+ return clock;
29102+}
29103 EXPORT_SYMBOL_GPL(cpu_clock);
29104diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
29105index 6988cf0..efb8440 100644
29106--- a/kernel/sched_debug.c
29107+++ b/kernel/sched_debug.c
29108@@ -285,16 +285,12 @@ static void print_cpu(struct seq_file *m, int cpu)
29109
29110 #ifdef CONFIG_SCHEDSTATS
29111 #define P(n) SEQ_printf(m, " .%-30s: %d\n", #n, rq->n);
29112-#define P64(n) SEQ_printf(m, " .%-30s: %Ld\n", #n, rq->n);
29113
29114 P(yld_count);
29115
29116 P(sched_switch);
29117 P(sched_count);
29118 P(sched_goidle);
29119-#ifdef CONFIG_SMP
29120- P64(avg_idle);
29121-#endif
29122
29123 P(ttwu_count);
29124 P(ttwu_local);
29125diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c 3069diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
29126index d80812d..ef43ff9 100644 3070index 37087a7..ef43ff9 100644
29127--- a/kernel/sched_fair.c 3071--- a/kernel/sched_fair.c
29128+++ b/kernel/sched_fair.c 3072+++ b/kernel/sched_fair.c
29129@@ -35,14 +35,12 @@ 3073@@ -1598,7 +1598,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
29130 * run vmstat and monitor the context-switches (cs) field)
29131 */
29132 unsigned int sysctl_sched_latency = 5000000ULL;
29133-unsigned int normalized_sysctl_sched_latency = 5000000ULL;
29134
29135 /*
29136 * Minimal preemption granularity for CPU-bound tasks:
29137 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
29138 */
29139 unsigned int sysctl_sched_min_granularity = 1000000ULL;
29140-unsigned int normalized_sysctl_sched_min_granularity = 1000000ULL;
29141
29142 /*
29143 * is kept at sysctl_sched_latency / sysctl_sched_min_granularity
29144@@ -72,7 +70,6 @@ unsigned int __read_mostly sysctl_sched_compat_yield;
29145 * have immediate wakeup/sleep latencies.
29146 */
29147 unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
29148-unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
29149
29150 const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
29151
29152@@ -1377,9 +1374,6 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
29153
29154 rcu_read_lock();
29155 for_each_domain(cpu, tmp) {
29156- if (!(tmp->flags & SD_LOAD_BALANCE))
29157- continue;
29158-
29159 /*
29160 * If power savings logic is enabled for a domain, see if we
29161 * are not overloaded, if so, don't balance wider.
29162@@ -1404,38 +1398,11 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
29163 want_sd = 0;
29164 }
29165
29166- if (want_affine && (tmp->flags & SD_WAKE_AFFINE)) {
29167- int candidate = -1, i;
29168-
29169- if (cpumask_test_cpu(prev_cpu, sched_domain_span(tmp)))
29170- candidate = cpu;
29171-
29172- /*
29173- * Check for an idle shared cache.
29174- */
29175- if (tmp->flags & SD_PREFER_SIBLING) {
29176- if (candidate == cpu) {
29177- if (!cpu_rq(prev_cpu)->cfs.nr_running)
29178- candidate = prev_cpu;
29179- }
29180-
29181- if (candidate == -1 || candidate == cpu) {
29182- for_each_cpu(i, sched_domain_span(tmp)) {
29183- if (!cpumask_test_cpu(i, &p->cpus_allowed))
29184- continue;
29185- if (!cpu_rq(i)->cfs.nr_running) {
29186- candidate = i;
29187- break;
29188- }
29189- }
29190- }
29191- }
29192+ if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
29193+ cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
29194
29195- if (candidate >= 0) {
29196- affine_sd = tmp;
29197- want_affine = 0;
29198- cpu = candidate;
29199- }
29200+ affine_sd = tmp;
29201+ want_affine = 0;
29202 }
29203
29204 if (!want_sd && !want_affine)
29205@@ -1631,7 +1598,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
29206 3074
29207 update_curr(cfs_rq); 3075 update_curr(cfs_rq);
29208 3076
@@ -29211,33 +3079,6 @@ index d80812d..ef43ff9 100644
29211 resched_task(curr); 3079 resched_task(curr);
29212 return; 3080 return;
29213 } 3081 }
29214@@ -1883,17 +1850,6 @@ move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
29215
29216 return 0;
29217 }
29218-
29219-static void rq_online_fair(struct rq *rq)
29220-{
29221- update_sysctl();
29222-}
29223-
29224-static void rq_offline_fair(struct rq *rq)
29225-{
29226- update_sysctl();
29227-}
29228-
29229 #endif /* CONFIG_SMP */
29230
29231 /*
29232@@ -2041,8 +1997,6 @@ static const struct sched_class fair_sched_class = {
29233
29234 .load_balance = load_balance_fair,
29235 .move_one_task = move_one_task_fair,
29236- .rq_online = rq_online_fair,
29237- .rq_offline = rq_offline_fair,
29238 #endif
29239
29240 .set_curr_task = set_curr_task_fair,
29241diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c 3082diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
29242index a4d790c..f622880 100644 3083index a4d790c..f622880 100644
29243--- a/kernel/sched_rt.c 3084--- a/kernel/sched_rt.c
@@ -29251,263 +3092,11 @@ index a4d790c..f622880 100644
29251 resched_task(rq->curr); 3092 resched_task(rq->curr);
29252 return; 3093 return;
29253 } 3094 }
29254diff --git a/kernel/signal.c b/kernel/signal.c
29255index 4d0658d..6705320 100644
29256--- a/kernel/signal.c
29257+++ b/kernel/signal.c
29258@@ -939,8 +939,7 @@ static void print_fatal_signal(struct pt_regs *regs, int signr)
29259 for (i = 0; i < 16; i++) {
29260 unsigned char insn;
29261
29262- if (get_user(insn, (unsigned char *)(regs->ip + i)))
29263- break;
29264+ __get_user(insn, (unsigned char *)(regs->ip + i));
29265 printk("%02x ", insn);
29266 }
29267 }
29268diff --git a/kernel/sysctl.c b/kernel/sysctl.c
29269index b8bd058..0d949c5 100644
29270--- a/kernel/sysctl.c
29271+++ b/kernel/sysctl.c
29272@@ -1345,7 +1345,6 @@ static struct ctl_table vm_table[] = {
29273 .strategy = &sysctl_jiffies,
29274 },
29275 #endif
29276-#ifdef CONFIG_MMU
29277 {
29278 .ctl_name = CTL_UNNUMBERED,
29279 .procname = "mmap_min_addr",
29280@@ -1354,7 +1353,6 @@ static struct ctl_table vm_table[] = {
29281 .mode = 0644,
29282 .proc_handler = &mmap_min_addr_handler,
29283 },
29284-#endif
29285 #ifdef CONFIG_NUMA
29286 {
29287 .ctl_name = CTL_UNNUMBERED,
29288@@ -1607,8 +1605,7 @@ static struct ctl_table debug_table[] = {
29289 .data = &show_unhandled_signals,
29290 .maxlen = sizeof(int),
29291 .mode = 0644,
29292- .proc_handler = proc_dointvec_minmax,
29293- .extra1 = &zero,
29294+ .proc_handler = proc_dointvec
29295 },
29296 #endif
29297 { .ctl_name = 0 }
29298diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
29299index 469193c..b6e7aae 100644
29300--- a/kernel/sysctl_check.c
29301+++ b/kernel/sysctl_check.c
29302@@ -220,7 +220,6 @@ static const struct trans_ctl_table trans_net_ipv4_conf_vars_table[] = {
29303 { NET_IPV4_CONF_PROMOTE_SECONDARIES, "promote_secondaries" },
29304 { NET_IPV4_CONF_ARP_ACCEPT, "arp_accept" },
29305 { NET_IPV4_CONF_ARP_NOTIFY, "arp_notify" },
29306- { NET_IPV4_CONF_SRC_VMARK, "src_valid_mark" },
29307 {}
29308 };
29309
29310diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
29311index 0d809ae..620b58a 100644
29312--- a/kernel/time/clockevents.c
29313+++ b/kernel/time/clockevents.c
29314@@ -20,8 +20,6 @@
29315 #include <linux/sysdev.h>
29316 #include <linux/tick.h>
29317
29318-#include "tick-internal.h"
29319-
29320 /* The registered clock event devices */
29321 static LIST_HEAD(clockevent_devices);
29322 static LIST_HEAD(clockevents_released);
29323@@ -239,9 +237,8 @@ void clockevents_exchange_device(struct clock_event_device *old,
29324 */
29325 void clockevents_notify(unsigned long reason, void *arg)
29326 {
29327- struct clock_event_device *dev, *tmp;
29328+ struct list_head *node, *tmp;
29329 unsigned long flags;
29330- int cpu;
29331
29332 spin_lock_irqsave(&clockevents_lock, flags);
29333 clockevents_do_notify(reason, arg);
29334@@ -252,20 +249,8 @@ void clockevents_notify(unsigned long reason, void *arg)
29335 * Unregister the clock event devices which were
29336 * released from the users in the notify chain.
29337 */
29338- list_for_each_entry_safe(dev, tmp, &clockevents_released, list)
29339- list_del(&dev->list);
29340- /*
29341- * Now check whether the CPU has left unused per cpu devices
29342- */
29343- cpu = *((int *)arg);
29344- list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) {
29345- if (cpumask_test_cpu(cpu, dev->cpumask) &&
29346- cpumask_weight(dev->cpumask) == 1 &&
29347- !tick_is_broadcast_device(dev)) {
29348- BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
29349- list_del(&dev->list);
29350- }
29351- }
29352+ list_for_each_safe(node, tmp, &clockevents_released)
29353+ list_del(node);
29354 break;
29355 default:
29356 break;
29357diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
29358index ecc7adb..5e18c6a 100644
29359--- a/kernel/time/clocksource.c
29360+++ b/kernel/time/clocksource.c
29361@@ -413,47 +413,6 @@ void clocksource_touch_watchdog(void)
29362 clocksource_resume_watchdog();
29363 }
29364
29365-/**
29366- * clocksource_max_deferment - Returns max time the clocksource can be deferred
29367- * @cs: Pointer to clocksource
29368- *
29369- */
29370-static u64 clocksource_max_deferment(struct clocksource *cs)
29371-{
29372- u64 max_nsecs, max_cycles;
29373-
29374- /*
29375- * Calculate the maximum number of cycles that we can pass to the
29376- * cyc2ns function without overflowing a 64-bit signed result. The
29377- * maximum number of cycles is equal to ULLONG_MAX/cs->mult which
29378- * is equivalent to the below.
29379- * max_cycles < (2^63)/cs->mult
29380- * max_cycles < 2^(log2((2^63)/cs->mult))
29381- * max_cycles < 2^(log2(2^63) - log2(cs->mult))
29382- * max_cycles < 2^(63 - log2(cs->mult))
29383- * max_cycles < 1 << (63 - log2(cs->mult))
29384- * Please note that we add 1 to the result of the log2 to account for
29385- * any rounding errors, ensure the above inequality is satisfied and
29386- * no overflow will occur.
29387- */
29388- max_cycles = 1ULL << (63 - (ilog2(cs->mult) + 1));
29389-
29390- /*
29391- * The actual maximum number of cycles we can defer the clocksource is
29392- * determined by the minimum of max_cycles and cs->mask.
29393- */
29394- max_cycles = min_t(u64, max_cycles, (u64) cs->mask);
29395- max_nsecs = clocksource_cyc2ns(max_cycles, cs->mult, cs->shift);
29396-
29397- /*
29398- * To ensure that the clocksource does not wrap whilst we are idle,
29399- * limit the time the clocksource can be deferred by 12.5%. Please
29400- * note a margin of 12.5% is used because this can be computed with
29401- * a shift, versus say 10% which would require division.
29402- */
29403- return max_nsecs - (max_nsecs >> 5);
29404-}
29405-
29406 #ifdef CONFIG_GENERIC_TIME
29407
29408 /**
29409@@ -552,9 +511,6 @@ static void clocksource_enqueue(struct clocksource *cs)
29410 */
29411 int clocksource_register(struct clocksource *cs)
29412 {
29413- /* calculate max idle time permitted for this clocksource */
29414- cs->max_idle_ns = clocksource_max_deferment(cs);
29415-
29416 mutex_lock(&clocksource_mutex);
29417 clocksource_enqueue(cs);
29418 clocksource_select();
29419diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c 3095diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
29420index 44320b1..dcbff75 100644 3096index 89aed59..dcbff75 100644
29421--- a/kernel/time/tick-sched.c 3097--- a/kernel/time/tick-sched.c
29422+++ b/kernel/time/tick-sched.c 3098+++ b/kernel/time/tick-sched.c
29423@@ -216,7 +216,6 @@ void tick_nohz_stop_sched_tick(int inidle) 3099@@ -686,6 +686,46 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
29424 struct tick_sched *ts;
29425 ktime_t last_update, expires, now;
29426 struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
29427- u64 time_delta;
29428 int cpu;
29429
29430 local_irq_save(flags);
29431@@ -276,17 +275,6 @@ void tick_nohz_stop_sched_tick(int inidle)
29432 seq = read_seqbegin(&xtime_lock);
29433 last_update = last_jiffies_update;
29434 last_jiffies = jiffies;
29435-
29436- /*
29437- * On SMP we really should only care for the CPU which
29438- * has the do_timer duty assigned. All other CPUs can
29439- * sleep as long as they want.
29440- */
29441- if (cpu == tick_do_timer_cpu ||
29442- tick_do_timer_cpu == TICK_DO_TIMER_NONE)
29443- time_delta = timekeeping_max_deferment();
29444- else
29445- time_delta = KTIME_MAX;
29446 } while (read_seqretry(&xtime_lock, seq));
29447
29448 /* Get the next timer wheel timer */
29449@@ -306,26 +294,11 @@ void tick_nohz_stop_sched_tick(int inidle)
29450 if ((long)delta_jiffies >= 1) {
29451
29452 /*
29453- * calculate the expiry time for the next timer wheel
29454- * timer. delta_jiffies >= NEXT_TIMER_MAX_DELTA signals
29455- * that there is no timer pending or at least extremely
29456- * far into the future (12 days for HZ=1000). In this
29457- * case we set the expiry to the end of time.
29458- */
29459- if (likely(delta_jiffies < NEXT_TIMER_MAX_DELTA)) {
29460- /*
29461- * Calculate the time delta for the next timer event.
29462- * If the time delta exceeds the maximum time delta
29463- * permitted by the current clocksource then adjust
29464- * the time delta accordingly to ensure the
29465- * clocksource does not wrap.
29466- */
29467- time_delta = min_t(u64, time_delta,
29468- tick_period.tv64 * delta_jiffies);
29469- expires = ktime_add_ns(last_update, time_delta);
29470- } else {
29471- expires.tv64 = KTIME_MAX;
29472- }
29473+ * calculate the expiry time for the next timer wheel
29474+ * timer
29475+ */
29476+ expires = ktime_add_ns(last_update, tick_period.tv64 *
29477+ delta_jiffies);
29478
29479 /*
29480 * If this cpu is the one which updates jiffies, then
29481@@ -369,19 +342,22 @@ void tick_nohz_stop_sched_tick(int inidle)
29482
29483 ts->idle_sleeps++;
29484
29485- /* Mark expires */
29486- ts->idle_expires = expires;
29487-
29488 /*
29489- * If the expiration time == KTIME_MAX, then
29490- * in this case we simply stop the tick timer.
29491+ * delta_jiffies >= NEXT_TIMER_MAX_DELTA signals that
29492+ * there is no timer pending or at least extremely far
29493+ * into the future (12 days for HZ=1000). In this case
29494+ * we simply stop the tick timer:
29495 */
29496- if (unlikely(expires.tv64 == KTIME_MAX)) {
29497+ if (unlikely(delta_jiffies >= NEXT_TIMER_MAX_DELTA)) {
29498+ ts->idle_expires.tv64 = KTIME_MAX;
29499 if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
29500 hrtimer_cancel(&ts->sched_timer);
29501 goto out;
29502 }
29503
29504+ /* Mark expiries */
29505+ ts->idle_expires = expires;
29506+
29507 if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
29508 hrtimer_start(&ts->sched_timer, expires,
29509 HRTIMER_MODE_ABS_PINNED);
29510@@ -710,6 +686,46 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
29511 } 3100 }
29512 3101
29513 /** 3102 /**
@@ -29554,7 +3143,7 @@ index 44320b1..dcbff75 100644
29554 * tick_setup_sched_timer - setup the tick emulation timer 3143 * tick_setup_sched_timer - setup the tick emulation timer
29555 */ 3144 */
29556 void tick_setup_sched_timer(void) 3145 void tick_setup_sched_timer(void)
29557@@ -726,9 +742,11 @@ void tick_setup_sched_timer(void) 3146@@ -702,9 +742,11 @@ void tick_setup_sched_timer(void)
29558 3147
29559 /* Get the next period (per cpu) */ 3148 /* Get the next period (per cpu) */
29560 hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update()); 3149 hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());
@@ -29569,44 +3158,6 @@ index 44320b1..dcbff75 100644
29569 hrtimer_add_expires_ns(&ts->sched_timer, offset); 3158 hrtimer_add_expires_ns(&ts->sched_timer, offset);
29570 3159
29571 for (;;) { 3160 for (;;) {
29572diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
29573index 8b709de..c3a4e29 100644
29574--- a/kernel/time/timekeeping.c
29575+++ b/kernel/time/timekeeping.c
29576@@ -488,17 +488,6 @@ int timekeeping_valid_for_hres(void)
29577 }
29578
29579 /**
29580- * timekeeping_max_deferment - Returns max time the clocksource can be deferred
29581- *
29582- * Caller must observe xtime_lock via read_seqbegin/read_seqretry to
29583- * ensure that the clocksource does not change!
29584- */
29585-u64 timekeeping_max_deferment(void)
29586-{
29587- return timekeeper.clock->max_idle_ns;
29588-}
29589-
29590-/**
29591 * read_persistent_clock - Return time from the persistent clock.
29592 *
29593 * Weak dummy function for arches that do not yet support it.
29594@@ -845,7 +834,6 @@ void getboottime(struct timespec *ts)
29595
29596 set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec);
29597 }
29598-EXPORT_SYMBOL_GPL(getboottime);
29599
29600 /**
29601 * monotonic_to_bootbased - Convert the monotonic time to boot based.
29602@@ -855,7 +843,6 @@ void monotonic_to_bootbased(struct timespec *ts)
29603 {
29604 *ts = timespec_add_safe(*ts, total_sleep_time);
29605 }
29606-EXPORT_SYMBOL_GPL(monotonic_to_bootbased);
29607
29608 unsigned long get_seconds(void)
29609 {
29610diff --git a/litmus/Kconfig b/litmus/Kconfig 3161diff --git a/litmus/Kconfig b/litmus/Kconfig
29611new file mode 100644 3162new file mode 100644
29612index 0000000..874794f 3163index 0000000..874794f
@@ -31337,10 +4888,10 @@ index 0000000..36e3146
31337+} 4888+}
31338diff --git a/litmus/litmus.c b/litmus/litmus.c 4889diff --git a/litmus/litmus.c b/litmus/litmus.c
31339new file mode 100644 4890new file mode 100644
31340index 0000000..3cf7cb9 4891index 0000000..e43596a
31341--- /dev/null 4892--- /dev/null
31342+++ b/litmus/litmus.c 4893+++ b/litmus/litmus.c
31343@@ -0,0 +1,699 @@ 4894@@ -0,0 +1,775 @@
31344+/* 4895+/*
31345+ * litmus.c -- Implementation of the LITMUS syscalls, 4896+ * litmus.c -- Implementation of the LITMUS syscalls,
31346+ * the LITMUS initialization code, 4897+ * the LITMUS initialization code,
@@ -31367,6 +4918,8 @@ index 0000000..3cf7cb9
31367+/* Number of RT tasks that exist in the system */ 4918+/* Number of RT tasks that exist in the system */
31368+atomic_t rt_task_count = ATOMIC_INIT(0); 4919+atomic_t rt_task_count = ATOMIC_INIT(0);
31369+static DEFINE_SPINLOCK(task_transition_lock); 4920+static DEFINE_SPINLOCK(task_transition_lock);
4921+/* synchronize plugin switching */
4922+atomic_t cannot_use_plugin = ATOMIC_INIT(0);
31370+ 4923+
31371+/* Give log messages sequential IDs. */ 4924+/* Give log messages sequential IDs. */
31372+atomic_t __log_seq_no = ATOMIC_INIT(0); 4925+atomic_t __log_seq_no = ATOMIC_INIT(0);
@@ -31712,13 +5265,17 @@ index 0000000..3cf7cb9
31712+ } 5265+ }
31713+} 5266+}
31714+ 5267+
5268+/* IPI callback to synchronize plugin switching */
5269+static void synch_on_plugin_switch(void* info)
5270+{
5271+ while (atomic_read(&cannot_use_plugin))
5272+ cpu_relax();
5273+}
5274+
31715+/* Switching a plugin in use is tricky. 5275+/* Switching a plugin in use is tricky.
31716+ * We must watch out that no real-time task exists 5276+ * We must watch out that no real-time task exists
31717+ * (and that none is created in parallel) and that the plugin is not 5277+ * (and that none is created in parallel) and that the plugin is not
31718+ * currently in use on any processor (in theory). 5278+ * currently in use on any processor (in theory).
31719+ *
31720+ * For now, we don't enforce the second part since it is unlikely to cause
31721+ * any trouble by itself as long as we don't unload modules.
31722+ */ 5279+ */
31723+int switch_sched_plugin(struct sched_plugin* plugin) 5280+int switch_sched_plugin(struct sched_plugin* plugin)
31724+{ 5281+{
@@ -31727,6 +5284,11 @@ index 0000000..3cf7cb9
31727+ 5284+
31728+ BUG_ON(!plugin); 5285+ BUG_ON(!plugin);
31729+ 5286+
5287+ /* forbid other cpus to use the plugin */
5288+ atomic_set(&cannot_use_plugin, 1);
5289+ /* send IPI to force other CPUs to synch with us */
5290+ smp_call_function(synch_on_plugin_switch, NULL, 0);
5291+
31730+ /* stop task transitions */ 5292+ /* stop task transitions */
31731+ spin_lock_irqsave(&task_transition_lock, flags); 5293+ spin_lock_irqsave(&task_transition_lock, flags);
31732+ 5294+
@@ -31747,6 +5309,7 @@ index 0000000..3cf7cb9
31747+ ret = -EBUSY; 5309+ ret = -EBUSY;
31748+out: 5310+out:
31749+ spin_unlock_irqrestore(&task_transition_lock, flags); 5311+ spin_unlock_irqrestore(&task_transition_lock, flags);
5312+ atomic_set(&cannot_use_plugin, 0);
31750+ return ret; 5313+ return ret;
31751+} 5314+}
31752+ 5315+
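The switch path above is a small rendezvous: the switching CPU raises cannot_use_plugin, IPIs the other CPUs into synch_on_plugin_switch(), performs the actual switch under task_transition_lock, and then lowers the flag to release the spinners. The same pattern transplanted to userspace with C11 atomics and pthreads, purely for illustration (the thread stands in for a remote CPU; nothing here is LITMUS API):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int cannot_use_plugin;

    /* plays the role of synch_on_plugin_switch() on a remote CPU */
    static void *remote_cpu(void *arg)
    {
        (void)arg;
        while (atomic_load(&cannot_use_plugin))
            /* cpu_relax() in the kernel; a plain spin suffices here */;
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        atomic_store(&cannot_use_plugin, 1);   /* forbid plugin use */
        pthread_create(&t, NULL, remote_cpu, NULL);

        /* ... the switch itself would happen here, with task
         * transitions stopped and rt_task_count checked ... */

        atomic_store(&cannot_use_plugin, 0);   /* release the spinners */
        pthread_join(t, NULL);
        puts("plugin switch complete");
        return 0;
    }

In the kernel version the IPI via smp_call_function() is what guarantees every other CPU has left the old plugin's code before the switch proceeds.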
@@ -31897,6 +5460,55 @@ index 0000000..3cf7cb9
31897+ return len; 5460+ return len;
31898+} 5461+}
31899+ 5462+
5463+static int proc_read_cluster_size(char *page, char **start,
5464+ off_t off, int count,
5465+ int *eof, void *data)
5466+{
5467+ int len;
5468+ if (cluster_cache_index == 2)
5469+ len = snprintf(page, PAGE_SIZE, "L2\n");
5470+ else if (cluster_cache_index == 3)
5471+ len = snprintf(page, PAGE_SIZE, "L3\n");
5472+ else /* (cluster_cache_index == 1) */
5473+ len = snprintf(page, PAGE_SIZE, "L1\n");
5474+
5475+ return len;
5476+}
5477+
5478+static int proc_write_cluster_size(struct file *file,
5479+ const char *buffer,
5480+ unsigned long count,
5481+ void *data)
5482+{
5483+ int len;
5484+ /* L2, L3 */
5485+ char cache_name[33];
5486+
5487+ if(count > 32)
5488+ len = 32;
5489+ else
5490+ len = count;
5491+
5492+ if(copy_from_user(cache_name, buffer, len))
5493+ return -EFAULT;
5494+
5495+ cache_name[len] = '\0';
5496+ /* chomp name */
5497+ if (len > 1 && cache_name[len - 1] == '\n')
5498+ cache_name[len - 1] = '\0';
5499+
5500+ /* do a quick and dirty comparison to find the cluster size */
5501+ if (!strcmp(cache_name, "L2"))
5502+ cluster_cache_index = 2;
5503+ else if (!strcmp(cache_name, "L3"))
5504+ cluster_cache_index = 3;
5505+ else if (!strcmp(cache_name, "L1"))
5506+ cluster_cache_index = 1;
5507+ else
5508+ printk(KERN_INFO "Cluster '%s' is unknown.\n", cache_name);
5509+
5510+ return len;
5511+}
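The handler above accepts the strings L1, L2, or L3 (a trailing newline is stripped) and maps them to cluster_cache_index values 1, 2, and 3; unknown names are only reported via printk(), so reading the file back is the way to confirm a setting took effect. Assuming the proc entry registered by init_litmus_proc() below lands under /proc/litmus, selecting L3 clustering from userspace is a single write — a hedged usage sketch:

    #include <stdio.h>

    int main(void)
    {
        /* path assumes litmus_dir is /proc/litmus, as set up below */
        FILE *f = fopen("/proc/litmus/cluster_cache", "w");
        if (!f) {
            perror("fopen");
            return 1;
        }
        fputs("L3\n", f);  /* the handler chomps the newline */
        fclose(f);
        return 0;
    }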
31900+ 5512+
31901+static int proc_read_release_master(char *page, char **start, 5513+static int proc_read_release_master(char *page, char **start,
31902+ off_t off, int count, 5514+ off_t off, int count,
@@ -31952,6 +5564,7 @@ index 0000000..3cf7cb9
31952+ *curr_file = NULL, 5564+ *curr_file = NULL,
31953+ *stat_file = NULL, 5565+ *stat_file = NULL,
31954+ *plugs_file = NULL, 5566+ *plugs_file = NULL,
5567+ *clus_cache_idx_file = NULL,
31955+ *release_master_file = NULL; 5568+ *release_master_file = NULL;
31956+ 5569+
31957+static int __init init_litmus_proc(void) 5570+static int __init init_litmus_proc(void)
@@ -31982,6 +5595,16 @@ index 0000000..3cf7cb9
31982+ release_master_file->read_proc = proc_read_release_master; 5595+ release_master_file->read_proc = proc_read_release_master;
31983+ release_master_file->write_proc = proc_write_release_master; 5596+ release_master_file->write_proc = proc_write_release_master;
31984+ 5597+
5598+ clus_cache_idx_file = create_proc_entry("cluster_cache",
5599+ 0644, litmus_dir);
5600+ if (!clus_cache_idx_file) {
5601+ printk(KERN_ERR "Could not allocate cluster_cache "
5602+ "procfs entry.\n");
5603+ return -ENOMEM;
5604+ }
5605+ clus_cache_idx_file->read_proc = proc_read_cluster_size;
5606+ clus_cache_idx_file->write_proc = proc_write_cluster_size;
5607+
31985+ stat_file = create_proc_read_entry("stats", 0444, litmus_dir, 5608+ stat_file = create_proc_read_entry("stats", 0444, litmus_dir,
31986+ proc_read_stats, NULL); 5609+ proc_read_stats, NULL);
31987+ 5610+
@@ -31999,6 +5622,10 @@ index 0000000..3cf7cb9
31999+ remove_proc_entry("stats", litmus_dir); 5622+ remove_proc_entry("stats", litmus_dir);
32000+ if (curr_file) 5623+ if (curr_file)
32001+ remove_proc_entry("active_plugin", litmus_dir); 5624+ remove_proc_entry("active_plugin", litmus_dir);
5625+ if (clus_cache_idx_file)
5626+ remove_proc_entry("cluster_cache", litmus_dir);
5627+ if (release_master_file)
5628+ remove_proc_entry("release_master", litmus_dir);
32002+ if (litmus_dir) 5629+ if (litmus_dir)
32003+ remove_proc_entry("litmus", NULL); 5630+ remove_proc_entry("litmus", NULL);
32004+} 5631+}
@@ -32042,10 +5669,10 @@ index 0000000..3cf7cb9
32042+module_exit(_exit_litmus); 5669+module_exit(_exit_litmus);
32043diff --git a/litmus/rt_domain.c b/litmus/rt_domain.c 5670diff --git a/litmus/rt_domain.c b/litmus/rt_domain.c
32044new file mode 100644 5671new file mode 100644
32045index 0000000..0ed6d5c 5672index 0000000..609ff0f
32046--- /dev/null 5673--- /dev/null
32047+++ b/litmus/rt_domain.c 5674+++ b/litmus/rt_domain.c
32048@@ -0,0 +1,306 @@ 5675@@ -0,0 +1,310 @@
32049+/* 5676+/*
32050+ * litmus/rt_domain.c 5677+ * litmus/rt_domain.c
32051+ * 5678+ *
@@ -32349,21 +5976,36 @@ index 0000000..0ed6d5c
32349+ task->rt_param.domain = rt; 5976+ task->rt_param.domain = rt;
32350+ 5977+
32351+ /* start release timer */ 5978+ /* start release timer */
5979+ TS_SCHED2_START(task);
5980+
32352+ arm_release_timer(rt); 5981+ arm_release_timer(rt);
5982+
5983+ TS_SCHED2_END(task);
32353+} 5984+}
32354+ 5985+
32355diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c 5986diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
32356new file mode 100644 5987new file mode 100644
32357index 0000000..d0767ce 5988index 0000000..da44b45
32358--- /dev/null 5989--- /dev/null
32359+++ b/litmus/sched_cedf.c 5990+++ b/litmus/sched_cedf.c
32360@@ -0,0 +1,727 @@ 5991@@ -0,0 +1,756 @@
32361+/* 5992+/*
32362+ * kernel/sched_cedf.c 5993+ * litmus/sched_cedf.c
5994+ *
5995+ * Implementation of the C-EDF scheduling algorithm.
5996+ *
5997+ * This implementation is based on G-EDF:
5998+ * - CPUs are clustered around L2 or L3 caches.
5999+ * - Cluster topology is automatically detected (this is arch-dependent
6000+ * and works only on x86 at the moment --- and only with modern
6001+ * cpus that export cpuid4 information)
6002+ * - The plugin _does not_ attempt to put tasks in the right cluster, i.e.
6003+ * the programmer needs to be aware of the topology to place tasks
6004+ * in the desired cluster
6005+ * - default clustering is around L2 cache (cache index = 2)
6006+ * supported clusters are: L1 (private cache: pedf), L2, L3
32363+ * 6007+ *
32364+ * Implementation of the Clustered EDF (C-EDF) scheduling algorithm. 6008+ * For details on functions, take a look at sched_gsn_edf.c
32365+ * Linking is included so that support for synchronization (e.g., through
32366+ * the implementation of a "CSN-EDF" algorithm) can be added later if desired.
32367+ * 6009+ *
32368+ * This version uses the simple approach and serializes all scheduling 6010+ * This version uses the simple approach and serializes all scheduling
32369+ * decisions by the use of a queue lock. This is probably not the 6011+ * decisions by the use of a queue lock. This is probably not the
@@ -32373,92 +6015,36 @@ index 0000000..d0767ce
32373+#include <linux/spinlock.h> 6015+#include <linux/spinlock.h>
32374+#include <linux/percpu.h> 6016+#include <linux/percpu.h>
32375+#include <linux/sched.h> 6017+#include <linux/sched.h>
32376+#include <linux/list.h>
32377+ 6018+
32378+#include <litmus/litmus.h> 6019+#include <litmus/litmus.h>
32379+#include <litmus/jobs.h> 6020+#include <litmus/jobs.h>
32380+#include <litmus/sched_plugin.h> 6021+#include <litmus/sched_plugin.h>
32381+#include <litmus/edf_common.h> 6022+#include <litmus/edf_common.h>
32382+#include <litmus/sched_trace.h> 6023+#include <litmus/sched_trace.h>
6024+
32383+#include <litmus/bheap.h> 6025+#include <litmus/bheap.h>
32384+ 6026+
32385+#include <linux/module.h> 6027+#include <linux/module.h>
32386+ 6028+
32387+/* Overview of C-EDF operations. 6029+/* forward declaration... a funny thing with C ;) */
32388+ * 6030+struct clusterdomain;
32389+ * link_task_to_cpu(T, cpu) - Low-level operation to update the linkage
32390+ * structure (NOT the actually scheduled
32391+ * task). If there is another linked task To
32392+ * already it will set To->linked_on = NO_CPU
32393+ * (thereby removing its association with this
32394+ * CPU). However, it will not requeue the
32395+ * previously linked task (if any). It will set
32396+ * T's state to RT_F_RUNNING and check whether
32397+ * it is already running somewhere else. If T
32398+ * is scheduled somewhere else it will link
32399+ * it to that CPU instead (and pull the linked
32400+ * task to cpu). T may be NULL.
32401+ *
32402+ * unlink(T) - Unlink removes T from all scheduler data
32403+ * structures. If it is linked to some CPU it
32404+ * will link NULL to that CPU. If it is
32405+ * currently queued in the cedf queue for
32406+ * a partition, it will be removed from
32407+ * the rt_domain. It is safe to call
32408+ * unlink(T) if T is not linked. T may not
32409+ * be NULL.
32410+ *
32411+ * requeue(T) - Requeue will insert T into the appropriate
32412+ * queue. If the system is in real-time mode and
32413+ * the T is released already, it will go into the
32414+ * ready queue. If the system is not in
32415+ * real-time mode, then T will go into the
32416+ * release queue. If T's release time is in the
32417+ * future, it will go into the release
32418+ * queue. That means that T's release time/job
32419+ * no/etc. has to be updated before requeue(T) is
32420+ * called. It is not safe to call requeue(T)
32421+ * when T is already queued. T may not be NULL.
32422+ *
32423+ * cedf_job_arrival(T) - This is the catch-all function when T enters
32424+ * the system after either a suspension or at a
32425+ * job release. It will queue T (which means it
32426+ * is not safe to call cedf_job_arrival(T) if
32427+ * T is already queued) and then check whether a
32428+ * preemption is necessary. If a preemption is
32429+ * necessary it will update the linkage
32430+ * accordingly and cause schedule() to be called
32431+ * (either with an IPI or need_resched). It is
32432+ * safe to call cedf_job_arrival(T) if T's
32433+ * next job has not been actually released yet
32434+ * (release time in the future). T will be put
32435+ * on the release queue in that case.
32436+ *
32437+ * job_completion(T) - Take care of everything that needs to be done
32438+ * to prepare T for its next release and place
32439+ * it in the right queue with
32440+ * cedf_job_arrival().
32441+ *
32442+ *
32443+ * When we know that T is linked to a CPU then link_task_to_cpu(NULL, CPU) is
32444+ * equivalent to unlink(T). Note that if you unlink a task from a CPU none of
32445+ * the functions will automatically propagate pending task from the ready queue
32446+ * to a linked task. This is the job of the calling function ( by means of
32447+ * __take_ready).
32448+ */
32449+ 6031+
32450+/* cpu_entry_t - maintain the linked and scheduled state 6032+/* cpu_entry_t - maintain the linked and scheduled state
6033+ *
6034+ * A cpu also contains a pointer to the cedf_domain_t cluster
6035+ * that owns it (struct clusterdomain*)
32451+ */ 6036+ */
32452+typedef struct { 6037+typedef struct {
32453+ int cpu; 6038+ int cpu;
6039+ struct clusterdomain* cluster; /* owning cluster */
32454+ struct task_struct* linked; /* only RT tasks */ 6040+ struct task_struct* linked; /* only RT tasks */
32455+ struct task_struct* scheduled; /* only RT tasks */ 6041+ struct task_struct* scheduled; /* only RT tasks */
32456+ struct list_head list;
32457+ atomic_t will_schedule; /* prevent unneeded IPIs */ 6042+ atomic_t will_schedule; /* prevent unneeded IPIs */
6043+ struct bheap_node* hn;
32458+} cpu_entry_t; 6044+} cpu_entry_t;
32459+DEFINE_PER_CPU(cpu_entry_t, cedf_cpu_entries);
32460+ 6045+
32461+cpu_entry_t* *cedf_cpu_entries_array; 6046+/* one cpu_entry_t per CPU */
6047+DEFINE_PER_CPU(cpu_entry_t, cedf_cpu_entries);
32462+ 6048+
32463+#define set_will_schedule() \ 6049+#define set_will_schedule() \
32464+ (atomic_set(&__get_cpu_var(cedf_cpu_entries).will_schedule, 1)) 6050+ (atomic_set(&__get_cpu_var(cedf_cpu_entries).will_schedule, 1))
@@ -32467,75 +6053,73 @@ index 0000000..d0767ce
32467+#define test_will_schedule(cpu) \ 6053+#define test_will_schedule(cpu) \
32468+ (atomic_read(&per_cpu(cedf_cpu_entries, cpu).will_schedule)) 6054+ (atomic_read(&per_cpu(cedf_cpu_entries, cpu).will_schedule))
32469+ 6055+
32470+/* Cluster size -- currently four. This is a variable to allow for 6056+/*
32471+ * the possibility of changing the cluster size online in the future. 6057+ * In C-EDF there is a cedf domain _per_ cluster
32472+ */ 6058+ * The number of clusters is dynamically determined according to the
32473+int cluster_size = 4; 6059+ * total cpu number and the cluster size
32474+ 6060+ */
32475+int do_cleanup = 1; 6061+typedef struct clusterdomain {
32476+ 6062+ /* rt_domain for this cluster */
32477+typedef struct { 6063+ rt_domain_t domain;
32478+ rt_domain_t domain; 6064+ /* cpus in this cluster */
32479+ int first_cpu; 6065+ cpu_entry_t* *cpus;
32480+ int last_cpu; 6066+ /* map of this cluster cpus */
32481+ 6067+ cpumask_var_t cpu_map;
32482+ /* the cpus queue themselves according to priority in here */ 6068+ /* the cpus queue themselves according to priority in here */
32483+ struct list_head cedf_cpu_queue; 6069+ struct bheap_node *heap_node;
32484+ 6070+ struct bheap cpu_heap;
32485+ /* per-partition spinlock: protects the domain and 6071+ /* lock for this cluster */
32486+ * serializes scheduling decisions 6072+#define lock domain.ready_lock
32487+ */
32488+#define slock domain.ready_lock
32489+} cedf_domain_t; 6073+} cedf_domain_t;
32490+ 6074+
32491+DEFINE_PER_CPU(cedf_domain_t*, cedf_domains) = NULL; 6075+/* a cedf_domain per cluster; allocation is done at init/activation time */
32492+ 6076+cedf_domain_t *cedf;
32493+cedf_domain_t* *cedf_domains_array;
32494+ 6077+
6078+#define remote_cluster(cpu) ((cedf_domain_t *) per_cpu(cedf_cpu_entries, cpu).cluster)
6079+#define task_cpu_cluster(task) remote_cluster(get_partition(task))
32495+ 6080+
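These two macros encode the lookup chain task -> assigned partition CPU -> per-CPU entry -> owning cluster, which is what keeps every hot-path cluster lookup O(1). A simplified stand-alone restatement of the same back-pointer arrangement, with plain arrays standing in for per_cpu data (all names here are illustrative, not LITMUS definitions):

    #include <stdio.h>

    struct cluster   { const char *name; };
    struct cpu_entry { int cpu; struct cluster *cluster; /* back-pointer */ };

    static struct cluster cluster0 = { "cluster 0 (shared L2)" };
    static struct cluster cluster1 = { "cluster 1 (shared L2)" };

    /* stand-in for the per-CPU cedf_cpu_entries */
    static struct cpu_entry cpu_entries[4] = {
        { 0, &cluster0 }, { 1, &cluster0 },
        { 2, &cluster1 }, { 3, &cluster1 },
    };

    struct task { int partition; };  /* get_partition(): the task's cpu */

    /* remote_cluster()/task_cpu_cluster() collapsed into one helper */
    static struct cluster *task_cpu_cluster(const struct task *t)
    {
        return cpu_entries[t->partition].cluster;
    }

    int main(void)
    {
        struct task t = { .partition = 2 };
        printf("task belongs to %s\n", task_cpu_cluster(&t)->name);
        return 0;
    }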
32496+/* These are defined similarly to partitioning, except that a 6081+/* Uncomment WANT_ALL_SCHED_EVENTS if you want to see all scheduling
32497+ * tasks partition is any cpu of the cluster to which it 6082+ * decisions in the TRACE() log; uncomment VERBOSE_INIT for verbose
32498+ * is assigned, typically the lowest-numbered cpu. 6083+ * information during the initialization of the plugin (e.g., topology)
6084+#define WANT_ALL_SCHED_EVENTS
32499+ */ 6085+ */
32500+#define local_edf (&__get_cpu_var(cedf_domains)->domain) 6086+#define VERBOSE_INIT
32501+#define local_cedf __get_cpu_var(cedf_domains) 6087+
32502+#define remote_edf(cpu) (&per_cpu(cedf_domains, cpu)->domain) 6088+static int cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b)
32503+#define remote_cedf(cpu) per_cpu(cedf_domains, cpu) 6089+{
32504+#define task_edf(task) remote_edf(get_partition(task)) 6090+ cpu_entry_t *a, *b;
32505+#define task_cedf(task) remote_cedf(get_partition(task)) 6091+ a = _a->value;
6092+ b = _b->value;
6093+ /* Note that a and b are inverted: we want the lowest-priority CPU at
6094+ * the top of the heap.
6095+ */
6096+ return edf_higher_prio(b->linked, a->linked);
6097+}
32506+ 6098+
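The argument inversion above is the whole trick: bheap_peek() with this comparator yields the CPU whose linked task has the *lowest* priority, i.e. exactly the CPU to inspect when checking for preemptions. The same inversion demonstrated stand-alone with integer deadlines, using qsort() in place of the kernel's bheap (smaller deadline = higher priority; everything here is illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    /* stand-in for edf_higher_prio(): the earlier deadline wins */
    static int higher_prio(int a, int b)
    {
        return a < b;
    }

    /* arguments deliberately swapped, as in cpu_lower_prio() above */
    static int cpu_lower_prio(const void *_a, const void *_b)
    {
        int a = *(const int *)_a;
        int b = *(const int *)_b;

        if (higher_prio(b, a))
            return -1;  /* b beats a, so a is "lower" and sorts first */
        if (higher_prio(a, b))
            return 1;
        return 0;
    }

    int main(void)
    {
        /* deadlines of the tasks currently linked to four CPUs */
        int deadlines[] = { 30, 10, 50, 20 };

        qsort(deadlines, 4, sizeof(deadlines[0]), cpu_lower_prio);

        /* front of the order = lowest-priority CPU = preemption candidate */
        printf("preempt the CPU running deadline %d first\n", deadlines[0]);
        return 0;
    }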
32507+/* update_cpu_position - Move the cpu entry to the correct place to maintain 6099+/* update_cpu_position - Move the cpu entry to the correct place to maintain
32508+ * order in the cpu queue. Caller must hold cedf lock. 6100+ * order in the cpu queue. Caller must hold cedf lock.
32509+ *
32510+ * This really should be a heap.
32511+ */ 6101+ */
32512+static void update_cpu_position(cpu_entry_t *entry) 6102+static void update_cpu_position(cpu_entry_t *entry)
32513+{ 6103+{
32514+ cpu_entry_t *other; 6104+ cedf_domain_t *cluster = entry->cluster;
32515+ struct list_head *cedf_cpu_queue =
32516+ &(remote_cedf(entry->cpu))->cedf_cpu_queue;
32517+ struct list_head *pos;
32518+ 6105+
32519+ BUG_ON(!cedf_cpu_queue); 6106+ if (likely(bheap_node_in_heap(entry->hn)))
6107+ bheap_delete(cpu_lower_prio,
6108+ &cluster->cpu_heap,
6109+ entry->hn);
32520+ 6110+
32521+ if (likely(in_list(&entry->list))) 6111+ bheap_insert(cpu_lower_prio, &cluster->cpu_heap, entry->hn);
32522+ list_del(&entry->list);
32523+ /* if we do not execute real-time jobs we just move
32524+ * to the end of the queue
32525+ */
32526+ if (entry->linked) {
32527+ list_for_each(pos, cedf_cpu_queue) {
32528+ other = list_entry(pos, cpu_entry_t, list);
32529+ if (edf_higher_prio(entry->linked, other->linked)) {
32530+ __list_add(&entry->list, pos->prev, pos);
32531+ return;
32532+ }
32533+ }
32534+ }
32535+ /* if we get this far we have the lowest priority job */
32536+ list_add_tail(&entry->list, cedf_cpu_queue);
32537+} 6112+}
32538+ 6113+
6114+/* caller must hold cedf lock */
6115+static cpu_entry_t* lowest_prio_cpu(cedf_domain_t *cluster)
6116+{
6117+ struct bheap_node* hn;
6118+ hn = bheap_peek(cpu_lower_prio, &cluster->cpu_heap);
6119+ return hn->value;
6120+}
6121+
6122+
32539+/* link_task_to_cpu - Update the link of a CPU. 6123+/* link_task_to_cpu - Update the link of a CPU.
32540+ * Handles the case where the to-be-linked task is already 6124+ * Handles the case where the to-be-linked task is already
32541+ * scheduled on a different CPU. 6125+ * scheduled on a different CPU.
@@ -32549,9 +6133,6 @@ index 0000000..d0767ce
32549+ 6133+
32550+ BUG_ON(linked && !is_realtime(linked)); 6134+ BUG_ON(linked && !is_realtime(linked));
32551+ 6135+
32552+ /* Cannot link task to a CPU that doesn't belong to its partition... */
32553+ BUG_ON(linked && remote_cedf(entry->cpu) != task_cedf(linked));
32554+
32555+ /* Currently linked task is set to be unlinked. */ 6136+ /* Currently linked task is set to be unlinked. */
32556+ if (entry->linked) { 6137+ if (entry->linked) {
32557+ entry->linked->rt_param.linked_on = NO_CPU; 6138+ entry->linked->rt_param.linked_on = NO_CPU;
@@ -32573,6 +6154,9 @@ index 0000000..d0767ce
32573+ * the caller to get things right. 6154+ * the caller to get things right.
32574+ */ 6155+ */
32575+ if (entry != sched) { 6156+ if (entry != sched) {
6157+ TRACE_TASK(linked,
6158+ "already scheduled on %d, updating link.\n",
6159+ sched->cpu);
32576+ tmp = sched->linked; 6160+ tmp = sched->linked;
32577+ linked->rt_param.linked_on = sched->cpu; 6161+ linked->rt_param.linked_on = sched->cpu;
32578+ sched->linked = linked; 6162+ sched->linked = linked;
@@ -32584,13 +6168,12 @@ index 0000000..d0767ce
32584+ linked->rt_param.linked_on = entry->cpu; 6168+ linked->rt_param.linked_on = entry->cpu;
32585+ } 6169+ }
32586+ entry->linked = linked; 6170+ entry->linked = linked;
32587+ 6171+#ifdef WANT_ALL_SCHED_EVENTS
32588+ if (entry->linked) 6172+ if (linked)
32589+ TRACE_TASK(entry->linked, "linked to CPU %d, state:%d\n", 6173+ TRACE_TASK(linked, "linked to %d.\n", entry->cpu);
32590+ entry->cpu, entry->linked->state);
32591+ else 6174+ else
32592+ TRACE("NULL linked to CPU %d\n", entry->cpu); 6175+ TRACE("NULL linked to %d.\n", entry->cpu);
32593+ 6176+#endif
32594+ update_cpu_position(entry); 6177+ update_cpu_position(entry);
32595+} 6178+}
32596+ 6179+
@@ -32606,6 +6189,7 @@ index 0000000..d0767ce
32606+ return; 6189+ return;
32607+ } 6190+ }
32608+ 6191+
6192+
32609+ if (t->rt_param.linked_on != NO_CPU) { 6193+ if (t->rt_param.linked_on != NO_CPU) {
32610+ /* unlink */ 6194+ /* unlink */
32611+ entry = &per_cpu(cedf_cpu_entries, t->rt_param.linked_on); 6195+ entry = &per_cpu(cedf_cpu_entries, t->rt_param.linked_on);
@@ -32618,95 +6202,105 @@ index 0000000..d0767ce
32618+ * been relinked to this CPU), thus it must be in some 6202+ * been relinked to this CPU), thus it must be in some
32619+ * queue. We must remove it from the list in this 6203+ * queue. We must remove it from the list in this
32620+ * case. 6204+ * case.
6205+ *
6206+ * in the C-EDF case it should be somewhere in the queue for
6207+ * its domain, therefore we can get the domain using
6208+ * task_cpu_cluster
32621+ */ 6209+ */
32622+ remove(task_edf(t), t); 6210+ remove(&(task_cpu_cluster(t))->domain, t);
32623+ } 6211+ }
32624+} 6212+}
32625+ 6213+
32626+ 6214+
32627+/* preempt - force a CPU to reschedule 6215+/* preempt - force a CPU to reschedule
32628+ */ 6216+ */
32629+static noinline void preempt(cpu_entry_t *entry) 6217+static void preempt(cpu_entry_t *entry)
32630+{ 6218+{
32631+ preempt_if_preemptable(entry->scheduled, entry->cpu); 6219+ preempt_if_preemptable(entry->scheduled, entry->cpu);
32632+} 6220+}
32633+ 6221+
32634+/* requeue - Put an unlinked task into c-edf domain. 6222+/* requeue - Put an unlinked task into c-edf domain.
32635+ * Caller must hold cedf_lock. 6223+ * Caller must hold cedf_lock.
32636+ */ 6224+ */
32637+static noinline void requeue(struct task_struct* task) 6225+static noinline void requeue(struct task_struct* task)
32638+{ 6226+{
32639+ cedf_domain_t* cedf; 6227+ cedf_domain_t *cluster = task_cpu_cluster(task);
32640+ rt_domain_t* edf;
32641+
32642+ BUG_ON(!task); 6228+ BUG_ON(!task);
32643+ /* sanity check rt_list before insertion */ 6229+ /* sanity check before insertion */
32644+ BUG_ON(is_queued(task)); 6230+ BUG_ON(is_queued(task));
32645+ 6231+
32646+ /* Get correct real-time domain. */
32647+ cedf = task_cedf(task);
32648+ edf = &cedf->domain;
32649+
32650+ if (is_released(task, litmus_clock())) 6232+ if (is_released(task, litmus_clock()))
32651+ __add_ready(edf, task); 6233+ __add_ready(&cluster->domain, task);
32652+ else { 6234+ else {
32653+ /* it has got to wait */ 6235+ /* it has got to wait */
32654+ add_release(edf, task); 6236+ add_release(&cluster->domain, task);
32655+ } 6237+ }
32656+} 6238+}
32657+ 6239+
32658+static void check_for_preemptions(cedf_domain_t* cedf) 6240+/* check for any necessary preemptions */
6241+static void check_for_preemptions(cedf_domain_t *cluster)
32659+{ 6242+{
32660+ cpu_entry_t *last;
32661+ struct task_struct *task; 6243+ struct task_struct *task;
32662+ struct list_head *cedf_cpu_queue; 6244+ cpu_entry_t* last;
32663+ cedf_cpu_queue = &cedf->cedf_cpu_queue;
32664+ 6245+
32665+ for(last = list_entry(cedf_cpu_queue->prev, cpu_entry_t, list); 6246+ for(last = lowest_prio_cpu(cluster);
32666+ edf_preemption_needed(&cedf->domain, last->linked); 6247+ edf_preemption_needed(&cluster->domain, last->linked);
32667+ last = list_entry(cedf_cpu_queue->prev, cpu_entry_t, list)) { 6248+ last = lowest_prio_cpu(cluster)) {
32668+ /* preemption necessary */ 6249+ /* preemption necessary */
32669+ task = __take_ready(&cedf->domain); 6250+ task = __take_ready(&cluster->domain);
32670+ TRACE("check_for_preemptions: task %d linked to %d, state:%d\n", 6251+ TRACE("check_for_preemptions: attempting to link task %d to %d\n",
32671+ task->pid, last->cpu, task->state); 6252+ task->pid, last->cpu);
32672+ if (last->linked) 6253+ if (last->linked)
32673+ requeue(last->linked); 6254+ requeue(last->linked);
32674+ link_task_to_cpu(task, last); 6255+ link_task_to_cpu(task, last);
32675+ preempt(last); 6256+ preempt(last);
32676+ } 6257+ }
32677+
32678+} 6258+}
32679+ 6259+
32680+/* cedf_job_arrival: task is either resumed or released */ 6260+/* cedf_job_arrival: task is either resumed or released */
32681+static noinline void cedf_job_arrival(struct task_struct* task) 6261+static noinline void cedf_job_arrival(struct task_struct* task)
32682+{ 6262+{
32683+ cedf_domain_t* cedf; 6263+ cedf_domain_t *cluster = task_cpu_cluster(task);
32684+ rt_domain_t* edf;
32685+
32686+ BUG_ON(!task); 6264+ BUG_ON(!task);
32687+ 6265+
32688+ /* Get correct real-time domain. */
32689+ cedf = task_cedf(task);
32690+ edf = &cedf->domain;
32691+
32692+ /* first queue arriving job */
32693+ requeue(task); 6266+ requeue(task);
32694+ 6267+ check_for_preemptions(cluster);
32695+ /* then check for any necessary preemptions */
32696+ check_for_preemptions(cedf);
32697+} 6268+}
32698+ 6269+
32699+/* check for current job releases */
32700+static void cedf_release_jobs(rt_domain_t* rt, struct bheap* tasks) 6270+static void cedf_release_jobs(rt_domain_t* rt, struct bheap* tasks)
32701+{ 6271+{
32702+ cedf_domain_t* cedf = container_of(rt, cedf_domain_t, domain); 6272+ cedf_domain_t* cluster = container_of(rt, cedf_domain_t, domain);
32703+ unsigned long flags; 6273+ unsigned long flags;
6274+
6275+ spin_lock_irqsave(&cluster->lock, flags);
6276+
6277+ __merge_ready(&cluster->domain, tasks);
6278+ check_for_preemptions(cluster);
6279+
6280+ spin_unlock_irqrestore(&cluster->lock, flags);
6281+}
6282+
6283+/* caller holds cedf_lock */
6284+static noinline void job_completion(struct task_struct *t, int forced)
6285+{
6286+ BUG_ON(!t);
6287+
6288+ sched_trace_task_completion(t, forced);
32704+ 6289+
32705+ spin_lock_irqsave(&cedf->slock, flags); 6290+ TRACE_TASK(t, "job_completion().\n");
32706+ 6291+
32707+ __merge_ready(&cedf->domain, tasks); 6292+ /* set flags */
32708+ check_for_preemptions(cedf); 6293+ set_rt_flags(t, RT_F_SLEEP);
32709+ spin_unlock_irqrestore(&cedf->slock, flags); 6294+ /* prepare for next period */
6295+ prepare_for_next_period(t);
6296+ if (is_released(t, litmus_clock()))
6297+ sched_trace_task_release(t);
6298+ /* unlink */
6299+ unlink(t);
6300+ /* requeue
6301+ * But don't requeue a blocking task. */
6302+ if (is_running(t))
6303+ cedf_job_arrival(t);
32710+} 6304+}
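job_completion() above strings together the per-job bookkeeping: flag the job as done, advance its release and deadline to the next period, unlink it, and requeue it unless the task blocked. What prepare_for_next_period() amounts to, as a hedged sketch assuming periodic, implicit-deadline semantics (the field names are simplified guesses at the rt_param job state, not LITMUS definitions):

    #include <stdio.h>

    struct job_params {
        unsigned long long release;   /* ns */
        unsigned long long deadline;  /* ns */
        unsigned long long period;    /* ns */
        unsigned long long job_no;
    };

    /* advance to the next job; implicit deadline = following release */
    static void prepare_for_next_period(struct job_params *j)
    {
        j->release += j->period;
        j->deadline = j->release + j->period;
        j->job_no++;
    }

    int main(void)
    {
        struct job_params j = { 0, 10000000ULL, 10000000ULL, 1 };

        prepare_for_next_period(&j);
        printf("job %llu: release=%llu deadline=%llu\n",
               j.job_no, j.release, j.deadline);
        return 0;
    }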
32711+ 6305+
32712+/* cedf_tick - this function is called for every local timer 6306+/* cedf_tick - this function is called for every local timer
@@ -32717,8 +6311,6 @@ index 0000000..d0767ce
32717+ */ 6311+ */
32718+static void cedf_tick(struct task_struct* t) 6312+static void cedf_tick(struct task_struct* t)
32719+{ 6313+{
32720+ BUG_ON(!t);
32721+
32722+ if (is_realtime(t) && budget_exhausted(t)) { 6314+ if (is_realtime(t) && budget_exhausted(t)) {
32723+ if (!is_np(t)) { 6315+ if (!is_np(t)) {
32724+ /* np tasks will be preempted when they become 6316+ /* np tasks will be preempted when they become
@@ -32727,38 +6319,17 @@ index 0000000..d0767ce
32727+ set_tsk_need_resched(t); 6319+ set_tsk_need_resched(t);
32728+ set_will_schedule(); 6320+ set_will_schedule();
32729+ TRACE("cedf_scheduler_tick: " 6321+ TRACE("cedf_scheduler_tick: "
32730+ "%d is preemptable (state:%d) " 6322+ "%d is preemptable "
32731+ " => FORCE_RESCHED\n", t->pid, t->state); 6323+ " => FORCE_RESCHED\n", t->pid);
32732+ } else if(is_user_np(t)) { 6324+ } else if (is_user_np(t)) {
32733+ TRACE("cedf_scheduler_tick: " 6325+ TRACE("cedf_scheduler_tick: "
32734+ "%d is non-preemptable (state:%d), " 6326+ "%d is non-preemptable, "
32735+ "preemption delayed.\n", t->pid, t->state); 6327+ "preemption delayed.\n", t->pid);
32736+ request_exit_np(t); 6328+ request_exit_np(t);
32737+ } 6329+ }
32738+ } 6330+ }
32739+} 6331+}
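cedf_tick() above implements budget enforcement at tick granularity: an exhausted budget forces a reschedule immediately, unless the task is inside a non-preemptive section, in which case the preemption is only requested and takes effect once the section ends. The decision in isolation, as a minimal sketch with stub flags (none of these helpers are the kernel's):

    #include <stdbool.h>
    #include <stdio.h>

    struct rt_task {
        long exec_time, budget;
        bool np;                /* inside a non-preemptive section? */
        bool need_resched;      /* stands in for set_tsk_need_resched() */
        bool np_exit_requested; /* stands in for request_exit_np() */
    };

    static bool budget_exhausted(const struct rt_task *t)
    {
        return t->exec_time >= t->budget;
    }

    /* the decision cedf_tick() makes once per timer tick */
    static void tick(struct rt_task *t)
    {
        if (!budget_exhausted(t))
            return;
        if (!t->np)
            t->need_resched = true;       /* preempt right away */
        else
            t->np_exit_requested = true;  /* preempt when the np section ends */
    }

    int main(void)
    {
        struct rt_task t = { .exec_time = 10, .budget = 10, .np = true };

        tick(&t);
        printf("need_resched=%d np_exit_requested=%d\n",
               t.need_resched, t.np_exit_requested);
        return 0;
    }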
32740+ 6332+
32741+/* caller holds cedf_lock */
32742+static noinline void job_completion(struct task_struct *t, int forced)
32743+{
32744+ BUG_ON(!t);
32745+
32746+ sched_trace_task_completion(t, forced);
32747+
32748+ TRACE_TASK(t, "job_completion(). [state:%d]\n", t->state);
32749+
32750+ /* set flags */
32751+ set_rt_flags(t, RT_F_SLEEP);
32752+ /* prepare for next period */
32753+ prepare_for_next_period(t);
32754+ /* unlink */
32755+ unlink(t);
32756+ /* requeue
32757+ * But don't requeue a blocking task. */
32758+ if (is_running(t))
32759+ cedf_job_arrival(t);
32760+}
32761+
32762+/* Getting schedule() right is a bit tricky. schedule() may not make any 6333+/* Getting schedule() right is a bit tricky. schedule() may not make any
32763+ * assumptions on the state of the current task since it may be called for a 6334+ * assumptions on the state of the current task since it may be called for a
32764+ * number of reasons. The reasons include a scheduler_tick() determined that it 6335+ * number of reasons. The reasons include a scheduler_tick() determined that it
@@ -32782,22 +6353,12 @@ index 0000000..d0767ce
 + */
 +static struct task_struct* cedf_schedule(struct task_struct * prev)
 +{
-+ cedf_domain_t* cedf = local_cedf;
-+ rt_domain_t* edf = &cedf->domain;
-+ cpu_entry_t* entry = &__get_cpu_var(cedf_cpu_entries);
-+ int out_of_time, sleep, preempt, np,
-+ exists, blocks;
-+ struct task_struct* next = NULL;
-+
-+ BUG_ON(!prev);
-+ BUG_ON(!cedf);
-+ BUG_ON(!edf);
-+ BUG_ON(!entry);
-+ BUG_ON(cedf != remote_cedf(entry->cpu));
-+ BUG_ON(is_realtime(prev) && cedf != task_cedf(prev));
-+
-+ /* Will be released in finish_switch. */
-+ spin_lock(&cedf->slock);
++ cpu_entry_t* entry = &__get_cpu_var(cedf_cpu_entries);
++ cedf_domain_t *cluster = entry->cluster;
++ int out_of_time, sleep, preempt, np, exists, blocks;
++ struct task_struct* next = NULL;
++
++ spin_lock(&cluster->lock);
 + clear_will_schedule();
 +
 + /* sanity checking */
@@ -32813,6 +6374,21 @@ index 0000000..d0767ce
 + sleep = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP;
 + preempt = entry->scheduled != entry->linked;
 +
++#ifdef WANT_ALL_SCHED_EVENTS
++ TRACE_TASK(prev, "invoked cedf_schedule.\n");
++#endif
++
++ if (exists)
++ TRACE_TASK(prev,
++ "blocks:%d out_of_time:%d np:%d sleep:%d preempt:%d "
++ "state:%d sig:%d\n",
++ blocks, out_of_time, np, sleep, preempt,
++ prev->state, signal_pending(prev));
++ if (entry->linked && preempt)
++ TRACE_TASK(prev, "will be preempted by %s/%d\n",
++ entry->linked->comm, entry->linked->pid);
++
++
 + /* If a task blocks we have no choice but to reschedule.
 + */
 + if (blocks)
@@ -32830,8 +6406,8 @@ index 0000000..d0767ce
 +
 + /* Any task that is preemptable and either exhausts its execution
 + * budget or wants to sleep completes. We may have to reschedule after
-+ * this. Don't do a job completion if blocks (can't have timers
-+ * running for blocked jobs). Preemption go first for the same reason.
++ * this. Don't do a job completion if we block (can't have timers running
++ * for blocked jobs). Preemption go first for the same reason.
 + */
 + if (!np && (out_of_time || sleep) && !blocks && !preempt)
 + job_completion(entry->scheduled, !sleep);
@@ -32839,10 +6415,10 @@ index 0000000..d0767ce
 + /* Link pending task if we became unlinked.
 + */
 + if (!entry->linked)
-+ link_task_to_cpu(__take_ready(edf), entry);
++ link_task_to_cpu(__take_ready(&cluster->domain), entry);
 +
 + /* The final scheduling decision. Do we need to switch for some reason?
-+ * If linked different from scheduled select linked as next.
++ * If linked is different from scheduled, then select linked as next.
 + */
 + if ((!np || blocks) &&
 + entry->linked != entry->scheduled) {
@@ -32851,76 +6427,91 @@ index 0000000..d0767ce
 + entry->linked->rt_param.scheduled_on = entry->cpu;
 + next = entry->linked;
 + }
 + if (entry->scheduled) {
 + /* not gonna be scheduled soon */
 + entry->scheduled->rt_param.scheduled_on = NO_CPU;
-+ TRACE_TASK(entry->scheduled, "cedf_schedule: scheduled_on = NO_CPU\n");
++ TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n");
 + }
 + } else
-+ /* Only override Linux scheduler if we have real-time task
++ /* Only override Linux scheduler if we have a real-time task
 + * scheduled that needs to continue.
 + */
 + if (exists)
 + next = prev;
 +
-+ spin_unlock(&cedf->slock);
++ spin_unlock(&cluster->lock);
++
++#ifdef WANT_ALL_SCHED_EVENTS
++ TRACE("cedf_lock released, next=0x%p\n", next);
++
++ if (next)
++ TRACE_TASK(next, "scheduled at %llu\n", litmus_clock());
++ else if (exists && !next)
++ TRACE("becomes idle at %llu.\n", litmus_clock());
++#endif
++
 +
 + return next;
 +}
 +
++
 +/* _finish_switch - we just finished the switch away from prev
 + */
 +static void cedf_finish_switch(struct task_struct *prev)
 +{
 + cpu_entry_t* entry = &__get_cpu_var(cedf_cpu_entries);
-+
-+ BUG_ON(!prev);
-+ BUG_ON(!entry);
 +
 + entry->scheduled = is_realtime(current) ? current : NULL;
++#ifdef WANT_ALL_SCHED_EVENTS
++ TRACE_TASK(prev, "switched away from\n");
++#endif
 +}
 +
++
 +/* Prepare a task for running in RT mode
 + */
-+static void cedf_task_new(struct task_struct *t, int on_rq, int running)
++static void cedf_task_new(struct task_struct * t, int on_rq, int running)
 +{
 + unsigned long flags;
-+ cedf_domain_t* cedf = task_cedf(t);
 + cpu_entry_t* entry;
++ cedf_domain_t* cluster;
++
++ TRACE("gsn edf: task new %d\n", t->pid);
 +
-+ BUG_ON(!cedf);
++ /* the cluster doesn't change even if t is running */
++ cluster = task_cpu_cluster(t);
++
++ spin_lock_irqsave(&cluster->domain.ready_lock, flags);
++
++ /* setup job params */
++ release_at(t, litmus_clock());
 +
-+ spin_lock_irqsave(&cedf->slock, flags);
 + if (running) {
 + entry = &per_cpu(cedf_cpu_entries, task_cpu(t));
-+ BUG_ON(!entry);
 + BUG_ON(entry->scheduled);
++
 + entry->scheduled = t;
-+ t->rt_param.scheduled_on = task_cpu(t);
-+ } else
++ tsk_rt(t)->scheduled_on = task_cpu(t);
++ } else {
 + t->rt_param.scheduled_on = NO_CPU;
-+ t->rt_param.linked_on = NO_CPU;
-+
-+ /* setup job params */
-+ release_at(t, litmus_clock());
++ }
++ t->rt_param.linked_on = NO_CPU;
 +
 + cedf_job_arrival(t);
-+ spin_unlock_irqrestore(&cedf->slock, flags);
++ spin_unlock_irqrestore(&(cluster->domain.ready_lock), flags);
 +}
 +
-+
 +static void cedf_task_wake_up(struct task_struct *task)
 +{
 + unsigned long flags;
-+ cedf_domain_t* cedf;
-+ lt_t now;
++ lt_t now;
++ cedf_domain_t *cluster;
 +
-+ BUG_ON(!task);
++ TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
 +
-+ cedf = task_cedf(task);
-+ BUG_ON(!cedf);
++ cluster = task_cpu_cluster(task);
 +
-+ spin_lock_irqsave(&cedf->slock, flags);
++ spin_lock_irqsave(&cluster->lock, flags);
 + /* We need to take suspensions because of semaphores into
 + * account! If a job resumes after being suspended due to acquiring
 + * a semaphore, it should never be treated as a new job release.
@@ -32934,48 +6525,49 @@ index 0000000..d0767ce
 + release_at(task, now);
 + sched_trace_task_release(task);
 + }
-+ else if (task->rt.time_slice)
-+ /* came back in time before deadline
-+ */
-+ set_rt_flags(task, RT_F_RUNNING);
++ else {
++ if (task->rt.time_slice) {
++ /* came back in time before deadline
++ */
++ set_rt_flags(task, RT_F_RUNNING);
++ }
++ }
 + }
 + cedf_job_arrival(task);
-+ spin_unlock_irqrestore(&cedf->slock, flags);
++ spin_unlock_irqrestore(&cluster->lock, flags);
 +}
 +
-+
 +static void cedf_task_block(struct task_struct *t)
 +{
 + unsigned long flags;
++ cedf_domain_t *cluster;
 +
-+ BUG_ON(!t);
++ TRACE_TASK(t, "block at %llu\n", litmus_clock());
 +
-+ /* unlink if necessary */
-+ spin_lock_irqsave(&task_cedf(t)->slock, flags);
++ cluster = task_cpu_cluster(t);
 +
-+ t->rt_param.scheduled_on = NO_CPU;
++ /* unlink if necessary */
++ spin_lock_irqsave(&cluster->lock, flags);
 + unlink(t);
-+
-+ spin_unlock_irqrestore(&task_cedf(t)->slock, flags);
++ spin_unlock_irqrestore(&cluster->lock, flags);
 +
 + BUG_ON(!is_realtime(t));
 +}
 +
++
 +static void cedf_task_exit(struct task_struct * t)
 +{
 + unsigned long flags;
-+
-+ BUG_ON(!t);
++ cedf_domain_t *cluster = task_cpu_cluster(t);
 +
 + /* unlink if necessary */
-+ spin_lock_irqsave(&task_cedf(t)->slock, flags);
++ spin_lock_irqsave(&cluster->lock, flags);
 + unlink(t);
 + if (tsk_rt(t)->scheduled_on != NO_CPU) {
-+ cedf_cpu_entries_array[tsk_rt(t)->scheduled_on]->
-+ scheduled = NULL;
++ cluster->cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL;
 + tsk_rt(t)->scheduled_on = NO_CPU;
 + }
-+ spin_unlock_irqrestore(&task_cedf(t)->slock, flags);
++ spin_unlock_irqrestore(&cluster->lock, flags);
 +
 + BUG_ON(!is_realtime(t));
 + TRACE_TASK(t, "RIP\n");
@@ -32983,108 +6575,176 @@ index 0000000..d0767ce
 +
 +static long cedf_admit_task(struct task_struct* tsk)
 +{
-+ return (task_cpu(tsk) >= task_cedf(tsk)->first_cpu &&
-+ task_cpu(tsk) <= task_cedf(tsk)->last_cpu) ? 0 : -EINVAL;
++ return task_cpu(tsk) == tsk->rt_param.task_params.cpu ? 0 : -EINVAL;
 +}
 +
++/* total number of cluster */
++static int num_clusters;
++/* we do not support cluster of different sizes */
++static unsigned int cluster_size;
 +
-+/* Plugin object */
-+static struct sched_plugin cedf_plugin __cacheline_aligned_in_smp = {
-+ .plugin_name = "C-EDF",
-+ .finish_switch = cedf_finish_switch,
-+ .tick = cedf_tick,
-+ .task_new = cedf_task_new,
-+ .complete_job = complete_job,
-+ .task_exit = cedf_task_exit,
-+ .schedule = cedf_schedule,
-+ .task_wake_up = cedf_task_wake_up,
-+ .task_block = cedf_task_block,
-+ .admit_task = cedf_admit_task
-+};
++#ifdef VERBOSE_INIT
++static void print_cluster_topology(cpumask_var_t mask, int cpu)
++{
++ int chk;
++ char buf[255];
++
++ chk = cpulist_scnprintf(buf, 254, mask);
++ buf[chk] = '\0';
++ printk(KERN_INFO "CPU = %d, shared cpu(s) = %s\n", cpu, buf);
 +
-+static void cedf_domain_init(int first_cpu, int last_cpu)
++}
++#endif
++
++static int clusters_allocated = 0;
++
++static void cleanup_cedf(void)
 +{
-+ int cpu;
++ int i;
++
++ if (clusters_allocated) {
++ for (i = 0; i < num_clusters; i++) {
++ kfree(cedf[i].cpus);
++ kfree(cedf[i].heap_node);
++ free_cpumask_var(cedf[i].cpu_map);
++ }
 +
-+ /* Create new domain for this cluster. */
-+ cedf_domain_t *new_cedf_domain = kmalloc(sizeof(*new_cedf_domain),
-+ GFP_KERNEL);
-+
-+ /* Initialize cluster domain. */
-+ edf_domain_init(&new_cedf_domain->domain, NULL,
-+ cedf_release_jobs);
-+ new_cedf_domain->first_cpu = first_cpu;
-+ new_cedf_domain->last_cpu = last_cpu;
-+ INIT_LIST_HEAD(&new_cedf_domain->cedf_cpu_queue);
-+
-+ /* Assign all cpus in cluster to point to this domain. */
-+ for (cpu = first_cpu; cpu <= last_cpu; cpu++) {
-+ remote_cedf(cpu) = new_cedf_domain;
-+ cedf_domains_array[cpu] = new_cedf_domain;
++ kfree(cedf);
 + }
 +}
 +
-+static int __init init_cedf(void)
++static long cedf_activate_plugin(void)
 +{
-+ int cpu;
++ int i, j, cpu, ccpu, cpu_count;
 + cpu_entry_t *entry;
 +
-+ /* num_online_cpus() should have been set already
-+ * if the number of available cpus is less then the cluster
-+ * size (currently 4) then it is pointless trying to use
-+ * CEDF, so we disable this plugin
-+ */
-+ if(num_online_cpus() < cluster_size) {
-+ printk(KERN_INFO "Not registering C-EDF plugin: "
-+ "Num Online Cpus (%d) < Min Cluster Size (%d)\n",
-+ num_online_cpus(), cluster_size);
-+ do_cleanup = 0;
-+ return 0;
++ cpumask_var_t mask;
++ int chk = 0;
++
++ /* de-allocate old clusters, if any */
++ cleanup_cedf();
++
++ printk(KERN_INFO "C-EDF: Activate Plugin, cache index = %d\n",
++ cluster_cache_index);
++
++ /* need to get cluster_size first */
++ if(!zalloc_cpumask_var(&mask, GFP_ATOMIC))
++ return -ENOMEM;
++
++ chk = get_shared_cpu_map(mask, 0, cluster_cache_index);
++ if (chk) {
++ /* if chk != 0 then it is the max allowed index */
++ printk(KERN_INFO "C-EDF: Cannot support cache index = %d\n",
++ cluster_cache_index);
++ printk(KERN_INFO "C-EDF: Using cache index = %d\n",
++ chk);
++ cluster_cache_index = chk;
 + }
 +
-+ /*
-+ * initialize short_cut for per-cpu cedf state;
-+ * there may be a problem here if someone removes a cpu
-+ * while we are doing this initialization... and if cpus
-+ * are added / removed later... is it a _real_ problem for cedf?
-+ */
-+ cedf_cpu_entries_array = kmalloc(
-+ sizeof(cpu_entry_t *) * num_online_cpus(),
-+ GFP_KERNEL);
++ cluster_size = cpumask_weight(mask);
 +
-+ cedf_domains_array = kmalloc(
-+ sizeof(cedf_domain_t *) * num_online_cpus(),
-+ GFP_KERNEL);
++ if ((num_online_cpus() % cluster_size) != 0) {
++ /* this can't be right, some cpus are left out */
++ printk(KERN_ERR "C-EDF: Trying to group %d cpus in %d!\n",
++ num_online_cpus(), cluster_size);
++ return -1;
++ }
 +
-+ /* initialize CPU state */
-+ for (cpu = 0; cpu < num_online_cpus(); cpu++) {
-+ entry = &per_cpu(cedf_cpu_entries, cpu);
-+ cedf_cpu_entries_array[cpu] = entry;
-+ atomic_set(&entry->will_schedule, 0);
-+ entry->linked = NULL;
-+ entry->scheduled = NULL;
-+ entry->cpu = cpu;
-+ INIT_LIST_HEAD(&entry->list);
++ num_clusters = num_online_cpus() / cluster_size;
++ printk(KERN_INFO "C-EDF: %d cluster(s) of size = %d\n",
++ num_clusters, cluster_size);
++
++ /* initialize clusters */
++ cedf = kmalloc(num_clusters * sizeof(cedf_domain_t), GFP_ATOMIC);
++ for (i = 0; i < num_clusters; i++) {
++
++ cedf[i].cpus = kmalloc(cluster_size * sizeof(cpu_entry_t),
++ GFP_ATOMIC);
++ cedf[i].heap_node = kmalloc(
++ cluster_size * sizeof(struct bheap_node),
++ GFP_ATOMIC);
++ bheap_init(&(cedf[i].cpu_heap));
++ edf_domain_init(&(cedf[i].domain), NULL, cedf_release_jobs);
++
++ if(!zalloc_cpumask_var(&cedf[i].cpu_map, GFP_ATOMIC))
++ return -ENOMEM;
++ }
++
++ /* cycle through cluster and add cpus to them */
++ for (i = 0; i < num_clusters; i++) {
++
++ for_each_online_cpu(cpu) {
++ /* check if the cpu is already in a cluster */
++ for (j = 0; j < num_clusters; j++)
++ if (cpumask_test_cpu(cpu, cedf[j].cpu_map))
++ break;
++ /* if it is in a cluster go to next cpu */
++ if (cpumask_test_cpu(cpu, cedf[j].cpu_map))
++ continue;
++
++ /* this cpu isn't in any cluster */
++ /* get the shared cpus */
++ get_shared_cpu_map(mask, cpu, cluster_cache_index);
++ cpumask_copy(cedf[i].cpu_map, mask);
++#ifdef VERBOSE_INIT
++ print_cluster_topology(mask, cpu);
++#endif
++ /* add cpus to current cluster and init cpu_entry_t */
++ cpu_count = 0;
++ for_each_cpu(ccpu, cedf[i].cpu_map) {
++
++ entry = &per_cpu(cedf_cpu_entries, ccpu);
++ cedf[i].cpus[cpu_count] = entry;
++ atomic_set(&entry->will_schedule, 0);
++ entry->cpu = ccpu;
++ entry->cluster = &cedf[i];
++ entry->hn = &(cedf[i].heap_node[cpu_count]);
++ bheap_node_init(&entry->hn, entry);
++
++ cpu_count++;
++
++ entry->linked = NULL;
++ entry->scheduled = NULL;
++ update_cpu_position(entry);
++ }
++ /* done with this cluster */
++ break;
++ }
 + }
 +
-+ /* initialize all cluster domains */
-+ for (cpu = 0; cpu < num_online_cpus(); cpu += cluster_size)
-+ cedf_domain_init(cpu, cpu+cluster_size-1);
++ free_cpumask_var(mask);
++ clusters_allocated = 1;
++ return 0;
++}
++
++/* Plugin object */
++static struct sched_plugin cedf_plugin __cacheline_aligned_in_smp = {
++ .plugin_name = "C-EDF",
++ .finish_switch = cedf_finish_switch,
++ .tick = cedf_tick,
++ .task_new = cedf_task_new,
++ .complete_job = complete_job,
++ .task_exit = cedf_task_exit,
++ .schedule = cedf_schedule,
++ .task_wake_up = cedf_task_wake_up,
++ .task_block = cedf_task_block,
++ .admit_task = cedf_admit_task,
++ .activate_plugin = cedf_activate_plugin,
++};
++
 +
++static int __init init_cedf(void)
++{
 + return register_sched_plugin(&cedf_plugin);
 +}
 +
 +static void clean_cedf(void)
 +{
-+ if(do_cleanup) {
-+ kfree(cedf_cpu_entries_array);
-+ kfree(cedf_domains_array);
-+ }
++ cleanup_cedf();
 +}
 +
 +module_init(init_cedf);
 +module_exit(clean_cedf);
-+
 diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
 new file mode 100644
 index 0000000..b9310dd
@@ -35147,10 +8807,10 @@ index 0000000..2ea3922
 +module_exit(clean_pfair);
 diff --git a/litmus/sched_plugin.c b/litmus/sched_plugin.c
 new file mode 100644
-index 0000000..bc7c0e9
+index 0000000..3767b30
 --- /dev/null
 +++ b/litmus/sched_plugin.c
-@@ -0,0 +1,257 @@
+@@ -0,0 +1,265 @@
 +/* sched_plugin.c -- core infrastructure for the scheduler plugin system
 + *
 + * This file includes the initialization of the plugin system, the no-op Linux
@@ -35324,6 +8984,14 @@ index 0000000..bc7c0e9
 +};
 +
 +/*
++ * The cluster size is needed in C-EDF: it makes sense only to cluster
++ * around L2 or L3, so if cluster_cache_index = 2 (default) we cluster
++ * all the CPUs that shares a L2 cache, while cluster_cache_index = 3
++ * we cluster all CPs that shares a L3 cache
++ */
++int cluster_cache_index = 2;
++
++/*
 + * The reference to current plugin that is used to schedule tasks within
 + * the system. It stores references to actual function implementations
 + * Should be initialized by calling "init_***_plugin()"
@@ -37029,1952 +10697,3 @@ index 0000000..4403769
 +}
 +
 +module_init(init_ft_overhead_trace);
37032diff --git a/net/ax25/ax25_out.c b/net/ax25/ax25_out.c
37033index 1491260..bf706f8 100644
37034--- a/net/ax25/ax25_out.c
37035+++ b/net/ax25/ax25_out.c
37036@@ -92,12 +92,6 @@ ax25_cb *ax25_send_frame(struct sk_buff *skb, int paclen, ax25_address *src, ax2
37037 #endif
37038 }
37039
37040- /*
37041- * There is one ref for the state machine; a caller needs
37042- * one more to put it back, just like with the existing one.
37043- */
37044- ax25_cb_hold(ax25);
37045-
37046 ax25_cb_add(ax25);
37047
37048 ax25->state = AX25_STATE_1;
37049diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
37050index 0b7f262..bd1c654 100644
37051--- a/net/bridge/netfilter/ebtables.c
37052+++ b/net/bridge/netfilter/ebtables.c
37053@@ -1406,9 +1406,6 @@ static int do_ebt_set_ctl(struct sock *sk,
37054 {
37055 int ret;
37056
37057- if (!capable(CAP_NET_ADMIN))
37058- return -EPERM;
37059-
37060 switch(cmd) {
37061 case EBT_SO_SET_ENTRIES:
37062 ret = do_replace(sock_net(sk), user, len);
37063@@ -1428,9 +1425,6 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
37064 struct ebt_replace tmp;
37065 struct ebt_table *t;
37066
37067- if (!capable(CAP_NET_ADMIN))
37068- return -EPERM;
37069-
37070 if (copy_from_user(&tmp, user, sizeof(tmp)))
37071 return -EFAULT;
37072
37073diff --git a/net/core/dev.c b/net/core/dev.c
37074index 584046e..fe10551 100644
37075--- a/net/core/dev.c
37076+++ b/net/core/dev.c
37077@@ -4860,11 +4860,6 @@ int register_netdevice(struct net_device *dev)
37078 rollback_registered(dev);
37079 dev->reg_state = NETREG_UNREGISTERED;
37080 }
37081- /*
37082- * Prevent userspace races by waiting until the network
37083- * device is fully setup before sending notifications.
37084- */
37085- rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
37086
37087 out:
37088 return ret;
37089@@ -5403,12 +5398,6 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
37090 /* Notify protocols, that a new device appeared. */
37091 call_netdevice_notifiers(NETDEV_REGISTER, dev);
37092
37093- /*
37094- * Prevent userspace races by waiting until the network
37095- * device is fully setup before sending notifications.
37096- */
37097- rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
37098-
37099 synchronize_net();
37100 err = 0;
37101 out:
37102diff --git a/net/core/dst.c b/net/core/dst.c
37103index cb1b348..57bc4d5 100644
37104--- a/net/core/dst.c
37105+++ b/net/core/dst.c
37106@@ -17,7 +17,6 @@
37107 #include <linux/string.h>
37108 #include <linux/types.h>
37109 #include <net/net_namespace.h>
37110-#include <linux/sched.h>
37111
37112 #include <net/dst.h>
37113
37114@@ -80,7 +79,6 @@ loop:
37115 while ((dst = next) != NULL) {
37116 next = dst->next;
37117 prefetch(&next->next);
37118- cond_resched();
37119 if (likely(atomic_read(&dst->__refcnt))) {
37120 last->next = dst;
37121 last = dst;
37122diff --git a/net/core/pktgen.c b/net/core/pktgen.c
37123index 6a993b1..6e79e96 100644
37124--- a/net/core/pktgen.c
37125+++ b/net/core/pktgen.c
37126@@ -3516,7 +3516,6 @@ static int pktgen_thread_worker(void *arg)
37127 wait_event_interruptible_timeout(t->queue,
37128 t->control != 0,
37129 HZ/10);
37130- try_to_freeze();
37131 continue;
37132 }
37133
37134diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
37135index d4fd895..eb42873 100644
37136--- a/net/core/rtnetlink.c
37137+++ b/net/core/rtnetlink.c
37138@@ -1334,11 +1334,13 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi
37139 case NETDEV_UNREGISTER:
37140 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
37141 break;
37142+ case NETDEV_REGISTER:
37143+ rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
37144+ break;
37145 case NETDEV_UP:
37146 case NETDEV_DOWN:
37147 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
37148 break;
37149- case NETDEV_REGISTER:
37150 case NETDEV_CHANGE:
37151 case NETDEV_GOING_DOWN:
37152 break;
37153diff --git a/net/core/sock.c b/net/core/sock.c
37154index 6605e75..7626b6a 100644
37155--- a/net/core/sock.c
37156+++ b/net/core/sock.c
37157@@ -1181,10 +1181,6 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
37158
37159 if (newsk->sk_prot->sockets_allocated)
37160 percpu_counter_inc(newsk->sk_prot->sockets_allocated);
37161-
37162- if (sock_flag(newsk, SOCK_TIMESTAMP) ||
37163- sock_flag(newsk, SOCK_TIMESTAMPING_RX_SOFTWARE))
37164- net_enable_timestamp();
37165 }
37166 out:
37167 return newsk;
37168diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
37169index 0030e73..5df2f6a 100644
37170--- a/net/ipv4/devinet.c
37171+++ b/net/ipv4/devinet.c
37172@@ -1450,7 +1450,6 @@ static struct devinet_sysctl_table {
37173 DEVINET_SYSCTL_RW_ENTRY(SEND_REDIRECTS, "send_redirects"),
37174 DEVINET_SYSCTL_RW_ENTRY(ACCEPT_SOURCE_ROUTE,
37175 "accept_source_route"),
37176- DEVINET_SYSCTL_RW_ENTRY(SRC_VMARK, "src_valid_mark"),
37177 DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP, "proxy_arp"),
37178 DEVINET_SYSCTL_RW_ENTRY(MEDIUM_ID, "medium_id"),
37179 DEVINET_SYSCTL_RW_ENTRY(BOOTP_RELAY, "bootp_relay"),
37180diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
37181index 29391ee..aa00398 100644
37182--- a/net/ipv4/fib_frontend.c
37183+++ b/net/ipv4/fib_frontend.c
37184@@ -251,8 +251,6 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
37185 if (in_dev) {
37186 no_addr = in_dev->ifa_list == NULL;
37187 rpf = IN_DEV_RPFILTER(in_dev);
37188- if (mark && !IN_DEV_SRC_VMARK(in_dev))
37189- fl.mark = 0;
37190 }
37191 rcu_read_unlock();
37192
37193diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
37194index 4d50daa..f989518 100644
37195--- a/net/ipv4/ip_output.c
37196+++ b/net/ipv4/ip_output.c
37197@@ -501,8 +501,8 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
37198 if (skb->sk) {
37199 frag->sk = skb->sk;
37200 frag->destructor = sock_wfree;
37201+ truesizes += frag->truesize;
37202 }
37203- truesizes += frag->truesize;
37204 }
37205
37206 /* Everything is OK. Generate! */
37207diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
37208index 98442f3..27774c9 100644
37209--- a/net/ipv4/netfilter/arp_tables.c
37210+++ b/net/ipv4/netfilter/arp_tables.c
37211@@ -925,10 +925,10 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
37212 if (t && !IS_ERR(t)) {
37213 struct arpt_getinfo info;
37214 const struct xt_table_info *private = t->private;
37215-#ifdef CONFIG_COMPAT
37216- struct xt_table_info tmp;
37217
37218+#ifdef CONFIG_COMPAT
37219 if (compat) {
37220+ struct xt_table_info tmp;
37221 ret = compat_table_info(private, &tmp);
37222 xt_compat_flush_offsets(NFPROTO_ARP);
37223 private = &tmp;
37224diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
37225index 62aff31..cde755d 100644
37226--- a/net/ipv4/netfilter/ip_tables.c
37227+++ b/net/ipv4/netfilter/ip_tables.c
37228@@ -1132,10 +1132,10 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
37229 if (t && !IS_ERR(t)) {
37230 struct ipt_getinfo info;
37231 const struct xt_table_info *private = t->private;
37232-#ifdef CONFIG_COMPAT
37233- struct xt_table_info tmp;
37234
37235+#ifdef CONFIG_COMPAT
37236 if (compat) {
37237+ struct xt_table_info tmp;
37238 ret = compat_table_info(private, &tmp);
37239 xt_compat_flush_offsets(AF_INET);
37240 private = &tmp;
37241diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
37242index 1032a15..aa95bb8 100644
37243--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
37244+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
37245@@ -213,7 +213,7 @@ static ctl_table ip_ct_sysctl_table[] = {
37246 {
37247 .ctl_name = NET_IPV4_NF_CONNTRACK_BUCKETS,
37248 .procname = "ip_conntrack_buckets",
37249- .data = &init_net.ct.htable_size,
37250+ .data = &nf_conntrack_htable_size,
37251 .maxlen = sizeof(unsigned int),
37252 .mode = 0444,
37253 .proc_handler = proc_dointvec,
37254diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
37255index 2fb7b76..8668a3d 100644
37256--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
37257+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
37258@@ -32,7 +32,7 @@ static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
37259 struct hlist_nulls_node *n;
37260
37261 for (st->bucket = 0;
37262- st->bucket < net->ct.htable_size;
37263+ st->bucket < nf_conntrack_htable_size;
37264 st->bucket++) {
37265 n = rcu_dereference(net->ct.hash[st->bucket].first);
37266 if (!is_a_nulls(n))
37267@@ -50,7 +50,7 @@ static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
37268 head = rcu_dereference(head->next);
37269 while (is_a_nulls(head)) {
37270 if (likely(get_nulls_value(head) == st->bucket)) {
37271- if (++st->bucket >= net->ct.htable_size)
37272+ if (++st->bucket >= nf_conntrack_htable_size)
37273 return NULL;
37274 }
37275 head = rcu_dereference(net->ct.hash[st->bucket].first);
37276diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
37277index 331ead3..fa2d6b6 100644
37278--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
37279+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
37280@@ -14,7 +14,6 @@
37281 #include <net/route.h>
37282 #include <net/ip.h>
37283
37284-#include <linux/netfilter_bridge.h>
37285 #include <linux/netfilter_ipv4.h>
37286 #include <net/netfilter/ipv4/nf_defrag_ipv4.h>
37287
37288@@ -35,20 +34,6 @@ static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
37289 return err;
37290 }
37291
37292-static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum,
37293- struct sk_buff *skb)
37294-{
37295-#ifdef CONFIG_BRIDGE_NETFILTER
37296- if (skb->nf_bridge &&
37297- skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
37298- return IP_DEFRAG_CONNTRACK_BRIDGE_IN;
37299-#endif
37300- if (hooknum == NF_INET_PRE_ROUTING)
37301- return IP_DEFRAG_CONNTRACK_IN;
37302- else
37303- return IP_DEFRAG_CONNTRACK_OUT;
37304-}
37305-
37306 static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
37307 struct sk_buff *skb,
37308 const struct net_device *in,
37309@@ -65,8 +50,10 @@ static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
37310 #endif
37311 /* Gather fragments. */
37312 if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
37313- enum ip_defrag_users user = nf_ct_defrag_user(hooknum, skb);
37314- if (nf_ct_ipv4_gather_frags(skb, user))
37315+ if (nf_ct_ipv4_gather_frags(skb,
37316+ hooknum == NF_INET_PRE_ROUTING ?
37317+ IP_DEFRAG_CONNTRACK_IN :
37318+ IP_DEFRAG_CONNTRACK_OUT))
37319 return NF_STOLEN;
37320 }
37321 return NF_ACCEPT;
37322diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
37323index 26066a2..fe1a644 100644
37324--- a/net/ipv4/netfilter/nf_nat_core.c
37325+++ b/net/ipv4/netfilter/nf_nat_core.c
37326@@ -35,6 +35,9 @@ static DEFINE_SPINLOCK(nf_nat_lock);
37327
37328 static struct nf_conntrack_l3proto *l3proto __read_mostly;
37329
37330+/* Calculated at init based on memory size */
37331+static unsigned int nf_nat_htable_size __read_mostly;
37332+
37333 #define MAX_IP_NAT_PROTO 256
37334 static const struct nf_nat_protocol *nf_nat_protos[MAX_IP_NAT_PROTO]
37335 __read_mostly;
37336@@ -69,7 +72,7 @@ EXPORT_SYMBOL_GPL(nf_nat_proto_put);
37337
37338 /* We keep an extra hash for each conntrack, for fast searching. */
37339 static inline unsigned int
37340-hash_by_src(const struct net *net, const struct nf_conntrack_tuple *tuple)
37341+hash_by_src(const struct nf_conntrack_tuple *tuple)
37342 {
37343 unsigned int hash;
37344
37345@@ -77,7 +80,7 @@ hash_by_src(const struct net *net, const struct nf_conntrack_tuple *tuple)
37346 hash = jhash_3words((__force u32)tuple->src.u3.ip,
37347 (__force u32)tuple->src.u.all,
37348 tuple->dst.protonum, 0);
37349- return ((u64)hash * net->ipv4.nat_htable_size) >> 32;
37350+ return ((u64)hash * nf_nat_htable_size) >> 32;
37351 }
37352
37353 /* Is this tuple already taken? (not by us) */
37354@@ -144,7 +147,7 @@ find_appropriate_src(struct net *net,
37355 struct nf_conntrack_tuple *result,
37356 const struct nf_nat_range *range)
37357 {
37358- unsigned int h = hash_by_src(net, tuple);
37359+ unsigned int h = hash_by_src(tuple);
37360 const struct nf_conn_nat *nat;
37361 const struct nf_conn *ct;
37362 const struct hlist_node *n;
37363@@ -327,7 +330,7 @@ nf_nat_setup_info(struct nf_conn *ct,
37364 if (have_to_hash) {
37365 unsigned int srchash;
37366
37367- srchash = hash_by_src(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
37368+ srchash = hash_by_src(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
37369 spin_lock_bh(&nf_nat_lock);
37370 /* nf_conntrack_alter_reply might re-allocate exntension aera */
37371 nat = nfct_nat(ct);
37372@@ -676,10 +679,8 @@ nfnetlink_parse_nat_setup(struct nf_conn *ct,
37373
37374 static int __net_init nf_nat_net_init(struct net *net)
37375 {
37376- /* Leave them the same for the moment. */
37377- net->ipv4.nat_htable_size = net->ct.htable_size;
37378- net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&net->ipv4.nat_htable_size,
37379- &net->ipv4.nat_vmalloced, 0);
37380+ net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size,
37381+ &net->ipv4.nat_vmalloced, 0);
37382 if (!net->ipv4.nat_bysource)
37383 return -ENOMEM;
37384 return 0;
37385@@ -702,7 +703,7 @@ static void __net_exit nf_nat_net_exit(struct net *net)
37386 nf_ct_iterate_cleanup(net, &clean_nat, NULL);
37387 synchronize_rcu();
37388 nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_vmalloced,
37389- net->ipv4.nat_htable_size);
37390+ nf_nat_htable_size);
37391 }
37392
37393 static struct pernet_operations nf_nat_net_ops = {
37394@@ -723,6 +724,9 @@ static int __init nf_nat_init(void)
37395 return ret;
37396 }
37397
37398+ /* Leave them the same for the moment. */
37399+ nf_nat_htable_size = nf_conntrack_htable_size;
37400+
37401 ret = register_pernet_subsys(&nf_nat_net_ops);
37402 if (ret < 0)
37403 goto cleanup_extend;
37404diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
37405index 4bac362..df159ff 100644
37406--- a/net/ipv6/exthdrs.c
37407+++ b/net/ipv6/exthdrs.c
37408@@ -559,11 +559,6 @@ static inline struct inet6_dev *ipv6_skb_idev(struct sk_buff *skb)
37409 return skb_dst(skb) ? ip6_dst_idev(skb_dst(skb)) : __in6_dev_get(skb->dev);
37410 }
37411
37412-static inline struct net *ipv6_skb_net(struct sk_buff *skb)
37413-{
37414- return skb_dst(skb) ? dev_net(skb_dst(skb)->dev) : dev_net(skb->dev);
37415-}
37416-
37417 /* Router Alert as of RFC 2711 */
37418
37419 static int ipv6_hop_ra(struct sk_buff *skb, int optoff)
37420@@ -585,8 +580,8 @@ static int ipv6_hop_ra(struct sk_buff *skb, int optoff)
37421 static int ipv6_hop_jumbo(struct sk_buff *skb, int optoff)
37422 {
37423 const unsigned char *nh = skb_network_header(skb);
37424- struct net *net = ipv6_skb_net(skb);
37425 u32 pkt_len;
37426+ struct net *net = dev_net(skb_dst(skb)->dev);
37427
37428 if (nh[optoff + 1] != 4 || (optoff & 3) != 2) {
37429 LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n",
37430diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
37431index 1de56fd..cc9f8ef 100644
37432--- a/net/ipv6/netfilter/ip6_tables.c
37433+++ b/net/ipv6/netfilter/ip6_tables.c
37434@@ -1164,10 +1164,10 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
37435 if (t && !IS_ERR(t)) {
37436 struct ip6t_getinfo info;
37437 const struct xt_table_info *private = t->private;
37438-#ifdef CONFIG_COMPAT
37439- struct xt_table_info tmp;
37440
37441+#ifdef CONFIG_COMPAT
37442 if (compat) {
37443+ struct xt_table_info tmp;
37444 ret = compat_table_info(private, &tmp);
37445 xt_compat_flush_offsets(AF_INET6);
37446 private = &tmp;
37447diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
37448index 0956eba..5f2ec20 100644
37449--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
37450+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
37451@@ -20,7 +20,6 @@
37452 #include <net/ipv6.h>
37453 #include <net/inet_frag.h>
37454
37455-#include <linux/netfilter_bridge.h>
37456 #include <linux/netfilter_ipv6.h>
37457 #include <net/netfilter/nf_conntrack.h>
37458 #include <net/netfilter/nf_conntrack_helper.h>
37459@@ -188,21 +187,6 @@ out:
37460 return nf_conntrack_confirm(skb);
37461 }
37462
37463-static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,
37464- struct sk_buff *skb)
37465-{
37466-#ifdef CONFIG_BRIDGE_NETFILTER
37467- if (skb->nf_bridge &&
37468- skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
37469- return IP6_DEFRAG_CONNTRACK_BRIDGE_IN;
37470-#endif
37471- if (hooknum == NF_INET_PRE_ROUTING)
37472- return IP6_DEFRAG_CONNTRACK_IN;
37473- else
37474- return IP6_DEFRAG_CONNTRACK_OUT;
37475-
37476-}
37477-
37478 static unsigned int ipv6_defrag(unsigned int hooknum,
37479 struct sk_buff *skb,
37480 const struct net_device *in,
37481@@ -215,7 +199,8 @@ static unsigned int ipv6_defrag(unsigned int hooknum,
37482 if (skb->nfct)
37483 return NF_ACCEPT;
37484
37485- reasm = nf_ct_frag6_gather(skb, nf_ct6_defrag_user(hooknum, skb));
37486+ reasm = nf_ct_frag6_gather(skb);
37487+
37488 /* queued */
37489 if (reasm == NULL)
37490 return NF_STOLEN;
37491diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
37492index 4b6a539..f3aba25 100644
37493--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
37494+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
37495@@ -170,14 +170,13 @@ out:
37496 /* Creation primitives. */
37497
37498 static __inline__ struct nf_ct_frag6_queue *
37499-fq_find(__be32 id, u32 user, struct in6_addr *src, struct in6_addr *dst)
37500+fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst)
37501 {
37502 struct inet_frag_queue *q;
37503 struct ip6_create_arg arg;
37504 unsigned int hash;
37505
37506 arg.id = id;
37507- arg.user = user;
37508 arg.src = src;
37509 arg.dst = dst;
37510
37511@@ -562,7 +561,7 @@ find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff)
37512 return 0;
37513 }
37514
37515-struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user)
37516+struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb)
37517 {
37518 struct sk_buff *clone;
37519 struct net_device *dev = skb->dev;
37520@@ -608,7 +607,7 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user)
37521 if (atomic_read(&nf_init_frags.mem) > nf_init_frags.high_thresh)
37522 nf_ct_frag6_evictor();
37523
37524- fq = fq_find(fhdr->identification, user, &hdr->saddr, &hdr->daddr);
37525+ fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr);
37526 if (fq == NULL) {
37527 pr_debug("Can't find and can't create new queue\n");
37528 goto ret_orig;
37529diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
37530index 4d18699..da5bd0e 100644
37531--- a/net/ipv6/reassembly.c
37532+++ b/net/ipv6/reassembly.c
37533@@ -72,7 +72,6 @@ struct frag_queue
37534 struct inet_frag_queue q;
37535
37536 __be32 id; /* fragment id */
37537- u32 user;
37538 struct in6_addr saddr;
37539 struct in6_addr daddr;
37540
37541@@ -142,7 +141,7 @@ int ip6_frag_match(struct inet_frag_queue *q, void *a)
37542 struct ip6_create_arg *arg = a;
37543
37544 fq = container_of(q, struct frag_queue, q);
37545- return (fq->id == arg->id && fq->user == arg->user &&
37546+ return (fq->id == arg->id &&
37547 ipv6_addr_equal(&fq->saddr, arg->src) &&
37548 ipv6_addr_equal(&fq->daddr, arg->dst));
37549 }
37550@@ -164,7 +163,6 @@ void ip6_frag_init(struct inet_frag_queue *q, void *a)
37551 struct ip6_create_arg *arg = a;
37552
37553 fq->id = arg->id;
37554- fq->user = arg->user;
37555 ipv6_addr_copy(&fq->saddr, arg->src);
37556 ipv6_addr_copy(&fq->daddr, arg->dst);
37557 }
37558@@ -246,7 +244,6 @@ fq_find(struct net *net, __be32 id, struct in6_addr *src, struct in6_addr *dst,
37559 unsigned int hash;
37560
37561 arg.id = id;
37562- arg.user = IP6_DEFRAG_LOCAL_DELIVER;
37563 arg.src = src;
37564 arg.dst = dst;
37565
37566diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
37567index fe2d3f8..7b5131b 100644
37568--- a/net/mac80211/cfg.c
37569+++ b/net/mac80211/cfg.c
37570@@ -338,8 +338,7 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
37571 sinfo->rx_packets = sta->rx_packets;
37572 sinfo->tx_packets = sta->tx_packets;
37573
37574- if ((sta->local->hw.flags & IEEE80211_HW_SIGNAL_DBM) ||
37575- (sta->local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)) {
37576+ if (sta->local->hw.flags & IEEE80211_HW_SIGNAL_DBM) {
37577 sinfo->filled |= STATION_INFO_SIGNAL;
37578 sinfo->signal = (s8)sta->last_signal;
37579 }
37580@@ -1306,9 +1305,6 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
37581 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
37582 struct ieee80211_conf *conf = &local->hw.conf;
37583
37584- if (sdata->vif.type != NL80211_IFTYPE_STATION)
37585- return -EOPNOTSUPP;
37586-
37587 if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS))
37588 return -EOPNOTSUPP;
37589
37590diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h
37591index d87645e..37b9051 100644
37592--- a/net/mac80211/driver-trace.h
37593+++ b/net/mac80211/driver-trace.h
37594@@ -655,7 +655,7 @@ TRACE_EVENT(drv_ampdu_action,
37595 __entry->ret = ret;
37596 __entry->action = action;
37597 __entry->tid = tid;
37598- __entry->ssn = ssn ? *ssn : 0;
37599+ __entry->ssn = *ssn;
37600 ),
37601
37602 TP_printk(
37603diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
37604index 07600a6..f1362f3 100644
37605--- a/net/mac80211/ibss.c
37606+++ b/net/mac80211/ibss.c
37607@@ -455,10 +455,6 @@ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata)
37608
37609 ieee80211_sta_expire(sdata, IEEE80211_IBSS_INACTIVITY_LIMIT);
37610
37611- if (time_before(jiffies, ifibss->last_scan_completed +
37612- IEEE80211_IBSS_MERGE_INTERVAL))
37613- return;
37614-
37615 if (ieee80211_sta_active_ibss(sdata))
37616 return;
37617
37618@@ -643,7 +639,7 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
37619 }
37620 if (pos[1] != 0 &&
37621 (pos[1] != ifibss->ssid_len ||
37622- memcmp(pos + 2, ifibss->ssid, ifibss->ssid_len))) {
37623+ !memcmp(pos + 2, ifibss->ssid, ifibss->ssid_len))) {
37624 /* Ignore ProbeReq for foreign SSID */
37625 return;
37626 }
37627diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
37628index 5a46164..10d316e 100644
37629--- a/net/mac80211/ieee80211_i.h
37630+++ b/net/mac80211/ieee80211_i.h
37631@@ -808,7 +808,6 @@ struct ieee80211_local {
37632 unsigned int wmm_acm; /* bit field of ACM bits (BIT(802.1D tag)) */
37633
37634 bool pspolling;
37635- bool scan_ps_enabled;
37636 /*
37637 * PS can only be enabled when we have exactly one managed
37638 * interface (and monitors) in PS, this then points there.
37639diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
37640index 079c500..b8295cb 100644
37641--- a/net/mac80211/iface.c
37642+++ b/net/mac80211/iface.c
37643@@ -15,14 +15,12 @@
37644 #include <linux/netdevice.h>
37645 #include <linux/rtnetlink.h>
37646 #include <net/mac80211.h>
37647-#include <net/ieee80211_radiotap.h>
37648 #include "ieee80211_i.h"
37649 #include "sta_info.h"
37650 #include "debugfs_netdev.h"
37651 #include "mesh.h"
37652 #include "led.h"
37653 #include "driver-ops.h"
37654-#include "wme.h"
37655
37656 /**
37657 * DOC: Interface list locking
37658@@ -644,12 +642,6 @@ static void ieee80211_teardown_sdata(struct net_device *dev)
37659 WARN_ON(flushed);
37660 }
37661
37662-static u16 ieee80211_netdev_select_queue(struct net_device *dev,
37663- struct sk_buff *skb)
37664-{
37665- return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb);
37666-}
37667-
37668 static const struct net_device_ops ieee80211_dataif_ops = {
37669 .ndo_open = ieee80211_open,
37670 .ndo_stop = ieee80211_stop,
37671@@ -658,35 +650,8 @@ static const struct net_device_ops ieee80211_dataif_ops = {
37672 .ndo_set_multicast_list = ieee80211_set_multicast_list,
37673 .ndo_change_mtu = ieee80211_change_mtu,
37674 .ndo_set_mac_address = eth_mac_addr,
37675- .ndo_select_queue = ieee80211_netdev_select_queue,
37676 };
37677
37678-static u16 ieee80211_monitor_select_queue(struct net_device *dev,
37679- struct sk_buff *skb)
37680-{
37681- struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
37682- struct ieee80211_local *local = sdata->local;
37683- struct ieee80211_hdr *hdr;
37684- struct ieee80211_radiotap_header *rtap = (void *)skb->data;
37685-
37686- if (local->hw.queues < 4)
37687- return 0;
37688-
37689- if (skb->len < 4 ||
37690- skb->len < le16_to_cpu(rtap->it_len) + 2 /* frame control */)
37691- return 0; /* doesn't matter, frame will be dropped */
37692-
37693- hdr = (void *)((u8 *)skb->data + le16_to_cpu(rtap->it_len));
37694-
37695- if (!ieee80211_is_data(hdr->frame_control)) {
37696- skb->priority = 7;
37697- return ieee802_1d_to_ac[skb->priority];
37698- }
37699-
37700- skb->priority = 0;
37701- return ieee80211_downgrade_queue(local, skb);
37702-}
37703-
37704 static const struct net_device_ops ieee80211_monitorif_ops = {
37705 .ndo_open = ieee80211_open,
37706 .ndo_stop = ieee80211_stop,
37707@@ -695,7 +660,6 @@ static const struct net_device_ops ieee80211_monitorif_ops = {
37708 .ndo_set_multicast_list = ieee80211_set_multicast_list,
37709 .ndo_change_mtu = ieee80211_change_mtu,
37710 .ndo_set_mac_address = eth_mac_addr,
37711- .ndo_select_queue = ieee80211_monitor_select_queue,
37712 };
37713
37714 static void ieee80211_if_setup(struct net_device *dev)
37715@@ -804,8 +768,8 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
37716
37717 ASSERT_RTNL();
37718
37719- ndev = alloc_netdev_mq(sizeof(*sdata) + local->hw.vif_data_size,
37720- name, ieee80211_if_setup, local->hw.queues);
37721+ ndev = alloc_netdev(sizeof(*sdata) + local->hw.vif_data_size,
37722+ name, ieee80211_if_setup);
37723 if (!ndev)
37724 return -ENOMEM;
37725 dev_net_set(ndev, wiphy_net(local->hw.wiphy));
37726diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
37727index 010ff2f..dd1c193 100644
37728--- a/net/mac80211/mesh.h
37729+++ b/net/mac80211/mesh.h
37730@@ -186,9 +186,8 @@ struct mesh_rmc {
37731 */
37732 #define MESH_PREQ_MIN_INT 10
37733 #define MESH_DIAM_TRAVERSAL_TIME 50
37734-/* A path will be refreshed if it is used PATH_REFRESH_TIME milliseconds before
37735- * timing out. This way it will remain ACTIVE and no data frames will be
37736- * unnecesarily held in the pending queue.
37737+/* Paths will be refreshed if they are closer than PATH_REFRESH_TIME to their
37738+ * expiration
37739 */
37740 #define MESH_PATH_REFRESH_TIME 1000
37741 #define MESH_MIN_DISCOVERY_TIMEOUT (2 * MESH_DIAM_TRAVERSAL_TIME)
37742diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
37743index 93c49fc..29b82e9 100644
37744--- a/net/mac80211/mesh_hwmp.c
37745+++ b/net/mac80211/mesh_hwmp.c
37746@@ -813,7 +813,7 @@ int mesh_nexthop_lookup(struct sk_buff *skb,
37747 }
37748
37749 if (mpath->flags & MESH_PATH_ACTIVE) {
37750- if (time_after(jiffies, mpath->exp_time -
37751+ if (time_after(jiffies, mpath->exp_time +
37752 msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time))
37753 && !memcmp(sdata->dev->dev_addr, hdr->addr4,
37754 ETH_ALEN)
37755diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
37756index 6cae295..dc5049d 100644
37757--- a/net/mac80211/mlme.c
37758+++ b/net/mac80211/mlme.c
37759@@ -904,14 +904,6 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
37760 sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL |
37761 IEEE80211_STA_BEACON_POLL);
37762
37763- /*
37764- * Always handle WMM once after association regardless
37765- * of the first value the AP uses. Setting -1 here has
37766- * that effect because the AP values is an unsigned
37767- * 4-bit value.
37768- */
37769- sdata->u.mgd.wmm_last_param_set = -1;
37770-
37771 ieee80211_led_assoc(local, 1);
37772
37773 sdata->vif.bss_conf.assoc = 1;
37774@@ -1953,9 +1945,7 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
37775 rma = ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len);
37776 break;
37777 case IEEE80211_STYPE_ACTION:
37778- if (mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT)
37779- break;
37780-
37781+ /* XXX: differentiate, can only happen for CSA now! */
37782 ieee80211_sta_process_chanswitch(sdata,
37783 &mgmt->u.action.u.chan_switch.sw_elem,
37784 ifmgd->associated);
37785diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
37786index 16c6cdc..7170bf4 100644
37787--- a/net/mac80211/rx.c
37788+++ b/net/mac80211/rx.c
37789@@ -1514,6 +1514,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
37790 mpp_path_add(mesh_hdr->eaddr2, hdr->addr4, sdata);
37791 } else {
37792 spin_lock_bh(&mppath->state_lock);
37793+ mppath->exp_time = jiffies;
37794 if (compare_ether_addr(mppath->mpp, hdr->addr4) != 0)
37795 memcpy(mppath->mpp, hdr->addr4, ETH_ALEN);
37796 spin_unlock_bh(&mppath->state_lock);
37797@@ -1548,9 +1549,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
37798 memset(info, 0, sizeof(*info));
37799 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
37800 info->control.vif = &rx->sdata->vif;
37801- skb_set_queue_mapping(skb,
37802- ieee80211_select_queue(rx->sdata, fwd_skb));
37803- ieee80211_set_qos_hdr(local, skb);
37804+ ieee80211_select_queue(local, fwd_skb);
37805 if (is_multicast_ether_addr(fwd_hdr->addr1))
37806 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
37807 fwded_mcast);
37808@@ -1810,10 +1809,6 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
37809 }
37810 break;
37811 default:
37812- /* do not process rejected action frames */
37813- if (mgmt->u.action.category & 0x80)
37814- return RX_DROP_MONITOR;
37815-
37816 return RX_CONTINUE;
37817 }
37818
37819diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
37820index 1a41909..71e10ca 100644
37821--- a/net/mac80211/scan.c
37822+++ b/net/mac80211/scan.c
37823@@ -196,8 +196,7 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
37824 static void ieee80211_scan_ps_enable(struct ieee80211_sub_if_data *sdata)
37825 {
37826 struct ieee80211_local *local = sdata->local;
37827-
37828- local->scan_ps_enabled = false;
37829+ bool ps = false;
37830
37831 /* FIXME: what to do when local->pspolling is true? */
37832
37833@@ -205,13 +204,12 @@ static void ieee80211_scan_ps_enable(struct ieee80211_sub_if_data *sdata)
37834 cancel_work_sync(&local->dynamic_ps_enable_work);
37835
37836 if (local->hw.conf.flags & IEEE80211_CONF_PS) {
37837- local->scan_ps_enabled = true;
37838+ ps = true;
37839 local->hw.conf.flags &= ~IEEE80211_CONF_PS;
37840 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
37841 }
37842
37843- if (!(local->scan_ps_enabled) ||
37844- !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK))
37845+ if (!ps || !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK))
37846 /*
37847 * If power save was enabled, no need to send a nullfunc
37848 * frame because AP knows that we are sleeping. But if the
37849@@ -232,7 +230,7 @@ static void ieee80211_scan_ps_disable(struct ieee80211_sub_if_data *sdata)
37850
37851 if (!local->ps_sdata)
37852 ieee80211_send_nullfunc(local, sdata, 0);
37853- else if (local->scan_ps_enabled) {
37854+ else {
37855 /*
37856 * In !IEEE80211_HW_PS_NULLFUNC_STACK case the hardware
37857 * will send a nullfunc frame with the powersave bit set
37858@@ -248,16 +246,6 @@ static void ieee80211_scan_ps_disable(struct ieee80211_sub_if_data *sdata)
37859 */
37860 local->hw.conf.flags |= IEEE80211_CONF_PS;
37861 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
37862- } else if (local->hw.conf.dynamic_ps_timeout > 0) {
37863- /*
37864- * If IEEE80211_CONF_PS was not set and the dynamic_ps_timer
37865- * had been running before leaving the operating channel,
37866- * restart the timer now and send a nullfunc frame to inform
37867- * the AP that we are awake.
37868- */
37869- ieee80211_send_nullfunc(local, sdata, 0);
37870- mod_timer(&local->dynamic_ps_timer, jiffies +
37871- msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
37872 }
37873 }
37874
37875@@ -276,14 +264,10 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
37876
37877 mutex_lock(&local->scan_mtx);
37878
37879- /*
37880- * It's ok to abort a not-yet-running scan (that
37881- * we have one at all will be verified by checking
37882- * local->scan_req next), but not to complete it
37883- * successfully.
37884- */
37885- if (WARN_ON(!local->scanning && !aborted))
37886- aborted = true;
37887+ if (WARN_ON(!local->scanning)) {
37888+ mutex_unlock(&local->scan_mtx);
37889+ return;
37890+ }
37891
37892 if (WARN_ON(!local->scan_req)) {
37893 mutex_unlock(&local->scan_mtx);
37894diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
37895index 441f68e..eaa4118 100644
37896--- a/net/mac80211/tx.c
37897+++ b/net/mac80211/tx.c
37898@@ -1401,7 +1401,6 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
37899
37900 if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) &&
37901 local->hw.conf.dynamic_ps_timeout > 0 &&
37902- !local->quiescing &&
37903 !(local->scanning) && local->ps_sdata) {
37904 if (local->hw.conf.flags & IEEE80211_CONF_PS) {
37905 ieee80211_stop_queues_by_reason(&local->hw,
37906@@ -1482,7 +1481,7 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
37907 return;
37908 }
37909
37910- ieee80211_set_qos_hdr(local, skb);
37911+ ieee80211_select_queue(local, skb);
37912 ieee80211_tx(sdata, skb, false);
37913 dev_put(sdata->dev);
37914 }
37915@@ -2226,9 +2225,6 @@ void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
37916 if (!encrypt)
37917 info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
37918
37919- /* send all internal mgmt frames on VO */
37920- skb_set_queue_mapping(skb, 0);
37921-
37922 /*
37923 * The other path calling ieee80211_xmit is from the tasklet,
37924 * and while we can handle concurrent transmissions locking
37925diff --git a/net/mac80211/util.c b/net/mac80211/util.c
37926index 553cffe..e6c08da 100644
37927--- a/net/mac80211/util.c
37928+++ b/net/mac80211/util.c
37929@@ -269,7 +269,6 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
37930 enum queue_stop_reason reason)
37931 {
37932 struct ieee80211_local *local = hw_to_local(hw);
37933- struct ieee80211_sub_if_data *sdata;
37934
37935 if (WARN_ON(queue >= hw->queues))
37936 return;
37937@@ -282,11 +281,6 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
37938
37939 if (!skb_queue_empty(&local->pending[queue]))
37940 tasklet_schedule(&local->tx_pending_tasklet);
37941-
37942- rcu_read_lock();
37943- list_for_each_entry_rcu(sdata, &local->interfaces, list)
37944- netif_tx_wake_queue(netdev_get_tx_queue(sdata->dev, queue));
37945- rcu_read_unlock();
37946 }
37947
37948 void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue,
37949@@ -311,17 +305,11 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
37950 enum queue_stop_reason reason)
37951 {
37952 struct ieee80211_local *local = hw_to_local(hw);
37953- struct ieee80211_sub_if_data *sdata;
37954
37955 if (WARN_ON(queue >= hw->queues))
37956 return;
37957
37958 __set_bit(reason, &local->queue_stop_reasons[queue]);
37959-
37960- rcu_read_lock();
37961- list_for_each_entry_rcu(sdata, &local->interfaces, list)
37962- netif_tx_stop_queue(netdev_get_tx_queue(sdata->dev, queue));
37963- rcu_read_unlock();
37964 }
37965
37966 void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue,
37967@@ -591,7 +579,7 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
37968 if (elen > left)
37969 break;
37970
37971- if (calc_crc && id < 64 && (filter & (1ULL << id)))
37972+ if (calc_crc && id < 64 && (filter & BIT(id)))
37973 crc = crc32_be(crc, pos - 2, elen + 2);
37974
37975 switch (id) {
37976@@ -1043,19 +1031,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
37977
37978 /* restart hardware */
37979 if (local->open_count) {
37980- /*
37981- * Upon resume hardware can sometimes be goofy due to
37982- * various platform / driver / bus issues, so restarting
37983- * the device may at times not work immediately. Propagate
37984- * the error.
37985- */
37986 res = drv_start(local);
37987- if (res) {
37988-			WARN(local->suspended, "Hardware became unavailable "
37989-			     "upon resume. This could be a software issue "
37990-			     "prior to suspend or a hardware issue\n");
37991- return res;
37992- }
37993
37994 ieee80211_led_radio(local, true);
37995 }
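
Earlier in this util.c diff, ieee802_11_parse_elems_crc() trades "1ULL << id" for BIT(id). Only the former is safe for element ids 32..63: the kernel's BIT() expands to "1UL << n", and unsigned long is 32 bits wide on 32-bit targets, so the shift would truncate the 64-bit filter. A minimal sketch of the 64-bit-safe test (editor's illustration, not part of the patch):

#include <stdint.h>

/* Include element 'id' in the CRC only if its bit is set in the
 * 64-bit filter; the 1ULL literal keeps the shift 64 bits wide
 * even on 32-bit machines. */
static int ie_in_crc_filter(uint8_t id, uint64_t filter)
{
	return id < 64 && (filter & (1ULL << id));
}
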
37996diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
37997index 6d32ebf..b19b769 100644
37998--- a/net/mac80211/wme.c
37999+++ b/net/mac80211/wme.c
38000@@ -44,62 +44,22 @@ static int wme_downgrade_ac(struct sk_buff *skb)
38001 }
38002
38003
38004-/* Indicate which queue to use. */
38005-u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
38006- struct sk_buff *skb)
38007+/* Indicate which queue to use. */
38008+static u16 classify80211(struct ieee80211_local *local, struct sk_buff *skb)
38009 {
38010- struct ieee80211_local *local = sdata->local;
38011- struct sta_info *sta = NULL;
38012- u32 sta_flags = 0;
38013- const u8 *ra = NULL;
38014- bool qos = false;
38015+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
38016
38017- if (local->hw.queues < 4 || skb->len < 6) {
38018- skb->priority = 0; /* required for correct WPA/11i MIC */
38019- return min_t(u16, local->hw.queues - 1,
38020- ieee802_1d_to_ac[skb->priority]);
38021- }
38022-
38023- rcu_read_lock();
38024- switch (sdata->vif.type) {
38025- case NL80211_IFTYPE_AP_VLAN:
38026- case NL80211_IFTYPE_AP:
38027- ra = skb->data;
38028- break;
38029- case NL80211_IFTYPE_WDS:
38030- ra = sdata->u.wds.remote_addr;
38031- break;
38032-#ifdef CONFIG_MAC80211_MESH
38033- case NL80211_IFTYPE_MESH_POINT:
38034- /*
38035- * XXX: This is clearly broken ... but already was before,
38036- * because ieee80211_fill_mesh_addresses() would clear A1
38037- * except for multicast addresses.
38038- */
38039- break;
38040-#endif
38041- case NL80211_IFTYPE_STATION:
38042- ra = sdata->u.mgd.bssid;
38043- break;
38044- case NL80211_IFTYPE_ADHOC:
38045- ra = skb->data;
38046- break;
38047- default:
38048- break;
38049+ if (!ieee80211_is_data(hdr->frame_control)) {
38050+ /* management frames go on AC_VO queue, but are sent
38051+ * without QoS control fields */
38052+ return 0;
38053 }
38054
38055- if (!sta && ra && !is_multicast_ether_addr(ra)) {
38056- sta = sta_info_get(local, ra);
38057- if (sta)
38058- sta_flags = get_sta_flags(sta);
38059+ if (0 /* injected */) {
38060+ /* use AC from radiotap */
38061 }
38062
38063- if (sta_flags & WLAN_STA_WME)
38064- qos = true;
38065-
38066- rcu_read_unlock();
38067-
38068- if (!qos) {
38069+ if (!ieee80211_is_data_qos(hdr->frame_control)) {
38070 skb->priority = 0; /* required for correct WPA/11i MIC */
38071 return ieee802_1d_to_ac[skb->priority];
38072 }
38073@@ -108,12 +68,6 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
38074 * data frame has */
38075 skb->priority = cfg80211_classify8021d(skb);
38076
38077- return ieee80211_downgrade_queue(local, skb);
38078-}
38079-
38080-u16 ieee80211_downgrade_queue(struct ieee80211_local *local,
38081- struct sk_buff *skb)
38082-{
38083 /* in case we are a client verify acm is not set for this ac */
38084 while (unlikely(local->wmm_acm & BIT(skb->priority))) {
38085 if (wme_downgrade_ac(skb)) {
38086@@ -131,17 +85,24 @@ u16 ieee80211_downgrade_queue(struct ieee80211_local *local,
38087 return ieee802_1d_to_ac[skb->priority];
38088 }
38089
38090-void ieee80211_set_qos_hdr(struct ieee80211_local *local, struct sk_buff *skb)
38091+void ieee80211_select_queue(struct ieee80211_local *local, struct sk_buff *skb)
38092 {
38093- struct ieee80211_hdr *hdr = (void *)skb->data;
38094-
38095- /* Fill in the QoS header if there is one. */
38096+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
38097+ u16 queue;
38098+ u8 tid;
38099+
38100+ queue = classify80211(local, skb);
38101+ if (unlikely(queue >= local->hw.queues))
38102+ queue = local->hw.queues - 1;
38103+
38104+ /*
38105+ * Now we know the 1d priority, fill in the QoS header if
38106+ * there is one (and we haven't done this before).
38107+ */
38108 if (ieee80211_is_data_qos(hdr->frame_control)) {
38109 u8 *p = ieee80211_get_qos_ctl(hdr);
38110- u8 ack_policy = 0, tid;
38111-
38112+ u8 ack_policy = 0;
38113 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
38114-
38115 if (unlikely(local->wifi_wme_noack_test))
38116 ack_policy |= QOS_CONTROL_ACK_POLICY_NOACK <<
38117 QOS_CONTROL_ACK_POLICY_SHIFT;
38118@@ -149,4 +110,6 @@ void ieee80211_set_qos_hdr(struct ieee80211_local *local, struct sk_buff *skb)
38119 *p++ = ack_policy | tid;
38120 *p = 0;
38121 }
38122+
38123+ skb_set_queue_mapping(skb, queue);
38124 }
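
For reference, classify80211() and ieee80211_select_queue() above lean on the ieee802_1d_to_ac[] table to turn an 802.1d user priority into one of four hardware queues. A stand-alone sketch of that mapping (editor's illustration, not part of the patch; the table follows mac80211's queue numbering, where 0 is voice):

#include <stdio.h>

/* 802.1d user priority -> access category, mac80211 queue numbering:
 * 0 = AC_VO (voice), 1 = AC_VI (video), 2 = AC_BE, 3 = AC_BK. */
static const int ieee802_1d_to_ac_sketch[8] = {
	2,	/* UP 0: best effort */
	3,	/* UP 1: background */
	3,	/* UP 2: background */
	2,	/* UP 3: best effort */
	1,	/* UP 4: video */
	1,	/* UP 5: video */
	0,	/* UP 6: voice */
	0,	/* UP 7: voice */
};

int main(void)
{
	int up;

	for (up = 0; up < 8; up++)
		printf("UP %d -> queue %d\n", up, ieee802_1d_to_ac_sketch[up]);
	return 0;
}
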
38125diff --git a/net/mac80211/wme.h b/net/mac80211/wme.h
38126index 6053b1c..d4fd87c 100644
38127--- a/net/mac80211/wme.h
38128+++ b/net/mac80211/wme.h
38129@@ -20,11 +20,7 @@
38130
38131 extern const int ieee802_1d_to_ac[8];
38132
38133-u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
38134- struct sk_buff *skb);
38135-void ieee80211_set_qos_hdr(struct ieee80211_local *local, struct sk_buff *skb);
38136-u16 ieee80211_downgrade_queue(struct ieee80211_local *local,
38137- struct sk_buff *skb);
38138-
38139+void ieee80211_select_queue(struct ieee80211_local *local,
38140+ struct sk_buff *skb);
38141
38142 #endif /* _WME_H */
38143diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
38144index 02b2610..446e9bd 100644
38145--- a/net/netfilter/ipvs/ip_vs_ctl.c
38146+++ b/net/netfilter/ipvs/ip_vs_ctl.c
38147@@ -2714,8 +2714,6 @@ static int ip_vs_genl_parse_service(struct ip_vs_service_user_kern *usvc,
38148 if (!(nla_af && (nla_fwmark || (nla_port && nla_protocol && nla_addr))))
38149 return -EINVAL;
38150
38151- memset(usvc, 0, sizeof(*usvc));
38152-
38153 usvc->af = nla_get_u16(nla_af);
38154 #ifdef CONFIG_IP_VS_IPV6
38155 if (usvc->af != AF_INET && usvc->af != AF_INET6)
38156@@ -2903,8 +2901,6 @@ static int ip_vs_genl_parse_dest(struct ip_vs_dest_user_kern *udest,
38157 if (!(nla_addr && nla_port))
38158 return -EINVAL;
38159
38160- memset(udest, 0, sizeof(*udest));
38161-
38162 nla_memcpy(&udest->addr, nla_addr, sizeof(udest->addr));
38163 udest->port = nla_get_u16(nla_port);
38164
38165diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
38166index 1374179..b9168c1 100644
38167--- a/net/netfilter/nf_conntrack_core.c
38168+++ b/net/netfilter/nf_conntrack_core.c
38169@@ -30,7 +30,6 @@
38170 #include <linux/netdevice.h>
38171 #include <linux/socket.h>
38172 #include <linux/mm.h>
38173-#include <linux/nsproxy.h>
38174 #include <linux/rculist_nulls.h>
38175
38176 #include <net/netfilter/nf_conntrack.h>
38177@@ -64,6 +63,8 @@ EXPORT_SYMBOL_GPL(nf_conntrack_max);
38178 struct nf_conn nf_conntrack_untracked __read_mostly;
38179 EXPORT_SYMBOL_GPL(nf_conntrack_untracked);
38180
38181+static struct kmem_cache *nf_conntrack_cachep __read_mostly;
38182+
38183 static int nf_conntrack_hash_rnd_initted;
38184 static unsigned int nf_conntrack_hash_rnd;
38185
38186@@ -85,10 +86,9 @@ static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
38187 return ((u64)h * size) >> 32;
38188 }
38189
38190-static inline u_int32_t hash_conntrack(const struct net *net,
38191- const struct nf_conntrack_tuple *tuple)
38192+static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple)
38193 {
38194- return __hash_conntrack(tuple, net->ct.htable_size,
38195+ return __hash_conntrack(tuple, nf_conntrack_htable_size,
38196 nf_conntrack_hash_rnd);
38197 }
38198
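
The hash_conntrack() change above drops the per-netns table size, but the underlying __hash_conntrack() trick is unchanged: a 32-bit hash is mapped into [0, size) with a widening multiply and shift rather than a modulo. In isolation (editor's sketch, not part of the patch):

#include <stdint.h>

/* Map a 32-bit hash uniformly into [0, size): fixed-point
 * (h / 2^32) * size. Avoids a division and works for any table
 * size, not just powers of two. */
static uint32_t scale_to_bucket(uint32_t h, uint32_t size)
{
	return ((uint64_t)h * size) >> 32;
}
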
38199@@ -296,7 +296,7 @@ __nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple)
38200 {
38201 struct nf_conntrack_tuple_hash *h;
38202 struct hlist_nulls_node *n;
38203- unsigned int hash = hash_conntrack(net, tuple);
38204+ unsigned int hash = hash_conntrack(tuple);
38205
38206 /* Disable BHs the entire time since we normally need to disable them
38207 * at least once for the stats anyway.
38208@@ -366,11 +366,10 @@ static void __nf_conntrack_hash_insert(struct nf_conn *ct,
38209
38210 void nf_conntrack_hash_insert(struct nf_conn *ct)
38211 {
38212- struct net *net = nf_ct_net(ct);
38213 unsigned int hash, repl_hash;
38214
38215- hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
38216- repl_hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
38217+ hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
38218+ repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
38219
38220 __nf_conntrack_hash_insert(ct, hash, repl_hash);
38221 }
38222@@ -398,8 +397,8 @@ __nf_conntrack_confirm(struct sk_buff *skb)
38223 if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
38224 return NF_ACCEPT;
38225
38226- hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
38227- repl_hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
38228+ hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
38229+ repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
38230
38231 /* We're not in hash table, and we refuse to set up related
38232 connections for unconfirmed conns. But packet copies and
38233@@ -469,7 +468,7 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
38234 struct net *net = nf_ct_net(ignored_conntrack);
38235 struct nf_conntrack_tuple_hash *h;
38236 struct hlist_nulls_node *n;
38237- unsigned int hash = hash_conntrack(net, tuple);
38238+ unsigned int hash = hash_conntrack(tuple);
38239
38240 /* Disable BHs the entire time since we need to disable them at
38241 * least once for the stats anyway.
38242@@ -504,7 +503,7 @@ static noinline int early_drop(struct net *net, unsigned int hash)
38243 int dropped = 0;
38244
38245 rcu_read_lock();
38246- for (i = 0; i < net->ct.htable_size; i++) {
38247+ for (i = 0; i < nf_conntrack_htable_size; i++) {
38248 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash],
38249 hnnode) {
38250 tmp = nf_ct_tuplehash_to_ctrack(h);
38251@@ -518,8 +517,7 @@ static noinline int early_drop(struct net *net, unsigned int hash)
38252 ct = NULL;
38253 if (ct || cnt >= NF_CT_EVICTION_RANGE)
38254 break;
38255-
38256- hash = (hash + 1) % net->ct.htable_size;
38257+ hash = (hash + 1) % nf_conntrack_htable_size;
38258 }
38259 rcu_read_unlock();
38260
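
early_drop() above walks a bounded window of the table starting at the original bucket, wrapping with "(hash + 1) % size", and gives up after NF_CT_EVICTION_RANGE candidates. A simplified model of that bounded probe (editor's sketch, not part of the patch; 'evictable' is a hypothetical stand-in for the real per-entry checks):

/* Probe up to 'range' buckets starting at 'start', wrapping around
 * the table; return the first bucket the callback accepts, or -1. */
static int bounded_probe(unsigned int start, unsigned int size,
			 unsigned int range,
			 int (*evictable)(unsigned int bucket))
{
	unsigned int hash = start, cnt;

	for (cnt = 0; cnt < range; cnt++) {
		if (evictable(hash))
			return (int)hash;
		hash = (hash + 1) % size;	/* wrap to the next chain */
	}
	return -1;				/* nothing droppable in range */
}
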
38261@@ -553,7 +551,7 @@ struct nf_conn *nf_conntrack_alloc(struct net *net,
38262
38263 if (nf_conntrack_max &&
38264 unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
38265- unsigned int hash = hash_conntrack(net, orig);
38266+ unsigned int hash = hash_conntrack(orig);
38267 if (!early_drop(net, hash)) {
38268 atomic_dec(&net->ct.count);
38269 if (net_ratelimit())
38270@@ -568,7 +566,7 @@ struct nf_conn *nf_conntrack_alloc(struct net *net,
38271 * Do not use kmem_cache_zalloc(), as this cache uses
38272 * SLAB_DESTROY_BY_RCU.
38273 */
38274- ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp);
38275+ ct = kmem_cache_alloc(nf_conntrack_cachep, gfp);
38276 if (ct == NULL) {
38277 pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n");
38278 atomic_dec(&net->ct.count);
38279@@ -607,7 +605,7 @@ void nf_conntrack_free(struct nf_conn *ct)
38280 nf_ct_ext_destroy(ct);
38281 atomic_dec(&net->ct.count);
38282 nf_ct_ext_free(ct);
38283- kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
38284+ kmem_cache_free(nf_conntrack_cachep, ct);
38285 }
38286 EXPORT_SYMBOL_GPL(nf_conntrack_free);
38287
38288@@ -1010,7 +1008,7 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
38289 struct hlist_nulls_node *n;
38290
38291 spin_lock_bh(&nf_conntrack_lock);
38292- for (; *bucket < net->ct.htable_size; (*bucket)++) {
38293+ for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
38294 hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) {
38295 ct = nf_ct_tuplehash_to_ctrack(h);
38296 if (iter(ct, data))
38297@@ -1109,12 +1107,9 @@ static void nf_ct_release_dying_list(struct net *net)
38298
38299 static void nf_conntrack_cleanup_init_net(void)
38300 {
38301- /* wait until all references to nf_conntrack_untracked are dropped */
38302- while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1)
38303- schedule();
38304-
38305 nf_conntrack_helper_fini();
38306 nf_conntrack_proto_fini();
38307+ kmem_cache_destroy(nf_conntrack_cachep);
38308 }
38309
38310 static void nf_conntrack_cleanup_net(struct net *net)
38311@@ -1126,14 +1121,15 @@ static void nf_conntrack_cleanup_net(struct net *net)
38312 schedule();
38313 goto i_see_dead_people;
38314 }
38315+ /* wait until all references to nf_conntrack_untracked are dropped */
38316+ while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1)
38317+ schedule();
38318
38319 nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
38320- net->ct.htable_size);
38321+ nf_conntrack_htable_size);
38322 nf_conntrack_ecache_fini(net);
38323 nf_conntrack_acct_fini(net);
38324 nf_conntrack_expect_fini(net);
38325- kmem_cache_destroy(net->ct.nf_conntrack_cachep);
38326- kfree(net->ct.slabname);
38327 free_percpu(net->ct.stat);
38328 }
38329
38330@@ -1188,12 +1184,10 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
38331 {
38332 int i, bucket, vmalloced, old_vmalloced;
38333 unsigned int hashsize, old_size;
38334+ int rnd;
38335 struct hlist_nulls_head *hash, *old_hash;
38336 struct nf_conntrack_tuple_hash *h;
38337
38338- if (current->nsproxy->net_ns != &init_net)
38339- return -EOPNOTSUPP;
38340-
38341 /* On boot, we can set this without any fancy locking. */
38342 if (!nf_conntrack_htable_size)
38343 return param_set_uint(val, kp);
38344@@ -1206,29 +1200,33 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
38345 if (!hash)
38346 return -ENOMEM;
38347
38348+	/* We have to rehash for the new table anyway, so we can also
38349+	 * use a new random seed */
38350+ get_random_bytes(&rnd, sizeof(rnd));
38351+
38352 /* Lookups in the old hash might happen in parallel, which means we
38353 * might get false negatives during connection lookup. New connections
38354 * created because of a false negative won't make it into the hash
38355 * though since that required taking the lock.
38356 */
38357 spin_lock_bh(&nf_conntrack_lock);
38358- for (i = 0; i < init_net.ct.htable_size; i++) {
38359+ for (i = 0; i < nf_conntrack_htable_size; i++) {
38360 while (!hlist_nulls_empty(&init_net.ct.hash[i])) {
38361 h = hlist_nulls_entry(init_net.ct.hash[i].first,
38362 struct nf_conntrack_tuple_hash, hnnode);
38363 hlist_nulls_del_rcu(&h->hnnode);
38364- bucket = __hash_conntrack(&h->tuple, hashsize,
38365- nf_conntrack_hash_rnd);
38366+ bucket = __hash_conntrack(&h->tuple, hashsize, rnd);
38367 hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
38368 }
38369 }
38370- old_size = init_net.ct.htable_size;
38371+ old_size = nf_conntrack_htable_size;
38372 old_vmalloced = init_net.ct.hash_vmalloc;
38373 old_hash = init_net.ct.hash;
38374
38375- init_net.ct.htable_size = nf_conntrack_htable_size = hashsize;
38376+ nf_conntrack_htable_size = hashsize;
38377 init_net.ct.hash_vmalloc = vmalloced;
38378 init_net.ct.hash = hash;
38379+ nf_conntrack_hash_rnd = rnd;
38380 spin_unlock_bh(&nf_conntrack_lock);
38381
38382 nf_ct_free_hashtable(old_hash, old_vmalloced, old_size);
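
The resize path above moves every entry into a freshly allocated table and, per the comment, takes the opportunity to pick a new random seed so bucket positions stay unpredictable. A minimal singly-linked variant of that rehash loop (editor's sketch, not part of the patch; the node layout and seeded hash are hypothetical, and the caller is assumed to hold the table lock):

#include <stdint.h>

struct node {
	struct node *next;
	uint32_t key;
};

static uint32_t bucket_of(uint32_t key, uint32_t rnd, unsigned int size)
{
	return ((uint64_t)(key ^ rnd) * size) >> 32;	/* seeded scaling */
}

static void rehash_all(struct node **old_tbl, unsigned int old_size,
		       struct node **new_tbl, unsigned int new_size,
		       uint32_t new_rnd)
{
	unsigned int i;

	for (i = 0; i < old_size; i++) {
		while (old_tbl[i]) {
			struct node *n = old_tbl[i];
			unsigned int b = bucket_of(n->key, new_rnd, new_size);

			old_tbl[i] = n->next;	/* unlink from the old chain */
			n->next = new_tbl[b];	/* push onto the new chain */
			new_tbl[b] = n;
		}
	}
}
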
38383@@ -1267,6 +1265,15 @@ static int nf_conntrack_init_init_net(void)
38384 NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
38385 nf_conntrack_max);
38386
38387+ nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
38388+ sizeof(struct nf_conn),
38389+ 0, SLAB_DESTROY_BY_RCU, NULL);
38390+ if (!nf_conntrack_cachep) {
38391+ printk(KERN_ERR "Unable to create nf_conn slab cache\n");
38392+ ret = -ENOMEM;
38393+ goto err_cache;
38394+ }
38395+
38396 ret = nf_conntrack_proto_init();
38397 if (ret < 0)
38398 goto err_proto;
38399@@ -1275,19 +1282,13 @@ static int nf_conntrack_init_init_net(void)
38400 if (ret < 0)
38401 goto err_helper;
38402
38403- /* Set up fake conntrack: to never be deleted, not in any hashes */
38404-#ifdef CONFIG_NET_NS
38405- nf_conntrack_untracked.ct_net = &init_net;
38406-#endif
38407- atomic_set(&nf_conntrack_untracked.ct_general.use, 1);
38408-	/* - and make it look like a confirmed connection */
38409- set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status);
38410-
38411 return 0;
38412
38413 err_helper:
38414 nf_conntrack_proto_fini();
38415 err_proto:
38416+ kmem_cache_destroy(nf_conntrack_cachep);
38417+err_cache:
38418 return ret;
38419 }
38420
38421@@ -1309,24 +1310,7 @@ static int nf_conntrack_init_net(struct net *net)
38422 ret = -ENOMEM;
38423 goto err_stat;
38424 }
38425-
38426- net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
38427- if (!net->ct.slabname) {
38428- ret = -ENOMEM;
38429- goto err_slabname;
38430- }
38431-
38432- net->ct.nf_conntrack_cachep = kmem_cache_create(net->ct.slabname,
38433- sizeof(struct nf_conn), 0,
38434- SLAB_DESTROY_BY_RCU, NULL);
38435- if (!net->ct.nf_conntrack_cachep) {
38436- printk(KERN_ERR "Unable to create nf_conn slab cache\n");
38437- ret = -ENOMEM;
38438- goto err_cache;
38439- }
38440-
38441- net->ct.htable_size = nf_conntrack_htable_size;
38442- net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size,
38443+ net->ct.hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size,
38444 &net->ct.hash_vmalloc, 1);
38445 if (!net->ct.hash) {
38446 ret = -ENOMEM;
38447@@ -1343,6 +1327,15 @@ static int nf_conntrack_init_net(struct net *net)
38448 if (ret < 0)
38449 goto err_ecache;
38450
38451+ /* Set up fake conntrack:
38452+ - to never be deleted, not in any hashes */
38453+#ifdef CONFIG_NET_NS
38454+ nf_conntrack_untracked.ct_net = &init_net;
38455+#endif
38456+ atomic_set(&nf_conntrack_untracked.ct_general.use, 1);
38457+	/* - and make it look like a confirmed connection */
38458+ set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status);
38459+
38460 return 0;
38461
38462 err_ecache:
38463@@ -1351,12 +1344,8 @@ err_acct:
38464 nf_conntrack_expect_fini(net);
38465 err_expect:
38466 nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
38467- net->ct.htable_size);
38468+ nf_conntrack_htable_size);
38469 err_hash:
38470- kmem_cache_destroy(net->ct.nf_conntrack_cachep);
38471-err_cache:
38472- kfree(net->ct.slabname);
38473-err_slabname:
38474 free_percpu(net->ct.stat);
38475 err_stat:
38476 return ret;
38477diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
38478index e73eb04..2032dfe 100644
38479--- a/net/netfilter/nf_conntrack_expect.c
38480+++ b/net/netfilter/nf_conntrack_expect.c
38481@@ -569,7 +569,7 @@ static void exp_proc_remove(struct net *net)
38482 #endif /* CONFIG_PROC_FS */
38483 }
38484
38485-module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400);
38486+module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0600);
38487
38488 int nf_conntrack_expect_init(struct net *net)
38489 {
38490@@ -577,7 +577,7 @@ int nf_conntrack_expect_init(struct net *net)
38491
38492 if (net_eq(net, &init_net)) {
38493 if (!nf_ct_expect_hsize) {
38494- nf_ct_expect_hsize = net->ct.htable_size / 256;
38495+ nf_ct_expect_hsize = nf_conntrack_htable_size / 256;
38496 if (!nf_ct_expect_hsize)
38497 nf_ct_expect_hsize = 1;
38498 }
38499diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
38500index 7dfd469..5509dd1 100644
38501--- a/net/netfilter/nf_conntrack_ftp.c
38502+++ b/net/netfilter/nf_conntrack_ftp.c
38503@@ -323,24 +323,24 @@ static void update_nl_seq(struct nf_conn *ct, u32 nl_seq,
38504 struct nf_ct_ftp_master *info, int dir,
38505 struct sk_buff *skb)
38506 {
38507- unsigned int i, oldest;
38508+ unsigned int i, oldest = NUM_SEQ_TO_REMEMBER;
38509
38510 /* Look for oldest: if we find exact match, we're done. */
38511 for (i = 0; i < info->seq_aft_nl_num[dir]; i++) {
38512 if (info->seq_aft_nl[dir][i] == nl_seq)
38513 return;
38514+
38515+ if (oldest == info->seq_aft_nl_num[dir] ||
38516+ before(info->seq_aft_nl[dir][i],
38517+ info->seq_aft_nl[dir][oldest]))
38518+ oldest = i;
38519 }
38520
38521 if (info->seq_aft_nl_num[dir] < NUM_SEQ_TO_REMEMBER) {
38522 info->seq_aft_nl[dir][info->seq_aft_nl_num[dir]++] = nl_seq;
38523- } else {
38524- if (before(info->seq_aft_nl[dir][0], info->seq_aft_nl[dir][1]))
38525- oldest = 0;
38526- else
38527- oldest = 1;
38528-
38529- if (after(nl_seq, info->seq_aft_nl[dir][oldest]))
38530- info->seq_aft_nl[dir][oldest] = nl_seq;
38531+ } else if (oldest != NUM_SEQ_TO_REMEMBER &&
38532+ after(nl_seq, info->seq_aft_nl[dir][oldest])) {
38533+ info->seq_aft_nl[dir][oldest] = nl_seq;
38534 }
38535 }
38536
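
The update_nl_seq() rework above folds the oldest-slot search into the same loop that checks for an exact match, instead of special-casing the two-entry buffer afterwards. The same shape on a plain array, with a safe sentinel and before() modeled as the usual wrap-safe serial-number comparison (editor's sketch, not part of the patch):

#include <stdint.h>

static int seq_before(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;	/* wrap-safe: is a older than b? */
}

/* Return the slot holding the oldest sequence number, or -1 if 'seq'
 * is already remembered. */
static int oldest_or_match(const uint32_t *seqs, int n, uint32_t seq)
{
	int i, oldest = -1;

	for (i = 0; i < n; i++) {
		if (seqs[i] == seq)
			return -1;
		if (oldest < 0 || seq_before(seqs[i], seqs[oldest]))
			oldest = i;
	}
	return oldest;
}
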
38537diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
38538index 4b1a56b..65c2a7b 100644
38539--- a/net/netfilter/nf_conntrack_helper.c
38540+++ b/net/netfilter/nf_conntrack_helper.c
38541@@ -192,7 +192,7 @@ static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me,
38542 /* Get rid of expecteds, set helpers to NULL. */
38543 hlist_nulls_for_each_entry(h, nn, &net->ct.unconfirmed, hnnode)
38544 unhelp(h, me);
38545- for (i = 0; i < net->ct.htable_size; i++) {
38546+ for (i = 0; i < nf_conntrack_htable_size; i++) {
38547 hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode)
38548 unhelp(h, me);
38549 }
38550diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
38551index d521718..59d8064 100644
38552--- a/net/netfilter/nf_conntrack_netlink.c
38553+++ b/net/netfilter/nf_conntrack_netlink.c
38554@@ -594,7 +594,7 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
38555
38556 rcu_read_lock();
38557 last = (struct nf_conn *)cb->args[1];
38558- for (; cb->args[0] < init_net.ct.htable_size; cb->args[0]++) {
38559+ for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) {
38560 restart:
38561 hlist_nulls_for_each_entry_rcu(h, n, &init_net.ct.hash[cb->args[0]],
38562 hnnode) {
38563diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
38564index 1a84bf6..1935153 100644
38565--- a/net/netfilter/nf_conntrack_standalone.c
38566+++ b/net/netfilter/nf_conntrack_standalone.c
38567@@ -51,7 +51,7 @@ static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
38568 struct hlist_nulls_node *n;
38569
38570 for (st->bucket = 0;
38571- st->bucket < net->ct.htable_size;
38572+ st->bucket < nf_conntrack_htable_size;
38573 st->bucket++) {
38574 n = rcu_dereference(net->ct.hash[st->bucket].first);
38575 if (!is_a_nulls(n))
38576@@ -69,7 +69,7 @@ static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
38577 head = rcu_dereference(head->next);
38578 while (is_a_nulls(head)) {
38579 if (likely(get_nulls_value(head) == st->bucket)) {
38580- if (++st->bucket >= net->ct.htable_size)
38581+ if (++st->bucket >= nf_conntrack_htable_size)
38582 return NULL;
38583 }
38584 head = rcu_dereference(net->ct.hash[st->bucket].first);
38585@@ -358,7 +358,7 @@ static ctl_table nf_ct_sysctl_table[] = {
38586 {
38587 .ctl_name = NET_NF_CONNTRACK_BUCKETS,
38588 .procname = "nf_conntrack_buckets",
38589- .data = &init_net.ct.htable_size,
38590+ .data = &nf_conntrack_htable_size,
38591 .maxlen = sizeof(unsigned int),
38592 .mode = 0444,
38593 .proc_handler = proc_dointvec,
38594@@ -429,7 +429,6 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net)
38595 goto out_kmemdup;
38596
38597 table[1].data = &net->ct.count;
38598- table[2].data = &net->ct.htable_size;
38599 table[3].data = &net->ct.sysctl_checksum;
38600 table[4].data = &net->ct.sysctl_log_invalid;
38601
38602diff --git a/net/netfilter/xt_conntrack.c b/net/netfilter/xt_conntrack.c
38603index ae66305..6dc4652 100644
38604--- a/net/netfilter/xt_conntrack.c
38605+++ b/net/netfilter/xt_conntrack.c
38606@@ -113,8 +113,7 @@ ct_proto_port_check(const struct xt_conntrack_mtinfo2 *info,
38607 }
38608
38609 static bool
38610-conntrack_mt(const struct sk_buff *skb, const struct xt_match_param *par,
38611- u16 state_mask, u16 status_mask)
38612+conntrack_mt(const struct sk_buff *skb, const struct xt_match_param *par)
38613 {
38614 const struct xt_conntrack_mtinfo2 *info = par->matchinfo;
38615 enum ip_conntrack_info ctinfo;
38616@@ -137,7 +136,7 @@ conntrack_mt(const struct sk_buff *skb, const struct xt_match_param *par,
38617 if (test_bit(IPS_DST_NAT_BIT, &ct->status))
38618 statebit |= XT_CONNTRACK_STATE_DNAT;
38619 }
38620- if (!!(state_mask & statebit) ^
38621+ if (!!(info->state_mask & statebit) ^
38622 !(info->invert_flags & XT_CONNTRACK_STATE))
38623 return false;
38624 }
38625@@ -173,7 +172,7 @@ conntrack_mt(const struct sk_buff *skb, const struct xt_match_param *par,
38626 return false;
38627
38628 if ((info->match_flags & XT_CONNTRACK_STATUS) &&
38629- (!!(status_mask & ct->status) ^
38630+ (!!(info->status_mask & ct->status) ^
38631 !(info->invert_flags & XT_CONNTRACK_STATUS)))
38632 return false;
38633
38634@@ -193,17 +192,11 @@ conntrack_mt(const struct sk_buff *skb, const struct xt_match_param *par,
38635 static bool
38636 conntrack_mt_v1(const struct sk_buff *skb, const struct xt_match_param *par)
38637 {
38638- const struct xt_conntrack_mtinfo1 *info = par->matchinfo;
38639+ const struct xt_conntrack_mtinfo2 *const *info = par->matchinfo;
38640+ struct xt_match_param newpar = *par;
38641
38642- return conntrack_mt(skb, par, info->state_mask, info->status_mask);
38643-}
38644-
38645-static bool
38646-conntrack_mt_v2(const struct sk_buff *skb, const struct xt_match_param *par)
38647-{
38648- const struct xt_conntrack_mtinfo2 *info = par->matchinfo;
38649-
38650- return conntrack_mt(skb, par, info->state_mask, info->status_mask);
38651+ newpar.matchinfo = *info;
38652+ return conntrack_mt(skb, &newpar);
38653 }
38654
38655 static bool conntrack_mt_check(const struct xt_mtchk_param *par)
38656@@ -216,11 +209,45 @@ static bool conntrack_mt_check(const struct xt_mtchk_param *par)
38657 return true;
38658 }
38659
38660+static bool conntrack_mt_check_v1(const struct xt_mtchk_param *par)
38661+{
38662+ struct xt_conntrack_mtinfo1 *info = par->matchinfo;
38663+ struct xt_conntrack_mtinfo2 *up;
38664+ int ret = conntrack_mt_check(par);
38665+
38666+ if (ret < 0)
38667+ return ret;
38668+
38669+ up = kmalloc(sizeof(*up), GFP_KERNEL);
38670+ if (up == NULL) {
38671+ nf_ct_l3proto_module_put(par->family);
38672+ return -ENOMEM;
38673+ }
38674+
38675+ /*
38676+ * The strategy here is to minimize the overhead of v1 matching,
38677+ * by prebuilding a v2 struct and putting the pointer into the
38678+ * v1 dataspace.
38679+ */
38680+ memcpy(up, info, offsetof(typeof(*info), state_mask));
38681+ up->state_mask = info->state_mask;
38682+ up->status_mask = info->status_mask;
38683+ *(void **)info = up;
38684+ return true;
38685+}
38686+
38687 static void conntrack_mt_destroy(const struct xt_mtdtor_param *par)
38688 {
38689 nf_ct_l3proto_module_put(par->family);
38690 }
38691
38692+static void conntrack_mt_destroy_v1(const struct xt_mtdtor_param *par)
38693+{
38694+ struct xt_conntrack_mtinfo2 **info = par->matchinfo;
38695+ kfree(*info);
38696+ conntrack_mt_destroy(par);
38697+}
38698+
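
The strategy comment above is worth unpacking: the v1 checkentry builds a v2-layout copy once and stores its pointer inside the v1 matchinfo area, so the per-packet match path never pays for the translation. A stripped-down model (editor's sketch with hypothetical struct layouts, not part of the patch; as in the real code, the v1 blob is assumed large enough to hold a pointer):

#include <stdlib.h>

struct info_v1 { unsigned long opaque[2]; unsigned short state8; };
struct info_v2 { unsigned long opaque[2]; unsigned int state16; };

/* One-time upgrade at check time: widen the fields into a v2 copy
 * and hide the pointer in the v1 blob. */
static int upgrade_v1(struct info_v1 *info)
{
	struct info_v2 *up = malloc(sizeof(*up));

	if (!up)
		return -1;
	up->opaque[0] = info->opaque[0];
	up->opaque[1] = info->opaque[1];
	up->state16 = info->state8;	/* widen once, not per packet */
	*(void **)info = up;		/* v1 area now carries the v2 pointer */
	return 0;
}

/* Hot path: a single dereference recovers the prebuilt v2 struct. */
static unsigned int match_v1(const struct info_v1 *info)
{
	const struct info_v2 *const *up = (const void *)info;

	return (*up)->state16;
}
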
38699 static struct xt_match conntrack_mt_reg[] __read_mostly = {
38700 {
38701 .name = "conntrack",
38702@@ -228,8 +255,8 @@ static struct xt_match conntrack_mt_reg[] __read_mostly = {
38703 .family = NFPROTO_UNSPEC,
38704 .matchsize = sizeof(struct xt_conntrack_mtinfo1),
38705 .match = conntrack_mt_v1,
38706- .checkentry = conntrack_mt_check,
38707- .destroy = conntrack_mt_destroy,
38708+ .checkentry = conntrack_mt_check_v1,
38709+ .destroy = conntrack_mt_destroy_v1,
38710 .me = THIS_MODULE,
38711 },
38712 {
38713@@ -237,7 +264,7 @@ static struct xt_match conntrack_mt_reg[] __read_mostly = {
38714 .revision = 2,
38715 .family = NFPROTO_UNSPEC,
38716 .matchsize = sizeof(struct xt_conntrack_mtinfo2),
38717- .match = conntrack_mt_v2,
38718+ .match = conntrack_mt,
38719 .checkentry = conntrack_mt_check,
38720 .destroy = conntrack_mt_destroy,
38721 .me = THIS_MODULE,
38722diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
38723index 850ffc0..4eb1ac9 100644
38724--- a/net/netrom/nr_route.c
38725+++ b/net/netrom/nr_route.c
38726@@ -842,13 +842,12 @@ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
38727 dptr = skb_push(skb, 1);
38728 *dptr = AX25_P_NETROM;
38729
38730- ax25s = nr_neigh->ax25;
38731- nr_neigh->ax25 = ax25_send_frame(skb, 256,
38732- (ax25_address *)dev->dev_addr,
38733- &nr_neigh->callsign,
38734- nr_neigh->digipeat, nr_neigh->dev);
38735- if (ax25s)
38736+ ax25s = ax25_send_frame(skb, 256, (ax25_address *)dev->dev_addr, &nr_neigh->callsign, nr_neigh->digipeat, nr_neigh->dev);
38737+ if (nr_neigh->ax25 && ax25s) {
38738+ /* We were already holding this ax25_cb */
38739 ax25_cb_put(ax25s);
38740+ }
38741+ nr_neigh->ax25 = ax25s;
38742
38743 dev_put(dev);
38744 ret = (nr_neigh->ax25 != NULL);
38745diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
38746index 41866eb..f2d116a 100644
38747--- a/net/packet/af_packet.c
38748+++ b/net/packet/af_packet.c
38749@@ -1028,20 +1028,8 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
38750
38751 status = TP_STATUS_SEND_REQUEST;
38752 err = dev_queue_xmit(skb);
38753- if (unlikely(err > 0)) {
38754- err = net_xmit_errno(err);
38755- if (err && __packet_get_status(po, ph) ==
38756- TP_STATUS_AVAILABLE) {
38757- /* skb was destructed already */
38758- skb = NULL;
38759- goto out_status;
38760- }
38761- /*
38762- * skb was dropped but not destructed yet;
38763- * let's treat it like congestion or err < 0
38764- */
38765- err = 0;
38766- }
38767+ if (unlikely(err > 0 && (err = net_xmit_errno(err)) != 0))
38768+ goto out_xmit;
38769 packet_increment_head(&po->tx_ring);
38770 len_sum += tp_len;
38771 } while (likely((ph != NULL) || ((!(msg->msg_flags & MSG_DONTWAIT))
38772@@ -1051,6 +1039,9 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
38773 err = len_sum;
38774 goto out_put;
38775
38776+out_xmit:
38777+ skb->destructor = sock_wfree;
38778+ atomic_dec(&po->tx_ring.pending);
38779 out_status:
38780 __packet_set_status(po, ph, status);
38781 kfree_skb(skb);
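
The tpacket_snd() simplification above routes any nonzero net_xmit_errno() result to the new out_xmit label. For context, dev_queue_xmit() reports hard failures as negative errnos and congestion-type outcomes as positive NET_XMIT_* codes, and net_xmit_errno() folds the latter down (editor's sketch assuming the usual 2.6.32-era definition, not part of the patch):

#include <errno.h>

enum { NET_XMIT_SUCCESS = 0, NET_XMIT_DROP = 1, NET_XMIT_CN = 2 };

/* Congestion notification is not treated as an error; every other
 * positive code becomes -ENOBUFS. */
static int xmit_errno(int e)
{
	return e != NET_XMIT_CN ? -ENOBUFS : 0;
}
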
38782diff --git a/net/rose/rose_link.c b/net/rose/rose_link.c
38783index 5ef5f69..bd86a63 100644
38784--- a/net/rose/rose_link.c
38785+++ b/net/rose/rose_link.c
38786@@ -101,17 +101,13 @@ static void rose_t0timer_expiry(unsigned long param)
38787 static int rose_send_frame(struct sk_buff *skb, struct rose_neigh *neigh)
38788 {
38789 ax25_address *rose_call;
38790- ax25_cb *ax25s;
38791
38792 if (ax25cmp(&rose_callsign, &null_ax25_address) == 0)
38793 rose_call = (ax25_address *)neigh->dev->dev_addr;
38794 else
38795 rose_call = &rose_callsign;
38796
38797- ax25s = neigh->ax25;
38798 neigh->ax25 = ax25_send_frame(skb, 260, rose_call, &neigh->callsign, neigh->digipeat, neigh->dev);
38799- if (ax25s)
38800- ax25_cb_put(ax25s);
38801
38802 return (neigh->ax25 != NULL);
38803 }
38804@@ -124,17 +120,13 @@ static int rose_send_frame(struct sk_buff *skb, struct rose_neigh *neigh)
38805 static int rose_link_up(struct rose_neigh *neigh)
38806 {
38807 ax25_address *rose_call;
38808- ax25_cb *ax25s;
38809
38810 if (ax25cmp(&rose_callsign, &null_ax25_address) == 0)
38811 rose_call = (ax25_address *)neigh->dev->dev_addr;
38812 else
38813 rose_call = &rose_callsign;
38814
38815- ax25s = neigh->ax25;
38816 neigh->ax25 = ax25_find_cb(rose_call, &neigh->callsign, neigh->digipeat, neigh->dev);
38817- if (ax25s)
38818- ax25_cb_put(ax25s);
38819
38820 return (neigh->ax25 != NULL);
38821 }
38822diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
38823index 08230fa..f3e2198 100644
38824--- a/net/rose/rose_route.c
38825+++ b/net/rose/rose_route.c
38826@@ -234,8 +234,6 @@ static void rose_remove_neigh(struct rose_neigh *rose_neigh)
38827
38828 if ((s = rose_neigh_list) == rose_neigh) {
38829 rose_neigh_list = rose_neigh->next;
38830- if (rose_neigh->ax25)
38831- ax25_cb_put(rose_neigh->ax25);
38832 kfree(rose_neigh->digipeat);
38833 kfree(rose_neigh);
38834 return;
38835@@ -244,8 +242,6 @@ static void rose_remove_neigh(struct rose_neigh *rose_neigh)
38836 while (s != NULL && s->next != NULL) {
38837 if (s->next == rose_neigh) {
38838 s->next = rose_neigh->next;
38839- if (rose_neigh->ax25)
38840- ax25_cb_put(rose_neigh->ax25);
38841 kfree(rose_neigh->digipeat);
38842 kfree(rose_neigh);
38843 return;
38844@@ -814,7 +810,6 @@ void rose_link_failed(ax25_cb *ax25, int reason)
38845
38846 if (rose_neigh != NULL) {
38847 rose_neigh->ax25 = NULL;
38848- ax25_cb_put(ax25);
38849
38850 rose_del_route_by_neigh(rose_neigh);
38851 rose_kill_by_neigh(rose_neigh);
38852diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
38853index 9c5a19d..fc6a43c 100644
38854--- a/net/sunrpc/auth_gss/auth_gss.c
38855+++ b/net/sunrpc/auth_gss/auth_gss.c
38856@@ -485,7 +485,7 @@ gss_refresh_upcall(struct rpc_task *task)
38857 dprintk("RPC: %5u gss_refresh_upcall for uid %u\n", task->tk_pid,
38858 cred->cr_uid);
38859 gss_msg = gss_setup_upcall(task->tk_client, gss_auth, cred);
38860- if (PTR_ERR(gss_msg) == -EAGAIN) {
38861+ if (IS_ERR(gss_msg) == -EAGAIN) {
38862 /* XXX: warning on the first, under the assumption we
38863 * shouldn't normally hit this case on a refresh. */
38864 warn_gssd();
38865@@ -644,22 +644,7 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
38866 p = gss_fill_context(p, end, ctx, gss_msg->auth->mech);
38867 if (IS_ERR(p)) {
38868 err = PTR_ERR(p);
38869- switch (err) {
38870- case -EACCES:
38871- gss_msg->msg.errno = err;
38872- err = mlen;
38873- break;
38874- case -EFAULT:
38875- case -ENOMEM:
38876- case -EINVAL:
38877- case -ENOSYS:
38878- gss_msg->msg.errno = -EAGAIN;
38879- break;
38880- default:
38881- printk(KERN_CRIT "%s: bad return from "
38882- "gss_fill_context: %ld\n", __func__, err);
38883- BUG();
38884- }
38885+ gss_msg->msg.errno = (err == -EAGAIN) ? -EAGAIN : -EACCES;
38886 goto err_release_msg;
38887 }
38888 gss_msg->ctx = gss_get_ctx(ctx);
38889diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
38890index 2deb0ed..ef45eba 100644
38891--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
38892+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
38893@@ -131,10 +131,8 @@ gss_import_sec_context_kerberos(const void *p,
38894 struct krb5_ctx *ctx;
38895 int tmp;
38896
38897- if (!(ctx = kzalloc(sizeof(*ctx), GFP_NOFS))) {
38898- p = ERR_PTR(-ENOMEM);
38899+ if (!(ctx = kzalloc(sizeof(*ctx), GFP_NOFS)))
38900 goto out_err;
38901- }
38902
38903 p = simple_get_bytes(p, end, &ctx->initiate, sizeof(ctx->initiate));
38904 if (IS_ERR(p))
38905diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
38906index 76e4c6f..6efbb0c 100644
38907--- a/net/sunrpc/auth_gss/gss_mech_switch.c
38908+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
38909@@ -252,7 +252,7 @@ gss_import_sec_context(const void *input_token, size_t bufsize,
38910 struct gss_ctx **ctx_id)
38911 {
38912 if (!(*ctx_id = kzalloc(sizeof(**ctx_id), GFP_KERNEL)))
38913- return -ENOMEM;
38914+ return GSS_S_FAILURE;
38915 (*ctx_id)->mech_type = gss_mech_get(mech);
38916
38917 return mech->gm_ops
38918diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
38919index 0266cca..df124f7 100644
38920--- a/net/sunrpc/svc_xprt.c
38921+++ b/net/sunrpc/svc_xprt.c
38922@@ -711,8 +711,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
38923 spin_unlock_bh(&pool->sp_lock);
38924
38925 len = 0;
38926- if (test_bit(XPT_LISTENER, &xprt->xpt_flags) &&
38927- !test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
38928+ if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
38929 struct svc_xprt *newxpt;
38930 newxpt = xprt->xpt_ops->xpo_accept(xprt);
38931 if (newxpt) {
38932diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
38933index 0d86248..0a6b7a0 100644
38934--- a/net/wireless/mlme.c
38935+++ b/net/wireless/mlme.c
38936@@ -94,18 +94,7 @@ void cfg80211_send_rx_assoc(struct net_device *dev, const u8 *buf, size_t len)
38937 }
38938 }
38939
38940- /*
38941- * We might be coming here because the driver reported
38942- * a successful association at the same time as the
38943- * user requested a deauth. In that case, we will have
38944- * removed the BSS from the auth_bsses list due to the
38945- * deauth request when the assoc response makes it. If
38946- * the two code paths acquire the lock the other way
38947- * around, that's just the standard situation of a
38948- * deauth being requested while connected.
38949- */
38950- if (!bss)
38951- goto out;
38952+ WARN_ON(!bss);
38953 } else if (wdev->conn) {
38954 cfg80211_sme_failed_assoc(wdev);
38955 need_connect_result = false;
38956diff --git a/net/wireless/reg.c b/net/wireless/reg.c
38957index efd24a7..f256dff 100644
38958--- a/net/wireless/reg.c
38959+++ b/net/wireless/reg.c
38960@@ -1714,7 +1714,7 @@ int regulatory_hint_user(const char *alpha2)
38961 request->wiphy_idx = WIPHY_IDX_STALE;
38962 request->alpha2[0] = alpha2[0];
38963 request->alpha2[1] = alpha2[1];
38964- request->initiator = NL80211_REGDOM_SET_BY_USER;
38965+ request->initiator = NL80211_REGDOM_SET_BY_USER,
38966
38967 queue_regulatory_request(request);
38968
38969diff --git a/net/wireless/sme.c b/net/wireless/sme.c
38970index b2930e3..9f0b280 100644
38971--- a/net/wireless/sme.c
38972+++ b/net/wireless/sme.c
38973@@ -655,7 +655,6 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
38974 memset(&wrqu, 0, sizeof(wrqu));
38975 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
38976 wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);
38977- wdev->wext.connect.ssid_len = 0;
38978 #endif
38979 }
38980