From 0423f5fb58e1712f1ca2b6a8c97f3e625b684406 Mon Sep 17 00:00:00 2001 From: Bjoern Brandenburg Date: Wed, 19 May 2010 15:58:28 -0400 Subject: update final release software --- download/2010.1/SHA256SUMS | 7 +- download/2010.1/ft_tools-2010.1.tgz | Bin 0 -> 5577 bytes download/2010.1/liblitmus-2010.1.tgz | Bin 232635 -> 17633 bytes download/2010.1/litmus-rt-2010.1.patch | 31569 ++----------------------------- 4 files changed, 1649 insertions(+), 29927 deletions(-) create mode 100644 download/2010.1/ft_tools-2010.1.tgz diff --git a/download/2010.1/SHA256SUMS b/download/2010.1/SHA256SUMS index e095f25..e96ef1b 100644 --- a/download/2010.1/SHA256SUMS +++ b/download/2010.1/SHA256SUMS @@ -1,4 +1,7 @@ +0cff900f43667fec7682fd7af83177a3d36c45514979a1c534fee4aa89a5b390 ft_tools-2010.1.tgz +8629ba294c695d9d33283eb0748aa533ac7a880b8d2773925434cb61475267df liblitmus-2010.1.tgz +7e2fa2b43d43f96ab11e3bb9136cd3f1528d32f023df5136dcde2e5c9ddc4c60 litmus-rt-2010.1.patch 26b2aa111452e31acffbb866fd9b66058aa640220e8b7d30c103be8ed96b5751 32bit-config 91fbdbd565c02cfb2f0d69f9dbdfde0b4b401fcaba04f4af24d8b6cf61046aa2 64bit-config -e3e99958a8872403e206b380c2be2be118495373361436d436a706e0bdd2df79 liblitmus-2010.1.tgz -79753deefdfdb3f37341c95c92efc520fd4c840fc54c323c35c1f8fe65d8840e litmus-rt-2010.1.patch +666a1a8d4c6e00c92178031fea1b1ee49e9c04f9347fb8e8d369772d4fede8b9 liblitmus-2010-config + diff --git a/download/2010.1/ft_tools-2010.1.tgz b/download/2010.1/ft_tools-2010.1.tgz new file mode 100644 index 0000000..c49f228 Binary files /dev/null and b/download/2010.1/ft_tools-2010.1.tgz differ diff --git a/download/2010.1/liblitmus-2010.1.tgz b/download/2010.1/liblitmus-2010.1.tgz index 58302f9..9c92dcb 100644 Binary files a/download/2010.1/liblitmus-2010.1.tgz and b/download/2010.1/liblitmus-2010.1.tgz differ diff --git a/download/2010.1/litmus-rt-2010.1.patch b/download/2010.1/litmus-rt-2010.1.patch index 9df4716..50013fb 100644 --- a/download/2010.1/litmus-rt-2010.1.patch +++ b/download/2010.1/litmus-rt-2010.1.patch @@ -1,12 +1,86 @@ + Makefile | 4 +- + arch/x86/Kconfig | 2 + + arch/x86/include/asm/entry_arch.h | 1 + + arch/x86/include/asm/feather_trace.h | 17 + + arch/x86/include/asm/feather_trace_32.h | 80 +++ + arch/x86/include/asm/feather_trace_64.h | 69 +++ + arch/x86/include/asm/hw_irq.h | 3 + + arch/x86/include/asm/irq_vectors.h | 5 + + arch/x86/include/asm/processor.h | 2 + + arch/x86/include/asm/unistd_32.h | 6 +- + arch/x86/include/asm/unistd_64.h | 4 + + arch/x86/kernel/Makefile | 2 + + arch/x86/kernel/cpu/intel_cacheinfo.c | 17 + + arch/x86/kernel/entry_64.S | 2 + + arch/x86/kernel/ft_event.c | 112 ++++ + arch/x86/kernel/irqinit.c | 3 + + arch/x86/kernel/smp.c | 28 + + arch/x86/kernel/syscall_table_32.S | 14 + + fs/exec.c | 13 +- + fs/inode.c | 2 + + include/linux/completion.h | 1 + + include/linux/fs.h | 21 +- + include/linux/hrtimer.h | 25 + + include/linux/sched.h | 17 +- + include/linux/smp.h | 5 + + include/linux/tick.h | 5 + + include/litmus/bheap.h | 77 +++ + include/litmus/edf_common.h | 27 + + include/litmus/fdso.h | 69 +++ + include/litmus/feather_buffer.h | 94 ++++ + include/litmus/feather_trace.h | 49 ++ + include/litmus/ftdev.h | 49 ++ + include/litmus/jobs.h | 9 + + include/litmus/litmus.h | 252 +++++++++ + include/litmus/rt_domain.h | 162 ++++++ + include/litmus/rt_param.h | 189 +++++++ + include/litmus/sched_plugin.h | 162 ++++++ + include/litmus/sched_trace.h | 192 +++++++ + include/litmus/trace.h | 113 ++++ + include/litmus/unistd_32.h | 23 + + include/litmus/unistd_64.h | 37 ++ + 
kernel/exit.c | 4 + + kernel/fork.c | 7 + + kernel/hrtimer.c | 82 +++ + kernel/printk.c | 14 +- + kernel/sched.c | 109 ++++- + kernel/sched_fair.c | 2 +- + kernel/sched_rt.c | 2 +- + kernel/time/tick-sched.c | 48 ++- + litmus/Kconfig | 85 +++ + litmus/Makefile | 23 + + litmus/bheap.c | 314 +++++++++++ + litmus/ctrldev.c | 150 +++++ + litmus/edf_common.c | 102 ++++ + litmus/fdso.c | 281 ++++++++++ + litmus/fmlp.c | 268 +++++++++ + litmus/ft_event.c | 43 ++ + litmus/ftdev.c | 359 +++++++++++++ + litmus/jobs.c | 43 ++ + litmus/litmus.c | 775 ++++++++++++++++++++++++++ + litmus/rt_domain.c | 310 +++++++++++ + litmus/sched_cedf.c | 756 ++++++++++++++++++++++++++ + litmus/sched_gsn_edf.c | 828 ++++++++++++++++++++++++++++ + litmus/sched_litmus.c | 318 +++++++++++ + litmus/sched_pfair.c | 896 +++++++++++++++++++++++++++++++ + litmus/sched_plugin.c | 265 +++++++++ + litmus/sched_psn_edf.c | 478 ++++++++++++++++ + litmus/sched_task_trace.c | 204 +++++++ + litmus/sched_trace.c | 378 +++++++++++++ + litmus/srp.c | 318 +++++++++++ + litmus/sync.c | 104 ++++ + litmus/trace.c | 103 ++++ + 72 files changed, 9596 insertions(+), 37 deletions(-) + diff --git a/Makefile b/Makefile -index ec932b2..2603066 100644 +index f5cdb72..2603066 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ VERSION = 2 PATCHLEVEL = 6 SUBLEVEL = 32 --EXTRAVERSION = .9 +-EXTRAVERSION = +EXTRAVERSION =-litmus2010 NAME = Man-Eating Seals of Antiquity @@ -20,25534 +94,1097 @@ index ec932b2..2603066 100644 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \ $(core-y) $(core-m) $(drivers-y) $(drivers-m) \ -diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c -index 62619f2..9a3334a 100644 ---- a/arch/alpha/kernel/osf_sys.c -+++ b/arch/alpha/kernel/osf_sys.c -@@ -178,18 +178,25 @@ SYSCALL_DEFINE6(osf_mmap, unsigned long, addr, unsigned long, len, - unsigned long, prot, unsigned long, flags, unsigned long, fd, - unsigned long, off) - { -- unsigned long ret = -EINVAL; -+ struct file *file = NULL; -+ unsigned long ret = -EBADF; - - #if 0 - if (flags & (_MAP_HASSEMAPHORE | _MAP_INHERIT | _MAP_UNALIGNED)) - printk("%s: unimplemented OSF mmap flags %04lx\n", - current->comm, flags); - #endif -- if ((off + PAGE_ALIGN(len)) < off) -- goto out; -- if (off & ~PAGE_MASK) -- goto out; -- ret = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT); -+ if (!(flags & MAP_ANONYMOUS)) { -+ file = fget(fd); -+ if (!file) -+ goto out; -+ } -+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); -+ down_write(¤t->mm->mmap_sem); -+ ret = do_mmap(file, addr, len, prot, flags, off); -+ up_write(¤t->mm->mmap_sem); -+ if (file) -+ fput(file); - out: - return ret; - } -diff --git a/arch/arm/include/asm/mman.h b/arch/arm/include/asm/mman.h -index 41f99c5..8eebf89 100644 ---- a/arch/arm/include/asm/mman.h -+++ b/arch/arm/include/asm/mman.h -@@ -1,4 +1 @@ - #include -- --#define arch_mmap_check(addr, len, flags) \ -- (((flags) & MAP_FIXED && (addr) < FIRST_USER_ADDRESS) ? 
-EINVAL : 0) -diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S -index 4f07168..fafce1b 100644 ---- a/arch/arm/kernel/calls.S -+++ b/arch/arm/kernel/calls.S -@@ -172,7 +172,7 @@ - /* 160 */ CALL(sys_sched_get_priority_min) - CALL(sys_sched_rr_get_interval) - CALL(sys_nanosleep) -- CALL(sys_mremap) -+ CALL(sys_arm_mremap) - CALL(sys_setresuid16) - /* 165 */ CALL(sys_getresuid16) - CALL(sys_ni_syscall) /* vm86 */ -diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S -index 2c1db77..f0fe95b 100644 ---- a/arch/arm/kernel/entry-common.S -+++ b/arch/arm/kernel/entry-common.S -@@ -416,12 +416,12 @@ sys_mmap2: - tst r5, #PGOFF_MASK - moveq r5, r5, lsr #PAGE_SHIFT - 12 - streq r5, [sp, #4] -- beq sys_mmap_pgoff -+ beq do_mmap2 - mov r0, #-EINVAL - mov pc, lr - #else - str r5, [sp, #4] -- b sys_mmap_pgoff -+ b do_mmap2 - #endif - ENDPROC(sys_mmap2) +diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig +index 72ace95..e2cd95e 100644 +--- a/arch/x86/Kconfig ++++ b/arch/x86/Kconfig +@@ -2092,3 +2092,5 @@ source "crypto/Kconfig" + source "arch/x86/kvm/Kconfig" -diff --git a/arch/arm/kernel/sys_arm.c b/arch/arm/kernel/sys_arm.c -index ae4027b..78ecaac 100644 ---- a/arch/arm/kernel/sys_arm.c -+++ b/arch/arm/kernel/sys_arm.c -@@ -28,6 +28,41 @@ - #include - #include + source "lib/Kconfig" ++ ++source "litmus/Kconfig" +diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h +index f5693c8..19e22e3 100644 +--- a/arch/x86/include/asm/entry_arch.h ++++ b/arch/x86/include/asm/entry_arch.h +@@ -13,6 +13,7 @@ + BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR) + BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR) + BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR) ++BUILD_INTERRUPT(pull_timers_interrupt,PULL_TIMERS_VECTOR) + BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR) + BUILD_INTERRUPT(reboot_interrupt,REBOOT_VECTOR) -+extern unsigned long do_mremap(unsigned long addr, unsigned long old_len, -+ unsigned long new_len, unsigned long flags, -+ unsigned long new_addr); +diff --git a/arch/x86/include/asm/feather_trace.h b/arch/x86/include/asm/feather_trace.h +new file mode 100644 +index 0000000..4fd3163 +--- /dev/null ++++ b/arch/x86/include/asm/feather_trace.h +@@ -0,0 +1,17 @@ ++#ifndef _ARCH_FEATHER_TRACE_H ++#define _ARCH_FEATHER_TRACE_H ++ ++#include + -+/* common code for old and new mmaps */ -+inline long do_mmap2( -+ unsigned long addr, unsigned long len, -+ unsigned long prot, unsigned long flags, -+ unsigned long fd, unsigned long pgoff) ++static inline unsigned long long ft_timestamp(void) +{ -+ int error = -EINVAL; -+ struct file * file = NULL; ++ return __native_read_tsc(); ++} + -+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); ++#ifdef CONFIG_X86_32 ++#include "feather_trace_32.h" ++#else ++#include "feather_trace_64.h" ++#endif + -+ if (flags & MAP_FIXED && addr < FIRST_USER_ADDRESS) -+ goto out; ++#endif +diff --git a/arch/x86/include/asm/feather_trace_32.h b/arch/x86/include/asm/feather_trace_32.h +new file mode 100644 +index 0000000..192cd09 +--- /dev/null ++++ b/arch/x86/include/asm/feather_trace_32.h +@@ -0,0 +1,80 @@ ++/* Do not directly include this file. 
Include feather_trace.h instead */ + -+ error = -EBADF; -+ if (!(flags & MAP_ANONYMOUS)) { -+ file = fget(fd); -+ if (!file) -+ goto out; -+ } ++#define feather_callback __attribute__((regparm(0))) + -+ down_write(¤t->mm->mmap_sem); -+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); -+ up_write(¤t->mm->mmap_sem); ++/* ++ * make the compiler reload any register that is not saved in ++ * a cdecl function call ++ */ ++#define CLOBBER_LIST "memory", "cc", "eax", "ecx", "edx" + -+ if (file) -+ fput(file); -+out: -+ return error; -+} ++#define ft_event(id, callback) \ ++ __asm__ __volatile__( \ ++ "1: jmp 2f \n\t" \ ++ " call " #callback " \n\t" \ ++ ".section __event_table, \"aw\" \n\t" \ ++ ".long " #id ", 0, 1b, 2f \n\t" \ ++ ".previous \n\t" \ ++ "2: \n\t" \ ++ : : : CLOBBER_LIST) + - struct mmap_arg_struct { - unsigned long addr; - unsigned long len; -@@ -49,11 +84,29 @@ asmlinkage int old_mmap(struct mmap_arg_struct __user *arg) - if (a.offset & ~PAGE_MASK) - goto out; - -- error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); -+ error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); - out: - return error; - } - -+asmlinkage unsigned long -+sys_arm_mremap(unsigned long addr, unsigned long old_len, -+ unsigned long new_len, unsigned long flags, -+ unsigned long new_addr) -+{ -+ unsigned long ret = -EINVAL; ++#define ft_event0(id, callback) \ ++ __asm__ __volatile__( \ ++ "1: jmp 2f \n\t" \ ++ " subl $4, %%esp \n\t" \ ++ " movl $" #id ", (%%esp) \n\t" \ ++ " call " #callback " \n\t" \ ++ " addl $4, %%esp \n\t" \ ++ ".section __event_table, \"aw\" \n\t" \ ++ ".long " #id ", 0, 1b, 2f \n\t" \ ++ ".previous \n\t" \ ++ "2: \n\t" \ ++ : : : CLOBBER_LIST) + -+ if (flags & MREMAP_FIXED && new_addr < FIRST_USER_ADDRESS) -+ goto out; ++#define ft_event1(id, callback, param) \ ++ __asm__ __volatile__( \ ++ "1: jmp 2f \n\t" \ ++ " subl $8, %%esp \n\t" \ ++ " movl %0, 4(%%esp) \n\t" \ ++ " movl $" #id ", (%%esp) \n\t" \ ++ " call " #callback " \n\t" \ ++ " addl $8, %%esp \n\t" \ ++ ".section __event_table, \"aw\" \n\t" \ ++ ".long " #id ", 0, 1b, 2f \n\t" \ ++ ".previous \n\t" \ ++ "2: \n\t" \ ++ : : "r" (param) : CLOBBER_LIST) + -+ down_write(¤t->mm->mmap_sem); -+ ret = do_mremap(addr, old_len, new_len, flags, new_addr); -+ up_write(¤t->mm->mmap_sem); ++#define ft_event2(id, callback, param, param2) \ ++ __asm__ __volatile__( \ ++ "1: jmp 2f \n\t" \ ++ " subl $12, %%esp \n\t" \ ++ " movl %1, 8(%%esp) \n\t" \ ++ " movl %0, 4(%%esp) \n\t" \ ++ " movl $" #id ", (%%esp) \n\t" \ ++ " call " #callback " \n\t" \ ++ " addl $12, %%esp \n\t" \ ++ ".section __event_table, \"aw\" \n\t" \ ++ ".long " #id ", 0, 1b, 2f \n\t" \ ++ ".previous \n\t" \ ++ "2: \n\t" \ ++ : : "r" (param), "r" (param2) : CLOBBER_LIST) + -+out: -+ return ret; -+} + - /* - * Perform the select(nd, in, out, ex, tv) and mmap() system - * calls. 
-diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c -index 36e4fb4..0976049 100644 ---- a/arch/arm/mach-davinci/dm646x.c -+++ b/arch/arm/mach-davinci/dm646x.c -@@ -789,14 +789,7 @@ static struct davinci_id dm646x_ids[] = { - .part_no = 0xb770, - .manufacturer = 0x017, - .cpu_id = DAVINCI_CPU_ID_DM6467, -- .name = "dm6467_rev1.x", -- }, -- { -- .variant = 0x1, -- .part_no = 0xb770, -- .manufacturer = 0x017, -- .cpu_id = DAVINCI_CPU_ID_DM6467, -- .name = "dm6467_rev3.x", -+ .name = "dm6467", - }, - }; - -diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c -index 86a8732..aec7f42 100644 ---- a/arch/arm/mach-pxa/em-x270.c -+++ b/arch/arm/mach-pxa/em-x270.c -@@ -497,15 +497,16 @@ static int em_x270_usb_hub_init(void) - goto err_free_vbus_gpio; - - /* USB Hub power-on and reset */ -- gpio_direction_output(usb_hub_reset, 1); -- gpio_direction_output(GPIO9_USB_VBUS_EN, 0); -+ gpio_direction_output(usb_hub_reset, 0); - regulator_enable(em_x270_usb_ldo); -- gpio_set_value(usb_hub_reset, 0); - gpio_set_value(usb_hub_reset, 1); -+ gpio_set_value(usb_hub_reset, 0); - regulator_disable(em_x270_usb_ldo); - regulator_enable(em_x270_usb_ldo); -- gpio_set_value(usb_hub_reset, 0); -- gpio_set_value(GPIO9_USB_VBUS_EN, 1); -+ gpio_set_value(usb_hub_reset, 1); -+ -+ /* enable VBUS */ -+ gpio_direction_output(GPIO9_USB_VBUS_EN, 1); - - return 0; - -diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c -index f5abc51..2b79964 100644 ---- a/arch/arm/mm/mmap.c -+++ b/arch/arm/mm/mmap.c -@@ -54,8 +54,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, - * We enforce the MAP_FIXED case. - */ - if (flags & MAP_FIXED) { -- if (aliasing && flags & MAP_SHARED && -- (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)) -+ if (aliasing && flags & MAP_SHARED && addr & (SHMLBA - 1)) - return -EINVAL; - return addr; - } -diff --git a/arch/avr32/include/asm/syscalls.h b/arch/avr32/include/asm/syscalls.h -index 66a1972..483d666 100644 ---- a/arch/avr32/include/asm/syscalls.h -+++ b/arch/avr32/include/asm/syscalls.h -@@ -29,6 +29,10 @@ asmlinkage int sys_sigaltstack(const stack_t __user *, stack_t __user *, - struct pt_regs *); - asmlinkage int sys_rt_sigreturn(struct pt_regs *); - -+/* kernel/sys_avr32.c */ -+asmlinkage long sys_mmap2(unsigned long, unsigned long, unsigned long, -+ unsigned long, unsigned long, off_t); ++#define ft_event3(id, callback, p, p2, p3) \ ++ __asm__ __volatile__( \ ++ "1: jmp 2f \n\t" \ ++ " subl $16, %%esp \n\t" \ ++ " movl %2, 12(%%esp) \n\t" \ ++ " movl %1, 8(%%esp) \n\t" \ ++ " movl %0, 4(%%esp) \n\t" \ ++ " movl $" #id ", (%%esp) \n\t" \ ++ " call " #callback " \n\t" \ ++ " addl $16, %%esp \n\t" \ ++ ".section __event_table, \"aw\" \n\t" \ ++ ".long " #id ", 0, 1b, 2f \n\t" \ ++ ".previous \n\t" \ ++ "2: \n\t" \ ++ : : "r" (p), "r" (p2), "r" (p3) : CLOBBER_LIST) + - /* mm/cache.c */ - asmlinkage int sys_cacheflush(int, void __user *, size_t); - -diff --git a/arch/avr32/kernel/sys_avr32.c b/arch/avr32/kernel/sys_avr32.c -index 459349b..5d2daea 100644 ---- a/arch/avr32/kernel/sys_avr32.c -+++ b/arch/avr32/kernel/sys_avr32.c -@@ -5,8 +5,39 @@ - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ -+#include -+#include -+#include -+#include - #include - -+#include -+#include -+#include ++#define __ARCH_HAS_FEATHER_TRACE +diff --git a/arch/x86/include/asm/feather_trace_64.h b/arch/x86/include/asm/feather_trace_64.h +new file mode 100644 +index 0000000..1cffa4e +--- /dev/null ++++ b/arch/x86/include/asm/feather_trace_64.h +@@ -0,0 +1,69 @@ ++/* Do not directly include this file. Include feather_trace.h instead */ + -+asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, -+ unsigned long prot, unsigned long flags, -+ unsigned long fd, off_t offset) -+{ -+ int error = -EBADF; -+ struct file *file = NULL; ++/* regparm is the default on x86_64 */ ++#define feather_callback + -+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); -+ if (!(flags & MAP_ANONYMOUS)) { -+ file = fget(fd); -+ if (!file) -+ return error; -+ } ++# define _EVENT_TABLE(id,from,to) \ ++ ".section __event_table, \"aw\"\n\t" \ ++ ".balign 8\n\t" \ ++ ".quad " #id ", 0, " #from ", " #to " \n\t" \ ++ ".previous \n\t" + -+ down_write(¤t->mm->mmap_sem); -+ error = do_mmap_pgoff(file, addr, len, prot, flags, offset); -+ up_write(¤t->mm->mmap_sem); ++/* ++ * x86_64 callee only owns rbp, rbx, r12 -> r15 ++ * the called can freely modify the others ++ */ ++#define CLOBBER_LIST "memory", "cc", "rdi", "rsi", "rdx", "rcx", \ ++ "r8", "r9", "r10", "r11", "rax" + -+ if (file) -+ fput(file); -+ return error; -+} ++#define ft_event(id, callback) \ ++ __asm__ __volatile__( \ ++ "1: jmp 2f \n\t" \ ++ " call " #callback " \n\t" \ ++ _EVENT_TABLE(id,1b,2f) \ ++ "2: \n\t" \ ++ : : : CLOBBER_LIST) + - int kernel_execve(const char *file, char **argv, char **envp) - { - register long scno asm("r8") = __NR_execve; -diff --git a/arch/avr32/kernel/syscall-stubs.S b/arch/avr32/kernel/syscall-stubs.S -index 0447a3e..f7244cd 100644 ---- a/arch/avr32/kernel/syscall-stubs.S -+++ b/arch/avr32/kernel/syscall-stubs.S -@@ -61,7 +61,7 @@ __sys_execve: - __sys_mmap2: - pushm lr - st.w --sp, ARG6 -- call sys_mmap_pgoff -+ call sys_mmap2 - sub sp, -4 - popm pc - -diff --git a/arch/blackfin/include/asm/page.h b/arch/blackfin/include/asm/page.h -index 1d04e40..944a07c 100644 ---- a/arch/blackfin/include/asm/page.h -+++ b/arch/blackfin/include/asm/page.h -@@ -10,9 +10,4 @@ - #include - #define MAP_NR(addr) (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT) - --#define VM_DATA_DEFAULT_FLAGS \ -- (VM_READ | VM_WRITE | \ -- ((current->personality & READ_IMPLIES_EXEC) ? 
VM_EXEC : 0 ) | \ -- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) -- - #endif -diff --git a/arch/blackfin/kernel/sys_bfin.c b/arch/blackfin/kernel/sys_bfin.c -index 2e7f8e1..afcef12 100644 ---- a/arch/blackfin/kernel/sys_bfin.c -+++ b/arch/blackfin/kernel/sys_bfin.c -@@ -22,6 +22,39 @@ - #include - #include - -+/* common code for old and new mmaps */ -+static inline long -+do_mmap2(unsigned long addr, unsigned long len, -+ unsigned long prot, unsigned long flags, -+ unsigned long fd, unsigned long pgoff) -+{ -+ int error = -EBADF; -+ struct file *file = NULL; -+ -+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); -+ if (!(flags & MAP_ANONYMOUS)) { -+ file = fget(fd); -+ if (!file) -+ goto out; -+ } ++#define ft_event0(id, callback) \ ++ __asm__ __volatile__( \ ++ "1: jmp 2f \n\t" \ ++ " movq $" #id ", %%rdi \n\t" \ ++ " call " #callback " \n\t" \ ++ _EVENT_TABLE(id,1b,2f) \ ++ "2: \n\t" \ ++ : : : CLOBBER_LIST) + -+ down_write(¤t->mm->mmap_sem); -+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); -+ up_write(¤t->mm->mmap_sem); ++#define ft_event1(id, callback, param) \ ++ __asm__ __volatile__( \ ++ "1: jmp 2f \n\t" \ ++ " movq %0, %%rsi \n\t" \ ++ " movq $" #id ", %%rdi \n\t" \ ++ " call " #callback " \n\t" \ ++ _EVENT_TABLE(id,1b,2f) \ ++ "2: \n\t" \ ++ : : "r" (param) : CLOBBER_LIST) + -+ if (file) -+ fput(file); -+ out: -+ return error; -+} ++#define ft_event2(id, callback, param, param2) \ ++ __asm__ __volatile__( \ ++ "1: jmp 2f \n\t" \ ++ " movq %1, %%rdx \n\t" \ ++ " movq %0, %%rsi \n\t" \ ++ " movq $" #id ", %%rdi \n\t" \ ++ " call " #callback " \n\t" \ ++ _EVENT_TABLE(id,1b,2f) \ ++ "2: \n\t" \ ++ : : "r" (param), "r" (param2) : CLOBBER_LIST) + -+asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, -+ unsigned long prot, unsigned long flags, -+ unsigned long fd, unsigned long pgoff) -+{ -+ return do_mmap2(addr, len, prot, flags, fd, pgoff); -+} ++#define ft_event3(id, callback, p, p2, p3) \ ++ __asm__ __volatile__( \ ++ "1: jmp 2f \n\t" \ ++ " movq %2, %%rcx \n\t" \ ++ " movq %1, %%rdx \n\t" \ ++ " movq %0, %%rsi \n\t" \ ++ " movq $" #id ", %%rdi \n\t" \ ++ " call " #callback " \n\t" \ ++ _EVENT_TABLE(id,1b,2f) \ ++ "2: \n\t" \ ++ : : "r" (p), "r" (p2), "r" (p3) : CLOBBER_LIST) + - asmlinkage void *sys_sram_alloc(size_t size, unsigned long flags) - { - return sram_alloc_with_lsl(size, flags); -diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S -index 1d8f00a..94a0375 100644 ---- a/arch/blackfin/mach-common/entry.S -+++ b/arch/blackfin/mach-common/entry.S -@@ -1422,7 +1422,7 @@ ENTRY(_sys_call_table) - .long _sys_ni_syscall /* streams2 */ - .long _sys_vfork /* 190 */ - .long _sys_getrlimit -- .long _sys_mmap_pgoff -+ .long _sys_mmap2 - .long _sys_truncate64 - .long _sys_ftruncate64 - .long _sys_stat64 /* 195 */ -diff --git a/arch/cris/kernel/sys_cris.c b/arch/cris/kernel/sys_cris.c -index c2bbb1a..2ad962c 100644 ---- a/arch/cris/kernel/sys_cris.c -+++ b/arch/cris/kernel/sys_cris.c -@@ -26,6 +26,31 @@ - #include - #include ++#define __ARCH_HAS_FEATHER_TRACE +diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h +index ba180d9..68900e7 100644 +--- a/arch/x86/include/asm/hw_irq.h ++++ b/arch/x86/include/asm/hw_irq.h +@@ -53,6 +53,8 @@ extern void threshold_interrupt(void); + extern void call_function_interrupt(void); + extern void call_function_single_interrupt(void); -+/* common code for old and new mmaps */ -+static inline long -+do_mmap2(unsigned long addr, unsigned long len, unsigned long prot, -+ unsigned long 
flags, unsigned long fd, unsigned long pgoff) -+{ -+ int error = -EBADF; -+ struct file * file = NULL; -+ -+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); -+ if (!(flags & MAP_ANONYMOUS)) { -+ file = fget(fd); -+ if (!file) -+ goto out; -+ } -+ -+ down_write(¤t->mm->mmap_sem); -+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); -+ up_write(¤t->mm->mmap_sem); -+ -+ if (file) -+ fput(file); -+out: -+ return error; -+} ++extern void pull_timers_interrupt(void); + - asmlinkage unsigned long old_mmap(unsigned long __user *args) - { - unsigned long buffer[6]; -@@ -38,7 +63,7 @@ asmlinkage unsigned long old_mmap(unsigned long __user *args) - if (buffer[5] & ~PAGE_MASK) /* verify that offset is on page boundary */ - goto out; - -- err = sys_mmap_pgoff(buffer[0], buffer[1], buffer[2], buffer[3], -+ err = do_mmap2(buffer[0], buffer[1], buffer[2], buffer[3], - buffer[4], buffer[5] >> PAGE_SHIFT); - out: - return err; -@@ -48,8 +73,7 @@ asmlinkage long - sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot, - unsigned long flags, unsigned long fd, unsigned long pgoff) - { -- /* bug(?): 8Kb pages here */ -- return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff); -+ return do_mmap2(addr, len, prot, flags, fd, pgoff); - } + /* PIC specific functions */ + extern void disable_8259A_irq(unsigned int irq); + extern void enable_8259A_irq(unsigned int irq); +@@ -110,6 +112,7 @@ extern asmlinkage void smp_irq_move_cleanup_interrupt(void); + extern void smp_reschedule_interrupt(struct pt_regs *); + extern void smp_call_function_interrupt(struct pt_regs *); + extern void smp_call_function_single_interrupt(struct pt_regs *); ++extern void smp_pull_timers_interrupt(struct pt_regs *); + #ifdef CONFIG_X86_32 + extern void smp_invalidate_interrupt(struct pt_regs *); + #else +diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h +index 5b21f0e..28c3bf3 100644 +--- a/arch/x86/include/asm/irq_vectors.h ++++ b/arch/x86/include/asm/irq_vectors.h +@@ -104,6 +104,11 @@ + #define LOCAL_TIMER_VECTOR 0xef /* -diff --git a/arch/frv/include/asm/page.h b/arch/frv/include/asm/page.h -index 8c97068..25c6a50 100644 ---- a/arch/frv/include/asm/page.h -+++ b/arch/frv/include/asm/page.h -@@ -63,10 +63,12 @@ extern unsigned long max_pfn; - #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) - - -+#ifdef CONFIG_MMU - #define VM_DATA_DEFAULT_FLAGS \ - (VM_READ | VM_WRITE | \ - ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) -+#endif - - #endif /* __ASSEMBLY__ */ - -diff --git a/arch/frv/kernel/sys_frv.c b/arch/frv/kernel/sys_frv.c -index 1d3d4c9..2b6b528 100644 ---- a/arch/frv/kernel/sys_frv.c -+++ b/arch/frv/kernel/sys_frv.c -@@ -31,6 +31,9 @@ asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, - unsigned long prot, unsigned long flags, - unsigned long fd, unsigned long pgoff) - { -+ int error = -EBADF; -+ struct file * file = NULL; ++ * LITMUS^RT pull timers IRQ vector ++ */ ++#define PULL_TIMERS_VECTOR 0xee + - /* As with sparc32, make sure the shift for mmap2 is constant - (12), no matter what PAGE_SIZE we have.... 
*/ ++/* + * Generic system vector for platform specific use + */ + #define GENERIC_INTERRUPT_VECTOR 0xed +diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h +index c978648..e75daac 100644 +--- a/arch/x86/include/asm/processor.h ++++ b/arch/x86/include/asm/processor.h +@@ -172,6 +172,8 @@ extern void print_cpu_info(struct cpuinfo_x86 *); + extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c); + extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); + extern unsigned short num_cache_leaves; ++extern int get_shared_cpu_map(cpumask_var_t mask, ++ unsigned int cpu, int index); + + extern void detect_extended_topology(struct cpuinfo_x86 *c); + extern void detect_ht(struct cpuinfo_x86 *c); +diff --git a/arch/x86/include/asm/unistd_32.h b/arch/x86/include/asm/unistd_32.h +index 6fb3c20..f9b507f 100644 +--- a/arch/x86/include/asm/unistd_32.h ++++ b/arch/x86/include/asm/unistd_32.h +@@ -343,9 +343,13 @@ + #define __NR_rt_tgsigqueueinfo 335 + #define __NR_perf_event_open 336 -@@ -38,10 +41,69 @@ asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, - trying to map something we can't */ - if (pgoff & ((1 << (PAGE_SHIFT - 12)) - 1)) - return -EINVAL; -+ pgoff >>= PAGE_SHIFT - 12; -+ -+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); -+ if (!(flags & MAP_ANONYMOUS)) { -+ file = fget(fd); -+ if (!file) -+ goto out; -+ } -+ -+ down_write(¤t->mm->mmap_sem); -+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); -+ up_write(¤t->mm->mmap_sem); -+ -+ if (file) -+ fput(file); -+out: -+ return error; -+} -+ -+#if 0 /* DAVIDM - do we want this */ -+struct mmap_arg_struct64 { -+ __u32 addr; -+ __u32 len; -+ __u32 prot; -+ __u32 flags; -+ __u64 offset; /* 64 bits */ -+ __u32 fd; -+}; -+ -+asmlinkage long sys_mmap64(struct mmap_arg_struct64 *arg) -+{ -+ int error = -EFAULT; -+ struct file * file = NULL; -+ struct mmap_arg_struct64 a; -+ unsigned long pgoff; -+ -+ if (copy_from_user(&a, arg, sizeof(a))) -+ return -EFAULT; -+ -+ if ((long)a.offset & ~PAGE_MASK) -+ return -EINVAL; ++#define __NR_LITMUS 337 + -+ pgoff = a.offset >> PAGE_SHIFT; -+ if ((a.offset >> PAGE_SHIFT) != pgoff) -+ return -EINVAL; ++#include "litmus/unistd_32.h" + -+ if (!(a.flags & MAP_ANONYMOUS)) { -+ error = -EBADF; -+ file = fget(a.fd); -+ if (!file) -+ goto out; -+ } -+ a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); + #ifdef __KERNEL__ -- return sys_mmap_pgoff(addr, len, prot, flags, fd, -- pgoff >> (PAGE_SHIFT - 12)); -+ down_write(¤t->mm->mmap_sem); -+ error = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags, pgoff); -+ up_write(¤t->mm->mmap_sem); -+ if (file) -+ fput(file); -+out: -+ return error; - } -+#endif +-#define NR_syscalls 337 ++#define NR_syscalls 336 + NR_litmus_syscalls - /* - * sys_ipc() is the de-multiplexer for the SysV IPC calls.. 
-diff --git a/arch/h8300/kernel/sys_h8300.c b/arch/h8300/kernel/sys_h8300.c -index b5969db..8cb5d73 100644 ---- a/arch/h8300/kernel/sys_h8300.c -+++ b/arch/h8300/kernel/sys_h8300.c -@@ -26,6 +26,39 @@ - #include - #include + #define __ARCH_WANT_IPC_PARSE_VERSION + #define __ARCH_WANT_OLD_READDIR +diff --git a/arch/x86/include/asm/unistd_64.h b/arch/x86/include/asm/unistd_64.h +index 8d3ad0a..33b2003 100644 +--- a/arch/x86/include/asm/unistd_64.h ++++ b/arch/x86/include/asm/unistd_64.h +@@ -662,6 +662,10 @@ __SYSCALL(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo) + #define __NR_perf_event_open 298 + __SYSCALL(__NR_perf_event_open, sys_perf_event_open) -+/* common code for old and new mmaps */ -+static inline long do_mmap2( -+ unsigned long addr, unsigned long len, -+ unsigned long prot, unsigned long flags, -+ unsigned long fd, unsigned long pgoff) -+{ -+ int error = -EBADF; -+ struct file * file = NULL; -+ -+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); -+ if (!(flags & MAP_ANONYMOUS)) { -+ file = fget(fd); -+ if (!file) -+ goto out; -+ } -+ -+ down_write(¤t->mm->mmap_sem); -+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); -+ up_write(¤t->mm->mmap_sem); -+ -+ if (file) -+ fput(file); -+out: -+ return error; -+} ++#define __NR_LITMUS 299 + -+asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, -+ unsigned long prot, unsigned long flags, -+ unsigned long fd, unsigned long pgoff) -+{ -+ return do_mmap2(addr, len, prot, flags, fd, pgoff); -+} ++#include "litmus/unistd_64.h" + - /* - * Perform the select(nd, in, out, ex, tv) and mmap() system - * calls. Linux/m68k cloned Linux/i386, which didn't use to be able to -@@ -54,11 +87,57 @@ asmlinkage int old_mmap(struct mmap_arg_struct *arg) - if (a.offset & ~PAGE_MASK) - goto out; + #ifndef __NO_STUBS + #define __ARCH_WANT_OLD_READDIR + #define __ARCH_WANT_OLD_STAT +diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile +index d8e5d0c..a99b34d 100644 +--- a/arch/x86/kernel/Makefile ++++ b/arch/x86/kernel/Makefile +@@ -117,6 +117,8 @@ obj-$(CONFIG_X86_CHECK_BIOS_CORRUPTION) += check.o -- error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, -- a.offset >> PAGE_SHIFT); -+ a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); -+ -+ error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); -+out: -+ return error; -+} -+ -+#if 0 /* DAVIDM - do we want this */ -+struct mmap_arg_struct64 { -+ __u32 addr; -+ __u32 len; -+ __u32 prot; -+ __u32 flags; -+ __u64 offset; /* 64 bits */ -+ __u32 fd; -+}; + obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o + ++obj-$(CONFIG_FEATHER_TRACE) += ft_event.o + -+asmlinkage long sys_mmap64(struct mmap_arg_struct64 *arg) + ### + # 64 bit specific files + ifeq ($(CONFIG_X86_64),y) +diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c +index 804c40e..3167c3d 100644 +--- a/arch/x86/kernel/cpu/intel_cacheinfo.c ++++ b/arch/x86/kernel/cpu/intel_cacheinfo.c +@@ -515,6 +515,23 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) + static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info); + #define CPUID4_INFO_IDX(x, y) (&((per_cpu(cpuid4_info, x))[y])) + ++/* returns CPUs that share the index cache with cpu */ ++int get_shared_cpu_map(cpumask_var_t mask, unsigned int cpu, int index) +{ -+ int error = -EFAULT; -+ struct file * file = NULL; -+ struct mmap_arg_struct64 a; -+ unsigned long pgoff; -+ -+ if (copy_from_user(&a, arg, sizeof(a))) -+ return -EFAULT; ++ int ret = 0; ++ struct _cpuid4_info *this_leaf; + -+ if ((long)a.offset & 
~PAGE_MASK) -+ return -EINVAL; ++ if (index >= num_cache_leaves) { ++ index = num_cache_leaves - 1; ++ ret = index; ++ } + -+ pgoff = a.offset >> PAGE_SHIFT; -+ if ((a.offset >> PAGE_SHIFT) != pgoff) -+ return -EINVAL; ++ this_leaf = CPUID4_INFO_IDX(cpu,index); ++ cpumask_copy(mask, to_cpumask(this_leaf->shared_cpu_map)); + -+ if (!(a.flags & MAP_ANONYMOUS)) { -+ error = -EBADF; -+ file = fget(a.fd); -+ if (!file) -+ goto out; -+ } -+ a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); ++ return ret; ++} + -+ down_write(¤t->mm->mmap_sem); -+ error = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags, pgoff); -+ up_write(¤t->mm->mmap_sem); -+ if (file) -+ fput(file); - out: - return error; - } -+#endif - - struct sel_arg_struct { - unsigned long n; -diff --git a/arch/h8300/kernel/syscalls.S b/arch/h8300/kernel/syscalls.S -index 2d69881..4eb67fa 100644 ---- a/arch/h8300/kernel/syscalls.S -+++ b/arch/h8300/kernel/syscalls.S -@@ -206,7 +206,7 @@ SYMBOL_NAME_LABEL(sys_call_table) - .long SYMBOL_NAME(sys_ni_syscall) /* streams2 */ - .long SYMBOL_NAME(sys_vfork) /* 190 */ - .long SYMBOL_NAME(sys_getrlimit) -- .long SYMBOL_NAME(sys_mmap_pgoff) -+ .long SYMBOL_NAME(sys_mmap2) - .long SYMBOL_NAME(sys_truncate64) - .long SYMBOL_NAME(sys_ftruncate64) - .long SYMBOL_NAME(sys_stat64) /* 195 */ -diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c -index e031ee8..625ed8f 100644 ---- a/arch/ia64/ia32/sys_ia32.c -+++ b/arch/ia64/ia32/sys_ia32.c -@@ -858,9 +858,6 @@ ia32_do_mmap (struct file *file, unsigned long addr, unsigned long len, int prot - - prot = get_prot32(prot); - -- if (flags & MAP_HUGETLB) -- return -ENOMEM; -- - #if PAGE_SHIFT > IA32_PAGE_SHIFT - mutex_lock(&ia32_mmap_mutex); - { -diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h -index cc8335e..0d9d16e 100644 ---- a/arch/ia64/include/asm/io.h -+++ b/arch/ia64/include/asm/io.h -@@ -424,8 +424,6 @@ __writeq (unsigned long val, volatile void __iomem *addr) - extern void __iomem * ioremap(unsigned long offset, unsigned long size); - extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size); - extern void iounmap (volatile void __iomem *addr); --extern void __iomem * early_ioremap (unsigned long phys_addr, unsigned long size); --extern void early_iounmap (volatile void __iomem *addr, unsigned long size); - - /* - * String version of IO memory access ops: -diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c -index 609d500..92ed83f 100644 ---- a/arch/ia64/kernel/sys_ia64.c -+++ b/arch/ia64/kernel/sys_ia64.c -@@ -100,7 +100,51 @@ sys_getpagesize (void) - asmlinkage unsigned long - ia64_brk (unsigned long brk) + #ifdef CONFIG_SMP + static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) { -- unsigned long retval = sys_brk(brk); -+ unsigned long rlim, retval, newbrk, oldbrk; -+ struct mm_struct *mm = current->mm; +diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S +index b5c061f..5e9b0e5 100644 +--- a/arch/x86/kernel/entry_64.S ++++ b/arch/x86/kernel/entry_64.S +@@ -1008,6 +1008,8 @@ apicinterrupt CALL_FUNCTION_VECTOR \ + call_function_interrupt smp_call_function_interrupt + apicinterrupt RESCHEDULE_VECTOR \ + reschedule_interrupt smp_reschedule_interrupt ++apicinterrupt PULL_TIMERS_VECTOR \ ++ pull_timers_interrupt smp_pull_timers_interrupt + #endif + + apicinterrupt ERROR_APIC_VECTOR \ +diff --git a/arch/x86/kernel/ft_event.c b/arch/x86/kernel/ft_event.c +new file mode 100644 +index 0000000..e07ee30 +--- /dev/null ++++ 
b/arch/x86/kernel/ft_event.c +@@ -0,0 +1,112 @@ ++#include + -+ /* -+ * Most of this replicates the code in sys_brk() except for an additional safety -+ * check and the clearing of r8. However, we can't call sys_brk() because we need -+ * to acquire the mmap_sem before we can do the test... -+ */ -+ down_write(&mm->mmap_sem); ++#include + -+ if (brk < mm->end_code) -+ goto out; -+ newbrk = PAGE_ALIGN(brk); -+ oldbrk = PAGE_ALIGN(mm->brk); -+ if (oldbrk == newbrk) -+ goto set_brk; -+ -+ /* Always allow shrinking brk. */ -+ if (brk <= mm->brk) { -+ if (!do_munmap(mm, newbrk, oldbrk-newbrk)) -+ goto set_brk; -+ goto out; -+ } ++#ifdef __ARCH_HAS_FEATHER_TRACE ++/* the feather trace management functions assume ++ * exclusive access to the event table ++ */ + -+ /* Check against unimplemented/unmapped addresses: */ -+ if ((newbrk - oldbrk) > RGN_MAP_LIMIT || REGION_OFFSET(newbrk) > RGN_MAP_LIMIT) -+ goto out; + -+ /* Check against rlimit.. */ -+ rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur; -+ if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim) -+ goto out; ++#define BYTE_JUMP 0xeb ++#define BYTE_JUMP_LEN 0x02 + -+ /* Check against existing mmap mappings. */ -+ if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE)) -+ goto out; ++/* for each event, there is an entry in the event table */ ++struct trace_event { ++ long id; ++ long count; ++ long start_addr; ++ long end_addr; ++}; + -+ /* Ok, looks good - let it rip. */ -+ if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk) -+ goto out; -+set_brk: -+ mm->brk = brk; -+out: -+ retval = mm->brk; -+ up_write(&mm->mmap_sem); - force_successful_syscall_return(); - return retval; - } -@@ -141,6 +185,39 @@ int ia64_mmap_check(unsigned long addr, unsigned long len, - return 0; - } - -+static inline unsigned long -+do_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, unsigned long pgoff) -+{ -+ struct file *file = NULL; ++extern struct trace_event __start___event_table[]; ++extern struct trace_event __stop___event_table[]; + -+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); -+ if (!(flags & MAP_ANONYMOUS)) { -+ file = fget(fd); -+ if (!file) -+ return -EBADF; ++int ft_enable_event(unsigned long id) ++{ ++ struct trace_event* te = __start___event_table; ++ int count = 0; ++ char* delta; ++ unsigned char* instr; + -+ if (!file->f_op || !file->f_op->mmap) { -+ addr = -ENODEV; -+ goto out; ++ while (te < __stop___event_table) { ++ if (te->id == id && ++te->count == 1) { ++ instr = (unsigned char*) te->start_addr; ++ /* make sure we don't clobber something wrong */ ++ if (*instr == BYTE_JUMP) { ++ delta = (((unsigned char*) te->start_addr) + 1); ++ *delta = 0; ++ } + } ++ if (te->id == id) ++ count++; ++ te++; + } + -+ /* Careful about overflows.. */ -+ len = PAGE_ALIGN(len); -+ if (!len || len > TASK_SIZE) { -+ addr = -EINVAL; -+ goto out; -+ } -+ -+ down_write(¤t->mm->mmap_sem); -+ addr = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); -+ up_write(¤t->mm->mmap_sem); -+ -+out: if (file) -+ fput(file); -+ return addr; ++ printk(KERN_DEBUG "ft_enable_event: enabled %d events\n", count); ++ return count; +} + - /* - * mmap2() is like mmap() except that the offset is expressed in units - * of PAGE_SIZE (instead of bytes). 
This allows to mmap2() (pieces -@@ -149,7 +226,7 @@ int ia64_mmap_check(unsigned long addr, unsigned long len, - asmlinkage unsigned long - sys_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, long pgoff) - { -- addr = sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff); -+ addr = do_mmap2(addr, len, prot, flags, fd, pgoff); - if (!IS_ERR((void *) addr)) - force_successful_syscall_return(); - return addr; -@@ -161,7 +238,7 @@ sys_mmap (unsigned long addr, unsigned long len, int prot, int flags, int fd, lo - if (offset_in_page(off) != 0) - return -EINVAL; - -- addr = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT); -+ addr = do_mmap2(addr, len, prot, flags, fd, off >> PAGE_SHIFT); - if (!IS_ERR((void *) addr)) - force_successful_syscall_return(); - return addr; -diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c -index 3dccdd8..2a14062 100644 ---- a/arch/ia64/mm/ioremap.c -+++ b/arch/ia64/mm/ioremap.c -@@ -22,12 +22,6 @@ __ioremap (unsigned long phys_addr) - } - - void __iomem * --early_ioremap (unsigned long phys_addr, unsigned long size) --{ -- return __ioremap(phys_addr); --} -- --void __iomem * - ioremap (unsigned long phys_addr, unsigned long size) - { - void __iomem *addr; -@@ -108,11 +102,6 @@ ioremap_nocache (unsigned long phys_addr, unsigned long size) - EXPORT_SYMBOL(ioremap_nocache); - - void --early_iounmap (volatile void __iomem *addr, unsigned long size) --{ --} -- --void - iounmap (volatile void __iomem *addr) - { - if (REGION_NUMBER(addr) == RGN_GATE) -diff --git a/arch/m32r/kernel/sys_m32r.c b/arch/m32r/kernel/sys_m32r.c -index d3c865c..305ac85 100644 ---- a/arch/m32r/kernel/sys_m32r.c -+++ b/arch/m32r/kernel/sys_m32r.c -@@ -76,6 +76,30 @@ asmlinkage int sys_tas(int __user *addr) - return oldval; - } - -+asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, -+ unsigned long prot, unsigned long flags, -+ unsigned long fd, unsigned long pgoff) ++int ft_disable_event(unsigned long id) +{ -+ int error = -EBADF; -+ struct file *file = NULL; ++ struct trace_event* te = __start___event_table; ++ int count = 0; ++ char* delta; ++ unsigned char* instr; + -+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); -+ if (!(flags & MAP_ANONYMOUS)) { -+ file = fget(fd); -+ if (!file) -+ goto out; ++ while (te < __stop___event_table) { ++ if (te->id == id && --te->count == 0) { ++ instr = (unsigned char*) te->start_addr; ++ if (*instr == BYTE_JUMP) { ++ delta = (((unsigned char*) te->start_addr) + 1); ++ *delta = te->end_addr - te->start_addr - ++ BYTE_JUMP_LEN; ++ } ++ } ++ if (te->id == id) ++ count++; ++ te++; + } + -+ down_write(¤t->mm->mmap_sem); -+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); -+ up_write(¤t->mm->mmap_sem); -+ -+ if (file) -+ fput(file); -+out: -+ return error; ++ printk(KERN_DEBUG "ft_disable_event: disabled %d events\n", count); ++ return count; +} + - /* - * sys_ipc() is the de-multiplexer for the SysV IPC calls.. 
- * -diff --git a/arch/m32r/kernel/syscall_table.S b/arch/m32r/kernel/syscall_table.S -index 60536e2..aa3bf4c 100644 ---- a/arch/m32r/kernel/syscall_table.S -+++ b/arch/m32r/kernel/syscall_table.S -@@ -191,7 +191,7 @@ ENTRY(sys_call_table) - .long sys_ni_syscall /* streams2 */ - .long sys_vfork /* 190 */ - .long sys_getrlimit -- .long sys_mmap_pgoff -+ .long sys_mmap2 - .long sys_truncate64 - .long sys_ftruncate64 - .long sys_stat64 /* 195 */ -diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c -index 218f441..7deb402 100644 ---- a/arch/m68k/kernel/sys_m68k.c -+++ b/arch/m68k/kernel/sys_m68k.c -@@ -29,16 +29,37 @@ - #include - #include - -+/* common code for old and new mmaps */ -+static inline long do_mmap2( -+ unsigned long addr, unsigned long len, -+ unsigned long prot, unsigned long flags, -+ unsigned long fd, unsigned long pgoff) -+{ -+ int error = -EBADF; -+ struct file * file = NULL; -+ -+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); -+ if (!(flags & MAP_ANONYMOUS)) { -+ file = fget(fd); -+ if (!file) -+ goto out; -+ } -+ -+ down_write(¤t->mm->mmap_sem); -+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); -+ up_write(¤t->mm->mmap_sem); -+ -+ if (file) -+ fput(file); -+out: -+ return error; -+} -+ - asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, - unsigned long prot, unsigned long flags, - unsigned long fd, unsigned long pgoff) - { -- /* -- * This is wrong for sun3 - there PAGE_SIZE is 8Kb, -- * so we need to shift the argument down by 1; m68k mmap64(3) -- * (in libc) expects the last argument of mmap2 in 4Kb units. -- */ -- return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff); -+ return do_mmap2(addr, len, prot, flags, fd, pgoff); - } - - /* -@@ -69,11 +90,57 @@ asmlinkage int old_mmap(struct mmap_arg_struct __user *arg) - if (a.offset & ~PAGE_MASK) - goto out; - -- error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, -- a.offset >> PAGE_SHIFT); -+ a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); -+ -+ error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); -+out: -+ return error; -+} -+ -+#if 0 -+struct mmap_arg_struct64 { -+ __u32 addr; -+ __u32 len; -+ __u32 prot; -+ __u32 flags; -+ __u64 offset; /* 64 bits */ -+ __u32 fd; -+}; -+ -+asmlinkage long sys_mmap64(struct mmap_arg_struct64 *arg) ++int ft_disable_all_events(void) +{ -+ int error = -EFAULT; -+ struct file * file = NULL; -+ struct mmap_arg_struct64 a; -+ unsigned long pgoff; -+ -+ if (copy_from_user(&a, arg, sizeof(a))) -+ return -EFAULT; -+ -+ if ((long)a.offset & ~PAGE_MASK) -+ return -EINVAL; -+ -+ pgoff = a.offset >> PAGE_SHIFT; -+ if ((a.offset >> PAGE_SHIFT) != pgoff) -+ return -EINVAL; -+ -+ if (!(a.flags & MAP_ANONYMOUS)) { -+ error = -EBADF; -+ file = fget(a.fd); -+ if (!file) -+ goto out; -+ } -+ a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); ++ struct trace_event* te = __start___event_table; ++ int count = 0; ++ char* delta; ++ unsigned char* instr; + -+ down_write(¤t->mm->mmap_sem); -+ error = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags, pgoff); -+ up_write(¤t->mm->mmap_sem); -+ if (file) -+ fput(file); - out: - return error; - } -+#endif - - struct sel_arg_struct { - unsigned long n; -diff --git a/arch/m68knommu/kernel/sys_m68k.c b/arch/m68knommu/kernel/sys_m68k.c -index b67cbc7..efdd090 100644 ---- a/arch/m68knommu/kernel/sys_m68k.c -+++ b/arch/m68knommu/kernel/sys_m68k.c -@@ -27,6 +27,39 @@ - #include - #include - -+/* common code for old and new mmaps */ -+static inline long do_mmap2( -+ unsigned long addr, unsigned long 
len, -+ unsigned long prot, unsigned long flags, -+ unsigned long fd, unsigned long pgoff) -+{ -+ int error = -EBADF; -+ struct file * file = NULL; -+ -+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); -+ if (!(flags & MAP_ANONYMOUS)) { -+ file = fget(fd); -+ if (!file) -+ goto out; ++ while (te < __stop___event_table) { ++ if (te->count) { ++ instr = (unsigned char*) te->start_addr; ++ if (*instr == BYTE_JUMP) { ++ delta = (((unsigned char*) te->start_addr) ++ + 1); ++ *delta = te->end_addr - te->start_addr - ++ BYTE_JUMP_LEN; ++ te->count = 0; ++ count++; ++ } ++ } ++ te++; + } -+ -+ down_write(¤t->mm->mmap_sem); -+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); -+ up_write(¤t->mm->mmap_sem); -+ -+ if (file) -+ fput(file); -+out: -+ return error; ++ return count; +} + -+asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, -+ unsigned long prot, unsigned long flags, -+ unsigned long fd, unsigned long pgoff) ++int ft_is_event_enabled(unsigned long id) +{ -+ return do_mmap2(addr, len, prot, flags, fd, pgoff); -+} -+ - /* - * Perform the select(nd, in, out, ex, tv) and mmap() system - * calls. Linux/m68k cloned Linux/i386, which didn't use to be able to -@@ -55,8 +88,9 @@ asmlinkage int old_mmap(struct mmap_arg_struct *arg) - if (a.offset & ~PAGE_MASK) - goto out; - -- error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, -- a.offset >> PAGE_SHIFT); -+ a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); ++ struct trace_event* te = __start___event_table; + -+ error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); - out: - return error; - } -diff --git a/arch/m68knommu/kernel/syscalltable.S b/arch/m68knommu/kernel/syscalltable.S -index 486837e..23535cc 100644 ---- a/arch/m68knommu/kernel/syscalltable.S -+++ b/arch/m68knommu/kernel/syscalltable.S -@@ -210,7 +210,7 @@ ENTRY(sys_call_table) - .long sys_ni_syscall /* streams2 */ - .long sys_vfork /* 190 */ - .long sys_getrlimit -- .long sys_mmap_pgoff -+ .long sys_mmap2 - .long sys_truncate64 - .long sys_ftruncate64 - .long sys_stat64 /* 195 */ -diff --git a/arch/microblaze/kernel/sys_microblaze.c b/arch/microblaze/kernel/sys_microblaze.c -index 9f3c205..07cabed 100644 ---- a/arch/microblaze/kernel/sys_microblaze.c -+++ b/arch/microblaze/kernel/sys_microblaze.c -@@ -62,14 +62,46 @@ out: - return error; - } - -+asmlinkage long -+sys_mmap2(unsigned long addr, unsigned long len, -+ unsigned long prot, unsigned long flags, -+ unsigned long fd, unsigned long pgoff) -+{ -+ struct file *file = NULL; -+ int ret = -EBADF; -+ -+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); -+ if (!(flags & MAP_ANONYMOUS)) { -+ file = fget(fd); -+ if (!file) { -+ printk(KERN_INFO "no fd in mmap\r\n"); -+ goto out; -+ } ++ while (te < __stop___event_table) { ++ if (te->id == id) ++ return te->count; ++ te++; + } -+ -+ down_write(¤t->mm->mmap_sem); -+ ret = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); -+ up_write(¤t->mm->mmap_sem); -+ if (file) -+ fput(file); -+out: -+ return ret; ++ return 0; +} + - asmlinkage long sys_mmap(unsigned long addr, unsigned long len, - unsigned long prot, unsigned long flags, - unsigned long fd, off_t pgoff) - { -- if (pgoff & ~PAGE_MASK) -- return -EINVAL; -+ int err = -EINVAL; ++#endif + -+ if (pgoff & ~PAGE_MASK) { -+ printk(KERN_INFO "no pagemask in mmap\r\n"); -+ goto out; -+ } - -- return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff >> PAGE_SHIFT); -+ err = sys_mmap2(addr, len, prot, flags, fd, pgoff >> PAGE_SHIFT); -+out: -+ return err; - } - - /* -diff --git 
a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S -index eb50ce5..ecec191 100644 ---- a/arch/microblaze/kernel/syscall_table.S -+++ b/arch/microblaze/kernel/syscall_table.S -@@ -196,7 +196,7 @@ ENTRY(sys_call_table) - .long sys_ni_syscall /* reserved for streams2 */ - .long sys_vfork /* 190 */ - .long sys_getrlimit -- .long sys_mmap_pgoff /* mmap2 */ -+ .long sys_mmap2 /* mmap2 */ - .long sys_truncate64 - .long sys_ftruncate64 - .long sys_stat64 /* 195 */ -diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c -index ea4a746..b77fefa 100644 ---- a/arch/mips/kernel/linux32.c -+++ b/arch/mips/kernel/linux32.c -@@ -67,13 +67,28 @@ SYSCALL_DEFINE6(32_mmap2, unsigned long, addr, unsigned long, len, - unsigned long, prot, unsigned long, flags, unsigned long, fd, - unsigned long, pgoff) - { -+ struct file * file = NULL; - unsigned long error; +diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c +index 40f3077..f5fa64c 100644 +--- a/arch/x86/kernel/irqinit.c ++++ b/arch/x86/kernel/irqinit.c +@@ -172,6 +172,9 @@ static void __init smp_intr_init(void) + alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR, + call_function_single_interrupt); - error = -EINVAL; - if (pgoff & (~PAGE_MASK >> 12)) - goto out; -- error = sys_mmap_pgoff(addr, len, prot, flags, fd, -- pgoff >> (PAGE_SHIFT-12)); -+ pgoff >>= PAGE_SHIFT-12; -+ -+ if (!(flags & MAP_ANONYMOUS)) { -+ error = -EBADF; -+ file = fget(fd); -+ if (!file) -+ goto out; -+ } -+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); ++ /* IPI for hrtimer pulling on remote cpus */ ++ alloc_intr_gate(PULL_TIMERS_VECTOR, pull_timers_interrupt); + -+ down_write(¤t->mm->mmap_sem); -+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); -+ up_write(¤t->mm->mmap_sem); -+ if (file) -+ fput(file); + /* Low priority IPI to cleanup after moving an irq */ + set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt); + set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors); +diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c +index ec1de97..a93528b 100644 +--- a/arch/x86/kernel/smp.c ++++ b/arch/x86/kernel/smp.c +@@ -22,6 +22,9 @@ + #include + #include + ++#include ++#include + - out: - return error; - } -diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c -index 3f7f466..fe0d798 100644 ---- a/arch/mips/kernel/syscall.c -+++ b/arch/mips/kernel/syscall.c -@@ -93,8 +93,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, - * We do not accept a shared mapping if it would violate - * cache aliasing constraints. 
- */ -- if ((flags & MAP_SHARED) && -- ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask)) -+ if ((flags & MAP_SHARED) && (addr & shm_align_mask)) - return -EINVAL; - return addr; - } -@@ -130,6 +129,31 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, + #include + #include + #include +@@ -117,6 +120,7 @@ static void native_smp_send_reschedule(int cpu) + WARN_ON(1); + return; } ++ TS_SEND_RESCHED_START(cpu); + apic->send_IPI_mask(cpumask_of(cpu), RESCHEDULE_VECTOR); } -+/* common code for old and new mmaps */ -+static inline unsigned long -+do_mmap2(unsigned long addr, unsigned long len, unsigned long prot, -+ unsigned long flags, unsigned long fd, unsigned long pgoff) -+{ -+ unsigned long error = -EBADF; -+ struct file * file = NULL; -+ -+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); -+ if (!(flags & MAP_ANONYMOUS)) { -+ file = fget(fd); -+ if (!file) -+ goto out; -+ } -+ -+ down_write(¤t->mm->mmap_sem); -+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); -+ up_write(¤t->mm->mmap_sem); -+ -+ if (file) -+ fput(file); -+out: -+ return error; -+} -+ - SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len, - unsigned long, prot, unsigned long, flags, unsigned long, - fd, off_t, offset) -@@ -140,7 +164,7 @@ SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len, - if (offset & ~PAGE_MASK) - goto out; - -- result = sys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT); -+ result = do_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT); - - out: - return result; -@@ -153,7 +177,7 @@ SYSCALL_DEFINE6(mips_mmap2, unsigned long, addr, unsigned long, len, - if (pgoff & (~PAGE_MASK >> 12)) - return -EINVAL; - -- return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT-12)); -+ return do_mmap2(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT-12)); +@@ -146,6 +150,16 @@ void native_send_call_func_ipi(const struct cpumask *mask) + free_cpumask_var(allbutself); } - save_static_function(sys_fork); -diff --git a/arch/mn10300/include/asm/mman.h b/arch/mn10300/include/asm/mman.h -index db5c53d..8eebf89 100644 ---- a/arch/mn10300/include/asm/mman.h -+++ b/arch/mn10300/include/asm/mman.h -@@ -1,6 +1 @@ - #include -- --#define MIN_MAP_ADDR PAGE_SIZE /* minimum fixed mmap address */ -- --#define arch_mmap_check(addr, len, flags) \ -- (((flags) & MAP_FIXED && (addr) < MIN_MAP_ADDR) ? 
-EINVAL : 0) -diff --git a/arch/mn10300/kernel/entry.S b/arch/mn10300/kernel/entry.S -index c9ee6c0..a94e7ea 100644 ---- a/arch/mn10300/kernel/entry.S -+++ b/arch/mn10300/kernel/entry.S -@@ -578,7 +578,7 @@ ENTRY(sys_call_table) - .long sys_ni_syscall /* reserved for streams2 */ - .long sys_vfork /* 190 */ - .long sys_getrlimit -- .long sys_mmap_pgoff -+ .long sys_mmap2 - .long sys_truncate64 - .long sys_ftruncate64 - .long sys_stat64 /* 195 */ -diff --git a/arch/mn10300/kernel/sys_mn10300.c b/arch/mn10300/kernel/sys_mn10300.c -index 17cc6ce..8ca5af0 100644 ---- a/arch/mn10300/kernel/sys_mn10300.c -+++ b/arch/mn10300/kernel/sys_mn10300.c -@@ -23,13 +23,47 @@ - - #include - -+#define MIN_MAP_ADDR PAGE_SIZE /* minimum fixed mmap address */ -+ -+/* -+ * memory mapping syscall -+ */ -+asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, -+ unsigned long prot, unsigned long flags, -+ unsigned long fd, unsigned long pgoff) ++/* trigger timers on remote cpu */ ++void smp_send_pull_timers(int cpu) +{ -+ struct file *file = NULL; -+ long error = -EINVAL; -+ -+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); -+ -+ if (flags & MAP_FIXED && addr < MIN_MAP_ADDR) -+ goto out; -+ -+ error = -EBADF; -+ if (!(flags & MAP_ANONYMOUS)) { -+ file = fget(fd); -+ if (!file) -+ goto out; ++ if (unlikely(cpu_is_offline(cpu))) { ++ WARN_ON(1); ++ return; + } -+ -+ down_write(¤t->mm->mmap_sem); -+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); -+ up_write(¤t->mm->mmap_sem); -+ -+ if (file) -+ fput(file); -+out: -+ return error; ++ apic->send_IPI_mask(cpumask_of(cpu), PULL_TIMERS_VECTOR); +} + - asmlinkage long old_mmap(unsigned long addr, unsigned long len, - unsigned long prot, unsigned long flags, - unsigned long fd, unsigned long offset) + /* + * this function calls the 'stop' function on all other CPUs in the system. 
+ */ +@@ -197,7 +211,12 @@ static void native_smp_send_stop(void) + void smp_reschedule_interrupt(struct pt_regs *regs) { - if (offset & ~PAGE_MASK) - return -EINVAL; -- return sys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT); -+ return sys_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT); - } - - struct sel_arg_struct { -diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c -index 9147391..71b3195 100644 ---- a/arch/parisc/kernel/sys_parisc.c -+++ b/arch/parisc/kernel/sys_parisc.c -@@ -110,14 +110,37 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, - return addr; + ack_APIC_irq(); ++ /* LITMUS^RT needs this interrupt to proper reschedule ++ * on this cpu ++ */ ++ set_tsk_need_resched(current); + inc_irq_stat(irq_resched_count); ++ TS_SEND_RESCHED_END; + /* + * KVM uses this interrupt to force a cpu out of guest mode + */ +@@ -221,6 +240,15 @@ void smp_call_function_single_interrupt(struct pt_regs *regs) + irq_exit(); } -+static unsigned long do_mmap2(unsigned long addr, unsigned long len, -+ unsigned long prot, unsigned long flags, unsigned long fd, -+ unsigned long pgoff) -+{ -+ struct file * file = NULL; -+ unsigned long error = -EBADF; -+ if (!(flags & MAP_ANONYMOUS)) { -+ file = fget(fd); -+ if (!file) -+ goto out; -+ } -+ -+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); -+ -+ down_write(¤t->mm->mmap_sem); -+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); -+ up_write(¤t->mm->mmap_sem); ++extern void hrtimer_pull(void); + -+ if (file != NULL) -+ fput(file); -+out: -+ return error; ++void smp_pull_timers_interrupt(struct pt_regs *regs) ++{ ++ ack_APIC_irq(); ++ TRACE("pull timer interrupt\n"); ++ hrtimer_pull(); +} + - asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len, - unsigned long prot, unsigned long flags, unsigned long fd, - unsigned long pgoff) - { - /* Make sure the shift for mmap2 is constant (12), no matter what PAGE_SIZE - we have. 
*/ -- return sys_mmap_pgoff(addr, len, prot, flags, fd, -- pgoff >> (PAGE_SHIFT - 12)); -+ return do_mmap2(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT - 12)); - } - - asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len, -@@ -125,8 +148,7 @@ asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len, - unsigned long offset) - { - if (!(offset & ~PAGE_MASK)) { -- return sys_mmap_pgoff(addr, len, prot, flags, fd, -- offset >> PAGE_SHIFT); -+ return do_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT); - } else { - return -EINVAL; - } -diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h -index 5698502..014a624 100644 ---- a/arch/powerpc/include/asm/elf.h -+++ b/arch/powerpc/include/asm/elf.h -@@ -236,10 +236,14 @@ typedef elf_vrregset_t elf_fpxregset_t; - #ifdef __powerpc64__ - # define SET_PERSONALITY(ex) \ - do { \ -+ unsigned long new_flags = 0; \ - if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \ -- set_thread_flag(TIF_32BIT); \ -+ new_flags = _TIF_32BIT; \ -+ if ((current_thread_info()->flags & _TIF_32BIT) \ -+ != new_flags) \ -+ set_thread_flag(TIF_ABI_PENDING); \ - else \ -- clear_thread_flag(TIF_32BIT); \ -+ clear_thread_flag(TIF_ABI_PENDING); \ - if (personality(current->personality) != PER_LINUX32) \ - set_personality(PER_LINUX | \ - (current->personality & (~PER_MASK))); \ -diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h -index 0192a4e..0845488 100644 ---- a/arch/powerpc/include/asm/module.h -+++ b/arch/powerpc/include/asm/module.h -@@ -87,10 +87,5 @@ struct exception_table_entry; - void sort_ex_table(struct exception_table_entry *start, - struct exception_table_entry *finish); - --#ifdef CONFIG_MODVERSIONS --#define ARCH_RELOCATES_KCRCTAB -- --extern const unsigned long reloc_start[]; --#endif - #endif /* __KERNEL__ */ - #endif /* _ASM_POWERPC_MODULE_H */ -diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h -index aa9d383..c8b3292 100644 ---- a/arch/powerpc/include/asm/thread_info.h -+++ b/arch/powerpc/include/asm/thread_info.h -@@ -111,6 +111,7 @@ static inline struct thread_info *current_thread_info(void) - #define TIF_NOTIFY_RESUME 13 /* callback before returning to user */ - #define TIF_FREEZE 14 /* Freezing for suspend */ - #define TIF_RUNLATCH 15 /* Is the runlatch enabled? */ -+#define TIF_ABI_PENDING 16 /* 32/64 bit switch needed */ - - /* as above, but as bit values */ - #define _TIF_SYSCALL_TRACE (1<executable is only used by the procfs. This allows a dispatch + * table to check for several different types of binary formats. We keep + * trying until we recognize the file or we run out of supported binary +- * formats. ++ * formats. 
*/ - static int emulate_vsx(unsigned char __user *addr, unsigned int reg, - unsigned int areg, struct pt_regs *regs, -- unsigned int flags, unsigned int length, -- unsigned int elsize) -+ unsigned int flags, unsigned int length) - { - char *ptr; -- unsigned long *lptr; - int ret = 0; -- int sw = 0; -- int i, j; - - flush_vsx_to_thread(current); -@@ -658,35 +654,19 @@ static int emulate_vsx(unsigned char __user *addr, unsigned int reg, - else - ptr = (char *) ¤t->thread.vr[reg - 32]; + #include +@@ -57,6 +57,8 @@ + #include + #include -- lptr = (unsigned long *) ptr; -- -- if (flags & SW) -- sw = elsize-1; -- -- for (j = 0; j < length; j += elsize) { -- for (i = 0; i < elsize; ++i) { -- if (flags & ST) -- ret |= __put_user(ptr[i^sw], addr + i); -- else -- ret |= __get_user(ptr[i^sw], addr + i); -+ if (flags & ST) -+ ret = __copy_to_user(addr, ptr, length); -+ else { -+ if (flags & SPLT){ -+ ret = __copy_from_user(ptr, addr, length); -+ ptr += length; - } -- ptr += elsize; -- addr += elsize; -+ ret |= __copy_from_user(ptr, addr, length); - } -- -- if (!ret) { -- if (flags & U) -- regs->gpr[areg] = regs->dar; -- -- /* Splat load copies the same data to top and bottom 8 bytes */ -- if (flags & SPLT) -- lptr[1] = lptr[0]; -- /* For 8 byte loads, zero the top 8 bytes */ -- else if (!(flags & ST) && (8 == length)) -- lptr[1] = 0; -- } else -+ if (flags & U) -+ regs->gpr[areg] = regs->dar; -+ if (ret) - return -EFAULT; -- - return 1; ++#include ++ + #include + #include + #include +@@ -80,7 +82,7 @@ int __register_binfmt(struct linux_binfmt * fmt, int insert) + insert ? list_add(&fmt->lh, &formats) : + list_add_tail(&fmt->lh, &formats); + write_unlock(&binfmt_lock); +- return 0; ++ return 0; } - #endif -@@ -787,25 +767,16 @@ int fix_alignment(struct pt_regs *regs) - #ifdef CONFIG_VSX - if ((instruction & 0xfc00003e) == 0x7c000018) { -- unsigned int elsize; -- -- /* Additional register addressing bit (64 VSX vs 32 FPR/GPR) */ -+ /* Additional register addressing bit (64 VSX vs 32 FPR/GPR */ - reg |= (instruction & 0x1) << 5; - /* Simple inline decoder instead of a table */ -- /* VSX has only 8 and 16 byte memory accesses */ -- nb = 8; - if (instruction & 0x200) - nb = 16; -- -- /* Vector stores in little-endian mode swap individual -- elements, so process them separately */ -- elsize = 4; -- if (instruction & 0x80) -- elsize = 8; -- -+ else if (instruction & 0x080) -+ nb = 8; -+ else -+ nb = 4; - flags = 0; -- if (regs->msr & MSR_LE) -- flags |= SW; - if (instruction & 0x100) - flags |= ST; - if (instruction & 0x040) -@@ -816,7 +787,7 @@ int fix_alignment(struct pt_regs *regs) - nb = 8; - } - PPC_WARN_EMULATED(vsx); -- return emulate_vsx(addr, reg, areg, regs, flags, nb, elsize); -+ return emulate_vsx(addr, reg, areg, regs, flags, nb); - } - #endif - /* A size of 0 indicates an instruction we don't support, with -diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c -index cadbed6..e8dfdbd 100644 ---- a/arch/powerpc/kernel/pci-common.c -+++ b/arch/powerpc/kernel/pci-common.c -@@ -1107,12 +1107,6 @@ void __devinit pcibios_setup_bus_devices(struct pci_bus *bus) - list_for_each_entry(dev, &bus->devices, bus_list) { - struct dev_archdata *sd = &dev->dev.archdata; + EXPORT_SYMBOL(__register_binfmt); +@@ -1006,7 +1008,7 @@ int flush_old_exec(struct linux_binprm * bprm) + group */ -- /* Cardbus can call us to add new devices to a bus, so ignore -- * those who are already fully discovered -- */ -- if (dev->is_added) -- continue; -- - /* Setup OF node pointer in archdata */ - 
sd->of_node = pci_device_to_OF_node(dev); + current->self_exec_id++; +- ++ + flush_signal_handlers(current, 0); + flush_old_files(current->files); -@@ -1153,13 +1147,6 @@ void __devinit pcibios_fixup_bus(struct pci_bus *bus) +@@ -1102,8 +1104,8 @@ int check_unsafe_exec(struct linux_binprm *bprm) + return res; } - EXPORT_SYMBOL(pcibios_fixup_bus); - --void __devinit pci_fixup_cardbus(struct pci_bus *bus) --{ -- /* Now fixup devices on that bus */ -- pcibios_setup_bus_devices(bus); --} -- -- - static int skip_isa_ioresource_align(struct pci_dev *dev) - { - if ((ppc_pci_flags & PPC_PCI_CAN_SKIP_ISA_ALIGN) && -diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c -index 7b816da..c930ac3 100644 ---- a/arch/powerpc/kernel/process.c -+++ b/arch/powerpc/kernel/process.c -@@ -554,6 +554,18 @@ void exit_thread(void) - - void flush_thread(void) - { -+#ifdef CONFIG_PPC64 -+ struct thread_info *t = current_thread_info(); -+ -+ if (test_ti_thread_flag(t, TIF_ABI_PENDING)) { -+ clear_ti_thread_flag(t, TIF_ABI_PENDING); -+ if (test_ti_thread_flag(t, TIF_32BIT)) -+ clear_ti_thread_flag(t, TIF_32BIT); -+ else -+ set_ti_thread_flag(t, TIF_32BIT); -+ } -+#endif -+ - discard_lazy_cpu_state(); - if (current->thread.dabr) { -diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c -index 3370e62..c04832c 100644 ---- a/arch/powerpc/kernel/syscalls.c -+++ b/arch/powerpc/kernel/syscalls.c -@@ -140,6 +140,7 @@ static inline unsigned long do_mmap2(unsigned long addr, size_t len, - unsigned long prot, unsigned long flags, - unsigned long fd, unsigned long off, int shift) - { -+ struct file * file = NULL; - unsigned long ret = -EINVAL; +-/* +- * Fill the binprm structure from the inode. ++/* ++ * Fill the binprm structure from the inode. + * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes + * + * This may be called multiple times for binary chains (scripts for example). +@@ -1318,6 +1320,7 @@ int do_execve(char * filename, + goto out_unmark; - if (!arch_validate_prot(prot)) -@@ -150,8 +151,20 @@ static inline unsigned long do_mmap2(unsigned long addr, size_t len, - goto out; - off >>= shift; - } -+ -+ ret = -EBADF; -+ if (!(flags & MAP_ANONYMOUS)) { -+ if (!(file = fget(fd))) -+ goto out; -+ } -+ -+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); + sched_exec(); ++ litmus_exec(); -- ret = sys_mmap_pgoff(addr, len, prot, flags, fd, off); -+ down_write(¤t->mm->mmap_sem); -+ ret = do_mmap_pgoff(file, addr, len, prot, flags, off); -+ up_write(¤t->mm->mmap_sem); -+ if (file) -+ fput(file); - out: - return ret; - } -diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S -index fe46048..67b6916 100644 ---- a/arch/powerpc/kernel/vector.S -+++ b/arch/powerpc/kernel/vector.S -@@ -58,7 +58,7 @@ _GLOBAL(load_up_altivec) - * all 1's - */ - mfspr r4,SPRN_VRSAVE -- cmpwi 0,r4,0 -+ cmpdi 0,r4,0 - bne+ 1f - li r4,-1 - mtspr SPRN_VRSAVE,r4 -diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S -index dcd01c8..27735a7 100644 ---- a/arch/powerpc/kernel/vmlinux.lds.S -+++ b/arch/powerpc/kernel/vmlinux.lds.S -@@ -38,9 +38,6 @@ jiffies = jiffies_64 + 4; + bprm->file = file; + bprm->filename = filename; +diff --git a/fs/inode.c b/fs/inode.c +index 4d8e3be..de80bc2 100644 +--- a/fs/inode.c ++++ b/fs/inode.c +@@ -282,6 +282,8 @@ void inode_init_once(struct inode *inode) + #ifdef CONFIG_FSNOTIFY + INIT_HLIST_HEAD(&inode->i_fsnotify_mark_entries); #endif - SECTIONS - { -- . = 0; -- reloc_start = .; -- - . 
= KERNELBASE; ++ INIT_LIST_HEAD(&inode->i_obj_list); ++ mutex_init(&inode->i_obj_mutex); + } + EXPORT_SYMBOL(inode_init_once); - /* -diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c -index e82749b..ae88b14 100644 ---- a/arch/powerpc/sysdev/fsl_pci.c -+++ b/arch/powerpc/sysdev/fsl_pci.c -@@ -392,22 +392,8 @@ DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8536, quirk_fsl_pcie_header); - DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8641, quirk_fsl_pcie_header); - DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8641D, quirk_fsl_pcie_header); - DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8610, quirk_fsl_pcie_header); --DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1011E, quirk_fsl_pcie_header); --DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1011, quirk_fsl_pcie_header); --DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1013E, quirk_fsl_pcie_header); --DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1013, quirk_fsl_pcie_header); --DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1020E, quirk_fsl_pcie_header); --DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1020, quirk_fsl_pcie_header); --DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1022E, quirk_fsl_pcie_header); --DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1022, quirk_fsl_pcie_header); --DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2010E, quirk_fsl_pcie_header); --DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2010, quirk_fsl_pcie_header); - DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2020E, quirk_fsl_pcie_header); - DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2020, quirk_fsl_pcie_header); --DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4040E, quirk_fsl_pcie_header); --DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4040, quirk_fsl_pcie_header); --DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4080E, quirk_fsl_pcie_header); --DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4080, quirk_fsl_pcie_header); - #endif /* CONFIG_PPC_85xx || CONFIG_PPC_86xx */ +diff --git a/include/linux/completion.h b/include/linux/completion.h +index 4a6b604..258bec1 100644 +--- a/include/linux/completion.h ++++ b/include/linux/completion.h +@@ -88,6 +88,7 @@ extern bool completion_done(struct completion *x); - #if defined(CONFIG_PPC_83xx) || defined(CONFIG_PPC_MPC512x) -diff --git a/arch/s390/include/asm/kvm.h b/arch/s390/include/asm/kvm.h -index 82b32a1..3dfcaeb 100644 ---- a/arch/s390/include/asm/kvm.h -+++ b/arch/s390/include/asm/kvm.h -@@ -1,5 +1,6 @@ - #ifndef __LINUX_KVM_S390_H - #define __LINUX_KVM_S390_H -+ - /* - * asm-s390/kvm.h - KVM s390 specific structures and definitions + extern void complete(struct completion *); + extern void complete_all(struct completion *); ++extern void complete_n(struct completion *, int n); + + /** + * INIT_COMPLETION: - reinitialize a completion structure +diff --git a/include/linux/fs.h b/include/linux/fs.h +index 2620a8c..5c7e0ff 100644 +--- a/include/linux/fs.h ++++ b/include/linux/fs.h +@@ -15,8 +15,8 @@ + * nr_file rlimit, so it's safe to set up a ridiculously high absolute + * upper limit on files-per-process. * -@@ -14,8 +15,6 @@ +- * Some programs (notably those using select()) may have to be +- * recompiled to take full advantage of the new limits.. ++ * Some programs (notably those using select()) may have to be ++ * recompiled to take full advantage of the new limits.. 
*/ - #include --#define __KVM_S390 -- - /* for KVM_GET_REGS and KVM_SET_REGS */ - struct kvm_regs { - /* general purpose regs for s390 */ -diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c -index 9c746c0..0debcec 100644 ---- a/arch/s390/kernel/compat_linux.c -+++ b/arch/s390/kernel/compat_linux.c -@@ -683,6 +683,38 @@ struct mmap_arg_struct_emu31 { - u32 offset; - }; + /* Fixed constants first: */ +@@ -169,7 +169,7 @@ struct inodes_stat_t { + #define SEL_EX 4 -+/* common code for old and new mmaps */ -+static inline long do_mmap2( -+ unsigned long addr, unsigned long len, -+ unsigned long prot, unsigned long flags, -+ unsigned long fd, unsigned long pgoff) -+{ -+ struct file * file = NULL; -+ unsigned long error = -EBADF; -+ -+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); -+ if (!(flags & MAP_ANONYMOUS)) { -+ file = fget(fd); -+ if (!file) -+ goto out; -+ } -+ -+ down_write(¤t->mm->mmap_sem); -+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); -+ if (!IS_ERR((void *) error) && error + len >= 0x80000000ULL) { -+ /* Result is out of bounds. */ -+ do_munmap(current->mm, addr, len); -+ error = -ENOMEM; -+ } -+ up_write(¤t->mm->mmap_sem); -+ -+ if (file) -+ fput(file); -+out: -+ return error; -+} -+ -+ - asmlinkage unsigned long - old32_mmap(struct mmap_arg_struct_emu31 __user *arg) - { -@@ -696,8 +728,7 @@ old32_mmap(struct mmap_arg_struct_emu31 __user *arg) - if (a.offset & ~PAGE_MASK) - goto out; + /* public flags for file_system_type */ +-#define FS_REQUIRES_DEV 1 ++#define FS_REQUIRES_DEV 1 + #define FS_BINARY_MOUNTDATA 2 + #define FS_HAS_SUBTYPE 4 + #define FS_REVAL_DOT 16384 /* Check the paths ".", ".." for staleness */ +@@ -466,7 +466,7 @@ struct iattr { + */ + #include -- error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, -- a.offset >> PAGE_SHIFT); -+ error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); - out: - return error; - } -@@ -710,7 +741,7 @@ sys32_mmap2(struct mmap_arg_struct_emu31 __user *arg) +-/** ++/** + * enum positive_aop_returns - aop return codes with specific semantics + * + * @AOP_WRITEPAGE_ACTIVATE: Informs the caller that page writeback has +@@ -476,7 +476,7 @@ struct iattr { + * be a candidate for writeback again in the near + * future. Other callers must be careful to unlock + * the page if they get this return. Returned by +- * writepage(); ++ * writepage(); + * + * @AOP_TRUNCATED_PAGE: The AOP method that was handed a locked page has + * unlocked it and the page might have been truncated. 
+@@ -715,6 +715,7 @@ static inline int mapping_writably_mapped(struct address_space *mapping) - if (copy_from_user(&a, arg, sizeof(a))) - goto out; -- error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset); -+ error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset); - out: - return error; - } -diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S -index e8ef21c..48215d1 100644 ---- a/arch/s390/kernel/entry.S -+++ b/arch/s390/kernel/entry.S -@@ -571,7 +571,6 @@ pgm_svcper: - mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID - oi __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP - TRACE_IRQS_ON -- lm %r2,%r6,SP_R2(%r15) # load svc arguments - stosm __SF_EMPTY(%r15),0x03 # reenable interrupts - b BASED(sysc_do_svc) + struct posix_acl; + #define ACL_NOT_CACHED ((void *)(-1)) ++struct inode_obj_id_table; + + struct inode { + struct hlist_node i_hash; +@@ -783,6 +784,8 @@ struct inode { + struct posix_acl *i_acl; + struct posix_acl *i_default_acl; + #endif ++ struct list_head i_obj_list; ++ struct mutex i_obj_mutex; + void *i_private; /* fs or device private pointer */ + }; -diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S -index f33658f..9aff1d4 100644 ---- a/arch/s390/kernel/entry64.S -+++ b/arch/s390/kernel/entry64.S -@@ -549,7 +549,6 @@ pgm_svcper: - mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID - oi __TI_flags+7(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP - TRACE_IRQS_ON -- lmg %r2,%r6,SP_R2(%r15) # load svc arguments - stosm __SF_EMPTY(%r15),0x03 # reenable interrupts - j sysc_do_svc +@@ -995,10 +998,10 @@ static inline int file_check_writeable(struct file *filp) -diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S -index d984a2a..6a25080 100644 ---- a/arch/s390/kernel/head64.S -+++ b/arch/s390/kernel/head64.S -@@ -83,8 +83,6 @@ startup_continue: - slr %r0,%r0 # set cpuid to zero - sigp %r1,%r0,0x12 # switch to esame mode - sam64 # switch to 64 bit mode -- llgfr %r13,%r13 # clear high-order half of base reg -- lmh %r0,%r15,.Lzero64-.LPG1(%r13) # clear high-order half - lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers - lg %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area - # move IPL device to lowcore -@@ -129,7 +127,6 @@ startup_continue: - .L4malign:.quad 0xffffffffffc00000 - .Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8 - .Lnop: .long 0x07000700 --.Lzero64:.fill 16,4,0x0 - #ifdef CONFIG_ZFCPDUMP - .Lcurrent_cpu: - .long 0x0 -diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c -index 86a74c9..e9d94f6 100644 ---- a/arch/s390/kernel/sys_s390.c -+++ b/arch/s390/kernel/sys_s390.c -@@ -32,6 +32,32 @@ - #include - #include "entry.h" + #define MAX_NON_LFS ((1UL<<31) - 1) -+/* common code for old and new mmaps */ -+static inline long do_mmap2( -+ unsigned long addr, unsigned long len, -+ unsigned long prot, unsigned long flags, -+ unsigned long fd, unsigned long pgoff) -+{ -+ long error = -EBADF; -+ struct file * file = NULL; -+ -+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); -+ if (!(flags & MAP_ANONYMOUS)) { -+ file = fget(fd); -+ if (!file) -+ goto out; -+ } +-/* Page cache limit. The filesystems should put that into their s_maxbytes +- limits, otherwise bad things can happen in VM. */ ++/* Page cache limit. The filesystems should put that into their s_maxbytes ++ limits, otherwise bad things can happen in VM. 
*/ + #if BITS_PER_LONG==32 +-#define MAX_LFS_FILESIZE (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1) ++#define MAX_LFS_FILESIZE (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1) + #elif BITS_PER_LONG==64 + #define MAX_LFS_FILESIZE 0x7fffffffffffffffUL + #endif +@@ -2139,7 +2142,7 @@ extern int may_open(struct path *, int, int); + + extern int kernel_read(struct file *, loff_t, char *, unsigned long); + extern struct file * open_exec(const char *); +- + -+ down_write(¤t->mm->mmap_sem); -+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); -+ up_write(¤t->mm->mmap_sem); + /* fs/dcache.c -- generic fs support functions */ + extern int is_subdir(struct dentry *, struct dentry *); + extern ino_t find_inode_number(struct dentry *, struct qstr *); +diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h +index ff037f0..b984b94 100644 +--- a/include/linux/hrtimer.h ++++ b/include/linux/hrtimer.h +@@ -166,6 +166,7 @@ struct hrtimer_clock_base { + * event devices whether high resolution mode can be + * activated. + * @nr_events: Total number of timer interrupt events ++ * @to_pull: LITMUS^RT list of timers to be pulled on this cpu + */ + struct hrtimer_cpu_base { + spinlock_t lock; +@@ -175,6 +176,26 @@ struct hrtimer_cpu_base { + int hres_active; + unsigned long nr_events; + #endif ++ struct list_head to_pull; ++}; + -+ if (file) -+ fput(file); -+out: -+ return error; -+} ++#define HRTIMER_START_ON_INACTIVE 0 ++#define HRTIMER_START_ON_QUEUED 1 + - /* - * Perform the select(nd, in, out, ex, tv) and mmap() system - * calls. Linux for S/390 isn't able to handle more than 5 -@@ -55,7 +81,7 @@ SYSCALL_DEFINE1(mmap2, struct mmap_arg_struct __user *, arg) - - if (copy_from_user(&a, arg, sizeof(a))) - goto out; -- error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset); -+ error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset); - out: - return error; - } -@@ -72,7 +98,7 @@ SYSCALL_DEFINE1(s390_old_mmap, struct mmap_arg_struct __user *, arg) - if (a.offset & ~PAGE_MASK) - goto out; - -- error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); -+ error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); - out: - return error; - } -diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c -index b400964..ba9d8a7 100644 ---- a/arch/s390/kvm/intercept.c -+++ b/arch/s390/kvm/intercept.c -@@ -213,7 +213,7 @@ static int handle_instruction_and_prog(struct kvm_vcpu *vcpu) - return rc2; - } - --static const intercept_handler_t intercept_funcs[] = { -+static const intercept_handler_t intercept_funcs[0x48 >> 2] = { - [0x00 >> 2] = handle_noop, - [0x04 >> 2] = handle_instruction, - [0x08 >> 2] = handle_prog, -@@ -230,7 +230,7 @@ int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu) - intercept_handler_t func; - u8 code = vcpu->arch.sie_block->icptcode; ++/* ++ * struct hrtimer_start_on_info - save timer info on remote cpu ++ * @list: list of hrtimer_start_on_info on remote cpu (to_pull) ++ * @timer: timer to be triggered on remote cpu ++ * @time: time event ++ * @mode: timer mode ++ * @state: activity flag ++ */ ++struct hrtimer_start_on_info { ++ struct list_head list; ++ struct hrtimer *timer; ++ ktime_t time; ++ enum hrtimer_mode mode; ++ atomic_t state; + }; -- if (code & 3 || (code >> 2) >= ARRAY_SIZE(intercept_funcs)) -+ if (code & 3 || code > 0x48) - return -ENOTSUPP; - func = intercept_funcs[code >> 2]; - if (func) -diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c -index ca2d312..07ced89 100644 ---- 
a/arch/s390/kvm/kvm-s390.c -+++ b/arch/s390/kvm/kvm-s390.c -@@ -116,16 +116,10 @@ long kvm_arch_dev_ioctl(struct file *filp, + static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time) +@@ -343,6 +364,10 @@ __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, + unsigned long delta_ns, + const enum hrtimer_mode mode, int wakeup); - int kvm_dev_ioctl_check_extension(long ext) - { -- int r; -- - switch (ext) { -- case KVM_CAP_S390_PSW: -- r = 1; -- break; - default: -- r = 0; -+ return 0; - } -- return r; - } ++extern int hrtimer_start_on(int cpu, struct hrtimer_start_on_info *info, ++ struct hrtimer *timer, ktime_t time, ++ const enum hrtimer_mode mode); ++ + extern int hrtimer_cancel(struct hrtimer *timer); + extern int hrtimer_try_to_cancel(struct hrtimer *timer); - /* Section: vm related */ -@@ -425,10 +419,8 @@ static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw) - vcpu_load(vcpu); - if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING) - rc = -EBUSY; -- else { -- vcpu->run->psw_mask = psw.mask; -- vcpu->run->psw_addr = psw.addr; -- } -+ else -+ vcpu->arch.sie_block->gpsw = psw; - vcpu_put(vcpu); - return rc; - } -@@ -516,6 +508,9 @@ rerun_vcpu: +diff --git a/include/linux/sched.h b/include/linux/sched.h +index 75e6e60..7248141 100644 +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -38,6 +38,7 @@ + #define SCHED_BATCH 3 + /* SCHED_ISO: reserved but not implemented yet */ + #define SCHED_IDLE 5 ++#define SCHED_LITMUS 6 + /* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */ + #define SCHED_RESET_ON_FORK 0x40000000 - switch (kvm_run->exit_reason) { - case KVM_EXIT_S390_SIEIC: -+ vcpu->arch.sie_block->gpsw.mask = kvm_run->s390_sieic.mask; -+ vcpu->arch.sie_block->gpsw.addr = kvm_run->s390_sieic.addr; -+ break; - case KVM_EXIT_UNKNOWN: - case KVM_EXIT_INTR: - case KVM_EXIT_S390_RESET: -@@ -524,9 +519,6 @@ rerun_vcpu: - BUG(); - } +@@ -94,6 +95,8 @@ struct sched_param { -- vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask; -- vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr; -- - might_fault(); + #include - do { -@@ -546,6 +538,8 @@ rerun_vcpu: - /* intercept cannot be handled in-kernel, prepare kvm-run */ - kvm_run->exit_reason = KVM_EXIT_S390_SIEIC; - kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode; -+ kvm_run->s390_sieic.mask = vcpu->arch.sie_block->gpsw.mask; -+ kvm_run->s390_sieic.addr = vcpu->arch.sie_block->gpsw.addr; - kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa; - kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb; - rc = 0; -@@ -557,9 +551,6 @@ rerun_vcpu: - rc = 0; - } ++#include ++ + struct exec_domain; + struct futex_pi_state; + struct robust_list_head; +@@ -1211,6 +1214,7 @@ struct sched_rt_entity { + }; -- kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask; -- kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr; -- - if (vcpu->sigset_active) - sigprocmask(SIG_SETMASK, &sigsaved, NULL); + struct rcu_node; ++struct od_table_entry; -diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c -index 15ee111..40c8c67 100644 ---- a/arch/s390/kvm/sigp.c -+++ b/arch/s390/kvm/sigp.c -@@ -188,9 +188,9 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address, + struct task_struct { + volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ +@@ -1293,9 +1297,9 @@ struct task_struct { + unsigned long stack_canary; + #endif - /* make sure that the new value is valid memory */ - address = address & 0x7fffe000u; -- if 
((copy_from_user(&tmp, (void __user *) -- (address + vcpu->arch.sie_block->gmsor) , 1)) || -- (copy_from_user(&tmp, (void __user *)(address + -+ if ((copy_from_guest(vcpu, &tmp, -+ (u64) (address + vcpu->arch.sie_block->gmsor) , 1)) || -+ (copy_from_guest(vcpu, &tmp, (u64) (address + - vcpu->arch.sie_block->gmsor + PAGE_SIZE), 1))) { - *reg |= SIGP_STAT_INVALID_PARAMETER; - return 1; /* invalid parameter */ -diff --git a/arch/score/kernel/sys_score.c b/arch/score/kernel/sys_score.c -index 856ed68..0012494 100644 ---- a/arch/score/kernel/sys_score.c -+++ b/arch/score/kernel/sys_score.c -@@ -36,16 +36,34 @@ asmlinkage long - sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot, - unsigned long flags, unsigned long fd, unsigned long pgoff) - { -- return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff); -+ int error = -EBADF; -+ struct file *file = NULL; -+ -+ if (pgoff & (~PAGE_MASK >> 12)) -+ return -EINVAL; -+ -+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); -+ if (!(flags & MAP_ANONYMOUS)) { -+ file = fget(fd); -+ if (!file) -+ return error; -+ } -+ -+ down_write(¤t->mm->mmap_sem); -+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); -+ up_write(¤t->mm->mmap_sem); -+ -+ if (file) -+ fput(file); -+ -+ return error; - } - - asmlinkage long - sys_mmap(unsigned long addr, unsigned long len, unsigned long prot, -- unsigned long flags, unsigned long fd, off_t offset) -+ unsigned long flags, unsigned long fd, off_t pgoff) - { -- if (unlikely(offset & ~PAGE_MASK)) -- return -EINVAL; -- return sys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT); -+ return sys_mmap2(addr, len, prot, flags, fd, pgoff >> PAGE_SHIFT); - } - - asmlinkage long -diff --git a/arch/sh/include/asm/pgtable_32.h b/arch/sh/include/asm/pgtable_32.h -index ba64e7f..c0d359c 100644 ---- a/arch/sh/include/asm/pgtable_32.h -+++ b/arch/sh/include/asm/pgtable_32.h -@@ -344,8 +344,7 @@ static inline void set_pte(pte_t *ptep, pte_t pte) - #define pte_special(pte) ((pte).pte_low & _PAGE_SPECIAL) - - #ifdef CONFIG_X2TLB --#define pte_write(pte) \ -- ((pte).pte_high & (_PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE)) -+#define pte_write(pte) ((pte).pte_high & _PAGE_EXT_USER_WRITE) - #else - #define pte_write(pte) ((pte).pte_low & _PAGE_RW) +- /* ++ /* + * pointers to (original) parent process, youngest child, younger sibling, +- * older sibling, respectively. (p->father can be replaced with ++ * older sibling, respectively. (p->father can be replaced with + * p->real_parent->pid) + */ + struct task_struct *real_parent; /* real parent process */ +@@ -1505,6 +1509,13 @@ struct task_struct { + int make_it_fail; #endif -@@ -359,7 +358,7 @@ static inline pte_t pte_##fn(pte_t pte) { pte.pte_##h op; return pte; } - * individually toggled (and user permissions are entirely decoupled from - * kernel permissions), we attempt to couple them a bit more sanely here. 
- */ --PTE_BIT_FUNC(high, wrprotect, &= ~(_PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE)); -+PTE_BIT_FUNC(high, wrprotect, &= ~_PAGE_EXT_USER_WRITE); - PTE_BIT_FUNC(high, mkwrite, |= _PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE); - PTE_BIT_FUNC(high, mkhuge, |= _PAGE_SZHUGE); - #else -diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c -index 44aa119..1192398 100644 ---- a/arch/sh/kernel/process_64.c -+++ b/arch/sh/kernel/process_64.c -@@ -367,7 +367,7 @@ void exit_thread(void) - void flush_thread(void) - { - -- /* Called by fs/exec.c (setup_new_exec) to remove traces of a -+ /* Called by fs/exec.c (flush_old_exec) to remove traces of a - * previously running executable. */ - #ifdef CONFIG_SH_FPU - if (last_task_used_math == current) { -diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c -index 71399cd..8aa5d1c 100644 ---- a/arch/sh/kernel/sys_sh.c -+++ b/arch/sh/kernel/sys_sh.c -@@ -28,13 +28,37 @@ - #include - #include - -+static inline long -+do_mmap2(unsigned long addr, unsigned long len, unsigned long prot, -+ unsigned long flags, int fd, unsigned long pgoff) -+{ -+ int error = -EBADF; -+ struct file *file = NULL; -+ -+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); -+ if (!(flags & MAP_ANONYMOUS)) { -+ file = fget(fd); -+ if (!file) -+ goto out; -+ } + struct prop_local_single dirties; + -+ down_write(¤t->mm->mmap_sem); -+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); -+ up_write(¤t->mm->mmap_sem); ++ /* LITMUS RT parameters and state */ ++ struct rt_param rt_param; + -+ if (file) -+ fput(file); -+out: -+ return error; -+} ++ /* references to PI semaphores, etc. */ ++ struct od_table_entry *od_table; + - asmlinkage int old_mmap(unsigned long addr, unsigned long len, - unsigned long prot, unsigned long flags, - int fd, unsigned long off) - { - if (off & ~PAGE_MASK) - return -EINVAL; -- return sys_mmap_pgoff(addr, len, prot, flags, fd, off>>PAGE_SHIFT); -+ return do_mmap2(addr, len, prot, flags, fd, off>>PAGE_SHIFT); - } - - asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, -@@ -50,7 +74,7 @@ asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, + #ifdef CONFIG_LATENCYTOP + int latency_record_count; + struct latency_record latency_record[LT_SAVECOUNT]; +@@ -2044,7 +2055,7 @@ static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, s + spin_unlock_irqrestore(&tsk->sighand->siglock, flags); - pgoff >>= PAGE_SHIFT - 12; + return ret; +-} ++} -- return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff); -+ return do_mmap2(addr, len, prot, flags, fd, pgoff); - } + extern void block_all_signals(int (*notifier)(void *priv), void *priv, + sigset_t *mask); +diff --git a/include/linux/smp.h b/include/linux/smp.h +index 39c64ba..76bb3e4 100644 +--- a/include/linux/smp.h ++++ b/include/linux/smp.h +@@ -77,6 +77,11 @@ void __smp_call_function_single(int cpuid, struct call_single_data *data, + int wait); /* -diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c -index afeb710..d2984fa 100644 ---- a/arch/sh/mm/mmap.c -+++ b/arch/sh/mm/mmap.c -@@ -54,8 +54,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, - /* We do not accept a shared mapping if it would violate - * cache aliasing constraints. 
- */ -- if ((flags & MAP_SHARED) && -- ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask)) -+ if ((flags & MAP_SHARED) && (addr & shm_align_mask)) - return -EINVAL; - return addr; - } -diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile -index 113225b..dfe272d 100644 ---- a/arch/sparc/Makefile -+++ b/arch/sparc/Makefile -@@ -27,7 +27,6 @@ AS := $(AS) -32 - LDFLAGS := -m elf32_sparc - CHECKFLAGS += -D__sparc__ - export BITS := 32 --UTS_MACHINE := sparc - - #KBUILD_CFLAGS += -g -pipe -fcall-used-g5 -fcall-used-g7 - KBUILD_CFLAGS += -m32 -pipe -mno-fpu -fcall-used-g5 -fcall-used-g7 -@@ -47,7 +46,6 @@ CHECKFLAGS += -D__sparc__ -D__sparc_v9__ -D__arch64__ -m64 - - LDFLAGS := -m elf64_sparc - export BITS := 64 --UTS_MACHINE := sparc64 - - KBUILD_CFLAGS += -m64 -pipe -mno-fpu -mcpu=ultrasparc -mcmodel=medlow \ - -ffixed-g4 -ffixed-g5 -fcall-used-g7 -Wno-sign-compare \ -diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h -index 9968085..d42e393 100644 ---- a/arch/sparc/include/asm/elf_64.h -+++ b/arch/sparc/include/asm/elf_64.h -@@ -196,10 +196,17 @@ static inline unsigned int sparc64_elf_hwcap(void) - #define ELF_PLATFORM (NULL) - - #define SET_PERSONALITY(ex) \ --do { if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \ -- set_thread_flag(TIF_32BIT); \ -+do { unsigned long new_flags = current_thread_info()->flags; \ -+ new_flags &= _TIF_32BIT; \ -+ if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \ -+ new_flags |= _TIF_32BIT; \ - else \ -- clear_thread_flag(TIF_32BIT); \ -+ new_flags &= ~_TIF_32BIT; \ -+ if ((current_thread_info()->flags & _TIF_32BIT) \ -+ != new_flags) \ -+ set_thread_flag(TIF_ABI_PENDING); \ -+ else \ -+ clear_thread_flag(TIF_ABI_PENDING); \ - /* flush_thread will update pgd cache */ \ - if (personality(current->personality) != PER_LINUX32) \ - set_personality(PER_LINUX | \ -diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h -index f78ad9a..1b45a7b 100644 ---- a/arch/sparc/include/asm/thread_info_64.h -+++ b/arch/sparc/include/asm/thread_info_64.h -@@ -227,11 +227,12 @@ register struct thread_info *current_thread_info_reg asm("g6"); - /* flag bit 8 is available */ - #define TIF_SECCOMP 9 /* secure computing */ - #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */ -+/* flag bit 11 is available */ - /* NOTE: Thread flags >= 12 should be ones we have no interest - * in using in assembly, else we can't use the mask as - * an immediate value in instructions such as andcc. 
++ * sends a 'pull timer' event to a remote CPU ++ */ ++extern void smp_send_pull_timers(int cpu); ++ ++/* + * Generic and arch helpers */ --/* flag bit 12 is available */ -+#define TIF_ABI_PENDING 12 - #define TIF_MEMDIE 13 - #define TIF_POLLING_NRFLAG 14 - #define TIF_FREEZE 15 /* is freezing for suspend */ -@@ -245,6 +246,7 @@ register struct thread_info *current_thread_info_reg asm("g6"); - #define _TIF_32BIT (1<tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name); - - err = request_irq(lp->cfg.rx_irq, ldc_rx, -- IRQF_SAMPLE_RANDOM | IRQF_DISABLED, -+ IRQF_SAMPLE_RANDOM | IRQF_DISABLED | IRQF_SHARED, - lp->rx_irq_name, lp); - if (err) - return err; - - err = request_irq(lp->cfg.tx_irq, ldc_tx, -- IRQF_SAMPLE_RANDOM | IRQF_DISABLED, -+ IRQF_SAMPLE_RANDOM | IRQF_DISABLED | IRQF_SHARED, - lp->tx_irq_name, lp); - if (err) { - free_irq(lp->cfg.rx_irq, lp); -diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c -index 4771274..b129611 100644 ---- a/arch/sparc/kernel/nmi.c -+++ b/arch/sparc/kernel/nmi.c -@@ -96,6 +96,7 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs) - int cpu = smp_processor_id(); - - clear_softint(1 << irq); -+ pcr_ops->write(PCR_PIC_PRIV); - - local_cpu_data().__nmi_count++; - -@@ -104,8 +105,6 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs) - if (notify_die(DIE_NMI, "nmi", regs, 0, - pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP) - touched = 1; -- else -- pcr_ops->write(PCR_PIC_PRIV); - - sum = kstat_irqs_cpu(0, cpu); - if (__get_cpu_var(nmi_touch)) { -diff --git a/arch/sparc/kernel/of_device_64.c b/arch/sparc/kernel/of_device_64.c -index 0a6f2d1..881947e 100644 ---- a/arch/sparc/kernel/of_device_64.c -+++ b/arch/sparc/kernel/of_device_64.c -@@ -104,19 +104,9 @@ static int of_bus_pci_map(u32 *addr, const u32 *range, - int i; - - /* Check address type match */ -- if (!((addr[0] ^ range[0]) & 0x03000000)) -- goto type_match; -- -- /* Special exception, we can map a 64-bit address into -- * a 32-bit range. -- */ -- if ((addr[0] & 0x03000000) == 0x03000000 && -- (range[0] & 0x03000000) == 0x02000000) -- goto type_match; -- -- return -EINVAL; -+ if ((addr[0] ^ range[0]) & 0x03000000) -+ return -EINVAL; - --type_match: - if (of_out_of_range(addr + 1, range + 1, range + na + pna, - na - 1, ns)) - return -EINVAL; -diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c -index 198fb4e..fa5936e 100644 ---- a/arch/sparc/kernel/perf_event.c -+++ b/arch/sparc/kernel/perf_event.c -@@ -986,17 +986,6 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self, - data.addr = 0; - - cpuc = &__get_cpu_var(cpu_hw_events); -- -- /* If the PMU has the TOE IRQ enable bits, we need to do a -- * dummy write to the %pcr to clear the overflow bits and thus -- * the interrupt. -- * -- * Do this before we peek at the counters to determine -- * overflow so we don't lose any events. 
-- */ -- if (sparc_pmu->irq_bit) -- pcr_ops->write(cpuc->pcr); -- - for (idx = 0; idx < MAX_HWEVENTS; idx++) { - struct perf_event *event = cpuc->events[idx]; - struct hw_perf_event *hwc; -diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c -index c3f1cce..18d6785 100644 ---- a/arch/sparc/kernel/process_64.c -+++ b/arch/sparc/kernel/process_64.c -@@ -365,6 +365,14 @@ void flush_thread(void) - struct thread_info *t = current_thread_info(); - struct mm_struct *mm; + #ifdef CONFIG_USE_GENERIC_SMP_HELPERS +diff --git a/include/linux/tick.h b/include/linux/tick.h +index 0482229..4f9ba05 100644 +--- a/include/linux/tick.h ++++ b/include/linux/tick.h +@@ -71,6 +71,11 @@ extern int tick_is_oneshot_available(void); + extern struct tick_device *tick_get_device(int cpu); -+ if (test_ti_thread_flag(t, TIF_ABI_PENDING)) { -+ clear_ti_thread_flag(t, TIF_ABI_PENDING); -+ if (test_ti_thread_flag(t, TIF_32BIT)) -+ clear_ti_thread_flag(t, TIF_32BIT); -+ else -+ set_ti_thread_flag(t, TIF_32BIT); -+ } + # ifdef CONFIG_HIGH_RES_TIMERS ++/* LITMUS^RT tick alignment */ ++#define LINUX_DEFAULT_TICKS 0 ++#define LITMUS_ALIGNED_TICKS 1 ++#define LITMUS_STAGGERED_TICKS 2 + - mm = t->task->mm; - if (mm) - tsb_context_switch(mm); -diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c -index 3a82e65..03035c8 100644 ---- a/arch/sparc/kernel/sys_sparc_32.c -+++ b/arch/sparc/kernel/sys_sparc_32.c -@@ -45,8 +45,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi - /* We do not accept a shared mapping if it would violate - * cache aliasing constraints. - */ -- if ((flags & MAP_SHARED) && -- ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))) -+ if ((flags & MAP_SHARED) && (addr & (SHMLBA - 1))) - return -EINVAL; - return addr; - } -@@ -80,6 +79,15 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi - } - } - -+asmlinkage unsigned long sparc_brk(unsigned long brk) -+{ -+ if(ARCH_SUN4C) { -+ if ((brk & 0xe0000000) != (current->mm->brk & 0xe0000000)) -+ return current->mm->brk; -+ } -+ return sys_brk(brk); -+} + extern int tick_init_highres(void); + extern int tick_program_event(ktime_t expires, int force); + extern void tick_setup_sched_timer(void); +diff --git a/include/litmus/bheap.h b/include/litmus/bheap.h +new file mode 100644 +index 0000000..cf4864a +--- /dev/null ++++ b/include/litmus/bheap.h +@@ -0,0 +1,77 @@ ++/* bheaps.h -- Binomial Heaps ++ * ++ * (c) 2008, 2009 Bjoern Brandenburg ++ */ + - /* - * sys_pipe() is the normal C calling standard for creating - * a pipe. It's not the way unix traditionally does this, though. 
-@@ -226,6 +234,31 @@ int sparc_mmap_check(unsigned long addr, unsigned long len) - } - - /* Linux version of mmap */ -+static unsigned long do_mmap2(unsigned long addr, unsigned long len, -+ unsigned long prot, unsigned long flags, unsigned long fd, -+ unsigned long pgoff) -+{ -+ struct file * file = NULL; -+ unsigned long retval = -EBADF; ++#ifndef BHEAP_H ++#define BHEAP_H + -+ if (!(flags & MAP_ANONYMOUS)) { -+ file = fget(fd); -+ if (!file) -+ goto out; -+ } ++#define NOT_IN_HEAP UINT_MAX + -+ len = PAGE_ALIGN(len); -+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); ++struct bheap_node { ++ struct bheap_node* parent; ++ struct bheap_node* next; ++ struct bheap_node* child; + -+ down_write(¤t->mm->mmap_sem); -+ retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); -+ up_write(¤t->mm->mmap_sem); ++ unsigned int degree; ++ void* value; ++ struct bheap_node** ref; ++}; + -+ if (file) -+ fput(file); -+out: -+ return retval; -+} - - asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len, - unsigned long prot, unsigned long flags, unsigned long fd, -@@ -233,16 +266,14 @@ asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len, - { - /* Make sure the shift for mmap2 is constant (12), no matter what PAGE_SIZE - we have. */ -- return sys_mmap_pgoff(addr, len, prot, flags, fd, -- pgoff >> (PAGE_SHIFT - 12)); -+ return do_mmap2(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT - 12)); - } - - asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len, - unsigned long prot, unsigned long flags, unsigned long fd, - unsigned long off) - { -- /* no alignment check? */ -- return sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT); -+ return do_mmap2(addr, len, prot, flags, fd, off >> PAGE_SHIFT); - } - - long sparc_remap_file_pages(unsigned long start, unsigned long size, -@@ -256,6 +287,27 @@ long sparc_remap_file_pages(unsigned long start, unsigned long size, - (pgoff >> (PAGE_SHIFT - 12)), flags); - } - -+extern unsigned long do_mremap(unsigned long addr, -+ unsigned long old_len, unsigned long new_len, -+ unsigned long flags, unsigned long new_addr); -+ -+asmlinkage unsigned long sparc_mremap(unsigned long addr, -+ unsigned long old_len, unsigned long new_len, -+ unsigned long flags, unsigned long new_addr) -+{ -+ unsigned long ret = -EINVAL; ++struct bheap { ++ struct bheap_node* head; ++ /* We cache the minimum of the heap. ++ * This speeds up repeated peek operations. 
++ */ ++ struct bheap_node* min; ++}; + -+ if (unlikely(sparc_mmap_check(addr, old_len))) -+ goto out; -+ if (unlikely(sparc_mmap_check(new_addr, new_len))) -+ goto out; -+ down_write(¤t->mm->mmap_sem); -+ ret = do_mremap(addr, old_len, new_len, flags, new_addr); -+ up_write(¤t->mm->mmap_sem); -+out: -+ return ret; -+} -+ - /* we come to here via sys_nis_syscall so it can setup the regs argument */ - asmlinkage unsigned long - c_sys_nis_syscall (struct pt_regs *regs) -diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c -index cfa0e19..e2d1024 100644 ---- a/arch/sparc/kernel/sys_sparc_64.c -+++ b/arch/sparc/kernel/sys_sparc_64.c -@@ -317,14 +317,10 @@ bottomup: - unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags) - { - unsigned long align_goal, addr = -ENOMEM; -- unsigned long (*get_area)(struct file *, unsigned long, -- unsigned long, unsigned long, unsigned long); -- -- get_area = current->mm->get_unmapped_area; - - if (flags & MAP_FIXED) { - /* Ok, don't mess with it. */ -- return get_area(NULL, orig_addr, len, pgoff, flags); -+ return get_unmapped_area(NULL, orig_addr, len, pgoff, flags); - } - flags &= ~MAP_SHARED; - -@@ -337,7 +333,7 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u - align_goal = (64UL * 1024); - - do { -- addr = get_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags); -+ addr = get_unmapped_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags); - if (!(addr & ~PAGE_MASK)) { - addr = (addr + (align_goal - 1UL)) & ~(align_goal - 1UL); - break; -@@ -355,7 +351,7 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u - * be obtained. - */ - if (addr & ~PAGE_MASK) -- addr = get_area(NULL, orig_addr, len, pgoff, flags); -+ addr = get_unmapped_area(NULL, orig_addr, len, pgoff, flags); - - return addr; - } -@@ -403,6 +399,18 @@ void arch_pick_mmap_layout(struct mm_struct *mm) - } - } - -+SYSCALL_DEFINE1(sparc_brk, unsigned long, brk) -+{ -+ /* People could try to be nasty and use ta 0x6d in 32bit programs */ -+ if (test_thread_flag(TIF_32BIT) && brk >= STACK_TOP32) -+ return current->mm->brk; ++typedef int (*bheap_prio_t)(struct bheap_node* a, struct bheap_node* b); + -+ if (unlikely(straddles_64bit_va_hole(current->mm->brk, brk))) -+ return current->mm->brk; ++void bheap_init(struct bheap* heap); ++void bheap_node_init(struct bheap_node** ref_to_bheap_node_ptr, void* value); + -+ return sys_brk(brk); ++static inline int bheap_node_in_heap(struct bheap_node* h) ++{ ++ return h->degree != NOT_IN_HEAP; +} -+ - /* - * sys_pipe() is the normal C calling standard for creating - * a pipe. It's not the way unix traditionally does this, though. 
-@@ -560,13 +568,23 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, - unsigned long, prot, unsigned long, flags, unsigned long, fd, - unsigned long, off) - { -- unsigned long retval = -EINVAL; -+ struct file * file = NULL; -+ unsigned long retval = -EBADF; - -- if ((off + PAGE_ALIGN(len)) < off) -- goto out; -- if (off & ~PAGE_MASK) -- goto out; -- retval = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT); -+ if (!(flags & MAP_ANONYMOUS)) { -+ file = fget(fd); -+ if (!file) -+ goto out; -+ } -+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); -+ len = PAGE_ALIGN(len); -+ -+ down_write(¤t->mm->mmap_sem); -+ retval = do_mmap(file, addr, len, prot, flags, off); -+ up_write(¤t->mm->mmap_sem); -+ -+ if (file) -+ fput(file); - out: - return retval; - } -@@ -596,6 +614,12 @@ SYSCALL_DEFINE5(64_mremap, unsigned long, addr, unsigned long, old_len, - - if (test_thread_flag(TIF_32BIT)) - goto out; -+ if (unlikely(new_len >= VA_EXCLUDE_START)) -+ goto out; -+ if (unlikely(sparc_mmap_check(addr, old_len))) -+ goto out; -+ if (unlikely(sparc_mmap_check(new_addr, new_len))) -+ goto out; - - down_write(¤t->mm->mmap_sem); - ret = do_mremap(addr, old_len, new_len, flags, new_addr); -diff --git a/arch/sparc/kernel/systbls.h b/arch/sparc/kernel/systbls.h -index d2f999a..a63c5d2 100644 ---- a/arch/sparc/kernel/systbls.h -+++ b/arch/sparc/kernel/systbls.h -@@ -9,6 +9,7 @@ - struct new_utsname; - - extern asmlinkage unsigned long sys_getpagesize(void); -+extern asmlinkage unsigned long sparc_brk(unsigned long brk); - extern asmlinkage long sparc_pipe(struct pt_regs *regs); - extern asmlinkage long sys_ipc(unsigned int call, int first, - unsigned long second, -diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S -index 14f950a..0f1658d 100644 ---- a/arch/sparc/kernel/systbls_32.S -+++ b/arch/sparc/kernel/systbls_32.S -@@ -19,7 +19,7 @@ sys_call_table: - /*0*/ .long sys_restart_syscall, sys_exit, sys_fork, sys_read, sys_write - /*5*/ .long sys_open, sys_close, sys_wait4, sys_creat, sys_link - /*10*/ .long sys_unlink, sunos_execv, sys_chdir, sys_chown16, sys_mknod --/*15*/ .long sys_chmod, sys_lchown16, sys_brk, sys_nis_syscall, sys_lseek -+/*15*/ .long sys_chmod, sys_lchown16, sparc_brk, sys_nis_syscall, sys_lseek - /*20*/ .long sys_getpid, sys_capget, sys_capset, sys_setuid16, sys_getuid16 - /*25*/ .long sys_vmsplice, sys_ptrace, sys_alarm, sys_sigaltstack, sys_pause - /*30*/ .long sys_utime, sys_lchown, sys_fchown, sys_access, sys_nice -@@ -67,7 +67,7 @@ sys_call_table: - /*235*/ .long sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys_mlockall - /*240*/ .long sys_munlockall, sys_sched_setparam, sys_sched_getparam, sys_sched_setscheduler, sys_sched_getscheduler - /*245*/ .long sys_sched_yield, sys_sched_get_priority_max, sys_sched_get_priority_min, sys_sched_rr_get_interval, sys_nanosleep --/*250*/ .long sys_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nfsservctl -+/*250*/ .long sparc_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nfsservctl - /*255*/ .long sys_sync_file_range, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep - /*260*/ .long sys_sched_getaffinity, sys_sched_setaffinity, sys_timer_settime, sys_timer_gettime, sys_timer_getoverrun - /*265*/ .long sys_timer_delete, sys_timer_create, sys_nis_syscall, sys_io_setup, sys_io_destroy -diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S -index f63c871..009825f 100644 ---- a/arch/sparc/kernel/systbls_64.S -+++ b/arch/sparc/kernel/systbls_64.S 
-@@ -21,7 +21,7 @@ sys_call_table32: - /*0*/ .word sys_restart_syscall, sys32_exit, sys_fork, sys_read, sys_write - /*5*/ .word sys32_open, sys_close, sys32_wait4, sys32_creat, sys_link - /*10*/ .word sys_unlink, sunos_execv, sys_chdir, sys_chown16, sys32_mknod --/*15*/ .word sys_chmod, sys_lchown16, sys_brk, sys32_perfctr, sys32_lseek -+/*15*/ .word sys_chmod, sys_lchown16, sys_sparc_brk, sys32_perfctr, sys32_lseek - /*20*/ .word sys_getpid, sys_capget, sys_capset, sys_setuid16, sys_getuid16 - /*25*/ .word sys32_vmsplice, compat_sys_ptrace, sys_alarm, sys32_sigaltstack, sys_pause - /*30*/ .word compat_sys_utime, sys_lchown, sys_fchown, sys32_access, sys32_nice -@@ -96,7 +96,7 @@ sys_call_table: - /*0*/ .word sys_restart_syscall, sparc_exit, sys_fork, sys_read, sys_write - /*5*/ .word sys_open, sys_close, sys_wait4, sys_creat, sys_link - /*10*/ .word sys_unlink, sys_nis_syscall, sys_chdir, sys_chown, sys_mknod --/*15*/ .word sys_chmod, sys_lchown, sys_brk, sys_perfctr, sys_lseek -+/*15*/ .word sys_chmod, sys_lchown, sys_sparc_brk, sys_perfctr, sys_lseek - /*20*/ .word sys_getpid, sys_capget, sys_capset, sys_setuid, sys_getuid - /*25*/ .word sys_vmsplice, sys_ptrace, sys_alarm, sys_sigaltstack, sys_nis_syscall - /*30*/ .word sys_utime, sys_nis_syscall, sys_nis_syscall, sys_access, sys_nice -diff --git a/arch/sparc/lib/mcount.S b/arch/sparc/lib/mcount.S -index 24b8b12..7ce9c65 100644 ---- a/arch/sparc/lib/mcount.S -+++ b/arch/sparc/lib/mcount.S -@@ -64,9 +64,8 @@ mcount: - 2: sethi %hi(softirq_stack), %g3 - or %g3, %lo(softirq_stack), %g3 - ldx [%g3 + %g1], %g7 -- sub %g7, STACK_BIAS, %g7 - cmp %sp, %g7 -- bleu,pt %xcc, 3f -+ bleu,pt %xcc, 2f - sethi %hi(THREAD_SIZE), %g3 - add %g7, %g3, %g7 - cmp %sp, %g7 -@@ -76,7 +75,7 @@ mcount: - * again, we are already trying to output the stack overflow - * message. - */ --3: sethi %hi(ovstack), %g7 ! cant move to panic stack fast enough -+ sethi %hi(ovstack), %g7 ! 
cant move to panic stack fast enough - or %g7, %lo(ovstack), %g7 - add %g7, OVSTACKSIZE, %g3 - sub %g3, STACK_BIAS + 192, %g3 -diff --git a/arch/um/kernel/syscall.c b/arch/um/kernel/syscall.c -index cccab85..a4625c7 100644 ---- a/arch/um/kernel/syscall.c -+++ b/arch/um/kernel/syscall.c -@@ -8,7 +8,6 @@ - #include "linux/mm.h" - #include "linux/sched.h" - #include "linux/utsname.h" --#include "linux/syscalls.h" - #include "asm/current.h" - #include "asm/mman.h" - #include "asm/uaccess.h" -@@ -38,6 +37,31 @@ long sys_vfork(void) - return ret; - } - -+/* common code for old and new mmaps */ -+long sys_mmap2(unsigned long addr, unsigned long len, -+ unsigned long prot, unsigned long flags, -+ unsigned long fd, unsigned long pgoff) -+{ -+ long error = -EBADF; -+ struct file * file = NULL; -+ -+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); -+ if (!(flags & MAP_ANONYMOUS)) { -+ file = fget(fd); -+ if (!file) -+ goto out; -+ } + -+ down_write(¤t->mm->mmap_sem); -+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); -+ up_write(¤t->mm->mmap_sem); -+ -+ if (file) -+ fput(file); -+ out: -+ return error; ++static inline int bheap_empty(struct bheap* heap) ++{ ++ return heap->head == NULL && heap->min == NULL; +} + - long old_mmap(unsigned long addr, unsigned long len, - unsigned long prot, unsigned long flags, - unsigned long fd, unsigned long offset) -@@ -46,7 +70,7 @@ long old_mmap(unsigned long addr, unsigned long len, - if (offset & ~PAGE_MASK) - goto out; - -- err = sys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT); -+ err = sys_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT); - out: - return err; - } -diff --git a/arch/um/sys-i386/shared/sysdep/syscalls.h b/arch/um/sys-i386/shared/sysdep/syscalls.h -index e778767..9056981 100644 ---- a/arch/um/sys-i386/shared/sysdep/syscalls.h -+++ b/arch/um/sys-i386/shared/sysdep/syscalls.h -@@ -20,3 +20,7 @@ extern syscall_handler_t *sys_call_table[]; - #define EXECUTE_SYSCALL(syscall, regs) \ - ((long (*)(struct syscall_args)) \ - (*sys_call_table[syscall]))(SYSCALL_ARGS(®s->regs)) -+ -+extern long sys_mmap2(unsigned long addr, unsigned long len, -+ unsigned long prot, unsigned long flags, -+ unsigned long fd, unsigned long pgoff); -diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig -index 4fdb669..e2cd95e 100644 ---- a/arch/x86/Kconfig -+++ b/arch/x86/Kconfig -@@ -984,6 +984,12 @@ config X86_CPUID - with major 203 and minors 0 to 31 for /dev/cpu/0/cpuid to - /dev/cpu/31/cpuid. - -+config X86_CPU_DEBUG -+ tristate "/sys/kernel/debug/x86/cpu/* - CPU Debug support" -+ ---help--- -+ If you select this option, this will provide various x86 CPUs -+ information through debugfs. -+ - choice - prompt "High Memory Support" - default HIGHMEM4G if !X86_NUMAQ -@@ -2086,3 +2092,5 @@ source "crypto/Kconfig" - source "arch/x86/kvm/Kconfig" - - source "lib/Kconfig" -+ -+source "litmus/Kconfig" -diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu -index f2824fb..2649840 100644 ---- a/arch/x86/Kconfig.cpu -+++ b/arch/x86/Kconfig.cpu -@@ -400,7 +400,7 @@ config X86_TSC - - config X86_CMPXCHG64 - def_bool y -- depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM -+ depends on !M386 && !M486 - - # this should be set for all -march=.. options where the compiler - # generates cmov. 
-diff --git a/arch/x86/Makefile_32.cpu b/arch/x86/Makefile_32.cpu -index 1937226..30e9a26 100644 ---- a/arch/x86/Makefile_32.cpu -+++ b/arch/x86/Makefile_32.cpu -@@ -46,13 +46,6 @@ cflags-$(CONFIG_MGEODEGX1) += -march=pentium-mmx - # cpu entries - cflags-$(CONFIG_X86_GENERIC) += $(call tune,generic,$(call tune,i686)) - --# Work around the pentium-mmx code generator madness of gcc4.4.x which --# does stack alignment by generating horrible code _before_ the mcount --# prologue (push %ebp, mov %esp, %ebp) which breaks the function graph --# tracer assumptions. For i686, generic, core2 this is set by the --# compiler anyway --cflags-$(CONFIG_FUNCTION_GRAPH_TRACER) += $(call cc-option,-maccumulate-outgoing-args) -- - # Bug fix for binutils: this option is required in order to keep - # binutils from generating NOPL instructions against our will. - ifneq ($(CONFIG_X86_P6_NOP),y) -diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c -index f9f4724..2a4d073 100644 ---- a/arch/x86/ia32/ia32_aout.c -+++ b/arch/x86/ia32/ia32_aout.c -@@ -308,16 +308,15 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs) - if (retval) - return retval; - -- /* OK, This is the point of no return */ -- set_personality(PER_LINUX); -- set_thread_flag(TIF_IA32); -- -- setup_new_exec(bprm); -- - regs->cs = __USER32_CS; - regs->r8 = regs->r9 = regs->r10 = regs->r11 = regs->r12 = - regs->r13 = regs->r14 = regs->r15 = 0; - -+ /* OK, This is the point of no return */ -+ set_personality(PER_LINUX); -+ set_thread_flag(TIF_IA32); -+ clear_thread_flag(TIF_ABI_PENDING); -+ - current->mm->end_code = ex.a_text + - (current->mm->start_code = N_TXTADDR(ex)); - current->mm->end_data = ex.a_data + -diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S -index 5294d84..581b056 100644 ---- a/arch/x86/ia32/ia32entry.S -+++ b/arch/x86/ia32/ia32entry.S -@@ -696,7 +696,7 @@ ia32_sys_call_table: - .quad quiet_ni_syscall /* streams2 */ - .quad stub32_vfork /* 190 */ - .quad compat_sys_getrlimit -- .quad sys_mmap_pgoff -+ .quad sys32_mmap2 - .quad sys32_truncate64 - .quad sys32_ftruncate64 - .quad sys32_stat64 /* 195 */ -diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c -index 016218c..9f55271 100644 ---- a/arch/x86/ia32/sys_ia32.c -+++ b/arch/x86/ia32/sys_ia32.c -@@ -155,6 +155,9 @@ struct mmap_arg_struct { - asmlinkage long sys32_mmap(struct mmap_arg_struct __user *arg) - { - struct mmap_arg_struct a; -+ struct file *file = NULL; -+ unsigned long retval; -+ struct mm_struct *mm ; - - if (copy_from_user(&a, arg, sizeof(a))) - return -EFAULT; -@@ -162,8 +165,22 @@ asmlinkage long sys32_mmap(struct mmap_arg_struct __user *arg) - if (a.offset & ~PAGE_MASK) - return -EINVAL; - -- return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, -+ if (!(a.flags & MAP_ANONYMOUS)) { -+ file = fget(a.fd); -+ if (!file) -+ return -EBADF; -+ } ++/* insert (and reinitialize) a node into the heap */ ++void bheap_insert(bheap_prio_t higher_prio, ++ struct bheap* heap, ++ struct bheap_node* node); + -+ mm = current->mm; -+ down_write(&mm->mmap_sem); -+ retval = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags, - a.offset>>PAGE_SHIFT); -+ if (file) -+ fput(file); ++/* merge addition into target */ ++void bheap_union(bheap_prio_t higher_prio, ++ struct bheap* target, ++ struct bheap* addition); + -+ up_write(&mm->mmap_sem); ++struct bheap_node* bheap_peek(bheap_prio_t higher_prio, ++ struct bheap* heap); + -+ return retval; - } - - asmlinkage long sys32_mprotect(unsigned long start, size_t len, -@@ 
-522,6 +539,30 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd, - return ret; - } - -+asmlinkage long sys32_mmap2(unsigned long addr, unsigned long len, -+ unsigned long prot, unsigned long flags, -+ unsigned long fd, unsigned long pgoff) -+{ -+ struct mm_struct *mm = current->mm; -+ unsigned long error; -+ struct file *file = NULL; -+ -+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); -+ if (!(flags & MAP_ANONYMOUS)) { -+ file = fget(fd); -+ if (!file) -+ return -EBADF; -+ } ++struct bheap_node* bheap_take(bheap_prio_t higher_prio, ++ struct bheap* heap); + -+ down_write(&mm->mmap_sem); -+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); -+ up_write(&mm->mmap_sem); ++void bheap_uncache_min(bheap_prio_t higher_prio, struct bheap* heap); ++int bheap_decrease(bheap_prio_t higher_prio, struct bheap_node* node); + -+ if (file) -+ fput(file); -+ return error; -+} ++void bheap_delete(bheap_prio_t higher_prio, ++ struct bheap* heap, ++ struct bheap_node* node); + - asmlinkage long sys32_olduname(struct oldold_utsname __user *name) - { - char *arch = "x86_64"; -diff --git a/arch/x86/include/asm/amd_iommu.h b/arch/x86/include/asm/amd_iommu.h -index 18aa3f8..4b18089 100644 ---- a/arch/x86/include/asm/amd_iommu.h -+++ b/arch/x86/include/asm/amd_iommu.h -@@ -32,7 +32,6 @@ extern void amd_iommu_flush_all_domains(void); - extern void amd_iommu_flush_all_devices(void); - extern void amd_iommu_shutdown(void); - extern void amd_iommu_apply_erratum_63(u16 devid); --extern void amd_iommu_init_api(void); - #else - static inline int amd_iommu_init(void) { return -ENODEV; } - static inline void amd_iommu_detect(void) { } -diff --git a/arch/x86/include/asm/cpu_debug.h b/arch/x86/include/asm/cpu_debug.h ++/* allocate from memcache */ ++struct bheap_node* bheap_node_alloc(int gfp_flags); ++void bheap_node_free(struct bheap_node* hn); ++ ++/* allocate a heap node for value and insert into the heap */ ++int bheap_add(bheap_prio_t higher_prio, struct bheap* heap, ++ void* value, int gfp_flags); ++ ++void* bheap_take_del(bheap_prio_t higher_prio, ++ struct bheap* heap); ++#endif +diff --git a/include/litmus/edf_common.h b/include/litmus/edf_common.h new file mode 100644 -index 0000000..d96c1ee +index 0000000..80d4321 --- /dev/null -+++ b/arch/x86/include/asm/cpu_debug.h -@@ -0,0 +1,127 @@ -+#ifndef _ASM_X86_CPU_DEBUG_H -+#define _ASM_X86_CPU_DEBUG_H -+ ++++ b/include/litmus/edf_common.h +@@ -0,0 +1,27 @@ +/* -+ * CPU x86 architecture debug ++ * EDF common data structures and utility functions shared by all EDF ++ * based scheduler plugins ++ */ ++ ++/* CLEANUP: Add comments and make it less messy. 
+ * -+ * Copyright(C) 2009 Jaswinder Singh Rajput + */ + -+/* Register flags */ -+enum cpu_debug_bit { -+/* Model Specific Registers (MSRs) */ -+ CPU_MC_BIT, /* Machine Check */ -+ CPU_MONITOR_BIT, /* Monitor */ -+ CPU_TIME_BIT, /* Time */ -+ CPU_PMC_BIT, /* Performance Monitor */ -+ CPU_PLATFORM_BIT, /* Platform */ -+ CPU_APIC_BIT, /* APIC */ -+ CPU_POWERON_BIT, /* Power-on */ -+ CPU_CONTROL_BIT, /* Control */ -+ CPU_FEATURES_BIT, /* Features control */ -+ CPU_LBRANCH_BIT, /* Last Branch */ -+ CPU_BIOS_BIT, /* BIOS */ -+ CPU_FREQ_BIT, /* Frequency */ -+ CPU_MTTR_BIT, /* MTRR */ -+ CPU_PERF_BIT, /* Performance */ -+ CPU_CACHE_BIT, /* Cache */ -+ CPU_SYSENTER_BIT, /* Sysenter */ -+ CPU_THERM_BIT, /* Thermal */ -+ CPU_MISC_BIT, /* Miscellaneous */ -+ CPU_DEBUG_BIT, /* Debug */ -+ CPU_PAT_BIT, /* PAT */ -+ CPU_VMX_BIT, /* VMX */ -+ CPU_CALL_BIT, /* System Call */ -+ CPU_BASE_BIT, /* BASE Address */ -+ CPU_VER_BIT, /* Version ID */ -+ CPU_CONF_BIT, /* Configuration */ -+ CPU_SMM_BIT, /* System mgmt mode */ -+ CPU_SVM_BIT, /*Secure Virtual Machine*/ -+ CPU_OSVM_BIT, /* OS-Visible Workaround*/ -+/* Standard Registers */ -+ CPU_TSS_BIT, /* Task Stack Segment */ -+ CPU_CR_BIT, /* Control Registers */ -+ CPU_DT_BIT, /* Descriptor Table */ -+/* End of Registers flags */ -+ CPU_REG_ALL_BIT, /* Select all Registers */ -+}; ++#ifndef __UNC_EDF_COMMON_H__ ++#define __UNC_EDF_COMMON_H__ + -+#define CPU_REG_ALL (~0) /* Select all Registers */ -+ -+#define CPU_MC (1 << CPU_MC_BIT) -+#define CPU_MONITOR (1 << CPU_MONITOR_BIT) -+#define CPU_TIME (1 << CPU_TIME_BIT) -+#define CPU_PMC (1 << CPU_PMC_BIT) -+#define CPU_PLATFORM (1 << CPU_PLATFORM_BIT) -+#define CPU_APIC (1 << CPU_APIC_BIT) -+#define CPU_POWERON (1 << CPU_POWERON_BIT) -+#define CPU_CONTROL (1 << CPU_CONTROL_BIT) -+#define CPU_FEATURES (1 << CPU_FEATURES_BIT) -+#define CPU_LBRANCH (1 << CPU_LBRANCH_BIT) -+#define CPU_BIOS (1 << CPU_BIOS_BIT) -+#define CPU_FREQ (1 << CPU_FREQ_BIT) -+#define CPU_MTRR (1 << CPU_MTTR_BIT) -+#define CPU_PERF (1 << CPU_PERF_BIT) -+#define CPU_CACHE (1 << CPU_CACHE_BIT) -+#define CPU_SYSENTER (1 << CPU_SYSENTER_BIT) -+#define CPU_THERM (1 << CPU_THERM_BIT) -+#define CPU_MISC (1 << CPU_MISC_BIT) -+#define CPU_DEBUG (1 << CPU_DEBUG_BIT) -+#define CPU_PAT (1 << CPU_PAT_BIT) -+#define CPU_VMX (1 << CPU_VMX_BIT) -+#define CPU_CALL (1 << CPU_CALL_BIT) -+#define CPU_BASE (1 << CPU_BASE_BIT) -+#define CPU_VER (1 << CPU_VER_BIT) -+#define CPU_CONF (1 << CPU_CONF_BIT) -+#define CPU_SMM (1 << CPU_SMM_BIT) -+#define CPU_SVM (1 << CPU_SVM_BIT) -+#define CPU_OSVM (1 << CPU_OSVM_BIT) -+#define CPU_TSS (1 << CPU_TSS_BIT) -+#define CPU_CR (1 << CPU_CR_BIT) -+#define CPU_DT (1 << CPU_DT_BIT) -+ -+/* Register file flags */ -+enum cpu_file_bit { -+ CPU_INDEX_BIT, /* index */ -+ CPU_VALUE_BIT, /* value */ -+}; ++#include + -+#define CPU_FILE_VALUE (1 << CPU_VALUE_BIT) ++void edf_domain_init(rt_domain_t* rt, check_resched_needed_t resched, ++ release_jobs_t release); + -+#define MAX_CPU_FILES 512 ++int edf_higher_prio(struct task_struct* first, ++ struct task_struct* second); + -+struct cpu_private { -+ unsigned cpu; -+ unsigned type; -+ unsigned reg; -+ unsigned file; -+}; ++int edf_ready_order(struct bheap_node* a, struct bheap_node* b); + -+struct cpu_debug_base { -+ char *name; /* Register name */ -+ unsigned flag; /* Register flag */ -+ unsigned write; /* Register write flag */ -+}; ++int edf_preemption_needed(rt_domain_t* rt, struct task_struct *t); + -+/* -+ * Currently it looks similar to cpu_debug_base but once we add more files -+ * 
cpu_file_base will go in different direction ++int edf_set_hp_task(struct pi_semaphore *sem); ++int edf_set_hp_cpu_task(struct pi_semaphore *sem, int cpu); ++#endif +diff --git a/include/litmus/fdso.h b/include/litmus/fdso.h +new file mode 100644 +index 0000000..286e10f +--- /dev/null ++++ b/include/litmus/fdso.h +@@ -0,0 +1,69 @@ ++/* fdso.h - file descriptor attached shared objects ++ * ++ * (c) 2007 B. Brandenburg, LITMUS^RT project + */ -+struct cpu_file_base { -+ char *name; /* Register file name */ -+ unsigned flag; /* Register file flag */ -+ unsigned write; /* Register write flag */ -+}; + -+struct cpu_cpuX_base { -+ struct dentry *dentry; /* Register dentry */ -+ int init; /* Register index file */ -+}; ++#ifndef _LINUX_FDSO_H_ ++#define _LINUX_FDSO_H_ + -+struct cpu_debug_range { -+ unsigned min; /* Register range min */ -+ unsigned max; /* Register range max */ -+ unsigned flag; /* Supported flags */ -+}; ++#include ++#include + -+#endif /* _ASM_X86_CPU_DEBUG_H */ -diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h -index 8ac9d9a..456a304 100644 ---- a/arch/x86/include/asm/elf.h -+++ b/arch/x86/include/asm/elf.h -@@ -197,8 +197,14 @@ do { \ - set_fs(USER_DS); \ - } while (0) - --void set_personality_ia32(void); --#define COMPAT_SET_PERSONALITY(ex) set_personality_ia32() -+#define COMPAT_SET_PERSONALITY(ex) \ -+do { \ -+ if (test_thread_flag(TIF_IA32)) \ -+ clear_thread_flag(TIF_ABI_PENDING); \ -+ else \ -+ set_thread_flag(TIF_ABI_PENDING); \ -+ current->personality |= force_personality32; \ -+} while (0) - - #define COMPAT_ELF_PLATFORM ("i686") - -diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h -index f5693c8..19e22e3 100644 ---- a/arch/x86/include/asm/entry_arch.h -+++ b/arch/x86/include/asm/entry_arch.h -@@ -13,6 +13,7 @@ - BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR) - BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR) - BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR) -+BUILD_INTERRUPT(pull_timers_interrupt,PULL_TIMERS_VECTOR) - BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR) - BUILD_INTERRUPT(reboot_interrupt,REBOOT_VECTOR) - -diff --git a/arch/x86/include/asm/feather_trace.h b/arch/x86/include/asm/feather_trace.h -new file mode 100644 -index 0000000..4fd3163 ---- /dev/null -+++ b/arch/x86/include/asm/feather_trace.h -@@ -0,0 +1,17 @@ -+#ifndef _ARCH_FEATHER_TRACE_H -+#define _ARCH_FEATHER_TRACE_H ++#include + -+#include ++#define MAX_OBJECT_DESCRIPTORS 32 + -+static inline unsigned long long ft_timestamp(void) -+{ -+ return __native_read_tsc(); -+} ++typedef enum { ++ MIN_OBJ_TYPE = 0, + -+#ifdef CONFIG_X86_32 -+#include "feather_trace_32.h" -+#else -+#include "feather_trace_64.h" -+#endif ++ FMLP_SEM = 0, ++ SRP_SEM = 1, + -+#endif -diff --git a/arch/x86/include/asm/feather_trace_32.h b/arch/x86/include/asm/feather_trace_32.h -new file mode 100644 -index 0000000..192cd09 ---- /dev/null -+++ b/arch/x86/include/asm/feather_trace_32.h -@@ -0,0 +1,80 @@ -+/* Do not directly include this file. 
Include feather_trace.h instead */ ++ MAX_OBJ_TYPE = 1 ++} obj_type_t; + -+#define feather_callback __attribute__((regparm(0))) ++struct inode_obj_id { ++ struct list_head list; ++ atomic_t count; ++ struct inode* inode; + -+/* -+ * make the compiler reload any register that is not saved in -+ * a cdecl function call -+ */ -+#define CLOBBER_LIST "memory", "cc", "eax", "ecx", "edx" ++ obj_type_t type; ++ void* obj; ++ unsigned int id; ++}; + -+#define ft_event(id, callback) \ -+ __asm__ __volatile__( \ -+ "1: jmp 2f \n\t" \ -+ " call " #callback " \n\t" \ -+ ".section __event_table, \"aw\" \n\t" \ -+ ".long " #id ", 0, 1b, 2f \n\t" \ -+ ".previous \n\t" \ -+ "2: \n\t" \ -+ : : : CLOBBER_LIST) + -+#define ft_event0(id, callback) \ -+ __asm__ __volatile__( \ -+ "1: jmp 2f \n\t" \ -+ " subl $4, %%esp \n\t" \ -+ " movl $" #id ", (%%esp) \n\t" \ -+ " call " #callback " \n\t" \ -+ " addl $4, %%esp \n\t" \ -+ ".section __event_table, \"aw\" \n\t" \ -+ ".long " #id ", 0, 1b, 2f \n\t" \ -+ ".previous \n\t" \ -+ "2: \n\t" \ -+ : : : CLOBBER_LIST) ++struct od_table_entry { ++ unsigned int used; + -+#define ft_event1(id, callback, param) \ -+ __asm__ __volatile__( \ -+ "1: jmp 2f \n\t" \ -+ " subl $8, %%esp \n\t" \ -+ " movl %0, 4(%%esp) \n\t" \ -+ " movl $" #id ", (%%esp) \n\t" \ -+ " call " #callback " \n\t" \ -+ " addl $8, %%esp \n\t" \ -+ ".section __event_table, \"aw\" \n\t" \ -+ ".long " #id ", 0, 1b, 2f \n\t" \ -+ ".previous \n\t" \ -+ "2: \n\t" \ -+ : : "r" (param) : CLOBBER_LIST) ++ struct inode_obj_id* obj; ++ void* extra; ++}; + -+#define ft_event2(id, callback, param, param2) \ -+ __asm__ __volatile__( \ -+ "1: jmp 2f \n\t" \ -+ " subl $12, %%esp \n\t" \ -+ " movl %1, 8(%%esp) \n\t" \ -+ " movl %0, 4(%%esp) \n\t" \ -+ " movl $" #id ", (%%esp) \n\t" \ -+ " call " #callback " \n\t" \ -+ " addl $12, %%esp \n\t" \ -+ ".section __event_table, \"aw\" \n\t" \ -+ ".long " #id ", 0, 1b, 2f \n\t" \ -+ ".previous \n\t" \ -+ "2: \n\t" \ -+ : : "r" (param), "r" (param2) : CLOBBER_LIST) ++struct fdso_ops { ++ void* (*create) (void); ++ void (*destroy)(void*); ++ int (*open) (struct od_table_entry*, void* __user); ++ int (*close) (struct od_table_entry*); ++}; + ++/* translate a userspace supplied od into the raw table entry ++ * returns NULL if od is invalid ++ */ ++struct od_table_entry* __od_lookup(int od); + -+#define ft_event3(id, callback, p, p2, p3) \ -+ __asm__ __volatile__( \ -+ "1: jmp 2f \n\t" \ -+ " subl $16, %%esp \n\t" \ -+ " movl %2, 12(%%esp) \n\t" \ -+ " movl %1, 8(%%esp) \n\t" \ -+ " movl %0, 4(%%esp) \n\t" \ -+ " movl $" #id ", (%%esp) \n\t" \ -+ " call " #callback " \n\t" \ -+ " addl $16, %%esp \n\t" \ -+ ".section __event_table, \"aw\" \n\t" \ -+ ".long " #id ", 0, 1b, 2f \n\t" \ -+ ".previous \n\t" \ -+ "2: \n\t" \ -+ : : "r" (p), "r" (p2), "r" (p3) : CLOBBER_LIST) ++/* translate a userspace supplied od into the associated object ++ * returns NULL if od is invalid ++ */ ++static inline void* od_lookup(int od, obj_type_t type) ++{ ++ struct od_table_entry* e = __od_lookup(od); ++ return e && e->obj->type == type ? 
e->obj->obj : NULL; ++} + -+#define __ARCH_HAS_FEATHER_TRACE -diff --git a/arch/x86/include/asm/feather_trace_64.h b/arch/x86/include/asm/feather_trace_64.h ++#define lookup_fmlp_sem(od)((struct pi_semaphore*) od_lookup(od, FMLP_SEM)) ++#define lookup_srp_sem(od) ((struct srp_semaphore*) od_lookup(od, SRP_SEM)) ++#define lookup_ics(od) ((struct ics*) od_lookup(od, ICS_ID)) ++ ++ ++#endif +diff --git a/include/litmus/feather_buffer.h b/include/litmus/feather_buffer.h new file mode 100644 -index 0000000..1cffa4e +index 0000000..6c18277 --- /dev/null -+++ b/arch/x86/include/asm/feather_trace_64.h -@@ -0,0 +1,69 @@ -+/* Do not directly include this file. Include feather_trace.h instead */ ++++ b/include/litmus/feather_buffer.h +@@ -0,0 +1,94 @@ ++#ifndef _FEATHER_BUFFER_H_ ++#define _FEATHER_BUFFER_H_ + -+/* regparm is the default on x86_64 */ -+#define feather_callback ++/* requires UINT_MAX and memcpy */ + -+# define _EVENT_TABLE(id,from,to) \ -+ ".section __event_table, \"aw\"\n\t" \ -+ ".balign 8\n\t" \ -+ ".quad " #id ", 0, " #from ", " #to " \n\t" \ -+ ".previous \n\t" ++#define SLOT_FREE 0 ++#define SLOT_BUSY 1 ++#define SLOT_READY 2 + -+/* -+ * x86_64 callee only owns rbp, rbx, r12 -> r15 -+ * the called can freely modify the others -+ */ -+#define CLOBBER_LIST "memory", "cc", "rdi", "rsi", "rdx", "rcx", \ -+ "r8", "r9", "r10", "r11", "rax" ++struct ft_buffer { ++ unsigned int slot_count; ++ unsigned int slot_size; + -+#define ft_event(id, callback) \ -+ __asm__ __volatile__( \ -+ "1: jmp 2f \n\t" \ -+ " call " #callback " \n\t" \ -+ _EVENT_TABLE(id,1b,2f) \ -+ "2: \n\t" \ -+ : : : CLOBBER_LIST) -+ -+#define ft_event0(id, callback) \ -+ __asm__ __volatile__( \ -+ "1: jmp 2f \n\t" \ -+ " movq $" #id ", %%rdi \n\t" \ -+ " call " #callback " \n\t" \ -+ _EVENT_TABLE(id,1b,2f) \ -+ "2: \n\t" \ -+ : : : CLOBBER_LIST) -+ -+#define ft_event1(id, callback, param) \ -+ __asm__ __volatile__( \ -+ "1: jmp 2f \n\t" \ -+ " movq %0, %%rsi \n\t" \ -+ " movq $" #id ", %%rdi \n\t" \ -+ " call " #callback " \n\t" \ -+ _EVENT_TABLE(id,1b,2f) \ -+ "2: \n\t" \ -+ : : "r" (param) : CLOBBER_LIST) -+ -+#define ft_event2(id, callback, param, param2) \ -+ __asm__ __volatile__( \ -+ "1: jmp 2f \n\t" \ -+ " movq %1, %%rdx \n\t" \ -+ " movq %0, %%rsi \n\t" \ -+ " movq $" #id ", %%rdi \n\t" \ -+ " call " #callback " \n\t" \ -+ _EVENT_TABLE(id,1b,2f) \ -+ "2: \n\t" \ -+ : : "r" (param), "r" (param2) : CLOBBER_LIST) -+ -+#define ft_event3(id, callback, p, p2, p3) \ -+ __asm__ __volatile__( \ -+ "1: jmp 2f \n\t" \ -+ " movq %2, %%rcx \n\t" \ -+ " movq %1, %%rdx \n\t" \ -+ " movq %0, %%rsi \n\t" \ -+ " movq $" #id ", %%rdi \n\t" \ -+ " call " #callback " \n\t" \ -+ _EVENT_TABLE(id,1b,2f) \ -+ "2: \n\t" \ -+ : : "r" (p), "r" (p2), "r" (p3) : CLOBBER_LIST) -+ -+#define __ARCH_HAS_FEATHER_TRACE -diff --git a/arch/x86/include/asm/hpet.h b/arch/x86/include/asm/hpet.h -index 3251e23..1c22cb0 100644 ---- a/arch/x86/include/asm/hpet.h -+++ b/arch/x86/include/asm/hpet.h -@@ -66,7 +66,6 @@ - extern unsigned long hpet_address; - extern unsigned long force_hpet_address; - extern int hpet_force_user; --extern u8 hpet_msi_disable; - extern int is_hpet_enabled(void); - extern int hpet_enable(void); - extern void hpet_disable(void); -diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h -index ba180d9..68900e7 100644 ---- a/arch/x86/include/asm/hw_irq.h -+++ b/arch/x86/include/asm/hw_irq.h -@@ -53,6 +53,8 @@ extern void threshold_interrupt(void); - extern void call_function_interrupt(void); - extern void 
call_function_single_interrupt(void); - -+extern void pull_timers_interrupt(void); -+ - /* PIC specific functions */ - extern void disable_8259A_irq(unsigned int irq); - extern void enable_8259A_irq(unsigned int irq); -@@ -110,6 +112,7 @@ extern asmlinkage void smp_irq_move_cleanup_interrupt(void); - extern void smp_reschedule_interrupt(struct pt_regs *); - extern void smp_call_function_interrupt(struct pt_regs *); - extern void smp_call_function_single_interrupt(struct pt_regs *); -+extern void smp_pull_timers_interrupt(struct pt_regs *); - #ifdef CONFIG_X86_32 - extern void smp_invalidate_interrupt(struct pt_regs *); - #else -diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h -index 6e90a04..28c3bf3 100644 ---- a/arch/x86/include/asm/irq_vectors.h -+++ b/arch/x86/include/asm/irq_vectors.h -@@ -104,6 +104,11 @@ - #define LOCAL_TIMER_VECTOR 0xef - - /* -+ * LITMUS^RT pull timers IRQ vector -+ */ -+#define PULL_TIMERS_VECTOR 0xee -+ -+/* - * Generic system vector for platform specific use - */ - #define GENERIC_INTERRUPT_VECTOR 0xed -@@ -113,7 +118,7 @@ - */ - #define LOCAL_PENDING_VECTOR 0xec - --#define UV_BAU_MESSAGE 0xea -+#define UV_BAU_MESSAGE 0xec - - /* - * Self IPI vector for machine checks -diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h -index 7c18e12..b7ed2c4 100644 ---- a/arch/x86/include/asm/kvm_emulate.h -+++ b/arch/x86/include/asm/kvm_emulate.h -@@ -129,7 +129,7 @@ struct decode_cache { - u8 seg_override; - unsigned int d; - unsigned long regs[NR_VCPU_REGS]; -- unsigned long eip, eip_orig; -+ unsigned long eip; - /* modrm */ - u8 modrm; - u8 modrm_mod; -diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h -index d759a1f..d838922 100644 ---- a/arch/x86/include/asm/kvm_host.h -+++ b/arch/x86/include/asm/kvm_host.h -@@ -412,7 +412,6 @@ struct kvm_arch{ - unsigned long irq_sources_bitmap; - unsigned long irq_states[KVM_IOAPIC_NUM_PINS]; - u64 vm_init_tsc; -- s64 kvmclock_offset; - }; - - struct kvm_vm_stat { -diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h -index a479023..f1363b7 100644 ---- a/arch/x86/include/asm/mce.h -+++ b/arch/x86/include/asm/mce.h -@@ -214,11 +214,5 @@ void intel_init_thermal(struct cpuinfo_x86 *c); - - void mce_log_therm_throt_event(__u64 status); - --#ifdef CONFIG_X86_THERMAL_VECTOR --extern void mcheck_intel_therm_init(void); --#else --static inline void mcheck_intel_therm_init(void) { } --#endif -- - #endif /* __KERNEL__ */ - #endif /* _ASM_X86_MCE_H */ -diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h -index 0e3e728..7e2b6ba 100644 ---- a/arch/x86/include/asm/msr.h -+++ b/arch/x86/include/asm/msr.h -@@ -27,18 +27,6 @@ struct msr { - }; - }; - --struct msr_info { -- u32 msr_no; -- struct msr reg; -- struct msr *msrs; -- int err; --}; -- --struct msr_regs_info { -- u32 *regs; -- int err; --}; -- - static inline unsigned long long native_read_tscp(unsigned int *aux) - { - unsigned long low, high; -@@ -256,14 +244,11 @@ do { \ - - #define write_rdtscp_aux(val) wrmsr(0xc0000103, (val), 0) - --struct msr *msrs_alloc(void); --void msrs_free(struct msr *msrs); -- - #ifdef CONFIG_SMP - int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); - int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); --void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs); --void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs); -+void rdmsr_on_cpus(const cpumask_t *mask, u32 msr_no, 
struct msr *msrs); -+void wrmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs); - int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); - int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); - int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]); -diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h -index 13b1885..c978648 100644 ---- a/arch/x86/include/asm/processor.h -+++ b/arch/x86/include/asm/processor.h -@@ -180,7 +180,7 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx, - unsigned int *ecx, unsigned int *edx) - { - /* ecx is often an input as well as an output. */ -- asm volatile("cpuid" -+ asm("cpuid" - : "=a" (*eax), - "=b" (*ebx), - "=c" (*ecx), -diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h -index 77c1184..72a6dcd 100644 ---- a/arch/x86/include/asm/sys_ia32.h -+++ b/arch/x86/include/asm/sys_ia32.h -@@ -62,6 +62,9 @@ asmlinkage long sys32_pwrite(unsigned int, char __user *, u32, u32, u32); - asmlinkage long sys32_personality(unsigned long); - asmlinkage long sys32_sendfile(int, int, compat_off_t __user *, s32); - -+asmlinkage long sys32_mmap2(unsigned long, unsigned long, unsigned long, -+ unsigned long, unsigned long, unsigned long); -+ - struct oldold_utsname; - struct old_utsname; - asmlinkage long sys32_olduname(struct oldold_utsname __user *); -diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h -index 1bb6e39..372b76e 100644 ---- a/arch/x86/include/asm/syscalls.h -+++ b/arch/x86/include/asm/syscalls.h -@@ -55,6 +55,8 @@ struct sel_arg_struct; - struct oldold_utsname; - struct old_utsname; - -+asmlinkage long sys_mmap2(unsigned long, unsigned long, unsigned long, -+ unsigned long, unsigned long, unsigned long); - asmlinkage int old_mmap(struct mmap_arg_struct __user *); - asmlinkage int old_select(struct sel_arg_struct __user *); - asmlinkage int sys_ipc(uint, int, int, int, void __user *, long); -diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h -index 19c3ce4..d27d0a2 100644 ---- a/arch/x86/include/asm/thread_info.h -+++ b/arch/x86/include/asm/thread_info.h -@@ -86,6 +86,7 @@ struct thread_info { - #define TIF_NOTSC 16 /* TSC is not accessible in userland */ - #define TIF_IA32 17 /* 32bit process */ - #define TIF_FORK 18 /* ret_from_fork */ -+#define TIF_ABI_PENDING 19 - #define TIF_MEMDIE 20 - #define TIF_DEBUG 21 /* uses debug registers */ - #define TIF_IO_BITMAP 22 /* uses I/O bitmap */ -@@ -109,6 +110,7 @@ struct thread_info { - #define _TIF_NOTSC (1 << TIF_NOTSC) - #define _TIF_IA32 (1 << TIF_IA32) - #define _TIF_FORK (1 << TIF_FORK) -+#define _TIF_ABI_PENDING (1 << TIF_ABI_PENDING) - #define _TIF_DEBUG (1 << TIF_DEBUG) - #define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP) - #define _TIF_FREEZE (1 << TIF_FREEZE) -diff --git a/arch/x86/include/asm/unistd_32.h b/arch/x86/include/asm/unistd_32.h -index 6fb3c20..f9b507f 100644 ---- a/arch/x86/include/asm/unistd_32.h -+++ b/arch/x86/include/asm/unistd_32.h -@@ -343,9 +343,13 @@ - #define __NR_rt_tgsigqueueinfo 335 - #define __NR_perf_event_open 336 - -+#define __NR_LITMUS 337 -+ -+#include "litmus/unistd_32.h" -+ - #ifdef __KERNEL__ - --#define NR_syscalls 337 -+#define NR_syscalls 336 + NR_litmus_syscalls - - #define __ARCH_WANT_IPC_PARSE_VERSION - #define __ARCH_WANT_OLD_READDIR -diff --git a/arch/x86/include/asm/unistd_64.h b/arch/x86/include/asm/unistd_64.h -index 8d3ad0a..33b2003 100644 ---- a/arch/x86/include/asm/unistd_64.h -+++ 
b/arch/x86/include/asm/unistd_64.h -@@ -662,6 +662,10 @@ __SYSCALL(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo) - #define __NR_perf_event_open 298 - __SYSCALL(__NR_perf_event_open, sys_perf_event_open) - -+#define __NR_LITMUS 299 -+ -+#include "litmus/unistd_64.h" -+ - #ifndef __NO_STUBS - #define __ARCH_WANT_OLD_READDIR - #define __ARCH_WANT_OLD_STAT -diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h -index e90a8a9..d1414af 100644 ---- a/arch/x86/include/asm/uv/uv_hub.h -+++ b/arch/x86/include/asm/uv/uv_hub.h -@@ -31,20 +31,20 @@ - * contiguous (although various IO spaces may punch holes in - * it).. - * -- * N - Number of bits in the node portion of a socket physical -- * address. -+ * N - Number of bits in the node portion of a socket physical -+ * address. - * -- * NASID - network ID of a router, Mbrick or Cbrick. Nasid values of -- * routers always have low bit of 1, C/MBricks have low bit -- * equal to 0. Most addressing macros that target UV hub chips -- * right shift the NASID by 1 to exclude the always-zero bit. -- * NASIDs contain up to 15 bits. -+ * NASID - network ID of a router, Mbrick or Cbrick. Nasid values of -+ * routers always have low bit of 1, C/MBricks have low bit -+ * equal to 0. Most addressing macros that target UV hub chips -+ * right shift the NASID by 1 to exclude the always-zero bit. -+ * NASIDs contain up to 15 bits. - * - * GNODE - NASID right shifted by 1 bit. Most mmrs contain gnodes instead - * of nasids. - * -- * PNODE - the low N bits of the GNODE. The PNODE is the most useful variant -- * of the nasid for socket usage. -+ * PNODE - the low N bits of the GNODE. The PNODE is the most useful variant -+ * of the nasid for socket usage. - * - * - * NumaLink Global Physical Address Format: -@@ -71,12 +71,12 @@ - * - * - * APICID format -- * NOTE!!!!!! This is the current format of the APICID. However, code -- * should assume that this will change in the future. Use functions -- * in this file for all APICID bit manipulations and conversion. -+ * NOTE!!!!!! This is the current format of the APICID. However, code -+ * should assume that this will change in the future. Use functions -+ * in this file for all APICID bit manipulations and conversion. - * -- * 1111110000000000 -- * 5432109876543210 -+ * 1111110000000000 -+ * 5432109876543210 - * pppppppppplc0cch - * sssssssssss - * -@@ -89,9 +89,9 @@ - * Note: Processor only supports 12 bits in the APICID register. The ACPI - * tables hold all 16 bits. Software needs to be aware of this. - * -- * Unless otherwise specified, all references to APICID refer to -- * the FULL value contained in ACPI tables, not the subset in the -- * processor APICID register. -+ * Unless otherwise specified, all references to APICID refer to -+ * the FULL value contained in ACPI tables, not the subset in the -+ * processor APICID register. - */ - - -@@ -151,16 +151,16 @@ struct uv_hub_info_s { - }; - - DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info); --#define uv_hub_info (&__get_cpu_var(__uv_hub_info)) -+#define uv_hub_info (&__get_cpu_var(__uv_hub_info)) - #define uv_cpu_hub_info(cpu) (&per_cpu(__uv_hub_info, cpu)) - - /* - * Local & Global MMR space macros. -- * Note: macros are intended to be used ONLY by inline functions -- * in this file - not by other kernel code. 
-- * n - NASID (full 15-bit global nasid) -- * g - GNODE (full 15-bit global nasid, right shifted 1) -- * p - PNODE (local part of nsids, right shifted 1) -+ * Note: macros are intended to be used ONLY by inline functions -+ * in this file - not by other kernel code. -+ * n - NASID (full 15-bit global nasid) -+ * g - GNODE (full 15-bit global nasid, right shifted 1) -+ * p - PNODE (local part of nsids, right shifted 1) - */ - #define UV_NASID_TO_PNODE(n) (((n) >> 1) & uv_hub_info->pnode_mask) - #define UV_PNODE_TO_GNODE(p) ((p) |uv_hub_info->gnode_extra) -@@ -213,8 +213,8 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info); - /* - * Macros for converting between kernel virtual addresses, socket local physical - * addresses, and UV global physical addresses. -- * Note: use the standard __pa() & __va() macros for converting -- * between socket virtual and socket physical addresses. -+ * Note: use the standard __pa() & __va() macros for converting -+ * between socket virtual and socket physical addresses. - */ - - /* socket phys RAM --> UV global physical address */ -@@ -265,18 +265,21 @@ static inline int uv_apicid_to_pnode(int apicid) - * Access global MMRs using the low memory MMR32 space. This region supports - * faster MMR access but not all MMRs are accessible in this space. - */ --static inline unsigned long *uv_global_mmr32_address(int pnode, unsigned long offset) -+static inline unsigned long *uv_global_mmr32_address(int pnode, -+ unsigned long offset) - { - return __va(UV_GLOBAL_MMR32_BASE | - UV_GLOBAL_MMR32_PNODE_BITS(pnode) | offset); - } - --static inline void uv_write_global_mmr32(int pnode, unsigned long offset, unsigned long val) -+static inline void uv_write_global_mmr32(int pnode, unsigned long offset, -+ unsigned long val) - { - writeq(val, uv_global_mmr32_address(pnode, offset)); - } - --static inline unsigned long uv_read_global_mmr32(int pnode, unsigned long offset) -+static inline unsigned long uv_read_global_mmr32(int pnode, -+ unsigned long offset) - { - return readq(uv_global_mmr32_address(pnode, offset)); - } -@@ -285,32 +288,25 @@ static inline unsigned long uv_read_global_mmr32(int pnode, unsigned long offset - * Access Global MMR space using the MMR space located at the top of physical - * memory. - */ --static inline unsigned long *uv_global_mmr64_address(int pnode, unsigned long offset) -+static inline unsigned long *uv_global_mmr64_address(int pnode, -+ unsigned long offset) - { - return __va(UV_GLOBAL_MMR64_BASE | - UV_GLOBAL_MMR64_PNODE_BITS(pnode) | offset); - } - --static inline void uv_write_global_mmr64(int pnode, unsigned long offset, unsigned long val) -+static inline void uv_write_global_mmr64(int pnode, unsigned long offset, -+ unsigned long val) - { - writeq(val, uv_global_mmr64_address(pnode, offset)); - } - --static inline unsigned long uv_read_global_mmr64(int pnode, unsigned long offset) -+static inline unsigned long uv_read_global_mmr64(int pnode, -+ unsigned long offset) - { - return readq(uv_global_mmr64_address(pnode, offset)); - } - --static inline void uv_write_global_mmr8(int pnode, unsigned long offset, unsigned char val) --{ -- writeb(val, uv_global_mmr64_address(pnode, offset)); --} -- --static inline unsigned char uv_read_global_mmr8(int pnode, unsigned long offset) --{ -- return readb(uv_global_mmr64_address(pnode, offset)); --} -- - /* - * Access hub local MMRs. Faster than using global space but only local MMRs - * are accessible. 
-@@ -430,17 +426,11 @@ static inline void uv_set_scir_bits(unsigned char value) - } - } - --static inline unsigned long uv_scir_offset(int apicid) --{ -- return SCIR_LOCAL_MMR_BASE | (apicid & 0x3f); --} -- - static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value) - { - if (uv_cpu_hub_info(cpu)->scir.state != value) { -- uv_write_global_mmr8(uv_cpu_to_pnode(cpu), -- uv_cpu_hub_info(cpu)->scir.offset, value); - uv_cpu_hub_info(cpu)->scir.state = value; -+ uv_write_local_mmr8(uv_cpu_hub_info(cpu)->scir.offset, value); - } - } - -diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile -index d8e5d0c..a99b34d 100644 ---- a/arch/x86/kernel/Makefile -+++ b/arch/x86/kernel/Makefile -@@ -117,6 +117,8 @@ obj-$(CONFIG_X86_CHECK_BIOS_CORRUPTION) += check.o - - obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o - -+obj-$(CONFIG_FEATHER_TRACE) += ft_event.o -+ - ### - # 64 bit specific files - ifeq ($(CONFIG_X86_64),y) -diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c -index 2e837f5..59cdfa4 100644 ---- a/arch/x86/kernel/acpi/cstate.c -+++ b/arch/x86/kernel/acpi/cstate.c -@@ -48,7 +48,7 @@ void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags, - * P4, Core and beyond CPUs - */ - if (c->x86_vendor == X86_VENDOR_INTEL && -- (c->x86 > 0xf || (c->x86 == 6 && c->x86_model >= 0x0f))) -+ (c->x86 > 0xf || (c->x86 == 6 && c->x86_model >= 14))) - flags->bm_control = 0; - } - EXPORT_SYMBOL(acpi_processor_power_init_bm_check); -diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c -index 23fc9fe..0285521 100644 ---- a/arch/x86/kernel/amd_iommu.c -+++ b/arch/x86/kernel/amd_iommu.c -@@ -540,7 +540,7 @@ static void flush_all_devices_for_iommu(struct amd_iommu *iommu) - static void flush_devices_by_domain(struct protection_domain *domain) - { - struct amd_iommu *iommu; -- unsigned long i; -+ int i; - - for (i = 0; i <= amd_iommu_last_bdf; ++i) { - if ((domain == NULL && amd_iommu_pd_table[i] == NULL) || -@@ -1230,10 +1230,9 @@ static void __detach_device(struct protection_domain *domain, u16 devid) - - /* - * If we run in passthrough mode the device must be assigned to the -- * passthrough domain if it is detached from any other domain. -- * Make sure we can deassign from the pt_domain itself. 
-+ * passthrough domain if it is detached from any other domain - */ -- if (iommu_pass_through && domain != pt_domain) { -+ if (iommu_pass_through) { - struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; - __attach_device(iommu, pt_domain, devid); - } -@@ -2048,10 +2047,10 @@ static void prealloc_protection_domains(void) - struct pci_dev *dev = NULL; - struct dma_ops_domain *dma_dom; - struct amd_iommu *iommu; -- u16 devid, __devid; -+ u16 devid; - - while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { -- __devid = devid = calc_devid(dev->bus->number, dev->devfn); -+ devid = calc_devid(dev->bus->number, dev->devfn); - if (devid > amd_iommu_last_bdf) - continue; - devid = amd_iommu_alias_table[devid]; -@@ -2066,10 +2065,6 @@ static void prealloc_protection_domains(void) - init_unity_mappings_for_device(dma_dom, devid); - dma_dom->target_dev = devid; - -- attach_device(iommu, &dma_dom->domain, devid); -- if (__devid != devid) -- attach_device(iommu, &dma_dom->domain, __devid); -- - list_add_tail(&dma_dom->list, &iommu_pd_list); - } - } -@@ -2084,11 +2079,6 @@ static struct dma_map_ops amd_iommu_dma_ops = { - .dma_supported = amd_iommu_dma_supported, - }; - --void __init amd_iommu_init_api(void) --{ -- register_iommu(&amd_iommu_ops); --} -- - /* - * The function which clues the AMD IOMMU driver into dma_ops. - */ -@@ -2130,6 +2120,8 @@ int __init amd_iommu_init_dma_ops(void) - /* Make the driver finally visible to the drivers */ - dma_ops = &amd_iommu_dma_ops; - -+ register_iommu(&amd_iommu_ops); -+ - bus_register_notifier(&pci_bus_type, &device_nb); - - amd_iommu_stats_init(); -diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c -index 362ab88..c20001e 100644 ---- a/arch/x86/kernel/amd_iommu_init.c -+++ b/arch/x86/kernel/amd_iommu_init.c -@@ -136,11 +136,6 @@ LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the - system */ - - /* -- * Set to true if ACPI table parsing and hardware intialization went properly -- */ --static bool amd_iommu_initialized; -- --/* - * Pointer to the device table which is shared by all AMD IOMMUs - * it is indexed by the PCI device id or the HT unit id and contains - * information about the domain the device belongs to as well as the -@@ -918,8 +913,6 @@ static int __init init_iommu_all(struct acpi_table_header *table) - } - WARN_ON(p != end); - -- amd_iommu_initialized = true; -- - return 0; - } - -@@ -932,7 +925,7 @@ static int __init init_iommu_all(struct acpi_table_header *table) - * - ****************************************************************************/ - --static int iommu_setup_msi(struct amd_iommu *iommu) -+static int __init iommu_setup_msi(struct amd_iommu *iommu) - { - int r; - -@@ -1270,9 +1263,6 @@ int __init amd_iommu_init(void) - if (acpi_table_parse("IVRS", init_iommu_all) != 0) - goto free; - -- if (!amd_iommu_initialized) -- goto free; -- - if (acpi_table_parse("IVRS", init_memory_definitions) != 0) - goto free; - -@@ -1288,12 +1278,9 @@ int __init amd_iommu_init(void) - ret = amd_iommu_init_passthrough(); - else - ret = amd_iommu_init_dma_ops(); -- - if (ret) - goto free; - -- amd_iommu_init_api(); -- - enable_iommus(); - - if (iommu_pass_through) -diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c -index c86dbcf..894aa97 100644 ---- a/arch/x86/kernel/apic/apic.c -+++ b/arch/x86/kernel/apic/apic.c -@@ -246,7 +246,7 @@ static int modern_apic(void) - */ - static void native_apic_write_dummy(u32 reg, u32 v) - { -- WARN_ON_ONCE(cpu_has_apic && !disable_apic); -+ 
WARN_ON_ONCE((cpu_has_apic || !disable_apic)); - } - - static u32 native_apic_read_dummy(u32 reg) -diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c -index 873f81f..d0c99ab 100644 ---- a/arch/x86/kernel/apic/apic_flat_64.c -+++ b/arch/x86/kernel/apic/apic_flat_64.c -@@ -240,11 +240,6 @@ static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id) - printk(KERN_DEBUG "system APIC only can use physical flat"); - return 1; - } -- -- if (!strncmp(oem_id, "IBM", 3) && !strncmp(oem_table_id, "EXA", 3)) { -- printk(KERN_DEBUG "IBM Summit detected, will use apic physical"); -- return 1; -- } - #endif - - return 0; -diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c -index c107e83..dc69f28 100644 ---- a/arch/x86/kernel/apic/io_apic.c -+++ b/arch/x86/kernel/apic/io_apic.c -@@ -3157,7 +3157,6 @@ unsigned int create_irq_nr(unsigned int irq_want, int node) - continue; - - desc_new = move_irq_desc(desc_new, node); -- cfg_new = desc_new->chip_data; - - if (__assign_irq_vector(new, cfg_new, apic->target_cpus()) == 0) - irq = new; -diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c -index 9ee87cf..326c254 100644 ---- a/arch/x86/kernel/apic/x2apic_uv_x.c -+++ b/arch/x86/kernel/apic/x2apic_uv_x.c -@@ -364,13 +364,13 @@ static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size) - - enum map_type {map_wb, map_uc}; - --static __init void map_high(char *id, unsigned long base, int pshift, -- int bshift, int max_pnode, enum map_type map_type) -+static __init void map_high(char *id, unsigned long base, int shift, -+ int max_pnode, enum map_type map_type) - { - unsigned long bytes, paddr; - -- paddr = base << pshift; -- bytes = (1UL << bshift) * (max_pnode + 1); -+ paddr = base << shift; -+ bytes = (1UL << shift) * (max_pnode + 1); - printk(KERN_INFO "UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr, - paddr + bytes); - if (map_type == map_uc) -@@ -386,7 +386,7 @@ static __init void map_gru_high(int max_pnode) - - gru.v = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR); - if (gru.s.enable) -- map_high("GRU", gru.s.base, shift, shift, max_pnode, map_wb); -+ map_high("GRU", gru.s.base, shift, max_pnode, map_wb); - } - - static __init void map_mmr_high(int max_pnode) -@@ -396,7 +396,7 @@ static __init void map_mmr_high(int max_pnode) - - mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR); - if (mmr.s.enable) -- map_high("MMR", mmr.s.base, shift, shift, max_pnode, map_uc); -+ map_high("MMR", mmr.s.base, shift, max_pnode, map_uc); - } - - static __init void map_mmioh_high(int max_pnode) -@@ -406,8 +406,7 @@ static __init void map_mmioh_high(int max_pnode) - - mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR); - if (mmioh.s.enable) -- map_high("MMIOH", mmioh.s.base, shift, mmioh.s.m_io, -- max_pnode, map_uc); -+ map_high("MMIOH", mmioh.s.base, shift, max_pnode, map_uc); - } - - static __init void uv_rtc_init(void) -@@ -608,10 +607,8 @@ void __init uv_system_init(void) - uv_rtc_init(); - - for_each_present_cpu(cpu) { -- int apicid = per_cpu(x86_cpu_to_apicid, cpu); -- - nid = cpu_to_node(cpu); -- pnode = uv_apicid_to_pnode(apicid); -+ pnode = uv_apicid_to_pnode(per_cpu(x86_cpu_to_apicid, cpu)); - blade = boot_pnode_to_blade(pnode); - lcpu = uv_blade_info[blade].nr_possible_cpus; - uv_blade_info[blade].nr_possible_cpus++; -@@ -632,13 +629,15 @@ void __init uv_system_init(void) - uv_cpu_hub_info(cpu)->gnode_extra = gnode_extra; - uv_cpu_hub_info(cpu)->global_mmr_base = 
mmr_base; - uv_cpu_hub_info(cpu)->coherency_domain_number = sn_coherency_id; -- uv_cpu_hub_info(cpu)->scir.offset = uv_scir_offset(apicid); -+ uv_cpu_hub_info(cpu)->scir.offset = SCIR_LOCAL_MMR_BASE + lcpu; - uv_node_to_blade[nid] = blade; - uv_cpu_to_blade[cpu] = blade; - max_pnode = max(pnode, max_pnode); - -- printk(KERN_DEBUG "UV: cpu %d, apicid 0x%x, pnode %d, nid %d, lcpu %d, blade %d\n", -- cpu, apicid, pnode, nid, lcpu, blade); -+ printk(KERN_DEBUG "UV: cpu %d, apicid 0x%x, pnode %d, nid %d, " -+ "lcpu %d, blade %d\n", -+ cpu, per_cpu(x86_cpu_to_apicid, cpu), pnode, nid, -+ lcpu, blade); - } - - /* Add blade/pnode info for nodes without cpus */ -diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile -index ff502cc..68537e9 100644 ---- a/arch/x86/kernel/cpu/Makefile -+++ b/arch/x86/kernel/cpu/Makefile -@@ -18,6 +18,8 @@ obj-y += vmware.o hypervisor.o sched.o - obj-$(CONFIG_X86_32) += bugs.o cmpxchg.o - obj-$(CONFIG_X86_64) += bugs_64.o - -+obj-$(CONFIG_X86_CPU_DEBUG) += cpu_debug.o -+ - obj-$(CONFIG_CPU_SUP_INTEL) += intel.o - obj-$(CONFIG_CPU_SUP_AMD) += amd.o - obj-$(CONFIG_CPU_SUP_CYRIX_32) += cyrix.o -diff --git a/arch/x86/kernel/cpu/cpu_debug.c b/arch/x86/kernel/cpu/cpu_debug.c -new file mode 100644 -index 0000000..dca325c ---- /dev/null -+++ b/arch/x86/kernel/cpu/cpu_debug.c -@@ -0,0 +1,688 @@ -+/* -+ * CPU x86 architecture debug code -+ * -+ * Copyright(C) 2009 Jaswinder Singh Rajput -+ * -+ * For licencing details see kernel-base/COPYING -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+static DEFINE_PER_CPU(struct cpu_cpuX_base [CPU_REG_ALL_BIT], cpu_arr); -+static DEFINE_PER_CPU(struct cpu_private * [MAX_CPU_FILES], priv_arr); -+static DEFINE_PER_CPU(int, cpu_priv_count); -+ -+static DEFINE_MUTEX(cpu_debug_lock); -+ -+static struct dentry *cpu_debugfs_dir; -+ -+static struct cpu_debug_base cpu_base[] = { -+ { "mc", CPU_MC, 0 }, -+ { "monitor", CPU_MONITOR, 0 }, -+ { "time", CPU_TIME, 0 }, -+ { "pmc", CPU_PMC, 1 }, -+ { "platform", CPU_PLATFORM, 0 }, -+ { "apic", CPU_APIC, 0 }, -+ { "poweron", CPU_POWERON, 0 }, -+ { "control", CPU_CONTROL, 0 }, -+ { "features", CPU_FEATURES, 0 }, -+ { "lastbranch", CPU_LBRANCH, 0 }, -+ { "bios", CPU_BIOS, 0 }, -+ { "freq", CPU_FREQ, 0 }, -+ { "mtrr", CPU_MTRR, 0 }, -+ { "perf", CPU_PERF, 0 }, -+ { "cache", CPU_CACHE, 0 }, -+ { "sysenter", CPU_SYSENTER, 0 }, -+ { "therm", CPU_THERM, 0 }, -+ { "misc", CPU_MISC, 0 }, -+ { "debug", CPU_DEBUG, 0 }, -+ { "pat", CPU_PAT, 0 }, -+ { "vmx", CPU_VMX, 0 }, -+ { "call", CPU_CALL, 0 }, -+ { "base", CPU_BASE, 0 }, -+ { "ver", CPU_VER, 0 }, -+ { "conf", CPU_CONF, 0 }, -+ { "smm", CPU_SMM, 0 }, -+ { "svm", CPU_SVM, 0 }, -+ { "osvm", CPU_OSVM, 0 }, -+ { "tss", CPU_TSS, 0 }, -+ { "cr", CPU_CR, 0 }, -+ { "dt", CPU_DT, 0 }, -+ { "registers", CPU_REG_ALL, 0 }, -+}; -+ -+static struct cpu_file_base cpu_file[] = { -+ { "index", CPU_REG_ALL, 0 }, -+ { "value", CPU_REG_ALL, 1 }, -+}; -+ -+/* CPU Registers Range */ -+static struct cpu_debug_range cpu_reg_range[] = { -+ { 0x00000000, 0x00000001, CPU_MC, }, -+ { 0x00000006, 0x00000007, CPU_MONITOR, }, -+ { 0x00000010, 0x00000010, CPU_TIME, }, -+ { 0x00000011, 0x00000013, CPU_PMC, }, -+ { 0x00000017, 0x00000017, CPU_PLATFORM, }, -+ { 0x0000001B, 0x0000001B, CPU_APIC, }, -+ { 0x0000002A, 0x0000002B, CPU_POWERON, }, -+ { 0x0000002C, 0x0000002C, 
CPU_FREQ, }, -+ { 0x0000003A, 0x0000003A, CPU_CONTROL, }, -+ { 0x00000040, 0x00000047, CPU_LBRANCH, }, -+ { 0x00000060, 0x00000067, CPU_LBRANCH, }, -+ { 0x00000079, 0x00000079, CPU_BIOS, }, -+ { 0x00000088, 0x0000008A, CPU_CACHE, }, -+ { 0x0000008B, 0x0000008B, CPU_BIOS, }, -+ { 0x0000009B, 0x0000009B, CPU_MONITOR, }, -+ { 0x000000C1, 0x000000C4, CPU_PMC, }, -+ { 0x000000CD, 0x000000CD, CPU_FREQ, }, -+ { 0x000000E7, 0x000000E8, CPU_PERF, }, -+ { 0x000000FE, 0x000000FE, CPU_MTRR, }, -+ -+ { 0x00000116, 0x0000011E, CPU_CACHE, }, -+ { 0x00000174, 0x00000176, CPU_SYSENTER, }, -+ { 0x00000179, 0x0000017B, CPU_MC, }, -+ { 0x00000186, 0x00000189, CPU_PMC, }, -+ { 0x00000198, 0x00000199, CPU_PERF, }, -+ { 0x0000019A, 0x0000019A, CPU_TIME, }, -+ { 0x0000019B, 0x0000019D, CPU_THERM, }, -+ { 0x000001A0, 0x000001A0, CPU_MISC, }, -+ { 0x000001C9, 0x000001C9, CPU_LBRANCH, }, -+ { 0x000001D7, 0x000001D8, CPU_LBRANCH, }, -+ { 0x000001D9, 0x000001D9, CPU_DEBUG, }, -+ { 0x000001DA, 0x000001E0, CPU_LBRANCH, }, -+ -+ { 0x00000200, 0x0000020F, CPU_MTRR, }, -+ { 0x00000250, 0x00000250, CPU_MTRR, }, -+ { 0x00000258, 0x00000259, CPU_MTRR, }, -+ { 0x00000268, 0x0000026F, CPU_MTRR, }, -+ { 0x00000277, 0x00000277, CPU_PAT, }, -+ { 0x000002FF, 0x000002FF, CPU_MTRR, }, -+ -+ { 0x00000300, 0x00000311, CPU_PMC, }, -+ { 0x00000345, 0x00000345, CPU_PMC, }, -+ { 0x00000360, 0x00000371, CPU_PMC, }, -+ { 0x0000038D, 0x00000390, CPU_PMC, }, -+ { 0x000003A0, 0x000003BE, CPU_PMC, }, -+ { 0x000003C0, 0x000003CD, CPU_PMC, }, -+ { 0x000003E0, 0x000003E1, CPU_PMC, }, -+ { 0x000003F0, 0x000003F2, CPU_PMC, }, -+ -+ { 0x00000400, 0x00000417, CPU_MC, }, -+ { 0x00000480, 0x0000048B, CPU_VMX, }, -+ -+ { 0x00000600, 0x00000600, CPU_DEBUG, }, -+ { 0x00000680, 0x0000068F, CPU_LBRANCH, }, -+ { 0x000006C0, 0x000006CF, CPU_LBRANCH, }, -+ -+ { 0x000107CC, 0x000107D3, CPU_PMC, }, -+ -+ { 0xC0000080, 0xC0000080, CPU_FEATURES, }, -+ { 0xC0000081, 0xC0000084, CPU_CALL, }, -+ { 0xC0000100, 0xC0000102, CPU_BASE, }, -+ { 0xC0000103, 0xC0000103, CPU_TIME, }, -+ -+ { 0xC0010000, 0xC0010007, CPU_PMC, }, -+ { 0xC0010010, 0xC0010010, CPU_CONF, }, -+ { 0xC0010015, 0xC0010015, CPU_CONF, }, -+ { 0xC0010016, 0xC001001A, CPU_MTRR, }, -+ { 0xC001001D, 0xC001001D, CPU_MTRR, }, -+ { 0xC001001F, 0xC001001F, CPU_CONF, }, -+ { 0xC0010030, 0xC0010035, CPU_BIOS, }, -+ { 0xC0010044, 0xC0010048, CPU_MC, }, -+ { 0xC0010050, 0xC0010056, CPU_SMM, }, -+ { 0xC0010058, 0xC0010058, CPU_CONF, }, -+ { 0xC0010060, 0xC0010060, CPU_CACHE, }, -+ { 0xC0010061, 0xC0010068, CPU_SMM, }, -+ { 0xC0010069, 0xC001006B, CPU_SMM, }, -+ { 0xC0010070, 0xC0010071, CPU_SMM, }, -+ { 0xC0010111, 0xC0010113, CPU_SMM, }, -+ { 0xC0010114, 0xC0010118, CPU_SVM, }, -+ { 0xC0010140, 0xC0010141, CPU_OSVM, }, -+ { 0xC0011022, 0xC0011023, CPU_CONF, }, -+}; -+ -+static int is_typeflag_valid(unsigned cpu, unsigned flag) -+{ -+ int i; -+ -+ /* Standard Registers should be always valid */ -+ if (flag >= CPU_TSS) -+ return 1; -+ -+ for (i = 0; i < ARRAY_SIZE(cpu_reg_range); i++) { -+ if (cpu_reg_range[i].flag == flag) -+ return 1; -+ } -+ -+ /* Invalid */ -+ return 0; -+} -+ -+static unsigned get_cpu_range(unsigned cpu, unsigned *min, unsigned *max, -+ int index, unsigned flag) -+{ -+ if (cpu_reg_range[index].flag == flag) { -+ *min = cpu_reg_range[index].min; -+ *max = cpu_reg_range[index].max; -+ } else -+ *max = 0; -+ -+ return *max; -+} -+ -+/* This function can also be called with seq = NULL for printk */ -+static void print_cpu_data(struct seq_file *seq, unsigned type, -+ u32 low, u32 high) -+{ -+ struct 
cpu_private *priv; -+ u64 val = high; -+ -+ if (seq) { -+ priv = seq->private; -+ if (priv->file) { -+ val = (val << 32) | low; -+ seq_printf(seq, "0x%llx\n", val); -+ } else -+ seq_printf(seq, " %08x: %08x_%08x\n", -+ type, high, low); -+ } else -+ printk(KERN_INFO " %08x: %08x_%08x\n", type, high, low); -+} -+ -+/* This function can also be called with seq = NULL for printk */ -+static void print_msr(struct seq_file *seq, unsigned cpu, unsigned flag) -+{ -+ unsigned msr, msr_min, msr_max; -+ struct cpu_private *priv; -+ u32 low, high; -+ int i; -+ -+ if (seq) { -+ priv = seq->private; -+ if (priv->file) { -+ if (!rdmsr_safe_on_cpu(priv->cpu, priv->reg, -+ &low, &high)) -+ print_cpu_data(seq, priv->reg, low, high); -+ return; -+ } -+ } -+ -+ for (i = 0; i < ARRAY_SIZE(cpu_reg_range); i++) { -+ if (!get_cpu_range(cpu, &msr_min, &msr_max, i, flag)) -+ continue; -+ -+ for (msr = msr_min; msr <= msr_max; msr++) { -+ if (rdmsr_safe_on_cpu(cpu, msr, &low, &high)) -+ continue; -+ print_cpu_data(seq, msr, low, high); -+ } -+ } -+} -+ -+static void print_tss(void *arg) -+{ -+ struct pt_regs *regs = task_pt_regs(current); -+ struct seq_file *seq = arg; -+ unsigned int seg; -+ -+ seq_printf(seq, " RAX\t: %016lx\n", regs->ax); -+ seq_printf(seq, " RBX\t: %016lx\n", regs->bx); -+ seq_printf(seq, " RCX\t: %016lx\n", regs->cx); -+ seq_printf(seq, " RDX\t: %016lx\n", regs->dx); -+ -+ seq_printf(seq, " RSI\t: %016lx\n", regs->si); -+ seq_printf(seq, " RDI\t: %016lx\n", regs->di); -+ seq_printf(seq, " RBP\t: %016lx\n", regs->bp); -+ seq_printf(seq, " ESP\t: %016lx\n", regs->sp); -+ -+#ifdef CONFIG_X86_64 -+ seq_printf(seq, " R08\t: %016lx\n", regs->r8); -+ seq_printf(seq, " R09\t: %016lx\n", regs->r9); -+ seq_printf(seq, " R10\t: %016lx\n", regs->r10); -+ seq_printf(seq, " R11\t: %016lx\n", regs->r11); -+ seq_printf(seq, " R12\t: %016lx\n", regs->r12); -+ seq_printf(seq, " R13\t: %016lx\n", regs->r13); -+ seq_printf(seq, " R14\t: %016lx\n", regs->r14); -+ seq_printf(seq, " R15\t: %016lx\n", regs->r15); -+#endif -+ -+ asm("movl %%cs,%0" : "=r" (seg)); -+ seq_printf(seq, " CS\t: %04x\n", seg); -+ asm("movl %%ds,%0" : "=r" (seg)); -+ seq_printf(seq, " DS\t: %04x\n", seg); -+ seq_printf(seq, " SS\t: %04lx\n", regs->ss & 0xffff); -+ asm("movl %%es,%0" : "=r" (seg)); -+ seq_printf(seq, " ES\t: %04x\n", seg); -+ asm("movl %%fs,%0" : "=r" (seg)); -+ seq_printf(seq, " FS\t: %04x\n", seg); -+ asm("movl %%gs,%0" : "=r" (seg)); -+ seq_printf(seq, " GS\t: %04x\n", seg); -+ -+ seq_printf(seq, " EFLAGS\t: %016lx\n", regs->flags); -+ -+ seq_printf(seq, " EIP\t: %016lx\n", regs->ip); -+} -+ -+static void print_cr(void *arg) -+{ -+ struct seq_file *seq = arg; -+ -+ seq_printf(seq, " cr0\t: %016lx\n", read_cr0()); -+ seq_printf(seq, " cr2\t: %016lx\n", read_cr2()); -+ seq_printf(seq, " cr3\t: %016lx\n", read_cr3()); -+ seq_printf(seq, " cr4\t: %016lx\n", read_cr4_safe()); -+#ifdef CONFIG_X86_64 -+ seq_printf(seq, " cr8\t: %016lx\n", read_cr8()); -+#endif -+} -+ -+static void print_desc_ptr(char *str, struct seq_file *seq, struct desc_ptr dt) -+{ -+ seq_printf(seq, " %s\t: %016llx\n", str, (u64)(dt.address | dt.size)); -+} -+ -+static void print_dt(void *seq) -+{ -+ struct desc_ptr dt; -+ unsigned long ldt; -+ -+ /* IDT */ -+ store_idt((struct desc_ptr *)&dt); -+ print_desc_ptr("IDT", seq, dt); -+ -+ /* GDT */ -+ store_gdt((struct desc_ptr *)&dt); -+ print_desc_ptr("GDT", seq, dt); -+ -+ /* LDT */ -+ store_ldt(ldt); -+ seq_printf(seq, " LDT\t: %016lx\n", ldt); -+ -+ /* TR */ -+ store_tr(ldt); -+ seq_printf(seq, " TR\t: 
%016lx\n", ldt); -+} -+ -+static void print_dr(void *arg) -+{ -+ struct seq_file *seq = arg; -+ unsigned long dr; -+ int i; -+ -+ for (i = 0; i < 8; i++) { -+ /* Ignore db4, db5 */ -+ if ((i == 4) || (i == 5)) -+ continue; -+ get_debugreg(dr, i); -+ seq_printf(seq, " dr%d\t: %016lx\n", i, dr); -+ } -+ -+ seq_printf(seq, "\n MSR\t:\n"); -+} -+ -+static void print_apic(void *arg) -+{ -+ struct seq_file *seq = arg; -+ -+#ifdef CONFIG_X86_LOCAL_APIC -+ seq_printf(seq, " LAPIC\t:\n"); -+ seq_printf(seq, " ID\t\t: %08x\n", apic_read(APIC_ID) >> 24); -+ seq_printf(seq, " LVR\t\t: %08x\n", apic_read(APIC_LVR)); -+ seq_printf(seq, " TASKPRI\t: %08x\n", apic_read(APIC_TASKPRI)); -+ seq_printf(seq, " ARBPRI\t\t: %08x\n", apic_read(APIC_ARBPRI)); -+ seq_printf(seq, " PROCPRI\t: %08x\n", apic_read(APIC_PROCPRI)); -+ seq_printf(seq, " LDR\t\t: %08x\n", apic_read(APIC_LDR)); -+ seq_printf(seq, " DFR\t\t: %08x\n", apic_read(APIC_DFR)); -+ seq_printf(seq, " SPIV\t\t: %08x\n", apic_read(APIC_SPIV)); -+ seq_printf(seq, " ISR\t\t: %08x\n", apic_read(APIC_ISR)); -+ seq_printf(seq, " ESR\t\t: %08x\n", apic_read(APIC_ESR)); -+ seq_printf(seq, " ICR\t\t: %08x\n", apic_read(APIC_ICR)); -+ seq_printf(seq, " ICR2\t\t: %08x\n", apic_read(APIC_ICR2)); -+ seq_printf(seq, " LVTT\t\t: %08x\n", apic_read(APIC_LVTT)); -+ seq_printf(seq, " LVTTHMR\t: %08x\n", apic_read(APIC_LVTTHMR)); -+ seq_printf(seq, " LVTPC\t\t: %08x\n", apic_read(APIC_LVTPC)); -+ seq_printf(seq, " LVT0\t\t: %08x\n", apic_read(APIC_LVT0)); -+ seq_printf(seq, " LVT1\t\t: %08x\n", apic_read(APIC_LVT1)); -+ seq_printf(seq, " LVTERR\t\t: %08x\n", apic_read(APIC_LVTERR)); -+ seq_printf(seq, " TMICT\t\t: %08x\n", apic_read(APIC_TMICT)); -+ seq_printf(seq, " TMCCT\t\t: %08x\n", apic_read(APIC_TMCCT)); -+ seq_printf(seq, " TDCR\t\t: %08x\n", apic_read(APIC_TDCR)); -+ if (boot_cpu_has(X86_FEATURE_EXTAPIC)) { -+ unsigned int i, v, maxeilvt; -+ -+ v = apic_read(APIC_EFEAT); -+ maxeilvt = (v >> 16) & 0xff; -+ seq_printf(seq, " EFEAT\t\t: %08x\n", v); -+ seq_printf(seq, " ECTRL\t\t: %08x\n", apic_read(APIC_ECTRL)); -+ -+ for (i = 0; i < maxeilvt; i++) { -+ v = apic_read(APIC_EILVTn(i)); -+ seq_printf(seq, " EILVT%d\t\t: %08x\n", i, v); -+ } -+ } -+#endif /* CONFIG_X86_LOCAL_APIC */ -+ seq_printf(seq, "\n MSR\t:\n"); -+} -+ -+static int cpu_seq_show(struct seq_file *seq, void *v) -+{ -+ struct cpu_private *priv = seq->private; -+ -+ if (priv == NULL) -+ return -EINVAL; -+ -+ switch (cpu_base[priv->type].flag) { -+ case CPU_TSS: -+ smp_call_function_single(priv->cpu, print_tss, seq, 1); -+ break; -+ case CPU_CR: -+ smp_call_function_single(priv->cpu, print_cr, seq, 1); -+ break; -+ case CPU_DT: -+ smp_call_function_single(priv->cpu, print_dt, seq, 1); -+ break; -+ case CPU_DEBUG: -+ if (priv->file == CPU_INDEX_BIT) -+ smp_call_function_single(priv->cpu, print_dr, seq, 1); -+ print_msr(seq, priv->cpu, cpu_base[priv->type].flag); -+ break; -+ case CPU_APIC: -+ if (priv->file == CPU_INDEX_BIT) -+ smp_call_function_single(priv->cpu, print_apic, seq, 1); -+ print_msr(seq, priv->cpu, cpu_base[priv->type].flag); -+ break; -+ -+ default: -+ print_msr(seq, priv->cpu, cpu_base[priv->type].flag); -+ break; -+ } -+ seq_printf(seq, "\n"); -+ -+ return 0; -+} -+ -+static void *cpu_seq_start(struct seq_file *seq, loff_t *pos) -+{ -+ if (*pos == 0) /* One time is enough ;-) */ -+ return seq; -+ -+ return NULL; -+} -+ -+static void *cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos) -+{ -+ (*pos)++; -+ -+ return cpu_seq_start(seq, pos); -+} -+ -+static void cpu_seq_stop(struct 
seq_file *seq, void *v) -+{ -+} -+ -+static const struct seq_operations cpu_seq_ops = { -+ .start = cpu_seq_start, -+ .next = cpu_seq_next, -+ .stop = cpu_seq_stop, -+ .show = cpu_seq_show, -+}; -+ -+static int cpu_seq_open(struct inode *inode, struct file *file) -+{ -+ struct cpu_private *priv = inode->i_private; -+ struct seq_file *seq; -+ int err; -+ -+ err = seq_open(file, &cpu_seq_ops); -+ if (!err) { -+ seq = file->private_data; -+ seq->private = priv; -+ } -+ -+ return err; -+} -+ -+static int write_msr(struct cpu_private *priv, u64 val) -+{ -+ u32 low, high; -+ -+ high = (val >> 32) & 0xffffffff; -+ low = val & 0xffffffff; -+ -+ if (!wrmsr_safe_on_cpu(priv->cpu, priv->reg, low, high)) -+ return 0; -+ -+ return -EPERM; -+} -+ -+static int write_cpu_register(struct cpu_private *priv, const char *buf) -+{ -+ int ret = -EPERM; -+ u64 val; -+ -+ ret = strict_strtoull(buf, 0, &val); -+ if (ret < 0) -+ return ret; -+ -+ /* Supporting only MSRs */ -+ if (priv->type < CPU_TSS_BIT) -+ return write_msr(priv, val); -+ -+ return ret; -+} -+ -+static ssize_t cpu_write(struct file *file, const char __user *ubuf, -+ size_t count, loff_t *off) -+{ -+ struct seq_file *seq = file->private_data; -+ struct cpu_private *priv = seq->private; -+ char buf[19]; -+ -+ if ((priv == NULL) || (count >= sizeof(buf))) -+ return -EINVAL; -+ -+ if (copy_from_user(&buf, ubuf, count)) -+ return -EFAULT; -+ -+ buf[count] = 0; -+ -+ if ((cpu_base[priv->type].write) && (cpu_file[priv->file].write)) -+ if (!write_cpu_register(priv, buf)) -+ return count; -+ -+ return -EACCES; -+} -+ -+static const struct file_operations cpu_fops = { -+ .owner = THIS_MODULE, -+ .open = cpu_seq_open, -+ .read = seq_read, -+ .write = cpu_write, -+ .llseek = seq_lseek, -+ .release = seq_release, -+}; -+ -+static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg, -+ unsigned file, struct dentry *dentry) -+{ -+ struct cpu_private *priv = NULL; -+ -+ /* Already intialized */ -+ if (file == CPU_INDEX_BIT) -+ if (per_cpu(cpu_arr[type].init, cpu)) -+ return 0; -+ -+ priv = kzalloc(sizeof(*priv), GFP_KERNEL); -+ if (priv == NULL) -+ return -ENOMEM; -+ -+ priv->cpu = cpu; -+ priv->type = type; -+ priv->reg = reg; -+ priv->file = file; -+ mutex_lock(&cpu_debug_lock); -+ per_cpu(priv_arr[type], cpu) = priv; -+ per_cpu(cpu_priv_count, cpu)++; -+ mutex_unlock(&cpu_debug_lock); -+ -+ if (file) -+ debugfs_create_file(cpu_file[file].name, S_IRUGO, -+ dentry, (void *)priv, &cpu_fops); -+ else { -+ debugfs_create_file(cpu_base[type].name, S_IRUGO, -+ per_cpu(cpu_arr[type].dentry, cpu), -+ (void *)priv, &cpu_fops); -+ mutex_lock(&cpu_debug_lock); -+ per_cpu(cpu_arr[type].init, cpu) = 1; -+ mutex_unlock(&cpu_debug_lock); -+ } -+ -+ return 0; -+} -+ -+static int cpu_init_regfiles(unsigned cpu, unsigned int type, unsigned reg, -+ struct dentry *dentry) -+{ -+ unsigned file; -+ int err = 0; -+ -+ for (file = 0; file < ARRAY_SIZE(cpu_file); file++) { -+ err = cpu_create_file(cpu, type, reg, file, dentry); -+ if (err) -+ return err; -+ } -+ -+ return err; -+} -+ -+static int cpu_init_msr(unsigned cpu, unsigned type, struct dentry *dentry) -+{ -+ struct dentry *cpu_dentry = NULL; -+ unsigned reg, reg_min, reg_max; -+ int i, err = 0; -+ char reg_dir[12]; -+ u32 low, high; -+ -+ for (i = 0; i < ARRAY_SIZE(cpu_reg_range); i++) { -+ if (!get_cpu_range(cpu, ®_min, ®_max, i, -+ cpu_base[type].flag)) -+ continue; -+ -+ for (reg = reg_min; reg <= reg_max; reg++) { -+ if (rdmsr_safe_on_cpu(cpu, reg, &low, &high)) -+ continue; -+ -+ sprintf(reg_dir, "0x%x", reg); 
-+ cpu_dentry = debugfs_create_dir(reg_dir, dentry); -+ err = cpu_init_regfiles(cpu, type, reg, cpu_dentry); -+ if (err) -+ return err; -+ } -+ } -+ -+ return err; -+} -+ -+static int cpu_init_allreg(unsigned cpu, struct dentry *dentry) -+{ -+ struct dentry *cpu_dentry = NULL; -+ unsigned type; -+ int err = 0; -+ -+ for (type = 0; type < ARRAY_SIZE(cpu_base) - 1; type++) { -+ if (!is_typeflag_valid(cpu, cpu_base[type].flag)) -+ continue; -+ cpu_dentry = debugfs_create_dir(cpu_base[type].name, dentry); -+ per_cpu(cpu_arr[type].dentry, cpu) = cpu_dentry; -+ -+ if (type < CPU_TSS_BIT) -+ err = cpu_init_msr(cpu, type, cpu_dentry); -+ else -+ err = cpu_create_file(cpu, type, 0, CPU_INDEX_BIT, -+ cpu_dentry); -+ if (err) -+ return err; -+ } -+ -+ return err; -+} -+ -+static int cpu_init_cpu(void) -+{ -+ struct dentry *cpu_dentry = NULL; -+ struct cpuinfo_x86 *cpui; -+ char cpu_dir[12]; -+ unsigned cpu; -+ int err = 0; -+ -+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) { -+ cpui = &cpu_data(cpu); -+ if (!cpu_has(cpui, X86_FEATURE_MSR)) -+ continue; -+ -+ sprintf(cpu_dir, "cpu%d", cpu); -+ cpu_dentry = debugfs_create_dir(cpu_dir, cpu_debugfs_dir); -+ err = cpu_init_allreg(cpu, cpu_dentry); -+ -+ pr_info("cpu%d(%d) debug files %d\n", -+ cpu, nr_cpu_ids, per_cpu(cpu_priv_count, cpu)); -+ if (per_cpu(cpu_priv_count, cpu) > MAX_CPU_FILES) { -+ pr_err("Register files count %d exceeds limit %d\n", -+ per_cpu(cpu_priv_count, cpu), MAX_CPU_FILES); -+ per_cpu(cpu_priv_count, cpu) = MAX_CPU_FILES; -+ err = -ENFILE; -+ } -+ if (err) -+ return err; -+ } -+ -+ return err; -+} -+ -+static int __init cpu_debug_init(void) -+{ -+ cpu_debugfs_dir = debugfs_create_dir("cpu", arch_debugfs_dir); -+ -+ return cpu_init_cpu(); -+} -+ -+static void __exit cpu_debug_exit(void) -+{ -+ int i, cpu; -+ -+ if (cpu_debugfs_dir) -+ debugfs_remove_recursive(cpu_debugfs_dir); -+ -+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) -+ for (i = 0; i < per_cpu(cpu_priv_count, cpu); i++) -+ kfree(per_cpu(priv_arr[i], cpu)); -+} -+ -+module_init(cpu_debug_init); -+module_exit(cpu_debug_exit); -+ -+MODULE_AUTHOR("Jaswinder Singh Rajput"); -+MODULE_DESCRIPTION("CPU Debug module"); -+MODULE_LICENSE("GPL"); -diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c -index ab1cd30..3f12dab 100644 ---- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c -+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c -@@ -1351,7 +1351,6 @@ static int __devexit powernowk8_cpu_exit(struct cpufreq_policy *pol) - - kfree(data->powernow_table); - kfree(data); -- per_cpu(powernow_data, pol->cpu) = NULL; - - return 0; - } -@@ -1371,7 +1370,7 @@ static unsigned int powernowk8_get(unsigned int cpu) - int err; - - if (!data) -- return 0; -+ return -EINVAL; - - smp_call_function_single(cpu, query_values_on_cpu, &err, true); - if (err) -diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c -index a2a03cf..40e1835 100644 ---- a/arch/x86/kernel/cpu/intel.c -+++ b/arch/x86/kernel/cpu/intel.c -@@ -70,6 +70,7 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) - if (c->x86_power & (1 << 8)) { - set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); - set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); -+ set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE); - sched_clock_stable = 1; - } - -diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c -index 8178d03..804c40e 100644 ---- a/arch/x86/kernel/cpu/intel_cacheinfo.c -+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c -@@ -94,7 +94,7 @@ static const struct _cache_table __cpuinitconst 
cache_table[] = - { 0xd1, LVL_3, 1024 }, /* 4-way set assoc, 64 byte line size */ - { 0xd2, LVL_3, 2048 }, /* 4-way set assoc, 64 byte line size */ - { 0xd6, LVL_3, 1024 }, /* 8-way set assoc, 64 byte line size */ -- { 0xd7, LVL_3, 2048 }, /* 8-way set assoc, 64 byte line size */ -+ { 0xd7, LVL_3, 2038 }, /* 8-way set assoc, 64 byte line size */ - { 0xd8, LVL_3, 4096 }, /* 12-way set assoc, 64 byte line size */ - { 0xdc, LVL_3, 2048 }, /* 12-way set assoc, 64 byte line size */ - { 0xdd, LVL_3, 4096 }, /* 12-way set assoc, 64 byte line size */ -@@ -102,9 +102,6 @@ static const struct _cache_table __cpuinitconst cache_table[] = - { 0xe2, LVL_3, 2048 }, /* 16-way set assoc, 64 byte line size */ - { 0xe3, LVL_3, 4096 }, /* 16-way set assoc, 64 byte line size */ - { 0xe4, LVL_3, 8192 }, /* 16-way set assoc, 64 byte line size */ -- { 0xea, LVL_3, 12288 }, /* 24-way set assoc, 64 byte line size */ -- { 0xeb, LVL_3, 18432 }, /* 24-way set assoc, 64 byte line size */ -- { 0xec, LVL_3, 24576 }, /* 24-way set assoc, 64 byte line size */ - { 0x00, 0, 0} - }; - -diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c -index 0f16a2b..721a77c 100644 ---- a/arch/x86/kernel/cpu/mcheck/mce.c -+++ b/arch/x86/kernel/cpu/mcheck/mce.c -@@ -1374,14 +1374,13 @@ static void mce_init_timer(void) - struct timer_list *t = &__get_cpu_var(mce_timer); - int *n = &__get_cpu_var(mce_next_interval); - -- setup_timer(t, mcheck_timer, smp_processor_id()); -- - if (mce_ignore_ce) - return; - - *n = check_interval * HZ; - if (!*n) - return; -+ setup_timer(t, mcheck_timer, smp_processor_id()); - t->expires = round_jiffies(jiffies + *n); - add_timer_on(t, smp_processor_id()); - } -@@ -1992,11 +1991,9 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) - break; - case CPU_DOWN_FAILED: - case CPU_DOWN_FAILED_FROZEN: -- if (!mce_ignore_ce && check_interval) { -- t->expires = round_jiffies(jiffies + -+ t->expires = round_jiffies(jiffies + - __get_cpu_var(mce_next_interval)); -- add_timer_on(t, cpu); -- } -+ add_timer_on(t, cpu); - smp_call_function_single(cpu, mce_reenable_cpu, &action, 1); - break; - case CPU_POST_DEAD: -diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c -index 687638e..b3a1dba 100644 ---- a/arch/x86/kernel/cpu/mcheck/therm_throt.c -+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c -@@ -49,8 +49,6 @@ static DEFINE_PER_CPU(struct thermal_state, thermal_state); - - static atomic_t therm_throt_en = ATOMIC_INIT(0); - --static u32 lvtthmr_init __read_mostly; -- - #ifdef CONFIG_SYSFS - #define define_therm_throt_sysdev_one_ro(_name) \ - static SYSDEV_ATTR(_name, 0444, therm_throt_sysdev_show_##_name, NULL) -@@ -256,27 +254,14 @@ asmlinkage void smp_thermal_interrupt(struct pt_regs *regs) - ack_APIC_irq(); - } - --void __init mcheck_intel_therm_init(void) --{ -- /* -- * This function is only called on boot CPU. 
Save the init thermal -- * LVT value on BSP and use that value to restore APs' thermal LVT -- * entry BIOS programmed later -- */ -- if (cpu_has(&boot_cpu_data, X86_FEATURE_ACPI) && -- cpu_has(&boot_cpu_data, X86_FEATURE_ACC)) -- lvtthmr_init = apic_read(APIC_LVTTHMR); --} -- - void intel_init_thermal(struct cpuinfo_x86 *c) - { - unsigned int cpu = smp_processor_id(); - int tm2 = 0; - u32 l, h; - -- /* Thermal monitoring depends on APIC, ACPI and clock modulation */ -- if (!cpu_has_apic || !cpu_has(c, X86_FEATURE_ACPI) || -- !cpu_has(c, X86_FEATURE_ACC)) -+ /* Thermal monitoring depends on ACPI and clock modulation*/ -+ if (!cpu_has(c, X86_FEATURE_ACPI) || !cpu_has(c, X86_FEATURE_ACC)) - return; - - /* -@@ -285,20 +270,7 @@ void intel_init_thermal(struct cpuinfo_x86 *c) - * since it might be delivered via SMI already: - */ - rdmsr(MSR_IA32_MISC_ENABLE, l, h); -- -- /* -- * The initial value of thermal LVT entries on all APs always reads -- * 0x10000 because APs are woken up by BSP issuing INIT-SIPI-SIPI -- * sequence to them and LVT registers are reset to 0s except for -- * the mask bits which are set to 1s when APs receive INIT IPI. -- * Always restore the value that BIOS has programmed on AP based on -- * BSP's info we saved since BIOS is always setting the same value -- * for all threads/cores -- */ -- apic_write(APIC_LVTTHMR, lvtthmr_init); -- -- h = lvtthmr_init; -- -+ h = apic_read(APIC_LVTTHMR); - if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) { - printk(KERN_DEBUG - "CPU%d: Thermal monitoring handled by SMI\n", cpu); -diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c -index 898df97..fab786f 100644 ---- a/arch/x86/kernel/cpu/perfctr-watchdog.c -+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c -@@ -712,7 +712,7 @@ static void probe_nmi_watchdog(void) - switch (boot_cpu_data.x86_vendor) { - case X86_VENDOR_AMD: - if (boot_cpu_data.x86 != 6 && boot_cpu_data.x86 != 15 && -- boot_cpu_data.x86 != 16 && boot_cpu_data.x86 != 17) -+ boot_cpu_data.x86 != 16) - return; - wd_ops = &k7_wd_ops; - break; -diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c -index 0c91110..6a52d4b 100644 ---- a/arch/x86/kernel/cpuid.c -+++ b/arch/x86/kernel/cpuid.c -@@ -192,8 +192,7 @@ static int __init cpuid_init(void) - int i, err = 0; - i = 0; - -- if (__register_chrdev(CPUID_MAJOR, 0, NR_CPUS, -- "cpu/cpuid", &cpuid_fops)) { -+ if (register_chrdev(CPUID_MAJOR, "cpu/cpuid", &cpuid_fops)) { - printk(KERN_ERR "cpuid: unable to get major %d for cpuid\n", - CPUID_MAJOR); - err = -EBUSY; -@@ -222,7 +221,7 @@ out_class: - } - class_destroy(cpuid_class); - out_chrdev: -- __unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid"); -+ unregister_chrdev(CPUID_MAJOR, "cpu/cpuid"); - out: - return err; - } -@@ -234,7 +233,7 @@ static void __exit cpuid_exit(void) - for_each_online_cpu(cpu) - cpuid_device_destroy(cpu); - class_destroy(cpuid_class); -- __unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid"); -+ unregister_chrdev(CPUID_MAJOR, "cpu/cpuid"); - unregister_hotcpu_notifier(&cpuid_class_cpu_notifier); - } - -diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S -index b5c061f..5e9b0e5 100644 ---- a/arch/x86/kernel/entry_64.S -+++ b/arch/x86/kernel/entry_64.S -@@ -1008,6 +1008,8 @@ apicinterrupt CALL_FUNCTION_VECTOR \ - call_function_interrupt smp_call_function_interrupt - apicinterrupt RESCHEDULE_VECTOR \ - reschedule_interrupt smp_reschedule_interrupt -+apicinterrupt PULL_TIMERS_VECTOR \ -+ pull_timers_interrupt smp_pull_timers_interrupt - 
#endif - - apicinterrupt ERROR_APIC_VECTOR \ -diff --git a/arch/x86/kernel/ft_event.c b/arch/x86/kernel/ft_event.c -new file mode 100644 -index 0000000..e07ee30 ---- /dev/null -+++ b/arch/x86/kernel/ft_event.c -@@ -0,0 +1,112 @@ -+#include -+ -+#include -+ -+#ifdef __ARCH_HAS_FEATHER_TRACE -+/* the feather trace management functions assume -+ * exclusive access to the event table -+ */ -+ -+ -+#define BYTE_JUMP 0xeb -+#define BYTE_JUMP_LEN 0x02 -+ -+/* for each event, there is an entry in the event table */ -+struct trace_event { -+ long id; -+ long count; -+ long start_addr; -+ long end_addr; -+}; -+ -+extern struct trace_event __start___event_table[]; -+extern struct trace_event __stop___event_table[]; -+ -+int ft_enable_event(unsigned long id) -+{ -+ struct trace_event* te = __start___event_table; -+ int count = 0; -+ char* delta; -+ unsigned char* instr; -+ -+ while (te < __stop___event_table) { -+ if (te->id == id && ++te->count == 1) { -+ instr = (unsigned char*) te->start_addr; -+ /* make sure we don't clobber something wrong */ -+ if (*instr == BYTE_JUMP) { -+ delta = (((unsigned char*) te->start_addr) + 1); -+ *delta = 0; -+ } -+ } -+ if (te->id == id) -+ count++; -+ te++; -+ } -+ -+ printk(KERN_DEBUG "ft_enable_event: enabled %d events\n", count); -+ return count; -+} -+ -+int ft_disable_event(unsigned long id) -+{ -+ struct trace_event* te = __start___event_table; -+ int count = 0; -+ char* delta; -+ unsigned char* instr; -+ -+ while (te < __stop___event_table) { -+ if (te->id == id && --te->count == 0) { -+ instr = (unsigned char*) te->start_addr; -+ if (*instr == BYTE_JUMP) { -+ delta = (((unsigned char*) te->start_addr) + 1); -+ *delta = te->end_addr - te->start_addr - -+ BYTE_JUMP_LEN; -+ } -+ } -+ if (te->id == id) -+ count++; -+ te++; -+ } -+ -+ printk(KERN_DEBUG "ft_disable_event: disabled %d events\n", count); -+ return count; -+} -+ -+int ft_disable_all_events(void) -+{ -+ struct trace_event* te = __start___event_table; -+ int count = 0; -+ char* delta; -+ unsigned char* instr; -+ -+ while (te < __stop___event_table) { -+ if (te->count) { -+ instr = (unsigned char*) te->start_addr; -+ if (*instr == BYTE_JUMP) { -+ delta = (((unsigned char*) te->start_addr) -+ + 1); -+ *delta = te->end_addr - te->start_addr - -+ BYTE_JUMP_LEN; -+ te->count = 0; -+ count++; -+ } -+ } -+ te++; -+ } -+ return count; -+} -+ -+int ft_is_event_enabled(unsigned long id) -+{ -+ struct trace_event* te = __start___event_table; -+ -+ while (te < __stop___event_table) { -+ if (te->id == id) -+ return te->count; -+ te++; -+ } -+ return 0; -+} -+ -+#endif -+ -diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c -index 5877873..dedc2bd 100644 ---- a/arch/x86/kernel/hpet.c -+++ b/arch/x86/kernel/hpet.c -@@ -33,8 +33,6 @@ - * HPET address is set in acpi/boot.c, when an ACPI entry exists - */ - unsigned long hpet_address; --u8 hpet_msi_disable; -- - #ifdef CONFIG_PCI_MSI - static unsigned long hpet_num_timers; - #endif -@@ -586,9 +584,6 @@ static void hpet_msi_capability_lookup(unsigned int start_timer) - unsigned int num_timers_used = 0; - int i; - -- if (hpet_msi_disable) -- return; -- - id = hpet_readl(HPET_ID); - - num_timers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT); -@@ -916,9 +911,6 @@ static __init int hpet_late_init(void) - hpet_reserve_platform_timers(hpet_readl(HPET_ID)); - hpet_print_config(); - -- if (hpet_msi_disable) -- return 0; -- - for_each_online_cpu(cpu) { - hpet_cpuhp_notify(NULL, CPU_ONLINE, (void *)(long)cpu); - } -diff --git a/arch/x86/kernel/irqinit.c 
b/arch/x86/kernel/irqinit.c -index 40f3077..f5fa64c 100644 ---- a/arch/x86/kernel/irqinit.c -+++ b/arch/x86/kernel/irqinit.c -@@ -172,6 +172,9 @@ static void __init smp_intr_init(void) - alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR, - call_function_single_interrupt); - -+ /* IPI for hrtimer pulling on remote cpus */ -+ alloc_intr_gate(PULL_TIMERS_VECTOR, pull_timers_interrupt); -+ - /* Low priority IPI to cleanup after moving an irq */ - set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt); - set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors); -diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c -index 5eaeb5e..6a3cefc 100644 ---- a/arch/x86/kernel/msr.c -+++ b/arch/x86/kernel/msr.c -@@ -251,7 +251,7 @@ static int __init msr_init(void) - int i, err = 0; - i = 0; - -- if (__register_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr", &msr_fops)) { -+ if (register_chrdev(MSR_MAJOR, "cpu/msr", &msr_fops)) { - printk(KERN_ERR "msr: unable to get major %d for msr\n", - MSR_MAJOR); - err = -EBUSY; -@@ -279,7 +279,7 @@ out_class: - msr_device_destroy(i); - class_destroy(msr_class); - out_chrdev: -- __unregister_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr"); -+ unregister_chrdev(MSR_MAJOR, "cpu/msr"); - out: - return err; - } -@@ -290,7 +290,7 @@ static void __exit msr_exit(void) - for_each_online_cpu(cpu) - msr_device_destroy(cpu); - class_destroy(msr_class); -- __unregister_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr"); -+ unregister_chrdev(MSR_MAJOR, "cpu/msr"); - unregister_hotcpu_notifier(&msr_class_cpu_notifier); - } - -diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c -index e6ec8a2..971a3be 100644 ---- a/arch/x86/kernel/pci-calgary_64.c -+++ b/arch/x86/kernel/pci-calgary_64.c -@@ -318,15 +318,13 @@ static inline struct iommu_table *find_iommu_table(struct device *dev) - - pdev = to_pci_dev(dev); - -- /* search up the device tree for an iommu */ - pbus = pdev->bus; -- do { -- tbl = pci_iommu(pbus); -- if (tbl && tbl->it_busno == pbus->number) -- break; -- tbl = NULL; -+ -+ /* is the device behind a bridge? 
Look for the root bus */ -+ while (pbus->parent) - pbus = pbus->parent; -- } while (pbus); -+ -+ tbl = pci_iommu(pbus); - - BUG_ON(tbl && (tbl->it_busno != pbus->number)); - -diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c -index 6ac3931..a6e804d 100644 ---- a/arch/x86/kernel/pci-dma.c -+++ b/arch/x86/kernel/pci-dma.c -@@ -214,7 +214,7 @@ static __init int iommu_setup(char *p) - if (!strncmp(p, "allowdac", 8)) - forbid_dac = 0; - if (!strncmp(p, "nodac", 5)) -- forbid_dac = 1; -+ forbid_dac = -1; - if (!strncmp(p, "usedac", 6)) { - forbid_dac = -1; - return 1; -diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c -index fcc0b5c..a7f1b64 100644 ---- a/arch/x86/kernel/pci-gart_64.c -+++ b/arch/x86/kernel/pci-gart_64.c -@@ -856,7 +856,7 @@ void __init gart_parse_options(char *p) - #endif - if (isdigit(*p) && get_option(&p, &arg)) - iommu_size = arg; -- if (!strncmp(p, "fullflush", 9)) -+ if (!strncmp(p, "fullflush", 8)) - iommu_fullflush = 1; - if (!strncmp(p, "nofullflush", 11)) - iommu_fullflush = 0; -diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c -index f010ab4..5284cd2 100644 ---- a/arch/x86/kernel/process.c -+++ b/arch/x86/kernel/process.c -@@ -91,6 +91,18 @@ void flush_thread(void) - { - struct task_struct *tsk = current; - -+#ifdef CONFIG_X86_64 -+ if (test_tsk_thread_flag(tsk, TIF_ABI_PENDING)) { -+ clear_tsk_thread_flag(tsk, TIF_ABI_PENDING); -+ if (test_tsk_thread_flag(tsk, TIF_IA32)) { -+ clear_tsk_thread_flag(tsk, TIF_IA32); -+ } else { -+ set_tsk_thread_flag(tsk, TIF_IA32); -+ current_thread_info()->status |= TS_COMPAT; -+ } -+ } -+#endif -+ - clear_tsk_thread_flag(tsk, TIF_DEBUG); - - tsk->thread.debugreg0 = 0; -diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c -index f9ce04f..eb62cbc 100644 ---- a/arch/x86/kernel/process_64.c -+++ b/arch/x86/kernel/process_64.c -@@ -540,17 +540,6 @@ sys_clone(unsigned long clone_flags, unsigned long newsp, - return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid); - } - --void set_personality_ia32(void) --{ -- /* inherit personality from parent */ -- -- /* Make sure to be in 32bit mode */ -- set_thread_flag(TIF_IA32); -- -- /* Prepare the first "return" to user space */ -- current_thread_info()->status |= TS_COMPAT; --} -- - unsigned long get_wchan(struct task_struct *p) - { - unsigned long stack; -diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c -index c06acdd..7b058a2 100644 ---- a/arch/x86/kernel/ptrace.c -+++ b/arch/x86/kernel/ptrace.c -@@ -408,14 +408,14 @@ static int genregs_get(struct task_struct *target, - { - if (kbuf) { - unsigned long *k = kbuf; -- while (count >= sizeof(*k)) { -+ while (count > 0) { - *k++ = getreg(target, pos); - count -= sizeof(*k); - pos += sizeof(*k); - } - } else { - unsigned long __user *u = ubuf; -- while (count >= sizeof(*u)) { -+ while (count > 0) { - if (__put_user(getreg(target, pos), u++)) - return -EFAULT; - count -= sizeof(*u); -@@ -434,14 +434,14 @@ static int genregs_set(struct task_struct *target, - int ret = 0; - if (kbuf) { - const unsigned long *k = kbuf; -- while (count >= sizeof(*k) && !ret) { -+ while (count > 0 && !ret) { - ret = putreg(target, pos, *k++); - count -= sizeof(*k); - pos += sizeof(*k); - } - } else { - const unsigned long __user *u = ubuf; -- while (count >= sizeof(*u) && !ret) { -+ while (count > 0 && !ret) { - unsigned long word; - ret = __get_user(word, u++); - if (ret) -@@ -1219,14 +1219,14 @@ static int genregs32_get(struct task_struct *target, - { - if (kbuf) { - 
compat_ulong_t *k = kbuf; -- while (count >= sizeof(*k)) { -+ while (count > 0) { - getreg32(target, pos, k++); - count -= sizeof(*k); - pos += sizeof(*k); - } - } else { - compat_ulong_t __user *u = ubuf; -- while (count >= sizeof(*u)) { -+ while (count > 0) { - compat_ulong_t word; - getreg32(target, pos, &word); - if (__put_user(word, u++)) -@@ -1247,14 +1247,14 @@ static int genregs32_set(struct task_struct *target, - int ret = 0; - if (kbuf) { - const compat_ulong_t *k = kbuf; -- while (count >= sizeof(*k) && !ret) { -+ while (count > 0 && !ret) { - ret = putreg32(target, pos, *k++); - count -= sizeof(*k); - pos += sizeof(*k); - } - } else { - const compat_ulong_t __user *u = ubuf; -- while (count >= sizeof(*u) && !ret) { -+ while (count > 0 && !ret) { - compat_ulong_t word; - ret = __get_user(word, u++); - if (ret) -diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c -index 0040164..6c3b2c6 100644 ---- a/arch/x86/kernel/quirks.c -+++ b/arch/x86/kernel/quirks.c -@@ -491,19 +491,6 @@ void force_hpet_resume(void) - break; - } - } -- --/* -- * HPET MSI on some boards (ATI SB700/SB800) has side effect on -- * floppy DMA. Disable HPET MSI on such platforms. -- */ --static void force_disable_hpet_msi(struct pci_dev *unused) --{ -- hpet_msi_disable = 1; --} -- --DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS, -- force_disable_hpet_msi); -- - #endif - - #if defined(CONFIG_PCI) && defined(CONFIG_NUMA) -diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c -index bff34d6..f930787 100644 ---- a/arch/x86/kernel/reboot.c -+++ b/arch/x86/kernel/reboot.c -@@ -203,15 +203,6 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = { - DMI_MATCH(DMI_BOARD_NAME, "0T656F"), - }, - }, -- { /* Handle problems with rebooting on Dell OptiPlex 760 with 0G919G*/ -- .callback = set_bios_reboot, -- .ident = "Dell OptiPlex 760", -- .matches = { -- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), -- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 760"), -- DMI_MATCH(DMI_BOARD_NAME, "0G919G"), -- }, -- }, - { /* Handle problems with rebooting on Dell 2400's */ - .callback = set_bios_reboot, - .ident = "Dell PowerEdge 2400", -@@ -268,14 +259,6 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = { - DMI_MATCH(DMI_PRODUCT_NAME, "SBC-FITPC2"), - }, - }, -- { /* Handle problems with rebooting on ASUS P4S800 */ -- .callback = set_bios_reboot, -- .ident = "ASUS P4S800", -- .matches = { -- DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), -- DMI_MATCH(DMI_BOARD_NAME, "P4S800"), -- }, -- }, - { } - }; - -diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c -index 8425f7e..2a34f9c 100644 ---- a/arch/x86/kernel/setup.c -+++ b/arch/x86/kernel/setup.c -@@ -109,7 +109,6 @@ - #ifdef CONFIG_X86_64 - #include - #endif --#include - - /* - * end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries. -@@ -667,27 +666,19 @@ static struct dmi_system_id __initdata bad_bios_dmi_table[] = { - DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix/MSC"), - }, - }, -+ { - /* -- * AMI BIOS with low memory corruption was found on Intel DG45ID and -- * DG45FC boards. -- * It has a different DMI_BIOS_VENDOR = "Intel Corp.", for now we will -+ * AMI BIOS with low memory corruption was found on Intel DG45ID board. -+ * It hase different DMI_BIOS_VENDOR = "Intel Corp.", for now we will - * match only DMI_BOARD_NAME and see if there is more bad products - * with this vendor. 
- */ -- { - .callback = dmi_low_memory_corruption, - .ident = "AMI BIOS", - .matches = { - DMI_MATCH(DMI_BOARD_NAME, "DG45ID"), - }, - }, -- { -- .callback = dmi_low_memory_corruption, -- .ident = "AMI BIOS", -- .matches = { -- DMI_MATCH(DMI_BOARD_NAME, "DG45FC"), -- }, -- }, - #endif - {} - }; -@@ -1040,8 +1031,6 @@ void __init setup_arch(char **cmdline_p) - #endif - #endif - x86_init.oem.banner(); -- -- mcheck_intel_therm_init(); - } - - #ifdef CONFIG_X86_32 -diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c -index ec1de97..a93528b 100644 ---- a/arch/x86/kernel/smp.c -+++ b/arch/x86/kernel/smp.c -@@ -22,6 +22,9 @@ - #include - #include - -+#include -+#include -+ - #include - #include - #include -@@ -117,6 +120,7 @@ static void native_smp_send_reschedule(int cpu) - WARN_ON(1); - return; - } -+ TS_SEND_RESCHED_START(cpu); - apic->send_IPI_mask(cpumask_of(cpu), RESCHEDULE_VECTOR); - } - -@@ -146,6 +150,16 @@ void native_send_call_func_ipi(const struct cpumask *mask) - free_cpumask_var(allbutself); - } - -+/* trigger timers on remote cpu */ -+void smp_send_pull_timers(int cpu) -+{ -+ if (unlikely(cpu_is_offline(cpu))) { -+ WARN_ON(1); -+ return; -+ } -+ apic->send_IPI_mask(cpumask_of(cpu), PULL_TIMERS_VECTOR); -+} -+ - /* - * this function calls the 'stop' function on all other CPUs in the system. - */ -@@ -197,7 +211,12 @@ static void native_smp_send_stop(void) - void smp_reschedule_interrupt(struct pt_regs *regs) - { - ack_APIC_irq(); -+ /* LITMUS^RT needs this interrupt to proper reschedule -+ * on this cpu -+ */ -+ set_tsk_need_resched(current); - inc_irq_stat(irq_resched_count); -+ TS_SEND_RESCHED_END; - /* - * KVM uses this interrupt to force a cpu out of guest mode - */ -@@ -221,6 +240,15 @@ void smp_call_function_single_interrupt(struct pt_regs *regs) - irq_exit(); - } - -+extern void hrtimer_pull(void); -+ -+void smp_pull_timers_interrupt(struct pt_regs *regs) -+{ -+ ack_APIC_irq(); -+ TRACE("pull timer interrupt\n"); -+ hrtimer_pull(); -+} -+ - struct smp_ops smp_ops = { - .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu, - .smp_prepare_cpus = native_smp_prepare_cpus, -diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c -index dee1ff7..1884a8d 100644 ---- a/arch/x86/kernel/sys_i386_32.c -+++ b/arch/x86/kernel/sys_i386_32.c -@@ -24,6 +24,31 @@ - - #include - -+asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, -+ unsigned long prot, unsigned long flags, -+ unsigned long fd, unsigned long pgoff) -+{ -+ int error = -EBADF; -+ struct file *file = NULL; -+ struct mm_struct *mm = current->mm; -+ -+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); -+ if (!(flags & MAP_ANONYMOUS)) { -+ file = fget(fd); -+ if (!file) -+ goto out; -+ } -+ -+ down_write(&mm->mmap_sem); -+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); -+ up_write(&mm->mmap_sem); -+ -+ if (file) -+ fput(file); -+out: -+ return error; -+} -+ - /* - * Perform the select(nd, in, out, ex, tv) and mmap() system - * calls. 
Linux/i386 didn't use to be able to handle more than -@@ -52,7 +77,7 @@ asmlinkage int old_mmap(struct mmap_arg_struct __user *arg) - if (a.offset & ~PAGE_MASK) - goto out; - -- err = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, -+ err = sys_mmap2(a.addr, a.len, a.prot, a.flags, - a.fd, a.offset >> PAGE_SHIFT); - out: - return err; -diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c -index 8aa2057..45e00eb 100644 ---- a/arch/x86/kernel/sys_x86_64.c -+++ b/arch/x86/kernel/sys_x86_64.c -@@ -23,11 +23,26 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, - unsigned long, fd, unsigned long, off) - { - long error; -+ struct file *file; -+ - error = -EINVAL; - if (off & ~PAGE_MASK) - goto out; - -- error = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT); -+ error = -EBADF; -+ file = NULL; -+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); -+ if (!(flags & MAP_ANONYMOUS)) { -+ file = fget(fd); -+ if (!file) -+ goto out; -+ } -+ down_write(¤t->mm->mmap_sem); -+ error = do_mmap_pgoff(file, addr, len, prot, flags, off >> PAGE_SHIFT); -+ up_write(¤t->mm->mmap_sem); -+ -+ if (file) -+ fput(file); - out: - return error; - } -diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S -index 76d70a4..17fcb3a 100644 ---- a/arch/x86/kernel/syscall_table_32.S -+++ b/arch/x86/kernel/syscall_table_32.S -@@ -191,7 +191,7 @@ ENTRY(sys_call_table) - .long sys_ni_syscall /* reserved for streams2 */ - .long ptregs_vfork /* 190 */ - .long sys_getrlimit -- .long sys_mmap_pgoff -+ .long sys_mmap2 - .long sys_truncate64 - .long sys_ftruncate64 - .long sys_stat64 /* 195 */ -@@ -336,3 +336,17 @@ ENTRY(sys_call_table) - .long sys_pwritev - .long sys_rt_tgsigqueueinfo /* 335 */ - .long sys_perf_event_open -+ .long sys_set_rt_task_param /* LITMUS^RT 337 */ -+ .long sys_get_rt_task_param -+ .long sys_complete_job -+ .long sys_od_open -+ .long sys_od_close -+ .long sys_fmlp_down -+ .long sys_fmlp_up -+ .long sys_srp_down -+ .long sys_srp_up -+ .long sys_query_job_no -+ .long sys_wait_for_job_release -+ .long sys_wait_for_ts_release -+ .long sys_release_ts -+ .long sys_null_call -diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c -index 364d015..1740c85 100644 ---- a/arch/x86/kernel/tlb_uv.c -+++ b/arch/x86/kernel/tlb_uv.c -@@ -817,8 +817,10 @@ static int __init uv_init_blade(int blade) - */ - apicid = blade_to_first_apicid(blade); - pa = uv_read_global_mmr64(pnode, UVH_BAU_DATA_CONFIG); -- uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, -+ if ((pa & 0xff) != UV_BAU_MESSAGE) { -+ uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, - ((apicid << 32) | UV_BAU_MESSAGE)); -+ } - return 0; - } - -diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c -index 597683a..cd982f4 100644 ---- a/arch/x86/kernel/tsc.c -+++ b/arch/x86/kernel/tsc.c -@@ -763,7 +763,6 @@ void mark_tsc_unstable(char *reason) - { - if (!tsc_unstable) { - tsc_unstable = 1; -- sched_clock_stable = 0; - printk(KERN_INFO "Marking TSC unstable due to %s\n", reason); - /* Change only the rating, when not registered */ - if (clocksource_tsc.mult) -diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c -index e02dbb6..1be5cd6 100644 ---- a/arch/x86/kvm/emulate.c -+++ b/arch/x86/kvm/emulate.c -@@ -613,9 +613,6 @@ static int do_insn_fetch(struct x86_emulate_ctxt *ctxt, - { - int rc = 0; - -- /* x86 instructions are limited to 15 bytes. 
*/ -- if (eip + size - ctxt->decode.eip_orig > 15) -- return X86EMUL_UNHANDLEABLE; - eip += ctxt->cs_base; - while (size--) { - rc = do_fetch_insn_byte(ctxt, ops, eip++, dest++); -@@ -874,7 +871,7 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) - /* Shadow copy of register state. Committed on successful emulation. */ - - memset(c, 0, sizeof(struct decode_cache)); -- c->eip = c->eip_orig = kvm_rip_read(ctxt->vcpu); -+ c->eip = kvm_rip_read(ctxt->vcpu); - ctxt->cs_base = seg_base(ctxt, VCPU_SREG_CS); - memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs); - -diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c -index 88ad162..144e7f6 100644 ---- a/arch/x86/kvm/i8254.c -+++ b/arch/x86/kvm/i8254.c -@@ -465,9 +465,6 @@ static int pit_ioport_read(struct kvm_io_device *this, - return -EOPNOTSUPP; - - addr &= KVM_PIT_CHANNEL_MASK; -- if (addr == 3) -- return 0; -- - s = &pit_state->channels[addr]; - - mutex_lock(&pit_state->lock); -diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c -index 8dfeaaa..23c2176 100644 ---- a/arch/x86/kvm/lapic.c -+++ b/arch/x86/kvm/lapic.c -@@ -374,12 +374,6 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode, - if (unlikely(!apic_enabled(apic))) - break; - -- if (trig_mode) { -- apic_debug("level trig mode for vector %d", vector); -- apic_set_vector(vector, apic->regs + APIC_TMR); -- } else -- apic_clear_vector(vector, apic->regs + APIC_TMR); -- - result = !apic_test_and_set_irr(vector, apic); - trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode, - trig_mode, vector, !result); -@@ -390,6 +384,11 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode, - break; - } - -+ if (trig_mode) { -+ apic_debug("level trig mode for vector %d", vector); -+ apic_set_vector(vector, apic->regs + APIC_TMR); -+ } else -+ apic_clear_vector(vector, apic->regs + APIC_TMR); - kvm_vcpu_kick(vcpu); - break; - -@@ -1157,7 +1156,6 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu) - hrtimer_cancel(&apic->lapic_timer.timer); - update_divide_count(apic); - start_apic_timer(apic); -- apic->irr_pending = true; - } - - void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu) -diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c -index 3a01519..818b92a 100644 ---- a/arch/x86/kvm/mmu.c -+++ b/arch/x86/kvm/mmu.c -@@ -477,7 +477,7 @@ static int host_mapping_level(struct kvm *kvm, gfn_t gfn) - - addr = gfn_to_hva(kvm, gfn); - if (kvm_is_error_hva(addr)) -- return PT_PAGE_TABLE_LEVEL; -+ return page_size; - - down_read(¤t->mm->mmap_sem); - vma = find_vma(current->mm, addr); -@@ -515,9 +515,11 @@ static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn) - if (host_level == PT_PAGE_TABLE_LEVEL) - return host_level; - -- for (level = PT_DIRECTORY_LEVEL; level <= host_level; ++level) -+ for (level = PT_DIRECTORY_LEVEL; level <= host_level; ++level) { -+ - if (has_wrprotected_page(vcpu->kvm, large_gfn, level)) - break; -+ } - - return level - 1; - } -diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h -index 5fa3325..72558f8 100644 ---- a/arch/x86/kvm/paging_tmpl.h -+++ b/arch/x86/kvm/paging_tmpl.h -@@ -150,9 +150,7 @@ walk: - walker->table_gfn[walker->level - 1] = table_gfn; - walker->pte_gpa[walker->level - 1] = pte_gpa; - -- if (kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte))) -- goto not_present; -- -+ kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte)); - trace_kvm_mmu_paging_element(pte, walker->level); - - if (!is_present_gpte(pte)) -@@ -457,6 +455,8 @@ out_unlock: - static void 
FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva) - { - struct kvm_shadow_walk_iterator iterator; -+ pt_element_t gpte; -+ gpa_t pte_gpa = -1; - int level; - u64 *sptep; - int need_flush = 0; -@@ -471,6 +471,10 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva) - if (level == PT_PAGE_TABLE_LEVEL || - ((level == PT_DIRECTORY_LEVEL && is_large_pte(*sptep))) || - ((level == PT_PDPE_LEVEL && is_large_pte(*sptep)))) { -+ struct kvm_mmu_page *sp = page_header(__pa(sptep)); -+ -+ pte_gpa = (sp->gfn << PAGE_SHIFT); -+ pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t); - - if (is_shadow_present_pte(*sptep)) { - rmap_remove(vcpu->kvm, sptep); -@@ -489,6 +493,18 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva) - if (need_flush) - kvm_flush_remote_tlbs(vcpu->kvm); - spin_unlock(&vcpu->kvm->mmu_lock); -+ -+ if (pte_gpa == -1) -+ return; -+ if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte, -+ sizeof(pt_element_t))) -+ return; -+ if (is_present_gpte(gpte) && (gpte & PT_ACCESSED_MASK)) { -+ if (mmu_topup_memory_caches(vcpu)) -+ return; -+ kvm_mmu_pte_write(vcpu, pte_gpa, (const u8 *)&gpte, -+ sizeof(pt_element_t), 0); -+ } - } - - static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr) -diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c -index e78d990..ae07d26 100644 ---- a/arch/x86/kvm/x86.c -+++ b/arch/x86/kvm/x86.c -@@ -484,19 +484,16 @@ static inline u32 bit(int bitno) - * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST. - * - * This list is modified at module load time to reflect the -- * capabilities of the host cpu. This capabilities test skips MSRs that are -- * kvm-specific. Those are put in the beginning of the list. -+ * capabilities of the host cpu. - */ -- --#define KVM_SAVE_MSRS_BEGIN 2 - static u32 msrs_to_save[] = { -- MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK, - MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP, - MSR_K6_STAR, - #ifdef CONFIG_X86_64 - MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR, - #endif -- MSR_IA32_TSC, MSR_IA32_PERF_STATUS, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA -+ MSR_IA32_TSC, MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK, -+ MSR_IA32_PERF_STATUS, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA - }; - - static unsigned num_msrs_to_save; -@@ -583,7 +580,7 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock) - { - static int version; - struct pvclock_wall_clock wc; -- struct timespec boot; -+ struct timespec now, sys, boot; - - if (!wall_clock) - return; -@@ -598,7 +595,9 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock) - * wall clock specified here. guest system time equals host - * system time for us, thus we must fill in host boot time here. - */ -- getboottime(&boot); -+ now = current_kernel_time(); -+ ktime_get_ts(&sys); -+ boot = ns_to_timespec(timespec_to_ns(&now) - timespec_to_ns(&sys)); - - wc.sec = boot.tv_sec; - wc.nsec = boot.tv_nsec; -@@ -673,14 +672,12 @@ static void kvm_write_guest_time(struct kvm_vcpu *v) - local_irq_save(flags); - kvm_get_msr(v, MSR_IA32_TSC, &vcpu->hv_clock.tsc_timestamp); - ktime_get_ts(&ts); -- monotonic_to_bootbased(&ts); - local_irq_restore(flags); - - /* With all the info we got, fill in the values */ - - vcpu->hv_clock.system_time = ts.tv_nsec + -- (NSEC_PER_SEC * (u64)ts.tv_sec) + v->kvm->arch.kvmclock_offset; -- -+ (NSEC_PER_SEC * (u64)ts.tv_sec); - /* - * The interface expects us to write an even number signaling that the - * update is finished. 
Since the guest won't see the intermediate -@@ -1227,7 +1224,6 @@ int kvm_dev_ioctl_check_extension(long ext) - case KVM_CAP_PIT2: - case KVM_CAP_PIT_STATE2: - case KVM_CAP_SET_IDENTITY_MAP_ADDR: -- case KVM_CAP_ADJUST_CLOCK: - r = 1; - break; - case KVM_CAP_COALESCED_MMIO: -@@ -2425,44 +2421,6 @@ long kvm_arch_vm_ioctl(struct file *filp, - r = 0; - break; - } -- case KVM_SET_CLOCK: { -- struct timespec now; -- struct kvm_clock_data user_ns; -- u64 now_ns; -- s64 delta; -- -- r = -EFAULT; -- if (copy_from_user(&user_ns, argp, sizeof(user_ns))) -- goto out; -- -- r = -EINVAL; -- if (user_ns.flags) -- goto out; -- -- r = 0; -- ktime_get_ts(&now); -- now_ns = timespec_to_ns(&now); -- delta = user_ns.clock - now_ns; -- kvm->arch.kvmclock_offset = delta; -- break; -- } -- case KVM_GET_CLOCK: { -- struct timespec now; -- struct kvm_clock_data user_ns; -- u64 now_ns; -- -- ktime_get_ts(&now); -- now_ns = timespec_to_ns(&now); -- user_ns.clock = kvm->arch.kvmclock_offset + now_ns; -- user_ns.flags = 0; -- -- r = -EFAULT; -- if (copy_to_user(argp, &user_ns, sizeof(user_ns))) -- goto out; -- r = 0; -- break; -- } -- - default: - ; - } -@@ -2475,8 +2433,7 @@ static void kvm_init_msr_list(void) - u32 dummy[2]; - unsigned i, j; - -- /* skip the first msrs in the list. KVM-specific */ -- for (i = j = KVM_SAVE_MSRS_BEGIN; i < ARRAY_SIZE(msrs_to_save); i++) { -+ for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) { - if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0) - continue; - if (j < i) -@@ -4805,13 +4762,12 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) - GFP_KERNEL); - if (!vcpu->arch.mce_banks) { - r = -ENOMEM; -- goto fail_free_lapic; -+ goto fail_mmu_destroy; - } - vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS; - - return 0; --fail_free_lapic: -- kvm_free_lapic(vcpu); -+ - fail_mmu_destroy: - kvm_mmu_destroy(vcpu); - fail_free_pio_data: -@@ -4822,7 +4778,6 @@ fail: - - void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) - { -- kfree(vcpu->arch.mce_banks); - kvm_free_lapic(vcpu); - down_read(&vcpu->kvm->slots_lock); - kvm_mmu_destroy(vcpu); -diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile -index c2b6f39..85f5db9 100644 ---- a/arch/x86/lib/Makefile -+++ b/arch/x86/lib/Makefile -@@ -2,14 +2,14 @@ - # Makefile for x86 specific library files. 
- # - --obj-$(CONFIG_SMP) += msr-smp.o -+obj-$(CONFIG_SMP) := msr.o - - lib-y := delay.o - lib-y += thunk_$(BITS).o - lib-y += usercopy_$(BITS).o getuser.o putuser.o - lib-y += memcpy_$(BITS).o - --obj-y += msr.o msr-reg.o msr-reg-export.o -+obj-y += msr-reg.o msr-reg-export.o - - ifeq ($(CONFIG_X86_32),y) - obj-y += atomic64_32.o -diff --git a/arch/x86/lib/msr.c b/arch/x86/lib/msr.c -index 8f8eebd..33a1e3c 100644 ---- a/arch/x86/lib/msr.c -+++ b/arch/x86/lib/msr.c -@@ -1,23 +1,226 @@ - #include - #include -+#include - #include - --struct msr *msrs_alloc(void) -+struct msr_info { -+ u32 msr_no; -+ struct msr reg; -+ struct msr *msrs; -+ int off; -+ int err; -+}; -+ -+static void __rdmsr_on_cpu(void *info) -+{ -+ struct msr_info *rv = info; -+ struct msr *reg; -+ int this_cpu = raw_smp_processor_id(); -+ -+ if (rv->msrs) -+ reg = &rv->msrs[this_cpu - rv->off]; -+ else -+ reg = &rv->reg; -+ -+ rdmsr(rv->msr_no, reg->l, reg->h); -+} -+ -+static void __wrmsr_on_cpu(void *info) -+{ -+ struct msr_info *rv = info; -+ struct msr *reg; -+ int this_cpu = raw_smp_processor_id(); -+ -+ if (rv->msrs) -+ reg = &rv->msrs[this_cpu - rv->off]; -+ else -+ reg = &rv->reg; -+ -+ wrmsr(rv->msr_no, reg->l, reg->h); -+} -+ -+int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) -+{ -+ int err; -+ struct msr_info rv; -+ -+ memset(&rv, 0, sizeof(rv)); -+ -+ rv.msr_no = msr_no; -+ err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1); -+ *l = rv.reg.l; -+ *h = rv.reg.h; -+ -+ return err; -+} -+EXPORT_SYMBOL(rdmsr_on_cpu); -+ -+int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) -+{ -+ int err; -+ struct msr_info rv; -+ -+ memset(&rv, 0, sizeof(rv)); -+ -+ rv.msr_no = msr_no; -+ rv.reg.l = l; -+ rv.reg.h = h; -+ err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1); -+ -+ return err; -+} -+EXPORT_SYMBOL(wrmsr_on_cpu); -+ -+/* rdmsr on a bunch of CPUs -+ * -+ * @mask: which CPUs -+ * @msr_no: which MSR -+ * @msrs: array of MSR values -+ * -+ */ -+void rdmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs) -+{ -+ struct msr_info rv; -+ int this_cpu; -+ -+ memset(&rv, 0, sizeof(rv)); -+ -+ rv.off = cpumask_first(mask); -+ rv.msrs = msrs; -+ rv.msr_no = msr_no; -+ -+ this_cpu = get_cpu(); -+ -+ if (cpumask_test_cpu(this_cpu, mask)) -+ __rdmsr_on_cpu(&rv); -+ -+ smp_call_function_many(mask, __rdmsr_on_cpu, &rv, 1); -+ put_cpu(); -+} -+EXPORT_SYMBOL(rdmsr_on_cpus); -+ -+/* -+ * wrmsr on a bunch of CPUs -+ * -+ * @mask: which CPUs -+ * @msr_no: which MSR -+ * @msrs: array of MSR values -+ * -+ */ -+void wrmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs) -+{ -+ struct msr_info rv; -+ int this_cpu; -+ -+ memset(&rv, 0, sizeof(rv)); -+ -+ rv.off = cpumask_first(mask); -+ rv.msrs = msrs; -+ rv.msr_no = msr_no; -+ -+ this_cpu = get_cpu(); -+ -+ if (cpumask_test_cpu(this_cpu, mask)) -+ __wrmsr_on_cpu(&rv); -+ -+ smp_call_function_many(mask, __wrmsr_on_cpu, &rv, 1); -+ put_cpu(); -+} -+EXPORT_SYMBOL(wrmsr_on_cpus); -+ -+/* These "safe" variants are slower and should be used when the target MSR -+ may not actually exist. 
*/ -+static void __rdmsr_safe_on_cpu(void *info) -+{ -+ struct msr_info *rv = info; -+ -+ rv->err = rdmsr_safe(rv->msr_no, &rv->reg.l, &rv->reg.h); -+} -+ -+static void __wrmsr_safe_on_cpu(void *info) -+{ -+ struct msr_info *rv = info; -+ -+ rv->err = wrmsr_safe(rv->msr_no, rv->reg.l, rv->reg.h); -+} -+ -+int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) - { -- struct msr *msrs = NULL; -+ int err; -+ struct msr_info rv; - -- msrs = alloc_percpu(struct msr); -- if (!msrs) { -- pr_warning("%s: error allocating msrs\n", __func__); -- return NULL; -- } -+ memset(&rv, 0, sizeof(rv)); - -- return msrs; -+ rv.msr_no = msr_no; -+ err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1); -+ *l = rv.reg.l; -+ *h = rv.reg.h; -+ -+ return err ? err : rv.err; - } --EXPORT_SYMBOL(msrs_alloc); -+EXPORT_SYMBOL(rdmsr_safe_on_cpu); - --void msrs_free(struct msr *msrs) -+int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) - { -- free_percpu(msrs); -+ int err; -+ struct msr_info rv; -+ -+ memset(&rv, 0, sizeof(rv)); -+ -+ rv.msr_no = msr_no; -+ rv.reg.l = l; -+ rv.reg.h = h; -+ err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1); -+ -+ return err ? err : rv.err; -+} -+EXPORT_SYMBOL(wrmsr_safe_on_cpu); -+ -+/* -+ * These variants are significantly slower, but allows control over -+ * the entire 32-bit GPR set. -+ */ -+struct msr_regs_info { -+ u32 *regs; -+ int err; -+}; -+ -+static void __rdmsr_safe_regs_on_cpu(void *info) -+{ -+ struct msr_regs_info *rv = info; -+ -+ rv->err = rdmsr_safe_regs(rv->regs); -+} -+ -+static void __wrmsr_safe_regs_on_cpu(void *info) -+{ -+ struct msr_regs_info *rv = info; -+ -+ rv->err = wrmsr_safe_regs(rv->regs); -+} -+ -+int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs) -+{ -+ int err; -+ struct msr_regs_info rv; -+ -+ rv.regs = regs; -+ rv.err = -EIO; -+ err = smp_call_function_single(cpu, __rdmsr_safe_regs_on_cpu, &rv, 1); -+ -+ return err ? err : rv.err; -+} -+EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu); -+ -+int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs) -+{ -+ int err; -+ struct msr_regs_info rv; -+ -+ rv.regs = regs; -+ rv.err = -EIO; -+ err = smp_call_function_single(cpu, __wrmsr_safe_regs_on_cpu, &rv, 1); -+ -+ return err ? err : rv.err; - } --EXPORT_SYMBOL(msrs_free); -+EXPORT_SYMBOL(wrmsr_safe_regs_on_cpu); -diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c -index 3871c60..dbb5381 100644 ---- a/arch/x86/mm/srat_64.c -+++ b/arch/x86/mm/srat_64.c -@@ -229,11 +229,9 @@ update_nodes_add(int node, unsigned long start, unsigned long end) - printk(KERN_ERR "SRAT: Hotplug zone not continuous. 
Partly ignored\n"); - } - -- if (changed) { -- node_set(node, cpu_nodes_parsed); -+ if (changed) - printk(KERN_INFO "SRAT: hot plug zone found %Lx - %Lx\n", - nd->start, nd->end); -- } - } - - /* Callback for parsing of the Proximity Domain <-> Memory Area mappings */ -diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c -index 3347f69..cb88b1a 100644 ---- a/arch/x86/oprofile/nmi_int.c -+++ b/arch/x86/oprofile/nmi_int.c -@@ -222,7 +222,7 @@ static void nmi_cpu_switch(void *dummy) - - /* move to next set */ - si += model->num_counters; -- if ((si >= model->num_virt_counters) || (counter_config[si].count == 0)) -+ if ((si > model->num_virt_counters) || (counter_config[si].count == 0)) - per_cpu(switch_index, cpu) = 0; - else - per_cpu(switch_index, cpu) = si; -@@ -598,7 +598,6 @@ static int __init ppro_init(char **cpu_type) - case 15: case 23: - *cpu_type = "i386/core_2"; - break; -- case 0x2e: - case 26: - spec = &op_arch_perfmon_spec; - *cpu_type = "i386/core_i7"; -diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c -index a672f12..b22d13b 100644 ---- a/arch/x86/pci/i386.c -+++ b/arch/x86/pci/i386.c -@@ -282,15 +282,6 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, - return -EINVAL; - - prot = pgprot_val(vma->vm_page_prot); -- -- /* -- * Return error if pat is not enabled and write_combine is requested. -- * Caller can followup with UC MINUS request and add a WC mtrr if there -- * is a free mtrr slot. -- */ -- if (!pat_enabled && write_combine) -- return -EINVAL; -- - if (pat_enabled && write_combine) - prot |= _PAGE_CACHE_WC; - else if (pat_enabled || boot_cpu_data.x86 > 3) -diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c -index 79f9738..dfbf70e 100644 ---- a/arch/x86/xen/enlighten.c -+++ b/arch/x86/xen/enlighten.c -@@ -138,23 +138,24 @@ static void xen_vcpu_setup(int cpu) - */ - void xen_vcpu_restore(void) - { -- int cpu; -+ if (have_vcpu_info_placement) { -+ int cpu; - -- for_each_online_cpu(cpu) { -- bool other_cpu = (cpu != smp_processor_id()); -+ for_each_online_cpu(cpu) { -+ bool other_cpu = (cpu != smp_processor_id()); - -- if (other_cpu && -- HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL)) -- BUG(); -- -- xen_setup_runstate_info(cpu); -+ if (other_cpu && -+ HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL)) -+ BUG(); - -- if (have_vcpu_info_placement) - xen_vcpu_setup(cpu); - -- if (other_cpu && -- HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL)) -- BUG(); -+ if (other_cpu && -+ HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL)) -+ BUG(); -+ } -+ -+ BUG_ON(!have_vcpu_info_placement); - } - } - -@@ -1181,8 +1182,6 @@ asmlinkage void __init xen_start_kernel(void) - - xen_raw_console_write("about to get started...\n"); - -- xen_setup_runstate_info(0); -- - /* Start the world */ - #ifdef CONFIG_X86_32 - i386_start_kernel(); -diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c -index bf4cd6b..3bf7b1d 100644 ---- a/arch/x86/xen/mmu.c -+++ b/arch/x86/xen/mmu.c -@@ -185,7 +185,7 @@ static inline unsigned p2m_index(unsigned long pfn) - } - - /* Build the parallel p2m_top_mfn structures */ --void xen_build_mfn_list_list(void) -+static void __init xen_build_mfn_list_list(void) - { - unsigned pfn, idx; - -diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c -index 360f8d8..fe03eee 100644 ---- a/arch/x86/xen/smp.c -+++ b/arch/x86/xen/smp.c -@@ -295,7 +295,6 @@ static int __cpuinit xen_cpu_up(unsigned int cpu) - (unsigned long)task_stack_page(idle) - - KERNEL_STACK_OFFSET + THREAD_SIZE; - #endif -- xen_setup_runstate_info(cpu); - xen_setup_timer(cpu); - 
xen_init_lock_cpu(cpu); - -diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c -index 987267f..95be7b4 100644 ---- a/arch/x86/xen/suspend.c -+++ b/arch/x86/xen/suspend.c -@@ -1,5 +1,4 @@ - #include --#include - - #include - #include -@@ -28,8 +27,6 @@ void xen_pre_suspend(void) - - void xen_post_suspend(int suspend_cancelled) - { -- xen_build_mfn_list_list(); -- - xen_setup_shared_info(); - - if (suspend_cancelled) { -@@ -47,19 +44,7 @@ void xen_post_suspend(int suspend_cancelled) - - } - --static void xen_vcpu_notify_restore(void *data) --{ -- unsigned long reason = (unsigned long)data; -- -- /* Boot processor notified via generic timekeeping_resume() */ -- if ( smp_processor_id() == 0) -- return; -- -- clockevents_notify(reason, NULL); --} -- - void xen_arch_resume(void) - { -- smp_call_function(xen_vcpu_notify_restore, -- (void *)CLOCK_EVT_NOTIFY_RESUME, 1); -+ /* nothing */ - } -diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c -index 9d1f853..0a5aa44 100644 ---- a/arch/x86/xen/time.c -+++ b/arch/x86/xen/time.c -@@ -100,7 +100,7 @@ bool xen_vcpu_stolen(int vcpu) - return per_cpu(runstate, vcpu).state == RUNSTATE_runnable; - } - --void xen_setup_runstate_info(int cpu) -+static void setup_runstate_info(int cpu) - { - struct vcpu_register_runstate_memory_area area; - -@@ -434,7 +434,7 @@ void xen_setup_timer(int cpu) - name = ""; - - irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt, -- IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER, -+ IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING, - name, NULL); - - evt = &per_cpu(xen_clock_events, cpu); -@@ -442,6 +442,8 @@ void xen_setup_timer(int cpu) - - evt->cpumask = cpumask_of(cpu); - evt->irq = irq; -+ -+ setup_runstate_info(cpu); - } - - void xen_teardown_timer(int cpu) -@@ -492,7 +494,6 @@ __init void xen_time_init(void) - - setup_force_cpu_cap(X86_FEATURE_TSC); - -- xen_setup_runstate_info(cpu); - xen_setup_timer(cpu); - xen_setup_cpu_clockevents(); - } -diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S -index 53adefd..02f496a 100644 ---- a/arch/x86/xen/xen-asm_64.S -+++ b/arch/x86/xen/xen-asm_64.S -@@ -96,7 +96,7 @@ ENTRY(xen_sysret32) - pushq $__USER32_CS - pushq %rcx - -- pushq $0 -+ pushq $VGCF_in_syscall - 1: jmp hypercall_iret - ENDPATCH(xen_sysret32) - RELOC(xen_sysret32, 1b+1) -@@ -151,7 +151,7 @@ ENTRY(xen_syscall32_target) - ENTRY(xen_sysenter_target) - lea 16(%rsp), %rsp /* strip %rcx, %r11 */ - mov $-ENOSYS, %rax -- pushq $0 -+ pushq $VGCF_in_syscall - jmp hypercall_iret - ENDPROC(xen_syscall32_target) - ENDPROC(xen_sysenter_target) -diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h -index f9153a3..355fa6b 100644 ---- a/arch/x86/xen/xen-ops.h -+++ b/arch/x86/xen/xen-ops.h -@@ -25,7 +25,6 @@ extern struct shared_info *HYPERVISOR_shared_info; - - void xen_setup_mfn_list_list(void); - void xen_setup_shared_info(void); --void xen_build_mfn_list_list(void); - void xen_setup_machphys_mapping(void); - pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn); - void xen_ident_map_ISA(void); -@@ -42,7 +41,6 @@ void __init xen_build_dynamic_phys_to_machine(void); - - void xen_init_irq_ops(void); - void xen_setup_timer(int cpu); --void xen_setup_runstate_info(int cpu); - void xen_teardown_timer(int cpu); - cycle_t xen_clocksource_read(void); - void xen_setup_cpu_clockevents(void); -diff --git a/arch/xtensa/include/asm/syscall.h b/arch/xtensa/include/asm/syscall.h -index 4352dbe..05cebf8 100644 ---- a/arch/xtensa/include/asm/syscall.h -+++ 
b/arch/xtensa/include/asm/syscall.h -@@ -13,6 +13,8 @@ struct sigaction; - asmlinkage long xtensa_execve(char*, char**, char**, struct pt_regs*); - asmlinkage long xtensa_clone(unsigned long, unsigned long, struct pt_regs*); - asmlinkage long xtensa_pipe(int __user *); -+asmlinkage long xtensa_mmap2(unsigned long, unsigned long, unsigned long, -+ unsigned long, unsigned long, unsigned long); - asmlinkage long xtensa_ptrace(long, long, long, long); - asmlinkage long xtensa_sigreturn(struct pt_regs*); - asmlinkage long xtensa_rt_sigreturn(struct pt_regs*); -diff --git a/arch/xtensa/include/asm/unistd.h b/arch/xtensa/include/asm/unistd.h -index 9a5c354..c092c8f 100644 ---- a/arch/xtensa/include/asm/unistd.h -+++ b/arch/xtensa/include/asm/unistd.h -@@ -189,7 +189,7 @@ __SYSCALL( 79, sys_fremovexattr, 2) - /* File Map / Shared Memory Operations */ - - #define __NR_mmap2 80 --__SYSCALL( 80, sys_mmap_pgoff, 6) -+__SYSCALL( 80, xtensa_mmap2, 6) - #define __NR_munmap 81 - __SYSCALL( 81, sys_munmap, 2) - #define __NR_mprotect 82 -diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c -index 1e67bab..ac15ecb 100644 ---- a/arch/xtensa/kernel/syscall.c -+++ b/arch/xtensa/kernel/syscall.c -@@ -57,6 +57,31 @@ asmlinkage long xtensa_pipe(int __user *userfds) - return error; - } - -+ -+asmlinkage long xtensa_mmap2(unsigned long addr, unsigned long len, -+ unsigned long prot, unsigned long flags, -+ unsigned long fd, unsigned long pgoff) -+{ -+ int error = -EBADF; -+ struct file * file = NULL; -+ -+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); -+ if (!(flags & MAP_ANONYMOUS)) { -+ file = fget(fd); -+ if (!file) -+ goto out; -+ } -+ -+ down_write(¤t->mm->mmap_sem); -+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); -+ up_write(¤t->mm->mmap_sem); -+ -+ if (file) -+ fput(file); -+out: -+ return error; -+} -+ - asmlinkage long xtensa_shmat(int shmid, char __user *shmaddr, int shmflg) - { - unsigned long ret; -diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c -index 49f6ede..7411915 100644 ---- a/drivers/acpi/bus.c -+++ b/drivers/acpi/bus.c -@@ -344,167 +344,6 @@ bool acpi_bus_can_wakeup(acpi_handle handle) - - EXPORT_SYMBOL(acpi_bus_can_wakeup); - --static void acpi_print_osc_error(acpi_handle handle, -- struct acpi_osc_context *context, char *error) --{ -- struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER}; -- int i; -- -- if (ACPI_FAILURE(acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer))) -- printk(KERN_DEBUG "%s\n", error); -- else { -- printk(KERN_DEBUG "%s:%s\n", (char *)buffer.pointer, error); -- kfree(buffer.pointer); -- } -- printk(KERN_DEBUG"_OSC request data:"); -- for (i = 0; i < context->cap.length; i += sizeof(u32)) -- printk("%x ", *((u32 *)(context->cap.pointer + i))); -- printk("\n"); --} -- --static u8 hex_val(unsigned char c) --{ -- return isdigit(c) ? 
c - '0' : toupper(c) - 'A' + 10; --} -- --static acpi_status acpi_str_to_uuid(char *str, u8 *uuid) --{ -- int i; -- static int opc_map_to_uuid[16] = {6, 4, 2, 0, 11, 9, 16, 14, 19, 21, -- 24, 26, 28, 30, 32, 34}; -- -- if (strlen(str) != 36) -- return AE_BAD_PARAMETER; -- for (i = 0; i < 36; i++) { -- if (i == 8 || i == 13 || i == 18 || i == 23) { -- if (str[i] != '-') -- return AE_BAD_PARAMETER; -- } else if (!isxdigit(str[i])) -- return AE_BAD_PARAMETER; -- } -- for (i = 0; i < 16; i++) { -- uuid[i] = hex_val(str[opc_map_to_uuid[i]]) << 4; -- uuid[i] |= hex_val(str[opc_map_to_uuid[i] + 1]); -- } -- return AE_OK; --} -- --acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context) --{ -- acpi_status status; -- struct acpi_object_list input; -- union acpi_object in_params[4]; -- union acpi_object *out_obj; -- u8 uuid[16]; -- u32 errors; -- struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL}; -- -- if (!context) -- return AE_ERROR; -- if (ACPI_FAILURE(acpi_str_to_uuid(context->uuid_str, uuid))) -- return AE_ERROR; -- context->ret.length = ACPI_ALLOCATE_BUFFER; -- context->ret.pointer = NULL; -- -- /* Setting up input parameters */ -- input.count = 4; -- input.pointer = in_params; -- in_params[0].type = ACPI_TYPE_BUFFER; -- in_params[0].buffer.length = 16; -- in_params[0].buffer.pointer = uuid; -- in_params[1].type = ACPI_TYPE_INTEGER; -- in_params[1].integer.value = context->rev; -- in_params[2].type = ACPI_TYPE_INTEGER; -- in_params[2].integer.value = context->cap.length/sizeof(u32); -- in_params[3].type = ACPI_TYPE_BUFFER; -- in_params[3].buffer.length = context->cap.length; -- in_params[3].buffer.pointer = context->cap.pointer; -- -- status = acpi_evaluate_object(handle, "_OSC", &input, &output); -- if (ACPI_FAILURE(status)) -- return status; -- -- if (!output.length) -- return AE_NULL_OBJECT; -- -- out_obj = output.pointer; -- if (out_obj->type != ACPI_TYPE_BUFFER -- || out_obj->buffer.length != context->cap.length) { -- acpi_print_osc_error(handle, context, -- "_OSC evaluation returned wrong type"); -- status = AE_TYPE; -- goto out_kfree; -- } -- /* Need to ignore the bit0 in result code */ -- errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0); -- if (errors) { -- if (errors & OSC_REQUEST_ERROR) -- acpi_print_osc_error(handle, context, -- "_OSC request failed"); -- if (errors & OSC_INVALID_UUID_ERROR) -- acpi_print_osc_error(handle, context, -- "_OSC invalid UUID"); -- if (errors & OSC_INVALID_REVISION_ERROR) -- acpi_print_osc_error(handle, context, -- "_OSC invalid revision"); -- if (errors & OSC_CAPABILITIES_MASK_ERROR) { -- if (((u32 *)context->cap.pointer)[OSC_QUERY_TYPE] -- & OSC_QUERY_ENABLE) -- goto out_success; -- status = AE_SUPPORT; -- goto out_kfree; -- } -- status = AE_ERROR; -- goto out_kfree; -- } --out_success: -- context->ret.length = out_obj->buffer.length; -- context->ret.pointer = kmalloc(context->ret.length, GFP_KERNEL); -- if (!context->ret.pointer) { -- status = AE_NO_MEMORY; -- goto out_kfree; -- } -- memcpy(context->ret.pointer, out_obj->buffer.pointer, -- context->ret.length); -- status = AE_OK; -- --out_kfree: -- kfree(output.pointer); -- if (status != AE_OK) -- context->ret.pointer = NULL; -- return status; --} --EXPORT_SYMBOL(acpi_run_osc); -- --static u8 sb_uuid_str[] = "0811B06E-4A27-44F9-8D60-3CBBC22E7B48"; --static void acpi_bus_osc_support(void) --{ -- u32 capbuf[2]; -- struct acpi_osc_context context = { -- .uuid_str = sb_uuid_str, -- .rev = 1, -- .cap.length = 8, -- .cap.pointer = capbuf, -- }; -- acpi_handle handle; -- -- 
capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE; -- capbuf[OSC_SUPPORT_TYPE] = OSC_SB_PR3_SUPPORT; /* _PR3 is in use */ --#if defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR) ||\ -- defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR_MODULE) -- capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_PAD_SUPPORT; --#endif -- --#if defined(CONFIG_ACPI_PROCESSOR) || defined(CONFIG_ACPI_PROCESSOR_MODULE) -- capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_PPC_OST_SUPPORT; --#endif -- if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle))) -- return; -- if (ACPI_SUCCESS(acpi_run_osc(handle, &context))) -- kfree(context.ret.pointer); -- /* do we need to check the returned cap? Sounds no */ --} -- - /* -------------------------------------------------------------------------- - Event Management - -------------------------------------------------------------------------- */ -@@ -895,8 +734,6 @@ static int __init acpi_bus_init(void) - status = acpi_ec_ecdt_probe(); - /* Ignore result. Not having an ECDT is not fatal. */ - -- acpi_bus_osc_support(); -- - status = acpi_initialize_objects(ACPI_FULL_INITIALIZATION); - if (ACPI_FAILURE(status)) { - printk(KERN_ERR PREFIX "Unable to initialize ACPI objects\n"); -diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c -index 8a95e83..0c9c6a9 100644 ---- a/drivers/acpi/button.c -+++ b/drivers/acpi/button.c -@@ -282,13 +282,6 @@ static int acpi_lid_send_state(struct acpi_device *device) - if (ret == NOTIFY_DONE) - ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, - device); -- if (ret == NOTIFY_DONE || ret == NOTIFY_OK) { -- /* -- * It is also regarded as success if the notifier_chain -- * returns NOTIFY_OK or NOTIFY_DONE. -- */ -- ret = 0; -- } - return ret; - } - -diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c -index f1670e0..baef28c 100644 ---- a/drivers/acpi/ec.c -+++ b/drivers/acpi/ec.c -@@ -201,13 +201,14 @@ unlock: - spin_unlock_irqrestore(&ec->curr_lock, flags); - } - --static int acpi_ec_sync_query(struct acpi_ec *ec); -+static void acpi_ec_gpe_query(void *ec_cxt); - --static int ec_check_sci_sync(struct acpi_ec *ec, u8 state) -+static int ec_check_sci(struct acpi_ec *ec, u8 state) - { - if (state & ACPI_EC_FLAG_SCI) { - if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) -- return acpi_ec_sync_query(ec); -+ return acpi_os_execute(OSL_EC_BURST_HANDLER, -+ acpi_ec_gpe_query, ec); - } - return 0; - } -@@ -248,6 +249,11 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, - { - unsigned long tmp; - int ret = 0; -+ pr_debug(PREFIX "transaction start\n"); -+ /* disable GPE during transaction if storm is detected */ -+ if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) { -+ acpi_disable_gpe(NULL, ec->gpe); -+ } - if (EC_FLAGS_MSI) - udelay(ACPI_EC_MSI_UDELAY); - /* start transaction */ -@@ -259,9 +265,20 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, - clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags); - spin_unlock_irqrestore(&ec->curr_lock, tmp); - ret = ec_poll(ec); -+ pr_debug(PREFIX "transaction end\n"); - spin_lock_irqsave(&ec->curr_lock, tmp); - ec->curr = NULL; - spin_unlock_irqrestore(&ec->curr_lock, tmp); -+ if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) { -+ /* check if we received SCI during transaction */ -+ ec_check_sci(ec, acpi_ec_read_status(ec)); -+ /* it is safe to enable GPE outside of transaction */ -+ acpi_enable_gpe(NULL, ec->gpe); -+ } else if (t->irq_count > ACPI_EC_STORM_THRESHOLD) { -+ pr_info(PREFIX "GPE storm detected, " -+ "transactions will use polling mode\n"); -+ set_bit(EC_FLAGS_GPE_STORM, &ec->flags); -+ } - return ret; - } - -@@ 
-304,26 +321,7 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t) - status = -ETIME; - goto end; - } -- pr_debug(PREFIX "transaction start\n"); -- /* disable GPE during transaction if storm is detected */ -- if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) { -- acpi_disable_gpe(NULL, ec->gpe); -- } -- - status = acpi_ec_transaction_unlocked(ec, t); -- -- /* check if we received SCI during transaction */ -- ec_check_sci_sync(ec, acpi_ec_read_status(ec)); -- if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) { -- msleep(1); -- /* it is safe to enable GPE outside of transaction */ -- acpi_enable_gpe(NULL, ec->gpe); -- } else if (t->irq_count > ACPI_EC_STORM_THRESHOLD) { -- pr_info(PREFIX "GPE storm detected, " -- "transactions will use polling mode\n"); -- set_bit(EC_FLAGS_GPE_STORM, &ec->flags); -- } -- pr_debug(PREFIX "transaction end\n"); - end: - if (ec->global_lock) - acpi_release_global_lock(glk); -@@ -445,7 +443,7 @@ int ec_transaction(u8 command, - - EXPORT_SYMBOL(ec_transaction); - --static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 * data) -+static int acpi_ec_query(struct acpi_ec *ec, u8 * data) - { - int result; - u8 d; -@@ -454,16 +452,20 @@ static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 * data) - .wlen = 0, .rlen = 1}; - if (!ec || !data) - return -EINVAL; -+ - /* - * Query the EC to find out which _Qxx method we need to evaluate. - * Note that successful completion of the query causes the ACPI_EC_SCI - * bit to be cleared (and thus clearing the interrupt source). - */ -- result = acpi_ec_transaction_unlocked(ec, &t); -+ -+ result = acpi_ec_transaction(ec, &t); - if (result) - return result; -+ - if (!d) - return -ENODATA; -+ - *data = d; - return 0; - } -@@ -507,78 +509,43 @@ void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit) - - EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler); - --static void acpi_ec_run(void *cxt) --{ -- struct acpi_ec_query_handler *handler = cxt; -- if (!handler) -- return; -- pr_debug(PREFIX "start query execution\n"); -- if (handler->func) -- handler->func(handler->data); -- else if (handler->handle) -- acpi_evaluate_object(handler->handle, NULL, NULL, NULL); -- pr_debug(PREFIX "stop query execution\n"); -- kfree(handler); --} -- --static int acpi_ec_sync_query(struct acpi_ec *ec) -+static void acpi_ec_gpe_query(void *ec_cxt) - { -+ struct acpi_ec *ec = ec_cxt; - u8 value = 0; -- int status; -- struct acpi_ec_query_handler *handler, *copy; -- if ((status = acpi_ec_query_unlocked(ec, &value))) -- return status; -+ struct acpi_ec_query_handler *handler, copy; -+ -+ if (!ec || acpi_ec_query(ec, &value)) -+ return; -+ mutex_lock(&ec->lock); - list_for_each_entry(handler, &ec->list, node) { - if (value == handler->query_bit) { - /* have custom handler for this bit */ -- copy = kmalloc(sizeof(*handler), GFP_KERNEL); -- if (!copy) -- return -ENOMEM; -- memcpy(copy, handler, sizeof(*copy)); -- pr_debug(PREFIX "push query execution (0x%2x) on queue\n", value); -- return acpi_os_execute(OSL_GPE_HANDLER, -- acpi_ec_run, copy); -+ memcpy(©, handler, sizeof(copy)); -+ mutex_unlock(&ec->lock); -+ if (copy.func) { -+ copy.func(copy.data); -+ } else if (copy.handle) { -+ acpi_evaluate_object(copy.handle, NULL, NULL, NULL); -+ } -+ return; - } - } -- return 0; --} -- --static void acpi_ec_gpe_query(void *ec_cxt) --{ -- struct acpi_ec *ec = ec_cxt; -- if (!ec) -- return; -- mutex_lock(&ec->lock); -- acpi_ec_sync_query(ec); - mutex_unlock(&ec->lock); - } - --static void acpi_ec_gpe_query(void *ec_cxt); -- --static int 
ec_check_sci(struct acpi_ec *ec, u8 state) --{ -- if (state & ACPI_EC_FLAG_SCI) { -- if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) { -- pr_debug(PREFIX "push gpe query to the queue\n"); -- return acpi_os_execute(OSL_NOTIFY_HANDLER, -- acpi_ec_gpe_query, ec); -- } -- } -- return 0; --} -- - static u32 acpi_ec_gpe_handler(void *data) - { - struct acpi_ec *ec = data; -+ u8 status; - - pr_debug(PREFIX "~~~> interrupt\n"); -+ status = acpi_ec_read_status(ec); - -- advance_transaction(ec, acpi_ec_read_status(ec)); -- if (ec_transaction_done(ec) && -- (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) == 0) { -+ advance_transaction(ec, status); -+ if (ec_transaction_done(ec) && (status & ACPI_EC_FLAG_IBF) == 0) - wake_up(&ec->wait); -- ec_check_sci(ec, acpi_ec_read_status(ec)); -- } -+ ec_check_sci(ec, status); - return ACPI_INTERRUPT_HANDLED; - } - -@@ -949,7 +916,6 @@ static int ec_validate_ecdt(const struct dmi_system_id *id) - /* MSI EC needs special treatment, enable it */ - static int ec_flag_msi(const struct dmi_system_id *id) - { -- printk(KERN_DEBUG PREFIX "Detected MSI hardware, enabling workarounds.\n"); - EC_FLAGS_MSI = 1; - EC_FLAGS_VALIDATE_ECDT = 1; - return 0; -@@ -962,13 +928,8 @@ static struct dmi_system_id __initdata ec_dmi_table[] = { - DMI_MATCH(DMI_BOARD_NAME, "JFL92") }, NULL}, - { - ec_flag_msi, "MSI hardware", { -- DMI_MATCH(DMI_BIOS_VENDOR, "Micro-Star")}, NULL}, -- { -- ec_flag_msi, "MSI hardware", { -- DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star")}, NULL}, -- { -- ec_flag_msi, "MSI hardware", { -- DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-Star")}, NULL}, -+ DMI_MATCH(DMI_BIOS_VENDOR, "Micro-Star"), -+ DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-Star") }, NULL}, - { - ec_validate_ecdt, "ASUS hardware", { - DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL}, -diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c -index d9f78f6..bbd066e 100644 ---- a/drivers/acpi/processor_idle.c -+++ b/drivers/acpi/processor_idle.c -@@ -110,14 +110,6 @@ static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = { - DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"), - DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")}, - (void *)2}, -- { set_max_cstate, "Pavilion zv5000", { -- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), -- DMI_MATCH(DMI_PRODUCT_NAME,"Pavilion zv5000 (DS502A#ABA)")}, -- (void *)1}, -- { set_max_cstate, "Asus L8400B", { -- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), -- DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")}, -- (void *)1}, - {}, - }; - -@@ -307,17 +299,6 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr) - pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency; - pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency; - -- /* -- * FADT specified C2 latency must be less than or equal to -- * 100 microseconds. -- */ -- if (acpi_gbl_FADT.C2latency > ACPI_PROCESSOR_MAX_C2_LATENCY) { -- ACPI_DEBUG_PRINT((ACPI_DB_INFO, -- "C2 latency too large [%d]\n", acpi_gbl_FADT.C2latency)); -- /* invalidate C2 */ -- pr->power.states[ACPI_STATE_C2].address = 0; -- } -- - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "lvl2[0x%08x] lvl3[0x%08x]\n", - pr->power.states[ACPI_STATE_C2].address, -@@ -514,6 +495,16 @@ static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx) - return; - - /* -+ * C2 latency must be less than or equal to 100 -+ * microseconds. 
-+ */ -+ else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) { -+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, -+ "latency too large [%d]\n", cx->latency)); -+ return; -+ } -+ -+ /* - * Otherwise we've met all of our C2 requirements. - * Normalize the C2 latency to expidite policy - */ -diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c -index 0b09703..14a7481 100644 ---- a/drivers/acpi/scan.c -+++ b/drivers/acpi/scan.c -@@ -1357,9 +1357,6 @@ int acpi_bus_start(struct acpi_device *device) - { - struct acpi_bus_ops ops; - -- if (!device) -- return -EINVAL; -- - memset(&ops, 0, sizeof(ops)); - ops.acpi_op_start = 1; - -diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c -index 9b37502..a3241a1 100644 ---- a/drivers/ata/ahci.c -+++ b/drivers/ata/ahci.c -@@ -113,7 +113,6 @@ enum { - board_ahci_mcp65 = 6, - board_ahci_nopmp = 7, - board_ahci_yesncq = 8, -- board_ahci_nosntf = 9, - - /* global controller registers */ - HOST_CAP = 0x00, /* host capabilities */ -@@ -236,7 +235,6 @@ enum { - AHCI_HFLAG_NO_SUSPEND = (1 << 10), /* don't suspend */ - AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = (1 << 11), /* treat SRST timeout as - link offline */ -- AHCI_HFLAG_NO_SNTF = (1 << 12), /* no sntf */ - - /* ap->flags bits */ - -@@ -510,7 +508,7 @@ static const struct ata_port_info ahci_port_info[] = { - .udma_mask = ATA_UDMA6, - .port_ops = &ahci_ops, - }, -- [board_ahci_yesncq] = -+ /* board_ahci_yesncq */ - { - AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ), - .flags = AHCI_FLAG_COMMON, -@@ -518,14 +516,6 @@ static const struct ata_port_info ahci_port_info[] = { - .udma_mask = ATA_UDMA6, - .port_ops = &ahci_ops, - }, -- [board_ahci_nosntf] = -- { -- AHCI_HFLAGS (AHCI_HFLAG_NO_SNTF), -- .flags = AHCI_FLAG_COMMON, -- .pio_mask = ATA_PIO4, -- .udma_mask = ATA_UDMA6, -- .port_ops = &ahci_ops, -- }, - }; - - static const struct pci_device_id ahci_pci_tbl[] = { -@@ -541,7 +531,7 @@ static const struct pci_device_id ahci_pci_tbl[] = { - { PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */ - { PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */ - { PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */ -- { PCI_VDEVICE(INTEL, 0x2822), board_ahci_nosntf }, /* ICH8 */ -+ { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* ICH8 */ - { PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */ - { PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */ - { PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */ -@@ -859,12 +849,6 @@ static void ahci_save_initial_config(struct pci_dev *pdev, - cap &= ~HOST_CAP_PMP; - } - -- if ((cap & HOST_CAP_SNTF) && (hpriv->flags & AHCI_HFLAG_NO_SNTF)) { -- dev_printk(KERN_INFO, &pdev->dev, -- "controller can't do SNTF, turning off CAP_SNTF\n"); -- cap &= ~HOST_CAP_SNTF; -- } -- - if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361 && - port_map != 1) { - dev_printk(KERN_INFO, &pdev->dev, -@@ -2868,21 +2852,6 @@ static bool ahci_broken_suspend(struct pci_dev *pdev) - }, - .driver_data = "F.23", /* cutoff BIOS version */ - }, -- /* -- * Acer eMachines G725 has the same problem. BIOS -- * V1.03 is known to be broken. V3.04 is known to -- * work. Inbetween, there are V1.06, V2.06 and V3.03 -- * that we don't have much idea about. For now, -- * blacklist anything older than V3.04. 
-- */ -- { -- .ident = "G725", -- .matches = { -- DMI_MATCH(DMI_SYS_VENDOR, "eMachines"), -- DMI_MATCH(DMI_PRODUCT_NAME, "eMachines G725"), -- }, -- .driver_data = "V3.04", /* cutoff BIOS version */ -- }, - { } /* terminate list */ - }; - const struct dmi_system_id *dmi = dmi_first_match(sysids); -diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c -index 0c6155f..9ac4e37 100644 ---- a/drivers/ata/ata_piix.c -+++ b/drivers/ata/ata_piix.c -@@ -869,10 +869,10 @@ static void do_pata_set_dmamode(struct ata_port *ap, struct ata_device *adev, in - (timings[pio][1] << 8); - } - -- if (ap->udma_mask) -+ if (ap->udma_mask) { - udma_enable &= ~(1 << devid); -- -- pci_write_config_word(dev, master_port, master_data); -+ pci_write_config_word(dev, master_port, master_data); -+ } - } - /* Don't scribble on 0x48 if the controller does not support UDMA */ - if (ap->udma_mask) -diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c -index 91fed3c..dc72690 100644 ---- a/drivers/ata/libata-core.c -+++ b/drivers/ata/libata-core.c -@@ -3790,45 +3790,21 @@ int sata_link_debounce(struct ata_link *link, const unsigned long *params, - int sata_link_resume(struct ata_link *link, const unsigned long *params, - unsigned long deadline) - { -- int tries = ATA_LINK_RESUME_TRIES; - u32 scontrol, serror; - int rc; - - if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) - return rc; - -- /* -- * Writes to SControl sometimes get ignored under certain -- * controllers (ata_piix SIDPR). Make sure DET actually is -- * cleared. -- */ -- do { -- scontrol = (scontrol & 0x0f0) | 0x300; -- if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) -- return rc; -- /* -- * Some PHYs react badly if SStatus is pounded -- * immediately after resuming. Delay 200ms before -- * debouncing. -- */ -- msleep(200); -+ scontrol = (scontrol & 0x0f0) | 0x300; - -- /* is SControl restored correctly? */ -- if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) -- return rc; -- } while ((scontrol & 0xf0f) != 0x300 && --tries); -- -- if ((scontrol & 0xf0f) != 0x300) { -- ata_link_printk(link, KERN_ERR, -- "failed to resume link (SControl %X)\n", -- scontrol); -- return 0; -- } -+ if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) -+ return rc; - -- if (tries < ATA_LINK_RESUME_TRIES) -- ata_link_printk(link, KERN_WARNING, -- "link resume succeeded after %d retries\n", -- ATA_LINK_RESUME_TRIES - tries); -+ /* Some PHYs react badly if SStatus is pounded immediately -+ * after resuming. Delay 200ms before debouncing. 
-+ */ -+ msleep(200); - - if ((rc = sata_link_debounce(link, params, deadline))) - return rc; -diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c -index 7d8d3c3..bba2ae5 100644 ---- a/drivers/ata/libata-eh.c -+++ b/drivers/ata/libata-eh.c -@@ -2019,9 +2019,8 @@ static void ata_eh_link_autopsy(struct ata_link *link) - qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER); - - /* determine whether the command is worth retrying */ -- if (qc->flags & ATA_QCFLAG_IO || -- (!(qc->err_mask & AC_ERR_INVALID) && -- qc->err_mask != AC_ERR_DEV)) -+ if (!(qc->err_mask & AC_ERR_INVALID) && -+ ((qc->flags & ATA_QCFLAG_IO) || qc->err_mask != AC_ERR_DEV)) - qc->flags |= ATA_QCFLAG_RETRY; - - /* accumulate error info */ -diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c -index 2ae15c3..bbbb1fa 100644 ---- a/drivers/ata/libata-sff.c -+++ b/drivers/ata/libata-sff.c -@@ -893,9 +893,6 @@ static void ata_pio_sector(struct ata_queued_cmd *qc) - do_write); - } - -- if (!do_write) -- flush_dcache_page(page); -- - qc->curbytes += qc->sect_size; - qc->cursg_ofs += qc->sect_size; - -diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c -index f0bad9b..f98dffe 100644 ---- a/drivers/ata/pata_cmd64x.c -+++ b/drivers/ata/pata_cmd64x.c -@@ -219,7 +219,7 @@ static void cmd64x_set_dmamode(struct ata_port *ap, struct ata_device *adev) - regU |= udma_data[adev->dma_mode - XFER_UDMA_0] << shift; - /* Merge the control bits */ - regU |= 1 << adev->devno; /* UDMA on */ -- if (adev->dma_mode > XFER_UDMA_2) /* 15nS timing */ -+ if (adev->dma_mode > 2) /* 15nS timing */ - regU |= 4 << adev->devno; - } else { - regU &= ~ (1 << adev->devno); /* UDMA off */ -diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c -index ec07c53..d0a7df2 100644 ---- a/drivers/ata/pata_hpt37x.c -+++ b/drivers/ata/pata_hpt37x.c -@@ -24,7 +24,7 @@ - #include - - #define DRV_NAME "pata_hpt37x" --#define DRV_VERSION "0.6.14" -+#define DRV_VERSION "0.6.12" - - struct hpt_clock { - u8 xfer_speed; -@@ -404,8 +404,9 @@ static void hpt370_set_piomode(struct ata_port *ap, struct ata_device *adev) - - pci_read_config_dword(pdev, addr1, ®); - mode = hpt37x_find_mode(ap, adev->pio_mode); -- mode &= 0xCFC3FFFF; /* Leave DMA bits alone */ -- reg &= ~0xCFC3FFFF; /* Strip timing bits */ -+ mode &= ~0x8000000; /* No FIFO in PIO */ -+ mode &= ~0x30070000; /* Leave config bits alone */ -+ reg &= 0x30070000; /* Strip timing bits */ - pci_write_config_dword(pdev, addr1, reg | mode); - } - -@@ -422,7 +423,8 @@ static void hpt370_set_dmamode(struct ata_port *ap, struct ata_device *adev) - { - struct pci_dev *pdev = to_pci_dev(ap->host->dev); - u32 addr1, addr2; -- u32 reg, mode, mask; -+ u32 reg; -+ u32 mode; - u8 fast; - - addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no); -@@ -434,12 +436,11 @@ static void hpt370_set_dmamode(struct ata_port *ap, struct ata_device *adev) - fast |= 0x01; - pci_write_config_byte(pdev, addr2, fast); - -- mask = adev->dma_mode < XFER_UDMA_0 ? 
0x31C001FF : 0x303C0000; -- - pci_read_config_dword(pdev, addr1, ®); - mode = hpt37x_find_mode(ap, adev->dma_mode); -- mode &= mask; -- reg &= ~mask; -+ mode |= 0x8000000; /* FIFO in MWDMA or UDMA */ -+ mode &= ~0xC0000000; /* Leave config bits alone */ -+ reg &= 0xC0000000; /* Strip timing bits */ - pci_write_config_dword(pdev, addr1, reg | mode); - } - -@@ -507,8 +508,9 @@ static void hpt372_set_piomode(struct ata_port *ap, struct ata_device *adev) - mode = hpt37x_find_mode(ap, adev->pio_mode); - - printk("Find mode for %d reports %X\n", adev->pio_mode, mode); -- mode &= 0xCFC3FFFF; /* Leave DMA bits alone */ -- reg &= ~0xCFC3FFFF; /* Strip timing bits */ -+ mode &= ~0x80000000; /* No FIFO in PIO */ -+ mode &= ~0x30070000; /* Leave config bits alone */ -+ reg &= 0x30070000; /* Strip timing bits */ - pci_write_config_dword(pdev, addr1, reg | mode); - } - -@@ -525,7 +527,8 @@ static void hpt372_set_dmamode(struct ata_port *ap, struct ata_device *adev) - { - struct pci_dev *pdev = to_pci_dev(ap->host->dev); - u32 addr1, addr2; -- u32 reg, mode, mask; -+ u32 reg; -+ u32 mode; - u8 fast; - - addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no); -@@ -536,13 +539,12 @@ static void hpt372_set_dmamode(struct ata_port *ap, struct ata_device *adev) - fast &= ~0x07; - pci_write_config_byte(pdev, addr2, fast); - -- mask = adev->dma_mode < XFER_UDMA_0 ? 0x31C001FF : 0x303C0000; -- - pci_read_config_dword(pdev, addr1, ®); - mode = hpt37x_find_mode(ap, adev->dma_mode); - printk("Find mode for DMA %d reports %X\n", adev->dma_mode, mode); -- mode &= mask; -- reg &= ~mask; -+ mode &= ~0xC0000000; /* Leave config bits alone */ -+ mode |= 0x80000000; /* FIFO in MWDMA or UDMA */ -+ reg &= 0xC0000000; /* Strip timing bits */ - pci_write_config_dword(pdev, addr1, reg | mode); - } - -diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c -index d16e87e..3d59fe0 100644 ---- a/drivers/ata/pata_hpt3x2n.c -+++ b/drivers/ata/pata_hpt3x2n.c -@@ -8,7 +8,7 @@ - * Copyright (C) 1999-2003 Andre Hedrick - * Portions Copyright (C) 2001 Sun Microsystems, Inc. - * Portions Copyright (C) 2003 Red Hat Inc -- * Portions Copyright (C) 2005-2009 MontaVista Software, Inc. -+ * Portions Copyright (C) 2005-2007 MontaVista Software, Inc. - * - * - * TODO -@@ -25,7 +25,7 @@ - #include - - #define DRV_NAME "pata_hpt3x2n" --#define DRV_VERSION "0.3.8" -+#define DRV_VERSION "0.3.4" - - enum { - HPT_PCI_FAST = (1 << 31), -@@ -185,8 +185,9 @@ static void hpt3x2n_set_piomode(struct ata_port *ap, struct ata_device *adev) - - pci_read_config_dword(pdev, addr1, ®); - mode = hpt3x2n_find_mode(ap, adev->pio_mode); -- mode &= 0xCFC3FFFF; /* Leave DMA bits alone */ -- reg &= ~0xCFC3FFFF; /* Strip timing bits */ -+ mode &= ~0x8000000; /* No FIFO in PIO */ -+ mode &= ~0x30070000; /* Leave config bits alone */ -+ reg &= 0x30070000; /* Strip timing bits */ - pci_write_config_dword(pdev, addr1, reg | mode); - } - -@@ -203,7 +204,8 @@ static void hpt3x2n_set_dmamode(struct ata_port *ap, struct ata_device *adev) - { - struct pci_dev *pdev = to_pci_dev(ap->host->dev); - u32 addr1, addr2; -- u32 reg, mode, mask; -+ u32 reg; -+ u32 mode; - u8 fast; - - addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no); -@@ -214,12 +216,11 @@ static void hpt3x2n_set_dmamode(struct ata_port *ap, struct ata_device *adev) - fast &= ~0x07; - pci_write_config_byte(pdev, addr2, fast); - -- mask = adev->dma_mode < XFER_UDMA_0 ? 
0x31C001FF : 0x303C0000; -- - pci_read_config_dword(pdev, addr1, ®); - mode = hpt3x2n_find_mode(ap, adev->dma_mode); -- mode &= mask; -- reg &= ~mask; -+ mode |= 0x8000000; /* FIFO in MWDMA or UDMA */ -+ mode &= ~0xC0000000; /* Leave config bits alone */ -+ reg &= 0xC0000000; /* Strip timing bits */ - pci_write_config_dword(pdev, addr1, reg | mode); - } - -@@ -262,7 +263,7 @@ static void hpt3x2n_bmdma_stop(struct ata_queued_cmd *qc) - - static void hpt3x2n_set_clock(struct ata_port *ap, int source) - { -- void __iomem *bmdma = ap->ioaddr.bmdma_addr - ap->port_no * 8; -+ void __iomem *bmdma = ap->ioaddr.bmdma_addr; - - /* Tristate the bus */ - iowrite8(0x80, bmdma+0x73); -@@ -272,9 +273,9 @@ static void hpt3x2n_set_clock(struct ata_port *ap, int source) - iowrite8(source, bmdma+0x7B); - iowrite8(0xC0, bmdma+0x79); - -- /* Reset state machines, avoid enabling the disabled channels */ -- iowrite8(ioread8(bmdma+0x70) | 0x32, bmdma+0x70); -- iowrite8(ioread8(bmdma+0x74) | 0x32, bmdma+0x74); -+ /* Reset state machines */ -+ iowrite8(0x37, bmdma+0x70); -+ iowrite8(0x37, bmdma+0x74); - - /* Complete reset */ - iowrite8(0x00, bmdma+0x79); -@@ -284,10 +285,21 @@ static void hpt3x2n_set_clock(struct ata_port *ap, int source) - iowrite8(0x00, bmdma+0x77); - } - -+/* Check if our partner interface is busy */ -+ -+static int hpt3x2n_pair_idle(struct ata_port *ap) -+{ -+ struct ata_host *host = ap->host; -+ struct ata_port *pair = host->ports[ap->port_no ^ 1]; -+ -+ if (pair->hsm_task_state == HSM_ST_IDLE) -+ return 1; -+ return 0; -+} -+ - static int hpt3x2n_use_dpll(struct ata_port *ap, int writing) - { - long flags = (long)ap->host->private_data; -- - /* See if we should use the DPLL */ - if (writing) - return USE_DPLL; /* Needed for write */ -@@ -296,35 +308,20 @@ static int hpt3x2n_use_dpll(struct ata_port *ap, int writing) - return 0; - } - --static int hpt3x2n_qc_defer(struct ata_queued_cmd *qc) --{ -- struct ata_port *ap = qc->ap; -- struct ata_port *alt = ap->host->ports[ap->port_no ^ 1]; -- int rc, flags = (long)ap->host->private_data; -- int dpll = hpt3x2n_use_dpll(ap, qc->tf.flags & ATA_TFLAG_WRITE); -- -- /* First apply the usual rules */ -- rc = ata_std_qc_defer(qc); -- if (rc != 0) -- return rc; -- -- if ((flags & USE_DPLL) != dpll && alt->qc_active) -- return ATA_DEFER_PORT; -- return 0; --} -- - static unsigned int hpt3x2n_qc_issue(struct ata_queued_cmd *qc) - { -+ struct ata_taskfile *tf = &qc->tf; - struct ata_port *ap = qc->ap; - int flags = (long)ap->host->private_data; -- int dpll = hpt3x2n_use_dpll(ap, qc->tf.flags & ATA_TFLAG_WRITE); -- -- if ((flags & USE_DPLL) != dpll) { -- flags &= ~USE_DPLL; -- flags |= dpll; -- ap->host->private_data = (void *)(long)flags; - -- hpt3x2n_set_clock(ap, dpll ? 
0x21 : 0x23); -+ if (hpt3x2n_pair_idle(ap)) { -+ int dpll = hpt3x2n_use_dpll(ap, (tf->flags & ATA_TFLAG_WRITE)); -+ if ((flags & USE_DPLL) != dpll) { -+ if (dpll == 1) -+ hpt3x2n_set_clock(ap, 0x21); -+ else -+ hpt3x2n_set_clock(ap, 0x23); -+ } - } - return ata_sff_qc_issue(qc); - } -@@ -341,8 +338,6 @@ static struct ata_port_operations hpt3x2n_port_ops = { - .inherits = &ata_bmdma_port_ops, - - .bmdma_stop = hpt3x2n_bmdma_stop, -- -- .qc_defer = hpt3x2n_qc_defer, - .qc_issue = hpt3x2n_qc_issue, - - .cable_detect = hpt3x2n_cable_detect, -@@ -460,7 +455,7 @@ static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id) - unsigned int f_low, f_high; - int adjust; - unsigned long iobase = pci_resource_start(dev, 4); -- void *hpriv = (void *)USE_DPLL; -+ void *hpriv = NULL; - int rc; - - rc = pcim_enable_device(dev); -@@ -548,7 +543,7 @@ static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id) - /* Set our private data up. We only need a few flags so we use - it directly */ - if (pci_mhz > 60) { -- hpriv = (void *)(PCI66 | USE_DPLL); -+ hpriv = (void *)PCI66; - /* - * On HPT371N, if ATA clock is 66 MHz we must set bit 2 in - * the MISC. register to stretch the UltraDMA Tss timing. -diff --git a/drivers/base/class.c b/drivers/base/class.c -index 6e2c3b0..161746d 100644 ---- a/drivers/base/class.c -+++ b/drivers/base/class.c -@@ -59,8 +59,6 @@ static void class_release(struct kobject *kobj) - else - pr_debug("class '%s' does not have a release() function, " - "be careful\n", class->name); -- -- kfree(cp); - } - - static struct sysfs_ops class_sysfs_ops = { -diff --git a/drivers/base/core.c b/drivers/base/core.c -index 1093179..6bee6af 100644 ---- a/drivers/base/core.c -+++ b/drivers/base/core.c -@@ -56,14 +56,7 @@ static inline int device_is_not_partition(struct device *dev) - */ - const char *dev_driver_string(const struct device *dev) - { -- struct device_driver *drv; -- -- /* dev->driver can change to NULL underneath us because of unbinding, -- * so be careful about accessing it. dev->bus and dev->class should -- * never change once they are set, so they don't need special care. -- */ -- drv = ACCESS_ONCE(dev->driver); -- return drv ? drv->name : -+ return dev->driver ? dev->driver->name : - (dev->bus ? dev->bus->name : - (dev->class ? 
dev->class->name : "")); - } -diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c -index 33faaa2..a1cb5af 100644 ---- a/drivers/base/devtmpfs.c -+++ b/drivers/base/devtmpfs.c -@@ -353,7 +353,6 @@ int __init devtmpfs_init(void) - { - int err; - struct vfsmount *mnt; -- char options[] = "mode=0755"; - - err = register_filesystem(&dev_fs_type); - if (err) { -@@ -362,7 +361,7 @@ int __init devtmpfs_init(void) - return err; - } - -- mnt = kern_mount_data(&dev_fs_type, options); -+ mnt = kern_mount(&dev_fs_type); - if (IS_ERR(mnt)) { - err = PTR_ERR(mnt); - printk(KERN_ERR "devtmpfs: unable to create devtmpfs %i\n", err); -diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c -index 0a4b75f..846d89e 100644 ---- a/drivers/base/power/runtime.c -+++ b/drivers/base/power/runtime.c -@@ -777,7 +777,7 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status) - } - - if (parent) { -- spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING); -+ spin_lock(&parent->power.lock); - - /* - * It is invalid to put an active child under a parent that is -diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c -index ca9c548..92b1263 100644 ---- a/drivers/block/cciss.c -+++ b/drivers/block/cciss.c -@@ -339,9 +339,6 @@ static int cciss_seq_show(struct seq_file *seq, void *v) - if (*pos > h->highest_lun) - return 0; - -- if (drv == NULL) /* it's possible for h->drv[] to have holes. */ -- return 0; -- - if (drv->heads == 0) - return 0; - -diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c -index 68b5957..2ddf03a 100644 ---- a/drivers/block/pktcdvd.c -+++ b/drivers/block/pktcdvd.c -@@ -322,7 +322,7 @@ static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd) - pkt_kobj_remove(pd->kobj_stat); - pkt_kobj_remove(pd->kobj_wqueue); - if (class_pktcdvd) -- device_unregister(pd->dev); -+ device_destroy(class_pktcdvd, pd->pkt_dev); - } - - -diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c -index 1be7631..44bc8bb 100644 ---- a/drivers/bluetooth/btusb.c -+++ b/drivers/bluetooth/btusb.c -@@ -307,7 +307,6 @@ static void btusb_bulk_complete(struct urb *urb) - return; - - usb_anchor_urb(urb, &data->bulk_anchor); -- usb_mark_last_busy(data->udev); - - err = usb_submit_urb(urb, GFP_ATOMIC); - if (err < 0) { -diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c -index 4dcfef0..3cb56a0 100644 ---- a/drivers/char/agp/intel-agp.c -+++ b/drivers/char/agp/intel-agp.c -@@ -178,7 +178,6 @@ static struct _intel_private { - * popup and for the GTT. - */ - int gtt_entries; /* i830+ */ -- int gtt_total_size; - union { - void __iomem *i9xx_flush_page; - void *i8xx_flush_page; -@@ -1154,7 +1153,7 @@ static int intel_i915_configure(void) - readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */ - - if (agp_bridge->driver->needs_scratch_page) { -- for (i = intel_private.gtt_entries; i < intel_private.gtt_total_size; i++) { -+ for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) { - writel(agp_bridge->scratch_page, intel_private.gtt+i); - } - readl(intel_private.gtt+i-1); /* PCI Posting. 
*/ -@@ -1309,8 +1308,6 @@ static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge) - if (!intel_private.gtt) - return -ENOMEM; - -- intel_private.gtt_total_size = gtt_map_size / 4; -- - temp &= 0xfff80000; - - intel_private.registers = ioremap(temp, 128 * 4096); -@@ -1398,8 +1395,6 @@ static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge) - if (!intel_private.gtt) - return -ENOMEM; - -- intel_private.gtt_total_size = gtt_size / 4; -- - intel_private.registers = ioremap(temp, 128 * 4096); - if (!intel_private.registers) { - iounmap(intel_private.gtt); -diff --git a/drivers/char/mem.c b/drivers/char/mem.c -index aef3fb4..a074fce 100644 ---- a/drivers/char/mem.c -+++ b/drivers/char/mem.c -@@ -35,19 +35,6 @@ - # include - #endif - --static inline unsigned long size_inside_page(unsigned long start, -- unsigned long size) --{ -- unsigned long sz; -- -- if (-start & (PAGE_SIZE - 1)) -- sz = -start & (PAGE_SIZE - 1); -- else -- sz = PAGE_SIZE; -- -- return min_t(unsigned long, sz, size); --} -- - /* - * Architectures vary in how they handle caching for addresses - * outside of main memory. -@@ -421,7 +408,6 @@ static ssize_t read_kmem(struct file *file, char __user *buf, - unsigned long p = *ppos; - ssize_t low_count, read, sz; - char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */ -- int err = 0; - - read = 0; - if (p < (unsigned long) high_memory) { -@@ -444,7 +430,15 @@ static ssize_t read_kmem(struct file *file, char __user *buf, - } - #endif - while (low_count > 0) { -- sz = size_inside_page(p, low_count); -+ /* -+ * Handle first page in case it's not aligned -+ */ -+ if (-p & (PAGE_SIZE - 1)) -+ sz = -p & (PAGE_SIZE - 1); -+ else -+ sz = PAGE_SIZE; -+ -+ sz = min_t(unsigned long, sz, low_count); - - /* - * On ia64 if a page has been mapped somewhere as -@@ -468,18 +462,16 @@ static ssize_t read_kmem(struct file *file, char __user *buf, - if (!kbuf) - return -ENOMEM; - while (count > 0) { -- int len = size_inside_page(p, count); -+ int len = count; - -- if (!is_vmalloc_or_module_addr((void *)p)) { -- err = -ENXIO; -- break; -- } -+ if (len > PAGE_SIZE) -+ len = PAGE_SIZE; - len = vread(kbuf, (char *)p, len); - if (!len) - break; - if (copy_to_user(buf, kbuf, len)) { -- err = -EFAULT; -- break; -+ free_page((unsigned long)kbuf); -+ return -EFAULT; - } - count -= len; - buf += len; -@@ -488,8 +480,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf, - } - free_page((unsigned long)kbuf); - } -- *ppos = p; -- return read ? read : err; -+ *ppos = p; -+ return read; - } - - -@@ -518,8 +510,15 @@ do_write_kmem(void *p, unsigned long realp, const char __user * buf, - - while (count > 0) { - char *ptr; -+ /* -+ * Handle first page in case it's not aligned -+ */ -+ if (-realp & (PAGE_SIZE - 1)) -+ sz = -realp & (PAGE_SIZE - 1); -+ else -+ sz = PAGE_SIZE; - -- sz = size_inside_page(realp, count); -+ sz = min_t(unsigned long, sz, count); - - /* - * On ia64 if a page has been mapped somewhere as -@@ -558,7 +557,6 @@ static ssize_t write_kmem(struct file * file, const char __user * buf, - ssize_t virtr = 0; - ssize_t written; - char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */ -- int err = 0; - - if (p < (unsigned long) high_memory) { - -@@ -580,20 +578,20 @@ static ssize_t write_kmem(struct file * file, const char __user * buf, - if (!kbuf) - return wrote ? 
wrote : -ENOMEM; - while (count > 0) { -- int len = size_inside_page(p, count); -+ int len = count; - -- if (!is_vmalloc_or_module_addr((void *)p)) { -- err = -ENXIO; -- break; -- } -+ if (len > PAGE_SIZE) -+ len = PAGE_SIZE; - if (len) { - written = copy_from_user(kbuf, buf, len); - if (written) { -- err = -EFAULT; -- break; -+ if (wrote + virtr) -+ break; -+ free_page((unsigned long)kbuf); -+ return -EFAULT; - } - } -- vwrite(kbuf, (char *)p, len); -+ len = vwrite(kbuf, (char *)p, len); - count -= len; - buf += len; - virtr += len; -@@ -602,8 +600,8 @@ static ssize_t write_kmem(struct file * file, const char __user * buf, - free_page((unsigned long)kbuf); - } - -- *ppos = p; -- return virtr + wrote ? : err; -+ *ppos = p; -+ return virtr + wrote; - } - #endif - -diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c -index dc52f75..d3400b2 100644 ---- a/drivers/char/nozomi.c -+++ b/drivers/char/nozomi.c -@@ -1629,10 +1629,10 @@ static void ntty_close(struct tty_struct *tty, struct file *file) - - dc->open_ttys--; - port->count--; -+ tty_port_tty_set(port, NULL); - - if (port->count == 0) { - DBG1("close: %d", nport->token_dl); -- tty_port_tty_set(port, NULL); - spin_lock_irqsave(&dc->spin_mutex, flags); - dc->last_ier &= ~(nport->token_dl); - writew(dc->last_ier, dc->reg_ier); -diff --git a/drivers/char/random.c b/drivers/char/random.c -index 908ac1f..04b505e 100644 ---- a/drivers/char/random.c -+++ b/drivers/char/random.c -@@ -1051,6 +1051,12 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) - /* like a named pipe */ - } - -+ /* -+ * If we gave the user some bytes, update the access time. -+ */ -+ if (count) -+ file_accessed(file); -+ - return (count ? count : retval); - } - -@@ -1101,6 +1107,7 @@ static ssize_t random_write(struct file *file, const char __user *buffer, - size_t count, loff_t *ppos) - { - size_t ret; -+ struct inode *inode = file->f_path.dentry->d_inode; - - ret = write_pool(&blocking_pool, buffer, count); - if (ret) -@@ -1109,6 +1116,8 @@ static ssize_t random_write(struct file *file, const char __user *buffer, - if (ret) - return ret; - -+ inode->i_mtime = current_fs_time(inode->i_sb); -+ mark_inode_dirty(inode); - return (ssize_t)count; - } - -diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c -index f584407..ecba494 100644 ---- a/drivers/char/tpm/tpm_infineon.c -+++ b/drivers/char/tpm/tpm_infineon.c -@@ -39,12 +39,12 @@ - struct tpm_inf_dev { - int iotype; - -- void __iomem *mem_base; /* MMIO ioremap'd addr */ -- unsigned long map_base; /* phys MMIO base */ -- unsigned long map_size; /* MMIO region size */ -- unsigned int index_off; /* index register offset */ -+ void __iomem *mem_base; /* MMIO ioremap'd addr */ -+ unsigned long map_base; /* phys MMIO base */ -+ unsigned long map_size; /* MMIO region size */ -+ unsigned int index_off; /* index register offset */ - -- unsigned int data_regs; /* Data registers */ -+ unsigned int data_regs; /* Data registers */ - unsigned int data_size; - - unsigned int config_port; /* IO Port config index reg */ -@@ -406,14 +406,14 @@ static const struct tpm_vendor_specific tpm_inf = { - .miscdev = {.fops = &inf_ops,}, - }; - --static const struct pnp_device_id tpm_inf_pnp_tbl[] = { -+static const struct pnp_device_id tpm_pnp_tbl[] = { - /* Infineon TPMs */ - {"IFX0101", 0}, - {"IFX0102", 0}, - {"", 0} - }; - --MODULE_DEVICE_TABLE(pnp, tpm_inf_pnp_tbl); -+MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl); - - static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev, - const struct 
pnp_device_id *dev_id) -@@ -430,7 +430,7 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev, - if (pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) && - !(pnp_port_flags(dev, 0) & IORESOURCE_DISABLED)) { - -- tpm_dev.iotype = TPM_INF_IO_PORT; -+ tpm_dev.iotype = TPM_INF_IO_PORT; - - tpm_dev.config_port = pnp_port_start(dev, 0); - tpm_dev.config_size = pnp_port_len(dev, 0); -@@ -459,9 +459,9 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev, - goto err_last; - } - } else if (pnp_mem_valid(dev, 0) && -- !(pnp_mem_flags(dev, 0) & IORESOURCE_DISABLED)) { -+ !(pnp_mem_flags(dev, 0) & IORESOURCE_DISABLED)) { - -- tpm_dev.iotype = TPM_INF_IO_MEM; -+ tpm_dev.iotype = TPM_INF_IO_MEM; - - tpm_dev.map_base = pnp_mem_start(dev, 0); - tpm_dev.map_size = pnp_mem_len(dev, 0); -@@ -563,11 +563,11 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev, - "product id 0x%02x%02x" - "%s\n", - tpm_dev.iotype == TPM_INF_IO_PORT ? -- tpm_dev.config_port : -- tpm_dev.map_base + tpm_dev.index_off, -+ tpm_dev.config_port : -+ tpm_dev.map_base + tpm_dev.index_off, - tpm_dev.iotype == TPM_INF_IO_PORT ? -- tpm_dev.data_regs : -- tpm_dev.map_base + tpm_dev.data_regs, -+ tpm_dev.data_regs : -+ tpm_dev.map_base + tpm_dev.data_regs, - version[0], version[1], - vendorid[0], vendorid[1], - productid[0], productid[1], chipname); -@@ -607,55 +607,20 @@ static __devexit void tpm_inf_pnp_remove(struct pnp_dev *dev) - iounmap(tpm_dev.mem_base); - release_mem_region(tpm_dev.map_base, tpm_dev.map_size); - } -- tpm_dev_vendor_release(chip); - tpm_remove_hardware(chip->dev); - } - } - --static int tpm_inf_pnp_suspend(struct pnp_dev *dev, pm_message_t pm_state) --{ -- struct tpm_chip *chip = pnp_get_drvdata(dev); -- int rc; -- if (chip) { -- u8 savestate[] = { -- 0, 193, /* TPM_TAG_RQU_COMMAND */ -- 0, 0, 0, 10, /* blob length (in bytes) */ -- 0, 0, 0, 152 /* TPM_ORD_SaveState */ -- }; -- dev_info(&dev->dev, "saving TPM state\n"); -- rc = tpm_inf_send(chip, savestate, sizeof(savestate)); -- if (rc < 0) { -- dev_err(&dev->dev, "error while saving TPM state\n"); -- return rc; -- } -- } -- return 0; --} -- --static int tpm_inf_pnp_resume(struct pnp_dev *dev) --{ -- /* Re-configure TPM after suspending */ -- tpm_config_out(ENABLE_REGISTER_PAIR, TPM_INF_ADDR); -- tpm_config_out(IOLIMH, TPM_INF_ADDR); -- tpm_config_out((tpm_dev.data_regs >> 8) & 0xff, TPM_INF_DATA); -- tpm_config_out(IOLIML, TPM_INF_ADDR); -- tpm_config_out((tpm_dev.data_regs & 0xff), TPM_INF_DATA); -- /* activate register */ -- tpm_config_out(TPM_DAR, TPM_INF_ADDR); -- tpm_config_out(0x01, TPM_INF_DATA); -- tpm_config_out(DISABLE_REGISTER_PAIR, TPM_INF_ADDR); -- /* disable RESET, LP and IRQC */ -- tpm_data_out(RESET_LP_IRQC_DISABLE, CMD); -- return tpm_pm_resume(&dev->dev); --} -- - static struct pnp_driver tpm_inf_pnp_driver = { - .name = "tpm_inf_pnp", -- .id_table = tpm_inf_pnp_tbl, -+ .driver = { -+ .owner = THIS_MODULE, -+ .suspend = tpm_pm_suspend, -+ .resume = tpm_pm_resume, -+ }, -+ .id_table = tpm_pnp_tbl, - .probe = tpm_inf_pnp_probe, -- .suspend = tpm_inf_pnp_suspend, -- .resume = tpm_inf_pnp_resume, -- .remove = __devexit_p(tpm_inf_pnp_remove) -+ .remove = __devexit_p(tpm_inf_pnp_remove), - }; - - static int __init init_inf(void) -@@ -673,5 +638,5 @@ module_exit(cleanup_inf); - - MODULE_AUTHOR("Marcel Selhorst "); - MODULE_DESCRIPTION("Driver for Infineon TPM SLD 9630 TT 1.1 / SLB 9635 TT 1.2"); --MODULE_VERSION("1.9.2"); -+MODULE_VERSION("1.9"); - MODULE_LICENSE("GPL"); -diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c 
-index 05cab2c..59499ee 100644 ---- a/drivers/char/tty_io.c -+++ b/drivers/char/tty_io.c -@@ -1930,10 +1930,8 @@ static int tty_fasync(int fd, struct file *filp, int on) - pid = task_pid(current); - type = PIDTYPE_PID; - } -- get_pid(pid); - spin_unlock_irqrestore(&tty->ctrl_lock, flags); - retval = __f_setown(filp, pid, type, 0); -- put_pid(pid); - if (retval) - goto out; - } else { -diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c -index 537c29a..f060246 100644 ---- a/drivers/connector/connector.c -+++ b/drivers/connector/connector.c -@@ -36,6 +36,17 @@ MODULE_LICENSE("GPL"); - MODULE_AUTHOR("Evgeniy Polyakov "); - MODULE_DESCRIPTION("Generic userspace <-> kernelspace connector."); - -+static u32 cn_idx = CN_IDX_CONNECTOR; -+static u32 cn_val = CN_VAL_CONNECTOR; -+ -+module_param(cn_idx, uint, 0); -+module_param(cn_val, uint, 0); -+MODULE_PARM_DESC(cn_idx, "Connector's main device idx."); -+MODULE_PARM_DESC(cn_val, "Connector's main device val."); -+ -+static DEFINE_MUTEX(notify_lock); -+static LIST_HEAD(notify_list); -+ - static struct cn_dev cdev; - - static int cn_already_initialized; -@@ -199,6 +210,54 @@ static void cn_rx_skb(struct sk_buff *__skb) - } - - /* -+ * Notification routing. -+ * -+ * Gets id and checks if there are notification request for it's idx -+ * and val. If there are such requests notify the listeners with the -+ * given notify event. -+ * -+ */ -+static void cn_notify(struct cb_id *id, u32 notify_event) -+{ -+ struct cn_ctl_entry *ent; -+ -+ mutex_lock(¬ify_lock); -+ list_for_each_entry(ent, ¬ify_list, notify_entry) { -+ int i; -+ struct cn_notify_req *req; -+ struct cn_ctl_msg *ctl = ent->msg; -+ int idx_found, val_found; -+ -+ idx_found = val_found = 0; -+ -+ req = (struct cn_notify_req *)ctl->data; -+ for (i = 0; i < ctl->idx_notify_num; ++i, ++req) { -+ if (id->idx >= req->first && -+ id->idx < req->first + req->range) { -+ idx_found = 1; -+ break; -+ } -+ } -+ -+ for (i = 0; i < ctl->val_notify_num; ++i, ++req) { -+ if (id->val >= req->first && -+ id->val < req->first + req->range) { -+ val_found = 1; -+ break; -+ } -+ } -+ -+ if (idx_found && val_found) { -+ struct cn_msg m = { .ack = notify_event, }; -+ -+ memcpy(&m.id, id, sizeof(m.id)); -+ cn_netlink_send(&m, ctl->group, GFP_KERNEL); -+ } -+ } -+ mutex_unlock(¬ify_lock); -+} -+ -+/* - * Callback add routing - adds callback with given ID and name. - * If there is registered callback with the same ID it will not be added. - * -@@ -217,6 +276,8 @@ int cn_add_callback(struct cb_id *id, char *name, - if (err) - return err; - -+ cn_notify(id, 0); -+ - return 0; - } - EXPORT_SYMBOL_GPL(cn_add_callback); -@@ -234,9 +295,111 @@ void cn_del_callback(struct cb_id *id) - struct cn_dev *dev = &cdev; - - cn_queue_del_callback(dev->cbdev, id); -+ cn_notify(id, 1); - } - EXPORT_SYMBOL_GPL(cn_del_callback); - -+/* -+ * Checks two connector's control messages to be the same. -+ * Returns 1 if they are the same or if the first one is corrupted. 
-+ */ -+static int cn_ctl_msg_equals(struct cn_ctl_msg *m1, struct cn_ctl_msg *m2) -+{ -+ int i; -+ struct cn_notify_req *req1, *req2; -+ -+ if (m1->idx_notify_num != m2->idx_notify_num) -+ return 0; -+ -+ if (m1->val_notify_num != m2->val_notify_num) -+ return 0; -+ -+ if (m1->len != m2->len) -+ return 0; -+ -+ if ((m1->idx_notify_num + m1->val_notify_num) * sizeof(*req1) != -+ m1->len) -+ return 1; -+ -+ req1 = (struct cn_notify_req *)m1->data; -+ req2 = (struct cn_notify_req *)m2->data; -+ -+ for (i = 0; i < m1->idx_notify_num; ++i) { -+ if (req1->first != req2->first || req1->range != req2->range) -+ return 0; -+ req1++; -+ req2++; -+ } -+ -+ for (i = 0; i < m1->val_notify_num; ++i) { -+ if (req1->first != req2->first || req1->range != req2->range) -+ return 0; -+ req1++; -+ req2++; -+ } -+ -+ return 1; -+} -+ -+/* -+ * Main connector device's callback. -+ * -+ * Used for notification of a request's processing. -+ */ -+static void cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp) -+{ -+ struct cn_ctl_msg *ctl; -+ struct cn_ctl_entry *ent; -+ u32 size; -+ -+ if (msg->len < sizeof(*ctl)) -+ return; -+ -+ ctl = (struct cn_ctl_msg *)msg->data; -+ -+ size = (sizeof(*ctl) + ((ctl->idx_notify_num + -+ ctl->val_notify_num) * -+ sizeof(struct cn_notify_req))); -+ -+ if (msg->len != size) -+ return; -+ -+ if (ctl->len + sizeof(*ctl) != msg->len) -+ return; -+ -+ /* -+ * Remove notification. -+ */ -+ if (ctl->group == 0) { -+ struct cn_ctl_entry *n; -+ -+ mutex_lock(¬ify_lock); -+ list_for_each_entry_safe(ent, n, ¬ify_list, notify_entry) { -+ if (cn_ctl_msg_equals(ent->msg, ctl)) { -+ list_del(&ent->notify_entry); -+ kfree(ent); -+ } -+ } -+ mutex_unlock(¬ify_lock); -+ -+ return; -+ } -+ -+ size += sizeof(*ent); -+ -+ ent = kzalloc(size, GFP_KERNEL); -+ if (!ent) -+ return; -+ -+ ent->msg = (struct cn_ctl_msg *)(ent + 1); -+ -+ memcpy(ent->msg, ctl, size - sizeof(*ent)); -+ -+ mutex_lock(¬ify_lock); -+ list_add(&ent->notify_entry, ¬ify_list); -+ mutex_unlock(¬ify_lock); -+} -+ - static int cn_proc_show(struct seq_file *m, void *v) - { - struct cn_queue_dev *dev = cdev.cbdev; -@@ -274,8 +437,11 @@ static const struct file_operations cn_file_ops = { - static int __devinit cn_init(void) - { - struct cn_dev *dev = &cdev; -+ int err; - - dev->input = cn_rx_skb; -+ dev->id.idx = cn_idx; -+ dev->id.val = cn_val; - - dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR, - CN_NETLINK_USERS + 0xf, -@@ -291,6 +457,14 @@ static int __devinit cn_init(void) - - cn_already_initialized = 1; - -+ err = cn_add_callback(&dev->id, "connector", &cn_callback); -+ if (err) { -+ cn_already_initialized = 0; -+ cn_queue_free_dev(dev->cbdev); -+ netlink_kernel_release(dev->nls); -+ return -EINVAL; -+ } -+ - proc_net_fops_create(&init_net, "connector", S_IRUGO, &cn_file_ops); - - return 0; -@@ -304,6 +478,7 @@ static void __devexit cn_fini(void) - - proc_net_remove(&init_net, "connector"); - -+ cn_del_callback(&dev->id); - cn_queue_free_dev(dev->cbdev); - netlink_kernel_release(dev->nls); - } -diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c -index 73655ae..6810443 100644 ---- a/drivers/cpuidle/governors/menu.c -+++ b/drivers/cpuidle/governors/menu.c -@@ -18,7 +18,6 @@ - #include - #include - #include --#include - - #define BUCKETS 12 - #define RESOLUTION 1024 -@@ -170,12 +169,6 @@ static DEFINE_PER_CPU(struct menu_device, menu_devices); - - static void menu_update(struct cpuidle_device *dev); - --/* This implements DIV_ROUND_CLOSEST but avoids 64 bit division */ 
--static u64 div_round64(u64 dividend, u32 divisor) --{ -- return div_u64(dividend + (divisor / 2), divisor); --} -- - /** - * menu_select - selects the next idle state to enter - * @dev: the CPU -@@ -216,8 +209,9 @@ static int menu_select(struct cpuidle_device *dev) - data->correction_factor[data->bucket] = RESOLUTION * DECAY; - - /* Make sure to round up for half microseconds */ -- data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket], -- RESOLUTION * DECAY); -+ data->predicted_us = DIV_ROUND_CLOSEST( -+ data->expected_us * data->correction_factor[data->bucket], -+ RESOLUTION * DECAY); - - /* - * We want to default to C1 (hlt), not to busy polling -diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c -index d3a27e0..0af8057 100644 ---- a/drivers/crypto/padlock-sha.c -+++ b/drivers/crypto/padlock-sha.c -@@ -57,23 +57,6 @@ static int padlock_sha_update(struct shash_desc *desc, - return crypto_shash_update(&dctx->fallback, data, length); - } - --static int padlock_sha_export(struct shash_desc *desc, void *out) --{ -- struct padlock_sha_desc *dctx = shash_desc_ctx(desc); -- -- return crypto_shash_export(&dctx->fallback, out); --} -- --static int padlock_sha_import(struct shash_desc *desc, const void *in) --{ -- struct padlock_sha_desc *dctx = shash_desc_ctx(desc); -- struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm); -- -- dctx->fallback.tfm = ctx->fallback; -- dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; -- return crypto_shash_import(&dctx->fallback, in); --} -- - static inline void padlock_output_block(uint32_t *src, - uint32_t *dst, size_t count) - { -@@ -252,10 +235,7 @@ static struct shash_alg sha1_alg = { - .update = padlock_sha_update, - .finup = padlock_sha1_finup, - .final = padlock_sha1_final, -- .export = padlock_sha_export, -- .import = padlock_sha_import, - .descsize = sizeof(struct padlock_sha_desc), -- .statesize = sizeof(struct sha1_state), - .base = { - .cra_name = "sha1", - .cra_driver_name = "sha1-padlock", -@@ -276,10 +256,7 @@ static struct shash_alg sha256_alg = { - .update = padlock_sha_update, - .finup = padlock_sha256_finup, - .final = padlock_sha256_final, -- .export = padlock_sha_export, -- .import = padlock_sha_import, - .descsize = sizeof(struct padlock_sha_desc), -- .statesize = sizeof(struct sha256_state), - .base = { - .cra_name = "sha256", - .cra_driver_name = "sha256-padlock", -diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c -index c558fa1..7585c41 100644 ---- a/drivers/dma/at_hdmac.c -+++ b/drivers/dma/at_hdmac.c -@@ -815,7 +815,7 @@ atc_is_tx_complete(struct dma_chan *chan, - dev_vdbg(chan2dev(chan), "is_tx_complete: %d (d%d, u%d)\n", - cookie, done ? *done : 0, used ? 
*used : 0); - -- spin_lock_bh(&atchan->lock); -+ spin_lock_bh(atchan->lock); - - last_complete = atchan->completed_cookie; - last_used = chan->cookie; -@@ -830,7 +830,7 @@ atc_is_tx_complete(struct dma_chan *chan, - ret = dma_async_is_complete(cookie, last_complete, last_used); - } - -- spin_unlock_bh(&atchan->lock); -+ spin_unlock_bh(atchan->lock); - - if (done) - *done = last_complete; -diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c -index dcc4ab7..c524d36 100644 ---- a/drivers/dma/ioat/dma.c -+++ b/drivers/dma/ioat/dma.c -@@ -1032,7 +1032,7 @@ int __devinit ioat_probe(struct ioatdma_device *device) - dma->dev = &pdev->dev; - - if (!dma->chancnt) { -- dev_err(dev, "channel enumeration error\n"); -+ dev_err(dev, "zero channels detected\n"); - goto err_setup_interrupts; - } - -diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h -index bbc3e78..45edde9 100644 ---- a/drivers/dma/ioat/dma.h -+++ b/drivers/dma/ioat/dma.h -@@ -60,7 +60,6 @@ - * @dca: direct cache access context - * @intr_quirk: interrupt setup quirk (for ioat_v1 devices) - * @enumerate_channels: hw version specific channel enumeration -- * @reset_hw: hw version specific channel (re)initialization - * @cleanup_tasklet: select between the v2 and v3 cleanup routines - * @timer_fn: select between the v2 and v3 timer watchdog routines - * @self_test: hardware version specific self test for each supported op type -@@ -79,7 +78,6 @@ struct ioatdma_device { - struct dca_provider *dca; - void (*intr_quirk)(struct ioatdma_device *device); - int (*enumerate_channels)(struct ioatdma_device *device); -- int (*reset_hw)(struct ioat_chan_common *chan); - void (*cleanup_tasklet)(unsigned long data); - void (*timer_fn)(unsigned long data); - int (*self_test)(struct ioatdma_device *device); -@@ -266,22 +264,6 @@ static inline void ioat_suspend(struct ioat_chan_common *chan) - writeb(IOAT_CHANCMD_SUSPEND, chan->reg_base + IOAT_CHANCMD_OFFSET(ver)); - } - --static inline void ioat_reset(struct ioat_chan_common *chan) --{ -- u8 ver = chan->device->version; -- -- writeb(IOAT_CHANCMD_RESET, chan->reg_base + IOAT_CHANCMD_OFFSET(ver)); --} -- --static inline bool ioat_reset_pending(struct ioat_chan_common *chan) --{ -- u8 ver = chan->device->version; -- u8 cmd; -- -- cmd = readb(chan->reg_base + IOAT_CHANCMD_OFFSET(ver)); -- return (cmd & IOAT_CHANCMD_RESET) == IOAT_CHANCMD_RESET; --} -- - static inline void ioat_set_chainaddr(struct ioat_dma_chan *ioat, u64 addr) - { - struct ioat_chan_common *chan = &ioat->base; -diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c -index 5cc37af..8f1f7f0 100644 ---- a/drivers/dma/ioat/dma_v2.c -+++ b/drivers/dma/ioat/dma_v2.c -@@ -239,50 +239,20 @@ void __ioat2_restart_chan(struct ioat2_dma_chan *ioat) - __ioat2_start_null_desc(ioat); - } - --int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo) -+static void ioat2_restart_channel(struct ioat2_dma_chan *ioat) - { -- unsigned long end = jiffies + tmo; -- int err = 0; -+ struct ioat_chan_common *chan = &ioat->base; -+ unsigned long phys_complete; - u32 status; - - status = ioat_chansts(chan); - if (is_ioat_active(status) || is_ioat_idle(status)) - ioat_suspend(chan); - while (is_ioat_active(status) || is_ioat_idle(status)) { -- if (tmo && time_after(jiffies, end)) { -- err = -ETIMEDOUT; -- break; -- } - status = ioat_chansts(chan); - cpu_relax(); - } - -- return err; --} -- --int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo) --{ -- unsigned long end = jiffies + tmo; -- int err = 0; -- -- 
ioat_reset(chan); -- while (ioat_reset_pending(chan)) { -- if (end && time_after(jiffies, end)) { -- err = -ETIMEDOUT; -- break; -- } -- cpu_relax(); -- } -- -- return err; --} -- --static void ioat2_restart_channel(struct ioat2_dma_chan *ioat) --{ -- struct ioat_chan_common *chan = &ioat->base; -- unsigned long phys_complete; -- -- ioat2_quiesce(chan, 0); - if (ioat_cleanup_preamble(chan, &phys_complete)) - __cleanup(ioat, phys_complete); - -@@ -348,19 +318,6 @@ void ioat2_timer_event(unsigned long data) - spin_unlock_bh(&chan->cleanup_lock); - } - --static int ioat2_reset_hw(struct ioat_chan_common *chan) --{ -- /* throw away whatever the channel was doing and get it initialized */ -- u32 chanerr; -- -- ioat2_quiesce(chan, msecs_to_jiffies(100)); -- -- chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); -- writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET); -- -- return ioat2_reset_sync(chan, msecs_to_jiffies(200)); --} -- - /** - * ioat2_enumerate_channels - find and initialize the device's channels - * @device: the device to be enumerated -@@ -403,10 +360,6 @@ int ioat2_enumerate_channels(struct ioatdma_device *device) - (unsigned long) ioat); - ioat->xfercap_log = xfercap_log; - spin_lock_init(&ioat->ring_lock); -- if (device->reset_hw(&ioat->base)) { -- i = 0; -- break; -- } - } - dma->chancnt = i; - return i; -@@ -514,6 +467,7 @@ int ioat2_alloc_chan_resources(struct dma_chan *c) - struct ioat2_dma_chan *ioat = to_ioat2_chan(c); - struct ioat_chan_common *chan = &ioat->base; - struct ioat_ring_ent **ring; -+ u32 chanerr; - int order; - - /* have we already been set up? */ -@@ -523,6 +477,12 @@ int ioat2_alloc_chan_resources(struct dma_chan *c) - /* Setup register to interrupt and write completion status on error */ - writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET); - -+ chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); -+ if (chanerr) { -+ dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr); -+ writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET); -+ } -+ - /* allocate a completion writeback area */ - /* doing 2 32bit writes to mmio since 1 64b write doesn't work */ - chan->completion = pci_pool_alloc(chan->device->completion_pool, -@@ -786,7 +746,13 @@ void ioat2_free_chan_resources(struct dma_chan *c) - tasklet_disable(&chan->cleanup_task); - del_timer_sync(&chan->timer); - device->cleanup_tasklet((unsigned long) ioat); -- device->reset_hw(chan); -+ -+ /* Delay 100ms after reset to allow internal DMA logic to quiesce -+ * before removing DMA descriptor resources. 
-+ */ -+ writeb(IOAT_CHANCMD_RESET, -+ chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version)); -+ mdelay(100); - - spin_lock_bh(&ioat->ring_lock); - descs = ioat2_ring_space(ioat); -@@ -873,7 +839,6 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca) - int err; - - device->enumerate_channels = ioat2_enumerate_channels; -- device->reset_hw = ioat2_reset_hw; - device->cleanup_tasklet = ioat2_cleanup_tasklet; - device->timer_fn = ioat2_timer_event; - device->self_test = ioat_dma_self_test; -diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h -index 3afad8d..1d849ef 100644 ---- a/drivers/dma/ioat/dma_v2.h -+++ b/drivers/dma/ioat/dma_v2.h -@@ -185,8 +185,6 @@ bool reshape_ring(struct ioat2_dma_chan *ioat, int order); - void __ioat2_issue_pending(struct ioat2_dma_chan *ioat); - void ioat2_cleanup_tasklet(unsigned long data); - void ioat2_timer_event(unsigned long data); --int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo); --int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo); - extern struct kobj_type ioat2_ktype; - extern struct kmem_cache *ioat2_cache; - #endif /* IOATDMA_V2_H */ -diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c -index 9908c9e..42f6f10 100644 ---- a/drivers/dma/ioat/dma_v3.c -+++ b/drivers/dma/ioat/dma_v3.c -@@ -650,11 +650,9 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result, - - num_descs = ioat2_xferlen_to_descs(ioat, len); - /* we need 2x the number of descriptors to cover greater than 3 -- * sources (we need 1 extra source in the q-only continuation -- * case and 3 extra sources in the p+q continuation case. -+ * sources - */ -- if (src_cnt + dmaf_p_disabled_continue(flags) > 3 || -- (dmaf_continue(flags) && !dmaf_p_disabled_continue(flags))) { -+ if (src_cnt > 3 || flags & DMA_PREP_CONTINUE) { - with_ext = 1; - num_descs *= 2; - } else -@@ -1130,45 +1128,6 @@ static int __devinit ioat3_dma_self_test(struct ioatdma_device *device) - return 0; - } - --static int ioat3_reset_hw(struct ioat_chan_common *chan) --{ -- /* throw away whatever the channel was doing and get it -- * initialized, with ioat3 specific workarounds -- */ -- struct ioatdma_device *device = chan->device; -- struct pci_dev *pdev = device->pdev; -- u32 chanerr; -- u16 dev_id; -- int err; -- -- ioat2_quiesce(chan, msecs_to_jiffies(100)); -- -- chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); -- writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET); -- -- /* -= IOAT ver.3 workarounds =- */ -- /* Write CHANERRMSK_INT with 3E07h to mask out the errors -- * that can cause stability issues for IOAT ver.3, and clear any -- * pending errors -- */ -- pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07); -- err = pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr); -- if (err) { -- dev_err(&pdev->dev, "channel error register unreachable\n"); -- return err; -- } -- pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr); -- -- /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit -- * (workaround for spurious config parity error after restart) -- */ -- pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id); -- if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) -- pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10); -- -- return ioat2_reset_sync(chan, msecs_to_jiffies(200)); --} -- - int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca) - { - struct pci_dev *pdev = device->pdev; -@@ -1178,10 +1137,10 @@ int __devinit 
ioat3_dma_probe(struct ioatdma_device *device, int dca) - struct ioat_chan_common *chan; - bool is_raid_device = false; - int err; -+ u16 dev_id; - u32 cap; - - device->enumerate_channels = ioat2_enumerate_channels; -- device->reset_hw = ioat3_reset_hw; - device->self_test = ioat3_dma_self_test; - dma = &device->common; - dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock; -@@ -1257,6 +1216,19 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca) - dma->device_prep_dma_xor_val = NULL; - #endif - -+ /* -= IOAT ver.3 workarounds =- */ -+ /* Write CHANERRMSK_INT with 3E07h to mask out the errors -+ * that can cause stability issues for IOAT ver.3 -+ */ -+ pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07); -+ -+ /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit -+ * (workaround for spurious config parity error after restart) -+ */ -+ pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id); -+ if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) -+ pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10); -+ - err = ioat_probe(device); - if (err) - return err; -diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h -index e8ae63b..f015ec1 100644 ---- a/drivers/dma/ioat/registers.h -+++ b/drivers/dma/ioat/registers.h -@@ -27,7 +27,6 @@ - - #define IOAT_PCI_DEVICE_ID_OFFSET 0x02 - #define IOAT_PCI_DMAUNCERRSTS_OFFSET 0x148 --#define IOAT_PCI_CHANERR_INT_OFFSET 0x180 - #define IOAT_PCI_CHANERRMASK_INT_OFFSET 0x184 - - /* MMIO Device Registers */ -diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c -index 01bc8e2..a38831c 100644 ---- a/drivers/edac/amd64_edac.c -+++ b/drivers/edac/amd64_edac.c -@@ -13,8 +13,6 @@ module_param(report_gart_errors, int, 0644); - static int ecc_enable_override; - module_param(ecc_enable_override, int, 0644); - --static struct msr *msrs; -- - /* Lookup table for all possible MC control instances */ - struct amd64_pvt; - static struct mem_ctl_info *mci_lookup[EDAC_MAX_NUMNODES]; -@@ -2620,90 +2618,6 @@ static int amd64_init_csrows(struct mem_ctl_info *mci) - return empty; - } - --/* get all cores on this DCT */ --static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid) --{ -- int cpu; -- -- for_each_online_cpu(cpu) -- if (amd_get_nb_id(cpu) == nid) -- cpumask_set_cpu(cpu, mask); --} -- --/* check MCG_CTL on all the cpus on this node */ --static bool amd64_nb_mce_bank_enabled_on_node(int nid) --{ -- cpumask_var_t mask; -- int cpu, nbe; -- bool ret = false; -- -- if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) { -- amd64_printk(KERN_WARNING, "%s: error allocating mask\n", -- __func__); -- return false; -- } -- -- get_cpus_on_this_dct_cpumask(mask, nid); -- -- rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs); -- -- for_each_cpu(cpu, mask) { -- struct msr *reg = per_cpu_ptr(msrs, cpu); -- nbe = reg->l & K8_MSR_MCGCTL_NBE; -- -- debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n", -- cpu, reg->q, -- (nbe ? 
"enabled" : "disabled")); -- -- if (!nbe) -- goto out; -- } -- ret = true; -- --out: -- free_cpumask_var(mask); -- return ret; --} -- --static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on) --{ -- cpumask_var_t cmask; -- int cpu; -- -- if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) { -- amd64_printk(KERN_WARNING, "%s: error allocating mask\n", -- __func__); -- return false; -- } -- -- get_cpus_on_this_dct_cpumask(cmask, pvt->mc_node_id); -- -- rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs); -- -- for_each_cpu(cpu, cmask) { -- -- struct msr *reg = per_cpu_ptr(msrs, cpu); -- -- if (on) { -- if (reg->l & K8_MSR_MCGCTL_NBE) -- pvt->flags.ecc_report = 1; -- -- reg->l |= K8_MSR_MCGCTL_NBE; -- } else { -- /* -- * Turn off ECC reporting only when it was off before -- */ -- if (!pvt->flags.ecc_report) -- reg->l &= ~K8_MSR_MCGCTL_NBE; -- } -- } -- wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs); -- -- free_cpumask_var(cmask); -- -- return 0; --} -- - /* - * Only if 'ecc_enable_override' is set AND BIOS had ECC disabled, do "we" - * enable it. -@@ -2711,12 +2625,17 @@ static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on) - static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci) - { - struct amd64_pvt *pvt = mci->pvt_info; -- int err = 0; -- u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn; -+ const cpumask_t *cpumask = cpumask_of_node(pvt->mc_node_id); -+ int cpu, idx = 0, err = 0; -+ struct msr msrs[cpumask_weight(cpumask)]; -+ u32 value; -+ u32 mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn; - - if (!ecc_enable_override) - return; - -+ memset(msrs, 0, sizeof(msrs)); -+ - amd64_printk(KERN_WARNING, - "'ecc_enable_override' parameter is active, " - "Enabling AMD ECC hardware now: CAUTION\n"); -@@ -2732,9 +2651,16 @@ static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci) - value |= mask; - pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value); - -- if (amd64_toggle_ecc_err_reporting(pvt, ON)) -- amd64_printk(KERN_WARNING, "Error enabling ECC reporting over " -- "MCGCTL!\n"); -+ rdmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs); -+ -+ for_each_cpu(cpu, cpumask) { -+ if (msrs[idx].l & K8_MSR_MCGCTL_NBE) -+ set_bit(idx, &pvt->old_mcgctl); -+ -+ msrs[idx].l |= K8_MSR_MCGCTL_NBE; -+ idx++; -+ } -+ wrmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs); - - err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &value); - if (err) -@@ -2775,12 +2701,17 @@ static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci) - - static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt) - { -- int err = 0; -- u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn; -+ const cpumask_t *cpumask = cpumask_of_node(pvt->mc_node_id); -+ int cpu, idx = 0, err = 0; -+ struct msr msrs[cpumask_weight(cpumask)]; -+ u32 value; -+ u32 mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn; - - if (!pvt->nbctl_mcgctl_saved) - return; - -+ memset(msrs, 0, sizeof(msrs)); -+ - err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCTL, &value); - if (err) - debugf0("Reading K8_NBCTL failed\n"); -@@ -2790,9 +2721,66 @@ static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt) - /* restore the NB Enable MCGCTL bit */ - pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value); - -- if (amd64_toggle_ecc_err_reporting(pvt, OFF)) -- amd64_printk(KERN_WARNING, "Error restoring ECC reporting over " -- "MCGCTL!\n"); -+ rdmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs); -+ -+ for_each_cpu(cpu, cpumask) { -+ msrs[idx].l &= ~K8_MSR_MCGCTL_NBE; -+ msrs[idx].l |= -+ test_bit(idx, 
&pvt->old_mcgctl) << K8_MSR_MCGCTL_NBE; -+ idx++; -+ } -+ -+ wrmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs); -+} -+ -+/* get all cores on this DCT */ -+static void get_cpus_on_this_dct_cpumask(cpumask_t *mask, int nid) -+{ -+ int cpu; -+ -+ for_each_online_cpu(cpu) -+ if (amd_get_nb_id(cpu) == nid) -+ cpumask_set_cpu(cpu, mask); -+} -+ -+/* check MCG_CTL on all the cpus on this node */ -+static bool amd64_nb_mce_bank_enabled_on_node(int nid) -+{ -+ cpumask_t mask; -+ struct msr *msrs; -+ int cpu, nbe, idx = 0; -+ bool ret = false; -+ -+ cpumask_clear(&mask); -+ -+ get_cpus_on_this_dct_cpumask(&mask, nid); -+ -+ msrs = kzalloc(sizeof(struct msr) * cpumask_weight(&mask), GFP_KERNEL); -+ if (!msrs) { -+ amd64_printk(KERN_WARNING, "%s: error allocating msrs\n", -+ __func__); -+ return false; -+ } -+ -+ rdmsr_on_cpus(&mask, MSR_IA32_MCG_CTL, msrs); -+ -+ for_each_cpu(cpu, &mask) { -+ nbe = msrs[idx].l & K8_MSR_MCGCTL_NBE; -+ -+ debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n", -+ cpu, msrs[idx].q, -+ (nbe ? "enabled" : "disabled")); -+ -+ if (!nbe) -+ goto out; -+ -+ idx++; -+ } -+ ret = true; -+ -+out: -+ kfree(msrs); -+ return ret; - } - - /* -@@ -2801,11 +2789,10 @@ static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt) - * the memory system completely. A command line option allows to force-enable - * hardware ECC later in amd64_enable_ecc_error_reporting(). - */ --static const char *ecc_msg = -- "ECC disabled in the BIOS or no ECC capability, module will not load.\n" -- " Either enable ECC checking or force module loading by setting " -- "'ecc_enable_override'.\n" -- " (Note that use of the override may cause unknown side effects.)\n"; -+static const char *ecc_warning = -+ "WARNING: ECC is disabled by BIOS. Module will NOT be loaded.\n" -+ " Either Enable ECC in the BIOS, or set 'ecc_enable_override'.\n" -+ " Also, use of the override can cause unknown side effects.\n"; - - static int amd64_check_ecc_enabled(struct amd64_pvt *pvt) - { -@@ -2820,7 +2807,7 @@ static int amd64_check_ecc_enabled(struct amd64_pvt *pvt) - - ecc_enabled = !!(value & K8_NBCFG_ECC_ENABLE); - if (!ecc_enabled) -- amd64_printk(KERN_NOTICE, "This node reports that Memory ECC " -+ amd64_printk(KERN_WARNING, "This node reports that Memory ECC " - "is currently disabled, set F3x%x[22] (%s).\n", - K8_NBCFG, pci_name(pvt->misc_f3_ctl)); - else -@@ -2828,17 +2815,18 @@ static int amd64_check_ecc_enabled(struct amd64_pvt *pvt) - - nb_mce_en = amd64_nb_mce_bank_enabled_on_node(pvt->mc_node_id); - if (!nb_mce_en) -- amd64_printk(KERN_NOTICE, "NB MCE bank disabled, set MSR " -+ amd64_printk(KERN_WARNING, "NB MCE bank disabled, set MSR " - "0x%08x[4] on node %d to enable.\n", - MSR_IA32_MCG_CTL, pvt->mc_node_id); - - if (!ecc_enabled || !nb_mce_en) { - if (!ecc_enable_override) { -- amd64_printk(KERN_NOTICE, "%s", ecc_msg); -+ amd64_printk(KERN_WARNING, "%s", ecc_warning); - return -ENODEV; - } -+ } else -+ /* CLEAR the override, since BIOS controlled it */ - ecc_enable_override = 0; -- } - - return 0; - } -@@ -2921,6 +2909,7 @@ static int amd64_probe_one_instance(struct pci_dev *dram_f2_ctl, - pvt->ext_model = boot_cpu_data.x86_model >> 4; - pvt->mc_type_index = mc_type_index; - pvt->ops = family_ops(mc_type_index); -+ pvt->old_mcgctl = 0; - - /* - * We have the dram_f2_ctl device as an argument, now go reserve its -@@ -3082,15 +3071,16 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev) - - amd64_free_mc_sibling_devices(pvt); - -+ kfree(pvt); -+ mci->pvt_info = NULL; -+ -+ 
mci_lookup[pvt->mc_node_id] = NULL; -+ - /* unregister from EDAC MCE */ - amd_report_gart_errors(false); - amd_unregister_ecc_decoder(amd64_decode_bus_error); - - /* Free the EDAC CORE resources */ -- mci->pvt_info = NULL; -- mci_lookup[pvt->mc_node_id] = NULL; -- -- kfree(pvt); - edac_mc_free(mci); - } - -@@ -3167,29 +3157,23 @@ static void amd64_setup_pci_device(void) - static int __init amd64_edac_init(void) - { - int nb, err = -ENODEV; -- bool load_ok = false; - - edac_printk(KERN_INFO, EDAC_MOD_STR, EDAC_AMD64_VERSION "\n"); - - opstate_init(); - - if (cache_k8_northbridges() < 0) -- goto err_ret; -- -- msrs = msrs_alloc(); -- if (!msrs) -- goto err_ret; -+ return err; - - err = pci_register_driver(&amd64_pci_driver); - if (err) -- goto err_pci; -+ return err; - - /* - * At this point, the array 'pvt_lookup[]' contains pointers to alloc'd - * amd64_pvt structs. These will be used in the 2nd stage init function - * to finish initialization of the MC instances. - */ -- err = -ENODEV; - for (nb = 0; nb < num_k8_northbridges; nb++) { - if (!pvt_lookup[nb]) - continue; -@@ -3197,21 +3181,16 @@ static int __init amd64_edac_init(void) - err = amd64_init_2nd_stage(pvt_lookup[nb]); - if (err) - goto err_2nd_stage; -- -- load_ok = true; - } - -- if (load_ok) { -- amd64_setup_pci_device(); -- return 0; -- } -+ amd64_setup_pci_device(); -+ -+ return 0; - - err_2nd_stage: -+ debugf0("2nd stage failed\n"); - pci_unregister_driver(&amd64_pci_driver); --err_pci: -- msrs_free(msrs); -- msrs = NULL; --err_ret: -+ - return err; - } - -@@ -3221,9 +3200,6 @@ static void __exit amd64_edac_exit(void) - edac_pci_release_generic_ctl(amd64_ctl_pci); - - pci_unregister_driver(&amd64_pci_driver); -- -- msrs_free(msrs); -- msrs = NULL; - } - - module_init(amd64_edac_init); -diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h -index bba6c94..c6f359a 100644 ---- a/drivers/edac/amd64_edac.h -+++ b/drivers/edac/amd64_edac.h -@@ -147,8 +147,6 @@ - #define MAX_CS_COUNT 8 - #define DRAM_REG_COUNT 8 - --#define ON true --#define OFF false - - /* - * PCI-defined configuration space registers -@@ -388,7 +386,10 @@ enum { - #define K8_NBCAP_DUAL_NODE BIT(1) - #define K8_NBCAP_DCT_DUAL BIT(0) - --/* MSRs */ -+/* -+ * MSR Regs -+ */ -+#define K8_MSR_MCGCTL 0x017b - #define K8_MSR_MCGCTL_NBE BIT(4) - - #define K8_MSR_MC4CTL 0x0410 -@@ -486,6 +487,7 @@ struct amd64_pvt { - /* Save old hw registers' values before we modified them */ - u32 nbctl_mcgctl_saved; /* When true, following 2 are valid */ - u32 old_nbctl; -+ unsigned long old_mcgctl; /* per core on this node */ - - /* MC Type Index value: socket F vs Family 10h */ - u32 mc_type_index; -@@ -493,7 +495,6 @@ struct amd64_pvt { - /* misc settings */ - struct flags { - unsigned long cf8_extcfg:1; -- unsigned long ecc_report:1; - } flags; - }; - -diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c -index adc10a2..77a9579 100644 ---- a/drivers/edac/i5000_edac.c -+++ b/drivers/edac/i5000_edac.c -@@ -577,13 +577,7 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci, - debugf0("\tUncorrected bits= 0x%x\n", ue_errors); - - branch = EXTRACT_FBDCHAN_INDX(info->ferr_nf_fbd); -- -- /* -- * According with i5000 datasheet, bit 28 has no significance -- * for errors M4Err-M12Err and M17Err-M21Err, on FERR_NF_FBD -- */ -- channel = branch & 2; -- -+ channel = branch; - bank = NREC_BANK(info->nrecmema); - rank = NREC_RANK(info->nrecmema); - rdwr = NREC_RDWR(info->nrecmema); -diff --git a/drivers/firewire/core-card.c 
b/drivers/firewire/core-card.c -index ed635ae..e4864e8 100644 ---- a/drivers/firewire/core-card.c -+++ b/drivers/firewire/core-card.c -@@ -57,9 +57,6 @@ static LIST_HEAD(card_list); - static LIST_HEAD(descriptor_list); - static int descriptor_count; - --/* ROM header, bus info block, root dir header, capabilities = 7 quadlets */ --static size_t config_rom_length = 1 + 4 + 1 + 1; -- - #define BIB_CRC(v) ((v) << 0) - #define BIB_CRC_LENGTH(v) ((v) << 16) - #define BIB_INFO_LENGTH(v) ((v) << 24) -@@ -75,7 +72,7 @@ static size_t config_rom_length = 1 + 4 + 1 + 1; - #define BIB_CMC ((1) << 30) - #define BIB_IMC ((1) << 31) - --static u32 *generate_config_rom(struct fw_card *card) -+static u32 *generate_config_rom(struct fw_card *card, size_t *config_rom_length) - { - struct fw_descriptor *desc; - static u32 config_rom[256]; -@@ -134,7 +131,7 @@ static u32 *generate_config_rom(struct fw_card *card) - for (i = 0; i < j; i += length + 1) - length = fw_compute_block_crc(config_rom + i); - -- WARN_ON(j != config_rom_length); -+ *config_rom_length = j; - - return config_rom; - } -@@ -143,24 +140,17 @@ static void update_config_roms(void) - { - struct fw_card *card; - u32 *config_rom; -+ size_t length; - - list_for_each_entry (card, &card_list, link) { -- config_rom = generate_config_rom(card); -- card->driver->set_config_rom(card, config_rom, -- config_rom_length); -+ config_rom = generate_config_rom(card, &length); -+ card->driver->set_config_rom(card, config_rom, length); - } - } - --static size_t required_space(struct fw_descriptor *desc) --{ -- /* descriptor + entry into root dir + optional immediate entry */ -- return desc->length + 1 + (desc->immediate > 0 ? 1 : 0); --} -- - int fw_core_add_descriptor(struct fw_descriptor *desc) - { - size_t i; -- int ret; - - /* - * Check descriptor is valid; the length of all blocks in the -@@ -176,21 +166,15 @@ int fw_core_add_descriptor(struct fw_descriptor *desc) - - mutex_lock(&card_mutex); - -- if (config_rom_length + required_space(desc) > 256) { -- ret = -EBUSY; -- } else { -- list_add_tail(&desc->link, &descriptor_list); -- config_rom_length += required_space(desc); -+ list_add_tail(&desc->link, &descriptor_list); -+ descriptor_count++; -+ if (desc->immediate > 0) - descriptor_count++; -- if (desc->immediate > 0) -- descriptor_count++; -- update_config_roms(); -- ret = 0; -- } -+ update_config_roms(); - - mutex_unlock(&card_mutex); - -- return ret; -+ return 0; - } - EXPORT_SYMBOL(fw_core_add_descriptor); - -@@ -199,7 +183,6 @@ void fw_core_remove_descriptor(struct fw_descriptor *desc) - mutex_lock(&card_mutex); - - list_del(&desc->link); -- config_rom_length -= required_space(desc); - descriptor_count--; - if (desc->immediate > 0) - descriptor_count--; -@@ -453,6 +436,7 @@ int fw_card_add(struct fw_card *card, - u32 max_receive, u32 link_speed, u64 guid) - { - u32 *config_rom; -+ size_t length; - int ret; - - card->max_receive = max_receive; -@@ -461,8 +445,8 @@ int fw_card_add(struct fw_card *card, - - mutex_lock(&card_mutex); - -- config_rom = generate_config_rom(card); -- ret = card->driver->enable(card, config_rom, config_rom_length); -+ config_rom = generate_config_rom(card, &length); -+ ret = card->driver->enable(card, config_rom, length); - if (ret == 0) - list_add_tail(&card->link, &card_list); - -diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c -index 720b39b..94260aa 100644 ---- a/drivers/firewire/ohci.c -+++ b/drivers/firewire/ohci.c -@@ -2209,13 +2209,6 @@ static int ohci_queue_iso_receive_dualbuffer(struct fw_iso_context 
*base, - page = payload >> PAGE_SHIFT; - offset = payload & ~PAGE_MASK; - rest = p->payload_length; -- /* -- * The controllers I've tested have not worked correctly when -- * second_req_count is zero. Rather than do something we know won't -- * work, return an error -- */ -- if (rest == 0) -- return -EINVAL; - - /* FIXME: make packet-per-buffer/dual-buffer a context option */ - while (rest > 0) { -@@ -2269,7 +2262,7 @@ static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base, - unsigned long payload) - { - struct iso_context *ctx = container_of(base, struct iso_context, base); -- struct descriptor *d, *pd; -+ struct descriptor *d = NULL, *pd = NULL; - struct fw_iso_packet *p = packet; - dma_addr_t d_bus, page_bus; - u32 z, header_z, rest; -@@ -2307,9 +2300,8 @@ static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base, - d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d))); - - rest = payload_per_buffer; -- pd = d; - for (j = 1; j < z; j++) { -- pd++; -+ pd = d + j; - pd->control = cpu_to_le16(DESCRIPTOR_STATUS | - DESCRIPTOR_INPUT_MORE); - -@@ -2412,7 +2404,6 @@ static void ohci_pmac_off(struct pci_dev *dev) - - #define PCI_VENDOR_ID_AGERE PCI_VENDOR_ID_ATT - #define PCI_DEVICE_ID_AGERE_FW643 0x5901 --#define PCI_DEVICE_ID_TI_TSB43AB23 0x8024 - - static int __devinit pci_probe(struct pci_dev *dev, - const struct pci_device_id *ent) -@@ -2478,8 +2469,7 @@ static int __devinit pci_probe(struct pci_dev *dev, - #if !defined(CONFIG_X86_32) - /* dual-buffer mode is broken with descriptor addresses above 2G */ - if (dev->vendor == PCI_VENDOR_ID_TI && -- (dev->device == PCI_DEVICE_ID_TI_TSB43AB22 || -- dev->device == PCI_DEVICE_ID_TI_TSB43AB23)) -+ dev->device == PCI_DEVICE_ID_TI_TSB43AB22) - ohci->use_dualbuffer = false; - #endif - -diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c -index 3a2ccb0..938100f 100644 ---- a/drivers/firmware/dmi_scan.c -+++ b/drivers/firmware/dmi_scan.c -@@ -429,7 +429,7 @@ static bool dmi_matches(const struct dmi_system_id *dmi) - for (i = 0; i < ARRAY_SIZE(dmi->matches); i++) { - int s = dmi->matches[i].slot; - if (s == DMI_NONE) -- break; -+ continue; - if (dmi_ident[s] - && strstr(dmi_ident[s], dmi->matches[i].substr)) - continue; -@@ -440,15 +440,6 @@ static bool dmi_matches(const struct dmi_system_id *dmi) - } - - /** -- * dmi_is_end_of_table - check for end-of-table marker -- * @dmi: pointer to the dmi_system_id structure to check -- */ --static bool dmi_is_end_of_table(const struct dmi_system_id *dmi) --{ -- return dmi->matches[0].slot == DMI_NONE; --} -- --/** - * dmi_check_system - check system DMI data - * @list: array of dmi_system_id structures to match against - * All non-null elements of the list must match -@@ -466,7 +457,7 @@ int dmi_check_system(const struct dmi_system_id *list) - int count = 0; - const struct dmi_system_id *d; - -- for (d = list; !dmi_is_end_of_table(d); d++) -+ for (d = list; d->ident; d++) - if (dmi_matches(d)) { - count++; - if (d->callback && d->callback(d)) -@@ -493,7 +484,7 @@ const struct dmi_system_id *dmi_first_match(const struct dmi_system_id *list) - { - const struct dmi_system_id *d; - -- for (d = list; !dmi_is_end_of_table(d); d++) -+ for (d = list; d->ident; d++) - if (dmi_matches(d)) - return d; - -diff --git a/drivers/gpu/drm/ati_pcigart.c b/drivers/gpu/drm/ati_pcigart.c -index a1fce68..628eae3 100644 ---- a/drivers/gpu/drm/ati_pcigart.c -+++ b/drivers/gpu/drm/ati_pcigart.c -@@ -39,7 +39,8 @@ static int drm_ati_alloc_pcigart_table(struct drm_device 
*dev, - struct drm_ati_pcigart_info *gart_info) - { - gart_info->table_handle = drm_pci_alloc(dev, gart_info->table_size, -- PAGE_SIZE); -+ PAGE_SIZE, -+ gart_info->table_mask); - if (gart_info->table_handle == NULL) - return -ENOMEM; - -@@ -111,13 +112,6 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga - if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) { - DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n"); - -- if (pci_set_dma_mask(dev->pdev, gart_info->table_mask)) { -- DRM_ERROR("fail to set dma mask to 0x%Lx\n", -- gart_info->table_mask); -- ret = 1; -- goto done; -- } -- - ret = drm_ati_alloc_pcigart_table(dev, gart_info); - if (ret) { - DRM_ERROR("cannot allocate PCI GART page!\n"); -diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c -index 8417cc4..3d09e30 100644 ---- a/drivers/gpu/drm/drm_bufs.c -+++ b/drivers/gpu/drm/drm_bufs.c -@@ -326,7 +326,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset, - * As we're limiting the address to 2^32-1 (or less), - * casting it down to 32 bits is no problem, but we - * need to point to a 64bit variable first. */ -- dmah = drm_pci_alloc(dev, map->size, map->size); -+ dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL); - if (!dmah) { - kfree(map); - return -ENOMEM; -@@ -885,7 +885,7 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request) - - while (entry->buf_count < count) { - -- dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000); -+ dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful); - - if (!dmah) { - /* Set count correctly so we free the proper amount. */ -diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c -index afed886..bbfd110 100644 ---- a/drivers/gpu/drm/drm_crtc_helper.c -+++ b/drivers/gpu/drm/drm_crtc_helper.c -@@ -1020,9 +1020,6 @@ bool drm_helper_initial_config(struct drm_device *dev) - { - int count = 0; - -- /* disable all the possible outputs/crtcs before entering KMS mode */ -- drm_helper_disable_unused_functions(dev); -- - drm_fb_helper_parse_command_line(dev); - - count = drm_helper_probe_connector_modes(dev, -diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c -index 8bf3770..e9dbb48 100644 ---- a/drivers/gpu/drm/drm_gem.c -+++ b/drivers/gpu/drm/drm_gem.c -@@ -142,6 +142,19 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size) - if (IS_ERR(obj->filp)) - goto free; - -+ /* Basically we want to disable the OOM killer and handle ENOMEM -+ * ourselves by sacrificing pages from cached buffers. 
-+ * XXX shmem_file_[gs]et_gfp_mask() -+ */ -+ mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, -+ GFP_HIGHUSER | -+ __GFP_COLD | -+ __GFP_FS | -+ __GFP_RECLAIMABLE | -+ __GFP_NORETRY | -+ __GFP_NOWARN | -+ __GFP_NOMEMALLOC); -+ - kref_init(&obj->refcount); - kref_init(&obj->handlecount); - obj->size = size; -diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c -index 332d743..0a6f0b3 100644 ---- a/drivers/gpu/drm/drm_irq.c -+++ b/drivers/gpu/drm/drm_irq.c -@@ -429,21 +429,15 @@ int drm_vblank_get(struct drm_device *dev, int crtc) - - spin_lock_irqsave(&dev->vbl_lock, irqflags); - /* Going from 0->1 means we have to enable interrupts again */ -- if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) { -- if (!dev->vblank_enabled[crtc]) { -- ret = dev->driver->enable_vblank(dev, crtc); -- DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret); -- if (ret) -- atomic_dec(&dev->vblank_refcount[crtc]); -- else { -- dev->vblank_enabled[crtc] = 1; -- drm_update_vblank_count(dev, crtc); -- } -- } -- } else { -- if (!dev->vblank_enabled[crtc]) { -+ if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1 && -+ !dev->vblank_enabled[crtc]) { -+ ret = dev->driver->enable_vblank(dev, crtc); -+ DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret); -+ if (ret) - atomic_dec(&dev->vblank_refcount[crtc]); -- ret = -EINVAL; -+ else { -+ dev->vblank_enabled[crtc] = 1; -+ drm_update_vblank_count(dev, crtc); - } - } - spin_unlock_irqrestore(&dev->vbl_lock, irqflags); -@@ -470,18 +464,6 @@ void drm_vblank_put(struct drm_device *dev, int crtc) - } - EXPORT_SYMBOL(drm_vblank_put); - --void drm_vblank_off(struct drm_device *dev, int crtc) --{ -- unsigned long irqflags; -- -- spin_lock_irqsave(&dev->vbl_lock, irqflags); -- DRM_WAKEUP(&dev->vbl_queue[crtc]); -- dev->vblank_enabled[crtc] = 0; -- dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc); -- spin_unlock_irqrestore(&dev->vbl_lock, irqflags); --} --EXPORT_SYMBOL(drm_vblank_off); -- - /** - * drm_vblank_pre_modeset - account for vblanks across mode sets - * @dev: DRM device -diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c -index e68ebf9..577094f 100644 ---- a/drivers/gpu/drm/drm_pci.c -+++ b/drivers/gpu/drm/drm_pci.c -@@ -47,7 +47,8 @@ - /** - * \brief Allocate a PCI consistent memory block, for DMA. 
- */ --drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align) -+drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align, -+ dma_addr_t maxaddr) - { - drm_dma_handle_t *dmah; - #if 1 -@@ -62,6 +63,11 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali - if (align > size) - return NULL; - -+ if (pci_set_dma_mask(dev->pdev, maxaddr) != 0) { -+ DRM_ERROR("Setting pci dma mask failed\n"); -+ return NULL; -+ } -+ - dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL); - if (!dmah) - return NULL; -diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c -index 7e859d6..26bf055 100644 ---- a/drivers/gpu/drm/i915/i915_debugfs.c -+++ b/drivers/gpu/drm/i915/i915_debugfs.c -@@ -288,7 +288,7 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data) - list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) { - obj = obj_priv->obj; - if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) { -- ret = i915_gem_object_get_pages(obj, 0); -+ ret = i915_gem_object_get_pages(obj); - if (ret) { - DRM_ERROR("Failed to get pages: %d\n", ret); - spin_unlock(&dev_priv->mm.active_list_lock); -@@ -384,7 +384,37 @@ out: - return 0; - } - -+static int i915_registers_info(struct seq_file *m, void *data) { -+ struct drm_info_node *node = (struct drm_info_node *) m->private; -+ struct drm_device *dev = node->minor->dev; -+ drm_i915_private_t *dev_priv = dev->dev_private; -+ uint32_t reg; -+ -+#define DUMP_RANGE(start, end) \ -+ for (reg=start; reg < end; reg += 4) \ -+ seq_printf(m, "%08x\t%08x\n", reg, I915_READ(reg)); -+ -+ DUMP_RANGE(0x00000, 0x00fff); /* VGA registers */ -+ DUMP_RANGE(0x02000, 0x02fff); /* instruction, memory, interrupt control registers */ -+ DUMP_RANGE(0x03000, 0x031ff); /* FENCE and PPGTT control registers */ -+ DUMP_RANGE(0x03200, 0x03fff); /* frame buffer compression registers */ -+ DUMP_RANGE(0x05000, 0x05fff); /* I/O control registers */ -+ DUMP_RANGE(0x06000, 0x06fff); /* clock control registers */ -+ DUMP_RANGE(0x07000, 0x07fff); /* 3D internal debug registers */ -+ DUMP_RANGE(0x07400, 0x088ff); /* GPE debug registers */ -+ DUMP_RANGE(0x0a000, 0x0afff); /* display palette registers */ -+ DUMP_RANGE(0x10000, 0x13fff); /* MMIO MCHBAR */ -+ DUMP_RANGE(0x30000, 0x3ffff); /* overlay registers */ -+ DUMP_RANGE(0x60000, 0x6ffff); /* display engine pipeline registers */ -+ DUMP_RANGE(0x70000, 0x72fff); /* display and cursor registers */ -+ DUMP_RANGE(0x73000, 0x73fff); /* performance counters */ -+ -+ return 0; -+} -+ -+ - static struct drm_info_list i915_debugfs_list[] = { -+ {"i915_regs", i915_registers_info, 0}, - {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, - {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST}, - {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, -diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c -index eaa1893..e5b138b 100644 ---- a/drivers/gpu/drm/i915/i915_dma.c -+++ b/drivers/gpu/drm/i915/i915_dma.c -@@ -123,7 +123,7 @@ static int i915_init_phys_hws(struct drm_device *dev) - drm_i915_private_t *dev_priv = dev->dev_private; - /* Program Hardware Status Page */ - dev_priv->status_page_dmah = -- drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE); -+ drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff); - - if (!dev_priv->status_page_dmah) { - DRM_ERROR("Can not allocate hardware status page\n"); -@@ -1111,8 +1111,7 @@ static void i915_setup_compression(struct 
drm_device *dev, int size) - { - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_mm_node *compressed_fb, *compressed_llb; -- unsigned long cfb_base; -- unsigned long ll_base = 0; -+ unsigned long cfb_base, ll_base; - - /* Leave 1M for line length buffer & misc. */ - compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0); -@@ -1252,8 +1251,6 @@ static int i915_load_modeset_init(struct drm_device *dev, - if (ret) - goto destroy_ringbuffer; - -- intel_modeset_init(dev); -- - ret = drm_irq_install(dev); - if (ret) - goto destroy_ringbuffer; -@@ -1268,6 +1265,8 @@ static int i915_load_modeset_init(struct drm_device *dev, - - I915_WRITE(INSTPM, (1 << 5) | (1 << 21)); - -+ intel_modeset_init(dev); -+ - drm_helper_initial_config(dev); - - return 0; -diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h -index f5d49a7..a725f65 100644 ---- a/drivers/gpu/drm/i915/i915_drv.h -+++ b/drivers/gpu/drm/i915/i915_drv.h -@@ -467,15 +467,6 @@ typedef struct drm_i915_private { - struct list_head flushing_list; - - /** -- * List of objects currently pending a GPU write flush. -- * -- * All elements on this list will belong to either the -- * active_list or flushing_list, last_rendering_seqno can -- * be used to differentiate between the two elements. -- */ -- struct list_head gpu_write_list; -- -- /** - * LRU list of objects which are not in the ringbuffer and - * are ready to unbind, but are still in the GTT. - * -@@ -555,7 +546,6 @@ typedef struct drm_i915_private { - struct timer_list idle_timer; - bool busy; - u16 orig_clock; -- struct drm_connector *int_lvds_connector; - } drm_i915_private_t; - - /** driver private structure attached to each drm_gem_object */ -@@ -567,8 +557,6 @@ struct drm_i915_gem_object { - - /** This object's place on the active/flushing/inactive lists */ - struct list_head list; -- /** This object's place on GPU write list */ -- struct list_head gpu_write_list; - - /** This object's place on the fenced object LRU */ - struct list_head fence_list; -@@ -825,17 +813,15 @@ void i915_gem_cleanup_ringbuffer(struct drm_device *dev); - int i915_gem_do_init(struct drm_device *dev, unsigned long start, - unsigned long end); - int i915_gem_idle(struct drm_device *dev); --int i915_lp_ring_sync(struct drm_device *dev); - int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); - int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, - int write); --int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj); - int i915_gem_attach_phys_object(struct drm_device *dev, - struct drm_gem_object *obj, int id); - void i915_gem_detach_phys_object(struct drm_device *dev, - struct drm_gem_object *obj); - void i915_gem_free_all_phys_object(struct drm_device *dev); --int i915_gem_object_get_pages(struct drm_gem_object *obj, gfp_t gfpmask); -+int i915_gem_object_get_pages(struct drm_gem_object *obj); - void i915_gem_object_put_pages(struct drm_gem_object *obj); - void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv); - -@@ -971,7 +957,6 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); - #define IS_I85X(dev) ((dev)->pci_device == 0x3582) - #define IS_I855(dev) ((dev)->pci_device == 0x3582) - #define IS_I865G(dev) ((dev)->pci_device == 0x2572) --#define IS_I8XX(dev) (IS_I830(dev) || IS_845G(dev) || IS_I85X(dev) || IS_I865G(dev)) - - #define IS_I915G(dev) ((dev)->pci_device == 0x2582 || (dev)->pci_device == 0x258a) - #define IS_I915GM(dev) ((dev)->pci_device == 0x2592) -@@ 
-1033,12 +1018,9 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); - */ - #define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \ - IS_I915GM(dev))) --#define SUPPORTS_DIGITAL_OUTPUTS(dev) (IS_I9XX(dev) && !IS_IGD(dev)) - #define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IGDNG(dev)) - #define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IGDNG(dev)) - #define SUPPORTS_EDP(dev) (IS_IGDNG_M(dev)) --#define SUPPORTS_TV(dev) (IS_I9XX(dev) && IS_MOBILE(dev) && \ -- !IS_IGDNG(dev) && !IS_IGD(dev)) - #define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev) || IS_I965G(dev)) - /* dsparb controlled by hw only */ - #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IGDNG(dev)) -diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c -index 04da731..abfc27b 100644 ---- a/drivers/gpu/drm/i915/i915_gem.c -+++ b/drivers/gpu/drm/i915/i915_gem.c -@@ -277,7 +277,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, - - mutex_lock(&dev->struct_mutex); - -- ret = i915_gem_object_get_pages(obj, 0); -+ ret = i915_gem_object_get_pages(obj); - if (ret != 0) - goto fail_unlock; - -@@ -321,24 +321,40 @@ fail_unlock: - return ret; - } - -+static inline gfp_t -+i915_gem_object_get_page_gfp_mask (struct drm_gem_object *obj) -+{ -+ return mapping_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping); -+} -+ -+static inline void -+i915_gem_object_set_page_gfp_mask (struct drm_gem_object *obj, gfp_t gfp) -+{ -+ mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, gfp); -+} -+ - static int - i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj) - { - int ret; - -- ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN); -+ ret = i915_gem_object_get_pages(obj); - - /* If we've insufficient memory to map in the pages, attempt - * to make some space by throwing out some old buffers. 
- */ - if (ret == -ENOMEM) { - struct drm_device *dev = obj->dev; -+ gfp_t gfp; - - ret = i915_gem_evict_something(dev, obj->size); - if (ret) - return ret; - -- ret = i915_gem_object_get_pages(obj, 0); -+ gfp = i915_gem_object_get_page_gfp_mask(obj); -+ i915_gem_object_set_page_gfp_mask(obj, gfp & ~__GFP_NORETRY); -+ ret = i915_gem_object_get_pages(obj); -+ i915_gem_object_set_page_gfp_mask (obj, gfp); - } - - return ret; -@@ -774,7 +790,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, - - mutex_lock(&dev->struct_mutex); - -- ret = i915_gem_object_get_pages(obj, 0); -+ ret = i915_gem_object_get_pages(obj); - if (ret != 0) - goto fail_unlock; - -@@ -1272,7 +1288,6 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj) - list->hash.key = list->file_offset_node->start; - if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) { - DRM_ERROR("failed to add to map hash\n"); -- ret = -ENOMEM; - goto out_free_mm; - } - -@@ -1552,8 +1567,6 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj) - else - list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); - -- BUG_ON(!list_empty(&obj_priv->gpu_write_list)); -- - obj_priv->last_rendering_seqno = 0; - if (obj_priv->active) { - obj_priv->active = 0; -@@ -1624,8 +1637,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, - struct drm_i915_gem_object *obj_priv, *next; - - list_for_each_entry_safe(obj_priv, next, -- &dev_priv->mm.gpu_write_list, -- gpu_write_list) { -+ &dev_priv->mm.flushing_list, list) { - struct drm_gem_object *obj = obj_priv->obj; - - if ((obj->write_domain & flush_domains) == -@@ -1633,7 +1645,6 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, - uint32_t old_write_domain = obj->write_domain; - - obj->write_domain = 0; -- list_del_init(&obj_priv->gpu_write_list); - i915_gem_object_move_to_active(obj, seqno); - - trace_i915_gem_object_change_domain(obj, -@@ -1809,8 +1820,12 @@ i915_gem_retire_work_handler(struct work_struct *work) - mutex_unlock(&dev->struct_mutex); - } - -+/** -+ * Waits for a sequence number to be signaled, and cleans up the -+ * request and object lists appropriately for that event. -+ */ - static int --i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible) -+i915_wait_request(struct drm_device *dev, uint32_t seqno) - { - drm_i915_private_t *dev_priv = dev->dev_private; - u32 ier; -@@ -1837,15 +1852,10 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible) - - dev_priv->mm.waiting_gem_seqno = seqno; - i915_user_irq_get(dev); -- if (interruptible) -- ret = wait_event_interruptible(dev_priv->irq_queue, -- i915_seqno_passed(i915_get_gem_seqno(dev), seqno) || -- atomic_read(&dev_priv->mm.wedged)); -- else -- wait_event(dev_priv->irq_queue, -- i915_seqno_passed(i915_get_gem_seqno(dev), seqno) || -- atomic_read(&dev_priv->mm.wedged)); -- -+ ret = wait_event_interruptible(dev_priv->irq_queue, -+ i915_seqno_passed(i915_get_gem_seqno(dev), -+ seqno) || -+ atomic_read(&dev_priv->mm.wedged)); - i915_user_irq_put(dev); - dev_priv->mm.waiting_gem_seqno = 0; - -@@ -1869,34 +1879,6 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible) - return ret; - } - --/** -- * Waits for a sequence number to be signaled, and cleans up the -- * request and object lists appropriately for that event. 
-- */ --static int --i915_wait_request(struct drm_device *dev, uint32_t seqno) --{ -- return i915_do_wait_request(dev, seqno, 1); --} -- --/** -- * Waits for the ring to finish up to the latest request. Usefull for waiting -- * for flip events, e.g for the overlay support. */ --int i915_lp_ring_sync(struct drm_device *dev) --{ -- uint32_t seqno; -- int ret; -- -- seqno = i915_add_request(dev, NULL, 0); -- -- if (seqno == 0) -- return -ENOMEM; -- -- ret = i915_do_wait_request(dev, seqno, 0); -- BUG_ON(ret == -ERESTARTSYS); -- return ret; --} -- - static void - i915_gem_flush(struct drm_device *dev, - uint32_t invalidate_domains, -@@ -1965,7 +1947,7 @@ i915_gem_flush(struct drm_device *dev, - #endif - BEGIN_LP_RING(2); - OUT_RING(cmd); -- OUT_RING(MI_NOOP); -+ OUT_RING(0); /* noop */ - ADVANCE_LP_RING(); - } - } -@@ -2027,6 +2009,9 @@ i915_gem_object_unbind(struct drm_gem_object *obj) - /* blow away mappings if mapped through GTT */ - i915_gem_release_mmap(obj); - -+ if (obj_priv->fence_reg != I915_FENCE_REG_NONE) -+ i915_gem_clear_fence_reg(obj); -+ - /* Move the object to the CPU domain to ensure that - * any possible CPU writes while it's not in the GTT - * are flushed when we go to remap it. This will -@@ -2042,10 +2027,6 @@ i915_gem_object_unbind(struct drm_gem_object *obj) - - BUG_ON(obj_priv->active); - -- /* release the fence reg _after_ flushing */ -- if (obj_priv->fence_reg != I915_FENCE_REG_NONE) -- i915_gem_clear_fence_reg(obj); -- - if (obj_priv->agp_mem != NULL) { - drm_unbind_agp(obj_priv->agp_mem); - drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE); -@@ -2106,8 +2087,8 @@ static int - i915_gem_evict_everything(struct drm_device *dev) - { - drm_i915_private_t *dev_priv = dev->dev_private; -- int ret; - uint32_t seqno; -+ int ret; - bool lists_empty; - - spin_lock(&dev_priv->mm.active_list_lock); -@@ -2129,8 +2110,6 @@ i915_gem_evict_everything(struct drm_device *dev) - if (ret) - return ret; - -- BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); -- - ret = i915_gem_evict_from_inactive_list(dev); - if (ret) - return ret; -@@ -2238,8 +2217,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size) - } - - int --i915_gem_object_get_pages(struct drm_gem_object *obj, -- gfp_t gfpmask) -+i915_gem_object_get_pages(struct drm_gem_object *obj) - { - struct drm_i915_gem_object *obj_priv = obj->driver_private; - int page_count, i; -@@ -2265,10 +2243,7 @@ i915_gem_object_get_pages(struct drm_gem_object *obj, - inode = obj->filp->f_path.dentry->d_inode; - mapping = inode->i_mapping; - for (i = 0; i < page_count; i++) { -- page = read_cache_page_gfp(mapping, i, -- mapping_gfp_mask (mapping) | -- __GFP_COLD | -- gfpmask); -+ page = read_mapping_page(mapping, i, NULL); - if (IS_ERR(page)) { - ret = PTR_ERR(page); - i915_gem_object_put_pages(obj); -@@ -2591,9 +2566,12 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = obj->driver_private; - struct drm_mm_node *free_space; -- gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN; -+ bool retry_alloc = false; - int ret; - -+ if (dev_priv->mm.suspended) -+ return -EBUSY; -+ - if (obj_priv->madv != I915_MADV_WILLNEED) { - DRM_ERROR("Attempting to bind a purgeable object\n"); - return -EINVAL; -@@ -2635,7 +2613,15 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) - DRM_INFO("Binding object of size %zd at 0x%08x\n", - obj->size, obj_priv->gtt_offset); - #endif -- ret = 
i915_gem_object_get_pages(obj, gfpmask); -+ if (retry_alloc) { -+ i915_gem_object_set_page_gfp_mask (obj, -+ i915_gem_object_get_page_gfp_mask (obj) & ~__GFP_NORETRY); -+ } -+ ret = i915_gem_object_get_pages(obj); -+ if (retry_alloc) { -+ i915_gem_object_set_page_gfp_mask (obj, -+ i915_gem_object_get_page_gfp_mask (obj) | __GFP_NORETRY); -+ } - if (ret) { - drm_mm_put_block(obj_priv->gtt_space); - obj_priv->gtt_space = NULL; -@@ -2645,9 +2631,9 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) - ret = i915_gem_evict_something(dev, obj->size); - if (ret) { - /* now try to shrink everyone else */ -- if (gfpmask) { -- gfpmask = 0; -- goto search_free; -+ if (! retry_alloc) { -+ retry_alloc = true; -+ goto search_free; - } - - return ret; -@@ -2725,7 +2711,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj) - old_write_domain = obj->write_domain; - i915_gem_flush(dev, 0, obj->write_domain); - seqno = i915_add_request(dev, NULL, obj->write_domain); -- BUG_ON(obj->write_domain); -+ obj->write_domain = 0; - i915_gem_object_move_to_active(obj, seqno); - - trace_i915_gem_object_change_domain(obj, -@@ -2825,57 +2811,6 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) - return 0; - } - --/* -- * Prepare buffer for display plane. Use uninterruptible for possible flush -- * wait, as in modesetting process we're not supposed to be interrupted. -- */ --int --i915_gem_object_set_to_display_plane(struct drm_gem_object *obj) --{ -- struct drm_device *dev = obj->dev; -- struct drm_i915_gem_object *obj_priv = obj->driver_private; -- uint32_t old_write_domain, old_read_domains; -- int ret; -- -- /* Not valid to be called on unbound objects. */ -- if (obj_priv->gtt_space == NULL) -- return -EINVAL; -- -- i915_gem_object_flush_gpu_write_domain(obj); -- -- /* Wait on any GPU rendering and flushing to occur. */ -- if (obj_priv->active) { --#if WATCH_BUF -- DRM_INFO("%s: object %p wait for seqno %08x\n", -- __func__, obj, obj_priv->last_rendering_seqno); --#endif -- ret = i915_do_wait_request(dev, obj_priv->last_rendering_seqno, 0); -- if (ret != 0) -- return ret; -- } -- -- old_write_domain = obj->write_domain; -- old_read_domains = obj->read_domains; -- -- obj->read_domains &= I915_GEM_DOMAIN_GTT; -- -- i915_gem_object_flush_cpu_write_domain(obj); -- -- /* It should now be out of any other write domains, and we can update -- * the domain values for our changes. -- */ -- BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0); -- obj->read_domains |= I915_GEM_DOMAIN_GTT; -- obj->write_domain = I915_GEM_DOMAIN_GTT; -- obj_priv->dirty = 1; -- -- trace_i915_gem_object_change_domain(obj, -- old_read_domains, -- old_write_domain); -- -- return 0; --} -- - /** - * Moves a single object to the CPU read, and possibly write domain. 
- * -@@ -3796,23 +3731,16 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, - i915_gem_flush(dev, - dev->invalidate_domains, - dev->flush_domains); -- if (dev->flush_domains & I915_GEM_GPU_DOMAINS) -+ if (dev->flush_domains) - (void)i915_add_request(dev, file_priv, - dev->flush_domains); - } - - for (i = 0; i < args->buffer_count; i++) { - struct drm_gem_object *obj = object_list[i]; -- struct drm_i915_gem_object *obj_priv = obj->driver_private; - uint32_t old_write_domain = obj->write_domain; - - obj->write_domain = obj->pending_write_domain; -- if (obj->write_domain) -- list_move_tail(&obj_priv->gpu_write_list, -- &dev_priv->mm.gpu_write_list); -- else -- list_del_init(&obj_priv->gpu_write_list); -- - trace_i915_gem_object_change_domain(obj, - obj->read_domains, - old_write_domain); -@@ -4205,7 +4133,6 @@ int i915_gem_init_object(struct drm_gem_object *obj) - obj_priv->obj = obj; - obj_priv->fence_reg = I915_FENCE_REG_NONE; - INIT_LIST_HEAD(&obj_priv->list); -- INIT_LIST_HEAD(&obj_priv->gpu_write_list); - INIT_LIST_HEAD(&obj_priv->fence_list); - obj_priv->madv = I915_MADV_WILLNEED; - -@@ -4657,7 +4584,6 @@ i915_gem_load(struct drm_device *dev) - spin_lock_init(&dev_priv->mm.active_list_lock); - INIT_LIST_HEAD(&dev_priv->mm.active_list); - INIT_LIST_HEAD(&dev_priv->mm.flushing_list); -- INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list); - INIT_LIST_HEAD(&dev_priv->mm.inactive_list); - INIT_LIST_HEAD(&dev_priv->mm.request_list); - INIT_LIST_HEAD(&dev_priv->mm.fence_list); -@@ -4712,7 +4638,7 @@ int i915_gem_init_phys_object(struct drm_device *dev, - - phys_obj->id = id; - -- phys_obj->handle = drm_pci_alloc(dev, size, 0); -+ phys_obj->handle = drm_pci_alloc(dev, size, 0, 0xffffffff); - if (!phys_obj->handle) { - ret = -ENOMEM; - goto kfree_obj; -@@ -4770,7 +4696,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev, - if (!obj_priv->phys_obj) - return; - -- ret = i915_gem_object_get_pages(obj, 0); -+ ret = i915_gem_object_get_pages(obj); - if (ret) - goto out; - -@@ -4828,7 +4754,7 @@ i915_gem_attach_phys_object(struct drm_device *dev, - obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1]; - obj_priv->phys_obj->cur_obj = obj; - -- ret = i915_gem_object_get_pages(obj, 0); -+ ret = i915_gem_object_get_pages(obj); - if (ret) { - DRM_ERROR("failed to get page list\n"); - goto out; -diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c -index 63f28ad..aa7fd82 100644 ---- a/drivers/gpu/drm/i915/i915_irq.c -+++ b/drivers/gpu/drm/i915/i915_irq.c -@@ -255,6 +255,7 @@ irqreturn_t igdng_irq_handler(struct drm_device *dev) - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - int ret = IRQ_NONE; - u32 de_iir, gt_iir, de_ier; -+ u32 new_de_iir, new_gt_iir; - struct drm_i915_master_private *master_priv; - - /* disable master interrupt before clearing iir */ -@@ -265,31 +266,35 @@ irqreturn_t igdng_irq_handler(struct drm_device *dev) - de_iir = I915_READ(DEIIR); - gt_iir = I915_READ(GTIIR); - -- if (de_iir == 0 && gt_iir == 0) -- goto done; -+ for (;;) { -+ if (de_iir == 0 && gt_iir == 0) -+ break; - -- ret = IRQ_HANDLED; -+ ret = IRQ_HANDLED; - -- if (dev->primary->master) { -- master_priv = dev->primary->master->driver_priv; -- if (master_priv->sarea_priv) -- master_priv->sarea_priv->last_dispatch = -- READ_BREADCRUMB(dev_priv); -- } -+ I915_WRITE(DEIIR, de_iir); -+ new_de_iir = I915_READ(DEIIR); -+ I915_WRITE(GTIIR, gt_iir); -+ new_gt_iir = I915_READ(GTIIR); - -- if (gt_iir & GT_USER_INTERRUPT) { -- u32 seqno = i915_get_gem_seqno(dev); -- 
dev_priv->mm.irq_gem_seqno = seqno; -- trace_i915_gem_request_complete(dev, seqno); -- DRM_WAKEUP(&dev_priv->irq_queue); -- dev_priv->hangcheck_count = 0; -- mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); -- } -+ if (dev->primary->master) { -+ master_priv = dev->primary->master->driver_priv; -+ if (master_priv->sarea_priv) -+ master_priv->sarea_priv->last_dispatch = -+ READ_BREADCRUMB(dev_priv); -+ } -+ -+ if (gt_iir & GT_USER_INTERRUPT) { -+ u32 seqno = i915_get_gem_seqno(dev); -+ dev_priv->mm.irq_gem_seqno = seqno; -+ trace_i915_gem_request_complete(dev, seqno); -+ DRM_WAKEUP(&dev_priv->irq_queue); -+ } - -- I915_WRITE(GTIIR, gt_iir); -- I915_WRITE(DEIIR, de_iir); -+ de_iir = new_de_iir; -+ gt_iir = new_gt_iir; -+ } - --done: - I915_WRITE(DEIER, de_ier); - (void)I915_READ(DEIER); - -@@ -1044,10 +1049,6 @@ void i915_driver_irq_preinstall(struct drm_device * dev) - (void) I915_READ(IER); - } - --/* -- * Must be called after intel_modeset_init or hotplug interrupts won't be -- * enabled correctly. -- */ - int i915_driver_irq_postinstall(struct drm_device *dev) - { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; -@@ -1070,23 +1071,19 @@ int i915_driver_irq_postinstall(struct drm_device *dev) - if (I915_HAS_HOTPLUG(dev)) { - u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); - -- /* Note HDMI and DP share bits */ -- if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) -- hotplug_en |= HDMIB_HOTPLUG_INT_EN; -- if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) -- hotplug_en |= HDMIC_HOTPLUG_INT_EN; -- if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) -- hotplug_en |= HDMID_HOTPLUG_INT_EN; -- if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS) -- hotplug_en |= SDVOC_HOTPLUG_INT_EN; -- if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS) -- hotplug_en |= SDVOB_HOTPLUG_INT_EN; -- if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) -- hotplug_en |= CRT_HOTPLUG_INT_EN; -- /* Ignore TV since it's buggy */ -- -+ /* Leave other bits alone */ -+ hotplug_en |= HOTPLUG_EN_MASK; - I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); - -+ dev_priv->hotplug_supported_mask = CRT_HOTPLUG_INT_STATUS | -+ TV_HOTPLUG_INT_STATUS | SDVOC_HOTPLUG_INT_STATUS | -+ SDVOB_HOTPLUG_INT_STATUS; -+ if (IS_G4X(dev)) { -+ dev_priv->hotplug_supported_mask |= -+ HDMIB_HOTPLUG_INT_STATUS | -+ HDMIC_HOTPLUG_INT_STATUS | -+ HDMID_HOTPLUG_INT_STATUS; -+ } - /* Enable in IER... 
*/ - enable_mask |= I915_DISPLAY_PORT_INTERRUPT; - /* and unmask in IMR */ -diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h -index cc9b49a..1687edf 100644 ---- a/drivers/gpu/drm/i915/i915_reg.h -+++ b/drivers/gpu/drm/i915/i915_reg.h -@@ -329,7 +329,6 @@ - #define FBC_CTL_PERIODIC (1<<30) - #define FBC_CTL_INTERVAL_SHIFT (16) - #define FBC_CTL_UNCOMPRESSIBLE (1<<14) --#define FBC_C3_IDLE (1<<13) - #define FBC_CTL_STRIDE_SHIFT (5) - #define FBC_CTL_FENCENO (1<<0) - #define FBC_COMMAND 0x0320c -@@ -406,13 +405,6 @@ - # define GPIO_DATA_VAL_IN (1 << 12) - # define GPIO_DATA_PULLUP_DISABLE (1 << 13) - --#define GMBUS0 0x5100 --#define GMBUS1 0x5104 --#define GMBUS2 0x5108 --#define GMBUS3 0x510c --#define GMBUS4 0x5110 --#define GMBUS5 0x5120 -- - /* - * Clock control & power management - */ -@@ -871,6 +863,14 @@ - #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) - #define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */ - #define CRT_FORCE_HOTPLUG_MASK 0xfffffe1f -+#define HOTPLUG_EN_MASK (HDMIB_HOTPLUG_INT_EN | \ -+ HDMIC_HOTPLUG_INT_EN | \ -+ HDMID_HOTPLUG_INT_EN | \ -+ SDVOB_HOTPLUG_INT_EN | \ -+ SDVOC_HOTPLUG_INT_EN | \ -+ TV_HOTPLUG_INT_EN | \ -+ CRT_HOTPLUG_INT_EN) -+ - - #define PORT_HOTPLUG_STAT 0x61114 - #define HDMIB_HOTPLUG_INT_STATUS (1 << 29) -@@ -968,8 +968,6 @@ - #define LVDS_PORT_EN (1 << 31) - /* Selects pipe B for LVDS data. Must be set on pre-965. */ - #define LVDS_PIPEB_SELECT (1 << 30) --/* LVDS dithering flag on 965/g4x platform */ --#define LVDS_ENABLE_DITHER (1 << 25) - /* Enable border for unscaled (or aspect-scaled) display */ - #define LVDS_BORDER_ENABLE (1 << 15) - /* -@@ -1739,8 +1737,6 @@ - - /* Display & cursor control */ - --/* dithering flag on Ironlake */ --#define PIPE_ENABLE_DITHER (1 << 4) - /* Pipe A */ - #define PIPEADSL 0x70000 - #define PIPEACONF 0x70008 -@@ -2161,13 +2157,6 @@ - #define PCH_GPIOE 0xc5020 - #define PCH_GPIOF 0xc5024 - --#define PCH_GMBUS0 0xc5100 --#define PCH_GMBUS1 0xc5104 --#define PCH_GMBUS2 0xc5108 --#define PCH_GMBUS3 0xc510c --#define PCH_GMBUS4 0xc5110 --#define PCH_GMBUS5 0xc5120 -- - #define PCH_DPLL_A 0xc6014 - #define PCH_DPLL_B 0xc6018 - -diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c -index 7ad742f..6eec817 100644 ---- a/drivers/gpu/drm/i915/i915_suspend.c -+++ b/drivers/gpu/drm/i915/i915_suspend.c -@@ -27,7 +27,7 @@ - #include "drmP.h" - #include "drm.h" - #include "i915_drm.h" --#include "intel_drv.h" -+#include "i915_drv.h" - - static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe) - { -@@ -846,9 +846,6 @@ int i915_restore_state(struct drm_device *dev) - for (i = 0; i < 3; i++) - I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]); - -- /* I2C state */ -- intel_i2c_reset_gmbus(dev); -- - return 0; - } - -diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c -index 5e730e6..e505144 100644 ---- a/drivers/gpu/drm/i915/intel_crt.c -+++ b/drivers/gpu/drm/i915/intel_crt.c -@@ -185,9 +185,6 @@ static bool intel_igdng_crt_detect_hotplug(struct drm_connector *connector) - adpa = I915_READ(PCH_ADPA); - - adpa &= ~ADPA_CRT_HOTPLUG_MASK; -- /* disable HPD first */ -- I915_WRITE(PCH_ADPA, adpa); -- (void)I915_READ(PCH_ADPA); - - adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 | - ADPA_CRT_HOTPLUG_WARMUP_10MS | -@@ -579,6 +576,4 @@ void intel_crt_init(struct drm_device *dev) - drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); - - drm_sysfs_connector_add(connector); -- -- dev_priv->hotplug_supported_mask |= 
CRT_HOTPLUG_INT_STATUS; - } -diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c -index b00a1aa..099f420 100644 ---- a/drivers/gpu/drm/i915/intel_display.c -+++ b/drivers/gpu/drm/i915/intel_display.c -@@ -988,8 +988,6 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) - - /* enable it... */ - fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC; -- if (IS_I945GM(dev)) -- fbc_ctl |= FBC_C3_IDLE; /* 945 needs special SR handling */ - fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; - fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; - if (obj_priv->tiling_mode != I915_TILING_NONE) -@@ -1253,7 +1251,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, - return ret; - } - -- ret = i915_gem_object_set_to_display_plane(obj); -+ ret = i915_gem_object_set_to_gtt_domain(obj, 1); - if (ret != 0) { - i915_gem_object_unpin(obj); - mutex_unlock(&dev->struct_mutex); -@@ -1475,10 +1473,6 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) - int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B; - u32 temp; - int tries = 5, j, n; -- u32 pipe_bpc; -- -- temp = I915_READ(pipeconf_reg); -- pipe_bpc = temp & PIPE_BPC_MASK; - - /* XXX: When our outputs are all unaware of DPMS modes other than off - * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. -@@ -1488,15 +1482,6 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) - case DRM_MODE_DPMS_STANDBY: - case DRM_MODE_DPMS_SUSPEND: - DRM_DEBUG("crtc %d dpms on\n", pipe); -- -- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { -- temp = I915_READ(PCH_LVDS); -- if ((temp & LVDS_PORT_EN) == 0) { -- I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN); -- POSTING_READ(PCH_LVDS); -- } -- } -- - if (HAS_eDP) { - /* enable eDP PLL */ - igdng_enable_pll_edp(crtc); -@@ -1510,12 +1495,6 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) - - /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ - temp = I915_READ(fdi_rx_reg); -- /* -- * make the BPC in FDI Rx be consistent with that in -- * pipeconf reg. -- */ -- temp &= ~(0x7 << 16); -- temp |= (pipe_bpc << 11); - I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE | - FDI_SEL_PCDCLK | - FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */ -@@ -1656,12 +1635,6 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) - - /* enable PCH transcoder */ - temp = I915_READ(transconf_reg); -- /* -- * make the BPC in transcoder be consistent with -- * that in pipeconf reg. 
-- */ -- temp &= ~PIPE_BPC_MASK; -- temp |= pipe_bpc; - I915_WRITE(transconf_reg, temp | TRANS_ENABLE); - I915_READ(transconf_reg); - -@@ -1693,6 +1666,8 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) - case DRM_MODE_DPMS_OFF: - DRM_DEBUG("crtc %d dpms off\n", pipe); - -+ i915_disable_vga(dev); -+ - /* Disable display plane */ - temp = I915_READ(dspcntr_reg); - if ((temp & DISPLAY_PLANE_ENABLE) != 0) { -@@ -1702,8 +1677,6 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) - I915_READ(dspbase_reg); - } - -- i915_disable_vga(dev); -- - /* disable cpu pipe, disable after all planes disabled */ - temp = I915_READ(pipeconf_reg); - if ((temp & PIPEACONF_ENABLE) != 0) { -@@ -1724,15 +1697,9 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) - } else - DRM_DEBUG("crtc %d is disabled\n", pipe); - -- udelay(100); -- -- /* Disable PF */ -- temp = I915_READ(pf_ctl_reg); -- if ((temp & PF_ENABLE) != 0) { -- I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE); -- I915_READ(pf_ctl_reg); -+ if (HAS_eDP) { -+ igdng_disable_pll_edp(crtc); - } -- I915_WRITE(pf_win_size, 0); - - /* disable CPU FDI tx and PCH FDI rx */ - temp = I915_READ(fdi_tx_reg); -@@ -1740,9 +1707,6 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) - I915_READ(fdi_tx_reg); - - temp = I915_READ(fdi_rx_reg); -- /* BPC in FDI rx is consistent with that in pipeconf */ -- temp &= ~(0x07 << 16); -- temp |= (pipe_bpc << 11); - I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE); - I915_READ(fdi_rx_reg); - -@@ -1761,13 +1725,6 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) - - udelay(100); - -- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { -- temp = I915_READ(PCH_LVDS); -- I915_WRITE(PCH_LVDS, temp & ~LVDS_PORT_EN); -- I915_READ(PCH_LVDS); -- udelay(100); -- } -- - /* disable PCH transcoder */ - temp = I915_READ(transconf_reg); - if ((temp & TRANS_ENABLE) != 0) { -@@ -1786,13 +1743,6 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) - } - } - } -- temp = I915_READ(transconf_reg); -- /* BPC in transcoder is consistent with that in pipeconf */ -- temp &= ~PIPE_BPC_MASK; -- temp |= pipe_bpc; -- I915_WRITE(transconf_reg, temp); -- I915_READ(transconf_reg); -- udelay(100); - - /* disable PCH DPLL */ - temp = I915_READ(pch_dpll_reg); -@@ -1801,19 +1751,13 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) - I915_READ(pch_dpll_reg); - } - -- if (HAS_eDP) { -- igdng_disable_pll_edp(crtc); -- } -- - temp = I915_READ(fdi_rx_reg); -- temp &= ~FDI_SEL_PCDCLK; -- I915_WRITE(fdi_rx_reg, temp); -- I915_READ(fdi_rx_reg); -- -- temp = I915_READ(fdi_rx_reg); -- temp &= ~FDI_RX_PLL_ENABLE; -- I915_WRITE(fdi_rx_reg, temp); -- I915_READ(fdi_rx_reg); -+ if ((temp & FDI_RX_PLL_ENABLE) != 0) { -+ temp &= ~FDI_SEL_PCDCLK; -+ temp &= ~FDI_RX_PLL_ENABLE; -+ I915_WRITE(fdi_rx_reg, temp); -+ I915_READ(fdi_rx_reg); -+ } - - /* Disable CPU FDI TX PLL */ - temp = I915_READ(fdi_tx_reg); -@@ -1823,8 +1767,16 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) - udelay(100); - } - -+ /* Disable PF */ -+ temp = I915_READ(pf_ctl_reg); -+ if ((temp & PF_ENABLE) != 0) { -+ I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE); -+ I915_READ(pf_ctl_reg); -+ } -+ I915_WRITE(pf_win_size, 0); -+ - /* Wait for the clocks to turn off. 
*/ -- udelay(100); -+ udelay(150); - break; - } - } -@@ -1893,7 +1845,6 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) - intel_update_watermarks(dev); - /* Give the overlay scaler a chance to disable if it's on this pipe */ - //intel_crtc_dpms_video(crtc, FALSE); TODO -- drm_vblank_off(dev, pipe); - - if (dev_priv->cfb_plane == plane && - dev_priv->display.disable_fbc) -@@ -2540,10 +2491,6 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock, - sr_entries = roundup(sr_entries / cacheline_size, 1); - DRM_DEBUG("self-refresh entries: %d\n", sr_entries); - I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); -- } else { -- /* Turn off self refresh if both pipes are enabled */ -- I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) -- & ~FW_BLC_SELF_EN); - } - - DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n", -@@ -2562,43 +2509,15 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock, - (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); - } - --static void i965_update_wm(struct drm_device *dev, int planea_clock, -- int planeb_clock, int sr_hdisplay, int pixel_size) -+static void i965_update_wm(struct drm_device *dev, int unused, int unused2, -+ int unused3, int unused4) - { - struct drm_i915_private *dev_priv = dev->dev_private; -- unsigned long line_time_us; -- int sr_clock, sr_entries, srwm = 1; -- -- /* Calc sr entries for one plane configs */ -- if (sr_hdisplay && (!planea_clock || !planeb_clock)) { -- /* self-refresh has much higher latency */ -- const static int sr_latency_ns = 12000; -- -- sr_clock = planea_clock ? planea_clock : planeb_clock; -- line_time_us = ((sr_hdisplay * 1000) / sr_clock); -- -- /* Use ns/us then divide to preserve precision */ -- sr_entries = (((sr_latency_ns / line_time_us) + 1) * -- pixel_size * sr_hdisplay) / 1000; -- sr_entries = roundup(sr_entries / I915_FIFO_LINE_SIZE, 1); -- DRM_DEBUG("self-refresh entries: %d\n", sr_entries); -- srwm = I945_FIFO_SIZE - sr_entries; -- if (srwm < 0) -- srwm = 1; -- srwm &= 0x3f; -- I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); -- } else { -- /* Turn off self refresh if both pipes are enabled */ -- I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) -- & ~FW_BLC_SELF_EN); -- } - -- DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", -- srwm); -+ DRM_DEBUG("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR 8\n"); - - /* 965 has limitations... 
*/ -- I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | (8 << 16) | (8 << 8) | -- (8 << 0)); -+ I915_WRITE(DSPFW1, (8 << 16) | (8 << 8) | (8 << 0)); - I915_WRITE(DSPFW2, (8 << 8) | (8 << 0)); - } - -@@ -2659,10 +2578,6 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock, - if (srwm < 0) - srwm = 1; - I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f)); -- } else { -- /* Turn off self refresh if both pipes are enabled */ -- I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) -- & ~FW_BLC_SELF_EN); - } - - DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", -@@ -2939,18 +2854,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, - - /* determine panel color depth */ - temp = I915_READ(pipeconf_reg); -- temp &= ~PIPE_BPC_MASK; -- if (is_lvds) { -- int lvds_reg = I915_READ(PCH_LVDS); -- /* the BPC will be 6 if it is 18-bit LVDS panel */ -- if ((lvds_reg & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP) -- temp |= PIPE_8BPC; -- else -- temp |= PIPE_6BPC; -- } else -- temp |= PIPE_8BPC; -- I915_WRITE(pipeconf_reg, temp); -- I915_READ(pipeconf_reg); - - switch (temp & PIPE_BPC_MASK) { - case PIPE_8BPC: -@@ -3178,20 +3081,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, - * appropriately here, but we need to look more thoroughly into how - * panels behave in the two modes. - */ -- /* set the dithering flag */ -- if (IS_I965G(dev)) { -- if (dev_priv->lvds_dither) { -- if (IS_IGDNG(dev)) -- pipeconf |= PIPE_ENABLE_DITHER; -- else -- lvds |= LVDS_ENABLE_DITHER; -- } else { -- if (IS_IGDNG(dev)) -- pipeconf &= ~PIPE_ENABLE_DITHER; -- else -- lvds &= ~LVDS_ENABLE_DITHER; -- } -- } -+ - I915_WRITE(lvds_reg, lvds); - I915_READ(lvds_reg); - } -@@ -3775,6 +3665,125 @@ static void intel_gpu_idle_timer(unsigned long arg) - queue_work(dev_priv->wq, &dev_priv->idle_work); - } - -+void intel_increase_renderclock(struct drm_device *dev, bool schedule) -+{ -+ drm_i915_private_t *dev_priv = dev->dev_private; -+ -+ if (IS_IGDNG(dev)) -+ return; -+ -+ if (!dev_priv->render_reclock_avail) { -+ DRM_DEBUG("not reclocking render clock\n"); -+ return; -+ } -+ -+ /* Restore render clock frequency to original value */ -+ if (IS_G4X(dev) || IS_I9XX(dev)) -+ pci_write_config_word(dev->pdev, GCFGC, dev_priv->orig_clock); -+ else if (IS_I85X(dev)) -+ pci_write_config_word(dev->pdev, HPLLCC, dev_priv->orig_clock); -+ DRM_DEBUG("increasing render clock frequency\n"); -+ -+ /* Schedule downclock */ -+ if (schedule) -+ mod_timer(&dev_priv->idle_timer, jiffies + -+ msecs_to_jiffies(GPU_IDLE_TIMEOUT)); -+} -+ -+void intel_decrease_renderclock(struct drm_device *dev) -+{ -+ drm_i915_private_t *dev_priv = dev->dev_private; -+ -+ if (IS_IGDNG(dev)) -+ return; -+ -+ if (!dev_priv->render_reclock_avail) { -+ DRM_DEBUG("not reclocking render clock\n"); -+ return; -+ } -+ -+ if (IS_G4X(dev)) { -+ u16 gcfgc; -+ -+ /* Adjust render clock... */ -+ pci_read_config_word(dev->pdev, GCFGC, &gcfgc); -+ -+ /* Down to minimum... */ -+ gcfgc &= ~GM45_GC_RENDER_CLOCK_MASK; -+ gcfgc |= GM45_GC_RENDER_CLOCK_266_MHZ; -+ -+ pci_write_config_word(dev->pdev, GCFGC, gcfgc); -+ } else if (IS_I965G(dev)) { -+ u16 gcfgc; -+ -+ /* Adjust render clock... */ -+ pci_read_config_word(dev->pdev, GCFGC, &gcfgc); -+ -+ /* Down to minimum... */ -+ gcfgc &= ~I965_GC_RENDER_CLOCK_MASK; -+ gcfgc |= I965_GC_RENDER_CLOCK_267_MHZ; -+ -+ pci_write_config_word(dev->pdev, GCFGC, gcfgc); -+ } else if (IS_I945G(dev) || IS_I945GM(dev)) { -+ u16 gcfgc; -+ -+ /* Adjust render clock... 
*/ -+ pci_read_config_word(dev->pdev, GCFGC, &gcfgc); -+ -+ /* Down to minimum... */ -+ gcfgc &= ~I945_GC_RENDER_CLOCK_MASK; -+ gcfgc |= I945_GC_RENDER_CLOCK_166_MHZ; -+ -+ pci_write_config_word(dev->pdev, GCFGC, gcfgc); -+ } else if (IS_I915G(dev)) { -+ u16 gcfgc; -+ -+ /* Adjust render clock... */ -+ pci_read_config_word(dev->pdev, GCFGC, &gcfgc); -+ -+ /* Down to minimum... */ -+ gcfgc &= ~I915_GC_RENDER_CLOCK_MASK; -+ gcfgc |= I915_GC_RENDER_CLOCK_166_MHZ; -+ -+ pci_write_config_word(dev->pdev, GCFGC, gcfgc); -+ } else if (IS_I85X(dev)) { -+ u16 hpllcc; -+ -+ /* Adjust render clock... */ -+ pci_read_config_word(dev->pdev, HPLLCC, &hpllcc); -+ -+ /* Up to maximum... */ -+ hpllcc &= ~GC_CLOCK_CONTROL_MASK; -+ hpllcc |= GC_CLOCK_133_200; -+ -+ pci_write_config_word(dev->pdev, HPLLCC, hpllcc); -+ } -+ DRM_DEBUG("decreasing render clock frequency\n"); -+} -+ -+/* Note that no increase function is needed for this - increase_renderclock() -+ * will also rewrite these bits -+ */ -+void intel_decrease_displayclock(struct drm_device *dev) -+{ -+ if (IS_IGDNG(dev)) -+ return; -+ -+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_I915G(dev) || -+ IS_I915GM(dev)) { -+ u16 gcfgc; -+ -+ /* Adjust render clock... */ -+ pci_read_config_word(dev->pdev, GCFGC, &gcfgc); -+ -+ /* Down to minimum... */ -+ gcfgc &= ~0xf0; -+ gcfgc |= 0x80; -+ -+ pci_write_config_word(dev->pdev, GCFGC, gcfgc); -+ } -+} -+ - #define CRTC_IDLE_TIMEOUT 1000 /* ms */ - - static void intel_crtc_idle_timer(unsigned long arg) -@@ -3888,6 +3897,12 @@ static void intel_idle_update(struct work_struct *work) - - mutex_lock(&dev->struct_mutex); - -+ /* GPU isn't processing, downclock it. */ -+ if (!dev_priv->busy) { -+ intel_decrease_renderclock(dev); -+ intel_decrease_displayclock(dev); -+ } -+ - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - /* Skip inactive CRTCs */ - if (!crtc->fb) -@@ -3922,6 +3937,7 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj) - return; - - dev_priv->busy = true; -+ intel_increase_renderclock(dev, true); - - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - if (!crtc->fb) -@@ -4102,51 +4118,37 @@ static void intel_setup_outputs(struct drm_device *dev) - if (I915_READ(PCH_DP_D) & DP_DETECTED) - intel_dp_init(dev, PCH_DP_D); - -- } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { -+ } else if (IS_I9XX(dev)) { - bool found = false; - - if (I915_READ(SDVOB) & SDVO_DETECTED) { -- DRM_DEBUG_KMS("probing SDVOB\n"); - found = intel_sdvo_init(dev, SDVOB); -- if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) { -- DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); -+ if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) - intel_hdmi_init(dev, SDVOB); -- } - -- if (!found && SUPPORTS_INTEGRATED_DP(dev)) { -- DRM_DEBUG_KMS("probing DP_B\n"); -+ if (!found && SUPPORTS_INTEGRATED_DP(dev)) - intel_dp_init(dev, DP_B); -- } - } - - /* Before G4X SDVOC doesn't have its own detect register */ - -- if (I915_READ(SDVOB) & SDVO_DETECTED) { -- DRM_DEBUG_KMS("probing SDVOC\n"); -+ if (I915_READ(SDVOB) & SDVO_DETECTED) - found = intel_sdvo_init(dev, SDVOC); -- } - - if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) { - -- if (SUPPORTS_INTEGRATED_HDMI(dev)) { -- DRM_DEBUG_KMS("probing HDMI on SDVOC\n"); -+ if (SUPPORTS_INTEGRATED_HDMI(dev)) - intel_hdmi_init(dev, SDVOC); -- } -- if (SUPPORTS_INTEGRATED_DP(dev)) { -- DRM_DEBUG_KMS("probing DP_C\n"); -+ if (SUPPORTS_INTEGRATED_DP(dev)) - intel_dp_init(dev, DP_C); -- } - } - -- if (SUPPORTS_INTEGRATED_DP(dev) && -- (I915_READ(DP_D) & DP_DETECTED)) { -- 
DRM_DEBUG_KMS("probing DP_D\n"); -+ if (SUPPORTS_INTEGRATED_DP(dev) && (I915_READ(DP_D) & DP_DETECTED)) - intel_dp_init(dev, DP_D); -- } -- } else if (IS_I8XX(dev)) -+ } else - intel_dvo_init(dev); - -- if (SUPPORTS_TV(dev)) -+ if (IS_I9XX(dev) && IS_MOBILE(dev) && !IS_IGDNG(dev)) - intel_tv_init(dev); - - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { -@@ -4440,6 +4442,7 @@ void intel_modeset_cleanup(struct drm_device *dev) - del_timer_sync(&intel_crtc->idle_timer); - } - -+ intel_increase_renderclock(dev, false); - del_timer_sync(&dev_priv->idle_timer); - - mutex_unlock(&dev->struct_mutex); -diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c -index d487771..d834475 100644 ---- a/drivers/gpu/drm/i915/intel_dp.c -+++ b/drivers/gpu/drm/i915/intel_dp.c -@@ -1254,11 +1254,11 @@ intel_dp_init(struct drm_device *dev, int output_reg) - else - intel_output->type = INTEL_OUTPUT_DISPLAYPORT; - -- if (output_reg == DP_B || output_reg == PCH_DP_B) -+ if (output_reg == DP_B) - intel_output->clone_mask = (1 << INTEL_DP_B_CLONE_BIT); -- else if (output_reg == DP_C || output_reg == PCH_DP_C) -+ else if (output_reg == DP_C) - intel_output->clone_mask = (1 << INTEL_DP_C_CLONE_BIT); -- else if (output_reg == DP_D || output_reg == PCH_DP_D) -+ else if (output_reg == DP_D) - intel_output->clone_mask = (1 << INTEL_DP_D_CLONE_BIT); - - if (IS_eDP(intel_output)) { -@@ -1290,20 +1290,14 @@ intel_dp_init(struct drm_device *dev, int output_reg) - break; - case DP_B: - case PCH_DP_B: -- dev_priv->hotplug_supported_mask |= -- HDMIB_HOTPLUG_INT_STATUS; - name = "DPDDC-B"; - break; - case DP_C: - case PCH_DP_C: -- dev_priv->hotplug_supported_mask |= -- HDMIC_HOTPLUG_INT_STATUS; - name = "DPDDC-C"; - break; - case DP_D: - case PCH_DP_D: -- dev_priv->hotplug_supported_mask |= -- HDMID_HOTPLUG_INT_STATUS; - name = "DPDDC-D"; - break; - } -diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h -index 6c7c19f..ef61fe9 100644 ---- a/drivers/gpu/drm/i915/intel_drv.h -+++ b/drivers/gpu/drm/i915/intel_drv.h -@@ -134,8 +134,6 @@ void intel_i2c_destroy(struct i2c_adapter *adapter); - int intel_ddc_get_modes(struct intel_output *intel_output); - extern bool intel_ddc_probe(struct intel_output *intel_output); - void intel_i2c_quirk_set(struct drm_device *dev, bool enable); --void intel_i2c_reset_gmbus(struct drm_device *dev); -- - extern void intel_crt_init(struct drm_device *dev); - extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg); - extern bool intel_sdvo_init(struct drm_device *dev, int output_device); -diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c -index 1318ac2..2b0fe54 100644 ---- a/drivers/gpu/drm/i915/intel_fb.c -+++ b/drivers/gpu/drm/i915/intel_fb.c -@@ -148,7 +148,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, - - mutex_lock(&dev->struct_mutex); - -- ret = i915_gem_object_pin(fbo, 64*1024); -+ ret = i915_gem_object_pin(fbo, PAGE_SIZE); - if (ret) { - DRM_ERROR("failed to pin fb: %d\n", ret); - goto out_unref; -diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c -index 85760bf..c33451a 100644 ---- a/drivers/gpu/drm/i915/intel_hdmi.c -+++ b/drivers/gpu/drm/i915/intel_hdmi.c -@@ -254,26 +254,21 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) - if (sdvox_reg == SDVOB) { - intel_output->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT); - intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB"); -- 
dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; - } else if (sdvox_reg == SDVOC) { - intel_output->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT); - intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC"); -- dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; - } else if (sdvox_reg == HDMIB) { - intel_output->clone_mask = (1 << INTEL_HDMID_CLONE_BIT); - intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOE, - "HDMIB"); -- dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; - } else if (sdvox_reg == HDMIC) { - intel_output->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT); - intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOD, - "HDMIC"); -- dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; - } else if (sdvox_reg == HDMID) { - intel_output->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT); - intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOF, - "HDMID"); -- dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS; - } - if (!intel_output->ddc_bus) - goto err_connector; -diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c -index b94acc4..c7eab72 100644 ---- a/drivers/gpu/drm/i915/intel_i2c.c -+++ b/drivers/gpu/drm/i915/intel_i2c.c -@@ -118,23 +118,6 @@ static void set_data(void *data, int state_high) - udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */ - } - --/* Clears the GMBUS setup. Our driver doesn't make use of the GMBUS I2C -- * engine, but if the BIOS leaves it enabled, then that can break our use -- * of the bit-banging I2C interfaces. This is notably the case with the -- * Mac Mini in EFI mode. -- */ --void --intel_i2c_reset_gmbus(struct drm_device *dev) --{ -- struct drm_i915_private *dev_priv = dev->dev_private; -- -- if (IS_IGDNG(dev)) { -- I915_WRITE(PCH_GMBUS0, 0); -- } else { -- I915_WRITE(GMBUS0, 0); -- } --} -- - /** - * intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg - * @dev: DRM device -@@ -185,8 +168,6 @@ struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg, - if(i2c_bit_add_bus(&chan->adapter)) - goto out_free; - -- intel_i2c_reset_gmbus(dev); -- - /* JJJ: raise SCL and SDA? 
*/ - intel_i2c_quirk_set(dev, true); - set_data(chan, 1); -diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c -index 952bb4e..05598ae 100644 ---- a/drivers/gpu/drm/i915/intel_lvds.c -+++ b/drivers/gpu/drm/i915/intel_lvds.c -@@ -602,33 +602,12 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder, - /* Some lid devices report incorrect lid status, assume they're connected */ - static const struct dmi_system_id bad_lid_status[] = { - { -- .ident = "Compaq nx9020", -- .matches = { -- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), -- DMI_MATCH(DMI_BOARD_NAME, "3084"), -- }, -- }, -- { -- .ident = "Samsung SX20S", -- .matches = { -- DMI_MATCH(DMI_SYS_VENDOR, "Samsung Electronics"), -- DMI_MATCH(DMI_BOARD_NAME, "SX20S"), -- }, -- }, -- { - .ident = "Aspire One", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire one"), - }, - }, -- { -- .ident = "PC-81005", -- .matches = { -- DMI_MATCH(DMI_SYS_VENDOR, "MALATA"), -- DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"), -- }, -- }, - { } - }; - -@@ -700,14 +679,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val, - struct drm_i915_private *dev_priv = - container_of(nb, struct drm_i915_private, lid_notifier); - struct drm_device *dev = dev_priv->dev; -- struct drm_connector *connector = dev_priv->int_lvds_connector; - -- /* -- * check and update the status of LVDS connector after receiving -- * the LID nofication event. -- */ -- if (connector) -- connector->status = connector->funcs->detect(connector); - if (!acpi_lid_open()) { - dev_priv->modeset_on_lid = 1; - return NOTIFY_OK; -@@ -1113,8 +1085,6 @@ out: - DRM_DEBUG("lid notifier registration failed\n"); - dev_priv->lid_notifier.notifier_call = NULL; - } -- /* keep the LVDS connector */ -- dev_priv->int_lvds_connector = connector; - drm_sysfs_connector_add(connector); - return; - -diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c -index 3f5aaf1..083bec2 100644 ---- a/drivers/gpu/drm/i915/intel_sdvo.c -+++ b/drivers/gpu/drm/i915/intel_sdvo.c -@@ -472,63 +472,14 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode) - } - - /** -- * Try to read the response after issuie the DDC switch command. But it -- * is noted that we must do the action of reading response and issuing DDC -- * switch command in one I2C transaction. Otherwise when we try to start -- * another I2C transaction after issuing the DDC bus switch, it will be -- * switched to the internal SDVO register. -+ * Don't check status code from this as it switches the bus back to the -+ * SDVO chips which defeats the purpose of doing a bus switch in the first -+ * place. 
- */ - static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output, - u8 target) - { -- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; -- u8 out_buf[2], cmd_buf[2], ret_value[2], ret; -- struct i2c_msg msgs[] = { -- { -- .addr = sdvo_priv->slave_addr >> 1, -- .flags = 0, -- .len = 2, -- .buf = out_buf, -- }, -- /* the following two are to read the response */ -- { -- .addr = sdvo_priv->slave_addr >> 1, -- .flags = 0, -- .len = 1, -- .buf = cmd_buf, -- }, -- { -- .addr = sdvo_priv->slave_addr >> 1, -- .flags = I2C_M_RD, -- .len = 1, -- .buf = ret_value, -- }, -- }; -- -- intel_sdvo_debug_write(intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH, -- &target, 1); -- /* write the DDC switch command argument */ -- intel_sdvo_write_byte(intel_output, SDVO_I2C_ARG_0, target); -- -- out_buf[0] = SDVO_I2C_OPCODE; -- out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH; -- cmd_buf[0] = SDVO_I2C_CMD_STATUS; -- cmd_buf[1] = 0; -- ret_value[0] = 0; -- ret_value[1] = 0; -- -- ret = i2c_transfer(intel_output->i2c_bus, msgs, 3); -- if (ret != 3) { -- /* failure in I2C transfer */ -- DRM_DEBUG_KMS("I2c transfer returned %d\n", ret); -- return; -- } -- if (ret_value[0] != SDVO_CMD_STATUS_SUCCESS) { -- DRM_DEBUG_KMS("DDC switch command returns response %d\n", -- ret_value[0]); -- return; -- } -- return; -+ intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH, &target, 1); - } - - static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool target_0, bool target_1) -@@ -1638,32 +1589,6 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response) - edid = drm_get_edid(&intel_output->base, - intel_output->ddc_bus); - -- /* This is only applied to SDVO cards with multiple outputs */ -- if (edid == NULL && intel_sdvo_multifunc_encoder(intel_output)) { -- uint8_t saved_ddc, temp_ddc; -- saved_ddc = sdvo_priv->ddc_bus; -- temp_ddc = sdvo_priv->ddc_bus >> 1; -- /* -- * Don't use the 1 as the argument of DDC bus switch to get -- * the EDID. It is used for SDVO SPD ROM. -- */ -- while(temp_ddc > 1) { -- sdvo_priv->ddc_bus = temp_ddc; -- edid = drm_get_edid(&intel_output->base, -- intel_output->ddc_bus); -- if (edid) { -- /* -- * When we can get the EDID, maybe it is the -- * correct DDC bus. Update it. 
-- */ -- sdvo_priv->ddc_bus = temp_ddc; -- break; -- } -- temp_ddc >>= 1; -- } -- if (edid == NULL) -- sdvo_priv->ddc_bus = saved_ddc; -- } - /* when there is no edid and no monitor is connected with VGA - * port, try to use the CRT ddc to read the EDID for DVI-connector - */ -@@ -2743,7 +2668,6 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) - - bool intel_sdvo_init(struct drm_device *dev, int output_device) - { -- struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_connector *connector; - struct intel_output *intel_output; - struct intel_sdvo_priv *sdvo_priv; -@@ -2790,12 +2714,10 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) - intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS"); - sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, - "SDVOB/VGA DDC BUS"); -- dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS; - } else { - intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS"); - sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, - "SDVOC/VGA DDC BUS"); -- dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS; - } - - if (intel_output->ddc_bus == NULL) -diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c -index ce026f0..9ca9179 100644 ---- a/drivers/gpu/drm/i915/intel_tv.c -+++ b/drivers/gpu/drm/i915/intel_tv.c -@@ -1213,17 +1213,20 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, - tv_ctl |= TV_TRILEVEL_SYNC; - if (tv_mode->pal_burst) - tv_ctl |= TV_PAL_BURST; -- - scctl1 = 0; -- if (tv_mode->dda1_inc) -+ /* dda1 implies valid video levels */ -+ if (tv_mode->dda1_inc) { - scctl1 |= TV_SC_DDA1_EN; -+ } -+ - if (tv_mode->dda2_inc) - scctl1 |= TV_SC_DDA2_EN; -+ - if (tv_mode->dda3_inc) - scctl1 |= TV_SC_DDA3_EN; -+ - scctl1 |= tv_mode->sc_reset; -- if (video_levels) -- scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT; -+ scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT; - scctl1 |= tv_mode->dda1_inc << TV_SCDDA1_INC_SHIFT; - - scctl2 = tv_mode->dda2_size << TV_SCDDA2_SIZE_SHIFT | -@@ -1801,8 +1804,6 @@ intel_tv_init(struct drm_device *dev) - drm_connector_attach_property(connector, - dev->mode_config.tv_bottom_margin_property, - tv_priv->margin[TV_MARGIN_BOTTOM]); -- -- dev_priv->hotplug_supported_mask |= TV_HOTPLUG_INT_STATUS; - out: - drm_sysfs_connector_add(connector); - } -diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c -index fed2291..d67c425 100644 ---- a/drivers/gpu/drm/radeon/atom.c -+++ b/drivers/gpu/drm/radeon/atom.c -@@ -607,7 +607,7 @@ static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg) - uint8_t count = U8((*ptr)++); - SDEBUG(" count: %d\n", count); - if (arg == ATOM_UNIT_MICROSEC) -- udelay(count); -+ schedule_timeout_uninterruptible(usecs_to_jiffies(count)); - else - schedule_timeout_uninterruptible(msecs_to_jiffies(count)); - } -diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c -index 19f93f2..c15287a 100644 ---- a/drivers/gpu/drm/radeon/atombios_crtc.c -+++ b/drivers/gpu/drm/radeon/atombios_crtc.c -@@ -241,7 +241,6 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) - { - struct drm_device *dev = crtc->dev; - struct radeon_device *rdev = dev->dev_private; -- struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); - - switch (mode) { - case DRM_MODE_DPMS_ON: -@@ -249,21 +248,20 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) - if (ASIC_IS_DCE3(rdev)) - 
atombios_enable_crtc_memreq(crtc, 1); - atombios_blank_crtc(crtc, 0); -- if (rdev->family < CHIP_R600) -- drm_vblank_post_modeset(dev, radeon_crtc->crtc_id); -- radeon_crtc_load_lut(crtc); - break; - case DRM_MODE_DPMS_STANDBY: - case DRM_MODE_DPMS_SUSPEND: - case DRM_MODE_DPMS_OFF: -- if (rdev->family < CHIP_R600) -- drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id); - atombios_blank_crtc(crtc, 1); - if (ASIC_IS_DCE3(rdev)) - atombios_enable_crtc_memreq(crtc, 0); - atombios_enable_crtc(crtc, 0); - break; - } -+ -+ if (mode != DRM_MODE_DPMS_OFF) { -+ radeon_crtc_load_lut(crtc); -+ } - } - - static void -diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c -index 969502a..2ed88a8 100644 ---- a/drivers/gpu/drm/radeon/radeon_atombios.c -+++ b/drivers/gpu/drm/radeon/radeon_atombios.c -@@ -135,14 +135,6 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, - } - } - -- /* HIS X1300 is DVI+VGA, not DVI+DVI */ -- if ((dev->pdev->device == 0x7146) && -- (dev->pdev->subsystem_vendor == 0x17af) && -- (dev->pdev->subsystem_device == 0x2058)) { -- if (supported_device == ATOM_DEVICE_DFP1_SUPPORT) -- return false; -- } -- - /* Funky macbooks */ - if ((dev->pdev->device == 0x71C5) && - (dev->pdev->subsystem_vendor == 0x106b) && -diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c -index 22ce4d6..8d0b7aa 100644 ---- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c -+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c -@@ -292,7 +292,8 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode) - uint32_t mask; - - if (radeon_crtc->crtc_id) -- mask = (RADEON_CRTC2_DISP_DIS | -+ mask = (RADEON_CRTC2_EN | -+ RADEON_CRTC2_DISP_DIS | - RADEON_CRTC2_VSYNC_DIS | - RADEON_CRTC2_HSYNC_DIS | - RADEON_CRTC2_DISP_REQ_EN_B); -@@ -304,7 +305,7 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode) - switch (mode) { - case DRM_MODE_DPMS_ON: - if (radeon_crtc->crtc_id) -- WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~(RADEON_CRTC2_EN | mask)); -+ WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~mask); - else { - WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_EN, ~(RADEON_CRTC_EN | - RADEON_CRTC_DISP_REQ_EN_B)); -@@ -318,7 +319,7 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode) - case DRM_MODE_DPMS_OFF: - drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id); - if (radeon_crtc->crtc_id) -- WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~(RADEON_CRTC2_EN | mask)); -+ WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~mask); - else { - WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_DISP_REQ_EN_B, ~(RADEON_CRTC_EN | - RADEON_CRTC_DISP_REQ_EN_B)); -diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c -index c8942ca..f8a465d 100644 ---- a/drivers/gpu/drm/radeon/radeon_test.c -+++ b/drivers/gpu/drm/radeon/radeon_test.c -@@ -42,8 +42,8 @@ void radeon_test_moves(struct radeon_device *rdev) - /* Number of tests = - * (Total GTT - IB pool - writeback page - ring buffer) / test size - */ -- n = ((u32)(rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - RADEON_GPU_PAGE_SIZE - -- rdev->cp.ring_size)) / size; -+ n = (rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - RADEON_GPU_PAGE_SIZE - -+ rdev->cp.ring_size) / size; - - gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL); - if (!gtt_obj) { -diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c -index 4444f48..5f117cd 100644 ---- a/drivers/gpu/drm/radeon/rs600.c -+++ b/drivers/gpu/drm/radeon/rs600.c -@@ -301,7 +301,9 @@ int 
rs600_mc_wait_for_idle(struct radeon_device *rdev) - - void rs600_gpu_init(struct radeon_device *rdev) - { -+ /* FIXME: HDP same place on rs600 ? */ - r100_hdp_reset(rdev); -+ /* FIXME: is this correct ? */ - r420_pipes_init(rdev); - /* Wait for mc idle */ - if (rs600_mc_wait_for_idle(rdev)) -@@ -310,20 +312,9 @@ void rs600_gpu_init(struct radeon_device *rdev) - - void rs600_vram_info(struct radeon_device *rdev) - { -+ /* FIXME: to do or is these values sane ? */ - rdev->mc.vram_is_ddr = true; - rdev->mc.vram_width = 128; -- -- rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); -- rdev->mc.mc_vram_size = rdev->mc.real_vram_size; -- -- rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); -- rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); -- -- if (rdev->mc.mc_vram_size > rdev->mc.aper_size) -- rdev->mc.mc_vram_size = rdev->mc.aper_size; -- -- if (rdev->mc.real_vram_size > rdev->mc.aper_size) -- rdev->mc.real_vram_size = rdev->mc.aper_size; - } - - void rs600_bandwidth_update(struct radeon_device *rdev) -diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c -index b12ff76..2754717 100644 ---- a/drivers/gpu/drm/radeon/rs690.c -+++ b/drivers/gpu/drm/radeon/rs690.c -@@ -131,25 +131,24 @@ void rs690_pm_info(struct radeon_device *rdev) - - void rs690_vram_info(struct radeon_device *rdev) - { -+ uint32_t tmp; - fixed20_12 a; - - rs400_gart_adjust_size(rdev); -- -+ /* DDR for all card after R300 & IGP */ - rdev->mc.vram_is_ddr = true; -- rdev->mc.vram_width = 128; -- -+ /* FIXME: is this correct for RS690/RS740 ? */ -+ tmp = RREG32(RADEON_MEM_CNTL); -+ if (tmp & R300_MEM_NUM_CHANNELS_MASK) { -+ rdev->mc.vram_width = 128; -+ } else { -+ rdev->mc.vram_width = 64; -+ } - rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); - rdev->mc.mc_vram_size = rdev->mc.real_vram_size; - - rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); - rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); -- -- if (rdev->mc.mc_vram_size > rdev->mc.aper_size) -- rdev->mc.mc_vram_size = rdev->mc.aper_size; -- -- if (rdev->mc.real_vram_size > rdev->mc.aper_size) -- rdev->mc.real_vram_size = rdev->mc.aper_size; -- - rs690_pm_info(rdev); - /* FIXME: we should enforce default clock in case GPU is not in - * default setup -diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c -index 5b4d66d..4b96e7a 100644 ---- a/drivers/hid/hid-apple.c -+++ b/drivers/hid/hid-apple.c -@@ -431,13 +431,6 @@ static const struct hid_device_id apple_devices[] = { - .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD }, - { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS), - .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS }, -- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI), -- .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, -- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO), -- .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN | -- APPLE_ISO_KEYBOARD }, -- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS), -- .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, - { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY), - .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, - { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY), -diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c -index 9678354..7d05c4b 100644 ---- a/drivers/hid/hid-core.c -+++ b/drivers/hid/hid-core.c -@@ 
-1287,9 +1287,6 @@ static const struct hid_device_id hid_blacklist[] = { - { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI) }, - { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ISO) }, - { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS) }, -- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) }, -- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) }, -- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) }, - { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) }, - { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) }, - { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) }, -diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h -index e380e7b..adbef5d 100644 ---- a/drivers/hid/hid-ids.h -+++ b/drivers/hid/hid-ids.h -@@ -88,9 +88,6 @@ - #define USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI 0x0236 - #define USB_DEVICE_ID_APPLE_WELLSPRING3_ISO 0x0237 - #define USB_DEVICE_ID_APPLE_WELLSPRING3_JIS 0x0238 --#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI 0x0239 --#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO 0x023a --#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b - #define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a - #define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b - #define USB_DEVICE_ID_APPLE_ATV_IRCONTROL 0x8241 -diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c -index 5d901f6..03bd703 100644 ---- a/drivers/hid/usbhid/hid-core.c -+++ b/drivers/hid/usbhid/hid-core.c -@@ -998,8 +998,7 @@ static int usbhid_start(struct hid_device *hid) - usbhid->urbctrl->transfer_dma = usbhid->ctrlbuf_dma; - usbhid->urbctrl->transfer_flags |= (URB_NO_TRANSFER_DMA_MAP | URB_NO_SETUP_DMA_MAP); - -- if (!(hid->quirks & HID_QUIRK_NO_INIT_REPORTS)) -- usbhid_init_reports(hid); -+ usbhid_init_reports(hid); - - set_bit(HID_STARTED, &usbhid->iofl); - -diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c -index 5713b93..0d9045a 100644 ---- a/drivers/hid/usbhid/hid-quirks.c -+++ b/drivers/hid/usbhid/hid-quirks.c -@@ -280,7 +280,7 @@ u32 usbhid_lookup_quirk(const u16 idVendor, const u16 idProduct) - if (idVendor == USB_VENDOR_ID_NCR && - idProduct >= USB_DEVICE_ID_NCR_FIRST && - idProduct <= USB_DEVICE_ID_NCR_LAST) -- return HID_QUIRK_NO_INIT_REPORTS; -+ return HID_QUIRK_NOGET; - - down_read(&dquirks_rwsem); - bl_entry = usbhid_exists_dquirk(idVendor, idProduct); -diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig -index c1f7ea0..700e93a 100644 ---- a/drivers/hwmon/Kconfig -+++ b/drivers/hwmon/Kconfig -@@ -374,7 +374,7 @@ config SENSORS_GL520SM - - config SENSORS_CORETEMP - tristate "Intel Core/Core2/Atom temperature sensor" -- depends on X86 && PCI && EXPERIMENTAL -+ depends on X86 && EXPERIMENTAL - help - If you say yes here you get support for the temperature - sensor inside your CPU. 
Most of the family 6 CPUs -diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c -index 14f910d..1852f27 100644 ---- a/drivers/hwmon/adt7462.c -+++ b/drivers/hwmon/adt7462.c -@@ -97,7 +97,7 @@ I2C_CLIENT_INSMOD_1(adt7462); - #define ADT7462_PIN24_SHIFT 6 - #define ADT7462_PIN26_VOLT_INPUT 0x08 - #define ADT7462_PIN25_VOLT_INPUT 0x20 --#define ADT7462_PIN28_SHIFT 4 /* cfg3 */ -+#define ADT7462_PIN28_SHIFT 6 /* cfg3 */ - #define ADT7462_PIN28_VOLT 0x5 - - #define ADT7462_REG_ALARM1 0xB8 -@@ -182,7 +182,7 @@ I2C_CLIENT_INSMOD_1(adt7462); - * - * Some, but not all, of these voltages have low/high limits. - */ --#define ADT7462_VOLT_COUNT 13 -+#define ADT7462_VOLT_COUNT 12 - - #define ADT7462_VENDOR 0x41 - #define ADT7462_DEVICE 0x62 -diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c -index 2d7bcee..caef39c 100644 ---- a/drivers/hwmon/coretemp.c -+++ b/drivers/hwmon/coretemp.c -@@ -33,7 +33,6 @@ - #include - #include - #include --#include - #include - #include - -@@ -162,7 +161,6 @@ static int __devinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device * - int usemsr_ee = 1; - int err; - u32 eax, edx; -- struct pci_dev *host_bridge; - - /* Early chips have no MSR for TjMax */ - -@@ -170,21 +168,11 @@ static int __devinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device * - usemsr_ee = 0; - } - -- /* Atom CPUs */ -+ /* Atoms seems to have TjMax at 90C */ - - if (c->x86_model == 0x1c) { - usemsr_ee = 0; -- -- host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0)); -- -- if (host_bridge && host_bridge->vendor == PCI_VENDOR_ID_INTEL -- && (host_bridge->device == 0xa000 /* NM10 based nettop */ -- || host_bridge->device == 0xa010)) /* NM10 based netbook */ -- tjmax = 100000; -- else -- tjmax = 90000; -- -- pci_dev_put(host_bridge); -+ tjmax = 90000; - } - - if ((c->x86_model > 0xe) && (usemsr_ee)) { -diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c -index f600813..da1b1f9 100644 ---- a/drivers/hwmon/fschmd.c -+++ b/drivers/hwmon/fschmd.c -@@ -767,7 +767,6 @@ leave: - static int watchdog_open(struct inode *inode, struct file *filp) - { - struct fschmd_data *pos, *data = NULL; -- int watchdog_is_open; - - /* We get called from drivers/char/misc.c with misc_mtx hold, and we - call misc_register() from fschmd_probe() with watchdog_data_mutex -@@ -782,12 +781,10 @@ static int watchdog_open(struct inode *inode, struct file *filp) - } - } - /* Note we can never not have found data, so we don't check for this */ -- watchdog_is_open = test_and_set_bit(0, &data->watchdog_is_open); -- if (!watchdog_is_open) -- kref_get(&data->kref); -+ kref_get(&data->kref); - mutex_unlock(&watchdog_data_mutex); - -- if (watchdog_is_open) -+ if (test_and_set_bit(0, &data->watchdog_is_open)) - return -EBUSY; - - /* Start the watchdog */ -diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c -index 1508e0a..f7e7016 100644 ---- a/drivers/hwmon/lm78.c -+++ b/drivers/hwmon/lm78.c -@@ -870,16 +870,17 @@ static struct lm78_data *lm78_update_device(struct device *dev) - static int __init lm78_isa_found(unsigned short address) - { - int val, save, found = 0; -- int port; -- -- /* Some boards declare base+0 to base+7 as a PNP device, some base+4 -- * to base+7 and some base+5 to base+6. So we better request each port -- * individually for the probing phase. 
*/ -- for (port = address; port < address + LM78_EXTENT; port++) { -- if (!request_region(port, 1, "lm78")) { -- pr_debug("lm78: Failed to request port 0x%x\n", port); -- goto release; -- } -+ -+ /* We have to request the region in two parts because some -+ boards declare base+4 to base+7 as a PNP device */ -+ if (!request_region(address, 4, "lm78")) { -+ pr_debug("lm78: Failed to request low part of region\n"); -+ return 0; -+ } -+ if (!request_region(address + 4, 4, "lm78")) { -+ pr_debug("lm78: Failed to request high part of region\n"); -+ release_region(address, 4); -+ return 0; - } - - #define REALLY_SLOW_IO -@@ -943,8 +944,8 @@ static int __init lm78_isa_found(unsigned short address) - val & 0x80 ? "LM79" : "LM78", (int)address); - - release: -- for (port--; port >= address; port--) -- release_region(port, 1); -+ release_region(address + 4, 4); -+ release_region(address, 4); - return found; - } - -diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c -index 864a371..ebe38b6 100644 ---- a/drivers/hwmon/sht15.c -+++ b/drivers/hwmon/sht15.c -@@ -305,7 +305,7 @@ static inline int sht15_calc_temp(struct sht15_data *data) - int d1 = 0; - int i; - -- for (i = 1; i < ARRAY_SIZE(temppoints); i++) -+ for (i = 1; i < ARRAY_SIZE(temppoints) - 1; i++) - /* Find pointer to interpolate */ - if (data->supply_uV > temppoints[i - 1].vdd) { - d1 = (data->supply_uV/1000 - temppoints[i - 1].vdd) -@@ -332,12 +332,12 @@ static inline int sht15_calc_humid(struct sht15_data *data) - - const int c1 = -4; - const int c2 = 40500; /* x 10 ^ -6 */ -- const int c3 = -2800; /* x10 ^ -9 */ -+ const int c3 = 2800; /* x10 ^ -9 */ - - RHlinear = c1*1000 - + c2 * data->val_humid/1000 - + (data->val_humid * data->val_humid * c3)/1000000; -- return (temp - 25000) * (10000 + 80 * data->val_humid) -+ return (temp - 25000) * (10000 + 800 * data->val_humid) - / 1000000 + RHlinear; - } - -diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c -index f0b6883..d27ed1b 100644 ---- a/drivers/hwmon/w83781d.c -+++ b/drivers/hwmon/w83781d.c -@@ -1818,17 +1818,17 @@ static int __init - w83781d_isa_found(unsigned short address) - { - int val, save, found = 0; -- int port; -- -- /* Some boards declare base+0 to base+7 as a PNP device, some base+4 -- * to base+7 and some base+5 to base+6. So we better request each port -- * individually for the probing phase. */ -- for (port = address; port < address + W83781D_EXTENT; port++) { -- if (!request_region(port, 1, "w83781d")) { -- pr_debug("w83781d: Failed to request port 0x%x\n", -- port); -- goto release; -- } -+ -+ /* We have to request the region in two parts because some -+ boards declare base+4 to base+7 as a PNP device */ -+ if (!request_region(address, 4, "w83781d")) { -+ pr_debug("w83781d: Failed to request low part of region\n"); -+ return 0; -+ } -+ if (!request_region(address + 4, 4, "w83781d")) { -+ pr_debug("w83781d: Failed to request high part of region\n"); -+ release_region(address, 4); -+ return 0; - } - - #define REALLY_SLOW_IO -@@ -1902,8 +1902,8 @@ w83781d_isa_found(unsigned short address) - val == 0x30 ? 
"W83782D" : "W83781D", (int)address); - - release: -- for (port--; port >= address; port--) -- release_region(port, 1); -+ release_region(address + 4, 4); -+ release_region(address, 4); - return found; - } - -diff --git a/drivers/i2c/busses/i2c-pca-isa.c b/drivers/i2c/busses/i2c-pca-isa.c -index f7346a9..0ed68e2 100644 ---- a/drivers/i2c/busses/i2c-pca-isa.c -+++ b/drivers/i2c/busses/i2c-pca-isa.c -@@ -75,7 +75,7 @@ static int pca_isa_waitforcompletion(void *pd) - unsigned long timeout; - - if (irq > -1) { -- ret = wait_event_timeout(pca_wait, -+ ret = wait_event_interruptible_timeout(pca_wait, - pca_isa_readbyte(pd, I2C_PCA_CON) - & I2C_PCA_CON_SI, pca_isa_ops.timeout); - } else { -@@ -96,7 +96,7 @@ static void pca_isa_resetchip(void *pd) - } - - static irqreturn_t pca_handler(int this_irq, void *dev_id) { -- wake_up(&pca_wait); -+ wake_up_interruptible(&pca_wait); - return IRQ_HANDLED; - } - -diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c -index 5b2213d..c4df9d4 100644 ---- a/drivers/i2c/busses/i2c-pca-platform.c -+++ b/drivers/i2c/busses/i2c-pca-platform.c -@@ -84,7 +84,7 @@ static int i2c_pca_pf_waitforcompletion(void *pd) - unsigned long timeout; - - if (i2c->irq) { -- ret = wait_event_timeout(i2c->wait, -+ ret = wait_event_interruptible_timeout(i2c->wait, - i2c->algo_data.read_byte(i2c, I2C_PCA_CON) - & I2C_PCA_CON_SI, i2c->adap.timeout); - } else { -@@ -122,7 +122,7 @@ static irqreturn_t i2c_pca_pf_handler(int this_irq, void *dev_id) - if ((i2c->algo_data.read_byte(i2c, I2C_PCA_CON) & I2C_PCA_CON_SI) == 0) - return IRQ_NONE; - -- wake_up(&i2c->wait); -+ wake_up_interruptible(&i2c->wait); - - return IRQ_HANDLED; - } -diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c -index e29b6d5..b1c050f 100644 ---- a/drivers/i2c/busses/i2c-tiny-usb.c -+++ b/drivers/i2c/busses/i2c-tiny-usb.c -@@ -13,7 +13,6 @@ - #include - #include - #include --#include - - /* include interfaces to usb layer */ - #include -@@ -32,8 +31,8 @@ - #define CMD_I2C_IO_END (1<<1) - - /* i2c bit delay, default is 10us -> 100kHz */ --static unsigned short delay = 10; --module_param(delay, ushort, 0); -+static int delay = 10; -+module_param(delay, int, 0); - MODULE_PARM_DESC(delay, "bit delay in microseconds, " - "e.g. 
10 for 100kHz (default is 100kHz)"); - -@@ -110,7 +109,7 @@ static int usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) - - static u32 usb_func(struct i2c_adapter *adapter) - { -- __le32 func; -+ u32 func; - - /* get functionality from adapter */ - if (usb_read(adapter, CMD_GET_FUNC, 0, 0, &func, sizeof(func)) != -@@ -119,7 +118,7 @@ static u32 usb_func(struct i2c_adapter *adapter) - return 0; - } - -- return le32_to_cpu(func); -+ return func; - } - - /* This is the actual algorithm we define */ -@@ -217,7 +216,8 @@ static int i2c_tiny_usb_probe(struct usb_interface *interface, - "i2c-tiny-usb at bus %03d device %03d", - dev->usb_dev->bus->busnum, dev->usb_dev->devnum); - -- if (usb_write(&dev->adapter, CMD_SET_DELAY, delay, 0, NULL, 0) != 0) { -+ if (usb_write(&dev->adapter, CMD_SET_DELAY, -+ cpu_to_le16(delay), 0, NULL, 0) != 0) { - dev_err(&dev->adapter.dev, - "failure setting delay to %dus\n", delay); - retval = -EIO; -diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c -index 3bf7b0a..2965043 100644 ---- a/drivers/i2c/i2c-core.c -+++ b/drivers/i2c/i2c-core.c -@@ -801,9 +801,6 @@ int i2c_del_adapter(struct i2c_adapter *adap) - adap->dev.parent); - #endif - -- /* device name is gone after device_unregister */ -- dev_dbg(&adap->dev, "adapter [%s] unregistered\n", adap->name); -- - /* clean up the sysfs representation */ - init_completion(&adap->dev_released); - device_unregister(&adap->dev); -@@ -816,6 +813,8 @@ int i2c_del_adapter(struct i2c_adapter *adap) - idr_remove(&i2c_adapter_idr, adap->nr); - mutex_unlock(&core_lock); - -+ dev_dbg(&adap->dev, "adapter [%s] unregistered\n", adap->name); -+ - /* Clear the device structure in case this adapter is ever going to be - added again */ - memset(&adap->dev, 0, sizeof(adap->dev)); -diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c -index 1ccfb40..9aec78d 100644 ---- a/drivers/ide/slc90e66.c -+++ b/drivers/ide/slc90e66.c -@@ -91,7 +91,8 @@ static void slc90e66_set_dma_mode(ide_drive_t *drive, const u8 speed) - - if (!(reg48 & u_flag)) - pci_write_config_word(dev, 0x48, reg48|u_flag); -- if ((reg4a & a_speed) != u_speed) { -+ /* FIXME: (reg4a & a_speed) ? 
*/
-+ if ((reg4a & u_speed) != u_speed) {
- pci_write_config_word(dev, 0x4a, reg4a & ~a_speed);
- pci_read_config_word(dev, 0x4a, &reg4a);
- pci_write_config_word(dev, 0x4a, reg4a|u_speed);
-diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
-index 100da85..b368406 100644
---- a/drivers/infiniband/hw/ipath/ipath_fs.c
-+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
-@@ -346,8 +346,10 @@ static int ipathfs_fill_super(struct super_block *sb, void *data,
- list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
- spin_unlock_irqrestore(&ipath_devs_lock, flags);
- ret = create_device_files(sb, dd);
-- if (ret)
-+ if (ret) {
-+ deactivate_locked_super(sb);
- goto bail;
-+ }
- spin_lock_irqsave(&ipath_devs_lock, flags);
- }
-
-diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
-index df3eb8c..2bf5116 100644
---- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
-+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
-@@ -884,7 +884,6 @@ struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neighbour,
-
- neigh->neighbour = neighbour;
- neigh->dev = dev;
-- memset(&neigh->dgid.raw, 0, sizeof (union ib_gid));
- *to_ipoib_neigh(neighbour) = neigh;
- skb_queue_head_init(&neigh->queue);
- ipoib_cm_set(neigh, NULL);
-diff --git a/drivers/input/misc/winbond-cir.c b/drivers/input/misc/winbond-cir.c
-index c8f5a9a..33309fe 100644
---- a/drivers/input/misc/winbond-cir.c
-+++ b/drivers/input/misc/winbond-cir.c
-@@ -768,7 +768,7 @@ wbcir_parse_rc6(struct device *dev, struct wbcir_data *data)
- return;
- }
-
-- dev_dbg(dev, "IR-RC6 ad 0x%02X cm 0x%02X cu 0x%04X "
-+ dev_info(dev, "IR-RC6 ad 0x%02X cm 0x%02X cu 0x%04X "
- "toggle %u mode %u scan 0x%08X\n",
- address,
- command,
-diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
-index fc8823b..f361106 100644
---- a/drivers/input/mouse/alps.c
-+++ b/drivers/input/mouse/alps.c
-@@ -5,7 +5,6 @@
- * Copyright (c) 2003-2005 Peter Osterlund
- * Copyright (c) 2004 Dmitry Torokhov
- * Copyright (c) 2005 Vojtech Pavlik
-- * Copyright (c) 2009 Sebastian Kapfer
- *
- * ALPS detection, tap switching and status querying info is taken from
- * tpconfig utility (by C. Scott Ananian and Bruce Kall). 
-@@ -36,8 +35,6 @@ - #define ALPS_OLDPROTO 0x10 - #define ALPS_PASS 0x20 - #define ALPS_FW_BK_2 0x40 --#define ALPS_PS2_INTERLEAVED 0x80 /* 3-byte PS/2 packet interleaved with -- 6-byte ALPS packet */ - - static const struct alps_model_info alps_model_data[] = { - { { 0x32, 0x02, 0x14 }, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* Toshiba Salellite Pro M10 */ -@@ -58,9 +55,7 @@ static const struct alps_model_info alps_model_data[] = { - { { 0x20, 0x02, 0x0e }, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* XXX */ - { { 0x22, 0x02, 0x0a }, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, - { { 0x22, 0x02, 0x14 }, 0xff, 0xff, ALPS_PASS | ALPS_DUALPOINT }, /* Dell Latitude D600 */ -- /* Dell Latitude E5500, E6400, E6500, Precision M4400 */ -- { { 0x62, 0x02, 0x14 }, 0xcf, 0xcf, -- ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, -+ { { 0x62, 0x02, 0x14 }, 0xcf, 0xcf, ALPS_PASS | ALPS_DUALPOINT }, /* Dell Latitude E6500 */ - { { 0x73, 0x02, 0x50 }, 0xcf, 0xcf, ALPS_FW_BK_1 }, /* Dell Vostro 1400 */ - }; - -@@ -71,88 +66,20 @@ static const struct alps_model_info alps_model_data[] = { - */ - - /* -- * PS/2 packet format -- * -- * byte 0: 0 0 YSGN XSGN 1 M R L -- * byte 1: X7 X6 X5 X4 X3 X2 X1 X0 -- * byte 2: Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 -- * -- * Note that the device never signals overflow condition. -- * -- * ALPS absolute Mode - new format -+ * ALPS abolute Mode - new format - * - * byte 0: 1 ? ? ? 1 ? ? ? - * byte 1: 0 x6 x5 x4 x3 x2 x1 x0 -- * byte 2: 0 x10 x9 x8 x7 ? fin ges -+ * byte 2: 0 x10 x9 x8 x7 ? fin ges - * byte 3: 0 y9 y8 y7 1 M R L - * byte 4: 0 y6 y5 y4 y3 y2 y1 y0 - * byte 5: 0 z6 z5 z4 z3 z2 z1 z0 - * -- * Dualpoint device -- interleaved packet format -- * -- * byte 0: 1 1 0 0 1 1 1 1 -- * byte 1: 0 x6 x5 x4 x3 x2 x1 x0 -- * byte 2: 0 x10 x9 x8 x7 0 fin ges -- * byte 3: 0 0 YSGN XSGN 1 1 1 1 -- * byte 4: X7 X6 X5 X4 X3 X2 X1 X0 -- * byte 5: Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 -- * byte 6: 0 y9 y8 y7 1 m r l -- * byte 7: 0 y6 y5 y4 y3 y2 y1 y0 -- * byte 8: 0 z6 z5 z4 z3 z2 z1 z0 -- * -- * CAPITALS = stick, miniscules = touchpad -- * - * ?'s can have different meanings on different models, - * such as wheel rotation, extra buttons, stick buttons - * on a dualpoint, etc. - */ - --static bool alps_is_valid_first_byte(const struct alps_model_info *model, -- unsigned char data) --{ -- return (data & model->mask0) == model->byte0; --} -- --static void alps_report_buttons(struct psmouse *psmouse, -- struct input_dev *dev1, struct input_dev *dev2, -- int left, int right, int middle) --{ -- struct alps_data *priv = psmouse->private; -- const struct alps_model_info *model = priv->i; -- -- if (model->flags & ALPS_PS2_INTERLEAVED) { -- struct input_dev *dev; -- -- /* -- * If shared button has already been reported on the -- * other device (dev2) then this event should be also -- * sent through that device. -- */ -- dev = test_bit(BTN_LEFT, dev2->key) ? dev2 : dev1; -- input_report_key(dev, BTN_LEFT, left); -- -- dev = test_bit(BTN_RIGHT, dev2->key) ? dev2 : dev1; -- input_report_key(dev, BTN_RIGHT, right); -- -- dev = test_bit(BTN_MIDDLE, dev2->key) ? dev2 : dev1; -- input_report_key(dev, BTN_MIDDLE, middle); -- -- /* -- * Sync the _other_ device now, we'll do the first -- * device later once we report the rest of the events. -- */ -- input_sync(dev2); -- } else { -- /* -- * For devices with non-interleaved packets we know what -- * device buttons belong to so we can simply report them. 
-- */ -- input_report_key(dev1, BTN_LEFT, left); -- input_report_key(dev1, BTN_RIGHT, right); -- input_report_key(dev1, BTN_MIDDLE, middle); -- } --} -- - static void alps_process_packet(struct psmouse *psmouse) - { - struct alps_data *priv = psmouse->private; -@@ -162,6 +89,18 @@ static void alps_process_packet(struct psmouse *psmouse) - int x, y, z, ges, fin, left, right, middle; - int back = 0, forward = 0; - -+ if ((packet[0] & 0xc8) == 0x08) { /* 3-byte PS/2 packet */ -+ input_report_key(dev2, BTN_LEFT, packet[0] & 1); -+ input_report_key(dev2, BTN_RIGHT, packet[0] & 2); -+ input_report_key(dev2, BTN_MIDDLE, packet[0] & 4); -+ input_report_rel(dev2, REL_X, -+ packet[1] ? packet[1] - ((packet[0] << 4) & 0x100) : 0); -+ input_report_rel(dev2, REL_Y, -+ packet[2] ? ((packet[0] << 3) & 0x100) - packet[2] : 0); -+ input_sync(dev2); -+ return; -+ } -+ - if (priv->i->flags & ALPS_OLDPROTO) { - left = packet[2] & 0x10; - right = packet[2] & 0x08; -@@ -197,13 +136,18 @@ static void alps_process_packet(struct psmouse *psmouse) - input_report_rel(dev2, REL_X, (x > 383 ? (x - 768) : x)); - input_report_rel(dev2, REL_Y, -(y > 255 ? (y - 512) : y)); - -- alps_report_buttons(psmouse, dev2, dev, left, right, middle); -+ input_report_key(dev2, BTN_LEFT, left); -+ input_report_key(dev2, BTN_RIGHT, right); -+ input_report_key(dev2, BTN_MIDDLE, middle); - -+ input_sync(dev); - input_sync(dev2); - return; - } - -- alps_report_buttons(psmouse, dev, dev2, left, right, middle); -+ input_report_key(dev, BTN_LEFT, left); -+ input_report_key(dev, BTN_RIGHT, right); -+ input_report_key(dev, BTN_MIDDLE, middle); - - /* Convert hardware tap to a reasonable Z value */ - if (ges && !fin) z = 40; -@@ -244,168 +188,25 @@ static void alps_process_packet(struct psmouse *psmouse) - input_sync(dev); - } - --static void alps_report_bare_ps2_packet(struct psmouse *psmouse, -- unsigned char packet[], -- bool report_buttons) --{ -- struct alps_data *priv = psmouse->private; -- struct input_dev *dev2 = priv->dev2; -- -- if (report_buttons) -- alps_report_buttons(psmouse, dev2, psmouse->dev, -- packet[0] & 1, packet[0] & 2, packet[0] & 4); -- -- input_report_rel(dev2, REL_X, -- packet[1] ? packet[1] - ((packet[0] << 4) & 0x100) : 0); -- input_report_rel(dev2, REL_Y, -- packet[2] ? ((packet[0] << 3) & 0x100) - packet[2] : 0); -- -- input_sync(dev2); --} -- --static psmouse_ret_t alps_handle_interleaved_ps2(struct psmouse *psmouse) --{ -- struct alps_data *priv = psmouse->private; -- -- if (psmouse->pktcnt < 6) -- return PSMOUSE_GOOD_DATA; -- -- if (psmouse->pktcnt == 6) { -- /* -- * Start a timer to flush the packet if it ends up last -- * 6-byte packet in the stream. Timer needs to fire -- * psmouse core times out itself. 20 ms should be enough -- * to decide if we are getting more data or not. -- */ -- mod_timer(&priv->timer, jiffies + msecs_to_jiffies(20)); -- return PSMOUSE_GOOD_DATA; -- } -- -- del_timer(&priv->timer); -- -- if (psmouse->packet[6] & 0x80) { -- -- /* -- * Highest bit is set - that means we either had -- * complete ALPS packet and this is start of the -- * next packet or we got garbage. 
-- */ -- -- if (((psmouse->packet[3] | -- psmouse->packet[4] | -- psmouse->packet[5]) & 0x80) || -- (!alps_is_valid_first_byte(priv->i, psmouse->packet[6]))) { -- dbg("refusing packet %x %x %x %x " -- "(suspected interleaved ps/2)\n", -- psmouse->packet[3], psmouse->packet[4], -- psmouse->packet[5], psmouse->packet[6]); -- return PSMOUSE_BAD_DATA; -- } -- -- alps_process_packet(psmouse); -- -- /* Continue with the next packet */ -- psmouse->packet[0] = psmouse->packet[6]; -- psmouse->pktcnt = 1; -- -- } else { -- -- /* -- * High bit is 0 - that means that we indeed got a PS/2 -- * packet in the middle of ALPS packet. -- * -- * There is also possibility that we got 6-byte ALPS -- * packet followed by 3-byte packet from trackpoint. We -- * can not distinguish between these 2 scenarios but -- * becase the latter is unlikely to happen in course of -- * normal operation (user would need to press all -- * buttons on the pad and start moving trackpoint -- * without touching the pad surface) we assume former. -- * Even if we are wrong the wost thing that would happen -- * the cursor would jump but we should not get protocol -- * desynchronization. -- */ -- -- alps_report_bare_ps2_packet(psmouse, &psmouse->packet[3], -- false); -- -- /* -- * Continue with the standard ALPS protocol handling, -- * but make sure we won't process it as an interleaved -- * packet again, which may happen if all buttons are -- * pressed. To avoid this let's reset the 4th bit which -- * is normally 1. -- */ -- psmouse->packet[3] = psmouse->packet[6] & 0xf7; -- psmouse->pktcnt = 4; -- } -- -- return PSMOUSE_GOOD_DATA; --} -- --static void alps_flush_packet(unsigned long data) --{ -- struct psmouse *psmouse = (struct psmouse *)data; -- -- serio_pause_rx(psmouse->ps2dev.serio); -- -- if (psmouse->pktcnt == 6) { -- -- /* -- * We did not any more data in reasonable amount of time. -- * Validate the last 3 bytes and process as a standard -- * ALPS packet. -- */ -- if ((psmouse->packet[3] | -- psmouse->packet[4] | -- psmouse->packet[5]) & 0x80) { -- dbg("refusing packet %x %x %x " -- "(suspected interleaved ps/2)\n", -- psmouse->packet[3], psmouse->packet[4], -- psmouse->packet[5]); -- } else { -- alps_process_packet(psmouse); -- } -- psmouse->pktcnt = 0; -- } -- -- serio_continue_rx(psmouse->ps2dev.serio); --} -- - static psmouse_ret_t alps_process_byte(struct psmouse *psmouse) - { - struct alps_data *priv = psmouse->private; -- const struct alps_model_info *model = priv->i; - - if ((psmouse->packet[0] & 0xc8) == 0x08) { /* PS/2 packet */ - if (psmouse->pktcnt == 3) { -- alps_report_bare_ps2_packet(psmouse, psmouse->packet, -- true); -+ alps_process_packet(psmouse); - return PSMOUSE_FULL_PACKET; - } - return PSMOUSE_GOOD_DATA; - } - -- /* Check for PS/2 packet stuffed in the middle of ALPS packet. 
*/ -- -- if ((model->flags & ALPS_PS2_INTERLEAVED) && -- psmouse->pktcnt >= 4 && (psmouse->packet[3] & 0x0f) == 0x0f) { -- return alps_handle_interleaved_ps2(psmouse); -- } -- -- if (!alps_is_valid_first_byte(model, psmouse->packet[0])) { -- dbg("refusing packet[0] = %x (mask0 = %x, byte0 = %x)\n", -- psmouse->packet[0], model->mask0, model->byte0); -+ if ((psmouse->packet[0] & priv->i->mask0) != priv->i->byte0) - return PSMOUSE_BAD_DATA; -- } - - /* Bytes 2 - 6 should have 0 in the highest bit */ - if (psmouse->pktcnt >= 2 && psmouse->pktcnt <= 6 && -- (psmouse->packet[psmouse->pktcnt - 1] & 0x80)) { -- dbg("refusing packet[%i] = %x\n", -- psmouse->pktcnt - 1, psmouse->packet[psmouse->pktcnt - 1]); -+ (psmouse->packet[psmouse->pktcnt - 1] & 0x80)) - return PSMOUSE_BAD_DATA; -- } - - if (psmouse->pktcnt == 6) { - alps_process_packet(psmouse); -@@ -644,7 +445,6 @@ static void alps_disconnect(struct psmouse *psmouse) - struct alps_data *priv = psmouse->private; - - psmouse_reset(psmouse); -- del_timer_sync(&priv->timer); - input_unregister_device(priv->dev2); - kfree(priv); - } -@@ -661,8 +461,6 @@ int alps_init(struct psmouse *psmouse) - goto init_fail; - - priv->dev2 = dev2; -- setup_timer(&priv->timer, alps_flush_packet, (unsigned long)psmouse); -- - psmouse->private = priv; - - if (alps_hw_init(psmouse, &version)) -diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h -index 904ed8b..bc87936 100644 ---- a/drivers/input/mouse/alps.h -+++ b/drivers/input/mouse/alps.h -@@ -23,7 +23,6 @@ struct alps_data { - char phys[32]; /* Phys */ - const struct alps_model_info *i;/* Info */ - int prev_fin; /* Finger bit from previous packet */ -- struct timer_list timer; - }; - - #ifdef CONFIG_MOUSE_PS2_ALPS -diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c -index 0876d82..07c5379 100644 ---- a/drivers/input/mouse/psmouse-base.c -+++ b/drivers/input/mouse/psmouse-base.c -@@ -667,6 +667,19 @@ static int psmouse_extensions(struct psmouse *psmouse, - max_proto = PSMOUSE_IMEX; - } - -+/* -+ * Try Finger Sensing Pad -+ */ -+ if (max_proto > PSMOUSE_IMEX) { -+ if (fsp_detect(psmouse, set_properties) == 0) { -+ if (!set_properties || fsp_init(psmouse) == 0) -+ return PSMOUSE_FSP; -+/* -+ * Init failed, try basic relative protocols -+ */ -+ max_proto = PSMOUSE_IMEX; -+ } -+ } - - if (max_proto > PSMOUSE_IMEX) { - if (genius_detect(psmouse, set_properties) == 0) -@@ -683,21 +696,6 @@ static int psmouse_extensions(struct psmouse *psmouse, - } - - /* -- * Try Finger Sensing Pad. We do it here because its probe upsets -- * Trackpoint devices (causing TP_READ_ID command to time out). -- */ -- if (max_proto > PSMOUSE_IMEX) { -- if (fsp_detect(psmouse, set_properties) == 0) { -- if (!set_properties || fsp_init(psmouse) == 0) -- return PSMOUSE_FSP; --/* -- * Init failed, try basic relative protocols -- */ -- max_proto = PSMOUSE_IMEX; -- } -- } -- --/* - * Reset to defaults in case the device got confused by extended - * protocol probes. Note that we follow up with full reset because - * some mice put themselves to sleep when they see PSMOUSE_RESET_DIS. 
-diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h -index 2a5982e..2bcf1ac 100644 ---- a/drivers/input/serio/i8042-x86ia64io.h -+++ b/drivers/input/serio/i8042-x86ia64io.h -@@ -67,12 +67,10 @@ static inline void i8042_write_command(int val) - - #include - --static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { -+static struct dmi_system_id __initdata i8042_dmi_noloop_table[] = { - { -- /* -- * Arima-Rioworks HDAMB - -- * AUX LOOP command does not raise AUX IRQ -- */ -+ /* AUX LOOP command does not raise AUX IRQ */ -+ .ident = "Arima-Rioworks HDAMB", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "RIOWORKS"), - DMI_MATCH(DMI_BOARD_NAME, "HDAMB"), -@@ -80,7 +78,7 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { - }, - }, - { -- /* ASUS G1S */ -+ .ident = "ASUS G1S", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc."), - DMI_MATCH(DMI_BOARD_NAME, "G1S"), -@@ -88,7 +86,8 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { - }, - }, - { -- /* ASUS P65UP5 - AUX LOOP command does not raise AUX IRQ */ -+ /* AUX LOOP command does not raise AUX IRQ */ -+ .ident = "ASUS P65UP5", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), - DMI_MATCH(DMI_BOARD_NAME, "P/I-P65UP5"), -@@ -96,6 +95,7 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { - }, - }, - { -+ .ident = "Compaq Proliant 8500", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Compaq"), - DMI_MATCH(DMI_PRODUCT_NAME , "ProLiant"), -@@ -103,6 +103,7 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { - }, - }, - { -+ .ident = "Compaq Proliant DL760", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Compaq"), - DMI_MATCH(DMI_PRODUCT_NAME , "ProLiant"), -@@ -110,7 +111,7 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { - }, - }, - { -- /* OQO Model 01 */ -+ .ident = "OQO Model 01", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "OQO"), - DMI_MATCH(DMI_PRODUCT_NAME, "ZEPTO"), -@@ -118,7 +119,8 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { - }, - }, - { -- /* ULI EV4873 - AUX LOOP does not work properly */ -+ /* AUX LOOP does not work properly */ -+ .ident = "ULI EV4873", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ULI"), - DMI_MATCH(DMI_PRODUCT_NAME, "EV4873"), -@@ -126,7 +128,7 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { - }, - }, - { -- /* Microsoft Virtual Machine */ -+ .ident = "Microsoft Virtual Machine", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), - DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"), -@@ -134,7 +136,7 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { - }, - }, - { -- /* Medion MAM 2070 */ -+ .ident = "Medion MAM 2070", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Notebook"), - DMI_MATCH(DMI_PRODUCT_NAME, "MAM 2070"), -@@ -142,7 +144,7 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { - }, - }, - { -- /* Blue FB5601 */ -+ .ident = "Blue FB5601", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "blue"), - DMI_MATCH(DMI_PRODUCT_NAME, "FB5601"), -@@ -150,7 +152,7 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { - }, - }, - { -- /* Gigabyte M912 */ -+ .ident = "Gigabyte M912", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), - DMI_MATCH(DMI_PRODUCT_NAME, "M912"), -@@ -158,14 +160,7 @@ static const struct dmi_system_id __initconst 
i8042_dmi_noloop_table[] = { - }, - }, - { -- /* Gigabyte M1022M netbook */ -- .matches = { -- DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co.,Ltd."), -- DMI_MATCH(DMI_BOARD_NAME, "M1022E"), -- DMI_MATCH(DMI_BOARD_VERSION, "1.02"), -- }, -- }, -- { -+ .ident = "HP DV9700", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv9700"), -@@ -182,72 +177,72 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { - * ... apparently some Toshibas don't like MUX mode either and - * die horrible death on reboot. - */ --static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { -+static struct dmi_system_id __initdata i8042_dmi_nomux_table[] = { - { -- /* Fujitsu Lifebook P7010/P7010D */ -+ .ident = "Fujitsu Lifebook P7010/P7010D", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), - DMI_MATCH(DMI_PRODUCT_NAME, "P7010"), - }, - }, - { -- /* Fujitsu Lifebook P7010 */ -+ .ident = "Fujitsu Lifebook P7010", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), - DMI_MATCH(DMI_PRODUCT_NAME, "0000000000"), - }, - }, - { -- /* Fujitsu Lifebook P5020D */ -+ .ident = "Fujitsu Lifebook P5020D", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), - DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook P Series"), - }, - }, - { -- /* Fujitsu Lifebook S2000 */ -+ .ident = "Fujitsu Lifebook S2000", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), - DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook S Series"), - }, - }, - { -- /* Fujitsu Lifebook S6230 */ -+ .ident = "Fujitsu Lifebook S6230", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), - DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook S6230"), - }, - }, - { -- /* Fujitsu T70H */ -+ .ident = "Fujitsu T70H", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), - DMI_MATCH(DMI_PRODUCT_NAME, "FMVLT70H"), - }, - }, - { -- /* Fujitsu-Siemens Lifebook T3010 */ -+ .ident = "Fujitsu-Siemens Lifebook T3010", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), - DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T3010"), - }, - }, - { -- /* Fujitsu-Siemens Lifebook E4010 */ -+ .ident = "Fujitsu-Siemens Lifebook E4010", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), - DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E4010"), - }, - }, - { -- /* Fujitsu-Siemens Amilo Pro 2010 */ -+ .ident = "Fujitsu-Siemens Amilo Pro 2010", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), - DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pro V2010"), - }, - }, - { -- /* Fujitsu-Siemens Amilo Pro 2030 */ -+ .ident = "Fujitsu-Siemens Amilo Pro 2030", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), - DMI_MATCH(DMI_PRODUCT_NAME, "AMILO PRO V2030"), -@@ -258,7 +253,7 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { - * No data is coming from the touchscreen unless KBC - * is in legacy mode. - */ -- /* Panasonic CF-29 */ -+ .ident = "Panasonic CF-29", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Matsushita"), - DMI_MATCH(DMI_PRODUCT_NAME, "CF-29"), -@@ -266,10 +261,10 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { - }, - { - /* -- * HP Pavilion DV4017EA - -- * errors on MUX ports are reported without raising AUXDATA -+ * Errors on MUX ports are reported without raising AUXDATA - * causing "spurious NAK" messages. 
- */ -+ .ident = "HP Pavilion DV4017EA", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion dv4000 (EA032EA#ABF)"), -@@ -277,9 +272,9 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { - }, - { - /* -- * HP Pavilion ZT1000 - -- * like DV4017EA does not raise AUXERR for errors on MUX ports. -+ * Like DV4017EA does not raise AUXERR for errors on MUX ports. - */ -+ .ident = "HP Pavilion ZT1000", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Notebook PC"), -@@ -288,41 +283,44 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { - }, - { - /* -- * HP Pavilion DV4270ca - -- * like DV4017EA does not raise AUXERR for errors on MUX ports. -+ * Like DV4017EA does not raise AUXERR for errors on MUX ports. - */ -+ .ident = "HP Pavilion DV4270ca", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion dv4000 (EH476UA#ABL)"), - }, - }, - { -+ .ident = "Toshiba P10", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), - DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P10"), - }, - }, - { -+ .ident = "Toshiba Equium A110", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), - DMI_MATCH(DMI_PRODUCT_NAME, "EQUIUM A110"), - }, - }, - { -+ .ident = "Alienware Sentia", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ALIENWARE"), - DMI_MATCH(DMI_PRODUCT_NAME, "Sentia"), - }, - }, - { -- /* Sharp Actius MM20 */ -+ .ident = "Sharp Actius MM20", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "SHARP"), - DMI_MATCH(DMI_PRODUCT_NAME, "PC-MM20 Series"), - }, - }, - { -- /* Sony Vaio FS-115b */ -+ .ident = "Sony Vaio FS-115b", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), - DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FS115B"), -@@ -330,72 +328,73 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { - }, - { - /* -- * Sony Vaio FZ-240E - -- * reset and GET ID commands issued via KBD port are -+ * Reset and GET ID commands issued via KBD port are - * sometimes being delivered to AUX3. 
- */ -+ .ident = "Sony Vaio FZ-240E", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), - DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FZ240E"), - }, - }, - { -- /* Amoi M636/A737 */ -+ .ident = "Amoi M636/A737", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Amoi Electronics CO.,LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "M636/A737 platform"), - }, - }, - { -- /* Lenovo 3000 n100 */ -+ .ident = "Lenovo 3000 n100", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_NAME, "076804U"), - }, - }, - { -+ .ident = "Acer Aspire 1360", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1360"), - }, - }, - { -- /* Gericom Bellagio */ -+ .ident = "Gericom Bellagio", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Gericom"), - DMI_MATCH(DMI_PRODUCT_NAME, "N34AS6"), - }, - }, - { -- /* IBM 2656 */ -+ .ident = "IBM 2656", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "IBM"), - DMI_MATCH(DMI_PRODUCT_NAME, "2656"), - }, - }, - { -- /* Dell XPS M1530 */ -+ .ident = "Dell XPS M1530", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "XPS M1530"), - }, - }, - { -- /* Compal HEL80I */ -+ .ident = "Compal HEL80I", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "COMPAL"), - DMI_MATCH(DMI_PRODUCT_NAME, "HEL80I"), - }, - }, - { -- /* Dell Vostro 1510 */ -+ .ident = "Dell Vostro 1510", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Vostro1510"), - }, - }, - { -- /* Acer Aspire 5536 */ -+ .ident = "Acer Aspire 5536", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5536"), -@@ -405,65 +404,65 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { - { } - }; - --static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = { -+static struct dmi_system_id __initdata i8042_dmi_reset_table[] = { - { -- /* MSI Wind U-100 */ -+ .ident = "MSI Wind U-100", - .matches = { - DMI_MATCH(DMI_BOARD_NAME, "U-100"), - DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"), - }, - }, - { -- /* LG Electronics X110 */ -+ .ident = "LG Electronics X110", - .matches = { - DMI_MATCH(DMI_BOARD_NAME, "X110"), - DMI_MATCH(DMI_BOARD_VENDOR, "LG Electronics Inc."), - }, - }, - { -- /* Acer Aspire One 150 */ -+ .ident = "Acer Aspire One 150", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "AOA150"), - }, - }, - { -- /* Advent 4211 */ -+ .ident = "Advent 4211", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "DIXONSXP"), - DMI_MATCH(DMI_PRODUCT_NAME, "Advent 4211"), - }, - }, - { -- /* Medion Akoya Mini E1210 */ -+ .ident = "Medion Akoya Mini E1210", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "MEDION"), - DMI_MATCH(DMI_PRODUCT_NAME, "E1210"), - }, - }, - { -- /* Mivvy M310 */ -+ .ident = "Mivvy M310", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "VIOOO"), - DMI_MATCH(DMI_PRODUCT_NAME, "N10"), - }, - }, - { -- /* Dell Vostro 1320 */ -+ .ident = "Dell Vostro 1320", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1320"), - }, - }, - { -- /* Dell Vostro 1520 */ -+ .ident = "Dell Vostro 1520", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1520"), - }, - }, - { -- /* Dell Vostro 1720 */ -+ .ident = "Dell Vostro 1720", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1720"), -@@ -473,16 +472,16 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = { - }; - 
- #ifdef CONFIG_PNP --static const struct dmi_system_id __initconst i8042_dmi_nopnp_table[] = { -+static struct dmi_system_id __initdata i8042_dmi_nopnp_table[] = { - { -- /* Intel MBO Desktop D845PESV */ -+ .ident = "Intel MBO Desktop D845PESV", - .matches = { - DMI_MATCH(DMI_BOARD_NAME, "D845PESV"), - DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"), - }, - }, - { -- /* MSI Wind U-100 */ -+ .ident = "MSI Wind U-100", - .matches = { - DMI_MATCH(DMI_BOARD_NAME, "U-100"), - DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"), -@@ -491,23 +490,27 @@ static const struct dmi_system_id __initconst i8042_dmi_nopnp_table[] = { - { } - }; - --static const struct dmi_system_id __initconst i8042_dmi_laptop_table[] = { -+static struct dmi_system_id __initdata i8042_dmi_laptop_table[] = { - { -+ .ident = "Portable", - .matches = { - DMI_MATCH(DMI_CHASSIS_TYPE, "8"), /* Portable */ - }, - }, - { -+ .ident = "Laptop", - .matches = { - DMI_MATCH(DMI_CHASSIS_TYPE, "9"), /* Laptop */ - }, - }, - { -+ .ident = "Notebook", - .matches = { - DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */ - }, - }, - { -+ .ident = "Sub-Notebook", - .matches = { - DMI_MATCH(DMI_CHASSIS_TYPE, "14"), /* Sub-Notebook */ - }, -@@ -522,65 +525,58 @@ static const struct dmi_system_id __initconst i8042_dmi_laptop_table[] = { - * Originally, this was just confined to older laptops, but a few Acer laptops - * have turned up in 2007 that also need this again. - */ --static const struct dmi_system_id __initconst i8042_dmi_dritek_table[] = { -- { -- /* Acer Aspire 5610 */ -- .matches = { -- DMI_MATCH(DMI_SYS_VENDOR, "Acer"), -- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5610"), -- }, -- }, -+static struct dmi_system_id __initdata i8042_dmi_dritek_table[] = { - { -- /* Acer Aspire 5630 */ -+ .ident = "Acer Aspire 5630", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5630"), - }, - }, - { -- /* Acer Aspire 5650 */ -+ .ident = "Acer Aspire 5650", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5650"), - }, - }, - { -- /* Acer Aspire 5680 */ -+ .ident = "Acer Aspire 5680", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5680"), - }, - }, - { -- /* Acer Aspire 5720 */ -+ .ident = "Acer Aspire 5720", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5720"), - }, - }, - { -- /* Acer Aspire 9110 */ -+ .ident = "Acer Aspire 9110", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 9110"), - }, - }, - { -- /* Acer TravelMate 660 */ -+ .ident = "Acer TravelMate 660", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 660"), - }, - }, - { -- /* Acer TravelMate 2490 */ -+ .ident = "Acer TravelMate 2490", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 2490"), - }, - }, - { -- /* Acer TravelMate 4280 */ -+ .ident = "Acer TravelMate 4280", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4280"), -diff --git a/drivers/lguest/segments.c b/drivers/lguest/segments.c -index ede4658..951c57b 100644 ---- a/drivers/lguest/segments.c -+++ b/drivers/lguest/segments.c -@@ -179,10 +179,8 @@ void load_guest_gdt_entry(struct lg_cpu *cpu, u32 num, u32 lo, u32 hi) - * We assume the Guest has the same number of GDT entries as the - * Host, otherwise we'd have to dynamically allocate the Guest GDT. 
- */ -- if (num >= ARRAY_SIZE(cpu->arch.gdt)) { -+ if (num >= ARRAY_SIZE(cpu->arch.gdt)) - kill_guest(cpu, "too many gdt entries %i", num); -- return; -- } - - /* Set it up, then fix it. */ - cpu->arch.gdt[num].a = lo; -diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c -index 386a797..556f0fe 100644 ---- a/drivers/macintosh/therm_adt746x.c -+++ b/drivers/macintosh/therm_adt746x.c -@@ -79,7 +79,6 @@ struct thermostat { - u8 limits[3]; - int last_speed[2]; - int last_var[2]; -- int pwm_inv[2]; - }; - - static enum {ADT7460, ADT7467} therm_type; -@@ -230,23 +229,19 @@ static void write_fan_speed(struct thermostat *th, int speed, int fan) - - if (speed >= 0) { - manual = read_reg(th, MANUAL_MODE[fan]); -- manual &= ~INVERT_MASK; - write_reg(th, MANUAL_MODE[fan], -- manual | MANUAL_MASK | th->pwm_inv[fan]); -+ (manual|MANUAL_MASK) & (~INVERT_MASK)); - write_reg(th, FAN_SPD_SET[fan], speed); - } else { - /* back to automatic */ - if(therm_type == ADT7460) { - manual = read_reg(th, - MANUAL_MODE[fan]) & (~MANUAL_MASK); -- manual &= ~INVERT_MASK; -- manual |= th->pwm_inv[fan]; -+ - write_reg(th, - MANUAL_MODE[fan], manual|REM_CONTROL[fan]); - } else { - manual = read_reg(th, MANUAL_MODE[fan]); -- manual &= ~INVERT_MASK; -- manual |= th->pwm_inv[fan]; - write_reg(th, MANUAL_MODE[fan], manual&(~AUTO_MASK)); - } - } -@@ -423,10 +418,6 @@ static int probe_thermostat(struct i2c_client *client, - - thermostat = th; - -- /* record invert bit status because fw can corrupt it after suspend */ -- th->pwm_inv[0] = read_reg(th, MANUAL_MODE[0]) & INVERT_MASK; -- th->pwm_inv[1] = read_reg(th, MANUAL_MODE[1]) & INVERT_MASK; -- - /* be sure to really write fan speed the first time */ - th->last_speed[0] = -2; - th->last_speed[1] = -2; -diff --git a/drivers/macintosh/windfarm_smu_controls.c b/drivers/macintosh/windfarm_smu_controls.c -index 6c68b9e..961fa0e 100644 ---- a/drivers/macintosh/windfarm_smu_controls.c -+++ b/drivers/macintosh/windfarm_smu_controls.c -@@ -202,8 +202,6 @@ static struct smu_fan_control *smu_fan_create(struct device_node *node, - fct->ctrl.name = "cpu-front-fan-1"; - else if (!strcmp(l, "CPU A PUMP")) - fct->ctrl.name = "cpu-pump-0"; -- else if (!strcmp(l, "CPU B PUMP")) -- fct->ctrl.name = "cpu-pump-1"; - else if (!strcmp(l, "Slots Fan") || !strcmp(l, "Slots fan") || - !strcmp(l, "EXPANSION SLOTS INTAKE")) - fct->ctrl.name = "slots-fan"; -diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c -index a5e5f2f..60e2b32 100644 ---- a/drivers/md/bitmap.c -+++ b/drivers/md/bitmap.c -@@ -1078,31 +1078,23 @@ static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap, - * out to disk - */ - --void bitmap_daemon_work(mddev_t *mddev) -+void bitmap_daemon_work(struct bitmap *bitmap) - { -- struct bitmap *bitmap; - unsigned long j; - unsigned long flags; - struct page *page = NULL, *lastpage = NULL; - int blocks; - void *paddr; - -- /* Use a mutex to guard daemon_work against -- * bitmap_destroy. 
-- */ -- mutex_lock(&mddev->bitmap_mutex); -- bitmap = mddev->bitmap; -- if (bitmap == NULL) { -- mutex_unlock(&mddev->bitmap_mutex); -+ if (bitmap == NULL) - return; -- } - if (time_before(jiffies, bitmap->daemon_lastrun + bitmap->daemon_sleep*HZ)) - goto done; - - bitmap->daemon_lastrun = jiffies; - if (bitmap->allclean) { - bitmap->mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT; -- goto done; -+ return; - } - bitmap->allclean = 1; - -@@ -1211,7 +1203,6 @@ void bitmap_daemon_work(mddev_t *mddev) - done: - if (bitmap->allclean == 0) - bitmap->mddev->thread->timeout = bitmap->daemon_sleep * HZ; -- mutex_unlock(&mddev->bitmap_mutex); - } - - static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap, -@@ -1550,9 +1541,9 @@ void bitmap_flush(mddev_t *mddev) - */ - sleep = bitmap->daemon_sleep; - bitmap->daemon_sleep = 0; -- bitmap_daemon_work(mddev); -- bitmap_daemon_work(mddev); -- bitmap_daemon_work(mddev); -+ bitmap_daemon_work(bitmap); -+ bitmap_daemon_work(bitmap); -+ bitmap_daemon_work(bitmap); - bitmap->daemon_sleep = sleep; - bitmap_update_sb(bitmap); - } -@@ -1583,7 +1574,6 @@ static void bitmap_free(struct bitmap *bitmap) - kfree(bp); - kfree(bitmap); - } -- - void bitmap_destroy(mddev_t *mddev) - { - struct bitmap *bitmap = mddev->bitmap; -@@ -1591,9 +1581,7 @@ void bitmap_destroy(mddev_t *mddev) - if (!bitmap) /* there was no bitmap */ - return; - -- mutex_lock(&mddev->bitmap_mutex); - mddev->bitmap = NULL; /* disconnect from the md device */ -- mutex_unlock(&mddev->bitmap_mutex); - if (mddev->thread) - mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT; - -diff --git a/drivers/md/bitmap.h b/drivers/md/bitmap.h -index 7e38d13..e989006 100644 ---- a/drivers/md/bitmap.h -+++ b/drivers/md/bitmap.h -@@ -282,7 +282,7 @@ void bitmap_close_sync(struct bitmap *bitmap); - void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector); - - void bitmap_unplug(struct bitmap *bitmap); --void bitmap_daemon_work(mddev_t *mddev); -+void bitmap_daemon_work(struct bitmap *bitmap); - #endif - - #endif -diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c -index 959d6d1..ed10381 100644 ---- a/drivers/md/dm-crypt.c -+++ b/drivers/md/dm-crypt.c -@@ -1,7 +1,7 @@ - /* - * Copyright (C) 2003 Christophe Saout - * Copyright (C) 2004 Clemens Fruhwirth -- * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved. -+ * Copyright (C) 2006-2008 Red Hat, Inc. All rights reserved. - * - * This file is released under the GPL. - */ -@@ -71,21 +71,10 @@ struct crypt_iv_operations { - int (*ctr)(struct crypt_config *cc, struct dm_target *ti, - const char *opts); - void (*dtr)(struct crypt_config *cc); -- int (*init)(struct crypt_config *cc); -- int (*wipe)(struct crypt_config *cc); -+ const char *(*status)(struct crypt_config *cc); - int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector); - }; - --struct iv_essiv_private { -- struct crypto_cipher *tfm; -- struct crypto_hash *hash_tfm; -- u8 *salt; --}; -- --struct iv_benbi_private { -- int shift; --}; -- - /* - * Crypt: maps a linear range of a block device - * and encrypts / decrypts at the same time. 
-@@ -113,8 +102,8 @@ struct crypt_config { - struct crypt_iv_operations *iv_gen_ops; - char *iv_mode; - union { -- struct iv_essiv_private essiv; -- struct iv_benbi_private benbi; -+ struct crypto_cipher *essiv_tfm; -+ int benbi_shift; - } iv_gen_private; - sector_t iv_offset; - unsigned int iv_size; -@@ -180,114 +169,88 @@ static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector) - return 0; - } - --/* Initialise ESSIV - compute salt but no local memory allocations */ --static int crypt_iv_essiv_init(struct crypt_config *cc) --{ -- struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv; -- struct hash_desc desc; -- struct scatterlist sg; -- int err; -- -- sg_init_one(&sg, cc->key, cc->key_size); -- desc.tfm = essiv->hash_tfm; -- desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; -- -- err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt); -- if (err) -- return err; -- -- return crypto_cipher_setkey(essiv->tfm, essiv->salt, -- crypto_hash_digestsize(essiv->hash_tfm)); --} -- --/* Wipe salt and reset key derived from volume key */ --static int crypt_iv_essiv_wipe(struct crypt_config *cc) --{ -- struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv; -- unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm); -- -- memset(essiv->salt, 0, salt_size); -- -- return crypto_cipher_setkey(essiv->tfm, essiv->salt, salt_size); --} -- --static void crypt_iv_essiv_dtr(struct crypt_config *cc) --{ -- struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv; -- -- crypto_free_cipher(essiv->tfm); -- essiv->tfm = NULL; -- -- crypto_free_hash(essiv->hash_tfm); -- essiv->hash_tfm = NULL; -- -- kzfree(essiv->salt); -- essiv->salt = NULL; --} -- - static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti, - const char *opts) - { -- struct crypto_cipher *essiv_tfm = NULL; -- struct crypto_hash *hash_tfm = NULL; -- u8 *salt = NULL; -+ struct crypto_cipher *essiv_tfm; -+ struct crypto_hash *hash_tfm; -+ struct hash_desc desc; -+ struct scatterlist sg; -+ unsigned int saltsize; -+ u8 *salt; - int err; - -- if (!opts) { -+ if (opts == NULL) { - ti->error = "Digest algorithm missing for ESSIV mode"; - return -EINVAL; - } - -- /* Allocate hash algorithm */ -+ /* Hash the cipher key with the given hash algorithm */ - hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC); - if (IS_ERR(hash_tfm)) { - ti->error = "Error initializing ESSIV hash"; -- err = PTR_ERR(hash_tfm); -- goto bad; -+ return PTR_ERR(hash_tfm); - } - -- salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL); -- if (!salt) { -+ saltsize = crypto_hash_digestsize(hash_tfm); -+ salt = kmalloc(saltsize, GFP_KERNEL); -+ if (salt == NULL) { - ti->error = "Error kmallocing salt storage in ESSIV"; -- err = -ENOMEM; -- goto bad; -+ crypto_free_hash(hash_tfm); -+ return -ENOMEM; - } - -- /* Allocate essiv_tfm */ -+ sg_init_one(&sg, cc->key, cc->key_size); -+ desc.tfm = hash_tfm; -+ desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; -+ err = crypto_hash_digest(&desc, &sg, cc->key_size, salt); -+ crypto_free_hash(hash_tfm); -+ -+ if (err) { -+ ti->error = "Error calculating hash in ESSIV"; -+ kfree(salt); -+ return err; -+ } -+ -+ /* Setup the essiv_tfm with the given salt */ - essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC); - if (IS_ERR(essiv_tfm)) { - ti->error = "Error allocating crypto tfm for ESSIV"; -- err = PTR_ERR(essiv_tfm); -- goto bad; -+ kfree(salt); -+ return PTR_ERR(essiv_tfm); - } - if (crypto_cipher_blocksize(essiv_tfm) != - crypto_ablkcipher_ivsize(cc->tfm)) { - ti->error = "Block 
size of ESSIV cipher does " - "not match IV size of block cipher"; -- err = -EINVAL; -- goto bad; -+ crypto_free_cipher(essiv_tfm); -+ kfree(salt); -+ return -EINVAL; - } -+ err = crypto_cipher_setkey(essiv_tfm, salt, saltsize); -+ if (err) { -+ ti->error = "Failed to set key for ESSIV cipher"; -+ crypto_free_cipher(essiv_tfm); -+ kfree(salt); -+ return err; -+ } -+ kfree(salt); - -- cc->iv_gen_private.essiv.salt = salt; -- cc->iv_gen_private.essiv.tfm = essiv_tfm; -- cc->iv_gen_private.essiv.hash_tfm = hash_tfm; -- -+ cc->iv_gen_private.essiv_tfm = essiv_tfm; - return 0; -+} - --bad: -- if (essiv_tfm && !IS_ERR(essiv_tfm)) -- crypto_free_cipher(essiv_tfm); -- if (hash_tfm && !IS_ERR(hash_tfm)) -- crypto_free_hash(hash_tfm); -- kfree(salt); -- return err; -+static void crypt_iv_essiv_dtr(struct crypt_config *cc) -+{ -+ crypto_free_cipher(cc->iv_gen_private.essiv_tfm); -+ cc->iv_gen_private.essiv_tfm = NULL; - } - - static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector) - { - memset(iv, 0, cc->iv_size); - *(u64 *)iv = cpu_to_le64(sector); -- crypto_cipher_encrypt_one(cc->iv_gen_private.essiv.tfm, iv, iv); -+ crypto_cipher_encrypt_one(cc->iv_gen_private.essiv_tfm, iv, iv); - return 0; - } - -@@ -310,7 +273,7 @@ static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti, - return -EINVAL; - } - -- cc->iv_gen_private.benbi.shift = 9 - log; -+ cc->iv_gen_private.benbi_shift = 9 - log; - - return 0; - } -@@ -325,7 +288,7 @@ static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector) - - memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */ - -- val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi.shift) + 1); -+ val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi_shift) + 1); - put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64))); - - return 0; -@@ -345,8 +308,6 @@ static struct crypt_iv_operations crypt_iv_plain_ops = { - static struct crypt_iv_operations crypt_iv_essiv_ops = { - .ctr = crypt_iv_essiv_ctr, - .dtr = crypt_iv_essiv_dtr, -- .init = crypt_iv_essiv_init, -- .wipe = crypt_iv_essiv_wipe, - .generator = crypt_iv_essiv_gen - }; - -@@ -1078,12 +1039,6 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) - cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0) - goto bad_ivmode; - -- if (cc->iv_gen_ops && cc->iv_gen_ops->init && -- cc->iv_gen_ops->init(cc) < 0) { -- ti->error = "Error initialising IV"; -- goto bad_slab_pool; -- } -- - cc->iv_size = crypto_ablkcipher_ivsize(tfm); - if (cc->iv_size) - /* at least a 64 bit sector number should fit in our buffer */ -@@ -1323,7 +1278,6 @@ static void crypt_resume(struct dm_target *ti) - static int crypt_message(struct dm_target *ti, unsigned argc, char **argv) - { - struct crypt_config *cc = ti->private; -- int ret = -EINVAL; - - if (argc < 2) - goto error; -@@ -1333,22 +1287,10 @@ static int crypt_message(struct dm_target *ti, unsigned argc, char **argv) - DMWARN("not suspended during key manipulation."); - return -EINVAL; - } -- if (argc == 3 && !strnicmp(argv[1], MESG_STR("set"))) { -- ret = crypt_set_key(cc, argv[2]); -- if (ret) -- return ret; -- if (cc->iv_gen_ops && cc->iv_gen_ops->init) -- ret = cc->iv_gen_ops->init(cc); -- return ret; -- } -- if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe"))) { -- if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) { -- ret = cc->iv_gen_ops->wipe(cc); -- if (ret) -- return ret; -- } -+ if (argc == 3 && !strnicmp(argv[1], MESG_STR("set"))) -+ return crypt_set_key(cc, argv[2]); -+ if (argc == 2 && 
!strnicmp(argv[1], MESG_STR("wipe"))) - return crypt_wipe_key(cc); -- } - } - - error: -diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c -index 2052159..7dbe652 100644 ---- a/drivers/md/dm-exception-store.c -+++ b/drivers/md/dm-exception-store.c -@@ -216,8 +216,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv, - type = get_type("N"); - else { - ti->error = "Persistent flag is not P or N"; -- r = -EINVAL; -- goto bad_type; -+ return -EINVAL; - } - - if (!type) { -diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c -index d19854c..a679429 100644 ---- a/drivers/md/dm-ioctl.c -+++ b/drivers/md/dm-ioctl.c -@@ -56,11 +56,6 @@ static void dm_hash_remove_all(int keep_open_devices); - */ - static DECLARE_RWSEM(_hash_lock); - --/* -- * Protects use of mdptr to obtain hash cell name and uuid from mapped device. -- */ --static DEFINE_MUTEX(dm_hash_cells_mutex); -- - static void init_buckets(struct list_head *buckets) - { - unsigned int i; -@@ -211,9 +206,7 @@ static int dm_hash_insert(const char *name, const char *uuid, struct mapped_devi - list_add(&cell->uuid_list, _uuid_buckets + hash_str(uuid)); - } - dm_get(md); -- mutex_lock(&dm_hash_cells_mutex); - dm_set_mdptr(md, cell); -- mutex_unlock(&dm_hash_cells_mutex); - up_write(&_hash_lock); - - return 0; -@@ -231,9 +224,7 @@ static void __hash_remove(struct hash_cell *hc) - /* remove from the dev hash */ - list_del(&hc->uuid_list); - list_del(&hc->name_list); -- mutex_lock(&dm_hash_cells_mutex); - dm_set_mdptr(hc->md, NULL); -- mutex_unlock(&dm_hash_cells_mutex); - - table = dm_get_table(hc->md); - if (table) { -@@ -330,9 +321,7 @@ static int dm_hash_rename(uint32_t cookie, const char *old, const char *new) - */ - list_del(&hc->name_list); - old_name = hc->name; -- mutex_lock(&dm_hash_cells_mutex); - hc->name = new_name; -- mutex_unlock(&dm_hash_cells_mutex); - list_add(&hc->name_list, _name_buckets + hash_str(new_name)); - - /* -@@ -1593,7 +1582,8 @@ int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid) - if (!md) - return -ENXIO; - -- mutex_lock(&dm_hash_cells_mutex); -+ dm_get(md); -+ down_read(&_hash_lock); - hc = dm_get_mdptr(md); - if (!hc || hc->md != md) { - r = -ENXIO; -@@ -1606,7 +1596,8 @@ int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid) - strcpy(uuid, hc->uuid ? : ""); - - out: -- mutex_unlock(&dm_hash_cells_mutex); -+ up_read(&_hash_lock); -+ dm_put(md); - - return r; - } -diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c -index f1c8cae..54abf9e 100644 ---- a/drivers/md/dm-log-userspace-transfer.c -+++ b/drivers/md/dm-log-userspace-transfer.c -@@ -172,15 +172,11 @@ int dm_consult_userspace(const char *uuid, uint64_t luid, int request_type, - { - int r = 0; - size_t dummy = 0; -- int overhead_size = sizeof(struct dm_ulog_request) + sizeof(struct cn_msg); -+ int overhead_size = -+ sizeof(struct dm_ulog_request *) + sizeof(struct cn_msg); - struct dm_ulog_request *tfr = prealloced_ulog_tfr; - struct receiving_pkg pkg; - -- /* -- * Given the space needed to hold the 'struct cn_msg' and -- * 'struct dm_ulog_request' - do we have enough payload -- * space remaining? 
-- */ - if (data_size > (DM_ULOG_PREALLOCED_SIZE - overhead_size)) { - DMINFO("Size of tfr exceeds preallocated size"); - return -EINVAL; -@@ -195,7 +191,7 @@ resend: - */ - mutex_lock(&dm_ulog_lock); - -- memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - sizeof(struct cn_msg)); -+ memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - overhead_size); - memcpy(tfr->uuid, uuid, DM_UUID_LEN); - tfr->luid = luid; - tfr->seq = dm_ulog_seq++; -diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c -index 8a4a9c8..3a3ba46 100644 ---- a/drivers/md/dm-snap.c -+++ b/drivers/md/dm-snap.c -@@ -553,8 +553,6 @@ static int init_hash_tables(struct dm_snapshot *s) - hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift; - hash_size = min(hash_size, max_buckets); - -- if (hash_size < 64) -- hash_size = 64; - hash_size = rounddown_pow_of_two(hash_size); - if (init_exception_table(&s->complete, hash_size, - DM_CHUNK_CONSECUTIVE_BITS)) -@@ -1154,11 +1152,10 @@ static int snapshot_status(struct dm_target *ti, status_type_t type, - unsigned sz = 0; - struct dm_snapshot *snap = ti->private; - -+ down_write(&snap->lock); -+ - switch (type) { - case STATUSTYPE_INFO: -- -- down_write(&snap->lock); -- - if (!snap->valid) - DMEMIT("Invalid"); - else { -@@ -1174,9 +1171,6 @@ static int snapshot_status(struct dm_target *ti, status_type_t type, - else - DMEMIT("Unknown"); - } -- -- up_write(&snap->lock); -- - break; - - case STATUSTYPE_TABLE: -@@ -1191,6 +1185,8 @@ static int snapshot_status(struct dm_target *ti, status_type_t type, - break; - } - -+ up_write(&snap->lock); -+ - return 0; - } - -diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c -index bd58703..e0efc1a 100644 ---- a/drivers/md/dm-stripe.c -+++ b/drivers/md/dm-stripe.c -@@ -110,7 +110,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv) - } - - stripes = simple_strtoul(argv[0], &end, 10); -- if (!stripes || *end) { -+ if (*end) { - ti->error = "Invalid stripe count"; - return -EINVAL; - } -diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c -index e869128..1a6cb3c 100644 ---- a/drivers/md/dm-table.c -+++ b/drivers/md/dm-table.c -@@ -499,15 +499,16 @@ int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, - return 0; - } - -- if (bdev_stack_limits(limits, bdev, start) < 0) -- DMWARN("%s: adding target device %s caused an alignment inconsistency: " -+ if (blk_stack_limits(limits, &q->limits, start << 9) < 0) -+ DMWARN("%s: target device %s is misaligned: " - "physical_block_size=%u, logical_block_size=%u, " - "alignment_offset=%u, start=%llu", - dm_device_name(ti->table->md), bdevname(bdev, b), - q->limits.physical_block_size, - q->limits.logical_block_size, - q->limits.alignment_offset, -- (unsigned long long) start << SECTOR_SHIFT); -+ (unsigned long long) start << 9); -+ - - /* - * Check if merge fn is supported. -@@ -1024,9 +1025,9 @@ combine_limits: - * for the table. - */ - if (blk_stack_limits(limits, &ti_limits, 0) < 0) -- DMWARN("%s: adding target device " -+ DMWARN("%s: target device " - "(start sect %llu len %llu) " -- "caused an alignment inconsistency", -+ "is misaligned", - dm_device_name(table->md), - (unsigned long long) ti->begin, - (unsigned long long) ti->len); -@@ -1078,6 +1079,15 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, - struct queue_limits *limits) - { - /* -+ * Each target device in the table has a data area that should normally -+ * be aligned such that the DM device's alignment_offset is 0. 
-+ * FIXME: Propagate alignment_offsets up the stack and warn of -+ * sub-optimal or inconsistent settings. -+ */ -+ limits->alignment_offset = 0; -+ limits->misaligned = 0; -+ -+ /* - * Copy table's limits to the DM device's request_queue - */ - q->limits = *limits; -diff --git a/drivers/md/dm-uevent.c b/drivers/md/dm-uevent.c -index c7c555a..6f65883 100644 ---- a/drivers/md/dm-uevent.c -+++ b/drivers/md/dm-uevent.c -@@ -139,13 +139,14 @@ void dm_send_uevents(struct list_head *events, struct kobject *kobj) - list_del_init(&event->elist); - - /* -- * When a device is being removed this copy fails and we -- * discard these unsent events. -+ * Need to call dm_copy_name_and_uuid from here for now. -+ * Context of previous var adds and locking used for -+ * hash_cell not compatable. - */ - if (dm_copy_name_and_uuid(event->md, event->name, - event->uuid)) { -- DMINFO("%s: skipping sending uevent for lost device", -- __func__); -+ DMERR("%s: dm_copy_name_and_uuid() failed", -+ __func__); - goto uevent_free; - } - -diff --git a/drivers/md/md.c b/drivers/md/md.c -index 08f7471..b182f86 100644 ---- a/drivers/md/md.c -+++ b/drivers/md/md.c -@@ -282,9 +282,7 @@ static void mddev_put(mddev_t *mddev) - if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock)) - return; - if (!mddev->raid_disks && list_empty(&mddev->disks) && -- mddev->ctime == 0 && !mddev->hold_active) { -- /* Array is not configured at all, and not held active, -- * so destroy it */ -+ !mddev->hold_active) { - list_del(&mddev->all_mddevs); - if (mddev->gendisk) { - /* we did a probe so need to clean up. -@@ -369,7 +367,6 @@ static mddev_t * mddev_find(dev_t unit) - - mutex_init(&new->open_mutex); - mutex_init(&new->reconfig_mutex); -- mutex_init(&new->bitmap_mutex); - INIT_LIST_HEAD(&new->disks); - INIT_LIST_HEAD(&new->all_mddevs); - init_timer(&new->safemode_timer); -@@ -4173,7 +4170,7 @@ static int do_md_run(mddev_t * mddev) - mddev->barriers_work = 1; - mddev->ok_start_degraded = start_dirty_degraded; - -- if (start_readonly && mddev->ro == 0) -+ if (start_readonly) - mddev->ro = 2; /* read-only, but switch on first write */ - - err = mddev->pers->run(mddev); -@@ -5073,10 +5070,6 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info) - mddev->minor_version = info->minor_version; - mddev->patch_version = info->patch_version; - mddev->persistent = !info->not_persistent; -- /* ensure mddev_put doesn't delete this now that there -- * is some minimal configuration. -- */ -- mddev->ctime = get_seconds(); - return 0; - } - mddev->major_version = MD_MAJOR_VERSION; -@@ -6636,7 +6629,7 @@ void md_check_recovery(mddev_t *mddev) - - - if (mddev->bitmap) -- bitmap_daemon_work(mddev); -+ bitmap_daemon_work(mddev->bitmap); - - if (mddev->ro) - return; -diff --git a/drivers/md/md.h b/drivers/md/md.h -index 87430fe..f184b69 100644 ---- a/drivers/md/md.h -+++ b/drivers/md/md.h -@@ -289,7 +289,6 @@ struct mddev_s - * hot-adding a bitmap. It should - * eventually be settable by sysfs. 
- */ -- struct mutex bitmap_mutex; - - struct list_head all_mddevs; - }; -diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c -index 431b9b2..d29215d 100644 ---- a/drivers/md/raid5.c -+++ b/drivers/md/raid5.c -@@ -5432,11 +5432,11 @@ static int raid5_start_reshape(mddev_t *mddev) - !test_bit(Faulty, &rdev->flags)) { - if (raid5_add_disk(mddev, rdev) == 0) { - char nm[20]; -- if (rdev->raid_disk >= conf->previous_raid_disks) { -+ if (rdev->raid_disk >= conf->previous_raid_disks) - set_bit(In_sync, &rdev->flags); -- added_devices++; -- } else -+ else - rdev->recovery_offset = 0; -+ added_devices++; - sprintf(nm, "rd%d", rdev->raid_disk); - if (sysfs_create_link(&mddev->kobj, - &rdev->kobj, nm)) -@@ -5448,12 +5448,9 @@ static int raid5_start_reshape(mddev_t *mddev) - break; - } - -- /* When a reshape changes the number of devices, ->degraded -- * is measured against the large of the pre and post number of -- * devices.*/ - if (mddev->delta_disks > 0) { - spin_lock_irqsave(&conf->device_lock, flags); -- mddev->degraded += (conf->raid_disks - conf->previous_raid_disks) -+ mddev->degraded = (conf->raid_disks - conf->previous_raid_disks) - - added_devices; - spin_unlock_irqrestore(&conf->device_lock, flags); - } -diff --git a/drivers/media/common/tuners/mxl5007t.c b/drivers/media/common/tuners/mxl5007t.c -index 7eb1bf7..2d02698 100644 ---- a/drivers/media/common/tuners/mxl5007t.c -+++ b/drivers/media/common/tuners/mxl5007t.c -@@ -196,7 +196,7 @@ static void copy_reg_bits(struct reg_pair_t *reg_pair1, - i = j = 0; - - while (reg_pair1[i].reg || reg_pair1[i].val) { -- while (reg_pair2[j].reg || reg_pair2[j].val) { -+ while (reg_pair2[j].reg || reg_pair2[j].reg) { - if (reg_pair1[i].reg != reg_pair2[j].reg) { - j++; - continue; -diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c -index 9ddc579..c37790a 100644 ---- a/drivers/media/dvb/dvb-core/dmxdev.c -+++ b/drivers/media/dvb/dvb-core/dmxdev.c -@@ -761,6 +761,7 @@ static int dvb_demux_open(struct inode *inode, struct file *file) - dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192); - dmxdevfilter->type = DMXDEV_TYPE_NONE; - dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED); -+ INIT_LIST_HEAD(&dmxdevfilter->feed.ts); - init_timer(&dmxdevfilter->timer); - - dvbdev->users++; -@@ -886,7 +887,6 @@ static int dvb_dmxdev_pes_filter_set(struct dmxdev *dmxdev, - dmxdevfilter->type = DMXDEV_TYPE_PES; - memcpy(&dmxdevfilter->params, params, - sizeof(struct dmx_pes_filter_params)); -- INIT_LIST_HEAD(&dmxdevfilter->feed.ts); - - dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET); - -diff --git a/drivers/media/dvb/siano/smsusb.c b/drivers/media/dvb/siano/smsusb.c -index 6b03dbf..8f88a58 100644 ---- a/drivers/media/dvb/siano/smsusb.c -+++ b/drivers/media/dvb/siano/smsusb.c -@@ -533,18 +533,8 @@ struct usb_device_id smsusb_id_table[] = { - .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM }, - { USB_DEVICE(0x2040, 0xb910), - .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM }, -- { USB_DEVICE(0x2040, 0xb980), -- .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM }, -- { USB_DEVICE(0x2040, 0xb990), -- .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM }, - { USB_DEVICE(0x2040, 0xc000), - .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM }, -- { USB_DEVICE(0x2040, 0xc010), -- .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM }, -- { USB_DEVICE(0x2040, 0xc080), -- .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM }, -- { USB_DEVICE(0x2040, 0xc090), -- .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM }, - { } /* Terminating 
entry */ - }; - -diff --git a/drivers/media/video/gspca/ov519.c b/drivers/media/video/gspca/ov519.c -index e165578..a5c190e 100644 ---- a/drivers/media/video/gspca/ov519.c -+++ b/drivers/media/video/gspca/ov519.c -@@ -3364,7 +3364,6 @@ static const __devinitdata struct usb_device_id device_table[] = { - {USB_DEVICE(0x041e, 0x4061), .driver_info = BRIDGE_OV519 }, - {USB_DEVICE(0x041e, 0x4064), - .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED }, -- {USB_DEVICE(0x041e, 0x4067), .driver_info = BRIDGE_OV519 }, - {USB_DEVICE(0x041e, 0x4068), - .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED }, - {USB_DEVICE(0x045e, 0x028c), .driver_info = BRIDGE_OV519 }, -diff --git a/drivers/media/video/gspca/sn9c20x.c b/drivers/media/video/gspca/sn9c20x.c -index e0a3b75..cdad3db 100644 ---- a/drivers/media/video/gspca/sn9c20x.c -+++ b/drivers/media/video/gspca/sn9c20x.c -@@ -2319,7 +2319,7 @@ static void do_autogain(struct gspca_dev *gspca_dev, u16 avg_lum) - } - } - if (avg_lum > MAX_AVG_LUM) { -- if (sd->gain >= 1) { -+ if (sd->gain - 1 >= 0) { - sd->gain--; - set_gain(gspca_dev); - } -diff --git a/drivers/media/video/gspca/sunplus.c b/drivers/media/video/gspca/sunplus.c -index 28b4625..aa8f995 100644 ---- a/drivers/media/video/gspca/sunplus.c -+++ b/drivers/media/video/gspca/sunplus.c -@@ -705,7 +705,7 @@ static void spca504B_SetSizeType(struct gspca_dev *gspca_dev) - rc = spca504B_PollingDataReady(gspca_dev); - - /* Init the cam width height with some values get on init ? */ -- reg_w_riv(dev, 0x31, 0x04, 0); -+ reg_w_riv(dev, 0x31, 0, 0x04); - spca504B_WaitCmdStatus(gspca_dev); - rc = spca504B_PollingDataReady(gspca_dev); - break; -@@ -807,14 +807,14 @@ static void init_ctl_reg(struct gspca_dev *gspca_dev) - default: - /* case BRIDGE_SPCA533: */ - /* case BRIDGE_SPCA504B: */ -- reg_w_riv(dev, 0, 0x21ad, 0x00); /* hue */ -- reg_w_riv(dev, 0, 0x21ac, 0x01); /* sat/hue */ -- reg_w_riv(dev, 0, 0x21a3, 0x00); /* gamma */ -+ reg_w_riv(dev, 0, 0x00, 0x21ad); /* hue */ -+ reg_w_riv(dev, 0, 0x01, 0x21ac); /* sat/hue */ -+ reg_w_riv(dev, 0, 0x00, 0x21a3); /* gamma */ - break; - case BRIDGE_SPCA536: -- reg_w_riv(dev, 0, 0x20f5, 0x40); -- reg_w_riv(dev, 0, 0x20f4, 0x01); -- reg_w_riv(dev, 0, 0x2089, 0x00); -+ reg_w_riv(dev, 0, 0x40, 0x20f5); -+ reg_w_riv(dev, 0, 0x01, 0x20f4); -+ reg_w_riv(dev, 0, 0x00, 0x2089); - break; - } - if (pollreg) -@@ -888,11 +888,11 @@ static int sd_init(struct gspca_dev *gspca_dev) - switch (sd->bridge) { - case BRIDGE_SPCA504B: - reg_w_riv(dev, 0x1d, 0x00, 0); -- reg_w_riv(dev, 0, 0x2306, 0x01); -- reg_w_riv(dev, 0, 0x0d04, 0x00); -- reg_w_riv(dev, 0, 0x2000, 0x00); -- reg_w_riv(dev, 0, 0x2301, 0x13); -- reg_w_riv(dev, 0, 0x2306, 0x00); -+ reg_w_riv(dev, 0, 0x01, 0x2306); -+ reg_w_riv(dev, 0, 0x00, 0x0d04); -+ reg_w_riv(dev, 0, 0x00, 0x2000); -+ reg_w_riv(dev, 0, 0x13, 0x2301); -+ reg_w_riv(dev, 0, 0x00, 0x2306); - /* fall thru */ - case BRIDGE_SPCA533: - spca504B_PollingDataReady(gspca_dev); -@@ -1011,7 +1011,7 @@ static int sd_start(struct gspca_dev *gspca_dev) - spca504B_WaitCmdStatus(gspca_dev); - break; - default: -- reg_w_riv(dev, 0x31, 0x04, 0); -+ reg_w_riv(dev, 0x31, 0, 0x04); - spca504B_WaitCmdStatus(gspca_dev); - spca504B_PollingDataReady(gspca_dev); - break; -diff --git a/drivers/media/video/ov511.c b/drivers/media/video/ov511.c -index 2bed9e2..0bc2cf5 100644 ---- a/drivers/media/video/ov511.c -+++ b/drivers/media/video/ov511.c -@@ -5878,7 +5878,7 @@ ov51x_probe(struct usb_interface *intf, const struct usb_device_id *id) - goto error; - } - -- mutex_unlock(&ov->lock); -+ 
mutex_lock(&ov->lock); - - return 0; - -diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c -index 6781a07..0901322 100644 ---- a/drivers/media/video/saa7134/saa7134-cards.c -+++ b/drivers/media/video/saa7134/saa7134-cards.c -@@ -5279,30 +5279,6 @@ struct saa7134_board saa7134_boards[] = { - .amux = TV, - }, - }, -- [SAA7134_BOARD_ASUS_EUROPA_HYBRID] = { -- .name = "Asus Europa Hybrid OEM", -- .audio_clock = 0x00187de7, -- .tuner_type = TUNER_PHILIPS_TD1316, -- .radio_type = UNSET, -- .tuner_addr = 0x61, -- .radio_addr = ADDR_UNSET, -- .tda9887_conf = TDA9887_PRESENT | TDA9887_PORT1_ACTIVE, -- .mpeg = SAA7134_MPEG_DVB, -- .inputs = { { -- .name = name_tv, -- .vmux = 3, -- .amux = TV, -- .tv = 1, -- }, { -- .name = name_comp1, -- .vmux = 4, -- .amux = LINE2, -- }, { -- .name = name_svideo, -- .vmux = 8, -- .amux = LINE2, -- } }, -- }, - - }; - -@@ -6442,12 +6418,6 @@ struct pci_device_id saa7134_pci_tbl[] = { - .subdevice = 0x2004, - .driver_data = SAA7134_BOARD_ZOLID_HYBRID_PCI, - }, { -- .vendor = PCI_VENDOR_ID_PHILIPS, -- .device = PCI_DEVICE_ID_PHILIPS_SAA7134, -- .subvendor = 0x1043, -- .subdevice = 0x4847, -- .driver_data = SAA7134_BOARD_ASUS_EUROPA_HYBRID, -- }, { - /* --- boards without eeprom + subsystem ID --- */ - .vendor = PCI_VENDOR_ID_PHILIPS, - .device = PCI_DEVICE_ID_PHILIPS_SAA7134, -@@ -7109,7 +7079,6 @@ int saa7134_board_init2(struct saa7134_dev *dev) - /* break intentionally omitted */ - case SAA7134_BOARD_VIDEOMATE_DVBT_300: - case SAA7134_BOARD_ASUS_EUROPA2_HYBRID: -- case SAA7134_BOARD_ASUS_EUROPA_HYBRID: - { - - /* The Philips EUROPA based hybrid boards have the tuner -diff --git a/drivers/media/video/saa7134/saa7134-dvb.c b/drivers/media/video/saa7134/saa7134-dvb.c -index b8a805c..a26e997 100644 ---- a/drivers/media/video/saa7134/saa7134-dvb.c -+++ b/drivers/media/video/saa7134/saa7134-dvb.c -@@ -1116,7 +1116,6 @@ static int dvb_init(struct saa7134_dev *dev) - break; - case SAA7134_BOARD_PHILIPS_EUROPA: - case SAA7134_BOARD_VIDEOMATE_DVBT_300: -- case SAA7134_BOARD_ASUS_EUROPA_HYBRID: - fe0->dvb.frontend = dvb_attach(tda10046_attach, - &philips_europa_config, - &dev->i2c_adap); -diff --git a/drivers/media/video/saa7134/saa7134.h b/drivers/media/video/saa7134/saa7134.h -index 94e1a3b..f8697d4 100644 ---- a/drivers/media/video/saa7134/saa7134.h -+++ b/drivers/media/video/saa7134/saa7134.h -@@ -297,7 +297,6 @@ struct saa7134_format { - #define SAA7134_BOARD_BEHOLD_X7 171 - #define SAA7134_BOARD_ROVERMEDIA_LINK_PRO_FM 172 - #define SAA7134_BOARD_ZOLID_HYBRID_PCI 173 --#define SAA7134_BOARD_ASUS_EUROPA_HYBRID 174 - - #define SAA7134_MAXBOARDS 32 - #define SAA7134_INPUT_MAX 8 -diff --git a/drivers/media/video/uvc/uvc_ctrl.c b/drivers/media/video/uvc/uvc_ctrl.c -index 4a293b4..1b89735 100644 ---- a/drivers/media/video/uvc/uvc_ctrl.c -+++ b/drivers/media/video/uvc/uvc_ctrl.c -@@ -1405,7 +1405,7 @@ uvc_ctrl_prune_entity(struct uvc_device *dev, struct uvc_entity *entity) - size = entity->processing.bControlSize; - - for (i = 0; i < ARRAY_SIZE(blacklist); ++i) { -- if (!usb_match_one_id(dev->intf, &blacklist[i].id)) -+ if (!usb_match_id(dev->intf, &blacklist[i].id)) - continue; - - if (blacklist[i].index >= 8 * size || -diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c -index b6992b7..610e914 100644 ---- a/drivers/message/fusion/mptbase.c -+++ b/drivers/message/fusion/mptbase.c -@@ -4330,8 +4330,6 @@ initChainBuffers(MPT_ADAPTER *ioc) - - if (ioc->bus_type == SPI) - num_chain *= 
MPT_SCSI_CAN_QUEUE; -- else if (ioc->bus_type == SAS) -- num_chain *= MPT_SAS_CAN_QUEUE; - else - num_chain *= MPT_FC_CAN_QUEUE; - -diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c -index 6cea718..c295786 100644 ---- a/drivers/message/fusion/mptscsih.c -+++ b/drivers/message/fusion/mptscsih.c -@@ -1720,7 +1720,7 @@ mptscsih_abort(struct scsi_cmnd * SCpnt) - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "task abort: " - "Command not in the active list! (sc=%p)\n", ioc->name, - SCpnt)); -- retval = SUCCESS; -+ retval = 0; - goto out; - } - -diff --git a/drivers/mfd/wm8350-core.c b/drivers/mfd/wm8350-core.c -index ca6b098..ba27c9d 100644 ---- a/drivers/mfd/wm8350-core.c -+++ b/drivers/mfd/wm8350-core.c -@@ -134,7 +134,8 @@ static inline int is_reg_locked(struct wm8350 *wm8350, u8 reg) - wm8350->reg_cache[WM8350_SECURITY] == WM8350_UNLOCK_KEY) - return 0; - -- if ((reg >= WM8350_GPIO_FUNCTION_SELECT_1 && -+ if ((reg == WM8350_GPIO_CONFIGURATION_I_O) || -+ (reg >= WM8350_GPIO_FUNCTION_SELECT_1 && - reg <= WM8350_GPIO_FUNCTION_SELECT_4) || - (reg >= WM8350_BATTERY_CHARGER_CONTROL_1 && - reg <= WM8350_BATTERY_CHARGER_CONTROL_3)) -diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c -index 1eac626..e9eae4a 100644 ---- a/drivers/misc/enclosure.c -+++ b/drivers/misc/enclosure.c -@@ -391,7 +391,6 @@ static const char *const enclosure_status [] = { - [ENCLOSURE_STATUS_NOT_INSTALLED] = "not installed", - [ENCLOSURE_STATUS_UNKNOWN] = "unknown", - [ENCLOSURE_STATUS_UNAVAILABLE] = "unavailable", -- [ENCLOSURE_STATUS_MAX] = NULL, - }; - - static const char *const enclosure_type [] = { -diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c -index 1f552c6..85f0e8c 100644 ---- a/drivers/mmc/card/block.c -+++ b/drivers/mmc/card/block.c -@@ -85,14 +85,7 @@ static void mmc_blk_put(struct mmc_blk_data *md) - mutex_lock(&open_lock); - md->usage--; - if (md->usage == 0) { -- int devmaj = MAJOR(disk_devt(md->disk)); - int devidx = MINOR(disk_devt(md->disk)) >> MMC_SHIFT; -- -- if (!devmaj) -- devidx = md->disk->first_minor >> MMC_SHIFT; -- -- blk_cleanup_queue(md->queue.queue); -- - __clear_bit(devidx, dev_use); - - put_disk(md->disk); -@@ -620,7 +613,6 @@ static int mmc_blk_probe(struct mmc_card *card) - return 0; - - out: -- mmc_cleanup_queue(&md->queue); - mmc_blk_put(md); - - return err; -diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c -index c5a7a85..49e5823 100644 ---- a/drivers/mmc/card/queue.c -+++ b/drivers/mmc/card/queue.c -@@ -90,10 +90,9 @@ static void mmc_request(struct request_queue *q) - struct request *req; - - if (!mq) { -- while ((req = blk_fetch_request(q)) != NULL) { -- req->cmd_flags |= REQ_QUIET; -+ printk(KERN_ERR "MMC: killing requests for dead queue\n"); -+ while ((req = blk_fetch_request(q)) != NULL) - __blk_end_request_all(req, -EIO); -- } - return; - } - -@@ -224,18 +223,17 @@ void mmc_cleanup_queue(struct mmc_queue *mq) - struct request_queue *q = mq->queue; - unsigned long flags; - -+ /* Mark that we should start throwing out stragglers */ -+ spin_lock_irqsave(q->queue_lock, flags); -+ q->queuedata = NULL; -+ spin_unlock_irqrestore(q->queue_lock, flags); -+ - /* Make sure the queue isn't suspended, as that will deadlock */ - mmc_queue_resume(mq); - - /* Then terminate our worker thread */ - kthread_stop(mq->thread); - -- /* Empty the queue */ -- spin_lock_irqsave(q->queue_lock, flags); -- q->queuedata = NULL; -- blk_start_queue(q); -- spin_unlock_irqrestore(q->queue_lock, flags); -- - if (mq->bounce_sg) - 
kfree(mq->bounce_sg); - mq->bounce_sg = NULL; -@@ -247,6 +245,8 @@ void mmc_cleanup_queue(struct mmc_queue *mq) - kfree(mq->bounce_buf); - mq->bounce_buf = NULL; - -+ blk_cleanup_queue(mq->queue); -+ - mq->card = NULL; - } - EXPORT_SYMBOL(mmc_cleanup_queue); -diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c -index 111ea41..f237ddb 100644 ---- a/drivers/mtd/ubi/cdev.c -+++ b/drivers/mtd/ubi/cdev.c -@@ -853,6 +853,7 @@ static long ubi_cdev_ioctl(struct file *file, unsigned int cmd, - break; - } - -+ req.name[req.name_len] = '\0'; - err = verify_mkvol_req(ubi, &req); - if (err) - break; -diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c -index 425bf5a..74fdc40 100644 ---- a/drivers/mtd/ubi/upd.c -+++ b/drivers/mtd/ubi/upd.c -@@ -147,15 +147,12 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol, - } - - if (bytes == 0) { -- err = ubi_wl_flush(ubi); -- if (err) -- return err; -- - err = clear_update_marker(ubi, vol, 0); - if (err) - return err; -- vol->updating = 0; -- return 0; -+ err = ubi_wl_flush(ubi); -+ if (!err) -+ vol->updating = 0; - } - - vol->upd_buf = vmalloc(ubi->leb_size); -@@ -365,16 +362,16 @@ int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol, - - ubi_assert(vol->upd_received <= vol->upd_bytes); - if (vol->upd_received == vol->upd_bytes) { -- err = ubi_wl_flush(ubi); -- if (err) -- return err; - /* The update is finished, clear the update marker */ - err = clear_update_marker(ubi, vol, vol->upd_bytes); - if (err) - return err; -- vol->updating = 0; -- err = to_write; -- vfree(vol->upd_buf); -+ err = ubi_wl_flush(ubi); -+ if (err == 0) { -+ vol->updating = 0; -+ err = to_write; -+ vfree(vol->upd_buf); -+ } - } - - return err; -diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c -index 4004402..1afc61e 100644 ---- a/drivers/mtd/ubi/vtbl.c -+++ b/drivers/mtd/ubi/vtbl.c -@@ -566,7 +566,6 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si, - vol->reserved_pebs = be32_to_cpu(vtbl[i].reserved_pebs); - vol->alignment = be32_to_cpu(vtbl[i].alignment); - vol->data_pad = be32_to_cpu(vtbl[i].data_pad); -- vol->upd_marker = vtbl[i].upd_marker; - vol->vol_type = vtbl[i].vol_type == UBI_VID_DYNAMIC ? 
- UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME; - vol->name_len = be16_to_cpu(vtbl[i].name_len); -diff --git a/drivers/net/atl1c/atl1c.h b/drivers/net/atl1c/atl1c.h -index 790e55b..2a1120a 100644 ---- a/drivers/net/atl1c/atl1c.h -+++ b/drivers/net/atl1c/atl1c.h -@@ -534,9 +534,6 @@ struct atl1c_adapter { - #define __AT_TESTING 0x0001 - #define __AT_RESETTING 0x0002 - #define __AT_DOWN 0x0003 -- u8 work_event; --#define ATL1C_WORK_EVENT_RESET 0x01 --#define ATL1C_WORK_EVENT_LINK_CHANGE 0x02 - u32 msg_enable; - - bool have_msi; -@@ -548,7 +545,8 @@ struct atl1c_adapter { - spinlock_t tx_lock; - atomic_t irq_sem; - -- struct work_struct common_task; -+ struct work_struct reset_task; -+ struct work_struct link_chg_task; - struct timer_list watchdog_timer; - struct timer_list phy_config_timer; - -diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c -index be00ee9..1372e9a 100644 ---- a/drivers/net/atl1c/atl1c_main.c -+++ b/drivers/net/atl1c/atl1c_main.c -@@ -198,12 +198,27 @@ static void atl1c_phy_config(unsigned long data) - - void atl1c_reinit_locked(struct atl1c_adapter *adapter) - { -+ - WARN_ON(in_interrupt()); - atl1c_down(adapter); - atl1c_up(adapter); - clear_bit(__AT_RESETTING, &adapter->flags); - } - -+static void atl1c_reset_task(struct work_struct *work) -+{ -+ struct atl1c_adapter *adapter; -+ struct net_device *netdev; -+ -+ adapter = container_of(work, struct atl1c_adapter, reset_task); -+ netdev = adapter->netdev; -+ -+ netif_device_detach(netdev); -+ atl1c_down(adapter); -+ atl1c_up(adapter); -+ netif_device_attach(netdev); -+} -+ - static void atl1c_check_link_status(struct atl1c_adapter *adapter) - { - struct atl1c_hw *hw = &adapter->hw; -@@ -260,6 +275,18 @@ static void atl1c_check_link_status(struct atl1c_adapter *adapter) - } - } - -+/* -+ * atl1c_link_chg_task - deal with link change event Out of interrupt context -+ * @netdev: network interface device structure -+ */ -+static void atl1c_link_chg_task(struct work_struct *work) -+{ -+ struct atl1c_adapter *adapter; -+ -+ adapter = container_of(work, struct atl1c_adapter, link_chg_task); -+ atl1c_check_link_status(adapter); -+} -+ - static void atl1c_link_chg_event(struct atl1c_adapter *adapter) - { - struct net_device *netdev = adapter->netdev; -@@ -284,39 +311,19 @@ static void atl1c_link_chg_event(struct atl1c_adapter *adapter) - adapter->link_speed = SPEED_0; - } - } -- -- adapter->work_event |= ATL1C_WORK_EVENT_LINK_CHANGE; -- schedule_work(&adapter->common_task); --} -- --static void atl1c_common_task(struct work_struct *work) --{ -- struct atl1c_adapter *adapter; -- struct net_device *netdev; -- -- adapter = container_of(work, struct atl1c_adapter, common_task); -- netdev = adapter->netdev; -- -- if (adapter->work_event & ATL1C_WORK_EVENT_RESET) { -- netif_device_detach(netdev); -- atl1c_down(adapter); -- atl1c_up(adapter); -- netif_device_attach(netdev); -- return; -- } -- -- if (adapter->work_event & ATL1C_WORK_EVENT_LINK_CHANGE) -- atl1c_check_link_status(adapter); -- -- return; -+ schedule_work(&adapter->link_chg_task); - } - -- - static void atl1c_del_timer(struct atl1c_adapter *adapter) - { - del_timer_sync(&adapter->phy_config_timer); - } - -+static void atl1c_cancel_work(struct atl1c_adapter *adapter) -+{ -+ cancel_work_sync(&adapter->reset_task); -+ cancel_work_sync(&adapter->link_chg_task); -+} - - /* - * atl1c_tx_timeout - Respond to a Tx Hang -@@ -327,8 +334,7 @@ static void atl1c_tx_timeout(struct net_device *netdev) - struct atl1c_adapter *adapter = netdev_priv(netdev); - - /* Do the 
reset outside of interrupt context */ -- adapter->work_event |= ATL1C_WORK_EVENT_RESET; -- schedule_work(&adapter->common_task); -+ schedule_work(&adapter->reset_task); - } - - /* -@@ -1530,8 +1536,7 @@ static irqreturn_t atl1c_intr(int irq, void *data) - /* reset MAC */ - hw->intr_mask &= ~ISR_ERROR; - AT_WRITE_REG(hw, REG_IMR, hw->intr_mask); -- adapter->work_event |= ATL1C_WORK_EVENT_RESET; -- schedule_work(&adapter->common_task); -+ schedule_work(&adapter->reset_task); - break; - } - -@@ -2195,7 +2200,8 @@ void atl1c_down(struct atl1c_adapter *adapter) - struct net_device *netdev = adapter->netdev; - - atl1c_del_timer(adapter); -- adapter->work_event = 0; /* clear all event */ -+ atl1c_cancel_work(adapter); -+ - /* signal that we're down so the interrupt handler does not - * reschedule our watchdog timer */ - set_bit(__AT_DOWN, &adapter->flags); -@@ -2595,8 +2601,8 @@ static int __devinit atl1c_probe(struct pci_dev *pdev, - adapter->hw.mac_addr[4], adapter->hw.mac_addr[5]); - - atl1c_hw_set_mac_addr(&adapter->hw); -- INIT_WORK(&adapter->common_task, atl1c_common_task); -- adapter->work_event = 0; -+ INIT_WORK(&adapter->reset_task, atl1c_reset_task); -+ INIT_WORK(&adapter->link_chg_task, atl1c_link_chg_task); - err = register_netdev(netdev); - if (err) { - dev_err(&pdev->dev, "register netdevice failed\n"); -diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c -index 1b5facf..955da73 100644 ---- a/drivers/net/atl1e/atl1e_main.c -+++ b/drivers/net/atl1e/atl1e_main.c -@@ -1666,6 +1666,41 @@ static int atl1e_tso_csum(struct atl1e_adapter *adapter, - } - return 0; - } -+ -+ if (offload_type & SKB_GSO_TCPV6) { -+ real_len = (((unsigned char *)ipv6_hdr(skb) - skb->data) -+ + ntohs(ipv6_hdr(skb)->payload_len)); -+ if (real_len < skb->len) -+ pskb_trim(skb, real_len); -+ -+ /* check payload == 0 byte ? 
*/ -+ hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb)); -+ if (unlikely(skb->len == hdr_len)) { -+ /* only xsum need */ -+ dev_warn(&pdev->dev, -+ "IPV6 tso with zero data??\n"); -+ goto check_sum; -+ } else { -+ tcp_hdr(skb)->check = ~csum_ipv6_magic( -+ &ipv6_hdr(skb)->saddr, -+ &ipv6_hdr(skb)->daddr, -+ 0, IPPROTO_TCP, 0); -+ tpd->word3 |= 1 << TPD_IP_VERSION_SHIFT; -+ hdr_len >>= 1; -+ tpd->word3 |= (hdr_len & TPD_V6_IPHLLO_MASK) << -+ TPD_V6_IPHLLO_SHIFT; -+ tpd->word3 |= ((hdr_len >> 3) & -+ TPD_V6_IPHLHI_MASK) << -+ TPD_V6_IPHLHI_SHIFT; -+ tpd->word3 |= (tcp_hdrlen(skb) >> 2 & -+ TPD_TCPHDRLEN_MASK) << -+ TPD_TCPHDRLEN_SHIFT; -+ tpd->word3 |= ((skb_shinfo(skb)->gso_size) & -+ TPD_MSS_MASK) << TPD_MSS_SHIFT; -+ tpd->word3 |= 1 << TPD_SEGMENT_EN_SHIFT; -+ } -+ } -+ return 0; - } - - check_sum: -@@ -2254,6 +2289,7 @@ static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev) - NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; - netdev->features |= NETIF_F_LLTX; - netdev->features |= NETIF_F_TSO; -+ netdev->features |= NETIF_F_TSO6; - - return 0; - } -diff --git a/drivers/net/b44.c b/drivers/net/b44.c -index 4869adb..2a91323 100644 ---- a/drivers/net/b44.c -+++ b/drivers/net/b44.c -@@ -1505,7 +1505,8 @@ static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset) - for (k = 0; k< ethaddr_bytes; k++) { - ppattern[offset + magicsync + - (j * ETH_ALEN) + k] = macaddr[k]; -- set_bit(len++, (unsigned long *) pmask); -+ len++; -+ set_bit(len, (unsigned long *) pmask); - } - } - return len - 1; -diff --git a/drivers/net/bcm63xx_enet.c b/drivers/net/bcm63xx_enet.c -index d110c1b..ba29dc3 100644 ---- a/drivers/net/bcm63xx_enet.c -+++ b/drivers/net/bcm63xx_enet.c -@@ -1248,15 +1248,9 @@ static void bcm_enet_get_drvinfo(struct net_device *netdev, - drvinfo->n_stats = BCM_ENET_STATS_LEN; - } - --static int bcm_enet_get_sset_count(struct net_device *netdev, -- int string_set) -+static int bcm_enet_get_stats_count(struct net_device *netdev) - { -- switch (string_set) { -- case ETH_SS_STATS: -- return BCM_ENET_STATS_LEN; -- default: -- return -EINVAL; -- } -+ return BCM_ENET_STATS_LEN; - } - - static void bcm_enet_get_strings(struct net_device *netdev, -@@ -1482,7 +1476,7 @@ static int bcm_enet_set_pauseparam(struct net_device *dev, - - static struct ethtool_ops bcm_enet_ethtool_ops = { - .get_strings = bcm_enet_get_strings, -- .get_sset_count = bcm_enet_get_sset_count, -+ .get_stats_count = bcm_enet_get_stats_count, - .get_ethtool_stats = bcm_enet_get_ethtool_stats, - .get_settings = bcm_enet_get_settings, - .set_settings = bcm_enet_set_settings, -diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h -index 511b922..3b79a22 100644 ---- a/drivers/net/benet/be.h -+++ b/drivers/net/benet/be.h -@@ -35,31 +35,20 @@ - #define DRV_VER "2.101.205" - #define DRV_NAME "be2net" - #define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC" --#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC" - #define OC_NAME "Emulex OneConnect 10Gbps NIC" --#define OC_NAME1 "Emulex OneConnect 10Gbps NIC (be3)" - #define DRV_DESC BE_NAME "Driver" - - #define BE_VENDOR_ID 0x19a2 - #define BE_DEVICE_ID1 0x211 --#define BE_DEVICE_ID2 0x221 - #define OC_DEVICE_ID1 0x700 - #define OC_DEVICE_ID2 0x701 --#define OC_DEVICE_ID3 0x710 - - static inline char *nic_name(struct pci_dev *pdev) - { -- switch (pdev->device) { -- case OC_DEVICE_ID1: -- case OC_DEVICE_ID2: -+ if (pdev->device == OC_DEVICE_ID1 || pdev->device == OC_DEVICE_ID2) - return OC_NAME; -- case OC_DEVICE_ID3: -- return OC_NAME1; 
-- case BE_DEVICE_ID2: -- return BE3_NAME; -- default: -+ else - return BE_NAME; -- } - } - - /* Number of bytes of an RX frame that are copied to skb->data */ -@@ -272,13 +261,8 @@ struct be_adapter { - u32 cap; - u32 rx_fc; /* Rx flow control */ - u32 tx_fc; /* Tx flow control */ -- u8 generation; /* BladeEngine ASIC generation */ - }; - --/* BladeEngine Generation numbers */ --#define BE_GEN2 2 --#define BE_GEN3 3 -- - extern const struct ethtool_ops be_ethtool_ops; - - #define drvr_stats(adapter) (&adapter->stats.drvr_stats) -diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h -index ad33d55..e5f9676 100644 ---- a/drivers/net/benet/be_cmds.h -+++ b/drivers/net/benet/be_cmds.h -@@ -154,8 +154,7 @@ struct be_cmd_req_hdr { - u8 domain; /* dword 0 */ - u32 timeout; /* dword 1 */ - u32 request_length; /* dword 2 */ -- u8 version; /* dword 3 */ -- u8 rsvd[3]; /* dword 3 */ -+ u32 rsvd; /* dword 3 */ - }; - - #define RESP_HDR_INFO_OPCODE_SHIFT 0 /* bits 0 - 7 */ -diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c -index ec983cb..876b357 100644 ---- a/drivers/net/benet/be_main.c -+++ b/drivers/net/benet/be_main.c -@@ -31,10 +31,8 @@ MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data."); - - static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = { - { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) }, -- { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) }, - { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) }, - { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) }, -- { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) }, - { 0 } - }; - MODULE_DEVICE_TABLE(pci, be_dev_ids); -@@ -1944,7 +1942,6 @@ static void be_unmap_pci_bars(struct be_adapter *adapter) - static int be_map_pci_bars(struct be_adapter *adapter) - { - u8 __iomem *addr; -- int pcicfg_reg; - - addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2), - pci_resource_len(adapter->pdev, 2)); -@@ -1958,13 +1955,8 @@ static int be_map_pci_bars(struct be_adapter *adapter) - goto pci_map_err; - adapter->db = addr; - -- if (adapter->generation == BE_GEN2) -- pcicfg_reg = 1; -- else -- pcicfg_reg = 0; -- -- addr = ioremap_nocache(pci_resource_start(adapter->pdev, pcicfg_reg), -- pci_resource_len(adapter->pdev, pcicfg_reg)); -+ addr = ioremap_nocache(pci_resource_start(adapter->pdev, 1), -+ pci_resource_len(adapter->pdev, 1)); - if (addr == NULL) - goto pci_map_err; - adapter->pcicfg = addr; -@@ -2034,7 +2026,6 @@ static int be_stats_init(struct be_adapter *adapter) - cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma); - if (cmd->va == NULL) - return -1; -- memset(cmd->va, 0, cmd->size); - return 0; - } - -@@ -2108,20 +2099,6 @@ static int __devinit be_probe(struct pci_dev *pdev, - goto rel_reg; - } - adapter = netdev_priv(netdev); -- -- switch (pdev->device) { -- case BE_DEVICE_ID1: -- case OC_DEVICE_ID1: -- adapter->generation = BE_GEN2; -- break; -- case BE_DEVICE_ID2: -- case OC_DEVICE_ID2: -- adapter->generation = BE_GEN3; -- break; -- default: -- adapter->generation = 0; -- } -- - adapter->pdev = pdev; - pci_set_drvdata(pdev, adapter); - adapter->netdev = netdev; -diff --git a/drivers/net/e100.c b/drivers/net/e100.c -index 0c53c92..d269a68 100644 ---- a/drivers/net/e100.c -+++ b/drivers/net/e100.c -@@ -1817,7 +1817,6 @@ static int e100_alloc_cbs(struct nic *nic) - &nic->cbs_dma_addr); - if (!nic->cbs) - return -ENOMEM; -- memset(nic->cbs, 0, count * sizeof(struct cb)); - - for (cb = nic->cbs, i = 0; i < count; cb++, i++) { - cb->next = (i + 1 < count) ? 
cb + 1 : nic->cbs; -@@ -1826,6 +1825,7 @@ static int e100_alloc_cbs(struct nic *nic) - cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb); - cb->link = cpu_to_le32(nic->cbs_dma_addr + - ((i+1) % count) * sizeof(struct cb)); -+ cb->skb = NULL; - } - - nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs; -diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h -index 4a2ee85..42e2b7e 100644 ---- a/drivers/net/e1000/e1000.h -+++ b/drivers/net/e1000/e1000.h -@@ -326,8 +326,6 @@ struct e1000_adapter { - /* for ioport free */ - int bars; - int need_ioport; -- -- bool discarding; - }; - - enum e1000_state_t { -diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c -index 1a23f16..bcd192c 100644 ---- a/drivers/net/e1000/e1000_main.c -+++ b/drivers/net/e1000/e1000_main.c -@@ -1698,6 +1698,18 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) - rctl &= ~E1000_RCTL_SZ_4096; - rctl |= E1000_RCTL_BSEX; - switch (adapter->rx_buffer_len) { -+ case E1000_RXBUFFER_256: -+ rctl |= E1000_RCTL_SZ_256; -+ rctl &= ~E1000_RCTL_BSEX; -+ break; -+ case E1000_RXBUFFER_512: -+ rctl |= E1000_RCTL_SZ_512; -+ rctl &= ~E1000_RCTL_BSEX; -+ break; -+ case E1000_RXBUFFER_1024: -+ rctl |= E1000_RCTL_SZ_1024; -+ rctl &= ~E1000_RCTL_BSEX; -+ break; - case E1000_RXBUFFER_2048: - default: - rctl |= E1000_RCTL_SZ_2048; -@@ -3142,7 +3154,13 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) - * however with the new *_jumbo_rx* routines, jumbo receives will use - * fragmented skbs */ - -- if (max_frame <= E1000_RXBUFFER_2048) -+ if (max_frame <= E1000_RXBUFFER_256) -+ adapter->rx_buffer_len = E1000_RXBUFFER_256; -+ else if (max_frame <= E1000_RXBUFFER_512) -+ adapter->rx_buffer_len = E1000_RXBUFFER_512; -+ else if (max_frame <= E1000_RXBUFFER_1024) -+ adapter->rx_buffer_len = E1000_RXBUFFER_1024; -+ else if (max_frame <= E1000_RXBUFFER_2048) - adapter->rx_buffer_len = E1000_RXBUFFER_2048; - else - #if (PAGE_SIZE >= E1000_RXBUFFER_16384) -@@ -3809,22 +3827,13 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, - - length = le16_to_cpu(rx_desc->length); - /* !EOP means multiple descriptors were used to store a single -- * packet, if thats the case we need to toss it. 
In fact, we -- * to toss every packet with the EOP bit clear and the next -- * frame that _does_ have the EOP bit set, as it is by -- * definition only a frame fragment -- */ -- if (unlikely(!(status & E1000_RXD_STAT_EOP))) -- adapter->discarding = true; -- -- if (adapter->discarding) { -+ * packet, also make sure the frame isn't just CRC only */ -+ if (unlikely(!(status & E1000_RXD_STAT_EOP) || (length <= 4))) { - /* All receives must fit into a single buffer */ - E1000_DBG("%s: Receive packet consumed multiple" - " buffers\n", netdev->name); - /* recycle */ - buffer_info->skb = skb; -- if (status & E1000_RXD_STAT_EOP) -- adapter->discarding = false; - goto next_desc; - } - -diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h -index 47db9bd..3e187b0 100644 ---- a/drivers/net/e1000e/e1000.h -+++ b/drivers/net/e1000e/e1000.h -@@ -417,7 +417,6 @@ struct e1000_info { - /* CRC Stripping defines */ - #define FLAG2_CRC_STRIPPING (1 << 0) - #define FLAG2_HAS_PHY_WAKEUP (1 << 1) --#define FLAG2_IS_DISCARDING (1 << 2) - - #define E1000_RX_DESC_PS(R, i) \ - (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) -diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c -index 2154530..fad8f9e 100644 ---- a/drivers/net/e1000e/netdev.c -+++ b/drivers/net/e1000e/netdev.c -@@ -482,24 +482,14 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, - - length = le16_to_cpu(rx_desc->length); - -- /* -- * !EOP means multiple descriptors were used to store a single -- * packet, if that's the case we need to toss it. In fact, we -- * need to toss every packet with the EOP bit clear and the -- * next frame that _does_ have the EOP bit set, as it is by -- * definition only a frame fragment -- */ -- if (unlikely(!(status & E1000_RXD_STAT_EOP))) -- adapter->flags2 |= FLAG2_IS_DISCARDING; -- -- if (adapter->flags2 & FLAG2_IS_DISCARDING) { -+ /* !EOP means multiple descriptors were used to store a single -+ * packet, also make sure the frame isn't just CRC only */ -+ if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) { - /* All receives must fit into a single buffer */ - e_dbg("%s: Receive packet consumed multiple buffers\n", - netdev->name); - /* recycle */ - buffer_info->skb = skb; -- if (status & E1000_RXD_STAT_EOP) -- adapter->flags2 &= ~FLAG2_IS_DISCARDING; - goto next_desc; - } - -@@ -757,16 +747,10 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, - PCI_DMA_FROMDEVICE); - buffer_info->dma = 0; - -- /* see !EOP comment in other rx routine */ -- if (!(staterr & E1000_RXD_STAT_EOP)) -- adapter->flags2 |= FLAG2_IS_DISCARDING; -- -- if (adapter->flags2 & FLAG2_IS_DISCARDING) { -+ if (!(staterr & E1000_RXD_STAT_EOP)) { - e_dbg("%s: Packet Split buffers didn't pick up the " - "full packet\n", netdev->name); - dev_kfree_skb_irq(skb); -- if (staterr & E1000_RXD_STAT_EOP) -- adapter->flags2 &= ~FLAG2_IS_DISCARDING; - goto next_desc; - } - -@@ -1136,7 +1120,6 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter) - - rx_ring->next_to_clean = 0; - rx_ring->next_to_use = 0; -- adapter->flags2 &= ~FLAG2_IS_DISCARDING; - - writel(0, adapter->hw.hw_addr + rx_ring->head); - writel(0, adapter->hw.hw_addr + rx_ring->tail); -@@ -2347,6 +2330,18 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) - rctl &= ~E1000_RCTL_SZ_4096; - rctl |= E1000_RCTL_BSEX; - switch (adapter->rx_buffer_len) { -+ case 256: -+ rctl |= E1000_RCTL_SZ_256; -+ rctl &= ~E1000_RCTL_BSEX; -+ break; -+ case 512: -+ rctl |= E1000_RCTL_SZ_512; -+ rctl &= ~E1000_RCTL_BSEX; -+ 
break; -+ case 1024: -+ rctl |= E1000_RCTL_SZ_1024; -+ rctl &= ~E1000_RCTL_BSEX; -+ break; - case 2048: - default: - rctl |= E1000_RCTL_SZ_2048; -@@ -4326,7 +4321,13 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) - * fragmented skbs - */ - -- if (max_frame <= 2048) -+ if (max_frame <= 256) -+ adapter->rx_buffer_len = 256; -+ else if (max_frame <= 512) -+ adapter->rx_buffer_len = 512; -+ else if (max_frame <= 1024) -+ adapter->rx_buffer_len = 1024; -+ else if (max_frame <= 2048) - adapter->rx_buffer_len = 2048; - else - adapter->rx_buffer_len = 4096; -diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c -index 35d896b..a2fc70a 100644 ---- a/drivers/net/qlge/qlge_main.c -+++ b/drivers/net/qlge/qlge_main.c -@@ -3310,8 +3310,10 @@ static int ql_adapter_initialize(struct ql_adapter *qdev) - - /* Initialize the port and set the max framesize. */ - status = qdev->nic_ops->port_initialize(qdev); -- if (status) -- QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n"); -+ if (status) { -+ QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n"); -+ return status; -+ } - - /* Set up the MAC address and frame routing filter. */ - status = ql_cam_route_initialize(qdev); -@@ -3712,6 +3714,9 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p) - struct sockaddr *addr = p; - int status; - -+ if (netif_running(ndev)) -+ return -EBUSY; -+ - if (!is_valid_ether_addr(addr->sa_data)) - return -EADDRNOTAVAIL; - memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); -@@ -3863,7 +3868,8 @@ static int __devinit ql_init_device(struct pci_dev *pdev, - struct net_device *ndev, int cards_found) - { - struct ql_adapter *qdev = netdev_priv(ndev); -- int err = 0; -+ int pos, err = 0; -+ u16 val16; - - memset((void *)qdev, 0, sizeof(*qdev)); - err = pci_enable_device(pdev); -@@ -3875,12 +3881,18 @@ static int __devinit ql_init_device(struct pci_dev *pdev, - qdev->ndev = ndev; - qdev->pdev = pdev; - pci_set_drvdata(pdev, ndev); -- -- /* Set PCIe read request size */ -- err = pcie_set_readrq(pdev, 4096); -- if (err) { -- dev_err(&pdev->dev, "Set readrq failed.\n"); -- goto err_out; -+ pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); -+ if (pos <= 0) { -+ dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, " -+ "aborting.\n"); -+ return pos; -+ } else { -+ pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16); -+ val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN; -+ val16 |= (PCI_EXP_DEVCTL_CERE | -+ PCI_EXP_DEVCTL_NFERE | -+ PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE); -+ pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16); - } - - err = pci_request_regions(pdev, DRV_NAME); -diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c -index 32b1e1f..aec05f2 100644 ---- a/drivers/net/qlge/qlge_mpi.c -+++ b/drivers/net/qlge/qlge_mpi.c -@@ -446,9 +446,6 @@ static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp) - ql_aen_lost(qdev, mbcp); - break; - -- case AEN_DCBX_CHG: -- /* Need to support AEN 8110 */ -- break; - default: - QPRINTK(qdev, DRV, ERR, - "Unsupported AE %.08x.\n", mbcp->mbox_out[0]); -diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c -index d443ad7..489c4de 100644 ---- a/drivers/net/sfc/tx.c -+++ b/drivers/net/sfc/tx.c -@@ -821,6 +821,8 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue) - tx_queue->efx->type->txd_ring_mask]; - efx_tsoh_free(tx_queue, buffer); - EFX_BUG_ON_PARANOID(buffer->skb); -+ buffer->len = 0; -+ buffer->continuation = true; - if (buffer->unmap_len) { - unmap_addr = (buffer->dma_addr + 
buffer->len - - buffer->unmap_len); -@@ -834,8 +836,6 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue) - PCI_DMA_TODEVICE); - buffer->unmap_len = 0; - } -- buffer->len = 0; -- buffer->continuation = true; - } - } - -diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c -index f3600b3..6a10d7b 100644 ---- a/drivers/net/sky2.c -+++ b/drivers/net/sky2.c -@@ -1806,8 +1806,7 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done) - sky2->tx_cons = idx; - smp_mb(); - -- /* Wake unless it's detached, and called e.g. from sky2_down() */ -- if (tx_avail(sky2) > MAX_SKB_TX_LE + 4 && netif_device_present(dev)) -+ if (tx_avail(sky2) > MAX_SKB_TX_LE + 4) - netif_wake_queue(dev); - } - -diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c -index e65ee4d..a36e2b5 100644 ---- a/drivers/net/starfire.c -+++ b/drivers/net/starfire.c -@@ -1063,7 +1063,7 @@ static int netdev_open(struct net_device *dev) - if (retval) { - printk(KERN_ERR "starfire: Failed to load firmware \"%s\"\n", - FIRMWARE_RX); -- goto out_init; -+ return retval; - } - if (fw_rx->size % 4) { - printk(KERN_ERR "starfire: bogus length %zu in \"%s\"\n", -@@ -1108,9 +1108,6 @@ out_tx: - release_firmware(fw_tx); - out_rx: - release_firmware(fw_rx); --out_init: -- if (retval) -- netdev_close(dev); - return retval; - } - -diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c -index f14d225..b091e20 100644 ---- a/drivers/net/usb/rtl8150.c -+++ b/drivers/net/usb/rtl8150.c -@@ -324,7 +324,7 @@ static int rtl8150_set_mac_address(struct net_device *netdev, void *p) - dbg("%02X:", netdev->dev_addr[i]); - dbg("%02X\n", netdev->dev_addr[i]); - /* Set the IDR registers. */ -- set_registers(dev, IDR, netdev->addr_len, netdev->dev_addr); -+ set_registers(dev, IDR, sizeof(netdev->dev_addr), netdev->dev_addr); - #ifdef EEPROM_WRITE - { - u8 cr; -diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c -index f141a4f..e974e58 100644 ---- a/drivers/net/wireless/ath/ar9170/usb.c -+++ b/drivers/net/wireless/ath/ar9170/usb.c -@@ -68,10 +68,8 @@ static struct usb_device_id ar9170_usb_ids[] = { - { USB_DEVICE(0x0cf3, 0x1002) }, - /* Cace Airpcap NX */ - { USB_DEVICE(0xcace, 0x0300) }, -- /* D-Link DWA 160 A1 */ -+ /* D-Link DWA 160A */ - { USB_DEVICE(0x07d1, 0x3c10) }, -- /* D-Link DWA 160 A2 */ -- { USB_DEVICE(0x07d1, 0x3a09) }, - /* Netgear WNDA3100 */ - { USB_DEVICE(0x0846, 0x9010) }, - /* Netgear WN111 v2 */ -diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c -index 8a82c75..95a8e23 100644 ---- a/drivers/net/wireless/ath/ath5k/base.c -+++ b/drivers/net/wireless/ath/ath5k/base.c -@@ -2349,9 +2349,6 @@ ath5k_init(struct ath5k_softc *sc) - */ - ath5k_stop_locked(sc); - -- /* Set PHY calibration interval */ -- ah->ah_cal_intval = ath5k_calinterval; -- - /* - * The basic interface to setting the hardware in a good - * state is ``reset''. 
On return the hardware is known to -@@ -2379,6 +2376,10 @@ ath5k_init(struct ath5k_softc *sc) - - /* Set ack to be sent at low bit-rates */ - ath5k_hw_set_ack_bitrate_high(ah, false); -+ -+ /* Set PHY calibration inteval */ -+ ah->ah_cal_intval = ath5k_calinterval; -+ - ret = 0; - done: - mmiowb(); -diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c -index 9a96550..644962a 100644 ---- a/drivers/net/wireless/ath/ath5k/eeprom.c -+++ b/drivers/net/wireless/ath/ath5k/eeprom.c -@@ -97,7 +97,6 @@ ath5k_eeprom_init_header(struct ath5k_hw *ah) - struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; - int ret; - u16 val; -- u32 cksum, offset, eep_max = AR5K_EEPROM_INFO_MAX; - - /* - * Read values from EEPROM and store them in the capability structure -@@ -112,44 +111,20 @@ ath5k_eeprom_init_header(struct ath5k_hw *ah) - if (ah->ah_ee_version < AR5K_EEPROM_VERSION_3_0) - return 0; - -+#ifdef notyet - /* - * Validate the checksum of the EEPROM date. There are some - * devices with invalid EEPROMs. - */ -- AR5K_EEPROM_READ(AR5K_EEPROM_SIZE_UPPER, val); -- if (val) { -- eep_max = (val & AR5K_EEPROM_SIZE_UPPER_MASK) << -- AR5K_EEPROM_SIZE_ENDLOC_SHIFT; -- AR5K_EEPROM_READ(AR5K_EEPROM_SIZE_LOWER, val); -- eep_max = (eep_max | val) - AR5K_EEPROM_INFO_BASE; -- -- /* -- * Fail safe check to prevent stupid loops due -- * to busted EEPROMs. XXX: This value is likely too -- * big still, waiting on a better value. -- */ -- if (eep_max > (3 * AR5K_EEPROM_INFO_MAX)) { -- ATH5K_ERR(ah->ah_sc, "Invalid max custom EEPROM size: " -- "%d (0x%04x) max expected: %d (0x%04x)\n", -- eep_max, eep_max, -- 3 * AR5K_EEPROM_INFO_MAX, -- 3 * AR5K_EEPROM_INFO_MAX); -- return -EIO; -- } -- } -- -- for (cksum = 0, offset = 0; offset < eep_max; offset++) { -+ for (cksum = 0, offset = 0; offset < AR5K_EEPROM_INFO_MAX; offset++) { - AR5K_EEPROM_READ(AR5K_EEPROM_INFO(offset), val); - cksum ^= val; - } - if (cksum != AR5K_EEPROM_INFO_CKSUM) { -- ATH5K_ERR(ah->ah_sc, "Invalid EEPROM " -- "checksum: 0x%04x eep_max: 0x%04x (%s)\n", -- cksum, eep_max, -- eep_max == AR5K_EEPROM_INFO_MAX ? 
-- "default size" : "custom size"); -+ ATH5K_ERR(ah->ah_sc, "Invalid EEPROM checksum 0x%04x\n", cksum); - return -EIO; - } -+#endif - - AR5K_EEPROM_READ_HDR(AR5K_EEPROM_ANT_GAIN(ah->ah_ee_version), - ee_ant_gain); -diff --git a/drivers/net/wireless/ath/ath5k/eeprom.h b/drivers/net/wireless/ath/ath5k/eeprom.h -index 473a483..0123f35 100644 ---- a/drivers/net/wireless/ath/ath5k/eeprom.h -+++ b/drivers/net/wireless/ath/ath5k/eeprom.h -@@ -37,14 +37,6 @@ - #define AR5K_EEPROM_RFKILL_POLARITY_S 1 - - #define AR5K_EEPROM_REG_DOMAIN 0x00bf /* EEPROM regdom */ -- --/* FLASH(EEPROM) Defines for AR531X chips */ --#define AR5K_EEPROM_SIZE_LOWER 0x1b /* size info -- lower */ --#define AR5K_EEPROM_SIZE_UPPER 0x1c /* size info -- upper */ --#define AR5K_EEPROM_SIZE_UPPER_MASK 0xfff0 --#define AR5K_EEPROM_SIZE_UPPER_SHIFT 4 --#define AR5K_EEPROM_SIZE_ENDLOC_SHIFT 12 -- - #define AR5K_EEPROM_CHECKSUM 0x00c0 /* EEPROM checksum */ - #define AR5K_EEPROM_INFO_BASE 0x00c0 /* EEPROM header */ - #define AR5K_EEPROM_INFO_MAX (0x400 - AR5K_EEPROM_INFO_BASE) -diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c -index 9d67647..1a039f2 100644 ---- a/drivers/net/wireless/ath/ath5k/phy.c -+++ b/drivers/net/wireless/ath/ath5k/phy.c -@@ -2954,6 +2954,8 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel, - ATH5K_ERR(ah->ah_sc, "invalid tx power: %u\n", txpower); - return -EINVAL; - } -+ if (txpower == 0) -+ txpower = AR5K_TUNE_DEFAULT_TXPOWER; - - /* Reset TX power values */ - memset(&ah->ah_txpower, 0, sizeof(ah->ah_txpower)); -diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h -index cdb90c5..1d59f10 100644 ---- a/drivers/net/wireless/ath/ath9k/ath9k.h -+++ b/drivers/net/wireless/ath/ath9k/ath9k.h -@@ -139,7 +139,6 @@ struct ath_buf { - dma_addr_t bf_daddr; /* physical addr of desc */ - dma_addr_t bf_buf_addr; /* physical addr of data buffer */ - bool bf_stale; -- bool bf_isnullfunc; - u16 bf_flags; - struct ath_buf_state bf_state; - dma_addr_t bf_dmacontext; -@@ -525,8 +524,6 @@ struct ath_led { - #define SC_OP_BEACON_SYNC BIT(19) - #define SC_OP_BTCOEX_ENABLED BIT(20) - #define SC_OP_BT_PRIORITY_DETECTED BIT(21) --#define SC_OP_NULLFUNC_COMPLETED BIT(22) --#define SC_OP_PS_ENABLED BIT(23) - - struct ath_bus_ops { - void (*read_cachesize)(struct ath_softc *sc, int *csz); -diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c -index 0905b38..ca7694c 100644 ---- a/drivers/net/wireless/ath/ath9k/hw.c -+++ b/drivers/net/wireless/ath/ath9k/hw.c -@@ -880,11 +880,12 @@ static void ath9k_hw_init_mode_gain_regs(struct ath_hw *ah) - } - } - --static void ath9k_hw_init_eeprom_fix(struct ath_hw *ah) -+static void ath9k_hw_init_11a_eeprom_fix(struct ath_hw *ah) - { - u32 i, j; - -- if (ah->hw_version.devid == AR9280_DEVID_PCI) { -+ if ((ah->hw_version.devid == AR9280_DEVID_PCI) && -+ test_bit(ATH9K_MODE_11A, ah->caps.wireless_modes)) { - - /* EEPROM Fixup */ - for (i = 0; i < ah->iniModes.ia_rows; i++) { -@@ -936,11 +937,6 @@ int ath9k_hw_init(struct ath_hw *ah) - DPRINTF(ah->ah_sc, ATH_DBG_RESET, "serialize_regmode is %d\n", - ah->config.serialize_regmode); - -- if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) -- ah->config.max_txtrig_level = MAX_TX_FIFO_THRESHOLD >> 1; -- else -- ah->config.max_txtrig_level = MAX_TX_FIFO_THRESHOLD; -- - if (!ath9k_hw_macversion_supported(ah->hw_version.macVersion)) { - DPRINTF(ah->ah_sc, ATH_DBG_FATAL, - "Mac Chip Rev 0x%02x.%x is not supported by " -@@ -979,7 +975,7 @@ 
int ath9k_hw_init(struct ath_hw *ah) - - ath9k_hw_init_mode_gain_regs(ah); - ath9k_hw_fill_cap_info(ah); -- ath9k_hw_init_eeprom_fix(ah); -+ ath9k_hw_init_11a_eeprom_fix(ah); - - r = ath9k_hw_init_macaddr(ah); - if (r) { -@@ -3674,11 +3670,7 @@ void ath9k_hw_fill_cap_info(struct ath_hw *ah) - pCap->keycache_size = AR_KEYTABLE_SIZE; - - pCap->hw_caps |= ATH9K_HW_CAP_FASTCC; -- -- if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) -- pCap->tx_triglevel_max = MAX_TX_FIFO_THRESHOLD >> 1; -- else -- pCap->tx_triglevel_max = MAX_TX_FIFO_THRESHOLD; -+ pCap->tx_triglevel_max = MAX_TX_FIFO_THRESHOLD; - - if (AR_SREV_9285_10_OR_LATER(ah)) - pCap->num_gpio_pins = AR9285_NUM_GPIO; -diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h -index ff4383b..b892345 100644 ---- a/drivers/net/wireless/ath/ath9k/hw.h -+++ b/drivers/net/wireless/ath/ath9k/hw.h -@@ -218,7 +218,6 @@ struct ath9k_ops_config { - #define AR_SPUR_FEEQ_BOUND_HT20 10 - int spurmode; - u16 spurchans[AR_EEPROM_MODAL_SPURS][2]; -- u8 max_txtrig_level; - }; - - enum ath9k_int { -@@ -408,7 +407,7 @@ struct ath9k_hw_version { - * Using de Bruijin sequence to to look up 1's index in a 32 bit number - * debruijn32 = 0000 0111 0111 1100 1011 0101 0011 0001 - */ --#define debruijn32 0x077CB531U -+#define debruijn32 0x077CB531UL - - struct ath_gen_timer_configuration { - u32 next_addr; -diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c -index 110c16d..800bfab 100644 ---- a/drivers/net/wireless/ath/ath9k/mac.c -+++ b/drivers/net/wireless/ath/ath9k/mac.c -@@ -70,7 +70,7 @@ bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel) - u32 txcfg, curLevel, newLevel; - enum ath9k_int omask; - -- if (ah->tx_trig_level >= ah->config.max_txtrig_level) -+ if (ah->tx_trig_level >= MAX_TX_FIFO_THRESHOLD) - return false; - - omask = ath9k_hw_set_interrupts(ah, ah->mask_reg & ~ATH9K_INT_GLOBAL); -@@ -79,7 +79,7 @@ bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel) - curLevel = MS(txcfg, AR_FTRIG); - newLevel = curLevel; - if (bIncTrigLevel) { -- if (curLevel < ah->config.max_txtrig_level) -+ if (curLevel < MAX_TX_FIFO_THRESHOLD) - newLevel++; - } else if (curLevel > MIN_TX_FIFO_THRESHOLD) - newLevel--; -@@ -155,7 +155,7 @@ bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q) - wait = wait_time; - while (ath9k_hw_numtxpending(ah, q)) { - if ((--wait) == 0) { -- DPRINTF(ah->ah_sc, ATH_DBG_FATAL, -+ DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, - "Failed to stop TX DMA in 100 " - "msec after killing last frame\n"); - break; -@@ -222,8 +222,6 @@ int ath9k_hw_txprocdesc(struct ath_hw *ah, struct ath_desc *ds) - ds->ds_txstat.ts_status = 0; - ds->ds_txstat.ts_flags = 0; - -- if (ads->ds_txstatus1 & AR_FrmXmitOK) -- ds->ds_txstat.ts_status |= ATH9K_TX_ACKED; - if (ads->ds_txstatus1 & AR_ExcessiveRetries) - ds->ds_txstat.ts_status |= ATH9K_TXERR_XRETRY; - if (ads->ds_txstatus1 & AR_Filtered) -diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h -index 9720c4d..f56e77d 100644 ---- a/drivers/net/wireless/ath/ath9k/mac.h -+++ b/drivers/net/wireless/ath/ath9k/mac.h -@@ -76,10 +76,6 @@ - #define ATH9K_TXERR_FIFO 0x04 - #define ATH9K_TXERR_XTXOP 0x08 - #define ATH9K_TXERR_TIMER_EXPIRED 0x10 --#define ATH9K_TX_ACKED 0x20 --#define ATH9K_TXERR_MASK \ -- (ATH9K_TXERR_XRETRY | ATH9K_TXERR_FILT | ATH9K_TXERR_FIFO | \ -- ATH9K_TXERR_XTXOP | ATH9K_TXERR_TIMER_EXPIRED) - - #define ATH9K_TX_BA 0x01 - #define ATH9K_TX_PWRMGMT 0x02 -diff --git 
a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c -index 5864eaa..43d2be9 100644 ---- a/drivers/net/wireless/ath/ath9k/main.c -+++ b/drivers/net/wireless/ath/ath9k/main.c -@@ -2147,9 +2147,6 @@ static void ath9k_stop(struct ieee80211_hw *hw) - return; /* another wiphy still in use */ - } - -- /* Ensure HW is awake when we try to shut it down. */ -- ath9k_ps_wakeup(sc); -- - if (sc->sc_flags & SC_OP_BTCOEX_ENABLED) { - ath9k_hw_btcoex_disable(sc->sc_ah); - if (sc->btcoex_info.btcoex_scheme == ATH_BTCOEX_CFG_3WIRE) -@@ -2170,9 +2167,6 @@ static void ath9k_stop(struct ieee80211_hw *hw) - /* disable HAL and put h/w to sleep */ - ath9k_hw_disable(sc->sc_ah); - ath9k_hw_configpcipowersave(sc->sc_ah, 1, 1); -- ath9k_ps_restore(sc); -- -- /* Finally, put the chip in FULL SLEEP mode */ - ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP); - - sc->sc_flags |= SC_OP_INVALID; -@@ -2283,12 +2277,10 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw, - if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) || - (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) || - (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) { -- ath9k_ps_wakeup(sc); - ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq); -- ath9k_ps_restore(sc); -+ ath_beacon_return(sc, avp); - } - -- ath_beacon_return(sc, avp); - sc->sc_flags &= ~SC_OP_BEACONS; - - for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) { -@@ -2335,7 +2327,6 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed) - - if (changed & IEEE80211_CONF_CHANGE_PS) { - if (conf->flags & IEEE80211_CONF_PS) { -- sc->sc_flags |= SC_OP_PS_ENABLED; - if (!(ah->caps.hw_caps & - ATH9K_HW_CAP_AUTOSLEEP)) { - if ((sc->imask & ATH9K_INT_TIM_TIMER) == 0) { -@@ -2343,17 +2334,11 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed) - ath9k_hw_set_interrupts(sc->sc_ah, - sc->imask); - } -- } -- sc->ps_enabled = true; -- if ((sc->sc_flags & SC_OP_NULLFUNC_COMPLETED)) { -- sc->sc_flags &= ~SC_OP_NULLFUNC_COMPLETED; -- sc->ps_enabled = true; - ath9k_hw_setrxabort(sc->sc_ah, 1); - } -+ sc->ps_enabled = true; - } else { - sc->ps_enabled = false; -- sc->sc_flags &= ~(SC_OP_PS_ENABLED | -- SC_OP_NULLFUNC_COMPLETED); - ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE); - if (!(ah->caps.hw_caps & - ATH9K_HW_CAP_AUTOSLEEP)) { -@@ -2732,21 +2717,15 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw, - case IEEE80211_AMPDU_RX_STOP: - break; - case IEEE80211_AMPDU_TX_START: -- ath9k_ps_wakeup(sc); - ath_tx_aggr_start(sc, sta, tid, ssn); - ieee80211_start_tx_ba_cb_irqsafe(hw, sta->addr, tid); -- ath9k_ps_restore(sc); - break; - case IEEE80211_AMPDU_TX_STOP: -- ath9k_ps_wakeup(sc); - ath_tx_aggr_stop(sc, sta, tid); - ieee80211_stop_tx_ba_cb_irqsafe(hw, sta->addr, tid); -- ath9k_ps_restore(sc); - break; - case IEEE80211_AMPDU_TX_OPERATIONAL: -- ath9k_ps_wakeup(sc); - ath_tx_aggr_resume(sc, sta, tid); -- ath9k_ps_restore(sc); - break; - default: - DPRINTF(sc, ATH_DBG_FATAL, "Unknown AMPDU action\n"); -diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h -index c0d7e65..d83b77f 100644 ---- a/drivers/net/wireless/ath/ath9k/reg.h -+++ b/drivers/net/wireless/ath/ath9k/reg.h -@@ -969,10 +969,10 @@ enum { - #define AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_S 4 - #define AR_GPIO_INPUT_EN_VAL_RFSILENT_DEF 0x00000080 - #define AR_GPIO_INPUT_EN_VAL_RFSILENT_DEF_S 7 --#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB 0x00000400 --#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB_S 10 - #define AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB 0x00001000 - #define 
AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB_S 12 -+#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB 0x00001000 -+#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB_S 1 - #define AR_GPIO_INPUT_EN_VAL_RFSILENT_BB 0x00008000 - #define AR_GPIO_INPUT_EN_VAL_RFSILENT_BB_S 15 - #define AR_GPIO_RTC_RESET_OVERRIDE_ENABLE 0x00010000 -diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c -index 9009bac..42551a4 100644 ---- a/drivers/net/wireless/ath/ath9k/xmit.c -+++ b/drivers/net/wireless/ath/ath9k/xmit.c -@@ -1076,10 +1076,10 @@ void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx) - if (npend) { - int r; - -- DPRINTF(sc, ATH_DBG_FATAL, "Unable to stop TxDMA. Reset HAL!\n"); -+ DPRINTF(sc, ATH_DBG_XMIT, "Unable to stop TxDMA. Reset HAL!\n"); - - spin_lock_bh(&sc->sc_resetlock); -- r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false); -+ r = ath9k_hw_reset(ah, sc->sc_ah->curchan, true); - if (r) - DPRINTF(sc, ATH_DBG_FATAL, - "Unable to reset hardware; reset status %d\n", -@@ -1563,7 +1563,7 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf, - - bf->bf_frmlen = skb->len + FCS_LEN - (hdrlen & 3); - -- if (conf_is_ht(&sc->hw->conf)) -+ if (conf_is_ht(&sc->hw->conf) && !is_pae(skb)) - bf->bf_state.bf_type |= BUF_HT; - - bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq); -@@ -1592,13 +1592,6 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf, - } - - bf->bf_buf_addr = bf->bf_dmacontext; -- -- if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc)) { -- bf->bf_isnullfunc = true; -- sc->sc_flags &= ~SC_OP_NULLFUNC_COMPLETED; -- } else -- bf->bf_isnullfunc = false; -- - return 0; - } - -@@ -1648,7 +1641,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf, - goto tx_done; - } - -- if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && !is_pae(skb)) { -+ if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) { - /* - * Try aggregation if it's a unicast data frame - * and the destination is HT capable. 
-@@ -1996,15 +1989,6 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) - if (ds == txq->axq_gatingds) - txq->axq_gatingds = NULL; - -- if (bf->bf_isnullfunc && -- (ds->ds_txstat.ts_status & ATH9K_TX_ACKED)) { -- if ((sc->sc_flags & SC_OP_PS_ENABLED)) { -- sc->ps_enabled = true; -- ath9k_hw_setrxabort(sc->sc_ah, 1); -- } else -- sc->sc_flags |= SC_OP_NULLFUNC_COMPLETED; -- } -- - /* - * Remove ath_buf's of the same transmit unit from txq, - * however leave the last descriptor back as the holding -@@ -2020,7 +2004,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) - if (bf_isaggr(bf)) - txq->axq_aggr_depth--; - -- txok = !(ds->ds_txstat.ts_status & ATH9K_TXERR_MASK); -+ txok = (ds->ds_txstat.ts_status == 0); - txq->axq_tx_inprogress = false; - spin_unlock_bh(&txq->axq_lock); - -@@ -2081,9 +2065,7 @@ static void ath_tx_complete_poll_work(struct work_struct *work) - - if (needreset) { - DPRINTF(sc, ATH_DBG_RESET, "tx hung, resetting the chip\n"); -- ath9k_ps_wakeup(sc); - ath_reset(sc, false); -- ath9k_ps_restore(sc); - } - - ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, -diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h -index 0e6b154..6607162 100644 ---- a/drivers/net/wireless/b43/b43.h -+++ b/drivers/net/wireless/b43/b43.h -@@ -117,7 +117,6 @@ - #define B43_MMIO_TSF_2 0x636 /* core rev < 3 only */ - #define B43_MMIO_TSF_3 0x638 /* core rev < 3 only */ - #define B43_MMIO_RNG 0x65A --#define B43_MMIO_IFSSLOT 0x684 /* Interframe slot time */ - #define B43_MMIO_IFSCTL 0x688 /* Interframe space control */ - #define B43_MMIO_IFSCTL_USE_EDCF 0x0004 - #define B43_MMIO_POWERUP_DELAY 0x6A8 -diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c -index 9ca253e..098dda1 100644 ---- a/drivers/net/wireless/b43/main.c -+++ b/drivers/net/wireless/b43/main.c -@@ -628,17 +628,10 @@ static void b43_upload_card_macaddress(struct b43_wldev *dev) - static void b43_set_slot_time(struct b43_wldev *dev, u16 slot_time) - { - /* slot_time is in usec. */ -- /* This test used to exit for all but a G PHY. */ -- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) -+ if (dev->phy.type != B43_PHYTYPE_G) - return; -- b43_write16(dev, B43_MMIO_IFSSLOT, 510 + slot_time); -- /* Shared memory location 0x0010 is the slot time and should be -- * set to slot_time; however, this register is initially 0 and changing -- * the value adversely affects the transmit rate for BCM4311 -- * devices. Until this behavior is unterstood, delete this step -- * -- * b43_shm_write16(dev, B43_SHM_SHARED, 0x0010, slot_time); -- */ -+ b43_write16(dev, 0x684, 510 + slot_time); -+ b43_shm_write16(dev, B43_SHM_SHARED, 0x0010, slot_time); - } - - static void b43_short_slot_timing_enable(struct b43_wldev *dev) -diff --git a/drivers/net/wireless/b43/rfkill.c b/drivers/net/wireless/b43/rfkill.c -index 78016ae..ffdce6f 100644 ---- a/drivers/net/wireless/b43/rfkill.c -+++ b/drivers/net/wireless/b43/rfkill.c -@@ -33,14 +33,8 @@ bool b43_is_hw_radio_enabled(struct b43_wldev *dev) - & B43_MMIO_RADIO_HWENABLED_HI_MASK)) - return 1; - } else { -- /* To prevent CPU fault on PPC, do not read a register -- * unless the interface is started; however, on resume -- * for hibernation, this routine is entered early. When -- * that happens, unconditionally return TRUE. 
-- */ -- if (b43_status(dev) < B43_STAT_STARTED) -- return 1; -- if (b43_read16(dev, B43_MMIO_RADIO_HWENABLED_LO) -+ if (b43_status(dev) >= B43_STAT_STARTED && -+ b43_read16(dev, B43_MMIO_RADIO_HWENABLED_LO) - & B43_MMIO_RADIO_HWENABLED_LO_MASK) - return 1; - } -diff --git a/drivers/net/wireless/b43legacy/rfkill.c b/drivers/net/wireless/b43legacy/rfkill.c -index d579df7..8783022 100644 ---- a/drivers/net/wireless/b43legacy/rfkill.c -+++ b/drivers/net/wireless/b43legacy/rfkill.c -@@ -34,13 +34,6 @@ bool b43legacy_is_hw_radio_enabled(struct b43legacy_wldev *dev) - & B43legacy_MMIO_RADIO_HWENABLED_HI_MASK)) - return 1; - } else { -- /* To prevent CPU fault on PPC, do not read a register -- * unless the interface is started; however, on resume -- * for hibernation, this routine is entered early. When -- * that happens, unconditionally return TRUE. -- */ -- if (b43legacy_status(dev) < B43legacy_STAT_STARTED) -- return 1; - if (b43legacy_read16(dev, B43legacy_MMIO_RADIO_HWENABLED_LO) - & B43legacy_MMIO_RADIO_HWENABLED_LO_MASK) - return 1; -diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c -index 43102bf..6e2fc0c 100644 ---- a/drivers/net/wireless/ipw2x00/ipw2100.c -+++ b/drivers/net/wireless/ipw2x00/ipw2100.c -@@ -6487,16 +6487,6 @@ static int ipw2100_resume(struct pci_dev *pci_dev) - } - #endif - --static void ipw2100_shutdown(struct pci_dev *pci_dev) --{ -- struct ipw2100_priv *priv = pci_get_drvdata(pci_dev); -- -- /* Take down the device; powers it off, etc. */ -- ipw2100_down(priv); -- -- pci_disable_device(pci_dev); --} -- - #define IPW2100_DEV_ID(x) { PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, x } - - static struct pci_device_id ipw2100_pci_id_table[] __devinitdata = { -@@ -6560,7 +6550,6 @@ static struct pci_driver ipw2100_pci_driver = { - .suspend = ipw2100_suspend, - .resume = ipw2100_resume, - #endif -- .shutdown = ipw2100_shutdown, - }; - - /** -diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c -index 9d60f6c..f059b49 100644 ---- a/drivers/net/wireless/iwlwifi/iwl-3945.c -+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c -@@ -2895,7 +2895,6 @@ static struct iwl_cfg iwl3945_bg_cfg = { - .mod_params = &iwl3945_mod_params, - .use_isr_legacy = true, - .ht_greenfield_support = false, -- .broken_powersave = true, - }; - - static struct iwl_cfg iwl3945_abg_cfg = { -@@ -2910,7 +2909,6 @@ static struct iwl_cfg iwl3945_abg_cfg = { - .mod_params = &iwl3945_mod_params, - .use_isr_legacy = true, - .ht_greenfield_support = false, -- .broken_powersave = true, - }; - - struct pci_device_id iwl3945_hw_card_ids[] = { -diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c -index 99331ed..6f703a0 100644 ---- a/drivers/net/wireless/iwlwifi/iwl-4965.c -+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c -@@ -1337,7 +1337,7 @@ static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel, - iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info); - - /* calculate tx gain adjustment based on power supply voltage */ -- voltage = le16_to_cpu(priv->calib_info->voltage); -+ voltage = priv->calib_info->voltage; - init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage); - voltage_compensation = - iwl4965_get_voltage_compensation(voltage, init_voltage); -@@ -2087,7 +2087,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv, - struct ieee80211_tx_info *info; - struct iwl4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0]; - u32 status = le32_to_cpu(tx_resp->u.status); -- int 
tid = MAX_TID_COUNT - 1; -+ int tid = MAX_TID_COUNT; - int sta_id; - int freed; - u8 *qc = NULL; -diff --git a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h -index bc056e9..4ef6804 100644 ---- a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h -+++ b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h -@@ -92,15 +92,11 @@ - - static inline s32 iwl_temp_calib_to_offset(struct iwl_priv *priv) - { -- u16 temperature, voltage; -- __le16 *temp_calib = -- (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_TEMPERATURE); -- -- temperature = le16_to_cpu(temp_calib[0]); -- voltage = le16_to_cpu(temp_calib[1]); -- -- /* offset = temp - volt / coeff */ -- return (s32)(temperature - voltage / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF); -+ u16 *temp_calib = (u16 *)iwl_eeprom_query_addr(priv, -+ EEPROM_5000_TEMPERATURE); -+ /* offset = temperature - voltage / coef */ -+ s32 offset = (s32)(temp_calib[0] - temp_calib[1] / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF); -+ return offset; - } - - /* Fixed (non-configurable) rx data from phy */ -diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c -index 133df70..6e6f516 100644 ---- a/drivers/net/wireless/iwlwifi/iwl-5000.c -+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c -@@ -460,15 +460,14 @@ static void iwl5000_set_ct_threshold(struct iwl_priv *priv) - static int iwl5000_set_Xtal_calib(struct iwl_priv *priv) - { - struct iwl_calib_xtal_freq_cmd cmd; -- __le16 *xtal_calib = -- (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_XTAL); -+ u16 *xtal_calib = (u16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_XTAL); - - cmd.hdr.op_code = IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD; - cmd.hdr.first_group = 0; - cmd.hdr.groups_num = 1; - cmd.hdr.data_valid = 1; -- cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]); -- cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]); -+ cmd.cap_pin1 = (u8)xtal_calib[0]; -+ cmd.cap_pin2 = (u8)xtal_calib[1]; - return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL], - (u8 *)&cmd, sizeof(cmd)); - } -@@ -1666,7 +1665,6 @@ struct iwl_cfg iwl5300_agn_cfg = { - .valid_rx_ant = ANT_ABC, - .need_pll_cfg = true, - .ht_greenfield_support = true, -- .use_rts_for_ht = true, /* use rts/cts protection */ - }; - - struct iwl_cfg iwl5100_bg_cfg = { -@@ -1718,7 +1716,6 @@ struct iwl_cfg iwl5100_agn_cfg = { - .valid_rx_ant = ANT_AB, - .need_pll_cfg = true, - .ht_greenfield_support = true, -- .use_rts_for_ht = true, /* use rts/cts protection */ - }; - - struct iwl_cfg iwl5350_agn_cfg = { -@@ -1736,7 +1733,6 @@ struct iwl_cfg iwl5350_agn_cfg = { - .valid_rx_ant = ANT_ABC, - .need_pll_cfg = true, - .ht_greenfield_support = true, -- .use_rts_for_ht = true, /* use rts/cts protection */ - }; - - struct iwl_cfg iwl5150_agn_cfg = { -@@ -1754,7 +1750,6 @@ struct iwl_cfg iwl5150_agn_cfg = { - .valid_rx_ant = ANT_AB, - .need_pll_cfg = true, - .ht_greenfield_support = true, -- .use_rts_for_ht = true, /* use rts/cts protection */ - }; - - MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX)); -diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c -index 0eb2591..81726ee 100644 ---- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c -+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c -@@ -2808,7 +2808,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv, - repeat_rate--; - } - -- lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF; -+ lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_MAX; - lq_cmd->agg_params.agg_dis_start_th = 
LINK_QUAL_AGG_DISABLE_START_DEF; - lq_cmd->agg_params.agg_time_limit = - cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF); -diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c -index 0cd4ec4..2dc9287 100644 ---- a/drivers/net/wireless/iwlwifi/iwl-core.c -+++ b/drivers/net/wireless/iwlwifi/iwl-core.c -@@ -2645,7 +2645,6 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed) - if ((le16_to_cpu(priv->staging_rxon.channel) != ch)) - priv->staging_rxon.flags = 0; - -- iwl_set_rxon_ht(priv, ht_conf); - iwl_set_rxon_channel(priv, conf->channel); - - iwl_set_flags_for_band(priv, conf->channel->band); -diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h -index cea2ee2..028d505 100644 ---- a/drivers/net/wireless/iwlwifi/iwl-dev.h -+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h -@@ -703,7 +703,7 @@ extern void iwl_txq_ctx_stop(struct iwl_priv *priv); - extern int iwl_queue_space(const struct iwl_queue *q); - static inline int iwl_queue_used(const struct iwl_queue *q, int i) - { -- return q->write_ptr >= q->read_ptr ? -+ return q->write_ptr > q->read_ptr ? - (i >= q->read_ptr && i < q->write_ptr) : - !(i < q->read_ptr && i >= q->write_ptr); - } -@@ -1149,7 +1149,7 @@ struct iwl_priv { - u32 last_beacon_time; - u64 last_tsf; - -- /* eeprom -- this is in the card's little endian byte order */ -+ /* eeprom */ - u8 *eeprom; - int nvm_device_type; - struct iwl_eeprom_calib_info *calib_info; -diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c -index 18dc3a4..e14c995 100644 ---- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c -+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c -@@ -337,7 +337,7 @@ static int iwl_init_otp_access(struct iwl_priv *priv) - return ret; - } - --static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, __le16 *eeprom_data) -+static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, u16 *eeprom_data) - { - int ret = 0; - u32 r; -@@ -370,7 +370,7 @@ static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, __le16 *eeprom_dat - CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK); - IWL_ERR(priv, "Correctable OTP ECC error, continue read\n"); - } -- *eeprom_data = cpu_to_le16(r >> 16); -+ *eeprom_data = le16_to_cpu((__force __le16)(r >> 16)); - return 0; - } - -@@ -379,8 +379,7 @@ static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, __le16 *eeprom_dat - */ - static bool iwl_is_otp_empty(struct iwl_priv *priv) - { -- u16 next_link_addr = 0; -- __le16 link_value; -+ u16 next_link_addr = 0, link_value; - bool is_empty = false; - - /* locate the beginning of OTP link list */ -@@ -410,8 +409,7 @@ static bool iwl_is_otp_empty(struct iwl_priv *priv) - static int iwl_find_otp_image(struct iwl_priv *priv, - u16 *validblockaddr) - { -- u16 next_link_addr = 0, valid_addr; -- __le16 link_value = 0; -+ u16 next_link_addr = 0, link_value = 0, valid_addr; - int usedblocks = 0; - - /* set addressing mode to absolute to traverse the link list */ -@@ -431,7 +429,7 @@ static int iwl_find_otp_image(struct iwl_priv *priv, - * check for more block on the link list - */ - valid_addr = next_link_addr; -- next_link_addr = le16_to_cpu(link_value) * sizeof(u16); -+ next_link_addr = link_value * sizeof(u16); - IWL_DEBUG_INFO(priv, "OTP blocks %d addr 0x%x\n", - usedblocks, next_link_addr); - if (iwl_read_otp_word(priv, next_link_addr, &link_value)) -@@ -465,7 +463,7 @@ static int iwl_find_otp_image(struct iwl_priv *priv, - */ - int iwl_eeprom_init(struct iwl_priv *priv) - { -- __le16 *e; 
-+ u16 *e; - u32 gp = iwl_read32(priv, CSR_EEPROM_GP); - int sz; - int ret; -@@ -484,7 +482,7 @@ int iwl_eeprom_init(struct iwl_priv *priv) - ret = -ENOMEM; - goto alloc_err; - } -- e = (__le16 *)priv->eeprom; -+ e = (u16 *)priv->eeprom; - - ret = priv->cfg->ops->lib->eeprom_ops.verify_signature(priv); - if (ret < 0) { -@@ -523,7 +521,7 @@ int iwl_eeprom_init(struct iwl_priv *priv) - } - for (addr = validblockaddr; addr < validblockaddr + sz; - addr += sizeof(u16)) { -- __le16 eeprom_data; -+ u16 eeprom_data; - - ret = iwl_read_otp_word(priv, addr, &eeprom_data); - if (ret) -@@ -547,7 +545,7 @@ int iwl_eeprom_init(struct iwl_priv *priv) - goto done; - } - r = _iwl_read_direct32(priv, CSR_EEPROM_REG); -- e[addr / 2] = cpu_to_le16(r >> 16); -+ e[addr / 2] = le16_to_cpu((__force __le16)(r >> 16)); - } - } - ret = 0; -@@ -711,8 +709,7 @@ static int iwl_mod_ht40_chan_info(struct iwl_priv *priv, - ch_info->ht40_min_power = 0; - ch_info->ht40_scan_power = eeprom_ch->max_power_avg; - ch_info->ht40_flags = eeprom_ch->flags; -- if (eeprom_ch->flags & EEPROM_CHANNEL_VALID) -- ch_info->ht40_extension_channel &= ~clear_ht40_extension_channel; -+ ch_info->ht40_extension_channel &= ~clear_ht40_extension_channel; - - return 0; - } -diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h -index fc93f12..80b9e45 100644 ---- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h -+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h -@@ -133,7 +133,7 @@ struct iwl_eeprom_channel { - * - */ - struct iwl_eeprom_enhanced_txpwr { -- __le16 common; -+ u16 reserved; - s8 chain_a_max; - s8 chain_b_max; - s8 chain_c_max; -@@ -347,7 +347,7 @@ struct iwl_eeprom_calib_subband_info { - struct iwl_eeprom_calib_info { - u8 saturation_power24; /* half-dBm (e.g. 
"34" = 17 dBm) */ - u8 saturation_power52; /* half-dBm */ -- __le16 voltage; /* signed */ -+ s16 voltage; /* signed */ - struct iwl_eeprom_calib_subband_info - band_info[EEPROM_TX_POWER_BANDS]; - } __attribute__ ((packed)); -diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c -index 5f26c93..d00a803 100644 ---- a/drivers/net/wireless/iwlwifi/iwl3945-base.c -+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c -@@ -562,9 +562,6 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) - txq = &priv->txq[txq_id]; - q = &txq->q; - -- if ((iwl_queue_space(q) < q->high_mark)) -- goto drop; -- - spin_lock_irqsave(&priv->lock, flags); - - idx = get_cmd_index(q, q->write_ptr, 0); -@@ -3857,11 +3854,9 @@ static int iwl3945_setup_mac(struct iwl_priv *priv) - /* Tell mac80211 our characteristics */ - hw->flags = IEEE80211_HW_SIGNAL_DBM | - IEEE80211_HW_NOISE_DBM | -- IEEE80211_HW_SPECTRUM_MGMT; -- -- if (!priv->cfg->broken_powersave) -- hw->flags |= IEEE80211_HW_SUPPORTS_PS | -- IEEE80211_HW_SUPPORTS_DYNAMIC_PS; -+ IEEE80211_HW_SPECTRUM_MGMT | -+ IEEE80211_HW_SUPPORTS_PS | -+ IEEE80211_HW_SUPPORTS_DYNAMIC_PS; - - hw->wiphy->interface_modes = - BIT(NL80211_IFTYPE_STATION) | -diff --git a/drivers/net/wireless/iwmc3200wifi/iwm.h b/drivers/net/wireless/iwmc3200wifi/iwm.h -index 93c8989..1b02a4e 100644 ---- a/drivers/net/wireless/iwmc3200wifi/iwm.h -+++ b/drivers/net/wireless/iwmc3200wifi/iwm.h -@@ -258,7 +258,7 @@ struct iwm_priv { - - struct sk_buff_head rx_list; - struct list_head rx_tickets; -- struct list_head rx_packets[IWM_RX_ID_HASH + 1]; -+ struct list_head rx_packets[IWM_RX_ID_HASH]; - struct workqueue_struct *rx_wq; - struct work_struct rx_worker; - -diff --git a/drivers/net/wireless/libertas/scan.c b/drivers/net/wireless/libertas/scan.c -index 06d66a1..6c95af3 100644 ---- a/drivers/net/wireless/libertas/scan.c -+++ b/drivers/net/wireless/libertas/scan.c -@@ -399,8 +399,11 @@ int lbs_scan_networks(struct lbs_private *priv, int full_scan) - chan_count = lbs_scan_create_channel_list(priv, chan_list); - - netif_stop_queue(priv->dev); -- if (priv->mesh_dev) -+ netif_carrier_off(priv->dev); -+ if (priv->mesh_dev) { - netif_stop_queue(priv->mesh_dev); -+ netif_carrier_off(priv->mesh_dev); -+ } - - /* Prepare to continue an interrupted scan */ - lbs_deb_scan("chan_count %d, scan_channel %d\n", -@@ -464,13 +467,16 @@ out2: - priv->scan_channel = 0; - - out: -- if (priv->connect_status == LBS_CONNECTED && !priv->tx_pending_len) -- netif_wake_queue(priv->dev); -- -- if (priv->mesh_dev && (priv->mesh_connect_status == LBS_CONNECTED) && -- !priv->tx_pending_len) -- netif_wake_queue(priv->mesh_dev); -- -+ if (priv->connect_status == LBS_CONNECTED) { -+ netif_carrier_on(priv->dev); -+ if (!priv->tx_pending_len) -+ netif_wake_queue(priv->dev); -+ } -+ if (priv->mesh_dev && (priv->mesh_connect_status == LBS_CONNECTED)) { -+ netif_carrier_on(priv->mesh_dev); -+ if (!priv->tx_pending_len) -+ netif_wake_queue(priv->mesh_dev); -+ } - kfree(chan_list); - - lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret); -diff --git a/drivers/net/wireless/libertas/wext.c b/drivers/net/wireless/libertas/wext.c -index 01c738b..be837a0 100644 ---- a/drivers/net/wireless/libertas/wext.c -+++ b/drivers/net/wireless/libertas/wext.c -@@ -1953,8 +1953,10 @@ static int lbs_get_essid(struct net_device *dev, struct iw_request_info *info, - if (priv->connect_status == LBS_CONNECTED) { - memcpy(extra, priv->curbssparams.ssid, - priv->curbssparams.ssid_len); -+ 
extra[priv->curbssparams.ssid_len] = '\0'; - } else { - memset(extra, 0, 32); -+ extra[priv->curbssparams.ssid_len] = '\0'; - } - /* - * If none, we may want to get the one that was set -diff --git a/drivers/net/wireless/orinoco/wext.c b/drivers/net/wireless/orinoco/wext.c -index 31ca241..7698fdd 100644 ---- a/drivers/net/wireless/orinoco/wext.c -+++ b/drivers/net/wireless/orinoco/wext.c -@@ -23,7 +23,7 @@ - #define MAX_RID_LEN 1024 - - /* Helper routine to record keys -- * It is called under orinoco_lock so it may not sleep */ -+ * Do not call from interrupt context */ - static int orinoco_set_key(struct orinoco_private *priv, int index, - enum orinoco_alg alg, const u8 *key, int key_len, - const u8 *seq, int seq_len) -@@ -32,14 +32,14 @@ static int orinoco_set_key(struct orinoco_private *priv, int index, - kzfree(priv->keys[index].seq); - - if (key_len) { -- priv->keys[index].key = kzalloc(key_len, GFP_ATOMIC); -+ priv->keys[index].key = kzalloc(key_len, GFP_KERNEL); - if (!priv->keys[index].key) - goto nomem; - } else - priv->keys[index].key = NULL; - - if (seq_len) { -- priv->keys[index].seq = kzalloc(seq_len, GFP_ATOMIC); -+ priv->keys[index].seq = kzalloc(seq_len, GFP_KERNEL); - if (!priv->keys[index].seq) - goto free_key; - } else -diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c -index 9a6ceb4..b20e3ea 100644 ---- a/drivers/net/wireless/rt2x00/rt61pci.c -+++ b/drivers/net/wireless/rt2x00/rt61pci.c -@@ -2538,11 +2538,6 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev) - unsigned int i; - - /* -- * Disable powersaving as default. -- */ -- rt2x00dev->hw->wiphy->ps_default = false; -- -- /* - * Initialize all hw fields. - */ - rt2x00dev->hw->flags = -diff --git a/drivers/net/wireless/rtl818x/rtl8187.h b/drivers/net/wireless/rtl818x/rtl8187.h -index 99406bf..bf9175a 100644 ---- a/drivers/net/wireless/rtl818x/rtl8187.h -+++ b/drivers/net/wireless/rtl818x/rtl8187.h -@@ -23,7 +23,6 @@ - #define RTL8187_EEPROM_TXPWR_CHAN_1 0x16 /* 3 channels */ - #define RTL8187_EEPROM_TXPWR_CHAN_6 0x1B /* 2 channels */ - #define RTL8187_EEPROM_TXPWR_CHAN_4 0x3D /* 2 channels */ --#define RTL8187_EEPROM_SELECT_GPIO 0x3B - - #define RTL8187_REQT_READ 0xC0 - #define RTL8187_REQT_WRITE 0x40 -@@ -32,9 +31,6 @@ - - #define RTL8187_MAX_RX 0x9C4 - --#define RFKILL_MASK_8187_89_97 0x2 --#define RFKILL_MASK_8198 0x4 -- - struct rtl8187_rx_info { - struct urb *urb; - struct ieee80211_hw *dev; -@@ -127,7 +123,6 @@ struct rtl8187_priv { - u8 noise; - u8 slot_time; - u8 aifsn[4]; -- u8 rfkill_mask; - struct { - __le64 buf; - struct sk_buff_head queue; -diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c -index 9921147..2017ccc 100644 ---- a/drivers/net/wireless/rtl818x/rtl8187_dev.c -+++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c -@@ -65,7 +65,6 @@ static struct usb_device_id rtl8187_table[] __devinitdata = { - /* Sitecom */ - {USB_DEVICE(0x0df6, 0x000d), .driver_info = DEVICE_RTL8187}, - {USB_DEVICE(0x0df6, 0x0028), .driver_info = DEVICE_RTL8187B}, -- {USB_DEVICE(0x0df6, 0x0029), .driver_info = DEVICE_RTL8187B}, - /* Sphairon Access Systems GmbH */ - {USB_DEVICE(0x114B, 0x0150), .driver_info = DEVICE_RTL8187}, - /* Dick Smith Electronics */ -@@ -1330,7 +1329,6 @@ static int __devinit rtl8187_probe(struct usb_interface *intf, - struct ieee80211_channel *channel; - const char *chip_name; - u16 txpwr, reg; -- u16 product_id = le16_to_cpu(udev->descriptor.idProduct); - int err, i; - - dev = 
ieee80211_alloc_hw(sizeof(*priv), &rtl8187_ops); -@@ -1490,13 +1488,6 @@ static int __devinit rtl8187_probe(struct usb_interface *intf, - (*channel++).hw_value = txpwr & 0xFF; - (*channel++).hw_value = txpwr >> 8; - } -- /* Handle the differing rfkill GPIO bit in different models */ -- priv->rfkill_mask = RFKILL_MASK_8187_89_97; -- if (product_id == 0x8197 || product_id == 0x8198) { -- eeprom_93cx6_read(&eeprom, RTL8187_EEPROM_SELECT_GPIO, ®); -- if (reg & 0xFF00) -- priv->rfkill_mask = RFKILL_MASK_8198; -- } - - /* - * XXX: Once this driver supports anything that requires -@@ -1525,9 +1516,9 @@ static int __devinit rtl8187_probe(struct usb_interface *intf, - mutex_init(&priv->conf_mutex); - skb_queue_head_init(&priv->b_tx_status.queue); - -- printk(KERN_INFO "%s: hwaddr %pM, %s V%d + %s, rfkill mask %d\n", -+ printk(KERN_INFO "%s: hwaddr %pM, %s V%d + %s\n", - wiphy_name(dev->wiphy), dev->wiphy->perm_addr, -- chip_name, priv->asic_rev, priv->rf->name, priv->rfkill_mask); -+ chip_name, priv->asic_rev, priv->rf->name); - - #ifdef CONFIG_RTL8187_LEDS - eeprom_93cx6_read(&eeprom, 0x3F, ®); -diff --git a/drivers/net/wireless/rtl818x/rtl8187_rfkill.c b/drivers/net/wireless/rtl818x/rtl8187_rfkill.c -index 03555e1..cad8037 100644 ---- a/drivers/net/wireless/rtl818x/rtl8187_rfkill.c -+++ b/drivers/net/wireless/rtl818x/rtl8187_rfkill.c -@@ -25,10 +25,10 @@ static bool rtl8187_is_radio_enabled(struct rtl8187_priv *priv) - u8 gpio; - - gpio = rtl818x_ioread8(priv, &priv->map->GPIO0); -- rtl818x_iowrite8(priv, &priv->map->GPIO0, gpio & ~priv->rfkill_mask); -+ rtl818x_iowrite8(priv, &priv->map->GPIO0, gpio & ~0x02); - gpio = rtl818x_ioread8(priv, &priv->map->GPIO1); - -- return gpio & priv->rfkill_mask; -+ return gpio & 0x02; - } - - void rtl8187_rfkill_init(struct ieee80211_hw *hw) -diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c -index 5753036..b952ebc 100644 ---- a/drivers/pci/dmar.c -+++ b/drivers/pci/dmar.c -@@ -582,8 +582,6 @@ int __init dmar_table_init(void) - return 0; - } - --static int bios_warned; -- - int __init check_zero_address(void) - { - struct acpi_table_dmar *dmar; -@@ -603,9 +601,6 @@ int __init check_zero_address(void) - } - - if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) { -- void __iomem *addr; -- u64 cap, ecap; -- - drhd = (void *)entry_header; - if (!drhd->address) { - /* Promote an attitude of violence to a BIOS engineer today */ -@@ -614,40 +609,17 @@ int __init check_zero_address(void) - dmi_get_system_info(DMI_BIOS_VENDOR), - dmi_get_system_info(DMI_BIOS_VERSION), - dmi_get_system_info(DMI_PRODUCT_VERSION)); -- bios_warned = 1; -- goto failed; -- } -- -- addr = early_ioremap(drhd->address, VTD_PAGE_SIZE); -- if (!addr ) { -- printk("IOMMU: can't validate: %llx\n", drhd->address); -- goto failed; -- } -- cap = dmar_readq(addr + DMAR_CAP_REG); -- ecap = dmar_readq(addr + DMAR_ECAP_REG); -- early_iounmap(addr, VTD_PAGE_SIZE); -- if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) { -- /* Promote an attitude of violence to a BIOS engineer today */ -- WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n" -- "BIOS vendor: %s; Ver: %s; Product Version: %s\n", -- drhd->address, -- dmi_get_system_info(DMI_BIOS_VENDOR), -- dmi_get_system_info(DMI_BIOS_VERSION), -- dmi_get_system_info(DMI_PRODUCT_VERSION)); -- bios_warned = 1; -- goto failed; -+#ifdef CONFIG_DMAR -+ dmar_disabled = 1; -+#endif -+ return 0; - } -+ break; - } - - entry_header = ((void *)entry_header + entry_header->length); - } - return 1; -- --failed: --#ifdef CONFIG_DMAR -- 
dmar_disabled = 1; --#endif -- return 0; - } - - void __init detect_intel_iommu(void) -@@ -692,18 +664,6 @@ int alloc_iommu(struct dmar_drhd_unit *drhd) - int agaw = 0; - int msagaw = 0; - -- if (!drhd->reg_base_addr) { -- if (!bios_warned) { -- WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n" -- "BIOS vendor: %s; Ver: %s; Product Version: %s\n", -- dmi_get_system_info(DMI_BIOS_VENDOR), -- dmi_get_system_info(DMI_BIOS_VERSION), -- dmi_get_system_info(DMI_PRODUCT_VERSION)); -- bios_warned = 1; -- } -- return -EINVAL; -- } -- - iommu = kzalloc(sizeof(*iommu), GFP_KERNEL); - if (!iommu) - return -ENOMEM; -@@ -720,16 +680,13 @@ int alloc_iommu(struct dmar_drhd_unit *drhd) - iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG); - - if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) { -- if (!bios_warned) { -- /* Promote an attitude of violence to a BIOS engineer today */ -- WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n" -- "BIOS vendor: %s; Ver: %s; Product Version: %s\n", -- drhd->reg_base_addr, -- dmi_get_system_info(DMI_BIOS_VENDOR), -- dmi_get_system_info(DMI_BIOS_VERSION), -- dmi_get_system_info(DMI_PRODUCT_VERSION)); -- bios_warned = 1; -- } -+ /* Promote an attitude of violence to a BIOS engineer today */ -+ WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n" -+ "BIOS vendor: %s; Ver: %s; Product Version: %s\n", -+ drhd->reg_base_addr, -+ dmi_get_system_info(DMI_BIOS_VENDOR), -+ dmi_get_system_info(DMI_BIOS_VERSION), -+ dmi_get_system_info(DMI_PRODUCT_VERSION)); - goto err_unmap; - } - -diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c -index 2498602..1840a05 100644 ---- a/drivers/pci/intel-iommu.c -+++ b/drivers/pci/intel-iommu.c -@@ -1523,15 +1523,12 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment, - - /* Skip top levels of page tables for - * iommu which has less agaw than default. -- * Unnecessary for PT mode. 
- */ -- if (translation != CONTEXT_TT_PASS_THROUGH) { -- for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) { -- pgd = phys_to_virt(dma_pte_addr(pgd)); -- if (!dma_pte_present(pgd)) { -- spin_unlock_irqrestore(&iommu->lock, flags); -- return -ENOMEM; -- } -+ for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) { -+ pgd = phys_to_virt(dma_pte_addr(pgd)); -+ if (!dma_pte_present(pgd)) { -+ spin_unlock_irqrestore(&iommu->lock, flags); -+ return -ENOMEM; - } - } - } -@@ -1994,16 +1991,6 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev, - "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n", - pci_name(pdev), start, end); - -- if (end < start) { -- WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n" -- "BIOS vendor: %s; Ver: %s; Product Version: %s\n", -- dmi_get_system_info(DMI_BIOS_VENDOR), -- dmi_get_system_info(DMI_BIOS_VERSION), -- dmi_get_system_info(DMI_PRODUCT_VERSION)); -- ret = -EIO; -- goto error; -- } -- - if (end >> agaw_to_width(domain->agaw)) { - WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n" - "BIOS vendor: %s; Ver: %s; Product Version: %s\n", -@@ -3241,9 +3228,6 @@ static int device_notifier(struct notifier_block *nb, - struct pci_dev *pdev = to_pci_dev(dev); - struct dmar_domain *domain; - -- if (iommu_no_mapping(dev)) -- return 0; -- - domain = find_domain(pdev); - if (!domain) - return 0; -diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c -index 6477722..4e4c295 100644 ---- a/drivers/pci/pci.c -+++ b/drivers/pci/pci.c -@@ -2723,11 +2723,6 @@ int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev) - return 1; - } - --void __weak pci_fixup_cardbus(struct pci_bus *bus) --{ --} --EXPORT_SYMBOL(pci_fixup_cardbus); -- - static int __init pci_setup(char *str) - { - while (str) { -diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c -index 0d91a8a..62d15f6 100644 ---- a/drivers/pci/pcie/aer/aer_inject.c -+++ b/drivers/pci/pcie/aer/aer_inject.c -@@ -392,14 +392,8 @@ static int aer_inject(struct aer_error_inj *einj) - if (ret) - goto out_put; - -- if (find_aer_device(rpdev, &edev)) { -- if (!get_service_data(edev)) { -- printk(KERN_WARNING "AER service is not initialized\n"); -- ret = -EINVAL; -- goto out_put; -- } -+ if (find_aer_device(rpdev, &edev)) - aer_irq(-1, edev); -- } - else - ret = -EINVAL; - out_put: -diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c -index dd58c6a..cb1a027 100644 ---- a/drivers/pci/setup-bus.c -+++ b/drivers/pci/setup-bus.c -@@ -142,6 +142,7 @@ static void pci_setup_bridge(struct pci_bus *bus) - struct pci_dev *bridge = bus->self; - struct pci_bus_region region; - u32 l, bu, lu, io_upper16; -+ int pref_mem64; - - if (pci_is_enabled(bridge)) - return; -@@ -197,6 +198,7 @@ static void pci_setup_bridge(struct pci_bus *bus) - pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, 0); - - /* Set up PREF base/limit. */ -+ pref_mem64 = 0; - bu = lu = 0; - pcibios_resource_to_bus(bridge, ®ion, bus->resource[2]); - if (bus->resource[2]->flags & IORESOURCE_PREFETCH) { -@@ -204,6 +206,7 @@ static void pci_setup_bridge(struct pci_bus *bus) - l = (region.start >> 16) & 0xfff0; - l |= region.end & 0xfff00000; - if (bus->resource[2]->flags & IORESOURCE_MEM_64) { -+ pref_mem64 = 1; - bu = upper_32_bits(region.start); - lu = upper_32_bits(region.end); - width = 16; -@@ -218,9 +221,11 @@ static void pci_setup_bridge(struct pci_bus *bus) - } - pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l); - -- /* Set the upper 32 bits of PREF base & limit. 
*/ -- pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu); -- pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu); -+ if (pref_mem64) { -+ /* Set the upper 32 bits of PREF base & limit. */ -+ pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu); -+ pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu); -+ } - - pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl); - } -diff --git a/drivers/pcmcia/cardbus.c b/drivers/pcmcia/cardbus.c -index 5c26793..db77e1f 100644 ---- a/drivers/pcmcia/cardbus.c -+++ b/drivers/pcmcia/cardbus.c -@@ -214,7 +214,7 @@ int __ref cb_alloc(struct pcmcia_socket * s) - unsigned int max, pass; - - s->functions = pci_scan_slot(bus, PCI_DEVFN(0, 0)); -- pci_fixup_cardbus(bus); -+// pcibios_fixup_bus(bus); - - max = bus->secondary; - for (pass = 0; pass < 2; pass++) -diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c -index 4d922e4..ab64522 100644 ---- a/drivers/platform/x86/acerhdf.c -+++ b/drivers/platform/x86/acerhdf.c -@@ -52,7 +52,7 @@ - */ - #undef START_IN_KERNEL_MODE - --#define DRV_VER "0.5.20" -+#define DRV_VER "0.5.18" - - /* - * According to the Atom N270 datasheet, -@@ -112,14 +112,12 @@ module_param_string(force_product, force_product, 16, 0); - MODULE_PARM_DESC(force_product, "Force BIOS product and omit BIOS check"); - - /* -- * cmd_off: to switch the fan completely off -- * chk_off: to check if the fan is off -+ * cmd_off: to switch the fan completely off / to check if the fan is off - * cmd_auto: to set the BIOS in control of the fan. The BIOS regulates then - * the fan speed depending on the temperature - */ - struct fancmd { - u8 cmd_off; -- u8 chk_off; - u8 cmd_auto; - }; - -@@ -136,41 +134,32 @@ struct bios_settings_t { - /* Register addresses and values for different BIOS versions */ - static const struct bios_settings_t bios_tbl[] = { - /* AOA110 */ -- {"Acer", "AOA110", "v0.3109", 0x55, 0x58, {0x1f, 0x1f, 0x00} }, -- {"Acer", "AOA110", "v0.3114", 0x55, 0x58, {0x1f, 0x1f, 0x00} }, -- {"Acer", "AOA110", "v0.3301", 0x55, 0x58, {0xaf, 0xaf, 0x00} }, -- {"Acer", "AOA110", "v0.3304", 0x55, 0x58, {0xaf, 0xaf, 0x00} }, -- {"Acer", "AOA110", "v0.3305", 0x55, 0x58, {0xaf, 0xaf, 0x00} }, -- {"Acer", "AOA110", "v0.3307", 0x55, 0x58, {0xaf, 0xaf, 0x00} }, -- {"Acer", "AOA110", "v0.3308", 0x55, 0x58, {0x21, 0x21, 0x00} }, -- {"Acer", "AOA110", "v0.3309", 0x55, 0x58, {0x21, 0x21, 0x00} }, -- {"Acer", "AOA110", "v0.3310", 0x55, 0x58, {0x21, 0x21, 0x00} }, -+ {"Acer", "AOA110", "v0.3109", 0x55, 0x58, {0x1f, 0x00} }, -+ {"Acer", "AOA110", "v0.3114", 0x55, 0x58, {0x1f, 0x00} }, -+ {"Acer", "AOA110", "v0.3301", 0x55, 0x58, {0xaf, 0x00} }, -+ {"Acer", "AOA110", "v0.3304", 0x55, 0x58, {0xaf, 0x00} }, -+ {"Acer", "AOA110", "v0.3305", 0x55, 0x58, {0xaf, 0x00} }, -+ {"Acer", "AOA110", "v0.3307", 0x55, 0x58, {0xaf, 0x00} }, -+ {"Acer", "AOA110", "v0.3308", 0x55, 0x58, {0x21, 0x00} }, -+ {"Acer", "AOA110", "v0.3309", 0x55, 0x58, {0x21, 0x00} }, -+ {"Acer", "AOA110", "v0.3310", 0x55, 0x58, {0x21, 0x00} }, - /* AOA150 */ -- {"Acer", "AOA150", "v0.3114", 0x55, 0x58, {0x20, 0x20, 0x00} }, -- {"Acer", "AOA150", "v0.3301", 0x55, 0x58, {0x20, 0x20, 0x00} }, -- {"Acer", "AOA150", "v0.3304", 0x55, 0x58, {0x20, 0x20, 0x00} }, -- {"Acer", "AOA150", "v0.3305", 0x55, 0x58, {0x20, 0x20, 0x00} }, -- {"Acer", "AOA150", "v0.3307", 0x55, 0x58, {0x20, 0x20, 0x00} }, -- {"Acer", "AOA150", "v0.3308", 0x55, 0x58, {0x20, 0x20, 0x00} }, -- {"Acer", "AOA150", "v0.3309", 0x55, 0x58, {0x20, 0x20, 0x00} }, -- {"Acer", "AOA150", 
"v0.3310", 0x55, 0x58, {0x20, 0x20, 0x00} }, -- /* Acer 1410 */ -- {"Acer", "Aspire 1410", "v0.3120", 0x55, 0x58, {0x9e, 0x9e, 0x00} }, -+ {"Acer", "AOA150", "v0.3114", 0x55, 0x58, {0x20, 0x00} }, -+ {"Acer", "AOA150", "v0.3301", 0x55, 0x58, {0x20, 0x00} }, -+ {"Acer", "AOA150", "v0.3304", 0x55, 0x58, {0x20, 0x00} }, -+ {"Acer", "AOA150", "v0.3305", 0x55, 0x58, {0x20, 0x00} }, -+ {"Acer", "AOA150", "v0.3307", 0x55, 0x58, {0x20, 0x00} }, -+ {"Acer", "AOA150", "v0.3308", 0x55, 0x58, {0x20, 0x00} }, -+ {"Acer", "AOA150", "v0.3309", 0x55, 0x58, {0x20, 0x00} }, -+ {"Acer", "AOA150", "v0.3310", 0x55, 0x58, {0x20, 0x00} }, - /* special BIOS / other */ -- {"Gateway", "AOA110", "v0.3103", 0x55, 0x58, {0x21, 0x21, 0x00} }, -- {"Gateway", "AOA150", "v0.3103", 0x55, 0x58, {0x20, 0x20, 0x00} }, -- {"Gateway ", "LT31 ", "v1.3103 ", 0x55, 0x58, -- {0x10, 0x0f, 0x00} }, -- {"Gateway ", "LT31 ", "v1.3201 ", 0x55, 0x58, -- {0x10, 0x0f, 0x00} }, -- {"Gateway ", "LT31 ", "v1.3302 ", 0x55, 0x58, -- {0x10, 0x0f, 0x00} }, -- {"Packard Bell", "DOA150", "v0.3104", 0x55, 0x58, {0x21, 0x21, 0x00} }, -- {"Packard Bell", "DOA150", "v0.3105", 0x55, 0x58, {0x20, 0x20, 0x00} }, -- {"Packard Bell", "AOA110", "v0.3105", 0x55, 0x58, {0x21, 0x21, 0x00} }, -- {"Packard Bell", "AOA150", "v0.3105", 0x55, 0x58, {0x20, 0x20, 0x00} }, -+ {"Gateway", "AOA110", "v0.3103", 0x55, 0x58, {0x21, 0x00} }, -+ {"Gateway", "AOA150", "v0.3103", 0x55, 0x58, {0x20, 0x00} }, -+ {"Packard Bell", "DOA150", "v0.3104", 0x55, 0x58, {0x21, 0x00} }, -+ {"Packard Bell", "AOA110", "v0.3105", 0x55, 0x58, {0x21, 0x00} }, -+ {"Packard Bell", "AOA150", "v0.3105", 0x55, 0x58, {0x20, 0x00} }, - /* pewpew-terminator */ -- {"", "", "", 0, 0, {0, 0, 0} } -+ {"", "", "", 0, 0, {0, 0} } - }; - - static const struct bios_settings_t *bios_cfg __read_mostly; -@@ -194,7 +183,7 @@ static int acerhdf_get_fanstate(int *state) - if (ec_read(bios_cfg->fanreg, &fan)) - return -EINVAL; - -- if (fan != bios_cfg->cmd.chk_off) -+ if (fan != bios_cfg->cmd.cmd_off) - *state = ACERHDF_FAN_AUTO; - else - *state = ACERHDF_FAN_OFF; -@@ -640,10 +629,9 @@ static void __exit acerhdf_exit(void) - MODULE_LICENSE("GPL"); - MODULE_AUTHOR("Peter Feuerer"); - MODULE_DESCRIPTION("Aspire One temperature and fan driver"); --MODULE_ALIAS("dmi:*:*Acer*:pnAOA*:"); --MODULE_ALIAS("dmi:*:*Gateway*:pnAOA*:"); --MODULE_ALIAS("dmi:*:*Packard Bell*:pnAOA*:"); --MODULE_ALIAS("dmi:*:*Packard Bell*:pnDOA*:"); -+MODULE_ALIAS("dmi:*:*Acer*:*:"); -+MODULE_ALIAS("dmi:*:*Gateway*:*:"); -+MODULE_ALIAS("dmi:*:*Packard Bell*:*:"); - - module_init(acerhdf_init); - module_exit(acerhdf_exit); -diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c -index 767cb61..b39d2bb 100644 ---- a/drivers/platform/x86/asus-laptop.c -+++ b/drivers/platform/x86/asus-laptop.c -@@ -221,7 +221,6 @@ static struct asus_hotk *hotk; - */ - static const struct acpi_device_id asus_device_ids[] = { - {"ATK0100", 0}, -- {"ATK0101", 0}, - {"", 0}, - }; - MODULE_DEVICE_TABLE(acpi, asus_device_ids); -@@ -294,11 +293,6 @@ struct key_entry { - enum { KE_KEY, KE_END }; - - static struct key_entry asus_keymap[] = { -- {KE_KEY, 0x02, KEY_SCREENLOCK}, -- {KE_KEY, 0x05, KEY_WLAN}, -- {KE_KEY, 0x08, BTN_TOUCH}, -- {KE_KEY, 0x17, KEY_ZOOM}, -- {KE_KEY, 0x1f, KEY_BATTERY}, - {KE_KEY, 0x30, KEY_VOLUMEUP}, - {KE_KEY, 0x31, KEY_VOLUMEDOWN}, - {KE_KEY, 0x32, KEY_MUTE}, -@@ -318,8 +312,6 @@ static struct key_entry asus_keymap[] = { - {KE_KEY, 0x5F, KEY_WLAN}, - {KE_KEY, 0x60, KEY_SWITCHVIDEOMODE}, - {KE_KEY, 0x61, 
KEY_SWITCHVIDEOMODE}, -- {KE_KEY, 0x62, KEY_SWITCHVIDEOMODE}, -- {KE_KEY, 0x63, KEY_SWITCHVIDEOMODE}, - {KE_KEY, 0x6B, BTN_TOUCH}, /* Lock Mouse */ - {KE_KEY, 0x82, KEY_CAMERA}, - {KE_KEY, 0x8A, KEY_PROG1}, -@@ -1291,8 +1283,8 @@ static int asus_hotk_add(struct acpi_device *device) - hotk->ledd_status = 0xFFF; - - /* Set initial values of light sensor and level */ -- hotk->light_switch = 0; /* Default to light sensor disabled */ -- hotk->light_level = 5; /* level 5 for sensor sensitivity */ -+ hotk->light_switch = 1; /* Default to light sensor disabled */ -+ hotk->light_level = 0; /* level 5 for sensor sensitivity */ - - if (ls_switch_handle) - set_light_sens_switch(hotk->light_switch); -diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c -index 6dec7cc..0f900cc 100644 ---- a/drivers/platform/x86/dell-wmi.c -+++ b/drivers/platform/x86/dell-wmi.c -@@ -158,13 +158,8 @@ static void dell_wmi_notify(u32 value, void *context) - struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL }; - static struct key_entry *key; - union acpi_object *obj; -- acpi_status status; - -- status = wmi_get_event_data(value, &response); -- if (status != AE_OK) { -- printk(KERN_INFO "dell-wmi: bad event status 0x%x\n", status); -- return; -- } -+ wmi_get_event_data(value, &response); - - obj = (union acpi_object *)response.pointer; - -@@ -185,7 +180,6 @@ static void dell_wmi_notify(u32 value, void *context) - printk(KERN_INFO "dell-wmi: Unknown key %x pressed\n", - buffer[1] & 0xFFFF); - } -- kfree(obj); - } - - static int __init dell_wmi_input_setup(void) -diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c -index deb53b5..c284217 100644 ---- a/drivers/platform/x86/hp-wmi.c -+++ b/drivers/platform/x86/hp-wmi.c -@@ -334,13 +334,8 @@ static void hp_wmi_notify(u32 value, void *context) - struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL }; - static struct key_entry *key; - union acpi_object *obj; -- acpi_status status; - -- status = wmi_get_event_data(value, &response); -- if (status != AE_OK) { -- printk(KERN_INFO "hp-wmi: bad event status 0x%x\n", status); -- return; -- } -+ wmi_get_event_data(value, &response); - - obj = (union acpi_object *)response.pointer; - -@@ -382,8 +377,6 @@ static void hp_wmi_notify(u32 value, void *context) - eventcode); - } else - printk(KERN_INFO "HP WMI: Unknown response received\n"); -- -- kfree(obj); - } - - static int __init hp_wmi_input_setup(void) -diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c -index 1ee734c..a848c7e 100644 ---- a/drivers/platform/x86/thinkpad_acpi.c -+++ b/drivers/platform/x86/thinkpad_acpi.c -@@ -3866,6 +3866,15 @@ enum { - - #define TPACPI_RFK_BLUETOOTH_SW_NAME "tpacpi_bluetooth_sw" - -+static void bluetooth_suspend(pm_message_t state) -+{ -+ /* Try to make sure radio will resume powered off */ -+ if (!acpi_evalf(NULL, NULL, "\\BLTH", "vd", -+ TP_ACPI_BLTH_PWR_OFF_ON_RESUME)) -+ vdbg_printk(TPACPI_DBG_RFKILL, -+ "bluetooth power down on resume request failed\n"); -+} -+ - static int bluetooth_get_status(void) - { - int status; -@@ -3899,9 +3908,10 @@ static int bluetooth_set_status(enum tpacpi_rfkill_state state) - #endif - - /* We make sure to keep TP_ACPI_BLUETOOTH_RESUMECTRL off */ -- status = TP_ACPI_BLUETOOTH_RESUMECTRL; - if (state == TPACPI_RFK_RADIO_ON) -- status |= TP_ACPI_BLUETOOTH_RADIOSSW; -+ status = TP_ACPI_BLUETOOTH_RADIOSSW; -+ else -+ status = 0; - - if (!acpi_evalf(hkey_handle, NULL, "SBDC", "vd", status)) - return -EIO; -@@ -4040,6 +4050,7 
@@ static struct ibm_struct bluetooth_driver_data = { - .read = bluetooth_read, - .write = bluetooth_write, - .exit = bluetooth_exit, -+ .suspend = bluetooth_suspend, - .shutdown = bluetooth_shutdown, - }; - -@@ -4057,6 +4068,15 @@ enum { - - #define TPACPI_RFK_WWAN_SW_NAME "tpacpi_wwan_sw" - -+static void wan_suspend(pm_message_t state) -+{ -+ /* Try to make sure radio will resume powered off */ -+ if (!acpi_evalf(NULL, NULL, "\\WGSV", "qvd", -+ TP_ACPI_WGSV_PWR_OFF_ON_RESUME)) -+ vdbg_printk(TPACPI_DBG_RFKILL, -+ "WWAN power down on resume request failed\n"); -+} -+ - static int wan_get_status(void) - { - int status; -@@ -4089,10 +4109,11 @@ static int wan_set_status(enum tpacpi_rfkill_state state) - } - #endif - -- /* We make sure to set TP_ACPI_WANCARD_RESUMECTRL */ -- status = TP_ACPI_WANCARD_RESUMECTRL; -+ /* We make sure to keep TP_ACPI_WANCARD_RESUMECTRL off */ - if (state == TPACPI_RFK_RADIO_ON) -- status |= TP_ACPI_WANCARD_RADIOSSW; -+ status = TP_ACPI_WANCARD_RADIOSSW; -+ else -+ status = 0; - - if (!acpi_evalf(hkey_handle, NULL, "SWAN", "vd", status)) - return -EIO; -@@ -4230,6 +4251,7 @@ static struct ibm_struct wan_driver_data = { - .read = wan_read, - .write = wan_write, - .exit = wan_exit, -+ .suspend = wan_suspend, - .shutdown = wan_shutdown, - }; - -@@ -6101,8 +6123,8 @@ static const struct tpacpi_quirk brightness_quirk_table[] __initconst = { - - /* Models with Intel Extreme Graphics 2 */ - TPACPI_Q_IBM('1', 'U', TPACPI_BRGHT_Q_NOEC), -- TPACPI_Q_IBM('1', 'V', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC), -- TPACPI_Q_IBM('1', 'W', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC), -+ TPACPI_Q_IBM('1', 'V', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_NOEC), -+ TPACPI_Q_IBM('1', 'W', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_NOEC), - - /* Models with Intel GMA900 */ - TPACPI_Q_IBM('7', '0', TPACPI_BRGHT_Q_NOEC), /* T43, R52 */ -diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c -index 87f4c97..177f8d7 100644 ---- a/drivers/platform/x86/wmi.c -+++ b/drivers/platform/x86/wmi.c -@@ -510,8 +510,8 @@ EXPORT_SYMBOL_GPL(wmi_remove_notify_handler); - /** - * wmi_get_event_data - Get WMI data associated with an event - * -- * @event: Event to find -- * @out: Buffer to hold event data. out->pointer should be freed with kfree() -+ * @event - Event to find -+ * &out - Buffer to hold event data - * - * Returns extra data associated with an event in WMI. 
- */ -diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c -index 1836053..efe568d 100644 ---- a/drivers/regulator/core.c -+++ b/drivers/regulator/core.c -@@ -640,7 +640,7 @@ static int suspend_prepare(struct regulator_dev *rdev, suspend_state_t state) - static void print_constraints(struct regulator_dev *rdev) - { - struct regulation_constraints *constraints = rdev->constraints; -- char buf[80] = ""; -+ char buf[80]; - int count; - - if (rdev->desc->type == REGULATOR_VOLTAGE) { -diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c -index 43ed81e..768bd0e 100644 ---- a/drivers/regulator/wm8350-regulator.c -+++ b/drivers/regulator/wm8350-regulator.c -@@ -1504,8 +1504,7 @@ int wm8350_register_led(struct wm8350 *wm8350, int lednum, int dcdc, int isink, - led->isink_init.consumer_supplies = &led->isink_consumer; - led->isink_init.constraints.min_uA = 0; - led->isink_init.constraints.max_uA = pdata->max_uA; -- led->isink_init.constraints.valid_ops_mask -- = REGULATOR_CHANGE_CURRENT | REGULATOR_CHANGE_STATUS; -+ led->isink_init.constraints.valid_ops_mask = REGULATOR_CHANGE_CURRENT; - led->isink_init.constraints.valid_modes_mask = REGULATOR_MODE_NORMAL; - ret = wm8350_register_regulator(wm8350, isink, &led->isink_init); - if (ret != 0) { -@@ -1518,7 +1517,6 @@ int wm8350_register_led(struct wm8350 *wm8350, int lednum, int dcdc, int isink, - led->dcdc_init.num_consumer_supplies = 1; - led->dcdc_init.consumer_supplies = &led->dcdc_consumer; - led->dcdc_init.constraints.valid_modes_mask = REGULATOR_MODE_NORMAL; -- led->dcdc_init.constraints.valid_ops_mask = REGULATOR_CHANGE_STATUS; - ret = wm8350_register_regulator(wm8350, dcdc, &led->dcdc_init); - if (ret != 0) { - platform_device_put(pdev); -diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c -index 473e5f2..f7a4701 100644 ---- a/drivers/rtc/rtc-cmos.c -+++ b/drivers/rtc/rtc-cmos.c -@@ -1099,9 +1099,9 @@ static int cmos_pnp_resume(struct pnp_dev *pnp) - #define cmos_pnp_resume NULL - #endif - --static void cmos_pnp_shutdown(struct pnp_dev *pnp) -+static void cmos_pnp_shutdown(struct device *pdev) - { -- if (system_state == SYSTEM_POWER_OFF && !cmos_poweroff(&pnp->dev)) -+ if (system_state == SYSTEM_POWER_OFF && !cmos_poweroff(pdev)) - return; - - cmos_do_shutdown(); -@@ -1120,12 +1120,15 @@ static struct pnp_driver cmos_pnp_driver = { - .id_table = rtc_ids, - .probe = cmos_pnp_probe, - .remove = __exit_p(cmos_pnp_remove), -- .shutdown = cmos_pnp_shutdown, - - /* flag ensures resume() gets called, and stops syslog spam */ - .flags = PNP_DRIVER_RES_DO_NOT_CHANGE, - .suspend = cmos_pnp_suspend, - .resume = cmos_pnp_resume, -+ .driver = { -+ .name = (char *)driver_name, -+ .shutdown = cmos_pnp_shutdown, -+ } - }; - - #endif /* CONFIG_PNP */ -diff --git a/drivers/rtc/rtc-fm3130.c b/drivers/rtc/rtc-fm3130.c -index 812c667..3a7be11 100644 ---- a/drivers/rtc/rtc-fm3130.c -+++ b/drivers/rtc/rtc-fm3130.c -@@ -376,22 +376,20 @@ static int __devinit fm3130_probe(struct i2c_client *client, - } - - /* Disabling calibration mode */ -- if (fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_CAL) { -+ if (fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_CAL) - i2c_smbus_write_byte_data(client, FM3130_RTC_CONTROL, - fm3130->regs[FM3130_RTC_CONTROL] & - ~(FM3130_RTC_CONTROL_BIT_CAL)); - dev_warn(&client->dev, "Disabling calibration mode!\n"); -- } - - /* Disabling read and write modes */ - if (fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_WRITE || -- fm3130->regs[FM3130_RTC_CONTROL] 
& FM3130_RTC_CONTROL_BIT_READ) { -+ fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_READ) - i2c_smbus_write_byte_data(client, FM3130_RTC_CONTROL, - fm3130->regs[FM3130_RTC_CONTROL] & - ~(FM3130_RTC_CONTROL_BIT_READ | - FM3130_RTC_CONTROL_BIT_WRITE)); - dev_warn(&client->dev, "Disabling READ or WRITE mode!\n"); -- } - - /* oscillator off? turn it on, so clock can tick. */ - if (fm3130->regs[FM3130_CAL_CONTROL] & FM3130_CAL_CONTROL_BIT_nOSCEN) -diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c -index d0ef15a..aaccc8e 100644 ---- a/drivers/s390/block/dasd.c -+++ b/drivers/s390/block/dasd.c -@@ -994,9 +994,10 @@ static void dasd_handle_killed_request(struct ccw_device *cdev, - return; - cqr = (struct dasd_ccw_req *) intparm; - if (cqr->status != DASD_CQR_IN_IO) { -- DBF_EVENT_DEVID(DBF_DEBUG, cdev, -- "invalid status in handle_killed_request: " -- "%02x", cqr->status); -+ DBF_EVENT(DBF_DEBUG, -+ "invalid status in handle_killed_request: " -+ "bus_id %s, status %02x", -+ dev_name(&cdev->dev), cqr->status); - return; - } - -@@ -1004,8 +1005,8 @@ static void dasd_handle_killed_request(struct ccw_device *cdev, - if (device == NULL || - device != dasd_device_from_cdev_locked(cdev) || - strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { -- DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s", -- "invalid device in request"); -+ DBF_DEV_EVENT(DBF_DEBUG, device, "invalid device in request: " -+ "bus_id %s", dev_name(&cdev->dev)); - return; - } - -@@ -1044,13 +1045,12 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, - case -EIO: - break; - case -ETIMEDOUT: -- DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: " -- "request timed out\n", __func__); -+ DBF_EVENT(DBF_WARNING, "%s(%s): request timed out\n", -+ __func__, dev_name(&cdev->dev)); - break; - default: -- DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: " -- "unknown error %ld\n", __func__, -- PTR_ERR(irb)); -+ DBF_EVENT(DBF_WARNING, "%s(%s): unknown error %ld\n", -+ __func__, dev_name(&cdev->dev), PTR_ERR(irb)); - } - dasd_handle_killed_request(cdev, intparm); - return; -@@ -1078,8 +1078,8 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, - device = (struct dasd_device *) cqr->startdev; - if (!device || - strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { -- DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s", -- "invalid device in request"); -+ DBF_DEV_EVENT(DBF_DEBUG, device, "invalid device in request: " -+ "bus_id %s", dev_name(&cdev->dev)); - return; - } - -@@ -2217,9 +2217,9 @@ int dasd_generic_probe(struct ccw_device *cdev, - } - ret = dasd_add_sysfs_files(cdev); - if (ret) { -- DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s", -- "dasd_generic_probe: could not add " -- "sysfs entries"); -+ DBF_EVENT(DBF_WARNING, -+ "dasd_generic_probe: could not add sysfs entries " -+ "for %s\n", dev_name(&cdev->dev)); - return ret; - } - cdev->handler = &dasd_int_handler; -diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c -index 8174ec9..4e49b4a 100644 ---- a/drivers/s390/block/dasd_diag.c -+++ b/drivers/s390/block/dasd_diag.c -@@ -145,15 +145,6 @@ dasd_diag_erp(struct dasd_device *device) - - mdsk_term_io(device); - rc = mdsk_init_io(device, device->block->bp_block, 0, NULL); -- if (rc == 4) { -- if (!(device->features & DASD_FEATURE_READONLY)) { -- dev_warn(&device->cdev->dev, -- "The access mode of a DIAG device changed" -- " to read-only"); -- device->features |= DASD_FEATURE_READONLY; -- } -- rc = 0; -- } - if (rc) - dev_warn(&device->cdev->dev, "DIAG ERP failed with " - 
"rc=%d\n", rc); -@@ -442,20 +433,16 @@ dasd_diag_check_device(struct dasd_device *device) - for (sb = 512; sb < bsize; sb = sb << 1) - block->s2b_shift++; - rc = mdsk_init_io(device, block->bp_block, 0, NULL); -- if (rc && (rc != 4)) { -+ if (rc) { - dev_warn(&device->cdev->dev, "DIAG initialization " - "failed with rc=%d\n", rc); - rc = -EIO; - } else { -- if (rc == 4) -- device->features |= DASD_FEATURE_READONLY; - dev_info(&device->cdev->dev, -- "New DASD with %ld byte/block, total size %ld KB%s\n", -+ "New DASD with %ld byte/block, total size %ld KB\n", - (unsigned long) block->bp_block, - (unsigned long) (block->blocks << -- block->s2b_shift) >> 1, -- (rc == 4) ? ", read-only device" : ""); -- rc = 0; -+ block->s2b_shift) >> 1); - } - out_label: - free_page((long) label); -diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c -index 678bb94..417b97c 100644 ---- a/drivers/s390/block/dasd_eckd.c -+++ b/drivers/s390/block/dasd_eckd.c -@@ -88,9 +88,9 @@ dasd_eckd_probe (struct ccw_device *cdev) - /* set ECKD specific ccw-device options */ - ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE); - if (ret) { -- DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s", -- "dasd_eckd_probe: could not set " -- "ccw-device options"); -+ DBF_EVENT(DBF_WARNING, -+ "dasd_eckd_probe: could not set ccw-device options " -+ "for %s\n", dev_name(&cdev->dev)); - return ret; - } - ret = dasd_generic_probe(cdev, &dasd_eckd_discipline); -@@ -885,15 +885,16 @@ static int dasd_eckd_read_conf(struct dasd_device *device) - rc = dasd_eckd_read_conf_lpm(device, &conf_data, - &conf_len, lpm); - if (rc && rc != -EOPNOTSUPP) { /* -EOPNOTSUPP is ok */ -- DBF_EVENT_DEVID(DBF_WARNING, device->cdev, -+ DBF_EVENT(DBF_WARNING, - "Read configuration data returned " -- "error %d", rc); -+ "error %d for device: %s", rc, -+ dev_name(&device->cdev->dev)); - return rc; - } - if (conf_data == NULL) { -- DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", -- "No configuration data " -- "retrieved"); -+ DBF_EVENT(DBF_WARNING, "No configuration " -+ "data retrieved for device: %s", -+ dev_name(&device->cdev->dev)); - continue; /* no error */ - } - /* save first valid configuration data */ -@@ -940,8 +941,9 @@ static int dasd_eckd_read_features(struct dasd_device *device) - sizeof(struct dasd_rssd_features)), - device); - if (IS_ERR(cqr)) { -- DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not " -- "allocate initialization request"); -+ DBF_EVENT(DBF_WARNING, "Could not allocate initialization " -+ "request for device: %s", -+ dev_name(&device->cdev->dev)); - return PTR_ERR(cqr); - } - cqr->startdev = device; -@@ -1069,8 +1071,10 @@ static int dasd_eckd_validate_server(struct dasd_device *device) - /* may be requested feature is not available on server, - * therefore just report error and go ahead */ - private = (struct dasd_eckd_private *) device->private; -- DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "PSF-SSC for SSID %04x " -- "returned rc=%d", private->uid.ssid, rc); -+ DBF_EVENT(DBF_WARNING, "PSF-SSC on storage subsystem %s.%s.%04x " -+ "returned rc=%d for device: %s", -+ private->uid.vendor, private->uid.serial, -+ private->uid.ssid, rc, dev_name(&device->cdev->dev)); - /* RE-Read Configuration Data */ - return dasd_eckd_read_conf(device); - } -@@ -1119,9 +1123,9 @@ dasd_eckd_check_characteristics(struct dasd_device *device) - if (private->uid.type == UA_BASE_DEVICE) { - block = dasd_alloc_block(); - if (IS_ERR(block)) { -- DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", -- "could not allocate dasd " -- "block 
structure"); -+ DBF_EVENT(DBF_WARNING, "could not allocate dasd " -+ "block structure for device: %s", -+ dev_name(&device->cdev->dev)); - rc = PTR_ERR(block); - goto out_err1; - } -@@ -1149,8 +1153,9 @@ dasd_eckd_check_characteristics(struct dasd_device *device) - rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC, - &private->rdc_data, 64); - if (rc) { -- DBF_EVENT_DEVID(DBF_WARNING, device->cdev, -- "Read device characteristic failed, rc=%d", rc); -+ DBF_EVENT(DBF_WARNING, -+ "Read device characteristics failed, rc=%d for " -+ "device: %s", rc, dev_name(&device->cdev->dev)); - goto out_err3; - } - /* find the vaild cylinder size */ -@@ -2975,7 +2980,7 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device, - len += sprintf(page + len, KERN_ERR PRINTK_HEADER - " in req: %p CS: 0x%02X DS: 0x%02X CC: 0x%02X RC: %d\n", - req, scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw), -- scsw_cc(&irb->scsw), req ? req->intrc : 0); -+ scsw_cc(&irb->scsw), req->intrc); - len += sprintf(page + len, KERN_ERR PRINTK_HEADER - " device %s: Failing CCW: %p\n", - dev_name(&device->cdev->dev), -@@ -3248,8 +3253,9 @@ int dasd_eckd_restore_device(struct dasd_device *device) - rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC, - &temp_rdc_data, 64); - if (rc) { -- DBF_EVENT_DEVID(DBF_WARNING, device->cdev, -- "Read device characteristic failed, rc=%d", rc); -+ DBF_EVENT(DBF_WARNING, -+ "Read device characteristics failed, rc=%d for " -+ "device: %s", rc, dev_name(&device->cdev->dev)); - goto out_err; - } - spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); -diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c -index 227b4e9..f245377 100644 ---- a/drivers/s390/block/dasd_fba.c -+++ b/drivers/s390/block/dasd_fba.c -@@ -141,8 +141,9 @@ dasd_fba_check_characteristics(struct dasd_device *device) - } - block = dasd_alloc_block(); - if (IS_ERR(block)) { -- DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s", "could not allocate " -- "dasd block structure"); -+ DBF_EVENT(DBF_WARNING, "could not allocate dasd block " -+ "structure for device: %s", -+ dev_name(&device->cdev->dev)); - device->private = NULL; - kfree(private); - return PTR_ERR(block); -@@ -154,8 +155,9 @@ dasd_fba_check_characteristics(struct dasd_device *device) - rc = dasd_generic_read_dev_chars(device, DASD_FBA_MAGIC, - &private->rdc_data, 32); - if (rc) { -- DBF_EVENT_DEVID(DBF_WARNING, cdev, "Read device " -- "characteristics returned error %d", rc); -+ DBF_EVENT(DBF_WARNING, "Read device characteristics returned " -+ "error %d for device: %s", -+ rc, dev_name(&device->cdev->dev)); - device->block = NULL; - dasd_free_block(block); - device->private = NULL; -diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h -index b19f309..8afd9fa 100644 ---- a/drivers/s390/block/dasd_int.h -+++ b/drivers/s390/block/dasd_int.h -@@ -108,16 +108,6 @@ do { \ - d_data); \ - } while(0) - --#define DBF_EVENT_DEVID(d_level, d_cdev, d_str, d_data...) 
\ --do { \ -- struct ccw_dev_id __dev_id; \ -- ccw_device_get_id(d_cdev, &__dev_id); \ -- debug_sprintf_event(dasd_debug_area, \ -- d_level, \ -- "0.%x.%04x " d_str "\n", \ -- __dev_id.ssid, __dev_id.devno, d_data); \ --} while (0) -- - #define DBF_EXC(d_level, d_str, d_data...)\ - do { \ - debug_sprintf_exception(dasd_debug_area, \ -diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c -index a5354b8..f756a1b 100644 ---- a/drivers/s390/block/dasd_ioctl.c -+++ b/drivers/s390/block/dasd_ioctl.c -@@ -260,7 +260,7 @@ static int dasd_ioctl_information(struct dasd_block *block, - struct ccw_dev_id dev_id; - - base = block->base; -- if (!base->discipline || !base->discipline->fill_info) -+ if (!base->discipline->fill_info) - return -EINVAL; - - dasd_info = kzalloc(sizeof(struct dasd_information2_t), GFP_KERNEL); -@@ -303,7 +303,10 @@ static int dasd_ioctl_information(struct dasd_block *block, - dasd_info->features |= - ((base->features & DASD_FEATURE_READONLY) != 0); - -- memcpy(dasd_info->type, base->discipline->name, 4); -+ if (base->discipline) -+ memcpy(dasd_info->type, base->discipline->name, 4); -+ else -+ memcpy(dasd_info->type, "none", 4); - - if (block->request_queue->request_fn) { - struct list_head *l; -diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c -index f9d7d38..654daa3 100644 ---- a/drivers/s390/block/dasd_proc.c -+++ b/drivers/s390/block/dasd_proc.c -@@ -71,7 +71,7 @@ dasd_devices_show(struct seq_file *m, void *v) - /* Print device number. */ - seq_printf(m, "%s", dev_name(&device->cdev->dev)); - /* Print discipline string. */ -- if (device->discipline != NULL) -+ if (device != NULL && device->discipline != NULL) - seq_printf(m, "(%s)", device->discipline->name); - else - seq_printf(m, "(none)"); -@@ -91,7 +91,10 @@ dasd_devices_show(struct seq_file *m, void *v) - substr = (device->features & DASD_FEATURE_READONLY) ? "(ro)" : " "; - seq_printf(m, "%4s: ", substr); - /* Print device status information. */ -- switch (device->state) { -+ switch ((device != NULL) ? device->state : -1) { -+ case -1: -+ seq_printf(m, "unknown"); -+ break; - case DASD_STATE_NEW: - seq_printf(m, "new"); - break; -diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c -index 55f9973..2490b74 100644 ---- a/drivers/s390/cio/device.c -+++ b/drivers/s390/cio/device.c -@@ -1292,7 +1292,7 @@ static int io_subchannel_probe(struct subchannel *sch) - sch->private = kzalloc(sizeof(struct io_subchannel_private), - GFP_KERNEL | GFP_DMA); - if (!sch->private) -- goto out_schedule; -+ goto out_err; - /* - * First check if a fitting device may be found amongst the - * disconnected devices or in the orphanage. -@@ -1317,7 +1317,7 @@ static int io_subchannel_probe(struct subchannel *sch) - } - cdev = io_subchannel_create_ccwdev(sch); - if (IS_ERR(cdev)) -- goto out_schedule; -+ goto out_err; - rc = io_subchannel_recog(cdev, sch); - if (rc) { - spin_lock_irqsave(sch->lock, flags); -@@ -1325,7 +1325,9 @@ static int io_subchannel_probe(struct subchannel *sch) - spin_unlock_irqrestore(sch->lock, flags); - } - return 0; -- -+out_err: -+ kfree(sch->private); -+ sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group); - out_schedule: - io_subchannel_schedule_removal(sch); - return 0; -@@ -1339,14 +1341,13 @@ io_subchannel_remove (struct subchannel *sch) - - cdev = sch_get_cdev(sch); - if (!cdev) -- goto out_free; -+ return 0; - /* Set ccw device to not operational and drop reference. 
*/ - spin_lock_irqsave(cdev->ccwlock, flags); - sch_set_cdev(sch, NULL); - cdev->private->state = DEV_STATE_NOT_OPER; - spin_unlock_irqrestore(cdev->ccwlock, flags); - ccw_device_unregister(cdev); --out_free: - kfree(sch->private); - sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group); - return 0; -diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c -index 13b703a..b9613d7 100644 ---- a/drivers/s390/cio/device_fsm.c -+++ b/drivers/s390/cio/device_fsm.c -@@ -1080,14 +1080,14 @@ void ccw_device_trigger_reprobe(struct ccw_device *cdev) - ccw_device_start_id(cdev, 0); - } - --static void ccw_device_disabled_irq(struct ccw_device *cdev, -- enum dev_event dev_event) -+static void -+ccw_device_offline_irq(struct ccw_device *cdev, enum dev_event dev_event) - { - struct subchannel *sch; - - sch = to_subchannel(cdev->dev.parent); - /* -- * An interrupt in a disabled state means a previous disable was not -+ * An interrupt in state offline means a previous disable was not - * successful - should not happen, but we try to disable again. - */ - cio_disable_subchannel(sch); -@@ -1150,12 +1150,25 @@ ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event) - } - - /* -+ * Bug operation action. -+ */ -+static void -+ccw_device_bug(struct ccw_device *cdev, enum dev_event dev_event) -+{ -+ CIO_MSG_EVENT(0, "Internal state [%i][%i] not handled for device " -+ "0.%x.%04x\n", cdev->private->state, dev_event, -+ cdev->private->dev_id.ssid, -+ cdev->private->dev_id.devno); -+ BUG(); -+} -+ -+/* - * device statemachine - */ - fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = { - [DEV_STATE_NOT_OPER] = { - [DEV_EVENT_NOTOPER] = ccw_device_nop, -- [DEV_EVENT_INTERRUPT] = ccw_device_disabled_irq, -+ [DEV_EVENT_INTERRUPT] = ccw_device_bug, - [DEV_EVENT_TIMEOUT] = ccw_device_nop, - [DEV_EVENT_VERIFY] = ccw_device_nop, - }, -@@ -1173,7 +1186,7 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = { - }, - [DEV_STATE_OFFLINE] = { - [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper, -- [DEV_EVENT_INTERRUPT] = ccw_device_disabled_irq, -+ [DEV_EVENT_INTERRUPT] = ccw_device_offline_irq, - [DEV_EVENT_TIMEOUT] = ccw_device_nop, - [DEV_EVENT_VERIFY] = ccw_device_offline_verify, - }, -@@ -1230,7 +1243,7 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = { - [DEV_STATE_DISCONNECTED] = { - [DEV_EVENT_NOTOPER] = ccw_device_nop, - [DEV_EVENT_INTERRUPT] = ccw_device_start_id, -- [DEV_EVENT_TIMEOUT] = ccw_device_nop, -+ [DEV_EVENT_TIMEOUT] = ccw_device_bug, - [DEV_EVENT_VERIFY] = ccw_device_start_id, - }, - [DEV_STATE_DISCONNECTED_SENSE_ID] = { -diff --git a/drivers/s390/crypto/zcrypt_pcicc.c b/drivers/s390/crypto/zcrypt_pcicc.c -index 7f1e3ba..f4b0c47 100644 ---- a/drivers/s390/crypto/zcrypt_pcicc.c -+++ b/drivers/s390/crypto/zcrypt_pcicc.c -@@ -373,8 +373,6 @@ static int convert_type86(struct zcrypt_device *zdev, - zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD; - return -EAGAIN; - } -- if (service_rc == 8 && service_rs == 72) -- return -EINVAL; - zdev->online = 0; - return -EAGAIN; /* repeat the request on a different device. 
*/ - } -diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c -index 1f9e923..5677b40 100644 ---- a/drivers/s390/crypto/zcrypt_pcixcc.c -+++ b/drivers/s390/crypto/zcrypt_pcixcc.c -@@ -462,8 +462,6 @@ static int convert_type86_ica(struct zcrypt_device *zdev, - } - if (service_rc == 12 && service_rs == 769) - return -EINVAL; -- if (service_rc == 8 && service_rs == 72) -- return -EINVAL; - zdev->online = 0; - return -EAGAIN; /* repeat the request on a different device. */ - } -diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c -index 395c04c..c84eadd 100644 ---- a/drivers/s390/net/netiucv.c -+++ b/drivers/s390/net/netiucv.c -@@ -741,13 +741,13 @@ static void conn_action_txdone(fsm_instance *fi, int event, void *arg) - if (single_flag) { - if ((skb = skb_dequeue(&conn->commit_queue))) { - atomic_dec(&skb->users); -+ dev_kfree_skb_any(skb); - if (privptr) { - privptr->stats.tx_packets++; - privptr->stats.tx_bytes += - (skb->len - NETIUCV_HDRLEN -- - NETIUCV_HDRLEN); -+ - NETIUCV_HDRLEN); - } -- dev_kfree_skb_any(skb); - } - } - conn->tx_buff->data = conn->tx_buff->head; -diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c -index bfec4fa..3ee1cbc 100644 ---- a/drivers/scsi/device_handler/scsi_dh.c -+++ b/drivers/scsi/device_handler/scsi_dh.c -@@ -304,15 +304,18 @@ static int scsi_dh_notifier(struct notifier_block *nb, - sdev = to_scsi_device(dev); - - if (action == BUS_NOTIFY_ADD_DEVICE) { -- err = device_create_file(dev, &scsi_dh_state_attr); -- /* don't care about err */ - devinfo = device_handler_match(NULL, sdev); -- if (devinfo) -- err = scsi_dh_handler_attach(sdev, devinfo); -+ if (!devinfo) -+ goto out; -+ -+ err = scsi_dh_handler_attach(sdev, devinfo); -+ if (!err) -+ err = device_create_file(dev, &scsi_dh_state_attr); - } else if (action == BUS_NOTIFY_DEL_DEVICE) { - device_remove_file(dev, &scsi_dh_state_attr); - scsi_dh_handler_detach(sdev, NULL); - } -+out: - return err; - } - -diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c -index 70ab5d0..704b8e0 100644 ---- a/drivers/scsi/fcoe/fcoe.c -+++ b/drivers/scsi/fcoe/fcoe.c -@@ -137,7 +137,7 @@ static struct scsi_host_template fcoe_shost_template = { - .change_queue_depth = fc_change_queue_depth, - .change_queue_type = fc_change_queue_type, - .this_id = -1, -- .cmd_per_lun = 3, -+ .cmd_per_lun = 32, - .can_queue = FCOE_MAX_OUTSTANDING_COMMANDS, - .use_clustering = ENABLE_CLUSTERING, - .sg_tablesize = SG_ALL, -@@ -160,7 +160,6 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe, - { - struct fcoe_ctlr *fip = &fcoe->ctlr; - struct netdev_hw_addr *ha; -- struct net_device *real_dev; - u8 flogi_maddr[ETH_ALEN]; - - fcoe->netdev = netdev; -@@ -174,12 +173,10 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe, - - /* look for SAN MAC address, if multiple SAN MACs exist, only - * use the first one for SPMA */ -- real_dev = (netdev->priv_flags & IFF_802_1Q_VLAN) ? 
-- vlan_dev_real_dev(netdev) : netdev; - rcu_read_lock(); -- for_each_dev_addr(real_dev, ha) { -+ for_each_dev_addr(netdev, ha) { - if ((ha->type == NETDEV_HW_ADDR_T_SAN) && -- (is_valid_ether_addr(ha->addr))) { -+ (is_valid_ether_addr(fip->ctl_src_addr))) { - memcpy(fip->ctl_src_addr, ha->addr, ETH_ALEN); - fip->spma = 1; - break; -@@ -667,7 +664,7 @@ static int fcoe_ddp_setup(struct fc_lport *lp, u16 xid, - { - struct net_device *n = fcoe_netdev(lp); - -- if (n->netdev_ops->ndo_fcoe_ddp_setup) -+ if (n->netdev_ops && n->netdev_ops->ndo_fcoe_ddp_setup) - return n->netdev_ops->ndo_fcoe_ddp_setup(n, xid, sgl, sgc); - - return 0; -@@ -684,7 +681,7 @@ static int fcoe_ddp_done(struct fc_lport *lp, u16 xid) - { - struct net_device *n = fcoe_netdev(lp); - -- if (n->netdev_ops->ndo_fcoe_ddp_done) -+ if (n->netdev_ops && n->netdev_ops->ndo_fcoe_ddp_done) - return n->netdev_ops->ndo_fcoe_ddp_done(n, xid); - return 0; - } -@@ -1634,7 +1631,7 @@ static int fcoe_destroy(const char *buffer, struct kernel_param *kp) - { - struct fcoe_interface *fcoe; - struct net_device *netdev; -- int rc = 0; -+ int rc; - - mutex_lock(&fcoe_config_mutex); - #ifdef CONFIG_FCOE_MODULE -diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c -index 554626e..c968cc3 100644 ---- a/drivers/scsi/hosts.c -+++ b/drivers/scsi/hosts.c -@@ -180,20 +180,14 @@ void scsi_remove_host(struct Scsi_Host *shost) - EXPORT_SYMBOL(scsi_remove_host); - - /** -- * scsi_add_host_with_dma - add a scsi host with dma device -+ * scsi_add_host - add a scsi host - * @shost: scsi host pointer to add - * @dev: a struct device of type scsi class -- * @dma_dev: dma device for the host -- * -- * Note: You rarely need to worry about this unless you're in a -- * virtualised host environments, so use the simpler scsi_add_host() -- * function instead. - * - * Return value: - * 0 on success / != 0 for error - **/ --int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev, -- struct device *dma_dev) -+int scsi_add_host(struct Scsi_Host *shost, struct device *dev) - { - struct scsi_host_template *sht = shost->hostt; - int error = -EINVAL; -@@ -213,7 +207,6 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev, - - if (!shost->shost_gendev.parent) - shost->shost_gendev.parent = dev ? 
dev : &platform_bus; -- shost->dma_dev = dma_dev; - - error = device_add(&shost->shost_gendev); - if (error) -@@ -269,7 +262,7 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev, - fail: - return error; - } --EXPORT_SYMBOL(scsi_add_host_with_dma); -+EXPORT_SYMBOL(scsi_add_host); - - static void scsi_host_dev_release(struct device *dev) - { -diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c -index c3ff9a6..76d294f 100644 ---- a/drivers/scsi/ipr.c -+++ b/drivers/scsi/ipr.c -@@ -6516,7 +6516,6 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd) - int rc; - - ENTER; -- ioa_cfg->pdev->state_saved = true; - rc = pci_restore_state(ioa_cfg->pdev); - - if (rc != PCIBIOS_SUCCESSFUL) { -diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c -index d4cb3f9..c48799e 100644 ---- a/drivers/scsi/libfc/fc_disc.c -+++ b/drivers/scsi/libfc/fc_disc.c -@@ -371,7 +371,7 @@ static void fc_disc_gpn_ft_req(struct fc_disc *disc) - disc, lport->e_d_tov)) - return; - err: -- fc_disc_error(disc, NULL); -+ fc_disc_error(disc, fp); - } - - /** -diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c -index 9298458..5cfa687 100644 ---- a/drivers/scsi/libfc/fc_elsct.c -+++ b/drivers/scsi/libfc/fc_elsct.c -@@ -53,10 +53,8 @@ static struct fc_seq *fc_elsct_send(struct fc_lport *lport, - did = FC_FID_DIR_SERV; - } - -- if (rc) { -- fc_frame_free(fp); -+ if (rc) - return NULL; -- } - - fc_fill_fc_hdr(fp, r_ctl, did, fc_host_port_id(lport->host), fh_type, - FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); -diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c -index 7a14402..59a4408 100644 ---- a/drivers/scsi/libfc/fc_fcp.c -+++ b/drivers/scsi/libfc/fc_fcp.c -@@ -302,13 +302,10 @@ static void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp) - if (!fsp) - return; - -- if (fsp->xfer_ddp == FC_XID_UNKNOWN) -- return; -- - lp = fsp->lp; -- if (lp->tt.ddp_done) { -+ if (fsp->xfer_ddp && lp->tt.ddp_done) { - fsp->xfer_len = lp->tt.ddp_done(lp, fsp->xfer_ddp); -- fsp->xfer_ddp = FC_XID_UNKNOWN; -+ fsp->xfer_ddp = 0; - } - } - -@@ -575,8 +572,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, - tlen -= sg_bytes; - remaining -= sg_bytes; - -- if ((skb_shinfo(fp_skb(fp))->nr_frags < FC_FRAME_SG_LEN) && -- (tlen)) -+ if (tlen) - continue; - - /* -@@ -1052,6 +1048,7 @@ static int fc_fcp_cmd_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp, - - seq = lp->tt.exch_seq_send(lp, fp, resp, fc_fcp_pkt_destroy, fsp, 0); - if (!seq) { -+ fc_frame_free(fp); - rc = -1; - goto unlock; - } -@@ -1316,6 +1313,7 @@ static void fc_fcp_rec(struct fc_fcp_pkt *fsp) - fc_fcp_pkt_hold(fsp); /* hold while REC outstanding */ - return; - } -+ fc_frame_free(fp); - retry: - if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY) - fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV); -@@ -1563,9 +1561,10 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset) - - seq = lp->tt.exch_seq_send(lp, fp, fc_fcp_srr_resp, NULL, - fsp, jiffies_to_msecs(FC_SCSI_REC_TOV)); -- if (!seq) -+ if (!seq) { -+ fc_frame_free(fp); - goto retry; -- -+ } - fsp->recov_seq = seq; - fsp->xfer_len = offset; - fsp->xfer_contig_end = offset; -@@ -1709,7 +1708,6 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *)) - fsp->cmd = sc_cmd; /* save the cmd */ - fsp->lp = lp; /* save the softc ptr */ - fsp->rport = rport; /* set the remote port ptr */ -- fsp->xfer_ddp = FC_XID_UNKNOWN; - sc_cmd->scsi_done = done; - - /* -@@ -1848,8 +1846,7 @@ static void 
fc_io_compl(struct fc_fcp_pkt *fsp) - * scsi status is good but transport level - * underrun. - */ -- sc_cmd->result = (fsp->state & FC_SRB_RCV_STATUS ? -- DID_OK : DID_ERROR) << 16; -+ sc_cmd->result = DID_OK << 16; - } else { - /* - * scsi got underrun, this is an error -@@ -2049,16 +2046,18 @@ EXPORT_SYMBOL(fc_eh_host_reset); - int fc_slave_alloc(struct scsi_device *sdev) - { - struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); -+ int queue_depth; - - if (!rport || fc_remote_port_chkready(rport)) - return -ENXIO; - -- if (sdev->tagged_supported) -- scsi_activate_tcq(sdev, FC_FCP_DFLT_QUEUE_DEPTH); -- else -- scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), -- FC_FCP_DFLT_QUEUE_DEPTH); -- -+ if (sdev->tagged_supported) { -+ if (sdev->host->hostt->cmd_per_lun) -+ queue_depth = sdev->host->hostt->cmd_per_lun; -+ else -+ queue_depth = FC_FCP_DFLT_QUEUE_DEPTH; -+ scsi_activate_tcq(sdev, queue_depth); -+ } - return 0; - } - EXPORT_SYMBOL(fc_slave_alloc); -diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c -index 536492a..bd2f771 100644 ---- a/drivers/scsi/libfc/fc_lport.c -+++ b/drivers/scsi/libfc/fc_lport.c -@@ -329,7 +329,7 @@ static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type) - * @sp: current sequence in the RLIR exchange - * @fp: RLIR request frame - * -- * Locking Note: The lport lock is expected to be held before calling -+ * Locking Note: The lport lock is exected to be held before calling - * this function. - */ - static void fc_lport_recv_rlir_req(struct fc_seq *sp, struct fc_frame *fp, -@@ -348,7 +348,7 @@ static void fc_lport_recv_rlir_req(struct fc_seq *sp, struct fc_frame *fp, - * @sp: current sequence in the ECHO exchange - * @fp: ECHO request frame - * -- * Locking Note: The lport lock is expected to be held before calling -+ * Locking Note: The lport lock is exected to be held before calling - * this function. - */ - static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp, -@@ -361,7 +361,7 @@ static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp, - void *dp; - u32 f_ctl; - -- FC_LPORT_DBG(lport, "Received ECHO request while in state %s\n", -+ FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n", - fc_lport_state(lport)); - - len = fr_len(in_fp) - sizeof(struct fc_frame_header); -@@ -374,7 +374,7 @@ static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp, - if (fp) { - dp = fc_frame_payload_get(fp, len); - memcpy(dp, pp, len); -- *((__be32 *)dp) = htonl(ELS_LS_ACC << 24); -+ *((u32 *)dp) = htonl(ELS_LS_ACC << 24); - sp = lport->tt.seq_start_next(sp); - f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ; - fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid, -@@ -385,12 +385,12 @@ static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp, - } - - /** -- * fc_lport_recv_rnid_req() - Handle received Request Node ID data request -- * @sp: The sequence in the RNID exchange -- * @fp: The RNID request frame -- * @lport: The local port recieving the RNID -+ * fc_lport_recv_echo_req() - Handle received Request Node ID data request -+ * @lport: Fibre Channel local port recieving the RNID -+ * @sp: current sequence in the RNID exchange -+ * @fp: RNID request frame - * -- * Locking Note: The lport lock is expected to be held before calling -+ * Locking Note: The lport lock is exected to be held before calling - * this function. 
- */ - static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp, -@@ -667,7 +667,7 @@ static void fc_lport_enter_ready(struct fc_lport *lport) - * Accept it with the common service parameters indicating our N port. - * Set up to do a PLOGI if we have the higher-number WWPN. - * -- * Locking Note: The lport lock is expected to be held before calling -+ * Locking Note: The lport lock is exected to be held before calling - * this function. - */ - static void fc_lport_recv_flogi_req(struct fc_seq *sp_in, -@@ -1115,7 +1115,7 @@ static void fc_lport_enter_scr(struct fc_lport *lport) - - if (!lport->tt.elsct_send(lport, FC_FID_FCTRL, fp, ELS_SCR, - fc_lport_scr_resp, lport, lport->e_d_tov)) -- fc_lport_error(lport, NULL); -+ fc_lport_error(lport, fp); - } - - /** -@@ -1186,7 +1186,7 @@ static void fc_lport_enter_rpn_id(struct fc_lport *lport) - if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RPN_ID, - fc_lport_rpn_id_resp, - lport, lport->e_d_tov)) -- fc_lport_error(lport, NULL); -+ fc_lport_error(lport, fp); - } - - static struct fc_rport_operations fc_lport_rport_ops = { -@@ -1237,12 +1237,9 @@ static void fc_lport_timeout(struct work_struct *work) - - switch (lport->state) { - case LPORT_ST_DISABLED: -- WARN_ON(1); -- break; - case LPORT_ST_READY: -- WARN_ON(1); -- break; - case LPORT_ST_RESET: -+ WARN_ON(1); - break; - case LPORT_ST_FLOGI: - fc_lport_enter_flogi(lport); -@@ -1340,7 +1337,7 @@ static void fc_lport_enter_logo(struct fc_lport *lport) - - if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_LOGO, - fc_lport_logo_resp, lport, lport->e_d_tov)) -- fc_lport_error(lport, NULL); -+ fc_lport_error(lport, fp); - } - - /** -@@ -1456,7 +1453,7 @@ void fc_lport_enter_flogi(struct fc_lport *lport) - - if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_FLOGI, - fc_lport_flogi_resp, lport, lport->e_d_tov)) -- fc_lport_error(lport, NULL); -+ fc_lport_error(lport, fp); - } - - /* Configure a fc_lport */ -diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c -index ff558a6..03ea674 100644 ---- a/drivers/scsi/libfc/fc_rport.c -+++ b/drivers/scsi/libfc/fc_rport.c -@@ -86,7 +86,6 @@ static const char *fc_rport_state_names[] = { - [RPORT_ST_LOGO] = "LOGO", - [RPORT_ST_ADISC] = "ADISC", - [RPORT_ST_DELETE] = "Delete", -- [RPORT_ST_RESTART] = "Restart", - }; - - /** -@@ -100,7 +99,8 @@ static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport, - struct fc_rport_priv *rdata; - - list_for_each_entry(rdata, &lport->disc.rports, peers) -- if (rdata->ids.port_id == port_id) -+ if (rdata->ids.port_id == port_id && -+ rdata->rp_state != RPORT_ST_DELETE) - return rdata; - return NULL; - } -@@ -235,7 +235,6 @@ static void fc_rport_work(struct work_struct *work) - struct fc_rport_operations *rport_ops; - struct fc_rport_identifiers ids; - struct fc_rport *rport; -- int restart = 0; - - mutex_lock(&rdata->rp_mutex); - event = rdata->event; -@@ -288,20 +287,8 @@ static void fc_rport_work(struct work_struct *work) - mutex_unlock(&rdata->rp_mutex); - - if (port_id != FC_FID_DIR_SERV) { -- /* -- * We must drop rp_mutex before taking disc_mutex. -- * Re-evaluate state to allow for restart. -- * A transition to RESTART state must only happen -- * while disc_mutex is held and rdata is on the list. 
-- */ - mutex_lock(&lport->disc.disc_mutex); -- mutex_lock(&rdata->rp_mutex); -- if (rdata->rp_state == RPORT_ST_RESTART) -- restart = 1; -- else -- list_del(&rdata->peers); -- rdata->event = RPORT_EV_NONE; -- mutex_unlock(&rdata->rp_mutex); -+ list_del(&rdata->peers); - mutex_unlock(&lport->disc.disc_mutex); - } - -@@ -325,13 +312,7 @@ static void fc_rport_work(struct work_struct *work) - mutex_unlock(&rdata->rp_mutex); - fc_remote_port_delete(rport); - } -- if (restart) { -- mutex_lock(&rdata->rp_mutex); -- FC_RPORT_DBG(rdata, "work restart\n"); -- fc_rport_enter_plogi(rdata); -- mutex_unlock(&rdata->rp_mutex); -- } else -- kref_put(&rdata->kref, lport->tt.rport_destroy); -+ kref_put(&rdata->kref, lport->tt.rport_destroy); - break; - - default: -@@ -361,12 +342,6 @@ int fc_rport_login(struct fc_rport_priv *rdata) - FC_RPORT_DBG(rdata, "ADISC port\n"); - fc_rport_enter_adisc(rdata); - break; -- case RPORT_ST_RESTART: -- break; -- case RPORT_ST_DELETE: -- FC_RPORT_DBG(rdata, "Restart deleted port\n"); -- fc_rport_state_enter(rdata, RPORT_ST_RESTART); -- break; - default: - FC_RPORT_DBG(rdata, "Login to port\n"); - fc_rport_enter_plogi(rdata); -@@ -422,21 +397,20 @@ int fc_rport_logoff(struct fc_rport_priv *rdata) - - if (rdata->rp_state == RPORT_ST_DELETE) { - FC_RPORT_DBG(rdata, "Port in Delete state, not removing\n"); -+ mutex_unlock(&rdata->rp_mutex); - goto out; - } - -- if (rdata->rp_state == RPORT_ST_RESTART) -- FC_RPORT_DBG(rdata, "Port in Restart state, deleting\n"); -- else -- fc_rport_enter_logo(rdata); -+ fc_rport_enter_logo(rdata); - - /* - * Change the state to Delete so that we discard - * the response. - */ - fc_rport_enter_delete(rdata, RPORT_EV_STOP); --out: - mutex_unlock(&rdata->rp_mutex); -+ -+out: - return 0; - } - -@@ -492,7 +466,6 @@ static void fc_rport_timeout(struct work_struct *work) - case RPORT_ST_READY: - case RPORT_ST_INIT: - case RPORT_ST_DELETE: -- case RPORT_ST_RESTART: - break; - } - -@@ -526,7 +499,6 @@ static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp) - fc_rport_enter_logo(rdata); - break; - case RPORT_ST_DELETE: -- case RPORT_ST_RESTART: - case RPORT_ST_READY: - case RPORT_ST_INIT: - break; -@@ -660,7 +632,7 @@ static void fc_rport_enter_plogi(struct fc_rport_priv *rdata) - - if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PLOGI, - fc_rport_plogi_resp, rdata, lport->e_d_tov)) -- fc_rport_error_retry(rdata, NULL); -+ fc_rport_error_retry(rdata, fp); - else - kref_get(&rdata->kref); - } -@@ -821,7 +793,7 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata) - - if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PRLI, - fc_rport_prli_resp, rdata, lport->e_d_tov)) -- fc_rport_error_retry(rdata, NULL); -+ fc_rport_error_retry(rdata, fp); - else - kref_get(&rdata->kref); - } -@@ -917,7 +889,7 @@ static void fc_rport_enter_rtv(struct fc_rport_priv *rdata) - - if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_RTV, - fc_rport_rtv_resp, rdata, lport->e_d_tov)) -- fc_rport_error_retry(rdata, NULL); -+ fc_rport_error_retry(rdata, fp); - else - kref_get(&rdata->kref); - } -@@ -947,7 +919,7 @@ static void fc_rport_enter_logo(struct fc_rport_priv *rdata) - - if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_LOGO, - fc_rport_logo_resp, rdata, lport->e_d_tov)) -- fc_rport_error_retry(rdata, NULL); -+ fc_rport_error_retry(rdata, fp); - else - kref_get(&rdata->kref); - } -@@ -1034,7 +1006,7 @@ static void fc_rport_enter_adisc(struct fc_rport_priv *rdata) - } - if 
(!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_ADISC, - fc_rport_adisc_resp, rdata, lport->e_d_tov)) -- fc_rport_error_retry(rdata, NULL); -+ fc_rport_error_retry(rdata, fp); - else - kref_get(&rdata->kref); - } -@@ -1276,7 +1248,6 @@ static void fc_rport_recv_plogi_req(struct fc_lport *lport, - } - break; - case RPORT_ST_PRLI: -- case RPORT_ST_RTV: - case RPORT_ST_READY: - case RPORT_ST_ADISC: - FC_RPORT_DBG(rdata, "Received PLOGI in logged-in state %d " -@@ -1284,14 +1255,11 @@ static void fc_rport_recv_plogi_req(struct fc_lport *lport, - /* XXX TBD - should reset */ - break; - case RPORT_ST_DELETE: -- case RPORT_ST_LOGO: -- case RPORT_ST_RESTART: -- FC_RPORT_DBG(rdata, "Received PLOGI in state %s - send busy\n", -- fc_rport_state(rdata)); -- mutex_unlock(&rdata->rp_mutex); -- rjt_data.reason = ELS_RJT_BUSY; -- rjt_data.explan = ELS_EXPL_NONE; -- goto reject; -+ default: -+ FC_RPORT_DBG(rdata, "Received PLOGI in unexpected state %d\n", -+ rdata->rp_state); -+ fc_frame_free(rx_fp); -+ goto out; - } - - /* -@@ -1434,7 +1402,7 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata, - break; - case FC_TYPE_FCP: - fcp_parm = ntohl(rspp->spp_params); -- if (fcp_parm & FCP_SPPF_RETRY) -+ if (fcp_parm * FCP_SPPF_RETRY) - rdata->flags |= FC_RP_FLAGS_RETRY; - rdata->supported_classes = FC_COS_CLASS3; - if (fcp_parm & FCP_SPPF_INIT_FCN) -@@ -1542,14 +1510,14 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, - FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n", - fc_rport_state(rdata)); - -- fc_rport_enter_delete(rdata, RPORT_EV_LOGO); -- - /* -- * If the remote port was created due to discovery, set state -- * to log back in. It may have seen a stale RSCN about us. -+ * If the remote port was created due to discovery, -+ * log back in. It may have seen a stale RSCN about us. - */ -- if (rdata->disc_id) -- fc_rport_state_enter(rdata, RPORT_ST_RESTART); -+ if (rdata->rp_state != RPORT_ST_DELETE && rdata->disc_id) -+ fc_rport_enter_plogi(rdata); -+ else -+ fc_rport_enter_delete(rdata, RPORT_EV_LOGO); - mutex_unlock(&rdata->rp_mutex); - } else - FC_RPORT_ID_DBG(lport, sid, -diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c -index 549bc7d..562d8ce 100644 ---- a/drivers/scsi/lpfc/lpfc_init.c -+++ b/drivers/scsi/lpfc/lpfc_init.c -@@ -2408,7 +2408,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) - vport->els_tmofunc.function = lpfc_els_timeout; - vport->els_tmofunc.data = (unsigned long)vport; - -- error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); -+ error = scsi_add_host(shost, dev); - if (error) - goto out_put_shost; - -@@ -4384,13 +4384,9 @@ lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) - pdev = phba->pcidev; - - /* Set the device DMA mask size */ -- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 -- || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) { -- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 -- || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) { -+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) -+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) - return error; -- } -- } - - /* Get the bus address of Bar0 and Bar2 and the number of bytes - * required by each mapping. 
-@@ -5944,13 +5940,9 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) - pdev = phba->pcidev; - - /* Set the device DMA mask size */ -- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 -- || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) { -- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 -- || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) { -+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) -+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) - return error; -- } -- } - - /* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the - * number of bytes required by each mapping. They are actually -diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c -index 518712c..a39addc 100644 ---- a/drivers/scsi/megaraid/megaraid_sas.c -+++ b/drivers/scsi/megaraid/megaraid_sas.c -@@ -3032,7 +3032,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, - int error = 0, i; - void *sense = NULL; - dma_addr_t sense_handle; -- unsigned long *sense_ptr; -+ u32 *sense_ptr; - - memset(kbuff_arr, 0, sizeof(kbuff_arr)); - -@@ -3109,7 +3109,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, - } - - sense_ptr = -- (unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off); -+ (u32 *) ((unsigned long)cmd->frame + ioc->sense_off); - *sense_ptr = sense_handle; - } - -@@ -3140,8 +3140,8 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, - * sense_ptr points to the location that has the user - * sense buffer address - */ -- sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw + -- ioc->sense_off); -+ sense_ptr = (u32 *) ((unsigned long)ioc->frame.raw + -+ ioc->sense_off); - - if (copy_to_user((void __user *)((unsigned long)(*sense_ptr)), - sense, ioc->sense_len)) { -@@ -3451,7 +3451,7 @@ out: - return retval; - } - --static DRIVER_ATTR(poll_mode_io, S_IRUGO|S_IWUSR, -+static DRIVER_ATTR(poll_mode_io, S_IRUGO|S_IWUGO, - megasas_sysfs_show_poll_mode_io, - megasas_sysfs_set_poll_mode_io); - -diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h -index 5af66db..ab47c46 100644 ---- a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h -+++ b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h -@@ -348,14 +348,6 @@ typedef struct _MPI2_CONFIG_REPLY - #define MPI2_MFGPAGE_DEVID_SAS2108_3 (0x0077) - #define MPI2_MFGPAGE_DEVID_SAS2116_1 (0x0064) - #define MPI2_MFGPAGE_DEVID_SAS2116_2 (0x0065) --#define MPI2_MFGPAGE_DEVID_SAS2208_1 (0x0080) --#define MPI2_MFGPAGE_DEVID_SAS2208_2 (0x0081) --#define MPI2_MFGPAGE_DEVID_SAS2208_3 (0x0082) --#define MPI2_MFGPAGE_DEVID_SAS2208_4 (0x0083) --#define MPI2_MFGPAGE_DEVID_SAS2208_5 (0x0084) --#define MPI2_MFGPAGE_DEVID_SAS2208_6 (0x0085) --#define MPI2_MFGPAGE_DEVID_SAS2208_7 (0x0086) --#define MPI2_MFGPAGE_DEVID_SAS2208_8 (0x0087) - - - /* Manufacturing Page 0 */ -diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c -index 1743640..86ab32d 100644 ---- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c -+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c -@@ -196,28 +196,10 @@ static struct pci_device_id scsih_pci_table[] = { - PCI_ANY_ID, PCI_ANY_ID }, - { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3, - PCI_ANY_ID, PCI_ANY_ID }, -- /* Meteor ~ 2116 */ - { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1, - PCI_ANY_ID, PCI_ANY_ID }, - { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2, - PCI_ANY_ID, PCI_ANY_ID }, -- /* Thunderbolt ~ 2208 */ -- { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1, -- PCI_ANY_ID, PCI_ANY_ID }, -- { 
MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_2, -- PCI_ANY_ID, PCI_ANY_ID }, -- { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_3, -- PCI_ANY_ID, PCI_ANY_ID }, -- { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_4, -- PCI_ANY_ID, PCI_ANY_ID }, -- { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_5, -- PCI_ANY_ID, PCI_ANY_ID }, -- { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6, -- PCI_ANY_ID, PCI_ANY_ID }, -- { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_7, -- PCI_ANY_ID, PCI_ANY_ID }, -- { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_8, -- PCI_ANY_ID, PCI_ANY_ID }, - {0} /* Terminating entry */ - }; - MODULE_DEVICE_TABLE(pci, scsih_pci_table); -diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c -index 21e2bc4..fbcb82a 100644 ---- a/drivers/scsi/qla2xxx/qla_attr.c -+++ b/drivers/scsi/qla2xxx/qla_attr.c -@@ -1654,8 +1654,7 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) - fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN); - } - -- if (scsi_add_host_with_dma(vha->host, &fc_vport->dev, -- &ha->pdev->dev)) { -+ if (scsi_add_host(vha->host, &fc_vport->dev)) { - DEBUG15(printk("scsi(%ld): scsi_add_host failure for VP[%d].\n", - vha->host_no, vha->vp_idx)); - goto vport_create_failed_2; -diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c -index 06bbe0d..b79fca7 100644 ---- a/drivers/scsi/qla2xxx/qla_os.c -+++ b/drivers/scsi/qla2xxx/qla_os.c -@@ -2016,13 +2016,13 @@ skip_dpc: - DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n", - base_vha->host_no, ha)); - -+ base_vha->flags.init_done = 1; -+ base_vha->flags.online = 1; -+ - ret = scsi_add_host(host, &pdev->dev); - if (ret) - goto probe_failed; - -- base_vha->flags.init_done = 1; -- base_vha->flags.online = 1; -- - ha->isp_ops->enable_intrs(ha); - - scsi_scan_host(host); -diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c -index 802e91c..93c2622 100644 ---- a/drivers/scsi/scsi_devinfo.c -+++ b/drivers/scsi/scsi_devinfo.c -@@ -168,10 +168,11 @@ static struct { - {"Generic", "USB SD Reader", "1.00", BLIST_FORCELUN | BLIST_INQUIRY_36}, - {"Generic", "USB Storage-SMC", "0180", BLIST_FORCELUN | BLIST_INQUIRY_36}, - {"Generic", "USB Storage-SMC", "0207", BLIST_FORCELUN | BLIST_INQUIRY_36}, -- {"HITACHI", "DF400", "*", BLIST_REPORTLUN2}, -- {"HITACHI", "DF500", "*", BLIST_REPORTLUN2}, -- {"HITACHI", "DISK-SUBSYSTEM", "*", BLIST_REPORTLUN2}, -- {"HITACHI", "OPEN-", "*", BLIST_REPORTLUN2}, -+ {"HITACHI", "DF400", "*", BLIST_SPARSELUN}, -+ {"HITACHI", "DF500", "*", BLIST_SPARSELUN}, -+ {"HITACHI", "DF600", "*", BLIST_SPARSELUN}, -+ {"HITACHI", "DISK-SUBSYSTEM", "*", BLIST_ATTACH_PQ3 | BLIST_SPARSELUN | BLIST_LARGELUN}, -+ {"HITACHI", "OPEN-E", "*", BLIST_ATTACH_PQ3 | BLIST_SPARSELUN | BLIST_LARGELUN}, - {"HITACHI", "OP-C-", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, - {"HITACHI", "3380-", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, - {"HITACHI", "3390-", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, -diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c -index bc9a881..5987da8 100644 ---- a/drivers/scsi/scsi_lib.c -+++ b/drivers/scsi/scsi_lib.c -@@ -749,9 +749,9 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) - */ - req->next_rq->resid_len = scsi_in(cmd)->resid; - -- scsi_release_buffers(cmd); - blk_end_request_all(req, 0); - -+ scsi_release_buffers(cmd); - scsi_next_command(cmd); - return; - } -diff --git a/drivers/scsi/scsi_lib_dma.c b/drivers/scsi/scsi_lib_dma.c -index 
dcd1285..ac6855c 100644 ---- a/drivers/scsi/scsi_lib_dma.c -+++ b/drivers/scsi/scsi_lib_dma.c -@@ -23,7 +23,7 @@ int scsi_dma_map(struct scsi_cmnd *cmd) - int nseg = 0; - - if (scsi_sg_count(cmd)) { -- struct device *dev = cmd->device->host->dma_dev; -+ struct device *dev = cmd->device->host->shost_gendev.parent; - - nseg = dma_map_sg(dev, scsi_sglist(cmd), scsi_sg_count(cmd), - cmd->sc_data_direction); -@@ -41,7 +41,7 @@ EXPORT_SYMBOL(scsi_dma_map); - void scsi_dma_unmap(struct scsi_cmnd *cmd) - { - if (scsi_sg_count(cmd)) { -- struct device *dev = cmd->device->host->dma_dev; -+ struct device *dev = cmd->device->host->shost_gendev.parent; - - dma_unmap_sg(dev, scsi_sglist(cmd), scsi_sg_count(cmd), - cmd->sc_data_direction); -diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c -index bf52dec..c6f70da 100644 ---- a/drivers/scsi/scsi_transport_fc.c -+++ b/drivers/scsi/scsi_transport_fc.c -@@ -648,22 +648,11 @@ static __init int fc_transport_init(void) - return error; - error = transport_class_register(&fc_vport_class); - if (error) -- goto unreg_host_class; -+ return error; - error = transport_class_register(&fc_rport_class); - if (error) -- goto unreg_vport_class; -- error = transport_class_register(&fc_transport_class); -- if (error) -- goto unreg_rport_class; -- return 0; -- --unreg_rport_class: -- transport_class_unregister(&fc_rport_class); --unreg_vport_class: -- transport_class_unregister(&fc_vport_class); --unreg_host_class: -- transport_class_unregister(&fc_host_class); -- return error; -+ return error; -+ return transport_class_register(&fc_transport_class); - } - - static void __exit fc_transport_exit(void) -@@ -2395,7 +2384,6 @@ fc_rport_final_delete(struct work_struct *work) - struct Scsi_Host *shost = rport_to_shost(rport); - struct fc_internal *i = to_fc_internal(shost->transportt); - unsigned long flags; -- int do_callback = 0; - - /* - * if a scan is pending, flush the SCSI Host work_q so that -@@ -2434,15 +2422,8 @@ fc_rport_final_delete(struct work_struct *work) - * Avoid this call if we already called it when we preserved the - * rport for the binding. 
- */ -- spin_lock_irqsave(shost->host_lock, flags); - if (!(rport->flags & FC_RPORT_DEVLOSS_CALLBK_DONE) && -- (i->f->dev_loss_tmo_callbk)) { -- rport->flags |= FC_RPORT_DEVLOSS_CALLBK_DONE; -- do_callback = 1; -- } -- spin_unlock_irqrestore(shost->host_lock, flags); -- -- if (do_callback) -+ (i->f->dev_loss_tmo_callbk)) - i->f->dev_loss_tmo_callbk(rport); - - fc_bsg_remove(rport->rqst_q); -@@ -2989,7 +2970,6 @@ fc_timeout_deleted_rport(struct work_struct *work) - struct fc_internal *i = to_fc_internal(shost->transportt); - struct fc_host_attrs *fc_host = shost_to_fc_host(shost); - unsigned long flags; -- int do_callback = 0; - - spin_lock_irqsave(shost->host_lock, flags); - -@@ -3055,6 +3035,7 @@ fc_timeout_deleted_rport(struct work_struct *work) - rport->roles = FC_PORT_ROLE_UNKNOWN; - rport->port_state = FC_PORTSTATE_NOTPRESENT; - rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT; -+ rport->flags |= FC_RPORT_DEVLOSS_CALLBK_DONE; - - /* - * Pre-emptively kill I/O rather than waiting for the work queue -@@ -3064,40 +3045,32 @@ fc_timeout_deleted_rport(struct work_struct *work) - spin_unlock_irqrestore(shost->host_lock, flags); - fc_terminate_rport_io(rport); - -- spin_lock_irqsave(shost->host_lock, flags); -- -- if (rport->port_state == FC_PORTSTATE_NOTPRESENT) { /* still missing */ -- -- /* remove the identifiers that aren't used in the consisting binding */ -- switch (fc_host->tgtid_bind_type) { -- case FC_TGTID_BIND_BY_WWPN: -- rport->node_name = -1; -- rport->port_id = -1; -- break; -- case FC_TGTID_BIND_BY_WWNN: -- rport->port_name = -1; -- rport->port_id = -1; -- break; -- case FC_TGTID_BIND_BY_ID: -- rport->node_name = -1; -- rport->port_name = -1; -- break; -- case FC_TGTID_BIND_NONE: /* to keep compiler happy */ -- break; -- } -- -- /* -- * As this only occurs if the remote port (scsi target) -- * went away and didn't come back - we'll remove -- * all attached scsi devices. -- */ -- rport->flags |= FC_RPORT_DEVLOSS_CALLBK_DONE; -- fc_queue_work(shost, &rport->stgt_delete_work); -+ BUG_ON(rport->port_state != FC_PORTSTATE_NOTPRESENT); - -- do_callback = 1; -+ /* remove the identifiers that aren't used in the consisting binding */ -+ switch (fc_host->tgtid_bind_type) { -+ case FC_TGTID_BIND_BY_WWPN: -+ rport->node_name = -1; -+ rport->port_id = -1; -+ break; -+ case FC_TGTID_BIND_BY_WWNN: -+ rport->port_name = -1; -+ rport->port_id = -1; -+ break; -+ case FC_TGTID_BIND_BY_ID: -+ rport->node_name = -1; -+ rport->port_name = -1; -+ break; -+ case FC_TGTID_BIND_NONE: /* to keep compiler happy */ -+ break; - } - -- spin_unlock_irqrestore(shost->host_lock, flags); -+ /* -+ * As this only occurs if the remote port (scsi target) -+ * went away and didn't come back - we'll remove -+ * all attached scsi devices. -+ */ -+ fc_queue_work(shost, &rport->stgt_delete_work); - - /* - * Notify the driver that the rport is now dead. 
The LLDD will -@@ -3105,7 +3078,7 @@ fc_timeout_deleted_rport(struct work_struct *work) - * - * Note: we set the CALLBK_DONE flag above to correspond - */ -- if (do_callback && i->f->dev_loss_tmo_callbk) -+ if (i->f->dev_loss_tmo_callbk) - i->f->dev_loss_tmo_callbk(rport); - } - -diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c -index de2f8c4..ad897df 100644 ---- a/drivers/scsi/scsi_transport_iscsi.c -+++ b/drivers/scsi/scsi_transport_iscsi.c -@@ -627,10 +627,8 @@ static void __iscsi_block_session(struct work_struct *work) - spin_unlock_irqrestore(&session->lock, flags); - scsi_target_block(&session->dev); - ISCSI_DBG_TRANS_SESSION(session, "Completed SCSI target blocking\n"); -- if (session->recovery_tmo >= 0) -- queue_delayed_work(iscsi_eh_timer_workq, -- &session->recovery_work, -- session->recovery_tmo * HZ); -+ queue_delayed_work(iscsi_eh_timer_workq, &session->recovery_work, -+ session->recovery_tmo * HZ); - } - - void iscsi_block_session(struct iscsi_cls_session *session) -@@ -1350,7 +1348,8 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev) - switch (ev->u.set_param.param) { - case ISCSI_PARAM_SESS_RECOVERY_TMO: - sscanf(data, "%d", &value); -- session->recovery_tmo = value; -+ if (value != 0) -+ session->recovery_tmo = value; - break; - default: - err = transport->set_param(conn, ev->u.set_param.param, -diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c -index 5081f97..12d58a7 100644 ---- a/drivers/scsi/st.c -+++ b/drivers/scsi/st.c -@@ -552,15 +552,13 @@ st_do_scsi(struct st_request * SRpnt, struct scsi_tape * STp, unsigned char *cmd - SRpnt->waiting = waiting; - - if (STp->buffer->do_dio) { -- mdata->page_order = 0; - mdata->nr_entries = STp->buffer->sg_segs; - mdata->pages = STp->buffer->mapped_pages; - } else { -- mdata->page_order = STp->buffer->reserved_page_order; - mdata->nr_entries = - DIV_ROUND_UP(bytes, PAGE_SIZE << mdata->page_order); -- mdata->pages = STp->buffer->reserved_pages; -- mdata->offset = 0; -+ STp->buffer->map_data.pages = STp->buffer->reserved_pages; -+ STp->buffer->map_data.offset = 0; - } - - memcpy(SRpnt->cmd, cmd, sizeof(SRpnt->cmd)); -@@ -3720,7 +3718,7 @@ static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dm - priority |= __GFP_ZERO; - - if (STbuffer->frp_segs) { -- order = STbuffer->reserved_page_order; -+ order = STbuffer->map_data.page_order; - b_size = PAGE_SIZE << order; - } else { - for (b_size = PAGE_SIZE, order = 0; -@@ -3753,7 +3751,7 @@ static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dm - segs++; - } - STbuffer->b_data = page_address(STbuffer->reserved_pages[0]); -- STbuffer->reserved_page_order = order; -+ STbuffer->map_data.page_order = order; - - return 1; - } -@@ -3766,7 +3764,7 @@ static void clear_buffer(struct st_buffer * st_bp) - - for (i=0; i < st_bp->frp_segs; i++) - memset(page_address(st_bp->reserved_pages[i]), 0, -- PAGE_SIZE << st_bp->reserved_page_order); -+ PAGE_SIZE << st_bp->map_data.page_order); - st_bp->cleared = 1; - } - -@@ -3774,7 +3772,7 @@ static void clear_buffer(struct st_buffer * st_bp) - /* Release the extra buffer */ - static void normalize_buffer(struct st_buffer * STbuffer) - { -- int i, order = STbuffer->reserved_page_order; -+ int i, order = STbuffer->map_data.page_order; - - for (i = 0; i < STbuffer->frp_segs; i++) { - __free_pages(STbuffer->reserved_pages[i], order); -@@ -3782,7 +3780,7 @@ static void normalize_buffer(struct st_buffer * STbuffer) - } - STbuffer->frp_segs = 0; - 
STbuffer->sg_segs = 0; -- STbuffer->reserved_page_order = 0; -+ STbuffer->map_data.page_order = 0; - STbuffer->map_data.offset = 0; - } - -@@ -3792,7 +3790,7 @@ static void normalize_buffer(struct st_buffer * STbuffer) - static int append_to_buffer(const char __user *ubp, struct st_buffer * st_bp, int do_count) - { - int i, cnt, res, offset; -- int length = PAGE_SIZE << st_bp->reserved_page_order; -+ int length = PAGE_SIZE << st_bp->map_data.page_order; - - for (i = 0, offset = st_bp->buffer_bytes; - i < st_bp->frp_segs && offset >= length; i++) -@@ -3824,7 +3822,7 @@ static int append_to_buffer(const char __user *ubp, struct st_buffer * st_bp, in - static int from_buffer(struct st_buffer * st_bp, char __user *ubp, int do_count) - { - int i, cnt, res, offset; -- int length = PAGE_SIZE << st_bp->reserved_page_order; -+ int length = PAGE_SIZE << st_bp->map_data.page_order; - - for (i = 0, offset = st_bp->read_pointer; - i < st_bp->frp_segs && offset >= length; i++) -@@ -3857,7 +3855,7 @@ static void move_buffer_data(struct st_buffer * st_bp, int offset) - { - int src_seg, dst_seg, src_offset = 0, dst_offset; - int count, total; -- int length = PAGE_SIZE << st_bp->reserved_page_order; -+ int length = PAGE_SIZE << st_bp->map_data.page_order; - - if (offset == 0) - return; -@@ -4579,6 +4577,7 @@ static int sgl_map_user_pages(struct st_buffer *STbp, - } - - mdata->offset = uaddr & ~PAGE_MASK; -+ mdata->page_order = 0; - STbp->mapped_pages = pages; - - return nr_pages; -diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h -index f91a67c..544dc6b 100644 ---- a/drivers/scsi/st.h -+++ b/drivers/scsi/st.h -@@ -46,7 +46,6 @@ struct st_buffer { - struct st_request *last_SRpnt; - struct st_cmdstatus cmdstat; - struct page **reserved_pages; -- int reserved_page_order; - struct page **mapped_pages; - struct rq_map_data map_data; - unsigned char *b_data; -diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c -index 5ed1b82..737b4c9 100644 ---- a/drivers/serial/8250.c -+++ b/drivers/serial/8250.c -@@ -83,9 +83,6 @@ static unsigned int skip_txen_test; /* force skip of txen test at init time */ - - #define PASS_LIMIT 256 - --#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE) -- -- - /* - * We default to IRQ0 for the "no irq" hack. Some - * machine types want others as well - they're free -@@ -1342,12 +1339,14 @@ static void serial8250_start_tx(struct uart_port *port) - serial_out(up, UART_IER, up->ier); - - if (up->bugs & UART_BUG_TXEN) { -- unsigned char lsr; -+ unsigned char lsr, iir; - lsr = serial_in(up, UART_LSR); - up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS; -+ iir = serial_in(up, UART_IIR) & 0x0f; - if ((up->port.type == PORT_RM9000) ? -- (lsr & UART_LSR_THRE) : -- (lsr & UART_LSR_TEMT)) -+ (lsr & UART_LSR_THRE && -+ (iir == UART_IIR_NO_INT || iir == UART_IIR_THRI)) : -+ (lsr & UART_LSR_TEMT && iir & UART_IIR_NO_INT)) - transmit_chars(up); - } - } -@@ -1795,7 +1794,7 @@ static unsigned int serial8250_tx_empty(struct uart_port *port) - up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS; - spin_unlock_irqrestore(&up->port.lock, flags); - -- return (lsr & BOTH_EMPTY) == BOTH_EMPTY ? TIOCSER_TEMT : 0; -+ return lsr & UART_LSR_TEMT ? 
TIOCSER_TEMT : 0; - } - - static unsigned int serial8250_get_mctrl(struct uart_port *port) -@@ -1853,6 +1852,8 @@ static void serial8250_break_ctl(struct uart_port *port, int break_state) - spin_unlock_irqrestore(&up->port.lock, flags); - } - -+#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE) -+ - /* - * Wait for transmitter & holding register to empty - */ -diff --git a/drivers/serial/8250_pnp.c b/drivers/serial/8250_pnp.c -index deac67e..d71dfe3 100644 ---- a/drivers/serial/8250_pnp.c -+++ b/drivers/serial/8250_pnp.c -@@ -328,7 +328,15 @@ static const struct pnp_device_id pnp_dev_table[] = { - /* U.S. Robotics 56K Voice INT PnP*/ - { "USR9190", 0 }, - /* Wacom tablets */ -- { "WACFXXX", 0 }, -+ { "WACF004", 0 }, -+ { "WACF005", 0 }, -+ { "WACF006", 0 }, -+ { "WACF007", 0 }, -+ { "WACF008", 0 }, -+ { "WACF009", 0 }, -+ { "WACF00A", 0 }, -+ { "WACF00B", 0 }, -+ { "WACF00C", 0 }, - /* Compaq touchscreen */ - { "FPI2002", 0 }, - /* Fujitsu Stylistic touchscreens */ -@@ -346,8 +354,6 @@ static const struct pnp_device_id pnp_dev_table[] = { - { "FUJ02E5", 0 }, - /* Fujitsu P-series tablet PC device */ - { "FUJ02E6", 0 }, -- /* Fujitsu Wacom 2FGT Tablet PC device */ -- { "FUJ02E7", 0 }, - /* - * LG C1 EXPRESS DUAL (C1-PB11A3) touch screen (actually a FUJ02E6 in - * disguise) -diff --git a/drivers/serial/uartlite.c b/drivers/serial/uartlite.c -index ab2ab3c..377f271 100644 ---- a/drivers/serial/uartlite.c -+++ b/drivers/serial/uartlite.c -@@ -394,7 +394,7 @@ static void ulite_console_write(struct console *co, const char *s, - spin_unlock_irqrestore(&port->lock, flags); - } - --static int __devinit ulite_console_setup(struct console *co, char *options) -+static int __init ulite_console_setup(struct console *co, char *options) - { - struct uart_port *port; - int baud = 9600; -diff --git a/drivers/ssb/sprom.c b/drivers/ssb/sprom.c -index eb70843..8943015 100644 ---- a/drivers/ssb/sprom.c -+++ b/drivers/ssb/sprom.c -@@ -13,8 +13,6 @@ - - #include "ssb_private.h" - --#include -- - - static const struct ssb_sprom *fallback_sprom; - -@@ -35,27 +33,17 @@ static int sprom2hex(const u16 *sprom, char *buf, size_t buf_len, - static int hex2sprom(u16 *sprom, const char *dump, size_t len, - size_t sprom_size_words) - { -- char c, tmp[5] = { 0 }; -- int err, cnt = 0; -+ char tmp[5] = { 0 }; -+ int cnt = 0; - unsigned long parsed; - -- /* Strip whitespace at the end. */ -- while (len) { -- c = dump[len - 1]; -- if (!isspace(c) && c != '\0') -- break; -- len--; -- } -- /* Length must match exactly. 
*/ -- if (len != sprom_size_words * 4) -+ if (len < sprom_size_words * 2) - return -EINVAL; - - while (cnt < sprom_size_words) { - memcpy(tmp, dump, 4); - dump += 4; -- err = strict_strtoul(tmp, 16, &parsed); -- if (err) -- return err; -+ parsed = simple_strtoul(tmp, NULL, 16); - sprom[cnt++] = swab16((u16)parsed); - } - -diff --git a/drivers/staging/asus_oled/asus_oled.c b/drivers/staging/asus_oled/asus_oled.c -index 43c57b7..f4c2657 100644 ---- a/drivers/staging/asus_oled/asus_oled.c -+++ b/drivers/staging/asus_oled/asus_oled.c -@@ -194,11 +194,9 @@ static ssize_t set_enabled(struct device *dev, struct device_attribute *attr, - { - struct usb_interface *intf = to_usb_interface(dev); - struct asus_oled_dev *odev = usb_get_intfdata(intf); -- unsigned long value; -- if (strict_strtoul(buf, 10, &value)) -- return -EINVAL; -+ int temp = strict_strtoul(buf, 10, NULL); - -- enable_oled(odev, value); -+ enable_oled(odev, temp); - - return count; - } -@@ -209,12 +207,10 @@ static ssize_t class_set_enabled(struct device *device, - { - struct asus_oled_dev *odev = - (struct asus_oled_dev *) dev_get_drvdata(device); -- unsigned long value; - -- if (strict_strtoul(buf, 10, &value)) -- return -EINVAL; -+ int temp = strict_strtoul(buf, 10, NULL); - -- enable_oled(odev, value); -+ enable_oled(odev, temp); - - return count; - } -diff --git a/drivers/staging/hv/Hv.c b/drivers/staging/hv/Hv.c -index c2809f2..c5b6613 100644 ---- a/drivers/staging/hv/Hv.c -+++ b/drivers/staging/hv/Hv.c -@@ -386,7 +386,7 @@ u16 HvSignalEvent(void) - * retrieve the initialized message and event pages. Otherwise, we create and - * initialize the message and event pages. - */ --void HvSynicInit(void *irqarg) -+int HvSynicInit(u32 irqVector) - { - u64 version; - union hv_synic_simp simp; -@@ -394,14 +394,13 @@ void HvSynicInit(void *irqarg) - union hv_synic_sint sharedSint; - union hv_synic_scontrol sctrl; - u64 guestID; -- u32 irqVector = *((u32 *)(irqarg)); -- int cpu = smp_processor_id(); -+ int ret = 0; - - DPRINT_ENTER(VMBUS); - - if (!gHvContext.HypercallPage) { - DPRINT_EXIT(VMBUS); -- return; -+ return ret; - } - - /* Check the version */ -@@ -426,27 +425,27 @@ void HvSynicInit(void *irqarg) - */ - rdmsrl(HV_X64_MSR_GUEST_OS_ID, guestID); - if (guestID == HV_LINUX_GUEST_ID) { -- gHvContext.synICMessagePage[cpu] = -+ gHvContext.synICMessagePage[0] = - phys_to_virt(simp.BaseSimpGpa << PAGE_SHIFT); -- gHvContext.synICEventPage[cpu] = -+ gHvContext.synICEventPage[0] = - phys_to_virt(siefp.BaseSiefpGpa << PAGE_SHIFT); - } else { - DPRINT_ERR(VMBUS, "unknown guest id!!"); - goto Cleanup; - } - DPRINT_DBG(VMBUS, "MAPPED: Simp: %p, Sifep: %p", -- gHvContext.synICMessagePage[cpu], -- gHvContext.synICEventPage[cpu]); -+ gHvContext.synICMessagePage[0], -+ gHvContext.synICEventPage[0]); - } else { -- gHvContext.synICMessagePage[cpu] = (void *)get_zeroed_page(GFP_ATOMIC); -- if (gHvContext.synICMessagePage[cpu] == NULL) { -+ gHvContext.synICMessagePage[0] = osd_PageAlloc(1); -+ if (gHvContext.synICMessagePage[0] == NULL) { - DPRINT_ERR(VMBUS, - "unable to allocate SYNIC message page!!"); - goto Cleanup; - } - -- gHvContext.synICEventPage[cpu] = (void *)get_zeroed_page(GFP_ATOMIC); -- if (gHvContext.synICEventPage[cpu] == NULL) { -+ gHvContext.synICEventPage[0] = osd_PageAlloc(1); -+ if (gHvContext.synICEventPage[0] == NULL) { - DPRINT_ERR(VMBUS, - "unable to allocate SYNIC event page!!"); - goto Cleanup; -@@ -455,7 +454,7 @@ void HvSynicInit(void *irqarg) - /* Setup the Synic's message page */ - rdmsrl(HV_X64_MSR_SIMP, simp.AsUINT64); 
- simp.SimpEnabled = 1; -- simp.BaseSimpGpa = virt_to_phys(gHvContext.synICMessagePage[cpu]) -+ simp.BaseSimpGpa = virt_to_phys(gHvContext.synICMessagePage[0]) - >> PAGE_SHIFT; - - DPRINT_DBG(VMBUS, "HV_X64_MSR_SIMP msr set to: %llx", -@@ -466,7 +465,7 @@ void HvSynicInit(void *irqarg) - /* Setup the Synic's event page */ - rdmsrl(HV_X64_MSR_SIEFP, siefp.AsUINT64); - siefp.SiefpEnabled = 1; -- siefp.BaseSiefpGpa = virt_to_phys(gHvContext.synICEventPage[cpu]) -+ siefp.BaseSiefpGpa = virt_to_phys(gHvContext.synICEventPage[0]) - >> PAGE_SHIFT; - - DPRINT_DBG(VMBUS, "HV_X64_MSR_SIEFP msr set to: %llx", -@@ -502,30 +501,32 @@ void HvSynicInit(void *irqarg) - - DPRINT_EXIT(VMBUS); - -- return; -+ return ret; - - Cleanup: -+ ret = -1; -+ - if (gHvContext.GuestId == HV_LINUX_GUEST_ID) { -- if (gHvContext.synICEventPage[cpu]) -- osd_PageFree(gHvContext.synICEventPage[cpu], 1); -+ if (gHvContext.synICEventPage[0]) -+ osd_PageFree(gHvContext.synICEventPage[0], 1); - -- if (gHvContext.synICMessagePage[cpu]) -- osd_PageFree(gHvContext.synICMessagePage[cpu], 1); -+ if (gHvContext.synICMessagePage[0]) -+ osd_PageFree(gHvContext.synICMessagePage[0], 1); - } - - DPRINT_EXIT(VMBUS); -- return; -+ -+ return ret; - } - - /** - * HvSynicCleanup - Cleanup routine for HvSynicInit(). - */ --void HvSynicCleanup(void *arg) -+void HvSynicCleanup(void) - { - union hv_synic_sint sharedSint; - union hv_synic_simp simp; - union hv_synic_siefp siefp; -- int cpu = smp_processor_id(); - - DPRINT_ENTER(VMBUS); - -@@ -538,7 +539,6 @@ void HvSynicCleanup(void *arg) - - sharedSint.Masked = 1; - -- /* Need to correctly cleanup in the case of SMP!!! */ - /* Disable the interrupt */ - wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, sharedSint.AsUINT64); - -@@ -560,8 +560,8 @@ void HvSynicCleanup(void *arg) - - wrmsrl(HV_X64_MSR_SIEFP, siefp.AsUINT64); - -- osd_PageFree(gHvContext.synICMessagePage[cpu], 1); -- osd_PageFree(gHvContext.synICEventPage[cpu], 1); -+ osd_PageFree(gHvContext.synICMessagePage[0], 1); -+ osd_PageFree(gHvContext.synICEventPage[0], 1); - } - - DPRINT_EXIT(VMBUS); -diff --git a/drivers/staging/hv/Hv.h b/drivers/staging/hv/Hv.h -index fce4b5c..5379e4b 100644 ---- a/drivers/staging/hv/Hv.h -+++ b/drivers/staging/hv/Hv.h -@@ -93,7 +93,7 @@ static const struct hv_guid VMBUS_SERVICE_ID = { - }, - }; - --#define MAX_NUM_CPUS 32 -+#define MAX_NUM_CPUS 1 - - - struct hv_input_signal_event_buffer { -@@ -137,8 +137,8 @@ extern u16 HvPostMessage(union hv_connection_id connectionId, - - extern u16 HvSignalEvent(void); - --extern void HvSynicInit(void *irqarg); -+extern int HvSynicInit(u32 irqVector); - --extern void HvSynicCleanup(void *arg); -+extern void HvSynicCleanup(void); - - #endif /* __HV_H__ */ -diff --git a/drivers/staging/hv/Vmbus.c b/drivers/staging/hv/Vmbus.c -index 35a023e..a4dd06f 100644 ---- a/drivers/staging/hv/Vmbus.c -+++ b/drivers/staging/hv/Vmbus.c -@@ -129,7 +129,7 @@ static int VmbusOnDeviceAdd(struct hv_device *dev, void *AdditionalInfo) - - /* strcpy(dev->name, "vmbus"); */ - /* SynIC setup... 
*/ -- on_each_cpu(HvSynicInit, (void *)irqvector, 1); -+ ret = HvSynicInit(*irqvector); - - /* Connect to VMBus in the root partition */ - ret = VmbusConnect(); -@@ -150,7 +150,7 @@ static int VmbusOnDeviceRemove(struct hv_device *dev) - DPRINT_ENTER(VMBUS); - VmbusChannelReleaseUnattachedChannels(); - VmbusDisconnect(); -- on_each_cpu(HvSynicCleanup, NULL, 1); -+ HvSynicCleanup(); - DPRINT_EXIT(VMBUS); - - return ret; -@@ -173,8 +173,7 @@ static void VmbusOnCleanup(struct hv_driver *drv) - */ - static void VmbusOnMsgDPC(struct hv_driver *drv) - { -- int cpu = smp_processor_id(); -- void *page_addr = gHvContext.synICMessagePage[cpu]; -+ void *page_addr = gHvContext.synICMessagePage[0]; - struct hv_message *msg = (struct hv_message *)page_addr + - VMBUS_MESSAGE_SINT; - struct hv_message *copied; -@@ -231,12 +230,11 @@ static void VmbusOnEventDPC(struct hv_driver *drv) - static int VmbusOnISR(struct hv_driver *drv) - { - int ret = 0; -- int cpu = smp_processor_id(); - void *page_addr; - struct hv_message *msg; - union hv_synic_event_flags *event; - -- page_addr = gHvContext.synICMessagePage[cpu]; -+ page_addr = gHvContext.synICMessagePage[0]; - msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT; - - DPRINT_ENTER(VMBUS); -@@ -250,7 +248,7 @@ static int VmbusOnISR(struct hv_driver *drv) - } - - /* TODO: Check if there are events to be process */ -- page_addr = gHvContext.synICEventPage[cpu]; -+ page_addr = gHvContext.synICEventPage[0]; - event = (union hv_synic_event_flags *)page_addr + VMBUS_MESSAGE_SINT; - - /* Since we are a child, we only need to check bit 0 */ -diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211.h b/drivers/staging/rtl8187se/ieee80211/ieee80211.h -index 0d490c1..3222c22 100644 ---- a/drivers/staging/rtl8187se/ieee80211/ieee80211.h -+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211.h -@@ -1318,13 +1318,13 @@ extern int ieee80211_encrypt_fragment( - struct sk_buff *frag, - int hdr_len); - --extern int ieee80211_rtl_xmit(struct sk_buff *skb, -+extern int ieee80211_xmit(struct sk_buff *skb, - struct net_device *dev); - extern void ieee80211_txb_free(struct ieee80211_txb *); - - - /* ieee80211_rx.c */ --extern int ieee80211_rtl_rx(struct ieee80211_device *ieee, struct sk_buff *skb, -+extern int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, - struct ieee80211_rx_stats *rx_stats); - extern void ieee80211_rx_mgt(struct ieee80211_device *ieee, - struct ieee80211_hdr_4addr *header, -@@ -1376,8 +1376,8 @@ extern void ieee80211_stop_protocol(struct ieee80211_device *ieee); - extern void ieee80211_softmac_start_protocol(struct ieee80211_device *ieee); - extern void ieee80211_softmac_stop_protocol(struct ieee80211_device *ieee); - extern void ieee80211_reset_queue(struct ieee80211_device *ieee); --extern void ieee80211_rtl_wake_queue(struct ieee80211_device *ieee); --extern void ieee80211_rtl_stop_queue(struct ieee80211_device *ieee); -+extern void ieee80211_wake_queue(struct ieee80211_device *ieee); -+extern void ieee80211_stop_queue(struct ieee80211_device *ieee); - extern struct sk_buff *ieee80211_get_beacon(struct ieee80211_device *ieee); - extern void ieee80211_start_send_beacons(struct ieee80211_device *ieee); - extern void ieee80211_stop_send_beacons(struct ieee80211_device *ieee); -@@ -1385,7 +1385,7 @@ extern int ieee80211_wpa_supplicant_ioctl(struct ieee80211_device *ieee, struct - extern void notify_wx_assoc_event(struct ieee80211_device *ieee); - extern void ieee80211_ps_tx_ack(struct ieee80211_device *ieee, short success); - extern void 
SendDisassociation(struct ieee80211_device *ieee,u8* asSta,u8 asRsn); --extern void ieee80211_rtl_start_scan(struct ieee80211_device *ieee); -+extern void ieee80211_start_scan(struct ieee80211_device *ieee); - - //Add for RF power on power off by lizhaoming 080512 - extern void SendDisassociation(struct ieee80211_device *ieee, -diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c -index 7ad305b..5e2e79b 100644 ---- a/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c -+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c -@@ -470,7 +470,7 @@ drop: - /* All received frames are sent to this function. @skb contains the frame in - * IEEE 802.11 format, i.e., in the format it was sent over air. - * This function is called only as a tasklet (software IRQ). */ --int ieee80211_rtl_rx(struct ieee80211_device *ieee, struct sk_buff *skb, -+int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, - struct ieee80211_rx_stats *rx_stats) - { - struct net_device *dev = ieee->dev; -diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c -index a2fa9a9..334e4c7 100644 ---- a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c -+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c -@@ -689,7 +689,7 @@ void ieee80211_stop_scan(struct ieee80211_device *ieee) - } - - /* called with ieee->lock held */ --void ieee80211_rtl_start_scan(struct ieee80211_device *ieee) -+void ieee80211_start_scan(struct ieee80211_device *ieee) - { - if(IS_DOT11D_ENABLE(ieee) ) - { -@@ -1196,7 +1196,7 @@ void ieee80211_associate_step1(struct ieee80211_device *ieee) - } - } - --void ieee80211_rtl_auth_challenge(struct ieee80211_device *ieee, u8 *challenge, int chlen) -+void ieee80211_auth_challenge(struct ieee80211_device *ieee, u8 *challenge, int chlen) - { - u8 *c; - struct sk_buff *skb; -@@ -1898,7 +1898,7 @@ associate_complete: - - ieee80211_associate_step2(ieee); - }else{ -- ieee80211_rtl_auth_challenge(ieee, challenge, chlen); -+ ieee80211_auth_challenge(ieee, challenge, chlen); - } - }else{ - ieee->softmac_stats.rx_auth_rs_err++; -@@ -2047,7 +2047,7 @@ void ieee80211_reset_queue(struct ieee80211_device *ieee) - - } - --void ieee80211_rtl_wake_queue(struct ieee80211_device *ieee) -+void ieee80211_wake_queue(struct ieee80211_device *ieee) - { - - unsigned long flags; -@@ -2089,7 +2089,7 @@ exit : - } - - --void ieee80211_rtl_stop_queue(struct ieee80211_device *ieee) -+void ieee80211_stop_queue(struct ieee80211_device *ieee) - { - //unsigned long flags; - //spin_lock_irqsave(&ieee->lock,flags); -@@ -2301,7 +2301,7 @@ void ieee80211_start_bss(struct ieee80211_device *ieee) - //#else - if (ieee->state == IEEE80211_NOLINK){ - ieee->actscanning = true; -- ieee80211_rtl_start_scan(ieee); -+ ieee80211_start_scan(ieee); - } - //#endif - spin_unlock_irqrestore(&ieee->lock, flags); -@@ -2357,7 +2357,7 @@ void ieee80211_associate_retry_wq(struct work_struct *work) - if(ieee->state == IEEE80211_NOLINK){ - ieee->beinretry = false; - ieee->actscanning = true; -- ieee80211_rtl_start_scan(ieee); -+ ieee80211_start_scan(ieee); - } - //YJ,add,080828, notify os here - if(ieee->state == IEEE80211_NOLINK) -diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c -index c7996ea..e2945db 100644 ---- a/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c -+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c -@@ -305,7 +305,7 @@ 
ieee80211_classify(struct sk_buff *skb, struct ieee80211_network *network) - } - - /* SKBs are added to the ieee->tx_queue. */ --int ieee80211_rtl_xmit(struct sk_buff *skb, -+int ieee80211_xmit(struct sk_buff *skb, - struct net_device *dev) - { - struct ieee80211_device *ieee = netdev_priv(dev); -diff --git a/drivers/staging/rtl8187se/r8180_core.c b/drivers/staging/rtl8187se/r8180_core.c -index 3f19143..53e654d 100644 ---- a/drivers/staging/rtl8187se/r8180_core.c -+++ b/drivers/staging/rtl8187se/r8180_core.c -@@ -1830,7 +1830,7 @@ void rtl8180_rx(struct net_device *dev) - if(priv->rx_skb->len > 4) - skb_trim(priv->rx_skb,priv->rx_skb->len-4); - #ifndef RX_DONT_PASS_UL -- if(!ieee80211_rtl_rx(priv->ieee80211, -+ if(!ieee80211_rx(priv->ieee80211, - priv->rx_skb, &stats)){ - #endif // RX_DONT_PASS_UL - -@@ -1936,11 +1936,11 @@ rate) - if (!check_nic_enought_desc(dev, priority)){ - DMESGW("Error: no descriptor left by previous TX (avail %d) ", - get_curr_tx_free_desc(dev, priority)); -- ieee80211_rtl_stop_queue(priv->ieee80211); -+ ieee80211_stop_queue(priv->ieee80211); - } - rtl8180_tx(dev, skb->data, skb->len, priority, morefrag,0,rate); - if (!check_nic_enought_desc(dev, priority)) -- ieee80211_rtl_stop_queue(priv->ieee80211); -+ ieee80211_stop_queue(priv->ieee80211); - - spin_unlock_irqrestore(&priv->tx_lock,flags); - } -@@ -3846,7 +3846,7 @@ static const struct net_device_ops rtl8180_netdev_ops = { - .ndo_set_mac_address = r8180_set_mac_adr, - .ndo_validate_addr = eth_validate_addr, - .ndo_change_mtu = eth_change_mtu, -- .ndo_start_xmit = ieee80211_rtl_xmit, -+ .ndo_start_xmit = ieee80211_xmit, - }; - - static int __devinit rtl8180_pci_probe(struct pci_dev *pdev, -@@ -4066,7 +4066,7 @@ void rtl8180_try_wake_queue(struct net_device *dev, int pri) - spin_unlock_irqrestore(&priv->tx_lock,flags); - - if(enough_desc) -- ieee80211_rtl_wake_queue(priv->ieee80211); -+ ieee80211_wake_queue(priv->ieee80211); - } - - void rtl8180_tx_isr(struct net_device *dev, int pri,short error) -diff --git a/drivers/staging/rtl8187se/r8180_wx.c b/drivers/staging/rtl8187se/r8180_wx.c -index 637ee8e..766892e 100644 ---- a/drivers/staging/rtl8187se/r8180_wx.c -+++ b/drivers/staging/rtl8187se/r8180_wx.c -@@ -377,7 +377,7 @@ static int r8180_wx_set_scan(struct net_device *dev, struct iw_request_info *a, - // queue_work(priv->ieee80211->wq, &priv->ieee80211->wx_sync_scan_wq); - //printk("start scan============================>\n"); - ieee80211_softmac_ips_scan_syncro(priv->ieee80211); --//ieee80211_rtl_start_scan(priv->ieee80211); -+//ieee80211_start_scan(priv->ieee80211); - /* intentionally forget to up sem */ - // up(&priv->ieee80211->wx_sem); - ret = 0; -diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c -index d9461c9..2473cf0 100644 ---- a/drivers/usb/class/usbtmc.c -+++ b/drivers/usb/class/usbtmc.c -@@ -562,16 +562,10 @@ static ssize_t usbtmc_write(struct file *filp, const char __user *buf, - n_bytes = roundup(12 + this_part, 4); - memset(buffer + 12 + this_part, 0, n_bytes - (12 + this_part)); - -- do { -- retval = usb_bulk_msg(data->usb_dev, -- usb_sndbulkpipe(data->usb_dev, -- data->bulk_out), -- buffer, n_bytes, -- &actual, USBTMC_TIMEOUT); -- if (retval != 0) -- break; -- n_bytes -= actual; -- } while (n_bytes); -+ retval = usb_bulk_msg(data->usb_dev, -+ usb_sndbulkpipe(data->usb_dev, -+ data->bulk_out), -+ buffer, n_bytes, &actual, USBTMC_TIMEOUT); - - data->bTag_last_write = data->bTag; - data->bTag++; -diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c -index 
355dffc..96f1171 100644 ---- a/drivers/usb/core/devices.c -+++ b/drivers/usb/core/devices.c -@@ -494,7 +494,7 @@ static ssize_t usb_device_dump(char __user **buffer, size_t *nbytes, - return 0; - /* allocate 2^1 pages = 8K (on i386); - * should be more than enough for one device */ -- pages_start = (char *)__get_free_pages(GFP_NOIO, 1); -+ pages_start = (char *)__get_free_pages(GFP_KERNEL, 1); - if (!pages_start) - return -ENOMEM; - -diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c -index 24120db..181f78c 100644 ---- a/drivers/usb/core/devio.c -+++ b/drivers/usb/core/devio.c -@@ -1312,9 +1312,9 @@ static int processcompl(struct async *as, void __user * __user *arg) - void __user *addr = as->userurb; - unsigned int i; - -- if (as->userbuffer && urb->actual_length) -+ if (as->userbuffer) - if (copy_to_user(as->userbuffer, urb->transfer_buffer, -- urb->actual_length)) -+ urb->transfer_buffer_length)) - goto err_out; - if (put_user(as->status, &userurb->status)) - goto err_out; -@@ -1334,11 +1334,14 @@ static int processcompl(struct async *as, void __user * __user *arg) - } - } - -+ free_async(as); -+ - if (put_user(addr, (void __user * __user *)arg)) - return -EFAULT; - return 0; - - err_out: -+ free_async(as); - return -EFAULT; - } - -@@ -1368,11 +1371,8 @@ static struct async *reap_as(struct dev_state *ps) - static int proc_reapurb(struct dev_state *ps, void __user *arg) - { - struct async *as = reap_as(ps); -- if (as) { -- int retval = processcompl(as, (void __user * __user *)arg); -- free_async(as); -- return retval; -- } -+ if (as) -+ return processcompl(as, (void __user * __user *)arg); - if (signal_pending(current)) - return -EINTR; - return -EIO; -@@ -1380,16 +1380,11 @@ static int proc_reapurb(struct dev_state *ps, void __user *arg) - - static int proc_reapurbnonblock(struct dev_state *ps, void __user *arg) - { -- int retval; - struct async *as; - -- as = async_getcompleted(ps); -- retval = -EAGAIN; -- if (as) { -- retval = processcompl(as, (void __user * __user *)arg); -- free_async(as); -- } -- return retval; -+ if (!(as = async_getcompleted(ps))) -+ return -EAGAIN; -+ return processcompl(as, (void __user * __user *)arg); - } - - #ifdef CONFIG_COMPAT -@@ -1440,9 +1435,9 @@ static int processcompl_compat(struct async *as, void __user * __user *arg) - void __user *addr = as->userurb; - unsigned int i; - -- if (as->userbuffer && urb->actual_length) -+ if (as->userbuffer) - if (copy_to_user(as->userbuffer, urb->transfer_buffer, -- urb->actual_length)) -+ urb->transfer_buffer_length)) - return -EFAULT; - if (put_user(as->status, &userurb->status)) - return -EFAULT; -@@ -1462,6 +1457,7 @@ static int processcompl_compat(struct async *as, void __user * __user *arg) - } - } - -+ free_async(as); - if (put_user(ptr_to_compat(addr), (u32 __user *)arg)) - return -EFAULT; - return 0; -@@ -1470,11 +1466,8 @@ static int processcompl_compat(struct async *as, void __user * __user *arg) - static int proc_reapurb_compat(struct dev_state *ps, void __user *arg) - { - struct async *as = reap_as(ps); -- if (as) { -- int retval = processcompl_compat(as, (void __user * __user *)arg); -- free_async(as); -- return retval; -- } -+ if (as) -+ return processcompl_compat(as, (void __user * __user *)arg); - if (signal_pending(current)) - return -EINTR; - return -EIO; -@@ -1482,16 +1475,11 @@ static int proc_reapurb_compat(struct dev_state *ps, void __user *arg) - - static int proc_reapurbnonblock_compat(struct dev_state *ps, void __user *arg) - { -- int retval; - struct async *as; - -- retval = 
-EAGAIN; -- as = async_getcompleted(ps); -- if (as) { -- retval = processcompl_compat(as, (void __user * __user *)arg); -- free_async(as); -- } -- return retval; -+ if (!(as = async_getcompleted(ps))) -+ return -EAGAIN; -+ return processcompl_compat(as, (void __user * __user *)arg); - } - - #endif -diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c -index 1a7d54b..0f857e6 100644 ---- a/drivers/usb/core/hub.c -+++ b/drivers/usb/core/hub.c -@@ -1612,12 +1612,12 @@ static inline void announce_device(struct usb_device *udev) { } - #endif - - /** -- * usb_enumerate_device_otg - FIXME (usbcore-internal) -+ * usb_configure_device_otg - FIXME (usbcore-internal) - * @udev: newly addressed device (in ADDRESS state) - * -- * Finish enumeration for On-The-Go devices -+ * Do configuration for On-The-Go devices - */ --static int usb_enumerate_device_otg(struct usb_device *udev) -+static int usb_configure_device_otg(struct usb_device *udev) - { - int err = 0; - -@@ -1688,7 +1688,7 @@ fail: - - - /** -- * usb_enumerate_device - Read device configs/intfs/otg (usbcore-internal) -+ * usb_configure_device - Detect and probe device intfs/otg (usbcore-internal) - * @udev: newly addressed device (in ADDRESS state) - * - * This is only called by usb_new_device() and usb_authorize_device() -@@ -1699,7 +1699,7 @@ fail: - * the string descriptors, as they will be errored out by the device - * until it has been authorized. - */ --static int usb_enumerate_device(struct usb_device *udev) -+static int usb_configure_device(struct usb_device *udev) - { - int err; - -@@ -1723,7 +1723,7 @@ static int usb_enumerate_device(struct usb_device *udev) - udev->descriptor.iManufacturer); - udev->serial = usb_cache_string(udev, udev->descriptor.iSerialNumber); - } -- err = usb_enumerate_device_otg(udev); -+ err = usb_configure_device_otg(udev); - fail: - return err; - } -@@ -1733,8 +1733,8 @@ fail: - * usb_new_device - perform initial device setup (usbcore-internal) - * @udev: newly addressed device (in ADDRESS state) - * -- * This is called with devices which have been detected but not fully -- * enumerated. The device descriptor is available, but not descriptors -+ * This is called with devices which have been enumerated, but not yet -+ * configured. The device descriptor is available, but not descriptors - * for any device configuration. The caller must have locked either - * the parent hub (if udev is a normal device) or else the - * usb_bus_list_lock (if udev is a root hub). 
The parent's pointer to -@@ -1757,8 +1757,8 @@ int usb_new_device(struct usb_device *udev) - if (udev->parent) - usb_autoresume_device(udev->parent); - -- usb_detect_quirks(udev); -- err = usb_enumerate_device(udev); /* Read descriptors */ -+ usb_detect_quirks(udev); /* Determine quirks */ -+ err = usb_configure_device(udev); /* detect & probe dev/intfs */ - if (err < 0) - goto fail; - dev_dbg(&udev->dev, "udev %d, busnum %d, minor = %d\n", -@@ -1803,23 +1803,21 @@ fail: - */ - int usb_deauthorize_device(struct usb_device *usb_dev) - { -+ unsigned cnt; - usb_lock_device(usb_dev); - if (usb_dev->authorized == 0) - goto out_unauthorized; -- - usb_dev->authorized = 0; - usb_set_configuration(usb_dev, -1); -- -- kfree(usb_dev->product); - usb_dev->product = kstrdup("n/a (unauthorized)", GFP_KERNEL); -- kfree(usb_dev->manufacturer); - usb_dev->manufacturer = kstrdup("n/a (unauthorized)", GFP_KERNEL); -- kfree(usb_dev->serial); - usb_dev->serial = kstrdup("n/a (unauthorized)", GFP_KERNEL); -- -- usb_destroy_configuration(usb_dev); -+ kfree(usb_dev->config); -+ usb_dev->config = NULL; -+ for (cnt = 0; cnt < usb_dev->descriptor.bNumConfigurations; cnt++) -+ kfree(usb_dev->rawdescriptors[cnt]); - usb_dev->descriptor.bNumConfigurations = 0; -- -+ kfree(usb_dev->rawdescriptors); - out_unauthorized: - usb_unlock_device(usb_dev); - return 0; -@@ -1829,11 +1827,15 @@ out_unauthorized: - int usb_authorize_device(struct usb_device *usb_dev) - { - int result = 0, c; -- - usb_lock_device(usb_dev); - if (usb_dev->authorized == 1) - goto out_authorized; -- -+ kfree(usb_dev->product); -+ usb_dev->product = NULL; -+ kfree(usb_dev->manufacturer); -+ usb_dev->manufacturer = NULL; -+ kfree(usb_dev->serial); -+ usb_dev->serial = NULL; - result = usb_autoresume_device(usb_dev); - if (result < 0) { - dev_err(&usb_dev->dev, -@@ -1846,18 +1848,10 @@ int usb_authorize_device(struct usb_device *usb_dev) - "authorization: %d\n", result); - goto error_device_descriptor; - } -- -- kfree(usb_dev->product); -- usb_dev->product = NULL; -- kfree(usb_dev->manufacturer); -- usb_dev->manufacturer = NULL; -- kfree(usb_dev->serial); -- usb_dev->serial = NULL; -- - usb_dev->authorized = 1; -- result = usb_enumerate_device(usb_dev); -+ result = usb_configure_device(usb_dev); - if (result < 0) -- goto error_enumerate; -+ goto error_configure; - /* Choose and set the configuration. This registers the interfaces - * with the driver core and lets interface drivers bind to them. 
- */ -@@ -1872,10 +1866,8 @@ int usb_authorize_device(struct usb_device *usb_dev) - } - } - dev_info(&usb_dev->dev, "authorized to connect\n"); -- --error_enumerate: -+error_configure: - error_device_descriptor: -- usb_autosuspend_device(usb_dev); - error_autoresume: - out_authorized: - usb_unlock_device(usb_dev); // complements locktree -@@ -3286,9 +3278,6 @@ static void hub_events(void) - USB_PORT_FEAT_C_SUSPEND); - udev = hdev->children[i-1]; - if (udev) { -- /* TRSMRCY = 10 msec */ -- msleep(10); -- - usb_lock_device(udev); - ret = remote_wakeup(hdev-> - children[i-1]); -diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c -index 980a8d2..da718e8 100644 ---- a/drivers/usb/core/message.c -+++ b/drivers/usb/core/message.c -@@ -911,11 +911,11 @@ char *usb_cache_string(struct usb_device *udev, int index) - if (index <= 0) - return NULL; - -- buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO); -+ buf = kmalloc(MAX_USB_STRING_SIZE, GFP_KERNEL); - if (buf) { - len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE); - if (len > 0) { -- smallbuf = kmalloc(++len, GFP_NOIO); -+ smallbuf = kmalloc(++len, GFP_KERNEL); - if (!smallbuf) - return buf; - memcpy(smallbuf, buf, len); -@@ -1682,7 +1682,7 @@ int usb_set_configuration(struct usb_device *dev, int configuration) - if (cp) { - nintf = cp->desc.bNumInterfaces; - new_interfaces = kmalloc(nintf * sizeof(*new_interfaces), -- GFP_NOIO); -+ GFP_KERNEL); - if (!new_interfaces) { - dev_err(&dev->dev, "Out of memory\n"); - return -ENOMEM; -@@ -1691,7 +1691,7 @@ int usb_set_configuration(struct usb_device *dev, int configuration) - for (; n < nintf; ++n) { - new_interfaces[n] = kzalloc( - sizeof(struct usb_interface), -- GFP_NOIO); -+ GFP_KERNEL); - if (!new_interfaces[n]) { - dev_err(&dev->dev, "Out of memory\n"); - ret = -ENOMEM; -diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c -index fcdcad4..7ec3041 100644 ---- a/drivers/usb/core/sysfs.c -+++ b/drivers/usb/core/sysfs.c -@@ -82,13 +82,9 @@ static ssize_t show_##name(struct device *dev, \ - struct device_attribute *attr, char *buf) \ - { \ - struct usb_device *udev; \ -- int retval; \ - \ - udev = to_usb_device(dev); \ -- usb_lock_device(udev); \ -- retval = sprintf(buf, "%s\n", udev->name); \ -- usb_unlock_device(udev); \ -- return retval; \ -+ return sprintf(buf, "%s\n", udev->name); \ - } \ - static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL); - -@@ -115,12 +111,6 @@ show_speed(struct device *dev, struct device_attribute *attr, char *buf) - case USB_SPEED_HIGH: - speed = "480"; - break; -- case USB_SPEED_VARIABLE: -- speed = "480"; -- break; -- case USB_SPEED_SUPER: -- speed = "5000"; -- break; - default: - speed = "unknown"; - } -diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c -index 52e5e31..b1b85ab 100644 ---- a/drivers/usb/core/usb.c -+++ b/drivers/usb/core/usb.c -@@ -132,7 +132,7 @@ EXPORT_SYMBOL_GPL(usb_altnum_to_altsetting); - - struct find_interface_arg { - int minor; -- struct device_driver *drv; -+ struct usb_interface *interface; - }; - - static int __find_interface(struct device *dev, void *data) -@@ -143,10 +143,12 @@ static int __find_interface(struct device *dev, void *data) - if (!is_usb_interface(dev)) - return 0; - -- if (dev->driver != arg->drv) -- return 0; - intf = to_usb_interface(dev); -- return intf->minor == arg->minor; -+ if (intf->minor != -1 && intf->minor == arg->minor) { -+ arg->interface = intf; -+ return 1; -+ } -+ return 0; - } - - /** -@@ -154,24 +156,21 @@ static int __find_interface(struct device *dev, void *data) - * 
@drv: the driver whose current configuration is considered - * @minor: the minor number of the desired device - * -- * This walks the bus device list and returns a pointer to the interface -- * with the matching minor and driver. Note, this only works for devices -- * that share the USB major number. -+ * This walks the driver device list and returns a pointer to the interface -+ * with the matching minor. Note, this only works for devices that share the -+ * USB major number. - */ - struct usb_interface *usb_find_interface(struct usb_driver *drv, int minor) - { - struct find_interface_arg argb; -- struct device *dev; -+ int retval; - - argb.minor = minor; -- argb.drv = &drv->drvwrap.driver; -- -- dev = bus_find_device(&usb_bus_type, NULL, &argb, __find_interface); -- -- /* Drop reference count from bus_find_device */ -- put_device(dev); -- -- return dev ? to_usb_interface(dev) : NULL; -+ argb.interface = NULL; -+ /* eat the error, it will be in argb.interface */ -+ retval = driver_for_each_device(&drv->drvwrap.driver, NULL, &argb, -+ __find_interface); -+ return argb.interface; - } - EXPORT_SYMBOL_GPL(usb_find_interface); - -diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c -index e18c677..f5f5601 100644 ---- a/drivers/usb/host/ehci-hcd.c -+++ b/drivers/usb/host/ehci-hcd.c -@@ -785,10 +785,9 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd) - - /* start 20 msec resume signaling from this port, - * and make khubd collect PORT_STAT_C_SUSPEND to -- * stop that signaling. Use 5 ms extra for safety, -- * like usb_port_resume() does. -+ * stop that signaling. - */ -- ehci->reset_done[i] = jiffies + msecs_to_jiffies(25); -+ ehci->reset_done [i] = jiffies + msecs_to_jiffies (20); - ehci_dbg (ehci, "port %d remote wakeup\n", i + 1); - mod_timer(&hcd->rh_timer, ehci->reset_done[i]); - } -diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c -index 698f461..1b6f1c0 100644 ---- a/drivers/usb/host/ehci-hub.c -+++ b/drivers/usb/host/ehci-hub.c -@@ -120,26 +120,9 @@ static int ehci_bus_suspend (struct usb_hcd *hcd) - del_timer_sync(&ehci->watchdog); - del_timer_sync(&ehci->iaa_watchdog); - -+ port = HCS_N_PORTS (ehci->hcs_params); - spin_lock_irq (&ehci->lock); - -- /* Once the controller is stopped, port resumes that are already -- * in progress won't complete. Hence if remote wakeup is enabled -- * for the root hub and any ports are in the middle of a resume or -- * remote wakeup, we must fail the suspend. -- */ -- if (hcd->self.root_hub->do_remote_wakeup) { -- port = HCS_N_PORTS(ehci->hcs_params); -- while (port--) { -- if (ehci->reset_done[port] != 0) { -- spin_unlock_irq(&ehci->lock); -- ehci_dbg(ehci, "suspend failed because " -- "port %d is resuming\n", -- port + 1); -- return -EBUSY; -- } -- } -- } -- - /* stop schedules, clean any completed work */ - if (HC_IS_RUNNING(hcd->state)) { - ehci_quiesce (ehci); -@@ -155,7 +138,6 @@ static int ehci_bus_suspend (struct usb_hcd *hcd) - */ - ehci->bus_suspended = 0; - ehci->owned_ports = 0; -- port = HCS_N_PORTS(ehci->hcs_params); - while (port--) { - u32 __iomem *reg = &ehci->regs->port_status [port]; - u32 t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS; -diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c -index c0d4b39..139a2cc 100644 ---- a/drivers/usb/host/ehci-q.c -+++ b/drivers/usb/host/ehci-q.c -@@ -827,10 +827,9 @@ qh_make ( - * But interval 1 scheduling is simpler, and - * includes high bandwidth. 
- */ -- urb->interval = 1; -- } else if (qh->period > ehci->periodic_size) { -- qh->period = ehci->periodic_size; -- urb->interval = qh->period << 3; -+ dbg ("intr period %d uframes, NYET!", -+ urb->interval); -+ goto done; - } - } else { - int think_time; -@@ -853,10 +852,6 @@ qh_make ( - usb_calc_bus_time (urb->dev->speed, - is_input, 0, max_packet (maxp))); - qh->period = urb->interval; -- if (qh->period > ehci->periodic_size) { -- qh->period = ehci->periodic_size; -- urb->interval = qh->period; -- } - } - } - -diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c -index 9260c74..e33d362 100644 ---- a/drivers/usb/host/r8a66597-hcd.c -+++ b/drivers/usb/host/r8a66597-hcd.c -@@ -35,9 +35,7 @@ - #include - #include - #include --#include - #include --#include - - #include "../core/hcd.h" - #include "r8a66597.h" -@@ -218,17 +216,8 @@ static void disable_controller(struct r8a66597 *r8a66597) - { - int port; - -- /* disable interrupts */ - r8a66597_write(r8a66597, 0, INTENB0); -- r8a66597_write(r8a66597, 0, INTENB1); -- r8a66597_write(r8a66597, 0, BRDYENB); -- r8a66597_write(r8a66597, 0, BEMPENB); -- r8a66597_write(r8a66597, 0, NRDYENB); -- -- /* clear status */ -- r8a66597_write(r8a66597, 0, BRDYSTS); -- r8a66597_write(r8a66597, 0, NRDYSTS); -- r8a66597_write(r8a66597, 0, BEMPSTS); -+ r8a66597_write(r8a66597, 0, INTSTS0); - - for (port = 0; port < r8a66597->max_root_hub; port++) - r8a66597_disable_port(r8a66597, port); -@@ -822,26 +811,6 @@ static void enable_r8a66597_pipe(struct r8a66597 *r8a66597, struct urb *urb, - enable_r8a66597_pipe_dma(r8a66597, dev, pipe, urb); - } - --static void r8a66597_urb_done(struct r8a66597 *r8a66597, struct urb *urb, -- int status) --__releases(r8a66597->lock) --__acquires(r8a66597->lock) --{ -- if (usb_pipein(urb->pipe) && usb_pipetype(urb->pipe) != PIPE_CONTROL) { -- void *ptr; -- -- for (ptr = urb->transfer_buffer; -- ptr < urb->transfer_buffer + urb->transfer_buffer_length; -- ptr += PAGE_SIZE) -- flush_dcache_page(virt_to_page(ptr)); -- } -- -- usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), urb); -- spin_unlock(&r8a66597->lock); -- usb_hcd_giveback_urb(r8a66597_to_hcd(r8a66597), urb, status); -- spin_lock(&r8a66597->lock); --} -- - /* this function must be called with interrupt disabled */ - static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address) - { -@@ -862,9 +831,15 @@ static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address) - list_del(&td->queue); - kfree(td); - -- if (urb) -- r8a66597_urb_done(r8a66597, urb, -ENODEV); -+ if (urb) { -+ usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), -+ urb); - -+ spin_unlock(&r8a66597->lock); -+ usb_hcd_giveback_urb(r8a66597_to_hcd(r8a66597), urb, -+ -ENODEV); -+ spin_lock(&r8a66597->lock); -+ } - break; - } - } -@@ -1301,7 +1276,10 @@ __releases(r8a66597->lock) __acquires(r8a66597->lock) - if (usb_pipeisoc(urb->pipe)) - urb->start_frame = r8a66597_get_frame(hcd); - -- r8a66597_urb_done(r8a66597, urb, status); -+ usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), urb); -+ spin_unlock(&r8a66597->lock); -+ usb_hcd_giveback_urb(hcd, urb, status); -+ spin_lock(&r8a66597->lock); - } - - if (restart) { -@@ -2492,12 +2470,6 @@ static int __devinit r8a66597_probe(struct platform_device *pdev) - r8a66597->rh_timer.data = (unsigned long)r8a66597; - r8a66597->reg = (unsigned long)reg; - -- /* make sure no interrupts are pending */ -- ret = r8a66597_clock_enable(r8a66597); -- if (ret < 0) -- goto clean_up3; -- disable_controller(r8a66597); -- - 
for (i = 0; i < R8A66597_MAX_NUM_PIPE; i++) { - INIT_LIST_HEAD(&r8a66597->pipe_queue[i]); - init_timer(&r8a66597->td_timer[i]); -diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c -index 99cd00f..5cd0e48 100644 ---- a/drivers/usb/host/uhci-hcd.c -+++ b/drivers/usb/host/uhci-hcd.c -@@ -749,20 +749,7 @@ static int uhci_rh_suspend(struct usb_hcd *hcd) - spin_lock_irq(&uhci->lock); - if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) - rc = -ESHUTDOWN; -- else if (uhci->dead) -- ; /* Dead controllers tell no tales */ -- -- /* Once the controller is stopped, port resumes that are already -- * in progress won't complete. Hence if remote wakeup is enabled -- * for the root hub and any ports are in the middle of a resume or -- * remote wakeup, we must fail the suspend. -- */ -- else if (hcd->self.root_hub->do_remote_wakeup && -- uhci->resuming_ports) { -- dev_dbg(uhci_dev(uhci), "suspend failed because a port " -- "is resuming\n"); -- rc = -EBUSY; -- } else -+ else if (!uhci->dead) - suspend_rh(uhci, UHCI_RH_SUSPENDED); - spin_unlock_irq(&uhci->lock); - return rc; -diff --git a/drivers/usb/host/uhci-hub.c b/drivers/usb/host/uhci-hub.c -index 8270055..885b585 100644 ---- a/drivers/usb/host/uhci-hub.c -+++ b/drivers/usb/host/uhci-hub.c -@@ -167,7 +167,7 @@ static void uhci_check_ports(struct uhci_hcd *uhci) - /* Port received a wakeup request */ - set_bit(port, &uhci->resuming_ports); - uhci->ports_timeout = jiffies + -- msecs_to_jiffies(25); -+ msecs_to_jiffies(20); - - /* Make sure we see the port again - * after the resuming period is over. */ -diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c -index 62ff5e7..1d8e39a 100644 ---- a/drivers/usb/misc/appledisplay.c -+++ b/drivers/usb/misc/appledisplay.c -@@ -72,8 +72,8 @@ struct appledisplay { - struct usb_device *udev; /* usb device */ - struct urb *urb; /* usb request block */ - struct backlight_device *bd; /* backlight device */ -- u8 *urbdata; /* interrupt URB data buffer */ -- u8 *msgdata; /* control message data buffer */ -+ char *urbdata; /* interrupt URB data buffer */ -+ char *msgdata; /* control message data buffer */ - - struct delayed_work work; - int button_pressed; -diff --git a/drivers/usb/misc/emi62.c b/drivers/usb/misc/emi62.c -index 59860b3..602ee05 100644 ---- a/drivers/usb/misc/emi62.c -+++ b/drivers/usb/misc/emi62.c -@@ -167,7 +167,7 @@ static int emi62_load_firmware (struct usb_device *dev) - err("%s - error loading firmware: error = %d", __func__, err); - goto wraperr; - } -- } while (rec); -+ } while (i > 0); - - /* Assert reset (stop the CPU in the EMI) */ - err = emi62_set_reset(dev,1); -diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c -index 067e5a9..522efb3 100644 ---- a/drivers/usb/musb/musb_gadget_ep0.c -+++ b/drivers/usb/musb/musb_gadget_ep0.c -@@ -199,6 +199,7 @@ service_in_request(struct musb *musb, const struct usb_ctrlrequest *ctrlrequest) - static void musb_g_ep0_giveback(struct musb *musb, struct usb_request *req) - { - musb_g_giveback(&musb->endpoints[0].ep_in, req, 0); -+ musb->ep0_state = MUSB_EP0_STAGE_SETUP; - } - - /* -@@ -647,7 +648,7 @@ irqreturn_t musb_g_ep0_irq(struct musb *musb) - musb->ep0_state = MUSB_EP0_STAGE_STATUSIN; - break; - default: -- ERR("SetupEnd came in a wrong ep0stage %s\n", -+ ERR("SetupEnd came in a wrong ep0stage %s", - decode_ep0stage(musb->ep0_state)); - } - csr = musb_readw(regs, MUSB_CSR0); -@@ -770,18 +771,12 @@ setup: - handled = service_zero_data_request( - musb, &setup); - -- /* -- * We're 
expecting no data in any case, so -- * always set the DATAEND bit -- doing this -- * here helps avoid SetupEnd interrupt coming -- * in the idle stage when we're stalling... -- */ -- musb->ackpend |= MUSB_CSR0_P_DATAEND; -- - /* status stage might be immediate */ -- if (handled > 0) -+ if (handled > 0) { -+ musb->ackpend |= MUSB_CSR0_P_DATAEND; - musb->ep0_state = - MUSB_EP0_STAGE_STATUSIN; -+ } - break; - - /* sequence #1 (IN to host), includes GET_STATUS -diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c -index 13a1b39..ebcc6d0 100644 ---- a/drivers/usb/serial/ftdi_sio.c -+++ b/drivers/usb/serial/ftdi_sio.c -@@ -598,20 +598,6 @@ static struct usb_device_id id_table_combined [] = { - { USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) }, - { USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) }, - { USB_DEVICE(BANDB_VID, BANDB_USO9ML2_PID) }, -- { USB_DEVICE(BANDB_VID, BANDB_USOPTL4_PID) }, -- { USB_DEVICE(BANDB_VID, BANDB_USPTL4_PID) }, -- { USB_DEVICE(BANDB_VID, BANDB_USO9ML2DR_2_PID) }, -- { USB_DEVICE(BANDB_VID, BANDB_USO9ML2DR_PID) }, -- { USB_DEVICE(BANDB_VID, BANDB_USOPTL4DR2_PID) }, -- { USB_DEVICE(BANDB_VID, BANDB_USOPTL4DR_PID) }, -- { USB_DEVICE(BANDB_VID, BANDB_485USB9F_2W_PID) }, -- { USB_DEVICE(BANDB_VID, BANDB_485USB9F_4W_PID) }, -- { USB_DEVICE(BANDB_VID, BANDB_232USB9M_PID) }, -- { USB_DEVICE(BANDB_VID, BANDB_485USBTB_2W_PID) }, -- { USB_DEVICE(BANDB_VID, BANDB_485USBTB_4W_PID) }, -- { USB_DEVICE(BANDB_VID, BANDB_TTL5USB9M_PID) }, -- { USB_DEVICE(BANDB_VID, BANDB_TTL3USB9M_PID) }, -- { USB_DEVICE(BANDB_VID, BANDB_ZZ_PROG1_USB_PID) }, - { USB_DEVICE(FTDI_VID, EVER_ECO_PRO_CDS) }, - { USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_1_PID) }, - { USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_2_PID) }, -diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h -index 4586a24..6f31e0d 100644 ---- a/drivers/usb/serial/ftdi_sio.h -+++ b/drivers/usb/serial/ftdi_sio.h -@@ -662,20 +662,6 @@ - #define BANDB_USOTL4_PID 0xAC01 /* USOTL4 Isolated RS-485 Converter */ - #define BANDB_USTL4_PID 0xAC02 /* USTL4 RS-485 Converter */ - #define BANDB_USO9ML2_PID 0xAC03 /* USO9ML2 Isolated RS-232 Converter */ --#define BANDB_USOPTL4_PID 0xAC11 --#define BANDB_USPTL4_PID 0xAC12 --#define BANDB_USO9ML2DR_2_PID 0xAC16 --#define BANDB_USO9ML2DR_PID 0xAC17 --#define BANDB_USOPTL4DR2_PID 0xAC18 /* USOPTL4R-2 2-port Isolated RS-232 Converter */ --#define BANDB_USOPTL4DR_PID 0xAC19 --#define BANDB_485USB9F_2W_PID 0xAC25 --#define BANDB_485USB9F_4W_PID 0xAC26 --#define BANDB_232USB9M_PID 0xAC27 --#define BANDB_485USBTB_2W_PID 0xAC33 --#define BANDB_485USBTB_4W_PID 0xAC34 --#define BANDB_TTL5USB9M_PID 0xAC49 --#define BANDB_TTL3USB9M_PID 0xAC50 --#define BANDB_ZZ_PROG1_USB_PID 0xBA02 - - /* - * RM Michaelides CANview USB (http://www.rmcan.com) -diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c -index e0fb294..bbe005c 100644 ---- a/drivers/usb/serial/generic.c -+++ b/drivers/usb/serial/generic.c -@@ -489,8 +489,6 @@ void usb_serial_generic_write_bulk_callback(struct urb *urb) - dbg("%s - port %d", __func__, port->number); - - if (port->serial->type->max_in_flight_urbs) { -- kfree(urb->transfer_buffer); -- - spin_lock_irqsave(&port->lock, flags); - --port->urbs_in_flight; - port->tx_bytes_flight -= urb->transfer_buffer_length; -diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c -index 485fa9c..f11abf5 100644 ---- a/drivers/usb/serial/mos7840.c -+++ b/drivers/usb/serial/mos7840.c -@@ -121,14 +121,8 @@ - * moschip_id_table_combined - */ - #define 
USB_VENDOR_ID_BANDB 0x0856 --#define BANDB_DEVICE_ID_USO9ML2_2 0xAC22 --#define BANDB_DEVICE_ID_USO9ML2_4 0xAC24 --#define BANDB_DEVICE_ID_US9ML2_2 0xAC29 --#define BANDB_DEVICE_ID_US9ML2_4 0xAC30 --#define BANDB_DEVICE_ID_USPTL4_2 0xAC31 --#define BANDB_DEVICE_ID_USPTL4_4 0xAC32 --#define BANDB_DEVICE_ID_USOPTL4_2 0xAC42 - #define BANDB_DEVICE_ID_USOPTL4_4 0xAC44 -+#define BANDB_DEVICE_ID_USOPTL4_2 0xAC42 - - /* This driver also supports - * ATEN UC2324 device using Moschip MCS7840 -@@ -183,14 +177,8 @@ - static struct usb_device_id moschip_port_id_table[] = { - {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, - {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, -- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)}, -- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)}, -- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)}, -- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)}, -- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)}, -- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)}, -- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)}, - {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)}, -+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)}, - {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)}, - {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)}, - {} /* terminating entry */ -@@ -199,14 +187,8 @@ static struct usb_device_id moschip_port_id_table[] = { - static __devinitdata struct usb_device_id moschip_id_table_combined[] = { - {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, - {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, -- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)}, -- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)}, -- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)}, -- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)}, -- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)}, -- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)}, -- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)}, - {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)}, -+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)}, - {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)}, - {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)}, - {} /* terminating entry */ -diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c -index be3dff1..0577e4b 100644 ---- a/drivers/usb/serial/option.c -+++ b/drivers/usb/serial/option.c -@@ -340,10 +340,6 @@ static int option_resume(struct usb_serial *serial); - #define FOUR_G_SYSTEMS_VENDOR_ID 0x1c9e - #define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603 - --/* Haier products */ --#define HAIER_VENDOR_ID 0x201e --#define HAIER_PRODUCT_CE100 0x2009 -- - static struct usb_device_id option_ids[] = { - { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, - { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, -@@ -584,48 +580,12 @@ static struct usb_device_id option_ids[] = { - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0086, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) }, -- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0104, 0xff, 0xff, 0xff) }, -- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0106, 0xff, 0xff, 0xff) }, -- { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0108, 0xff, 0xff, 0xff) }, -- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0113, 0xff, 0xff, 0xff) }, -- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0117, 0xff, 0xff, 0xff) }, -- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0118, 0xff, 0xff, 0xff) }, -- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0121, 0xff, 0xff, 0xff) }, -- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0122, 0xff, 0xff, 0xff) }, -- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0123, 0xff, 0xff, 0xff) }, -- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0124, 0xff, 0xff, 0xff) }, -- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0125, 0xff, 0xff, 0xff) }, -- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0126, 0xff, 0xff, 0xff) }, -- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0128, 0xff, 0xff, 0xff) }, -- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0142, 0xff, 0xff, 0xff) }, -- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0143, 0xff, 0xff, 0xff) }, -- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0144, 0xff, 0xff, 0xff) }, -- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0145, 0xff, 0xff, 0xff) }, -- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0146, 0xff, 0xff, 0xff) }, -- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0147, 0xff, 0xff, 0xff) }, -- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0148, 0xff, 0xff, 0xff) }, -- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0149, 0xff, 0xff, 0xff) }, -- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0150, 0xff, 0xff, 0xff) }, -- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0151, 0xff, 0xff, 0xff) }, -- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0152, 0xff, 0xff, 0xff) }, -- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0153, 0xff, 0xff, 0xff) }, -- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0154, 0xff, 0xff, 0xff) }, -- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0155, 0xff, 0xff, 0xff) }, -- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0156, 0xff, 0xff, 0xff) }, -- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0157, 0xff, 0xff, 0xff) }, -- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0158, 0xff, 0xff, 0xff) }, -- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0159, 0xff, 0xff, 0xff) }, -- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0160, 0xff, 0xff, 0xff) }, -- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) }, -- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, /* ZTE CDMA products */ - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0027, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0060, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) }, -- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff) }, -- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) }, -@@ -639,13 +599,11 @@ static struct usb_device_id option_ids[] = { - { USB_DEVICE(TOSHIBA_VENDOR_ID, 
TOSHIBA_PRODUCT_G450) }, - { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_HSDPA_MINICARD ) }, /* Toshiba 3G HSDPA == Novatel Expedite EU870D MiniCard */ - { USB_DEVICE(ALINK_VENDOR_ID, 0x9000) }, -- { USB_DEVICE(ALINK_VENDOR_ID, 0xce16) }, - { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) }, - { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S) }, - { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) }, - { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) }, - { USB_DEVICE(FOUR_G_SYSTEMS_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14) }, -- { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) }, - { } /* Terminating entry */ - }; - MODULE_DEVICE_TABLE(usb, option_ids); -diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c -index cc313d1..589f6b4 100644 ---- a/drivers/usb/storage/transport.c -+++ b/drivers/usb/storage/transport.c -@@ -666,11 +666,10 @@ void usb_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us) - * to wait for at least one CHECK_CONDITION to determine - * SANE_SENSE support - */ -- if (unlikely((srb->cmnd[0] == ATA_16 || srb->cmnd[0] == ATA_12) && -+ if ((srb->cmnd[0] == ATA_16 || srb->cmnd[0] == ATA_12) && - result == USB_STOR_TRANSPORT_GOOD && - !(us->fflags & US_FL_SANE_SENSE) && -- !(us->fflags & US_FL_BAD_SENSE) && -- !(srb->cmnd[2] & 0x20))) { -+ !(srb->cmnd[2] & 0x20)) { - US_DEBUGP("-- SAT supported, increasing auto-sense\n"); - us->fflags |= US_FL_SANE_SENSE; - } -@@ -719,12 +718,6 @@ Retry_Sense: - if (test_bit(US_FLIDX_TIMED_OUT, &us->dflags)) { - US_DEBUGP("-- auto-sense aborted\n"); - srb->result = DID_ABORT << 16; -- -- /* If SANE_SENSE caused this problem, disable it */ -- if (sense_size != US_SENSE_SIZE) { -- us->fflags &= ~US_FL_SANE_SENSE; -- us->fflags |= US_FL_BAD_SENSE; -- } - goto Handle_Errors; - } - -@@ -734,11 +727,10 @@ Retry_Sense: - * (small) sense request. 
This fixes some USB GSM modems - */ - if (temp_result == USB_STOR_TRANSPORT_FAILED && -- sense_size != US_SENSE_SIZE) { -+ (us->fflags & US_FL_SANE_SENSE) && -+ sense_size != US_SENSE_SIZE) { - US_DEBUGP("-- auto-sense failure, retry small sense\n"); - sense_size = US_SENSE_SIZE; -- us->fflags &= ~US_FL_SANE_SENSE; -- us->fflags |= US_FL_BAD_SENSE; - goto Retry_Sense; - } - -@@ -762,7 +754,6 @@ Retry_Sense: - */ - if (srb->sense_buffer[7] > (US_SENSE_SIZE - 8) && - !(us->fflags & US_FL_SANE_SENSE) && -- !(us->fflags & US_FL_BAD_SENSE) && - (srb->sense_buffer[0] & 0x7C) == 0x70) { - US_DEBUGP("-- SANE_SENSE support enabled\n"); - us->fflags |= US_FL_SANE_SENSE; -diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h -index c932f90..d4f034e 100644 ---- a/drivers/usb/storage/unusual_devs.h -+++ b/drivers/usb/storage/unusual_devs.h -@@ -818,13 +818,6 @@ UNUSUAL_DEV( 0x066f, 0x8000, 0x0001, 0x0001, - US_SC_DEVICE, US_PR_DEVICE, NULL, - US_FL_FIX_CAPACITY ), - --/* Reported by Daniel Kukula */ --UNUSUAL_DEV( 0x067b, 0x1063, 0x0100, 0x0100, -- "Prolific Technology, Inc.", -- "Prolific Storage Gadget", -- US_SC_DEVICE, US_PR_DEVICE, NULL, -- US_FL_BAD_SENSE ), -- - /* Reported by Rogerio Brito */ - UNUSUAL_DEV( 0x067b, 0x2317, 0x0001, 0x001, - "Prolific Technology, Inc.", -@@ -1807,6 +1800,13 @@ UNUSUAL_DEV( 0x2735, 0x100b, 0x0000, 0x9999, - US_SC_DEVICE, US_PR_DEVICE, NULL, - US_FL_GO_SLOW ), - -+/* Reported by Rohan Hart */ -+UNUSUAL_DEV( 0x2770, 0x915d, 0x0010, 0x0010, -+ "INTOVA", -+ "Pixtreme", -+ US_SC_DEVICE, US_PR_DEVICE, NULL, -+ US_FL_FIX_CAPACITY ), -+ - /* Reported by Frederic Marchal - * Mio Moov 330 - */ -diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c -index 33197fa..8060b85 100644 ---- a/drivers/usb/storage/usb.c -+++ b/drivers/usb/storage/usb.c -@@ -228,7 +228,6 @@ void fill_inquiry_response(struct us_data *us, unsigned char *data, - if (data_len<36) // You lose. - return; - -- memset(data+8, ' ', 28); - if(data[0]&0x20) { /* USB device currently not connected. Return - peripheral qualifier 001b ("...however, the - physical device is not currently connected -@@ -238,15 +237,15 @@ void fill_inquiry_response(struct us_data *us, unsigned char *data, - device, it may return zeros or ASCII spaces - (20h) in those fields until the data is - available from the device."). */ -+ memset(data+8,0,28); - } else { - u16 bcdDevice = le16_to_cpu(us->pusb_dev->descriptor.bcdDevice); -- int n; -- -- n = strlen(us->unusual_dev->vendorName); -- memcpy(data+8, us->unusual_dev->vendorName, min(8, n)); -- n = strlen(us->unusual_dev->productName); -- memcpy(data+16, us->unusual_dev->productName, min(16, n)); -- -+ memcpy(data+8, us->unusual_dev->vendorName, -+ strlen(us->unusual_dev->vendorName) > 8 ? 8 : -+ strlen(us->unusual_dev->vendorName)); -+ memcpy(data+16, us->unusual_dev->productName, -+ strlen(us->unusual_dev->productName) > 16 ? 
16 : -+ strlen(us->unusual_dev->productName)); - data[32] = 0x30 + ((bcdDevice>>12) & 0x0F); - data[33] = 0x30 + ((bcdDevice>>8) & 0x0F); - data[34] = 0x30 + ((bcdDevice>>4) & 0x0F); -@@ -430,8 +429,7 @@ static void adjust_quirks(struct us_data *us) - u16 vid = le16_to_cpu(us->pusb_dev->descriptor.idVendor); - u16 pid = le16_to_cpu(us->pusb_dev->descriptor.idProduct); - unsigned f = 0; -- unsigned int mask = (US_FL_SANE_SENSE | US_FL_BAD_SENSE | -- US_FL_FIX_CAPACITY | -+ unsigned int mask = (US_FL_SANE_SENSE | US_FL_FIX_CAPACITY | - US_FL_CAPACITY_HEURISTICS | US_FL_IGNORE_DEVICE | - US_FL_NOT_LOCKABLE | US_FL_MAX_SECTORS_64 | - US_FL_CAPACITY_OK | US_FL_IGNORE_RESIDUE | -@@ -461,9 +459,6 @@ static void adjust_quirks(struct us_data *us) - case 'a': - f |= US_FL_SANE_SENSE; - break; -- case 'b': -- f |= US_FL_BAD_SENSE; -- break; - case 'c': - f |= US_FL_FIX_CAPACITY; - break; -diff --git a/drivers/video/imxfb.c b/drivers/video/imxfb.c -index b4b6dec..66358fa 100644 ---- a/drivers/video/imxfb.c -+++ b/drivers/video/imxfb.c -@@ -593,8 +593,7 @@ static int imxfb_activate_var(struct fb_var_screeninfo *var, struct fb_info *inf - */ - static int imxfb_suspend(struct platform_device *dev, pm_message_t state) - { -- struct fb_info *info = platform_get_drvdata(dev); -- struct imxfb_info *fbi = info->par; -+ struct imxfb_info *fbi = platform_get_drvdata(dev); - - pr_debug("%s\n", __func__); - -@@ -604,8 +603,7 @@ static int imxfb_suspend(struct platform_device *dev, pm_message_t state) - - static int imxfb_resume(struct platform_device *dev) - { -- struct fb_info *info = platform_get_drvdata(dev); -- struct imxfb_info *fbi = info->par; -+ struct imxfb_info *fbi = platform_get_drvdata(dev); - - pr_debug("%s\n", __func__); - -diff --git a/drivers/video/matrox/g450_pll.c b/drivers/video/matrox/g450_pll.c -index c15f8a5..09f6e04 100644 ---- a/drivers/video/matrox/g450_pll.c -+++ b/drivers/video/matrox/g450_pll.c -@@ -368,8 +368,7 @@ static int __g450_setclk(struct matrox_fb_info *minfo, unsigned int fout, - M1064_XDVICLKCTRL_C1DVICLKEN | - M1064_XDVICLKCTRL_DVILOOPCTL | - M1064_XDVICLKCTRL_P1LOOPBWDTCTL; -- /* Setting this breaks PC systems so don't do it */ -- /* matroxfb_DAC_out(minfo, M1064_XDVICLKCTRL, tmp); */ -+ matroxfb_DAC_out(minfo, M1064_XDVICLKCTRL, tmp); - matroxfb_DAC_out(minfo, M1064_XPWRCTRL, - xpwrctrl); - -diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c -index 772ba3f..054ef29 100644 ---- a/drivers/video/mx3fb.c -+++ b/drivers/video/mx3fb.c -@@ -324,11 +324,8 @@ static void sdc_enable_channel(struct mx3fb_info *mx3_fbi) - unsigned long flags; - dma_cookie_t cookie; - -- if (mx3_fbi->txd) -- dev_dbg(mx3fb->dev, "mx3fbi %p, desc %p, sg %p\n", mx3_fbi, -- to_tx_desc(mx3_fbi->txd), to_tx_desc(mx3_fbi->txd)->sg); -- else -- dev_dbg(mx3fb->dev, "mx3fbi %p, txd = NULL\n", mx3_fbi); -+ dev_dbg(mx3fb->dev, "mx3fbi %p, desc %p, sg %p\n", mx3_fbi, -+ to_tx_desc(mx3_fbi->txd), to_tx_desc(mx3_fbi->txd)->sg); - - /* This enables the channel */ - if (mx3_fbi->cookie < 0) { -@@ -649,7 +646,6 @@ static int sdc_set_global_alpha(struct mx3fb_data *mx3fb, bool enable, uint8_t a - - static void sdc_set_brightness(struct mx3fb_data *mx3fb, uint8_t value) - { -- dev_dbg(mx3fb->dev, "%s: value = %d\n", __func__, value); - /* This might be board-specific */ - mx3fb_write_reg(mx3fb, 0x03000000UL | value << 16, SDC_PWM_CTRL); - return; -@@ -1490,12 +1486,12 @@ static int mx3fb_probe(struct platform_device *pdev) - goto ersdc0; - } - -- mx3fb->backlight_level = 255; -- - ret = init_fb_chan(mx3fb, 
to_idmac_chan(chan)); - if (ret < 0) - goto eisdc0; - -+ mx3fb->backlight_level = 255; -+ - return 0; - - eisdc0: -diff --git a/drivers/video/s3c-fb.c b/drivers/video/s3c-fb.c -index 53cb722..adf9632 100644 ---- a/drivers/video/s3c-fb.c -+++ b/drivers/video/s3c-fb.c -@@ -211,23 +211,21 @@ static int s3c_fb_check_var(struct fb_var_screeninfo *var, - - /** - * s3c_fb_calc_pixclk() - calculate the divider to create the pixel clock. -+ * @id: window id. - * @sfb: The hardware state. - * @pixclock: The pixel clock wanted, in picoseconds. - * - * Given the specified pixel clock, work out the necessary divider to get - * close to the output frequency. - */ --static int s3c_fb_calc_pixclk(struct s3c_fb *sfb, unsigned int pixclk) -+static int s3c_fb_calc_pixclk(unsigned char id, struct s3c_fb *sfb, unsigned int pixclk) - { -+ struct s3c_fb_pd_win *win = sfb->pdata->win[id]; - unsigned long clk = clk_get_rate(sfb->bus_clk); -- unsigned long long tmp; - unsigned int result; - -- tmp = (unsigned long long)clk; -- tmp *= pixclk; -- -- do_div(tmp, 1000000000UL); -- result = (unsigned int)tmp / 1000; -+ pixclk *= win->win_mode.refresh; -+ result = clk / pixclk; - - dev_dbg(sfb->dev, "pixclk=%u, clk=%lu, div=%d (%lu)\n", - pixclk, clk, result, clk / result); -@@ -303,7 +301,7 @@ static int s3c_fb_set_par(struct fb_info *info) - /* use window 0 as the basis for the lcd output timings */ - - if (win_no == 0) { -- clkdiv = s3c_fb_calc_pixclk(sfb, var->pixclock); -+ clkdiv = s3c_fb_calc_pixclk(win_no, sfb, var->pixclock); - - data = sfb->pdata->vidcon0; - data &= ~(VIDCON0_CLKVAL_F_MASK | VIDCON0_CLKDIR); -diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c -index 4bdb7f1..6a51edd 100644 ---- a/drivers/watchdog/iTCO_wdt.c -+++ b/drivers/watchdog/iTCO_wdt.c -@@ -1,5 +1,5 @@ - /* -- * intel TCO Watchdog Driver -+ * intel TCO Watchdog Driver (Used in i82801 and i63xxESB chipsets) - * - * (c) Copyright 2006-2009 Wim Van Sebroeck . - * -@@ -14,24 +14,47 @@ - * - * The TCO watchdog is implemented in the following I/O controller hubs: - * (See the intel documentation on http://developer.intel.com.) 
-- * document number 290655-003, 290677-014: 82801AA (ICH), 82801AB (ICHO) -- * document number 290687-002, 298242-027: 82801BA (ICH2) -- * document number 290733-003, 290739-013: 82801CA (ICH3-S) -- * document number 290716-001, 290718-007: 82801CAM (ICH3-M) -- * document number 290744-001, 290745-025: 82801DB (ICH4) -- * document number 252337-001, 252663-008: 82801DBM (ICH4-M) -- * document number 273599-001, 273645-002: 82801E (C-ICH) -- * document number 252516-001, 252517-028: 82801EB (ICH5), 82801ER (ICH5R) -- * document number 300641-004, 300884-013: 6300ESB -- * document number 301473-002, 301474-026: 82801F (ICH6) -- * document number 313082-001, 313075-006: 631xESB, 632xESB -- * document number 307013-003, 307014-024: 82801G (ICH7) -- * document number 313056-003, 313057-017: 82801H (ICH8) -- * document number 316972-004, 316973-012: 82801I (ICH9) -- * document number 319973-002, 319974-002: 82801J (ICH10) -- * document number 322169-001, 322170-003: 5 Series, 3400 Series (PCH) -- * document number 320066-003, 320257-008: EP80597 (IICH) -- * document number TBD : Cougar Point (CPT) -+ * 82801AA (ICH) : document number 290655-003, 290677-014, -+ * 82801AB (ICHO) : document number 290655-003, 290677-014, -+ * 82801BA (ICH2) : document number 290687-002, 298242-027, -+ * 82801BAM (ICH2-M) : document number 290687-002, 298242-027, -+ * 82801CA (ICH3-S) : document number 290733-003, 290739-013, -+ * 82801CAM (ICH3-M) : document number 290716-001, 290718-007, -+ * 82801DB (ICH4) : document number 290744-001, 290745-025, -+ * 82801DBM (ICH4-M) : document number 252337-001, 252663-008, -+ * 82801E (C-ICH) : document number 273599-001, 273645-002, -+ * 82801EB (ICH5) : document number 252516-001, 252517-028, -+ * 82801ER (ICH5R) : document number 252516-001, 252517-028, -+ * 6300ESB (6300ESB) : document number 300641-004, 300884-013, -+ * 82801FB (ICH6) : document number 301473-002, 301474-026, -+ * 82801FR (ICH6R) : document number 301473-002, 301474-026, -+ * 82801FBM (ICH6-M) : document number 301473-002, 301474-026, -+ * 82801FW (ICH6W) : document number 301473-001, 301474-026, -+ * 82801FRW (ICH6RW) : document number 301473-001, 301474-026, -+ * 631xESB (631xESB) : document number 313082-001, 313075-006, -+ * 632xESB (632xESB) : document number 313082-001, 313075-006, -+ * 82801GB (ICH7) : document number 307013-003, 307014-024, -+ * 82801GR (ICH7R) : document number 307013-003, 307014-024, -+ * 82801GDH (ICH7DH) : document number 307013-003, 307014-024, -+ * 82801GBM (ICH7-M) : document number 307013-003, 307014-024, -+ * 82801GHM (ICH7-M DH) : document number 307013-003, 307014-024, -+ * 82801GU (ICH7-U) : document number 307013-003, 307014-024, -+ * 82801HB (ICH8) : document number 313056-003, 313057-017, -+ * 82801HR (ICH8R) : document number 313056-003, 313057-017, -+ * 82801HBM (ICH8M) : document number 313056-003, 313057-017, -+ * 82801HH (ICH8DH) : document number 313056-003, 313057-017, -+ * 82801HO (ICH8DO) : document number 313056-003, 313057-017, -+ * 82801HEM (ICH8M-E) : document number 313056-003, 313057-017, -+ * 82801IB (ICH9) : document number 316972-004, 316973-012, -+ * 82801IR (ICH9R) : document number 316972-004, 316973-012, -+ * 82801IH (ICH9DH) : document number 316972-004, 316973-012, -+ * 82801IO (ICH9DO) : document number 316972-004, 316973-012, -+ * 82801IBM (ICH9M) : document number 316972-004, 316973-012, -+ * 82801IEM (ICH9M-E) : document number 316972-004, 316973-012, -+ * 82801JIB (ICH10) : document number 319973-002, 319974-002, -+ * 82801JIR 
(ICH10R) : document number 319973-002, 319974-002, -+ * 82801JD (ICH10D) : document number 319973-002, 319974-002, -+ * 82801JDO (ICH10DO) : document number 319973-002, 319974-002 - */ - - /* -@@ -99,24 +122,6 @@ enum iTCO_chipsets { - TCO_ICH10R, /* ICH10R */ - TCO_ICH10D, /* ICH10D */ - TCO_ICH10DO, /* ICH10DO */ -- TCO_PCH, /* PCH Desktop Full Featured */ -- TCO_PCHM, /* PCH Mobile Full Featured */ -- TCO_P55, /* P55 */ -- TCO_PM55, /* PM55 */ -- TCO_H55, /* H55 */ -- TCO_QM57, /* QM57 */ -- TCO_H57, /* H57 */ -- TCO_HM55, /* HM55 */ -- TCO_Q57, /* Q57 */ -- TCO_HM57, /* HM57 */ -- TCO_PCHMSFF, /* PCH Mobile SFF Full Featured */ -- TCO_QS57, /* QS57 */ -- TCO_3400, /* 3400 */ -- TCO_3420, /* 3420 */ -- TCO_3450, /* 3450 */ -- TCO_EP80579, /* EP80579 */ -- TCO_CPTD, /* CPT Desktop */ -- TCO_CPTM, /* CPT Mobile */ - }; - - static struct { -@@ -157,24 +162,6 @@ static struct { - {"ICH10R", 2}, - {"ICH10D", 2}, - {"ICH10DO", 2}, -- {"PCH Desktop Full Featured", 2}, -- {"PCH Mobile Full Featured", 2}, -- {"P55", 2}, -- {"PM55", 2}, -- {"H55", 2}, -- {"QM57", 2}, -- {"H57", 2}, -- {"HM55", 2}, -- {"Q57", 2}, -- {"HM57", 2}, -- {"PCH Mobile SFF Full Featured", 2}, -- {"QS57", 2}, -- {"3400", 2}, -- {"3420", 2}, -- {"3450", 2}, -- {"EP80579", 2}, -- {"CPT Desktop", 2}, -- {"CPT Mobile", 2}, - {NULL, 0} - }; - -@@ -243,24 +230,6 @@ static struct pci_device_id iTCO_wdt_pci_tbl[] = { - { ITCO_PCI_DEVICE(0x3a16, TCO_ICH10R)}, - { ITCO_PCI_DEVICE(0x3a1a, TCO_ICH10D)}, - { ITCO_PCI_DEVICE(0x3a14, TCO_ICH10DO)}, -- { ITCO_PCI_DEVICE(0x3b00, TCO_PCH)}, -- { ITCO_PCI_DEVICE(0x3b01, TCO_PCHM)}, -- { ITCO_PCI_DEVICE(0x3b02, TCO_P55)}, -- { ITCO_PCI_DEVICE(0x3b03, TCO_PM55)}, -- { ITCO_PCI_DEVICE(0x3b06, TCO_H55)}, -- { ITCO_PCI_DEVICE(0x3b07, TCO_QM57)}, -- { ITCO_PCI_DEVICE(0x3b08, TCO_H57)}, -- { ITCO_PCI_DEVICE(0x3b09, TCO_HM55)}, -- { ITCO_PCI_DEVICE(0x3b0a, TCO_Q57)}, -- { ITCO_PCI_DEVICE(0x3b0b, TCO_HM57)}, -- { ITCO_PCI_DEVICE(0x3b0d, TCO_PCHMSFF)}, -- { ITCO_PCI_DEVICE(0x3b0f, TCO_QS57)}, -- { ITCO_PCI_DEVICE(0x3b12, TCO_3400)}, -- { ITCO_PCI_DEVICE(0x3b14, TCO_3420)}, -- { ITCO_PCI_DEVICE(0x3b16, TCO_3450)}, -- { ITCO_PCI_DEVICE(0x5031, TCO_EP80579)}, -- { ITCO_PCI_DEVICE(0x1c42, TCO_CPTD)}, -- { ITCO_PCI_DEVICE(0x1c43, TCO_CPTM)}, - { 0, }, /* End of list */ - }; - MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl); -diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c -index 4204336..d31505b 100644 ---- a/drivers/xen/balloon.c -+++ b/drivers/xen/balloon.c -@@ -66,6 +66,8 @@ struct balloon_stats { - /* We aim for 'current allocation' == 'target allocation'. */ - unsigned long current_pages; - unsigned long target_pages; -+ /* We may hit the hard limit in Xen. If we do then we remember it. */ -+ unsigned long hard_limit; - /* - * Drivers may alter the memory reservation independently, but they - * must inform the balloon driver so we avoid hitting the hard limit. -@@ -134,8 +136,6 @@ static void balloon_append(struct page *page) - list_add(&page->lru, &ballooned_pages); - balloon_stats.balloon_low++; - } -- -- totalram_pages--; - } - - /* balloon_retrieve: rescue a page from the balloon, if it is not empty. 
*/ -@@ -156,8 +156,6 @@ static struct page *balloon_retrieve(void) - else - balloon_stats.balloon_low--; - -- totalram_pages++; -- - return page; - } - -@@ -183,7 +181,7 @@ static void balloon_alarm(unsigned long unused) - - static unsigned long current_target(void) - { -- unsigned long target = balloon_stats.target_pages; -+ unsigned long target = min(balloon_stats.target_pages, balloon_stats.hard_limit); - - target = min(target, - balloon_stats.current_pages + -@@ -219,10 +217,23 @@ static int increase_reservation(unsigned long nr_pages) - set_xen_guest_handle(reservation.extent_start, frame_list); - reservation.nr_extents = nr_pages; - rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation); -- if (rc < 0) -+ if (rc < nr_pages) { -+ if (rc > 0) { -+ int ret; -+ -+ /* We hit the Xen hard limit: reprobe. */ -+ reservation.nr_extents = rc; -+ ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, -+ &reservation); -+ BUG_ON(ret != rc); -+ } -+ if (rc >= 0) -+ balloon_stats.hard_limit = (balloon_stats.current_pages + rc - -+ balloon_stats.driver_pages); - goto out; -+ } - -- for (i = 0; i < rc; i++) { -+ for (i = 0; i < nr_pages; i++) { - page = balloon_retrieve(); - BUG_ON(page == NULL); - -@@ -248,12 +259,13 @@ static int increase_reservation(unsigned long nr_pages) - __free_page(page); - } - -- balloon_stats.current_pages += rc; -+ balloon_stats.current_pages += nr_pages; -+ totalram_pages = balloon_stats.current_pages; - - out: - spin_unlock_irqrestore(&balloon_lock, flags); - -- return rc < 0 ? rc : rc != nr_pages; -+ return 0; - } - - static int decrease_reservation(unsigned long nr_pages) -@@ -311,6 +323,7 @@ static int decrease_reservation(unsigned long nr_pages) - BUG_ON(ret != nr_pages); - - balloon_stats.current_pages -= nr_pages; -+ totalram_pages = balloon_stats.current_pages; - - spin_unlock_irqrestore(&balloon_lock, flags); - -@@ -354,6 +367,7 @@ static void balloon_process(struct work_struct *work) - static void balloon_set_new_target(unsigned long target) - { - /* No need for lock. Not read-modify-write updates. */ -+ balloon_stats.hard_limit = ~0UL; - balloon_stats.target_pages = target; - schedule_work(&balloon_worker); - } -@@ -408,10 +422,12 @@ static int __init balloon_init(void) - pr_info("xen_balloon: Initialising balloon driver.\n"); - - balloon_stats.current_pages = min(xen_start_info->nr_pages, max_pfn); -+ totalram_pages = balloon_stats.current_pages; - balloon_stats.target_pages = balloon_stats.current_pages; - balloon_stats.balloon_low = 0; - balloon_stats.balloon_high = 0; - balloon_stats.driver_pages = 0UL; -+ balloon_stats.hard_limit = ~0UL; - - init_timer(&balloon_timer); - balloon_timer.data = 0; -@@ -456,6 +472,9 @@ module_exit(balloon_exit); - BALLOON_SHOW(current_kb, "%lu\n", PAGES2KB(balloon_stats.current_pages)); - BALLOON_SHOW(low_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_low)); - BALLOON_SHOW(high_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_high)); -+BALLOON_SHOW(hard_limit_kb, -+ (balloon_stats.hard_limit!=~0UL) ? "%lu\n" : "???\n", -+ (balloon_stats.hard_limit!=~0UL) ? 
PAGES2KB(balloon_stats.hard_limit) : 0); - BALLOON_SHOW(driver_kb, "%lu\n", PAGES2KB(balloon_stats.driver_pages)); - - static ssize_t show_target_kb(struct sys_device *dev, struct sysdev_attribute *attr, -@@ -525,6 +544,7 @@ static struct attribute *balloon_info_attrs[] = { - &attr_current_kb.attr, - &attr_low_kb.attr, - &attr_high_kb.attr, -+ &attr_hard_limit_kb.attr, - &attr_driver_kb.attr, - NULL - }; -diff --git a/drivers/xen/events.c b/drivers/xen/events.c -index ce602dd..2f57276 100644 ---- a/drivers/xen/events.c -+++ b/drivers/xen/events.c -@@ -474,9 +474,6 @@ static void unbind_from_irq(unsigned int irq) - bind_evtchn_to_cpu(evtchn, 0); - - evtchn_to_irq[evtchn] = -1; -- } -- -- if (irq_info[irq].type != IRQT_UNBOUND) { - irq_info[irq] = mk_unbound_info(); - - dynamic_irq_cleanup(irq); -diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c -index 5d42d55..10d03d7 100644 ---- a/drivers/xen/manage.c -+++ b/drivers/xen/manage.c -@@ -43,6 +43,7 @@ static int xen_suspend(void *data) - if (err) { - printk(KERN_ERR "xen_suspend: sysdev_suspend failed: %d\n", - err); -+ dpm_resume_noirq(PMSG_RESUME); - return err; - } - -@@ -68,6 +69,7 @@ static int xen_suspend(void *data) - } - - sysdev_resume(); -+ dpm_resume_noirq(PMSG_RESUME); - - return 0; - } -@@ -79,12 +81,6 @@ static void do_suspend(void) - - shutting_down = SHUTDOWN_SUSPEND; - -- err = stop_machine_create(); -- if (err) { -- printk(KERN_ERR "xen suspend: failed to setup stop_machine %d\n", err); -- goto out; -- } -- - #ifdef CONFIG_PREEMPT - /* If the kernel is preemptible, we need to freeze all the processes - to prevent them from being in the middle of a pagetable update -@@ -92,14 +88,14 @@ static void do_suspend(void) - err = freeze_processes(); - if (err) { - printk(KERN_ERR "xen suspend: freeze failed %d\n", err); -- goto out_destroy_sm; -+ return; - } - #endif - - err = dpm_suspend_start(PMSG_SUSPEND); - if (err) { - printk(KERN_ERR "xen suspend: dpm_suspend_start %d\n", err); -- goto out_thaw; -+ goto out; - } - - printk(KERN_DEBUG "suspending xenstore...\n"); -@@ -108,39 +104,32 @@ static void do_suspend(void) - err = dpm_suspend_noirq(PMSG_SUSPEND); - if (err) { - printk(KERN_ERR "dpm_suspend_noirq failed: %d\n", err); -- goto out_resume; -+ goto resume_devices; - } - - err = stop_machine(xen_suspend, &cancelled, cpumask_of(0)); -- -- dpm_resume_noirq(PMSG_RESUME); -- - if (err) { - printk(KERN_ERR "failed to start xen_suspend: %d\n", err); -- cancelled = 1; -+ goto out; - } - --out_resume: - if (!cancelled) { - xen_arch_resume(); - xs_resume(); - } else - xs_suspend_cancel(); - -+ dpm_resume_noirq(PMSG_RESUME); -+ -+resume_devices: - dpm_resume_end(PMSG_RESUME); - - /* Make sure timer events get retriggered on all CPUs */ - clock_was_set(); -- --out_thaw: -+out: - #ifdef CONFIG_PREEMPT - thaw_processes(); -- --out_destroy_sm: - #endif -- stop_machine_destroy(); -- --out: - shutting_down = SHUTDOWN_INVALID; - } - #endif /* CONFIG_PM_SLEEP */ -diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c -index 649fcdf..d42e25d 100644 ---- a/drivers/xen/xenbus/xenbus_probe.c -+++ b/drivers/xen/xenbus/xenbus_probe.c -@@ -454,21 +454,21 @@ static ssize_t xendev_show_nodename(struct device *dev, - { - return sprintf(buf, "%s\n", to_xenbus_device(dev)->nodename); - } --static DEVICE_ATTR(nodename, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_nodename, NULL); -+DEVICE_ATTR(nodename, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_nodename, NULL); - - static ssize_t xendev_show_devtype(struct device *dev, - struct 
device_attribute *attr, char *buf) - { - return sprintf(buf, "%s\n", to_xenbus_device(dev)->devicetype); - } --static DEVICE_ATTR(devtype, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_devtype, NULL); -+DEVICE_ATTR(devtype, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_devtype, NULL); - - static ssize_t xendev_show_modalias(struct device *dev, - struct device_attribute *attr, char *buf) - { - return sprintf(buf, "xen:%s\n", to_xenbus_device(dev)->devicetype); - } --static DEVICE_ATTR(modalias, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_modalias, NULL); -+DEVICE_ATTR(modalias, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_modalias, NULL); - - int xenbus_probe_node(struct xen_bus_type *bus, - const char *type, -@@ -843,7 +843,7 @@ postcore_initcall(xenbus_probe_init); - - MODULE_LICENSE("GPL"); - --static int is_device_connecting(struct device *dev, void *data) -+static int is_disconnected_device(struct device *dev, void *data) - { - struct xenbus_device *xendev = to_xenbus_device(dev); - struct device_driver *drv = data; -@@ -861,15 +861,14 @@ static int is_device_connecting(struct device *dev, void *data) - return 0; - - xendrv = to_xenbus_driver(dev->driver); -- return (xendev->state < XenbusStateConnected || -- (xendev->state == XenbusStateConnected && -- xendrv->is_ready && !xendrv->is_ready(xendev))); -+ return (xendev->state != XenbusStateConnected || -+ (xendrv->is_ready && !xendrv->is_ready(xendev))); - } - --static int exists_connecting_device(struct device_driver *drv) -+static int exists_disconnected_device(struct device_driver *drv) - { - return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, -- is_device_connecting); -+ is_disconnected_device); - } - - static int print_device_status(struct device *dev, void *data) -@@ -885,13 +884,10 @@ static int print_device_status(struct device *dev, void *data) - /* Information only: is this too noisy? */ - printk(KERN_INFO "XENBUS: Device with no driver: %s\n", - xendev->nodename); -- } else if (xendev->state < XenbusStateConnected) { -- enum xenbus_state rstate = XenbusStateUnknown; -- if (xendev->otherend) -- rstate = xenbus_read_driver_state(xendev->otherend); -+ } else if (xendev->state != XenbusStateConnected) { - printk(KERN_WARNING "XENBUS: Timeout connecting " -- "to device: %s (local state %d, remote state %d)\n", -- xendev->nodename, xendev->state, rstate); -+ "to device: %s (state %d)\n", -+ xendev->nodename, xendev->state); - } - - return 0; -@@ -901,7 +897,7 @@ static int print_device_status(struct device *dev, void *data) - static int ready_to_wait_for_devices; - - /* -- * On a 5-minute timeout, wait for all devices currently configured. We need -+ * On a 10 second timeout, wait for all devices currently configured. We need - * to do this to guarantee that the filesystems and / or network devices - * needed for boot are available, before we can allow the boot to proceed. - * -@@ -916,30 +912,18 @@ static int ready_to_wait_for_devices; - */ - static void wait_for_devices(struct xenbus_driver *xendrv) - { -- unsigned long start = jiffies; -+ unsigned long timeout = jiffies + 10*HZ; - struct device_driver *drv = xendrv ? 
&xendrv->driver : NULL; -- unsigned int seconds_waited = 0; - - if (!ready_to_wait_for_devices || !xen_domain()) - return; - -- while (exists_connecting_device(drv)) { -- if (time_after(jiffies, start + (seconds_waited+5)*HZ)) { -- if (!seconds_waited) -- printk(KERN_WARNING "XENBUS: Waiting for " -- "devices to initialise: "); -- seconds_waited += 5; -- printk("%us...", 300 - seconds_waited); -- if (seconds_waited == 300) -- break; -- } -- -+ while (exists_disconnected_device(drv)) { -+ if (time_after(jiffies, timeout)) -+ break; - schedule_timeout_interruptible(HZ/10); - } - -- if (seconds_waited) -- printk("\n"); -- - bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, - print_device_status); - } -diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c -index 69357c0..14a8644 100644 ---- a/fs/9p/vfs_super.c -+++ b/fs/9p/vfs_super.c -@@ -188,8 +188,7 @@ static void v9fs_kill_super(struct super_block *s) - - P9_DPRINTK(P9_DEBUG_VFS, " %p\n", s); - -- if (s->s_root) -- v9fs_dentry_release(s->s_root); /* clunk root */ -+ v9fs_dentry_release(s->s_root); /* clunk root */ - - kill_anon_super(s); - -diff --git a/fs/affs/affs.h b/fs/affs/affs.h -index 0e40caa..e511dc6 100644 ---- a/fs/affs/affs.h -+++ b/fs/affs/affs.h -@@ -106,8 +106,8 @@ struct affs_sb_info { - u32 s_last_bmap; - struct buffer_head *s_bmap_bh; - char *s_prefix; /* Prefix for volumes and assigns. */ -+ int s_prefix_len; /* Length of prefix. */ - char s_volume[32]; /* Volume prefix for absolute symlinks. */ -- spinlock_t symlink_lock; /* protects the previous two */ - }; - - #define SF_INTL 0x0001 /* International filesystem. */ -diff --git a/fs/affs/namei.c b/fs/affs/namei.c -index d70bbba..960d336 100644 ---- a/fs/affs/namei.c -+++ b/fs/affs/namei.c -@@ -341,13 +341,10 @@ affs_symlink(struct inode *dir, struct dentry *dentry, const char *symname) - p = (char *)AFFS_HEAD(bh)->table; - lc = '/'; - if (*symname == '/') { -- struct affs_sb_info *sbi = AFFS_SB(sb); - while (*symname == '/') - symname++; -- spin_lock(&sbi->symlink_lock); -- while (sbi->s_volume[i]) /* Cannot overflow */ -- *p++ = sbi->s_volume[i++]; -- spin_unlock(&sbi->symlink_lock); -+ while (AFFS_SB(sb)->s_volume[i]) /* Cannot overflow */ -+ *p++ = AFFS_SB(sb)->s_volume[i++]; - } - while (i < maxlen && (c = *symname++)) { - if (c == '.' && lc == '/' && *symname == '.' 
&& symname[1] == '/') { -diff --git a/fs/affs/super.c b/fs/affs/super.c -index d41e967..104fdcb 100644 ---- a/fs/affs/super.c -+++ b/fs/affs/super.c -@@ -203,7 +203,7 @@ parse_options(char *options, uid_t *uid, gid_t *gid, int *mode, int *reserved, s - switch (token) { - case Opt_bs: - if (match_int(&args[0], &n)) -- return 0; -+ return -EINVAL; - if (n != 512 && n != 1024 && n != 2048 - && n != 4096) { - printk ("AFFS: Invalid blocksize (512, 1024, 2048, 4096 allowed)\n"); -@@ -213,7 +213,7 @@ parse_options(char *options, uid_t *uid, gid_t *gid, int *mode, int *reserved, s - break; - case Opt_mode: - if (match_octal(&args[0], &option)) -- return 0; -+ return 1; - *mode = option & 0777; - *mount_opts |= SF_SETMODE; - break; -@@ -221,6 +221,8 @@ parse_options(char *options, uid_t *uid, gid_t *gid, int *mode, int *reserved, s - *mount_opts |= SF_MUFS; - break; - case Opt_prefix: -+ /* Free any previous prefix */ -+ kfree(*prefix); - *prefix = match_strdup(&args[0]); - if (!*prefix) - return 0; -@@ -231,21 +233,21 @@ parse_options(char *options, uid_t *uid, gid_t *gid, int *mode, int *reserved, s - break; - case Opt_reserved: - if (match_int(&args[0], reserved)) -- return 0; -+ return 1; - break; - case Opt_root: - if (match_int(&args[0], root)) -- return 0; -+ return 1; - break; - case Opt_setgid: - if (match_int(&args[0], &option)) -- return 0; -+ return 1; - *gid = option; - *mount_opts |= SF_SETGID; - break; - case Opt_setuid: - if (match_int(&args[0], &option)) -- return 0; -+ return -EINVAL; - *uid = option; - *mount_opts |= SF_SETUID; - break; -@@ -309,14 +311,11 @@ static int affs_fill_super(struct super_block *sb, void *data, int silent) - return -ENOMEM; - sb->s_fs_info = sbi; - mutex_init(&sbi->s_bmlock); -- spin_lock_init(&sbi->symlink_lock); - - if (!parse_options(data,&uid,&gid,&i,&reserved,&root_block, - &blocksize,&sbi->s_prefix, - sbi->s_volume, &mount_flags)) { - printk(KERN_ERR "AFFS: Error parsing options\n"); -- kfree(sbi->s_prefix); -- kfree(sbi); - return -EINVAL; - } - /* N.B. 
after this point s_prefix must be released */ -@@ -517,18 +516,14 @@ affs_remount(struct super_block *sb, int *flags, char *data) - unsigned long mount_flags; - int res = 0; - char *new_opts = kstrdup(data, GFP_KERNEL); -- char volume[32]; -- char *prefix = NULL; - - pr_debug("AFFS: remount(flags=0x%x,opts=\"%s\")\n",*flags,data); - - *flags |= MS_NODIRATIME; - -- memcpy(volume, sbi->s_volume, 32); - if (!parse_options(data, &uid, &gid, &mode, &reserved, &root_block, -- &blocksize, &prefix, volume, -+ &blocksize, &sbi->s_prefix, sbi->s_volume, - &mount_flags)) { -- kfree(prefix); - kfree(new_opts); - return -EINVAL; - } -@@ -539,14 +534,6 @@ affs_remount(struct super_block *sb, int *flags, char *data) - sbi->s_mode = mode; - sbi->s_uid = uid; - sbi->s_gid = gid; -- /* protect against readers */ -- spin_lock(&sbi->symlink_lock); -- if (prefix) { -- kfree(sbi->s_prefix); -- sbi->s_prefix = prefix; -- } -- memcpy(sbi->s_volume, volume, 32); -- spin_unlock(&sbi->symlink_lock); - - if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) { - unlock_kernel(); -diff --git a/fs/affs/symlink.c b/fs/affs/symlink.c -index ee00f08..4178253 100644 ---- a/fs/affs/symlink.c -+++ b/fs/affs/symlink.c -@@ -20,6 +20,7 @@ static int affs_symlink_readpage(struct file *file, struct page *page) - int i, j; - char c; - char lc; -+ char *pf; - - pr_debug("AFFS: follow_link(ino=%lu)\n",inode->i_ino); - -@@ -31,15 +32,11 @@ static int affs_symlink_readpage(struct file *file, struct page *page) - j = 0; - lf = (struct slink_front *)bh->b_data; - lc = 0; -+ pf = AFFS_SB(inode->i_sb)->s_prefix ? AFFS_SB(inode->i_sb)->s_prefix : "/"; - - if (strchr(lf->symname,':')) { /* Handle assign or volume name */ -- struct affs_sb_info *sbi = AFFS_SB(inode->i_sb); -- char *pf; -- spin_lock(&sbi->symlink_lock); -- pf = sbi->s_prefix ? 
sbi->s_prefix : "/"; - while (i < 1023 && (c = pf[i])) - link[i++] = c; -- spin_unlock(&sbi->symlink_lock); - while (i < 1023 && lf->symname[j] != ':') - link[i++] = lf->symname[j++]; - if (i < 1023) -diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c -index 34ddda8..33baf27 100644 ---- a/fs/befs/linuxvfs.c -+++ b/fs/befs/linuxvfs.c -@@ -873,7 +873,6 @@ befs_fill_super(struct super_block *sb, void *data, int silent) - brelse(bh); - - unacquire_priv_sbp: -- kfree(befs_sb->mount_opts.iocharset); - kfree(sb->s_fs_info); - - unacquire_none: -diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c -index 8f3d9fd..6f60336 100644 ---- a/fs/bfs/inode.c -+++ b/fs/bfs/inode.c -@@ -353,35 +353,35 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent) - struct inode *inode; - unsigned i, imap_len; - struct bfs_sb_info *info; -- int ret = -EINVAL; -+ long ret = -EINVAL; - unsigned long i_sblock, i_eblock, i_eoff, s_size; - - info = kzalloc(sizeof(*info), GFP_KERNEL); - if (!info) - return -ENOMEM; -- mutex_init(&info->bfs_lock); - s->s_fs_info = info; - - sb_set_blocksize(s, BFS_BSIZE); - -- info->si_sbh = sb_bread(s, 0); -- if (!info->si_sbh) -+ bh = sb_bread(s, 0); -+ if(!bh) - goto out; -- bfs_sb = (struct bfs_super_block *)info->si_sbh->b_data; -+ bfs_sb = (struct bfs_super_block *)bh->b_data; - if (le32_to_cpu(bfs_sb->s_magic) != BFS_MAGIC) { - if (!silent) - printf("No BFS filesystem on %s (magic=%08x)\n", - s->s_id, le32_to_cpu(bfs_sb->s_magic)); -- goto out1; -+ goto out; - } - if (BFS_UNCLEAN(bfs_sb, s) && !silent) - printf("%s is unclean, continuing\n", s->s_id); - - s->s_magic = BFS_MAGIC; -+ info->si_sbh = bh; - - if (le32_to_cpu(bfs_sb->s_start) > le32_to_cpu(bfs_sb->s_end)) { - printf("Superblock is corrupted\n"); -- goto out1; -+ goto out; - } - - info->si_lasti = (le32_to_cpu(bfs_sb->s_start) - BFS_BSIZE) / -@@ -390,7 +390,7 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent) - imap_len = (info->si_lasti / 8) + 1; - info->si_imap = kzalloc(imap_len, GFP_KERNEL); - if (!info->si_imap) -- goto out1; -+ goto out; - for (i = 0; i < BFS_ROOT_INO; i++) - set_bit(i, info->si_imap); - -@@ -398,13 +398,15 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent) - inode = bfs_iget(s, BFS_ROOT_INO); - if (IS_ERR(inode)) { - ret = PTR_ERR(inode); -- goto out2; -+ kfree(info->si_imap); -+ goto out; - } - s->s_root = d_alloc_root(inode); - if (!s->s_root) { - iput(inode); - ret = -ENOMEM; -- goto out2; -+ kfree(info->si_imap); -+ goto out; - } - - info->si_blocks = (le32_to_cpu(bfs_sb->s_end) + 1) >> BFS_BSIZE_BITS; -@@ -417,8 +419,10 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent) - bh = sb_bread(s, info->si_blocks - 1); - if (!bh) { - printf("Last block not available: %lu\n", info->si_blocks - 1); -+ iput(inode); - ret = -EIO; -- goto out3; -+ kfree(info->si_imap); -+ goto out; - } - brelse(bh); - -@@ -455,8 +459,11 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent) - printf("Inode 0x%08x corrupted\n", i); - - brelse(bh); -- ret = -EIO; -- goto out3; -+ s->s_root = NULL; -+ kfree(info->si_imap); -+ kfree(info); -+ s->s_fs_info = NULL; -+ return -EIO; - } - - if (!di->i_ino) { -@@ -476,17 +483,11 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent) - s->s_dirt = 1; - } - dump_imap("read_super", s); -+ mutex_init(&info->bfs_lock); - return 0; - --out3: -- dput(s->s_root); -- s->s_root = NULL; --out2: -- kfree(info->si_imap); --out1: -- brelse(info->si_sbh); - out: -- 
mutex_destroy(&info->bfs_lock); -+ brelse(bh); - kfree(info); - s->s_fs_info = NULL; - return ret; -diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c -index 0133b5a..b639dcf 100644 ---- a/fs/binfmt_aout.c -+++ b/fs/binfmt_aout.c -@@ -263,7 +263,6 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs) - #else - set_personality(PER_LINUX); - #endif -- setup_new_exec(bprm); - - current->mm->end_code = ex.a_text + - (current->mm->start_code = N_TXTADDR(ex)); -diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c -index 1ed37ba..b9b3bb5 100644 ---- a/fs/binfmt_elf.c -+++ b/fs/binfmt_elf.c -@@ -662,6 +662,27 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) - if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0') - goto out_free_interp; - -+ /* -+ * The early SET_PERSONALITY here is so that the lookup -+ * for the interpreter happens in the namespace of the -+ * to-be-execed image. SET_PERSONALITY can select an -+ * alternate root. -+ * -+ * However, SET_PERSONALITY is NOT allowed to switch -+ * this task into the new images's memory mapping -+ * policy - that is, TASK_SIZE must still evaluate to -+ * that which is appropriate to the execing application. -+ * This is because exit_mmap() needs to have TASK_SIZE -+ * evaluate to the size of the old image. -+ * -+ * So if (say) a 64-bit application is execing a 32-bit -+ * application it is the architecture's responsibility -+ * to defer changing the value of TASK_SIZE until the -+ * switch really is going to happen - do this in -+ * flush_thread(). - akpm -+ */ -+ SET_PERSONALITY(loc->elf_ex); -+ - interpreter = open_exec(elf_interpreter); - retval = PTR_ERR(interpreter); - if (IS_ERR(interpreter)) -@@ -709,6 +730,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) - /* Verify the interpreter has a valid arch */ - if (!elf_check_arch(&loc->interp_elf_ex)) - goto out_free_dentry; -+ } else { -+ /* Executables without an interpreter also need a personality */ -+ SET_PERSONALITY(loc->elf_ex); - } - - /* Flush all traces of the currently running executable */ -@@ -728,8 +752,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) - - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) - current->flags |= PF_RANDOMIZE; -- -- setup_new_exec(bprm); -+ arch_pick_mmap_layout(current->mm); - - /* Do this so that we can load the interpreter, if need be. We will - change some of these later */ -diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c -index e7a0bb4..38502c6 100644 ---- a/fs/binfmt_elf_fdpic.c -+++ b/fs/binfmt_elf_fdpic.c -@@ -171,9 +171,6 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm, - #ifdef ELF_FDPIC_PLAT_INIT - unsigned long dynaddr; - #endif --#ifndef CONFIG_MMU -- unsigned long stack_prot; --#endif - struct file *interpreter = NULL; /* to shut gcc up */ - char *interpreter_name = NULL; - int executable_stack; -@@ -319,11 +316,6 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm, - * defunct, deceased, etc. 
after this point we have to exit via - * error_kill */ - set_personality(PER_LINUX_FDPIC); -- if (elf_read_implies_exec(&exec_params.hdr, executable_stack)) -- current->personality |= READ_IMPLIES_EXEC; -- -- setup_new_exec(bprm); -- - set_binfmt(&elf_fdpic_format); - - current->mm->start_code = 0; -@@ -385,13 +377,9 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm, - if (stack_size < PAGE_SIZE * 2) - stack_size = PAGE_SIZE * 2; - -- stack_prot = PROT_READ | PROT_WRITE; -- if (executable_stack == EXSTACK_ENABLE_X || -- (executable_stack == EXSTACK_DEFAULT && VM_STACK_FLAGS & VM_EXEC)) -- stack_prot |= PROT_EXEC; -- - down_write(¤t->mm->mmap_sem); -- current->mm->start_brk = do_mmap(NULL, 0, stack_size, stack_prot, -+ current->mm->start_brk = do_mmap(NULL, 0, stack_size, -+ PROT_READ | PROT_WRITE | PROT_EXEC, - MAP_PRIVATE | MAP_ANONYMOUS | MAP_GROWSDOWN, - 0); - -diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c -index ca88c46..a279665 100644 ---- a/fs/binfmt_flat.c -+++ b/fs/binfmt_flat.c -@@ -519,7 +519,6 @@ static int load_flat_file(struct linux_binprm * bprm, - - /* OK, This is the point of no return */ - set_personality(PER_LINUX_32BIT); -- setup_new_exec(bprm); - } - - /* -diff --git a/fs/binfmt_som.c b/fs/binfmt_som.c -index 35cf002..eff74b9 100644 ---- a/fs/binfmt_som.c -+++ b/fs/binfmt_som.c -@@ -227,7 +227,6 @@ load_som_binary(struct linux_binprm * bprm, struct pt_regs * regs) - /* OK, This is the point of no return */ - current->flags &= ~PF_FORKNOEXEC; - current->personality = PER_HPUX; -- setup_new_exec(bprm); - - /* Set the task size for HP-UX processes such that - * the gateway page is outside the address space. -diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c -index a16f29e..49a34e7 100644 ---- a/fs/bio-integrity.c -+++ b/fs/bio-integrity.c -@@ -61,7 +61,7 @@ static inline unsigned int vecs_to_idx(unsigned int nr) - - static inline int use_bip_pool(unsigned int idx) - { -- if (idx == BIOVEC_MAX_IDX) -+ if (idx == BIOVEC_NR_POOLS) - return 1; - - return 0; -@@ -95,7 +95,6 @@ struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *bio, - - /* Use mempool if lower order alloc failed or max vecs were requested */ - if (bip == NULL) { -- idx = BIOVEC_MAX_IDX; /* so we free the payload properly later */ - bip = mempool_alloc(bs->bio_integrity_pool, gfp_mask); - - if (unlikely(bip == NULL)) { -diff --git a/fs/bio.c b/fs/bio.c -index e0c9e71..12da5db 100644 ---- a/fs/bio.c -+++ b/fs/bio.c -@@ -542,18 +542,13 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page - - if (page == prev->bv_page && - offset == prev->bv_offset + prev->bv_len) { -- unsigned int prev_bv_len = prev->bv_len; - prev->bv_len += len; - - if (q->merge_bvec_fn) { - struct bvec_merge_data bvm = { -- /* prev_bvec is already charged in -- bi_size, discharge it in order to -- simulate merging updated prev_bvec -- as new bvec. 
*/ - .bi_bdev = bio->bi_bdev, - .bi_sector = bio->bi_sector, -- .bi_size = bio->bi_size - prev_bv_len, -+ .bi_size = bio->bi_size, - .bi_rw = bio->bi_rw, - }; - -diff --git a/fs/block_dev.c b/fs/block_dev.c -index 34e2d20..8bed055 100644 ---- a/fs/block_dev.c -+++ b/fs/block_dev.c -@@ -246,8 +246,7 @@ struct super_block *freeze_bdev(struct block_device *bdev) - if (!sb) - goto out; - if (sb->s_flags & MS_RDONLY) { -- sb->s_frozen = SB_FREEZE_TRANS; -- up_write(&sb->s_umount); -+ deactivate_locked_super(sb); - mutex_unlock(&bdev->bd_fsfreeze_mutex); - return sb; - } -@@ -308,7 +307,7 @@ int thaw_bdev(struct block_device *bdev, struct super_block *sb) - BUG_ON(sb->s_bdev != bdev); - down_write(&sb->s_umount); - if (sb->s_flags & MS_RDONLY) -- goto out_unfrozen; -+ goto out_deactivate; - - if (sb->s_op->unfreeze_fs) { - error = sb->s_op->unfreeze_fs(sb); -@@ -322,11 +321,11 @@ int thaw_bdev(struct block_device *bdev, struct super_block *sb) - } - } - --out_unfrozen: - sb->s_frozen = SB_UNFROZEN; - smp_wmb(); - wake_up(&sb->s_wait_unfrozen); - -+out_deactivate: - if (sb) - deactivate_locked_super(sb); - out_unlock: -diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c -index 3bbcaa7..63ea83f 100644 ---- a/fs/cifs/connect.c -+++ b/fs/cifs/connect.c -@@ -2287,12 +2287,12 @@ int - cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, - char *mount_data_global, const char *devname) - { -- int rc; -+ int rc = 0; - int xid; - struct smb_vol *volume_info; -- struct cifsSesInfo *pSesInfo; -- struct cifsTconInfo *tcon; -- struct TCP_Server_Info *srvTcp; -+ struct cifsSesInfo *pSesInfo = NULL; -+ struct cifsTconInfo *tcon = NULL; -+ struct TCP_Server_Info *srvTcp = NULL; - char *full_path; - char *mount_data = mount_data_global; - #ifdef CONFIG_CIFS_DFS_UPCALL -@@ -2301,10 +2301,6 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, - int referral_walks_count = 0; - try_mount_again: - #endif -- rc = 0; -- tcon = NULL; -- pSesInfo = NULL; -- srvTcp = NULL; - full_path = NULL; - - xid = GetXid(); -@@ -2601,7 +2597,6 @@ remote_path_check: - - cleanup_volume_info(&volume_info); - referral_walks_count++; -- FreeXid(xid); - goto try_mount_again; - } - #else /* No DFS support, return error on mount */ -diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c -index f5618f8..f84062f 100644 ---- a/fs/cifs/readdir.c -+++ b/fs/cifs/readdir.c -@@ -666,7 +666,6 @@ static int cifs_get_name_from_search_buf(struct qstr *pqst, - min(len, max_len), nlt, - cifs_sb->mnt_cifs_flags & - CIFS_MOUNT_MAP_SPECIAL_CHR); -- pqst->len -= nls_nullsize(nlt); - } else { - pqst->name = filename; - pqst->len = len; -diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c -index 39c6ee8..d22438e 100644 ---- a/fs/debugfs/inode.c -+++ b/fs/debugfs/inode.c -@@ -32,9 +32,7 @@ static struct vfsmount *debugfs_mount; - static int debugfs_mount_count; - static bool debugfs_registered; - --static struct inode *debugfs_get_inode(struct super_block *sb, int mode, dev_t dev, -- void *data, const struct file_operations *fops) -- -+static struct inode *debugfs_get_inode(struct super_block *sb, int mode, dev_t dev) - { - struct inode *inode = new_inode(sb); - -@@ -46,18 +44,14 @@ static struct inode *debugfs_get_inode(struct super_block *sb, int mode, dev_t d - init_special_inode(inode, mode, dev); - break; - case S_IFREG: -- inode->i_fop = fops ? 
fops : &debugfs_file_operations; -- inode->i_private = data; -+ inode->i_fop = &debugfs_file_operations; - break; - case S_IFLNK: - inode->i_op = &debugfs_link_operations; -- inode->i_fop = fops; -- inode->i_private = data; - break; - case S_IFDIR: - inode->i_op = &simple_dir_inode_operations; -- inode->i_fop = fops ? fops : &simple_dir_operations; -- inode->i_private = data; -+ inode->i_fop = &simple_dir_operations; - - /* directory inodes start off with i_nlink == 2 - * (for "." entry) */ -@@ -70,8 +64,7 @@ static struct inode *debugfs_get_inode(struct super_block *sb, int mode, dev_t d - - /* SMP-safe */ - static int debugfs_mknod(struct inode *dir, struct dentry *dentry, -- int mode, dev_t dev, void *data, -- const struct file_operations *fops) -+ int mode, dev_t dev) - { - struct inode *inode; - int error = -EPERM; -@@ -79,7 +72,7 @@ static int debugfs_mknod(struct inode *dir, struct dentry *dentry, - if (dentry->d_inode) - return -EEXIST; - -- inode = debugfs_get_inode(dir->i_sb, mode, dev, data, fops); -+ inode = debugfs_get_inode(dir->i_sb, mode, dev); - if (inode) { - d_instantiate(dentry, inode); - dget(dentry); -@@ -88,13 +81,12 @@ static int debugfs_mknod(struct inode *dir, struct dentry *dentry, - return error; - } - --static int debugfs_mkdir(struct inode *dir, struct dentry *dentry, int mode, -- void *data, const struct file_operations *fops) -+static int debugfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) - { - int res; - - mode = (mode & (S_IRWXUGO | S_ISVTX)) | S_IFDIR; -- res = debugfs_mknod(dir, dentry, mode, 0, data, fops); -+ res = debugfs_mknod(dir, dentry, mode, 0); - if (!res) { - inc_nlink(dir); - fsnotify_mkdir(dir, dentry); -@@ -102,20 +94,18 @@ static int debugfs_mkdir(struct inode *dir, struct dentry *dentry, int mode, - return res; - } - --static int debugfs_link(struct inode *dir, struct dentry *dentry, int mode, -- void *data, const struct file_operations *fops) -+static int debugfs_link(struct inode *dir, struct dentry *dentry, int mode) - { - mode = (mode & S_IALLUGO) | S_IFLNK; -- return debugfs_mknod(dir, dentry, mode, 0, data, fops); -+ return debugfs_mknod(dir, dentry, mode, 0); - } - --static int debugfs_create(struct inode *dir, struct dentry *dentry, int mode, -- void *data, const struct file_operations *fops) -+static int debugfs_create(struct inode *dir, struct dentry *dentry, int mode) - { - int res; - - mode = (mode & S_IALLUGO) | S_IFREG; -- res = debugfs_mknod(dir, dentry, mode, 0, data, fops); -+ res = debugfs_mknod(dir, dentry, mode, 0); - if (!res) - fsnotify_create(dir, dentry); - return res; -@@ -149,9 +139,7 @@ static struct file_system_type debug_fs_type = { - - static int debugfs_create_by_name(const char *name, mode_t mode, - struct dentry *parent, -- struct dentry **dentry, -- void *data, -- const struct file_operations *fops) -+ struct dentry **dentry) - { - int error = 0; - -@@ -176,16 +164,13 @@ static int debugfs_create_by_name(const char *name, mode_t mode, - if (!IS_ERR(*dentry)) { - switch (mode & S_IFMT) { - case S_IFDIR: -- error = debugfs_mkdir(parent->d_inode, *dentry, mode, -- data, fops); -+ error = debugfs_mkdir(parent->d_inode, *dentry, mode); - break; - case S_IFLNK: -- error = debugfs_link(parent->d_inode, *dentry, mode, -- data, fops); -+ error = debugfs_link(parent->d_inode, *dentry, mode); - break; - default: -- error = debugfs_create(parent->d_inode, *dentry, mode, -- data, fops); -+ error = debugfs_create(parent->d_inode, *dentry, mode); - break; - } - dput(*dentry); -@@ -236,13 +221,19 @@ struct 
dentry *debugfs_create_file(const char *name, mode_t mode, - if (error) - goto exit; - -- error = debugfs_create_by_name(name, mode, parent, &dentry, -- data, fops); -+ error = debugfs_create_by_name(name, mode, parent, &dentry); - if (error) { - dentry = NULL; - simple_release_fs(&debugfs_mount, &debugfs_mount_count); - goto exit; - } -+ -+ if (dentry->d_inode) { -+ if (data) -+ dentry->d_inode->i_private = data; -+ if (fops) -+ dentry->d_inode->i_fop = fops; -+ } - exit: - return dentry; - } -diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c -index 8882ecc..d5f8c96 100644 ---- a/fs/devpts/inode.c -+++ b/fs/devpts/inode.c -@@ -517,23 +517,11 @@ int devpts_pty_new(struct inode *ptmx_inode, struct tty_struct *tty) - - struct tty_struct *devpts_get_tty(struct inode *pts_inode, int number) - { -- struct dentry *dentry; -- struct tty_struct *tty; -- - BUG_ON(pts_inode->i_rdev == MKDEV(TTYAUX_MAJOR, PTMX_MINOR)); - -- /* Ensure dentry has not been deleted by devpts_pty_kill() */ -- dentry = d_find_alias(pts_inode); -- if (!dentry) -- return NULL; -- -- tty = NULL; - if (pts_inode->i_sb->s_magic == DEVPTS_SUPER_MAGIC) -- tty = (struct tty_struct *)pts_inode->i_private; -- -- dput(dentry); -- -- return tty; -+ return (struct tty_struct *)pts_inode->i_private; -+ return NULL; - } - - void devpts_pty_kill(struct tty_struct *tty) -diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c -index 7cb0a59..fbb6e5e 100644 ---- a/fs/ecryptfs/crypto.c -+++ b/fs/ecryptfs/crypto.c -@@ -1748,7 +1748,7 @@ ecryptfs_process_key_cipher(struct crypto_blkcipher **key_tfm, - char *cipher_name, size_t *key_size) - { - char dummy_key[ECRYPTFS_MAX_KEY_BYTES]; -- char *full_alg_name = NULL; -+ char *full_alg_name; - int rc; - - *key_tfm = NULL; -@@ -1763,6 +1763,7 @@ ecryptfs_process_key_cipher(struct crypto_blkcipher **key_tfm, - if (rc) - goto out; - *key_tfm = crypto_alloc_blkcipher(full_alg_name, 0, CRYPTO_ALG_ASYNC); -+ kfree(full_alg_name); - if (IS_ERR(*key_tfm)) { - rc = PTR_ERR(*key_tfm); - printk(KERN_ERR "Unable to allocate crypto cipher with name " -@@ -1785,7 +1786,6 @@ ecryptfs_process_key_cipher(struct crypto_blkcipher **key_tfm, - goto out; - } - out: -- kfree(full_alg_name); - return rc; - } - -diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c -index 1744f17..9e94405 100644 ---- a/fs/ecryptfs/file.c -+++ b/fs/ecryptfs/file.c -@@ -191,6 +191,13 @@ static int ecryptfs_open(struct inode *inode, struct file *file) - | ECRYPTFS_ENCRYPTED); - } - mutex_unlock(&crypt_stat->cs_mutex); -+ if ((ecryptfs_inode_to_private(inode)->lower_file->f_flags & O_RDONLY) -+ && !(file->f_flags & O_RDONLY)) { -+ rc = -EPERM; -+ printk(KERN_WARNING "%s: Lower persistent file is RO; eCryptfs " -+ "file must hence be opened RO\n", __func__); -+ goto out; -+ } - if (!ecryptfs_inode_to_private(inode)->lower_file) { - rc = ecryptfs_init_persistent_file(ecryptfs_dentry); - if (rc) { -@@ -201,13 +208,6 @@ static int ecryptfs_open(struct inode *inode, struct file *file) - goto out; - } - } -- if ((ecryptfs_inode_to_private(inode)->lower_file->f_flags & O_RDONLY) -- && !(file->f_flags & O_RDONLY)) { -- rc = -EPERM; -- printk(KERN_WARNING "%s: Lower persistent file is RO; eCryptfs " -- "file must hence be opened RO\n", __func__); -- goto out; -- } - ecryptfs_set_file_lower( - file, ecryptfs_inode_to_private(inode)->lower_file); - if (S_ISDIR(ecryptfs_dentry->d_inode->i_mode)) { -diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c -index 728f07e..056fed6 100644 ---- a/fs/ecryptfs/inode.c -+++ b/fs/ecryptfs/inode.c -@@ -971,21 
+971,6 @@ out: - return rc; - } - --int ecryptfs_getattr(struct vfsmount *mnt, struct dentry *dentry, -- struct kstat *stat) --{ -- struct kstat lower_stat; -- int rc; -- -- rc = vfs_getattr(ecryptfs_dentry_to_lower_mnt(dentry), -- ecryptfs_dentry_to_lower(dentry), &lower_stat); -- if (!rc) { -- generic_fillattr(dentry->d_inode, stat); -- stat->blocks = lower_stat.blocks; -- } -- return rc; --} -- - int - ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value, - size_t size, int flags) -@@ -1115,7 +1100,6 @@ const struct inode_operations ecryptfs_dir_iops = { - const struct inode_operations ecryptfs_main_iops = { - .permission = ecryptfs_permission, - .setattr = ecryptfs_setattr, -- .getattr = ecryptfs_getattr, - .setxattr = ecryptfs_setxattr, - .getxattr = ecryptfs_getxattr, - .listxattr = ecryptfs_listxattr, -diff --git a/fs/exec.c b/fs/exec.c -index da36c20..606cf96 100644 ---- a/fs/exec.c -+++ b/fs/exec.c -@@ -19,7 +19,7 @@ - * current->executable is only used by the procfs. This allows a dispatch - * table to check for several different types of binary formats. We keep - * trying until we recognize the file or we run out of supported binary -- * formats. -+ * formats. - */ - - #include -@@ -57,6 +57,8 @@ - #include - #include - -+#include -+ - #include - #include - #include -@@ -80,7 +82,7 @@ int __register_binfmt(struct linux_binfmt * fmt, int insert) - insert ? list_add(&fmt->lh, &formats) : - list_add_tail(&fmt->lh, &formats); - write_unlock(&binfmt_lock); -- return 0; -+ return 0; - } - - EXPORT_SYMBOL(__register_binfmt); -@@ -572,9 +574,6 @@ int setup_arg_pages(struct linux_binprm *bprm, - struct vm_area_struct *prev = NULL; - unsigned long vm_flags; - unsigned long stack_base; -- unsigned long stack_size; -- unsigned long stack_expand; -- unsigned long rlim_stack; - - #ifdef CONFIG_STACK_GROWSUP - /* Limit stack size to 1GB */ -@@ -631,24 +630,10 @@ int setup_arg_pages(struct linux_binprm *bprm, - goto out_unlock; - } - -- stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE; -- stack_size = vma->vm_end - vma->vm_start; -- /* -- * Align this down to a page boundary as expand_stack -- * will align it up. 
-- */ -- rlim_stack = rlimit(RLIMIT_STACK) & PAGE_MASK; -- rlim_stack = min(rlim_stack, stack_size); - #ifdef CONFIG_STACK_GROWSUP -- if (stack_size + stack_expand > rlim_stack) -- stack_base = vma->vm_start + rlim_stack; -- else -- stack_base = vma->vm_end + stack_expand; -+ stack_base = vma->vm_end + EXTRA_STACK_VM_PAGES * PAGE_SIZE; - #else -- if (stack_size + stack_expand > rlim_stack) -- stack_base = vma->vm_end - rlim_stack; -- else -- stack_base = vma->vm_start - stack_expand; -+ stack_base = vma->vm_start - EXTRA_STACK_VM_PAGES * PAGE_SIZE; - #endif - ret = expand_stack(vma, stack_base); - if (ret) -@@ -948,7 +933,9 @@ void set_task_comm(struct task_struct *tsk, char *buf) - - int flush_old_exec(struct linux_binprm * bprm) - { -- int retval; -+ char * name; -+ int i, ch, retval; -+ char tcomm[sizeof(current->comm)]; - - /* - * Make sure we have a private signal table and that -@@ -969,25 +956,6 @@ int flush_old_exec(struct linux_binprm * bprm) - - bprm->mm = NULL; /* We're using it now */ - -- current->flags &= ~PF_RANDOMIZE; -- flush_thread(); -- current->personality &= ~bprm->per_clear; -- -- return 0; -- --out: -- return retval; --} --EXPORT_SYMBOL(flush_old_exec); -- --void setup_new_exec(struct linux_binprm * bprm) --{ -- int i, ch; -- char * name; -- char tcomm[sizeof(current->comm)]; -- -- arch_pick_mmap_layout(current->mm); -- - /* This is the point of no return */ - current->sas_ss_sp = current->sas_ss_size = 0; - -@@ -1009,6 +977,9 @@ void setup_new_exec(struct linux_binprm * bprm) - tcomm[i] = '\0'; - set_task_comm(current, tcomm); - -+ current->flags &= ~PF_RANDOMIZE; -+ flush_thread(); -+ - /* Set the new mm task size. We have to do that late because it may - * depend on TIF_32BIT which is only updated in flush_thread() on - * some architectures like powerpc -@@ -1024,6 +995,8 @@ void setup_new_exec(struct linux_binprm * bprm) - set_dumpable(current->mm, suid_dumpable); - } - -+ current->personality &= ~bprm->per_clear; -+ - /* - * Flush performance counters when crossing a - * security domain: -@@ -1035,11 +1008,17 @@ void setup_new_exec(struct linux_binprm * bprm) - group */ - - current->self_exec_id++; -- -+ - flush_signal_handlers(current, 0); - flush_old_files(current->files); -+ -+ return 0; -+ -+out: -+ return retval; - } --EXPORT_SYMBOL(setup_new_exec); -+ -+EXPORT_SYMBOL(flush_old_exec); - - /* - * Prepare credentials and lock ->cred_guard_mutex. -@@ -1125,8 +1104,8 @@ int check_unsafe_exec(struct linux_binprm *bprm) - return res; - } - --/* -- * Fill the binprm structure from the inode. -+/* -+ * Fill the binprm structure from the inode. - * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes - * - * This may be called multiple times for binary chains (scripts for example). 
-@@ -1341,6 +1320,7 @@ int do_execve(char * filename, - goto out_unmark; - - sched_exec(); -+ litmus_exec(); - - bprm->file = file; - bprm->filename = filename; -diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c -index 6f7df0f..6c10f74 100644 ---- a/fs/exofs/inode.c -+++ b/fs/exofs/inode.c -@@ -731,28 +731,13 @@ static int exofs_write_begin_export(struct file *file, - fsdata); - } - --static int exofs_write_end(struct file *file, struct address_space *mapping, -- loff_t pos, unsigned len, unsigned copied, -- struct page *page, void *fsdata) --{ -- struct inode *inode = mapping->host; -- /* According to comment in simple_write_end i_mutex is held */ -- loff_t i_size = inode->i_size; -- int ret; -- -- ret = simple_write_end(file, mapping,pos, len, copied, page, fsdata); -- if (i_size != inode->i_size) -- mark_inode_dirty(inode); -- return ret; --} -- - const struct address_space_operations exofs_aops = { - .readpage = exofs_readpage, - .readpages = exofs_readpages, - .writepage = exofs_writepage, - .writepages = exofs_writepages, - .write_begin = exofs_write_begin_export, -- .write_end = exofs_write_end, -+ .write_end = simple_write_end, - }; - - /****************************************************************************** -diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c -index f9d6937..354ed3b 100644 ---- a/fs/ext3/inode.c -+++ b/fs/ext3/inode.c -@@ -1151,16 +1151,6 @@ static int do_journal_get_write_access(handle_t *handle, - return ext3_journal_get_write_access(handle, bh); - } - --/* -- * Truncate blocks that were not used by write. We have to truncate the -- * pagecache as well so that corresponding buffers get properly unmapped. -- */ --static void ext3_truncate_failed_write(struct inode *inode) --{ -- truncate_inode_pages(inode->i_mapping, inode->i_size); -- ext3_truncate(inode); --} -- - static int ext3_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, - struct page **pagep, void **fsdata) -@@ -1219,7 +1209,7 @@ write_begin_failed: - unlock_page(page); - page_cache_release(page); - if (pos + len > inode->i_size) -- ext3_truncate_failed_write(inode); -+ ext3_truncate(inode); - } - if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries)) - goto retry; -@@ -1314,7 +1304,7 @@ static int ext3_ordered_write_end(struct file *file, - page_cache_release(page); - - if (pos + len > inode->i_size) -- ext3_truncate_failed_write(inode); -+ ext3_truncate(inode); - return ret ? ret : copied; - } - -@@ -1340,7 +1330,7 @@ static int ext3_writeback_write_end(struct file *file, - page_cache_release(page); - - if (pos + len > inode->i_size) -- ext3_truncate_failed_write(inode); -+ ext3_truncate(inode); - return ret ? ret : copied; - } - -@@ -1393,7 +1383,7 @@ static int ext3_journalled_write_end(struct file *file, - page_cache_release(page); - - if (pos + len > inode->i_size) -- ext3_truncate_failed_write(inode); -+ ext3_truncate(inode); - return ret ? 
ret : copied; - } - -diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c -index f3032c9..1d04189 100644 ---- a/fs/ext4/balloc.c -+++ b/fs/ext4/balloc.c -@@ -761,13 +761,7 @@ static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb, - static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb, - ext4_group_t group) - { -- if (!ext4_bg_has_super(sb, group)) -- return 0; -- -- if (EXT4_HAS_INCOMPAT_FEATURE(sb,EXT4_FEATURE_INCOMPAT_META_BG)) -- return le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg); -- else -- return EXT4_SB(sb)->s_gdb_count; -+ return ext4_bg_has_super(sb, group) ? EXT4_SB(sb)->s_gdb_count : 0; - } - - /** -diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c -index dc79b75..50784ef 100644 ---- a/fs/ext4/block_validity.c -+++ b/fs/ext4/block_validity.c -@@ -160,7 +160,7 @@ int ext4_setup_system_zone(struct super_block *sb) - if (ext4_bg_has_super(sb, i) && - ((i < 5) || ((i % flex_size) == 0))) - add_system_zone(sbi, ext4_group_first_block_no(sb, i), -- ext4_bg_num_gdb(sb, i) + 1); -+ sbi->s_gdb_count + 1); - gdp = ext4_get_group_desc(sb, i, NULL); - ret = add_system_zone(sbi, ext4_block_bitmap(sb, gdp), 1); - if (ret) -diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h -index d0a2afb..8825515 100644 ---- a/fs/ext4/ext4.h -+++ b/fs/ext4/ext4.h -@@ -698,22 +698,11 @@ struct ext4_inode_info { - __u16 i_extra_isize; - - spinlock_t i_block_reservation_lock; --#ifdef CONFIG_QUOTA -- /* quota space reservation, managed internally by quota code */ -- qsize_t i_reserved_quota; --#endif - - /* completed async DIOs that might need unwritten extents handling */ - struct list_head i_aio_dio_complete_list; - /* current io_end structure for async DIO write*/ - ext4_io_end_t *cur_aio_dio; -- -- /* -- * Transactions that contain inode's metadata needed to complete -- * fsync and fdatasync, respectively. -- */ -- tid_t i_sync_tid; -- tid_t i_datasync_tid; - }; - - /* -@@ -761,7 +750,6 @@ struct ext4_inode_info { - #define EXT4_MOUNT_DELALLOC 0x8000000 /* Delalloc support */ - #define EXT4_MOUNT_DATA_ERR_ABORT 0x10000000 /* Abort on file data write */ - #define EXT4_MOUNT_BLOCK_VALIDITY 0x20000000 /* Block validity checking */ --#define EXT4_MOUNT_DISCARD 0x40000000 /* Issue DISCARD requests */ - - #define clear_opt(o, opt) o &= ~EXT4_MOUNT_##opt - #define set_opt(o, opt) o |= EXT4_MOUNT_##opt -@@ -1436,7 +1424,7 @@ extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks); - extern int ext4_block_truncate_page(handle_t *handle, - struct address_space *mapping, loff_t from); - extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); --extern qsize_t *ext4_get_reserved_space(struct inode *inode); -+extern qsize_t ext4_get_reserved_space(struct inode *inode); - extern int flush_aio_dio_completed_IO(struct inode *inode); - /* ioctl.c */ - extern long ext4_ioctl(struct file *, unsigned int, unsigned long); -diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h -index 1892a77..a286598 100644 ---- a/fs/ext4/ext4_jbd2.h -+++ b/fs/ext4/ext4_jbd2.h -@@ -49,7 +49,7 @@ - - #define EXT4_DATA_TRANS_BLOCKS(sb) (EXT4_SINGLEDATA_TRANS_BLOCKS(sb) + \ - EXT4_XATTR_TRANS_BLOCKS - 2 + \ -- EXT4_MAXQUOTAS_TRANS_BLOCKS(sb)) -+ 2*EXT4_QUOTA_TRANS_BLOCKS(sb)) - - /* - * Define the number of metadata blocks we need to account to modify data. 
-@@ -57,7 +57,7 @@ - * This include super block, inode block, quota blocks and xattr blocks - */ - #define EXT4_META_TRANS_BLOCKS(sb) (EXT4_XATTR_TRANS_BLOCKS + \ -- EXT4_MAXQUOTAS_TRANS_BLOCKS(sb)) -+ 2*EXT4_QUOTA_TRANS_BLOCKS(sb)) - - /* Delete operations potentially hit one directory's namespace plus an - * entire inode, plus arbitrary amounts of bitmap/indirection data. Be -@@ -92,7 +92,6 @@ - * but inode, sb and group updates are done only once */ - #define EXT4_QUOTA_INIT_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_INIT_ALLOC*\ - (EXT4_SINGLEDATA_TRANS_BLOCKS(sb)-3)+3+DQUOT_INIT_REWRITE) : 0) -- - #define EXT4_QUOTA_DEL_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_DEL_ALLOC*\ - (EXT4_SINGLEDATA_TRANS_BLOCKS(sb)-3)+3+DQUOT_DEL_REWRITE) : 0) - #else -@@ -100,9 +99,6 @@ - #define EXT4_QUOTA_INIT_BLOCKS(sb) 0 - #define EXT4_QUOTA_DEL_BLOCKS(sb) 0 - #endif --#define EXT4_MAXQUOTAS_TRANS_BLOCKS(sb) (MAXQUOTAS*EXT4_QUOTA_TRANS_BLOCKS(sb)) --#define EXT4_MAXQUOTAS_INIT_BLOCKS(sb) (MAXQUOTAS*EXT4_QUOTA_INIT_BLOCKS(sb)) --#define EXT4_MAXQUOTAS_DEL_BLOCKS(sb) (MAXQUOTAS*EXT4_QUOTA_DEL_BLOCKS(sb)) - - int - ext4_mark_iloc_dirty(handle_t *handle, -@@ -258,19 +254,6 @@ static inline int ext4_jbd2_file_inode(handle_t *handle, struct inode *inode) - return 0; - } - --static inline void ext4_update_inode_fsync_trans(handle_t *handle, -- struct inode *inode, -- int datasync) --{ -- struct ext4_inode_info *ei = EXT4_I(inode); -- -- if (ext4_handle_valid(handle)) { -- ei->i_sync_tid = handle->h_transaction->t_tid; -- if (datasync) -- ei->i_datasync_tid = handle->h_transaction->t_tid; -- } --} -- - /* super.c */ - int ext4_force_commit(struct super_block *sb); - -diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c -index 8b8bae4..715264b 100644 ---- a/fs/ext4/extents.c -+++ b/fs/ext4/extents.c -@@ -1761,9 +1761,7 @@ int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block, - while (block < last && block != EXT_MAX_BLOCK) { - num = last - block; - /* find extent for this block */ -- down_read(&EXT4_I(inode)->i_data_sem); - path = ext4_ext_find_extent(inode, block, path); -- up_read(&EXT4_I(inode)->i_data_sem); - if (IS_ERR(path)) { - err = PTR_ERR(path); - path = NULL; -@@ -2076,7 +2074,7 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode, - ext_debug("free last %u blocks starting %llu\n", num, start); - for (i = 0; i < num; i++) { - bh = sb_find_get_block(inode->i_sb, start + i); -- ext4_forget(handle, metadata, inode, bh, start + i); -+ ext4_forget(handle, 0, inode, bh, start + i); - } - ext4_free_blocks(handle, inode, start, num, metadata); - } else if (from == le32_to_cpu(ex->ee_block) -@@ -2169,7 +2167,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, - correct_index = 1; - credits += (ext_depth(inode)) + 1; - } -- credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb); -+ credits += 2 * EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb); - - err = ext4_ext_truncate_extend_restart(handle, inode, credits); - if (err) -@@ -3066,8 +3064,6 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode, - if (flags == EXT4_GET_BLOCKS_DIO_CONVERT_EXT) { - ret = ext4_convert_unwritten_extents_dio(handle, inode, - path); -- if (ret >= 0) -- ext4_update_inode_fsync_trans(handle, inode, 1); - goto out2; - } - /* buffered IO case */ -@@ -3095,8 +3091,6 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode, - ret = ext4_ext_convert_to_initialized(handle, inode, - path, iblock, - max_blocks); -- if (ret >= 0) -- ext4_update_inode_fsync_trans(handle, inode, 1); 
- out: - if (ret <= 0) { - err = ret; -@@ -3335,16 +3329,10 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, - allocated = ext4_ext_get_actual_len(&newex); - set_buffer_new(bh_result); - -- /* -- * Cache the extent and update transaction to commit on fdatasync only -- * when it is _not_ an uninitialized extent. -- */ -- if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) { -+ /* Cache only when it is _not_ an uninitialized extent */ -+ if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) - ext4_ext_put_in_cache(inode, iblock, allocated, newblock, - EXT4_EXT_CACHE_EXTENT); -- ext4_update_inode_fsync_trans(handle, inode, 1); -- } else -- ext4_update_inode_fsync_trans(handle, inode, 0); - out: - if (allocated > max_blocks) - allocated = max_blocks; -@@ -3732,8 +3720,10 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, - * Walk the extent tree gathering extent information. - * ext4_ext_fiemap_cb will push extents back to user. - */ -+ down_read(&EXT4_I(inode)->i_data_sem); - error = ext4_ext_walk_space(inode, start_blk, len_blks, - ext4_ext_fiemap_cb, fieinfo); -+ up_read(&EXT4_I(inode)->i_data_sem); - } - - return error; -diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c -index d6049e4..2b15312 100644 ---- a/fs/ext4/fsync.c -+++ b/fs/ext4/fsync.c -@@ -51,30 +51,25 @@ - int ext4_sync_file(struct file *file, struct dentry *dentry, int datasync) - { - struct inode *inode = dentry->d_inode; -- struct ext4_inode_info *ei = EXT4_I(inode); - journal_t *journal = EXT4_SB(inode->i_sb)->s_journal; -- int ret; -- tid_t commit_tid; -+ int err, ret = 0; - - J_ASSERT(ext4_journal_current_handle() == NULL); - - trace_ext4_sync_file(file, dentry, datasync); - -- if (inode->i_sb->s_flags & MS_RDONLY) -- return 0; -- - ret = flush_aio_dio_completed_IO(inode); - if (ret < 0) -- return ret; -- -- if (!journal) -- return simple_fsync(file, dentry, datasync); -- -+ goto out; - /* -- * data=writeback,ordered: -+ * data=writeback: - * The caller's filemap_fdatawrite()/wait will sync the data. -- * Metadata is in the journal, we wait for proper transaction to -- * commit here. -+ * sync_inode() will sync the metadata -+ * -+ * data=ordered: -+ * The caller's filemap_fdatawrite() will write the data and -+ * sync_inode() will write the inode if it is dirty. Then the caller's -+ * filemap_fdatawait() will wait on the pages. - * - * data=journal: - * filemap_fdatawrite won't do anything (the buffers are clean). -@@ -84,13 +79,32 @@ int ext4_sync_file(struct file *file, struct dentry *dentry, int datasync) - * (they were dirtied by commit). But that's OK - the blocks are - * safe in-journal, which is all fsync() needs to ensure. - */ -- if (ext4_should_journal_data(inode)) -- return ext4_force_commit(inode->i_sb); -+ if (ext4_should_journal_data(inode)) { -+ ret = ext4_force_commit(inode->i_sb); -+ goto out; -+ } - -- commit_tid = datasync ? ei->i_datasync_tid : ei->i_sync_tid; -- if (jbd2_log_start_commit(journal, commit_tid)) -- jbd2_log_wait_commit(journal, commit_tid); -- else if (journal->j_flags & JBD2_BARRIER) -+ if (!journal) -+ ret = sync_mapping_buffers(inode->i_mapping); -+ -+ if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) -+ goto out; -+ -+ /* -+ * The VFS has written the file data. If the inode is unaltered -+ * then we need not start a commit. 
-+ */ -+ if (inode->i_state & (I_DIRTY_SYNC|I_DIRTY_DATASYNC)) { -+ struct writeback_control wbc = { -+ .sync_mode = WB_SYNC_ALL, -+ .nr_to_write = 0, /* sys_fsync did this */ -+ }; -+ err = sync_inode(inode, &wbc); -+ if (ret == 0) -+ ret = err; -+ } -+out: -+ if (journal && (journal->j_flags & JBD2_BARRIER)) - blkdev_issue_flush(inode->i_sb->s_bdev, NULL); - return ret; - } -diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c -index e233879..2c8caa5 100644 ---- a/fs/ext4/inode.c -+++ b/fs/ext4/inode.c -@@ -1021,12 +1021,10 @@ static int ext4_ind_get_blocks(handle_t *handle, struct inode *inode, - if (!err) - err = ext4_splice_branch(handle, inode, iblock, - partial, indirect_blks, count); -- if (err) -+ else - goto cleanup; - - set_buffer_new(bh_result); -- -- ext4_update_inode_fsync_trans(handle, inode, 1); - got_it: - map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key)); - if (count > blocks_to_boundary) -@@ -1045,12 +1043,17 @@ out: - return err; - } - --#ifdef CONFIG_QUOTA --qsize_t *ext4_get_reserved_space(struct inode *inode) -+qsize_t ext4_get_reserved_space(struct inode *inode) - { -- return &EXT4_I(inode)->i_reserved_quota; -+ unsigned long long total; -+ -+ spin_lock(&EXT4_I(inode)->i_block_reservation_lock); -+ total = EXT4_I(inode)->i_reserved_data_blocks + -+ EXT4_I(inode)->i_reserved_meta_blocks; -+ spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); -+ -+ return total; - } --#endif - /* - * Calculate the number of metadata blocks need to reserve - * to allocate @blocks for non extent file based file -@@ -1531,16 +1534,6 @@ static int do_journal_get_write_access(handle_t *handle, - return ext4_journal_get_write_access(handle, bh); - } - --/* -- * Truncate blocks that were not used by write. We have to truncate the -- * pagecache as well so that corresponding buffers get properly unmapped. 
-- */ --static void ext4_truncate_failed_write(struct inode *inode) --{ -- truncate_inode_pages(inode->i_mapping, inode->i_size); -- ext4_truncate(inode); --} -- - static int ext4_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, - struct page **pagep, void **fsdata) -@@ -1606,7 +1599,7 @@ retry: - - ext4_journal_stop(handle); - if (pos + len > inode->i_size) { -- ext4_truncate_failed_write(inode); -+ ext4_truncate(inode); - /* - * If truncate failed early the inode might - * still be on the orphan list; we need to -@@ -1716,7 +1709,7 @@ static int ext4_ordered_write_end(struct file *file, - ret = ret2; - - if (pos + len > inode->i_size) { -- ext4_truncate_failed_write(inode); -+ ext4_truncate(inode); - /* - * If truncate failed early the inode might still be - * on the orphan list; we need to make sure the inode -@@ -1758,7 +1751,7 @@ static int ext4_writeback_write_end(struct file *file, - ret = ret2; - - if (pos + len > inode->i_size) { -- ext4_truncate_failed_write(inode); -+ ext4_truncate(inode); - /* - * If truncate failed early the inode might still be - * on the orphan list; we need to make sure the inode -@@ -1821,7 +1814,7 @@ static int ext4_journalled_write_end(struct file *file, - if (!ret) - ret = ret2; - if (pos + len > inode->i_size) { -- ext4_truncate_failed_write(inode); -+ ext4_truncate(inode); - /* - * If truncate failed early the inode might still be - * on the orphan list; we need to make sure the inode -@@ -1853,17 +1846,19 @@ repeat: - - md_needed = mdblocks - EXT4_I(inode)->i_reserved_meta_blocks; - total = md_needed + nrblocks; -- spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); - - /* - * Make quota reservation here to prevent quota overflow - * later. Real quota accounting is done at pages writeout - * time. - */ -- if (vfs_dq_reserve_block(inode, total)) -+ if (vfs_dq_reserve_block(inode, total)) { -+ spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); - return -EDQUOT; -+ } - - if (ext4_claim_free_blocks(sbi, total)) { -+ spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); - vfs_dq_release_reservation_block(inode, total); - if (ext4_should_retry_alloc(inode->i_sb, &retries)) { - yield(); -@@ -1871,11 +1866,10 @@ repeat: - } - return -ENOSPC; - } -- spin_lock(&EXT4_I(inode)->i_block_reservation_lock); - EXT4_I(inode)->i_reserved_data_blocks += nrblocks; -- EXT4_I(inode)->i_reserved_meta_blocks += md_needed; -- spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); -+ EXT4_I(inode)->i_reserved_meta_blocks = mdblocks; - -+ spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); - return 0; /* success */ - } - -@@ -2794,7 +2788,7 @@ static int ext4_da_writepages_trans_blocks(struct inode *inode) - * number of contiguous block. So we will limit - * number of contiguous block to a sane value - */ -- if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) && -+ if (!(inode->i_flags & EXT4_EXTENTS_FL) && - (max_blocks > EXT4_MAX_TRANS_DATA)) - max_blocks = EXT4_MAX_TRANS_DATA; - -@@ -3097,7 +3091,7 @@ retry: - * i_size_read because we hold i_mutex. 
- */ - if (pos + len > inode->i_size) -- ext4_truncate_failed_write(inode); -+ ext4_truncate(inode); - } - - if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) -@@ -4126,8 +4120,6 @@ static void ext4_clear_blocks(handle_t *handle, struct inode *inode, - __le32 *last) - { - __le32 *p; -- int is_metadata = S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode); -- - if (try_to_extend_transaction(handle, inode)) { - if (bh) { - BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); -@@ -4158,11 +4150,11 @@ static void ext4_clear_blocks(handle_t *handle, struct inode *inode, - - *p = 0; - tbh = sb_find_get_block(inode->i_sb, nr); -- ext4_forget(handle, is_metadata, inode, tbh, nr); -+ ext4_forget(handle, 0, inode, tbh, nr); - } - } - -- ext4_free_blocks(handle, inode, block_to_free, count, is_metadata); -+ ext4_free_blocks(handle, inode, block_to_free, count, 0); - } - - /** -@@ -4789,8 +4781,8 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) - struct ext4_iloc iloc; - struct ext4_inode *raw_inode; - struct ext4_inode_info *ei; -+ struct buffer_head *bh; - struct inode *inode; -- journal_t *journal = EXT4_SB(sb)->s_journal; - long ret; - int block; - -@@ -4801,11 +4793,11 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) - return inode; - - ei = EXT4_I(inode); -- iloc.bh = 0; - - ret = __ext4_get_inode_loc(inode, &iloc, 0); - if (ret < 0) - goto bad_inode; -+ bh = iloc.bh; - raw_inode = ext4_raw_inode(&iloc); - inode->i_mode = le16_to_cpu(raw_inode->i_mode); - inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low); -@@ -4828,6 +4820,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) - if (inode->i_mode == 0 || - !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) { - /* this inode is deleted */ -+ brelse(bh); - ret = -ESTALE; - goto bad_inode; - } -@@ -4844,9 +4837,6 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) - ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32; - inode->i_size = ext4_isize(raw_inode); - ei->i_disksize = inode->i_size; --#ifdef CONFIG_QUOTA -- ei->i_reserved_quota = 0; --#endif - inode->i_generation = le32_to_cpu(raw_inode->i_generation); - ei->i_block_group = iloc.block_group; - ei->i_last_alloc_group = ~0; -@@ -4858,35 +4848,11 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) - ei->i_data[block] = raw_inode->i_block[block]; - INIT_LIST_HEAD(&ei->i_orphan); - -- /* -- * Set transaction id's of transactions that have to be committed -- * to finish f[data]sync. We set them to currently running transaction -- * as we cannot be sure that the inode or some of its metadata isn't -- * part of the transaction - the inode could have been reclaimed and -- * now it is reread from disk. 
-- */ -- if (journal) { -- transaction_t *transaction; -- tid_t tid; -- -- spin_lock(&journal->j_state_lock); -- if (journal->j_running_transaction) -- transaction = journal->j_running_transaction; -- else -- transaction = journal->j_committing_transaction; -- if (transaction) -- tid = transaction->t_tid; -- else -- tid = journal->j_commit_sequence; -- spin_unlock(&journal->j_state_lock); -- ei->i_sync_tid = tid; -- ei->i_datasync_tid = tid; -- } -- - if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { - ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize); - if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > - EXT4_INODE_SIZE(inode->i_sb)) { -+ brelse(bh); - ret = -EIO; - goto bad_inode; - } -@@ -4918,7 +4884,10 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) - - ret = 0; - if (ei->i_file_acl && -- !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) { -+ ((ei->i_file_acl < -+ (le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block) + -+ EXT4_SB(sb)->s_gdb_count)) || -+ (ei->i_file_acl >= ext4_blocks_count(EXT4_SB(sb)->s_es)))) { - ext4_error(sb, __func__, - "bad extended attribute block %llu in inode #%lu", - ei->i_file_acl, inode->i_ino); -@@ -4936,8 +4905,10 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) - /* Validate block references which are part of inode */ - ret = ext4_check_inode_blockref(inode); - } -- if (ret) -+ if (ret) { -+ brelse(bh); - goto bad_inode; -+ } - - if (S_ISREG(inode->i_mode)) { - inode->i_op = &ext4_file_inode_operations; -@@ -4965,6 +4936,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) - init_special_inode(inode, inode->i_mode, - new_decode_dev(le32_to_cpu(raw_inode->i_block[1]))); - } else { -+ brelse(bh); - ret = -EIO; - ext4_error(inode->i_sb, __func__, - "bogus i_mode (%o) for inode=%lu", -@@ -4977,7 +4949,6 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) - return inode; - - bad_inode: -- brelse(iloc.bh); - iget_failed(inode); - return ERR_PTR(ret); - } -@@ -5137,7 +5108,6 @@ static int ext4_do_update_inode(handle_t *handle, - err = rc; - ei->i_state &= ~EXT4_STATE_NEW; - -- ext4_update_inode_fsync_trans(handle, inode, 0); - out_brelse: - brelse(bh); - ext4_std_error(inode->i_sb, err); -@@ -5257,8 +5227,8 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr) - - /* (user+group)*(old+new) structure, inode write (sb, - * inode block, ? 
- but truncate inode update has it) */ -- handle = ext4_journal_start(inode, (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)+ -- EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb))+3); -+ handle = ext4_journal_start(inode, 2*(EXT4_QUOTA_INIT_BLOCKS(inode->i_sb)+ -+ EXT4_QUOTA_DEL_BLOCKS(inode->i_sb))+3); - if (IS_ERR(handle)) { - error = PTR_ERR(handle); - goto err_out; -diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c -index b63d193..c1cdf61 100644 ---- a/fs/ext4/ioctl.c -+++ b/fs/ext4/ioctl.c -@@ -221,38 +221,31 @@ setversion_out: - struct file *donor_filp; - int err; - -- if (!(filp->f_mode & FMODE_READ) || -- !(filp->f_mode & FMODE_WRITE)) -- return -EBADF; -- - if (copy_from_user(&me, - (struct move_extent __user *)arg, sizeof(me))) - return -EFAULT; -- me.moved_len = 0; - - donor_filp = fget(me.donor_fd); - if (!donor_filp) - return -EBADF; - -- if (!(donor_filp->f_mode & FMODE_WRITE)) { -- err = -EBADF; -- goto mext_out; -+ if (!capable(CAP_DAC_OVERRIDE)) { -+ if ((current->real_cred->fsuid != inode->i_uid) || -+ !(inode->i_mode & S_IRUSR) || -+ !(donor_filp->f_dentry->d_inode->i_mode & -+ S_IRUSR)) { -+ fput(donor_filp); -+ return -EACCES; -+ } - } - -- err = mnt_want_write(filp->f_path.mnt); -- if (err) -- goto mext_out; -- - err = ext4_move_extents(filp, donor_filp, me.orig_start, - me.donor_start, me.len, &me.moved_len); -- mnt_drop_write(filp->f_path.mnt); -- if (me.moved_len > 0) -- file_remove_suid(donor_filp); -+ fput(donor_filp); - - if (copy_to_user((struct move_extent *)arg, &me, sizeof(me))) -- err = -EFAULT; --mext_out: -- fput(donor_filp); -+ return -EFAULT; -+ - return err; - } - -diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c -index 7d71148..bba1282 100644 ---- a/fs/ext4/mballoc.c -+++ b/fs/ext4/mballoc.c -@@ -2529,6 +2529,7 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn) - struct ext4_group_info *db; - int err, count = 0, count2 = 0; - struct ext4_free_data *entry; -+ ext4_fsblk_t discard_block; - struct list_head *l, *ltmp; - - list_for_each_safe(l, ltmp, &txn->t_private_list) { -@@ -2558,19 +2559,13 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn) - page_cache_release(e4b.bd_bitmap_page); - } - ext4_unlock_group(sb, entry->group); -- if (test_opt(sb, DISCARD)) { -- ext4_fsblk_t discard_block; -- struct ext4_super_block *es = EXT4_SB(sb)->s_es; -- -- discard_block = (ext4_fsblk_t)entry->group * -- EXT4_BLOCKS_PER_GROUP(sb) -- + entry->start_blk -- + le32_to_cpu(es->s_first_data_block); -- trace_ext4_discard_blocks(sb, -- (unsigned long long)discard_block, -- entry->count); -- sb_issue_discard(sb, discard_block, entry->count); -- } -+ discard_block = (ext4_fsblk_t) entry->group * EXT4_BLOCKS_PER_GROUP(sb) -+ + entry->start_blk -+ + le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block); -+ trace_ext4_discard_blocks(sb, (unsigned long long)discard_block, -+ entry->count); -+ sb_issue_discard(sb, discard_block, entry->count); -+ - kmem_cache_free(ext4_free_ext_cachep, entry); - ext4_mb_release_desc(&e4b); - } -@@ -3011,24 +3006,6 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac) - } - - /* -- * Called on failure; free up any blocks from the inode PA for this -- * context. We don't need this for MB_GROUP_PA because we only change -- * pa_free in ext4_mb_release_context(), but on failure, we've already -- * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed. 
-- */ --static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac) --{ -- struct ext4_prealloc_space *pa = ac->ac_pa; -- int len; -- -- if (pa && pa->pa_type == MB_INODE_PA) { -- len = ac->ac_b_ex.fe_len; -- pa->pa_free += len; -- } -- --} -- --/* - * use blocks preallocated to inode - */ - static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac, -@@ -4313,7 +4290,6 @@ repeat: - ac->ac_status = AC_STATUS_CONTINUE; - goto repeat; - } else if (*errp) { -- ext4_discard_allocated_blocks(ac); - ac->ac_b_ex.fe_len = 0; - ar->len = 0; - ext4_mb_show_ac(ac); -diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c -index 8646149..a93d5b8 100644 ---- a/fs/ext4/migrate.c -+++ b/fs/ext4/migrate.c -@@ -238,7 +238,7 @@ static int extend_credit_for_blkdel(handle_t *handle, struct inode *inode) - * So allocate a credit of 3. We may update - * quota (user and group). - */ -- needed = 3 + EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb); -+ needed = 3 + 2*EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb); - - if (ext4_journal_extend(handle, needed) != 0) - retval = ext4_journal_restart(handle, needed); -@@ -477,7 +477,7 @@ int ext4_ext_migrate(struct inode *inode) - handle = ext4_journal_start(inode, - EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + - EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 + -- EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb) -+ 2 * EXT4_QUOTA_INIT_BLOCKS(inode->i_sb) - + 1); - if (IS_ERR(handle)) { - retval = PTR_ERR(handle); -diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c -index f5b03a1..25b6b14 100644 ---- a/fs/ext4/move_extent.c -+++ b/fs/ext4/move_extent.c -@@ -77,14 +77,12 @@ static int - mext_next_extent(struct inode *inode, struct ext4_ext_path *path, - struct ext4_extent **extent) - { -- struct ext4_extent_header *eh; - int ppos, leaf_ppos = path->p_depth; - - ppos = leaf_ppos; - if (EXT_LAST_EXTENT(path[ppos].p_hdr) > path[ppos].p_ext) { - /* leaf block */ - *extent = ++path[ppos].p_ext; -- path[ppos].p_block = ext_pblock(path[ppos].p_ext); - return 0; - } - -@@ -121,18 +119,9 @@ mext_next_extent(struct inode *inode, struct ext4_ext_path *path, - ext_block_hdr(path[cur_ppos+1].p_bh); - } - -- path[leaf_ppos].p_ext = *extent = NULL; -- -- eh = path[leaf_ppos].p_hdr; -- if (le16_to_cpu(eh->eh_entries) == 0) -- /* empty leaf is found */ -- return -ENODATA; -- - /* leaf block */ - path[leaf_ppos].p_ext = *extent = - EXT_FIRST_EXTENT(path[leaf_ppos].p_hdr); -- path[leaf_ppos].p_block = -- ext_pblock(path[leaf_ppos].p_ext); - return 0; - } - } -@@ -166,15 +155,40 @@ mext_check_null_inode(struct inode *inode1, struct inode *inode2, - } - - /** -- * double_down_write_data_sem - Acquire two inodes' write lock of i_data_sem -+ * mext_double_down_read - Acquire two inodes' read semaphore -+ * -+ * @orig_inode: original inode structure -+ * @donor_inode: donor inode structure -+ * Acquire read semaphore of the two inodes (orig and donor) by i_ino order. -+ */ -+static void -+mext_double_down_read(struct inode *orig_inode, struct inode *donor_inode) -+{ -+ struct inode *first = orig_inode, *second = donor_inode; -+ -+ /* -+ * Use the inode number to provide the stable locking order instead -+ * of its address, because the C language doesn't guarantee you can -+ * compare pointers that don't come from the same array. 
-+ */ -+ if (donor_inode->i_ino < orig_inode->i_ino) { -+ first = donor_inode; -+ second = orig_inode; -+ } -+ -+ down_read(&EXT4_I(first)->i_data_sem); -+ down_read(&EXT4_I(second)->i_data_sem); -+} -+ -+/** -+ * mext_double_down_write - Acquire two inodes' write semaphore - * - * @orig_inode: original inode structure - * @donor_inode: donor inode structure -- * Acquire write lock of i_data_sem of the two inodes (orig and donor) by -- * i_ino order. -+ * Acquire write semaphore of the two inodes (orig and donor) by i_ino order. - */ - static void --double_down_write_data_sem(struct inode *orig_inode, struct inode *donor_inode) -+mext_double_down_write(struct inode *orig_inode, struct inode *donor_inode) - { - struct inode *first = orig_inode, *second = donor_inode; - -@@ -189,18 +203,32 @@ double_down_write_data_sem(struct inode *orig_inode, struct inode *donor_inode) - } - - down_write(&EXT4_I(first)->i_data_sem); -- down_write_nested(&EXT4_I(second)->i_data_sem, SINGLE_DEPTH_NESTING); -+ down_write(&EXT4_I(second)->i_data_sem); - } - - /** -- * double_up_write_data_sem - Release two inodes' write lock of i_data_sem -+ * mext_double_up_read - Release two inodes' read semaphore - * - * @orig_inode: original inode structure to be released its lock first - * @donor_inode: donor inode structure to be released its lock second -- * Release write lock of i_data_sem of two inodes (orig and donor). -+ * Release read semaphore of two inodes (orig and donor). - */ - static void --double_up_write_data_sem(struct inode *orig_inode, struct inode *donor_inode) -+mext_double_up_read(struct inode *orig_inode, struct inode *donor_inode) -+{ -+ up_read(&EXT4_I(orig_inode)->i_data_sem); -+ up_read(&EXT4_I(donor_inode)->i_data_sem); -+} -+ -+/** -+ * mext_double_up_write - Release two inodes' write semaphore -+ * -+ * @orig_inode: original inode structure to be released its lock first -+ * @donor_inode: donor inode structure to be released its lock second -+ * Release write semaphore of two inodes (orig and donor). -+ */ -+static void -+mext_double_up_write(struct inode *orig_inode, struct inode *donor_inode) - { - up_write(&EXT4_I(orig_inode)->i_data_sem); - up_write(&EXT4_I(donor_inode)->i_data_sem); -@@ -633,7 +661,6 @@ mext_calc_swap_extents(struct ext4_extent *tmp_dext, - * @donor_inode: donor inode - * @from: block offset of orig_inode - * @count: block count to be replaced -- * @err: pointer to save return value - * - * Replace original inode extents and donor inode extents page by page. - * We implement this replacement in the following three steps: -@@ -644,33 +671,33 @@ mext_calc_swap_extents(struct ext4_extent *tmp_dext, - * 3. Change the block information of donor inode to point at the saved - * original inode blocks in the dummy extents. - * -- * Return replaced block count. -+ * Return 0 on success, or a negative error value on failure. 
- */ - static int - mext_replace_branches(handle_t *handle, struct inode *orig_inode, - struct inode *donor_inode, ext4_lblk_t from, -- ext4_lblk_t count, int *err) -+ ext4_lblk_t count) - { - struct ext4_ext_path *orig_path = NULL; - struct ext4_ext_path *donor_path = NULL; - struct ext4_extent *oext, *dext; - struct ext4_extent tmp_dext, tmp_oext; - ext4_lblk_t orig_off = from, donor_off = from; -+ int err = 0; - int depth; - int replaced_count = 0; - int dext_alen; - -- /* Protect extent trees against block allocations via delalloc */ -- double_down_write_data_sem(orig_inode, donor_inode); -+ mext_double_down_write(orig_inode, donor_inode); - - /* Get the original extent for the block "orig_off" */ -- *err = get_ext_path(orig_inode, orig_off, &orig_path); -- if (*err) -+ err = get_ext_path(orig_inode, orig_off, &orig_path); -+ if (err) - goto out; - - /* Get the donor extent for the head */ -- *err = get_ext_path(donor_inode, donor_off, &donor_path); -- if (*err) -+ err = get_ext_path(donor_inode, donor_off, &donor_path); -+ if (err) - goto out; - depth = ext_depth(orig_inode); - oext = orig_path[depth].p_ext; -@@ -680,9 +707,9 @@ mext_replace_branches(handle_t *handle, struct inode *orig_inode, - dext = donor_path[depth].p_ext; - tmp_dext = *dext; - -- *err = mext_calc_swap_extents(&tmp_dext, &tmp_oext, orig_off, -+ err = mext_calc_swap_extents(&tmp_dext, &tmp_oext, orig_off, - donor_off, count); -- if (*err) -+ if (err) - goto out; - - /* Loop for the donor extents */ -@@ -691,7 +718,7 @@ mext_replace_branches(handle_t *handle, struct inode *orig_inode, - if (!dext) { - ext4_error(donor_inode->i_sb, __func__, - "The extent for donor must be found"); -- *err = -EIO; -+ err = -EIO; - goto out; - } else if (donor_off != le32_to_cpu(tmp_dext.ee_block)) { - ext4_error(donor_inode->i_sb, __func__, -@@ -699,20 +726,20 @@ mext_replace_branches(handle_t *handle, struct inode *orig_inode, - "extent(%u) should be equal", - donor_off, - le32_to_cpu(tmp_dext.ee_block)); -- *err = -EIO; -+ err = -EIO; - goto out; - } - - /* Set donor extent to orig extent */ -- *err = mext_leaf_block(handle, orig_inode, -+ err = mext_leaf_block(handle, orig_inode, - orig_path, &tmp_dext, &orig_off); -- if (*err) -+ if (err < 0) - goto out; - - /* Set orig extent to donor extent */ -- *err = mext_leaf_block(handle, donor_inode, -+ err = mext_leaf_block(handle, donor_inode, - donor_path, &tmp_oext, &donor_off); -- if (*err) -+ if (err < 0) - goto out; - - dext_alen = ext4_ext_get_actual_len(&tmp_dext); -@@ -726,25 +753,35 @@ mext_replace_branches(handle_t *handle, struct inode *orig_inode, - - if (orig_path) - ext4_ext_drop_refs(orig_path); -- *err = get_ext_path(orig_inode, orig_off, &orig_path); -- if (*err) -+ err = get_ext_path(orig_inode, orig_off, &orig_path); -+ if (err) - goto out; - depth = ext_depth(orig_inode); - oext = orig_path[depth].p_ext; -+ if (le32_to_cpu(oext->ee_block) + -+ ext4_ext_get_actual_len(oext) <= orig_off) { -+ err = 0; -+ goto out; -+ } - tmp_oext = *oext; - - if (donor_path) - ext4_ext_drop_refs(donor_path); -- *err = get_ext_path(donor_inode, donor_off, &donor_path); -- if (*err) -+ err = get_ext_path(donor_inode, donor_off, &donor_path); -+ if (err) - goto out; - depth = ext_depth(donor_inode); - dext = donor_path[depth].p_ext; -+ if (le32_to_cpu(dext->ee_block) + -+ ext4_ext_get_actual_len(dext) <= donor_off) { -+ err = 0; -+ goto out; -+ } - tmp_dext = *dext; - -- *err = mext_calc_swap_extents(&tmp_dext, &tmp_oext, orig_off, -+ err = mext_calc_swap_extents(&tmp_dext, &tmp_oext, 
orig_off, - donor_off, count - replaced_count); -- if (*err) -+ if (err) - goto out; - } - -@@ -758,12 +795,8 @@ out: - kfree(donor_path); - } - -- ext4_ext_invalidate_cache(orig_inode); -- ext4_ext_invalidate_cache(donor_inode); -- -- double_up_write_data_sem(orig_inode, donor_inode); -- -- return replaced_count; -+ mext_double_up_write(orig_inode, donor_inode); -+ return err; - } - - /** -@@ -775,17 +808,16 @@ out: - * @data_offset_in_page: block index where data swapping starts - * @block_len_in_page: the number of blocks to be swapped - * @uninit: orig extent is uninitialized or not -- * @err: pointer to save return value - * - * Save the data in original inode blocks and replace original inode extents - * with donor inode extents by calling mext_replace_branches(). -- * Finally, write out the saved data in new original inode blocks. Return -- * replaced block count. -+ * Finally, write out the saved data in new original inode blocks. Return 0 -+ * on success, or a negative error value on failure. - */ - static int - move_extent_per_page(struct file *o_filp, struct inode *donor_inode, - pgoff_t orig_page_offset, int data_offset_in_page, -- int block_len_in_page, int uninit, int *err) -+ int block_len_in_page, int uninit) - { - struct inode *orig_inode = o_filp->f_dentry->d_inode; - struct address_space *mapping = orig_inode->i_mapping; -@@ -797,11 +829,9 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode, - long long offs = orig_page_offset << PAGE_CACHE_SHIFT; - unsigned long blocksize = orig_inode->i_sb->s_blocksize; - unsigned int w_flags = 0; -- unsigned int tmp_data_size, data_size, replaced_size; -+ unsigned int tmp_data_len, data_len; - void *fsdata; -- int i, jblocks; -- int err2 = 0; -- int replaced_count = 0; -+ int ret, i, jblocks; - int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits; - - /* -@@ -811,8 +841,8 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode, - jblocks = ext4_writepage_trans_blocks(orig_inode) * 2; - handle = ext4_journal_start(orig_inode, jblocks); - if (IS_ERR(handle)) { -- *err = PTR_ERR(handle); -- return 0; -+ ret = PTR_ERR(handle); -+ return ret; - } - - if (segment_eq(get_fs(), KERNEL_DS)) -@@ -828,36 +858,39 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode, - * Just swap data blocks between orig and donor. - */ - if (uninit) { -- replaced_count = mext_replace_branches(handle, orig_inode, -- donor_inode, orig_blk_offset, -- block_len_in_page, err); -+ ret = mext_replace_branches(handle, orig_inode, -+ donor_inode, orig_blk_offset, -+ block_len_in_page); -+ -+ /* Clear the inode cache not to refer to the old data */ -+ ext4_ext_invalidate_cache(orig_inode); -+ ext4_ext_invalidate_cache(donor_inode); - goto out2; - } - - offs = (long long)orig_blk_offset << orig_inode->i_blkbits; - -- /* Calculate data_size */ -+ /* Calculate data_len */ - if ((orig_blk_offset + block_len_in_page - 1) == - ((orig_inode->i_size - 1) >> orig_inode->i_blkbits)) { - /* Replace the last block */ -- tmp_data_size = orig_inode->i_size & (blocksize - 1); -+ tmp_data_len = orig_inode->i_size & (blocksize - 1); - /* -- * If data_size equal zero, it shows data_size is multiples of -+ * If data_len equal zero, it shows data_len is multiples of - * blocksize. So we set appropriate value. 
- */ -- if (tmp_data_size == 0) -- tmp_data_size = blocksize; -+ if (tmp_data_len == 0) -+ tmp_data_len = blocksize; - -- data_size = tmp_data_size + -+ data_len = tmp_data_len + - ((block_len_in_page - 1) << orig_inode->i_blkbits); -- } else -- data_size = block_len_in_page << orig_inode->i_blkbits; -- -- replaced_size = data_size; -+ } else { -+ data_len = block_len_in_page << orig_inode->i_blkbits; -+ } - -- *err = a_ops->write_begin(o_filp, mapping, offs, data_size, w_flags, -+ ret = a_ops->write_begin(o_filp, mapping, offs, data_len, w_flags, - &page, &fsdata); -- if (unlikely(*err < 0)) -+ if (unlikely(ret < 0)) - goto out; - - if (!PageUptodate(page)) { -@@ -878,17 +911,14 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode, - /* Release old bh and drop refs */ - try_to_release_page(page, 0); - -- replaced_count = mext_replace_branches(handle, orig_inode, donor_inode, -- orig_blk_offset, block_len_in_page, -- &err2); -- if (err2) { -- if (replaced_count) { -- block_len_in_page = replaced_count; -- replaced_size = -- block_len_in_page << orig_inode->i_blkbits; -- } else -- goto out; -- } -+ ret = mext_replace_branches(handle, orig_inode, donor_inode, -+ orig_blk_offset, block_len_in_page); -+ if (ret < 0) -+ goto out; -+ -+ /* Clear the inode cache not to refer to the old data */ -+ ext4_ext_invalidate_cache(orig_inode); -+ ext4_ext_invalidate_cache(donor_inode); - - if (!page_has_buffers(page)) - create_empty_buffers(page, 1 << orig_inode->i_blkbits, 0); -@@ -898,16 +928,16 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode, - bh = bh->b_this_page; - - for (i = 0; i < block_len_in_page; i++) { -- *err = ext4_get_block(orig_inode, -+ ret = ext4_get_block(orig_inode, - (sector_t)(orig_blk_offset + i), bh, 0); -- if (*err < 0) -+ if (ret < 0) - goto out; - - if (bh->b_this_page != NULL) - bh = bh->b_this_page; - } - -- *err = a_ops->write_end(o_filp, mapping, offs, data_size, replaced_size, -+ ret = a_ops->write_end(o_filp, mapping, offs, data_len, data_len, - page, fsdata); - page = NULL; - -@@ -921,10 +951,7 @@ out: - out2: - ext4_journal_stop(handle); - -- if (err2) -- *err = err2; -- -- return replaced_count; -+ return ret < 0 ? ret : 0; - } - - /** -@@ -935,6 +962,7 @@ out2: - * @orig_start: logical start offset in block for orig - * @donor_start: logical start offset in block for donor - * @len: the number of blocks to be moved -+ * @moved_len: moved block length - * - * Check the arguments of ext4_move_extents() whether the files can be - * exchanged with each other. 
-@@ -942,8 +970,8 @@ out2: - */ - static int - mext_check_arguments(struct inode *orig_inode, -- struct inode *donor_inode, __u64 orig_start, -- __u64 donor_start, __u64 *len) -+ struct inode *donor_inode, __u64 orig_start, -+ __u64 donor_start, __u64 *len, __u64 moved_len) - { - ext4_lblk_t orig_blocks, donor_blocks; - unsigned int blkbits = orig_inode->i_blkbits; -@@ -957,13 +985,6 @@ mext_check_arguments(struct inode *orig_inode, - return -EINVAL; - } - -- if (donor_inode->i_mode & (S_ISUID|S_ISGID)) { -- ext4_debug("ext4 move extent: suid or sgid is set" -- " to donor file [ino:orig %lu, donor %lu]\n", -- orig_inode->i_ino, donor_inode->i_ino); -- return -EINVAL; -- } -- - /* Ext4 move extent does not support swapfile */ - if (IS_SWAPFILE(orig_inode) || IS_SWAPFILE(donor_inode)) { - ext4_debug("ext4 move extent: The argument files should " -@@ -1004,6 +1025,13 @@ mext_check_arguments(struct inode *orig_inode, - return -EINVAL; - } - -+ if (moved_len) { -+ ext4_debug("ext4 move extent: moved_len should be 0 " -+ "[ino:orig %lu, donor %lu]\n", orig_inode->i_ino, -+ donor_inode->i_ino); -+ return -EINVAL; -+ } -+ - if ((orig_start > EXT_MAX_BLOCK) || - (donor_start > EXT_MAX_BLOCK) || - (*len > EXT_MAX_BLOCK) || -@@ -1204,16 +1232,16 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, - return -EINVAL; - } - -- /* Protect orig and donor inodes against a truncate */ -+ /* protect orig and donor against a truncate */ - ret1 = mext_inode_double_lock(orig_inode, donor_inode); - if (ret1 < 0) - return ret1; - -- /* Protect extent tree against block allocations via delalloc */ -- double_down_write_data_sem(orig_inode, donor_inode); -+ mext_double_down_read(orig_inode, donor_inode); - /* Check the filesystem environment whether move_extent can be done */ - ret1 = mext_check_arguments(orig_inode, donor_inode, orig_start, -- donor_start, &len); -+ donor_start, &len, *moved_len); -+ mext_double_up_read(orig_inode, donor_inode); - if (ret1) - goto out; - -@@ -1327,39 +1355,36 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, - seq_start = le32_to_cpu(ext_cur->ee_block); - rest_blocks = seq_blocks; - -- /* -- * Up semaphore to avoid following problems: -- * a. transaction deadlock among ext4_journal_start, -- * ->write_begin via pagefault, and jbd2_journal_commit -- * b. racing with ->readpage, ->write_begin, and ext4_get_block -- * in move_extent_per_page -- */ -- double_up_write_data_sem(orig_inode, donor_inode); -+ /* Discard preallocations of two inodes */ -+ down_write(&EXT4_I(orig_inode)->i_data_sem); -+ ext4_discard_preallocations(orig_inode); -+ up_write(&EXT4_I(orig_inode)->i_data_sem); -+ -+ down_write(&EXT4_I(donor_inode)->i_data_sem); -+ ext4_discard_preallocations(donor_inode); -+ up_write(&EXT4_I(donor_inode)->i_data_sem); - - while (orig_page_offset <= seq_end_page) { - - /* Swap original branches with new branches */ -- block_len_in_page = move_extent_per_page( -- o_filp, donor_inode, -+ ret1 = move_extent_per_page(o_filp, donor_inode, - orig_page_offset, - data_offset_in_page, -- block_len_in_page, uninit, -- &ret1); -- -+ block_len_in_page, uninit); -+ if (ret1 < 0) -+ goto out; -+ orig_page_offset++; - /* Count how many blocks we have exchanged */ - *moved_len += block_len_in_page; -- if (ret1 < 0) -- break; - if (*moved_len > len) { - ext4_error(orig_inode->i_sb, __func__, - "We replaced blocks too much! 
" - "sum of replaced: %llu requested: %llu", - *moved_len, len); - ret1 = -EIO; -- break; -+ goto out; - } - -- orig_page_offset++; - data_offset_in_page = 0; - rest_blocks -= block_len_in_page; - if (rest_blocks > blocks_per_page) -@@ -1368,10 +1393,6 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, - block_len_in_page = rest_blocks; - } - -- double_down_write_data_sem(orig_inode, donor_inode); -- if (ret1 < 0) -- break; -- - /* Decrease buffer counter */ - if (holecheck_path) - ext4_ext_drop_refs(holecheck_path); -@@ -1393,11 +1414,6 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, - - } - out: -- if (*moved_len) { -- ext4_discard_preallocations(orig_inode); -- ext4_discard_preallocations(donor_inode); -- } -- - if (orig_path) { - ext4_ext_drop_refs(orig_path); - kfree(orig_path); -@@ -1406,7 +1422,7 @@ out: - ext4_ext_drop_refs(holecheck_path); - kfree(holecheck_path); - } -- double_up_write_data_sem(orig_inode, donor_inode); -+ - ret2 = mext_inode_double_unlock(orig_inode, donor_inode); - - if (ret1) -diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c -index 17a17e1..6d2c1b8 100644 ---- a/fs/ext4/namei.c -+++ b/fs/ext4/namei.c -@@ -1292,6 +1292,9 @@ errout: - * add_dirent_to_buf will attempt search the directory block for - * space. It will return -ENOSPC if no space is available, and -EIO - * and -EEXIST if directory entry already exists. -+ * -+ * NOTE! bh is NOT released in the case where ENOSPC is returned. In -+ * all other cases bh is released. - */ - static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry, - struct inode *inode, struct ext4_dir_entry_2 *de, -@@ -1312,10 +1315,14 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry, - top = bh->b_data + blocksize - reclen; - while ((char *) de <= top) { - if (!ext4_check_dir_entry("ext4_add_entry", dir, de, -- bh, offset)) -+ bh, offset)) { -+ brelse(bh); - return -EIO; -- if (ext4_match(namelen, name, de)) -+ } -+ if (ext4_match(namelen, name, de)) { -+ brelse(bh); - return -EEXIST; -+ } - nlen = EXT4_DIR_REC_LEN(de->name_len); - rlen = ext4_rec_len_from_disk(de->rec_len, blocksize); - if ((de->inode? 
rlen - nlen: rlen) >= reclen) -@@ -1330,6 +1337,7 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry, - err = ext4_journal_get_write_access(handle, bh); - if (err) { - ext4_std_error(dir->i_sb, err); -+ brelse(bh); - return err; - } - -@@ -1369,6 +1377,7 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry, - err = ext4_handle_dirty_metadata(handle, dir, bh); - if (err) - ext4_std_error(dir->i_sb, err); -+ brelse(bh); - return 0; - } - -@@ -1462,9 +1471,7 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry, - if (!(de)) - return retval; - -- retval = add_dirent_to_buf(handle, dentry, inode, de, bh); -- brelse(bh); -- return retval; -+ return add_dirent_to_buf(handle, dentry, inode, de, bh); - } - - /* -@@ -1507,10 +1514,8 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry, - if(!bh) - return retval; - retval = add_dirent_to_buf(handle, dentry, inode, NULL, bh); -- if (retval != -ENOSPC) { -- brelse(bh); -+ if (retval != -ENOSPC) - return retval; -- } - - if (blocks == 1 && !dx_fallback && - EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX)) -@@ -1523,9 +1528,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry, - de = (struct ext4_dir_entry_2 *) bh->b_data; - de->inode = 0; - de->rec_len = ext4_rec_len_to_disk(blocksize, blocksize); -- retval = add_dirent_to_buf(handle, dentry, inode, de, bh); -- brelse(bh); -- return retval; -+ return add_dirent_to_buf(handle, dentry, inode, de, bh); - } - - /* -@@ -1558,8 +1561,10 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry, - goto journal_error; - - err = add_dirent_to_buf(handle, dentry, inode, NULL, bh); -- if (err != -ENOSPC) -+ if (err != -ENOSPC) { -+ bh = NULL; - goto cleanup; -+ } - - /* Block full, should compress but for now just split */ - dxtrace(printk(KERN_DEBUG "using %u of %u node entries\n", -@@ -1652,6 +1657,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry, - if (!de) - goto cleanup; - err = add_dirent_to_buf(handle, dentry, inode, de, bh); -+ bh = NULL; - goto cleanup; - - journal_error: -@@ -1769,7 +1775,7 @@ static int ext4_create(struct inode *dir, struct dentry *dentry, int mode, - retry: - handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + - EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 + -- EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb)); -+ 2*EXT4_QUOTA_INIT_BLOCKS(dir->i_sb)); - if (IS_ERR(handle)) - return PTR_ERR(handle); - -@@ -1803,7 +1809,7 @@ static int ext4_mknod(struct inode *dir, struct dentry *dentry, - retry: - handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + - EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 + -- EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb)); -+ 2*EXT4_QUOTA_INIT_BLOCKS(dir->i_sb)); - if (IS_ERR(handle)) - return PTR_ERR(handle); - -@@ -1840,7 +1846,7 @@ static int ext4_mkdir(struct inode *dir, struct dentry *dentry, int mode) - retry: - handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + - EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 + -- EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb)); -+ 2*EXT4_QUOTA_INIT_BLOCKS(dir->i_sb)); - if (IS_ERR(handle)) - return PTR_ERR(handle); - -@@ -2253,7 +2259,7 @@ static int ext4_symlink(struct inode *dir, - retry: - handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + - EXT4_INDEX_EXTRA_TRANS_BLOCKS + 5 + -- EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb)); -+ 2*EXT4_QUOTA_INIT_BLOCKS(dir->i_sb)); - if (IS_ERR(handle)) - return PTR_ERR(handle); - -diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c -index 
3b2c554..3cfc343 100644 ---- a/fs/ext4/resize.c -+++ b/fs/ext4/resize.c -@@ -247,7 +247,7 @@ static int setup_new_group_blocks(struct super_block *sb, - goto exit_bh; - - if (IS_ERR(gdb = bclean(handle, sb, block))) { -- err = PTR_ERR(gdb); -+ err = PTR_ERR(bh); - goto exit_bh; - } - ext4_handle_dirty_metadata(handle, NULL, gdb); -diff --git a/fs/ext4/super.c b/fs/ext4/super.c -index 92943f2..d4ca92a 100644 ---- a/fs/ext4/super.c -+++ b/fs/ext4/super.c -@@ -603,6 +603,10 @@ static void ext4_put_super(struct super_block *sb) - if (sb->s_dirt) - ext4_commit_super(sb, 1); - -+ ext4_release_system_zone(sb); -+ ext4_mb_release(sb); -+ ext4_ext_release(sb); -+ ext4_xattr_put_super(sb); - if (sbi->s_journal) { - err = jbd2_journal_destroy(sbi->s_journal); - sbi->s_journal = NULL; -@@ -610,12 +614,6 @@ static void ext4_put_super(struct super_block *sb) - ext4_abort(sb, __func__, - "Couldn't clean up the journal"); - } -- -- ext4_release_system_zone(sb); -- ext4_mb_release(sb); -- ext4_ext_release(sb); -- ext4_xattr_put_super(sb); -- - if (!(sb->s_flags & MS_RDONLY)) { - EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER); - es->s_state = cpu_to_le16(sbi->s_mount_state); -@@ -704,13 +702,8 @@ static struct inode *ext4_alloc_inode(struct super_block *sb) - ei->i_allocated_meta_blocks = 0; - ei->i_delalloc_reserved_flag = 0; - spin_lock_init(&(ei->i_block_reservation_lock)); --#ifdef CONFIG_QUOTA -- ei->i_reserved_quota = 0; --#endif - INIT_LIST_HEAD(&ei->i_aio_dio_complete_list); - ei->cur_aio_dio = NULL; -- ei->i_sync_tid = 0; -- ei->i_datasync_tid = 0; - - return &ei->vfs_inode; - } -@@ -906,12 +899,6 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs) - if (test_opt(sb, NO_AUTO_DA_ALLOC)) - seq_puts(seq, ",noauto_da_alloc"); - -- if (test_opt(sb, DISCARD)) -- seq_puts(seq, ",discard"); -- -- if (test_opt(sb, NOLOAD)) -- seq_puts(seq, ",norecovery"); -- - ext4_show_quota_options(seq, sb); - - return 0; -@@ -1004,9 +991,7 @@ static const struct dquot_operations ext4_quota_operations = { - .reserve_space = dquot_reserve_space, - .claim_space = dquot_claim_space, - .release_rsv = dquot_release_reserved_space, --#ifdef CONFIG_QUOTA - .get_reserved_space = ext4_get_reserved_space, --#endif - .alloc_inode = dquot_alloc_inode, - .free_space = dquot_free_space, - .free_inode = dquot_free_inode, -@@ -1094,8 +1079,7 @@ enum { - Opt_usrquota, Opt_grpquota, Opt_i_version, - Opt_stripe, Opt_delalloc, Opt_nodelalloc, - Opt_block_validity, Opt_noblock_validity, -- Opt_inode_readahead_blks, Opt_journal_ioprio, -- Opt_discard, Opt_nodiscard, -+ Opt_inode_readahead_blks, Opt_journal_ioprio - }; - - static const match_table_t tokens = { -@@ -1120,7 +1104,6 @@ static const match_table_t tokens = { - {Opt_acl, "acl"}, - {Opt_noacl, "noacl"}, - {Opt_noload, "noload"}, -- {Opt_noload, "norecovery"}, - {Opt_nobh, "nobh"}, - {Opt_bh, "bh"}, - {Opt_commit, "commit=%u"}, -@@ -1161,8 +1144,6 @@ static const match_table_t tokens = { - {Opt_auto_da_alloc, "auto_da_alloc=%u"}, - {Opt_auto_da_alloc, "auto_da_alloc"}, - {Opt_noauto_da_alloc, "noauto_da_alloc"}, -- {Opt_discard, "discard"}, -- {Opt_nodiscard, "nodiscard"}, - {Opt_err, NULL}, - }; - -@@ -1584,12 +1565,6 @@ set_qf_format: - else - set_opt(sbi->s_mount_opt,NO_AUTO_DA_ALLOC); - break; -- case Opt_discard: -- set_opt(sbi->s_mount_opt, DISCARD); -- break; -- case Opt_nodiscard: -- clear_opt(sbi->s_mount_opt, DISCARD); -- break; - default: - ext4_msg(sb, KERN_ERR, - "Unrecognized mount option \"%s\" " -@@ -1698,14 +1673,14 @@ static int 
ext4_fill_flex_info(struct super_block *sb) - size_t size; - int i; - -- sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex; -- groups_per_flex = 1 << sbi->s_log_groups_per_flex; -- -- if (groups_per_flex < 2) { -+ if (!sbi->s_es->s_log_groups_per_flex) { - sbi->s_log_groups_per_flex = 0; - return 1; - } - -+ sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex; -+ groups_per_flex = 1 << sbi->s_log_groups_per_flex; -+ - /* We allocate both existing and potentially added groups */ - flex_group_count = ((sbi->s_groups_count + groups_per_flex - 1) + - ((le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) + 1) << -@@ -3693,11 +3668,13 @@ static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf) - buf->f_blocks = ext4_blocks_count(es) - sbi->s_overhead_last; - buf->f_bfree = percpu_counter_sum_positive(&sbi->s_freeblocks_counter) - - percpu_counter_sum_positive(&sbi->s_dirtyblocks_counter); -+ ext4_free_blocks_count_set(es, buf->f_bfree); - buf->f_bavail = buf->f_bfree - ext4_r_blocks_count(es); - if (buf->f_bfree < ext4_r_blocks_count(es)) - buf->f_bavail = 0; - buf->f_files = le32_to_cpu(es->s_inodes_count); - buf->f_ffree = percpu_counter_sum_positive(&sbi->s_freeinodes_counter); -+ es->s_free_inodes_count = cpu_to_le32(buf->f_ffree); - buf->f_namelen = EXT4_NAME_LEN; - fsid = le64_to_cpup((void *)es->s_uuid) ^ - le64_to_cpup((void *)es->s_uuid + sizeof(u64)); -diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c -index 0257019..fed5b01 100644 ---- a/fs/ext4/xattr.c -+++ b/fs/ext4/xattr.c -@@ -988,10 +988,6 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index, - if (error) - goto cleanup; - -- error = ext4_journal_get_write_access(handle, is.iloc.bh); -- if (error) -- goto cleanup; -- - if (EXT4_I(inode)->i_state & EXT4_STATE_NEW) { - struct ext4_inode *raw_inode = ext4_raw_inode(&is.iloc); - memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size); -@@ -1017,6 +1013,9 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index, - if (flags & XATTR_CREATE) - goto cleanup; - } -+ error = ext4_journal_get_write_access(handle, is.iloc.bh); -+ if (error) -+ goto cleanup; - if (!value) { - if (!is.s.not_found) - error = ext4_xattr_ibody_set(handle, inode, &i, &is); -diff --git a/fs/fcntl.c b/fs/fcntl.c -index 97e01dc..2cf93ec 100644 ---- a/fs/fcntl.c -+++ b/fs/fcntl.c -@@ -618,90 +618,60 @@ static DEFINE_RWLOCK(fasync_lock); - static struct kmem_cache *fasync_cache __read_mostly; - - /* -- * Remove a fasync entry. If successfully removed, return -- * positive and clear the FASYNC flag. If no entry exists, -- * do nothing and return 0. -- * -- * NOTE! It is very important that the FASYNC flag always -- * match the state "is the filp on a fasync list". -- * -- * We always take the 'filp->f_lock', in since fasync_lock -- * needs to be irq-safe. -+ * fasync_helper() is used by almost all character device drivers -+ * to set up the fasync queue. It returns negative on error, 0 if it did -+ * no changes and positive if it added/deleted the entry. 
- */ --static int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp) -+int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fapp) - { - struct fasync_struct *fa, **fp; -+ struct fasync_struct *new = NULL; - int result = 0; - -- spin_lock(&filp->f_lock); -- write_lock_irq(&fasync_lock); -- for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) { -- if (fa->fa_file != filp) -- continue; -- *fp = fa->fa_next; -- kmem_cache_free(fasync_cache, fa); -- filp->f_flags &= ~FASYNC; -- result = 1; -- break; -+ if (on) { -+ new = kmem_cache_alloc(fasync_cache, GFP_KERNEL); -+ if (!new) -+ return -ENOMEM; - } -- write_unlock_irq(&fasync_lock); -- spin_unlock(&filp->f_lock); -- return result; --} -- --/* -- * Add a fasync entry. Return negative on error, positive if -- * added, and zero if did nothing but change an existing one. -- * -- * NOTE! It is very important that the FASYNC flag always -- * match the state "is the filp on a fasync list". -- */ --static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fapp) --{ -- struct fasync_struct *new, *fa, **fp; -- int result = 0; -- -- new = kmem_cache_alloc(fasync_cache, GFP_KERNEL); -- if (!new) -- return -ENOMEM; - -+ /* -+ * We need to take f_lock first since it's not an IRQ-safe -+ * lock. -+ */ - spin_lock(&filp->f_lock); - write_lock_irq(&fasync_lock); - for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) { -- if (fa->fa_file != filp) -- continue; -- fa->fa_fd = fd; -- kmem_cache_free(fasync_cache, new); -- goto out; -+ if (fa->fa_file == filp) { -+ if(on) { -+ fa->fa_fd = fd; -+ kmem_cache_free(fasync_cache, new); -+ } else { -+ *fp = fa->fa_next; -+ kmem_cache_free(fasync_cache, fa); -+ result = 1; -+ } -+ goto out; -+ } - } - -- new->magic = FASYNC_MAGIC; -- new->fa_file = filp; -- new->fa_fd = fd; -- new->fa_next = *fapp; -- *fapp = new; -- result = 1; -- filp->f_flags |= FASYNC; -- -+ if (on) { -+ new->magic = FASYNC_MAGIC; -+ new->fa_file = filp; -+ new->fa_fd = fd; -+ new->fa_next = *fapp; -+ *fapp = new; -+ result = 1; -+ } - out: -+ if (on) -+ filp->f_flags |= FASYNC; -+ else -+ filp->f_flags &= ~FASYNC; - write_unlock_irq(&fasync_lock); - spin_unlock(&filp->f_lock); - return result; - } - --/* -- * fasync_helper() is used by almost all character device drivers -- * to set up the fasync queue, and for regular files by the file -- * lease code. It returns negative on error, 0 if it did no changes -- * and positive if it added/deleted the entry. 
-- */ --int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fapp) --{ -- if (!on) -- return fasync_remove_entry(filp, fapp); -- return fasync_add_entry(fd, filp, fapp); --} -- - EXPORT_SYMBOL(fasync_helper); - - void __kill_fasync(struct fasync_struct *fa, int sig, int band) -diff --git a/fs/fuse/file.c b/fs/fuse/file.c -index a9f5e13..c18913a 100644 ---- a/fs/fuse/file.c -+++ b/fs/fuse/file.c -@@ -828,9 +828,6 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req, - if (!page) - break; - -- if (mapping_writably_mapped(mapping)) -- flush_dcache_page(page); -- - pagefault_disable(); - tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes); - pagefault_enable(); -diff --git a/fs/hfs/catalog.c b/fs/hfs/catalog.c -index 424b033..6d98f11 100644 ---- a/fs/hfs/catalog.c -+++ b/fs/hfs/catalog.c -@@ -289,10 +289,6 @@ int hfs_cat_move(u32 cnid, struct inode *src_dir, struct qstr *src_name, - err = hfs_brec_find(&src_fd); - if (err) - goto out; -- if (src_fd.entrylength > sizeof(entry) || src_fd.entrylength < 0) { -- err = -EIO; -- goto out; -- } - - hfs_bnode_read(src_fd.bnode, &entry, src_fd.entryoffset, - src_fd.entrylength); -diff --git a/fs/hfs/dir.c b/fs/hfs/dir.c -index 2b3b861..7c69b98 100644 ---- a/fs/hfs/dir.c -+++ b/fs/hfs/dir.c -@@ -79,11 +79,6 @@ static int hfs_readdir(struct file *filp, void *dirent, filldir_t filldir) - filp->f_pos++; - /* fall through */ - case 1: -- if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) { -- err = -EIO; -- goto out; -- } -- - hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength); - if (entry.type != HFS_CDR_THD) { - printk(KERN_ERR "hfs: bad catalog folder thread\n"); -@@ -114,12 +109,6 @@ static int hfs_readdir(struct file *filp, void *dirent, filldir_t filldir) - err = -EIO; - goto out; - } -- -- if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) { -- err = -EIO; -- goto out; -- } -- - hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength); - type = entry.type; - len = hfs_mac2asc(sb, strbuf, &fd.key->cat.CName); -diff --git a/fs/hfs/super.c b/fs/hfs/super.c -index 5ed7252..f7fcbe4 100644 ---- a/fs/hfs/super.c -+++ b/fs/hfs/super.c -@@ -409,13 +409,8 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent) - /* try to get the root inode */ - hfs_find_init(HFS_SB(sb)->cat_tree, &fd); - res = hfs_cat_find_brec(sb, HFS_ROOT_CNID, &fd); -- if (!res) { -- if (fd.entrylength > sizeof(rec) || fd.entrylength < 0) { -- res = -EIO; -- goto bail; -- } -+ if (!res) - hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, fd.entrylength); -- } - if (res) { - hfs_find_exit(&fd); - goto bail_no_root; -diff --git a/fs/inode.c b/fs/inode.c -index 4d8e3be..de80bc2 100644 ---- a/fs/inode.c -+++ b/fs/inode.c -@@ -282,6 +282,8 @@ void inode_init_once(struct inode *inode) - #ifdef CONFIG_FSNOTIFY - INIT_HLIST_HEAD(&inode->i_fsnotify_mark_entries); - #endif -+ INIT_LIST_HEAD(&inode->i_obj_list); -+ mutex_init(&inode->i_obj_mutex); - } - EXPORT_SYMBOL(inode_init_once); - -diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c -index 8896c1d..d4cfd6d 100644 ---- a/fs/jbd2/commit.c -+++ b/fs/jbd2/commit.c -@@ -636,10 +636,6 @@ void jbd2_journal_commit_transaction(journal_t *journal) - JBUFFER_TRACE(jh, "ph3: write metadata"); - flags = jbd2_journal_write_metadata_buffer(commit_transaction, - jh, &new_jh, blocknr); -- if (flags < 0) { -- jbd2_journal_abort(journal, flags); -- continue; -- } - set_bit(BH_JWrite, &jh2bh(new_jh)->b_state); - wbuf[bufs++] = jh2bh(new_jh); - -diff --git 
a/fs/jbd2/journal.c b/fs/jbd2/journal.c -index b7ca3a9..fed8538 100644 ---- a/fs/jbd2/journal.c -+++ b/fs/jbd2/journal.c -@@ -78,7 +78,6 @@ EXPORT_SYMBOL(jbd2_journal_errno); - EXPORT_SYMBOL(jbd2_journal_ack_err); - EXPORT_SYMBOL(jbd2_journal_clear_err); - EXPORT_SYMBOL(jbd2_log_wait_commit); --EXPORT_SYMBOL(jbd2_log_start_commit); - EXPORT_SYMBOL(jbd2_journal_start_commit); - EXPORT_SYMBOL(jbd2_journal_force_commit_nested); - EXPORT_SYMBOL(jbd2_journal_wipe); -@@ -359,10 +358,6 @@ repeat: - - jbd_unlock_bh_state(bh_in); - tmp = jbd2_alloc(bh_in->b_size, GFP_NOFS); -- if (!tmp) { -- jbd2_journal_put_journal_head(new_jh); -- return -ENOMEM; -- } - jbd_lock_bh_state(bh_in); - if (jh_in->b_frozen_data) { - jbd2_free(tmp, bh_in->b_size); -@@ -1253,13 +1248,6 @@ int jbd2_journal_load(journal_t *journal) - if (jbd2_journal_recover(journal)) - goto recovery_error; - -- if (journal->j_failed_commit) { -- printk(KERN_ERR "JBD2: journal transaction %u on %s " -- "is corrupt.\n", journal->j_failed_commit, -- journal->j_devname); -- return -EIO; -- } -- - /* OK, we've finished with the dynamic journal bits: - * reinitialise the dynamic contents of the superblock in memory - * and reset them on disk. */ -diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c -index 3b6f2fa..090c556 100644 ---- a/fs/jffs2/gc.c -+++ b/fs/jffs2/gc.c -@@ -700,8 +700,7 @@ static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_ - struct jffs2_raw_inode ri; - struct jffs2_node_frag *last_frag; - union jffs2_device_node dev; -- char *mdata = NULL; -- int mdatalen = 0; -+ char *mdata = NULL, mdatalen = 0; - uint32_t alloclen, ilen; - int ret; - -diff --git a/fs/namei.c b/fs/namei.c -index a2b3c28..d11f404 100644 ---- a/fs/namei.c -+++ b/fs/namei.c -@@ -234,7 +234,6 @@ int generic_permission(struct inode *inode, int mask, - /* - * Searching includes executable on directories, else just read. 
- */ -- mask &= MAY_READ | MAY_WRITE | MAY_EXEC; - if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE))) - if (capable(CAP_DAC_READ_SEARCH)) - return 0; -diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c -index 0d28982..e1d415e 100644 ---- a/fs/nfs/direct.c -+++ b/fs/nfs/direct.c -@@ -342,7 +342,6 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq, - data->res.fattr = &data->fattr; - data->res.eof = 0; - data->res.count = bytes; -- nfs_fattr_init(&data->fattr); - msg.rpc_argp = &data->args; - msg.rpc_resp = &data->res; - -@@ -576,7 +575,6 @@ static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq) - data->res.count = 0; - data->res.fattr = &data->fattr; - data->res.verf = &data->verf; -- nfs_fattr_init(&data->fattr); - - NFS_PROTO(data->inode)->commit_setup(data, &msg); - -@@ -768,7 +766,6 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq, - data->res.fattr = &data->fattr; - data->res.count = bytes; - data->res.verf = &data->verf; -- nfs_fattr_init(&data->fattr); - - task_setup_data.task = &data->task; - task_setup_data.callback_data = data; -diff --git a/fs/nfs/file.c b/fs/nfs/file.c -index 393d40f..f5fdd39 100644 ---- a/fs/nfs/file.c -+++ b/fs/nfs/file.c -@@ -486,8 +486,6 @@ static int nfs_release_page(struct page *page, gfp_t gfp) - { - dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page); - -- if (gfp & __GFP_WAIT) -- nfs_wb_page(page->mapping->host, page); - /* If PagePrivate() is set, then the page is not freeable */ - if (PagePrivate(page)) - return 0; -diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c -index 237874f..fa58800 100644 ---- a/fs/nfs/fscache.c -+++ b/fs/nfs/fscache.c -@@ -354,11 +354,12 @@ void nfs_fscache_reset_inode_cookie(struct inode *inode) - */ - int nfs_fscache_release_page(struct page *page, gfp_t gfp) - { -- if (PageFsCache(page)) { -- struct nfs_inode *nfsi = NFS_I(page->mapping->host); -- struct fscache_cookie *cookie = nfsi->fscache; -+ struct nfs_inode *nfsi = NFS_I(page->mapping->host); -+ struct fscache_cookie *cookie = nfsi->fscache; - -- BUG_ON(!cookie); -+ BUG_ON(!cookie); -+ -+ if (PageFsCache(page)) { - dfprintk(FSCACHE, "NFS: fscache releasepage (0x%p/0x%p/0x%p)\n", - cookie, page, nfsi); - -diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c -index 59047f8..0adefc4 100644 ---- a/fs/nfs/mount_clnt.c -+++ b/fs/nfs/mount_clnt.c -@@ -120,7 +120,7 @@ static struct { - { .status = MNT3ERR_INVAL, .errno = -EINVAL, }, - { .status = MNT3ERR_NAMETOOLONG, .errno = -ENAMETOOLONG, }, - { .status = MNT3ERR_NOTSUPP, .errno = -ENOTSUPP, }, -- { .status = MNT3ERR_SERVERFAULT, .errno = -EREMOTEIO, }, -+ { .status = MNT3ERR_SERVERFAULT, .errno = -ESERVERFAULT, }, - }; - - struct mountres { -diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c -index 7bc2da8..5e078b2 100644 ---- a/fs/nfs/nfs2xdr.c -+++ b/fs/nfs/nfs2xdr.c -@@ -699,7 +699,7 @@ static struct { - { NFSERR_BAD_COOKIE, -EBADCOOKIE }, - { NFSERR_NOTSUPP, -ENOTSUPP }, - { NFSERR_TOOSMALL, -ETOOSMALL }, -- { NFSERR_SERVERFAULT, -EREMOTEIO }, -+ { NFSERR_SERVERFAULT, -ESERVERFAULT }, - { NFSERR_BADTYPE, -EBADTYPE }, - { NFSERR_JUKEBOX, -EJUKEBOX }, - { -1, -EIO } -diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h -index b4a6b1a..6ea07a3 100644 ---- a/fs/nfs/nfs4_fs.h -+++ b/fs/nfs/nfs4_fs.h -@@ -141,7 +141,6 @@ enum { - NFS_O_RDWR_STATE, /* OPEN stateid has read/write state */ - NFS_STATE_RECLAIM_REBOOT, /* OPEN stateid server rebooted */ - NFS_STATE_RECLAIM_NOGRACE, /* OPEN stateid needs to recover state */ -- 
NFS_STATE_POSIX_LOCKS, /* Posix locks are supported */ - }; - - struct nfs4_state { -diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c -index 6c20059..741a562 100644 ---- a/fs/nfs/nfs4proc.c -+++ b/fs/nfs/nfs4proc.c -@@ -1573,8 +1573,6 @@ static int _nfs4_do_open(struct inode *dir, struct path *path, fmode_t fmode, in - status = PTR_ERR(state); - if (IS_ERR(state)) - goto err_opendata_put; -- if ((opendata->o_res.rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) != 0) -- set_bit(NFS_STATE_POSIX_LOCKS, &state->flags); - nfs4_opendata_put(opendata); - nfs4_put_state_owner(sp); - *res = state; -@@ -3978,22 +3976,6 @@ static const struct rpc_call_ops nfs4_lock_ops = { - .rpc_release = nfs4_lock_release, - }; - --static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error) --{ -- struct nfs_client *clp = server->nfs_client; -- struct nfs4_state *state = lsp->ls_state; -- -- switch (error) { -- case -NFS4ERR_ADMIN_REVOKED: -- case -NFS4ERR_BAD_STATEID: -- case -NFS4ERR_EXPIRED: -- if (new_lock_owner != 0 || -- (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0) -- nfs4_state_mark_reclaim_nograce(clp, state); -- lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; -- }; --} -- - static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int reclaim) - { - struct nfs4_lockdata *data; -@@ -4029,9 +4011,6 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f - ret = nfs4_wait_for_completion_rpc_task(task); - if (ret == 0) { - ret = data->rpc_status; -- if (ret) -- nfs4_handle_setlk_error(data->server, data->lsp, -- data->arg.new_lock_owner, ret); - } else - data->cancelled = 1; - rpc_put_task(task); -@@ -4081,11 +4060,8 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock - { - struct nfs_inode *nfsi = NFS_I(state->inode); - unsigned char fl_flags = request->fl_flags; -- int status = -ENOLCK; -+ int status; - -- if ((fl_flags & FL_POSIX) && -- !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags)) -- goto out; - /* Is this a delegated open? */ - status = nfs4_set_lock_state(state, request); - if (status != 0) -diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c -index a4cd1b7..20b4e30 100644 ---- a/fs/nfs/nfs4xdr.c -+++ b/fs/nfs/nfs4xdr.c -@@ -4554,7 +4554,7 @@ static int decode_sequence(struct xdr_stream *xdr, - * If the server returns different values for sessionID, slotID or - * sequence number, the server is looney tunes. - */ -- status = -EREMOTEIO; -+ status = -ESERVERFAULT; - - if (memcmp(id.data, res->sr_session->sess_id.data, - NFS4_MAX_SESSIONID_LEN)) { -@@ -5678,7 +5678,7 @@ static struct { - { NFS4ERR_BAD_COOKIE, -EBADCOOKIE }, - { NFS4ERR_NOTSUPP, -ENOTSUPP }, - { NFS4ERR_TOOSMALL, -ETOOSMALL }, -- { NFS4ERR_SERVERFAULT, -EREMOTEIO }, -+ { NFS4ERR_SERVERFAULT, -ESERVERFAULT }, - { NFS4ERR_BADTYPE, -EBADTYPE }, - { NFS4ERR_LOCKED, -EAGAIN }, - { NFS4ERR_SYMLINK, -ELOOP }, -@@ -5705,7 +5705,7 @@ nfs4_stat_to_errno(int stat) - } - if (stat <= 10000 || stat > 10100) { - /* The server is looney tunes. */ -- return -EREMOTEIO; -+ return -ESERVERFAULT; - } - /* If we cannot translate the error, the recovery routines should - * handle it. 
-diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c -index a12c45b..e297593 100644 ---- a/fs/nfs/pagelist.c -+++ b/fs/nfs/pagelist.c -@@ -176,12 +176,6 @@ void nfs_release_request(struct nfs_page *req) - kref_put(&req->wb_kref, nfs_free_request); - } - --static int nfs_wait_bit_uninterruptible(void *word) --{ -- io_schedule(); -- return 0; --} -- - /** - * nfs_wait_on_request - Wait for a request to complete. - * @req: request to wait upon. -@@ -192,9 +186,14 @@ static int nfs_wait_bit_uninterruptible(void *word) - int - nfs_wait_on_request(struct nfs_page *req) - { -- return wait_on_bit(&req->wb_flags, PG_BUSY, -- nfs_wait_bit_uninterruptible, -- TASK_UNINTERRUPTIBLE); -+ int ret = 0; -+ -+ if (!test_bit(PG_BUSY, &req->wb_flags)) -+ goto out; -+ ret = out_of_line_wait_on_bit(&req->wb_flags, PG_BUSY, -+ nfs_wait_bit_killable, TASK_KILLABLE); -+out: -+ return ret; - } - - /** -diff --git a/fs/nfs/super.c b/fs/nfs/super.c -index 4bf23f6..90be551 100644 ---- a/fs/nfs/super.c -+++ b/fs/nfs/super.c -@@ -241,7 +241,6 @@ static int nfs_show_stats(struct seq_file *, struct vfsmount *); - static int nfs_get_sb(struct file_system_type *, int, const char *, void *, struct vfsmount *); - static int nfs_xdev_get_sb(struct file_system_type *fs_type, - int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt); --static void nfs_put_super(struct super_block *); - static void nfs_kill_super(struct super_block *); - static int nfs_remount(struct super_block *sb, int *flags, char *raw_data); - -@@ -265,7 +264,6 @@ static const struct super_operations nfs_sops = { - .alloc_inode = nfs_alloc_inode, - .destroy_inode = nfs_destroy_inode, - .write_inode = nfs_write_inode, -- .put_super = nfs_put_super, - .statfs = nfs_statfs, - .clear_inode = nfs_clear_inode, - .umount_begin = nfs_umount_begin, -@@ -335,7 +333,6 @@ static const struct super_operations nfs4_sops = { - .alloc_inode = nfs_alloc_inode, - .destroy_inode = nfs_destroy_inode, - .write_inode = nfs_write_inode, -- .put_super = nfs_put_super, - .statfs = nfs_statfs, - .clear_inode = nfs4_clear_inode, - .umount_begin = nfs_umount_begin, -@@ -737,6 +734,8 @@ static struct nfs_parsed_mount_data *nfs_alloc_parsed_mount_data(unsigned int ve - - data = kzalloc(sizeof(*data), GFP_KERNEL); - if (data) { -+ data->rsize = NFS_MAX_FILE_IO_SIZE; -+ data->wsize = NFS_MAX_FILE_IO_SIZE; - data->acregmin = NFS_DEF_ACREGMIN; - data->acregmax = NFS_DEF_ACREGMAX; - data->acdirmin = NFS_DEF_ACDIRMIN; -@@ -2199,17 +2198,6 @@ error_splat_super: - } - - /* -- * Ensure that we unregister the bdi before kill_anon_super -- * releases the device name -- */ --static void nfs_put_super(struct super_block *s) --{ -- struct nfs_server *server = NFS_SB(s); -- -- bdi_unregister(&server->backing_dev_info); --} -- --/* - * Destroy an NFS2/3 superblock - */ - static void nfs_kill_super(struct super_block *s) -@@ -2217,6 +2205,7 @@ static void nfs_kill_super(struct super_block *s) - struct nfs_server *server = NFS_SB(s); - - kill_anon_super(s); -+ bdi_unregister(&server->backing_dev_info); - nfs_fscache_release_super_cookie(s); - nfs_free_server(server); - } -diff --git a/fs/nfs/write.c b/fs/nfs/write.c -index cf6c06f..53eb26c 100644 ---- a/fs/nfs/write.c -+++ b/fs/nfs/write.c -@@ -1542,7 +1542,6 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page) - break; - } - ret = nfs_wait_on_request(req); -- nfs_release_request(req); - if (ret < 0) - goto out; - } -@@ -1613,16 +1612,15 @@ int nfs_migrate_page(struct address_space *mapping, struct page *newpage, - if (ret) - 
goto out_unlock; - page_cache_get(newpage); -- spin_lock(&mapping->host->i_lock); - req->wb_page = newpage; - SetPagePrivate(newpage); -- set_page_private(newpage, (unsigned long)req); -+ set_page_private(newpage, page_private(page)); - ClearPagePrivate(page); - set_page_private(page, 0); -- spin_unlock(&mapping->host->i_lock); - page_cache_release(page); - out_unlock: - nfs_clear_page_tag_locked(req); -+ nfs_release_request(req); - out: - return ret; - } -diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c -index 6d9c6aa..725d02f 100644 ---- a/fs/nfsd/nfs4acl.c -+++ b/fs/nfsd/nfs4acl.c -@@ -389,7 +389,7 @@ sort_pacl(struct posix_acl *pacl) - sort_pacl_range(pacl, 1, i-1); - - BUG_ON(pacl->a_entries[i].e_tag != ACL_GROUP_OBJ); -- j = ++i; -+ j = i++; - while (pacl->a_entries[j].e_tag == ACL_GROUP) - j++; - sort_pacl_range(pacl, i, j-1); -diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c -index 570dd1c..a293f02 100644 ---- a/fs/nfsd/vfs.c -+++ b/fs/nfsd/vfs.c -@@ -774,9 +774,12 @@ static inline int nfsd_dosync(struct file *filp, struct dentry *dp, - int (*fsync) (struct file *, struct dentry *, int); - int err; - -- err = filemap_write_and_wait(inode->i_mapping); -+ err = filemap_fdatawrite(inode->i_mapping); - if (err == 0 && fop && (fsync = fop->fsync)) - err = fsync(filp, dp, 0); -+ if (err == 0) -+ err = filemap_fdatawait(inode->i_mapping); -+ - return err; - } - -diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c -index 1afb0a1..c9ee67b 100644 ---- a/fs/notify/inotify/inotify_fsnotify.c -+++ b/fs/notify/inotify/inotify_fsnotify.c -@@ -121,7 +121,7 @@ static int idr_callback(int id, void *p, void *data) - if (warned) - return 0; - -- warned = true; -+ warned = false; - entry = p; - ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry); - -diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c -index ca44337..dcd2040 100644 ---- a/fs/notify/inotify/inotify_user.c -+++ b/fs/notify/inotify/inotify_user.c -@@ -558,7 +558,7 @@ retry: - - spin_lock(&group->inotify_data.idr_lock); - ret = idr_get_new_above(&group->inotify_data.idr, &tmp_ientry->fsn_entry, -- group->inotify_data.last_wd+1, -+ group->inotify_data.last_wd, - &tmp_ientry->wd); - spin_unlock(&group->inotify_data.idr_lock); - if (ret) { -@@ -638,7 +638,7 @@ static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsign - - spin_lock_init(&group->inotify_data.idr_lock); - idr_init(&group->inotify_data.idr); -- group->inotify_data.last_wd = 0; -+ group->inotify_data.last_wd = 1; - group->inotify_data.user = user; - group->inotify_data.fa = NULL; - -diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c -index 49cfd5f..038a602 100644 ---- a/fs/partitions/efi.c -+++ b/fs/partitions/efi.c -@@ -1,9 +1,7 @@ - /************************************************************ - * EFI GUID Partition Table handling -- * -- * http://www.uefi.org/specs/ -- * http://www.intel.com/technology/efi/ -- * -+ * Per Intel EFI Specification v1.02 -+ * http://developer.intel.com/technology/efi/efi.htm - * efi.[ch] by Matt Domsch - * Copyright 2000,2001,2002,2004 Dell Inc. 
- * -@@ -94,7 +92,6 @@ - * - ************************************************************/ - #include --#include - #include "check.h" - #include "efi.h" - -@@ -144,8 +141,7 @@ last_lba(struct block_device *bdev) - { - if (!bdev || !bdev->bd_inode) - return 0; -- return div_u64(bdev->bd_inode->i_size, -- bdev_logical_block_size(bdev)) - 1ULL; -+ return (bdev->bd_inode->i_size >> 9) - 1ULL; - } - - static inline int -@@ -192,7 +188,6 @@ static size_t - read_lba(struct block_device *bdev, u64 lba, u8 * buffer, size_t count) - { - size_t totalreadcount = 0; -- sector_t n = lba * (bdev_logical_block_size(bdev) / 512); - - if (!bdev || !buffer || lba > last_lba(bdev)) - return 0; -@@ -200,7 +195,7 @@ read_lba(struct block_device *bdev, u64 lba, u8 * buffer, size_t count) - while (count) { - int copied = 512; - Sector sect; -- unsigned char *data = read_dev_sector(bdev, n++, §); -+ unsigned char *data = read_dev_sector(bdev, lba++, §); - if (!data) - break; - if (copied > count) -@@ -262,16 +257,15 @@ static gpt_header * - alloc_read_gpt_header(struct block_device *bdev, u64 lba) - { - gpt_header *gpt; -- unsigned ssz = bdev_logical_block_size(bdev); -- - if (!bdev) - return NULL; - -- gpt = kzalloc(ssz, GFP_KERNEL); -+ gpt = kzalloc(sizeof (gpt_header), GFP_KERNEL); - if (!gpt) - return NULL; - -- if (read_lba(bdev, lba, (u8 *) gpt, ssz) < ssz) { -+ if (read_lba(bdev, lba, (u8 *) gpt, -+ sizeof (gpt_header)) < sizeof (gpt_header)) { - kfree(gpt); - gpt=NULL; - return NULL; -@@ -607,7 +601,6 @@ efi_partition(struct parsed_partitions *state, struct block_device *bdev) - gpt_header *gpt = NULL; - gpt_entry *ptes = NULL; - u32 i; -- unsigned ssz = bdev_logical_block_size(bdev) / 512; - - if (!find_valid_gpt(bdev, &gpt, &ptes) || !gpt || !ptes) { - kfree(gpt); -@@ -618,14 +611,13 @@ efi_partition(struct parsed_partitions *state, struct block_device *bdev) - pr_debug("GUID Partition Table is valid! Yea!\n"); - - for (i = 0; i < le32_to_cpu(gpt->num_partition_entries) && i < state->limit-1; i++) { -- u64 start = le64_to_cpu(ptes[i].starting_lba); -- u64 size = le64_to_cpu(ptes[i].ending_lba) - -- le64_to_cpu(ptes[i].starting_lba) + 1ULL; -- - if (!is_pte_valid(&ptes[i], last_lba(bdev))) - continue; - -- put_partition(state, i+1, start * ssz, size * ssz); -+ put_partition(state, i+1, le64_to_cpu(ptes[i].starting_lba), -+ (le64_to_cpu(ptes[i].ending_lba) - -+ le64_to_cpu(ptes[i].starting_lba) + -+ 1ULL)); - - /* If this is a RAID volume, tell md */ - if (!efi_guidcmp(ptes[i].partition_type_guid, -diff --git a/fs/partitions/efi.h b/fs/partitions/efi.h -index 6998b58..2cc89d0 100644 ---- a/fs/partitions/efi.h -+++ b/fs/partitions/efi.h -@@ -37,6 +37,7 @@ - #define EFI_PMBR_OSTYPE_EFI 0xEF - #define EFI_PMBR_OSTYPE_EFI_GPT 0xEE - -+#define GPT_BLOCK_SIZE 512 - #define GPT_HEADER_SIGNATURE 0x5452415020494645ULL - #define GPT_HEADER_REVISION_V1 0x00010000 - #define GPT_PRIMARY_PARTITION_TABLE_LBA 1 -@@ -78,12 +79,7 @@ typedef struct _gpt_header { - __le32 num_partition_entries; - __le32 sizeof_partition_entry; - __le32 partition_entry_array_crc32; -- -- /* The rest of the logical block is reserved by UEFI and must be zero. 
-- * EFI standard handles this by: -- * -- * uint8_t reserved2[ BlockSize - 92 ]; -- */ -+ u8 reserved2[GPT_BLOCK_SIZE - 92]; - } __attribute__ ((packed)) gpt_header; - - typedef struct _gpt_entry_attributes { -diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c -index 2534987..39b49c4 100644 ---- a/fs/quota/dquot.c -+++ b/fs/quota/dquot.c -@@ -1388,70 +1388,6 @@ void vfs_dq_drop(struct inode *inode) - EXPORT_SYMBOL(vfs_dq_drop); - - /* -- * inode_reserved_space is managed internally by quota, and protected by -- * i_lock similar to i_blocks+i_bytes. -- */ --static qsize_t *inode_reserved_space(struct inode * inode) --{ -- /* Filesystem must explicitly define it's own method in order to use -- * quota reservation interface */ -- BUG_ON(!inode->i_sb->dq_op->get_reserved_space); -- return inode->i_sb->dq_op->get_reserved_space(inode); --} -- --static void inode_add_rsv_space(struct inode *inode, qsize_t number) --{ -- spin_lock(&inode->i_lock); -- *inode_reserved_space(inode) += number; -- spin_unlock(&inode->i_lock); --} -- -- --static void inode_claim_rsv_space(struct inode *inode, qsize_t number) --{ -- spin_lock(&inode->i_lock); -- *inode_reserved_space(inode) -= number; -- __inode_add_bytes(inode, number); -- spin_unlock(&inode->i_lock); --} -- --static void inode_sub_rsv_space(struct inode *inode, qsize_t number) --{ -- spin_lock(&inode->i_lock); -- *inode_reserved_space(inode) -= number; -- spin_unlock(&inode->i_lock); --} -- --static qsize_t inode_get_rsv_space(struct inode *inode) --{ -- qsize_t ret; -- -- if (!inode->i_sb->dq_op->get_reserved_space) -- return 0; -- spin_lock(&inode->i_lock); -- ret = *inode_reserved_space(inode); -- spin_unlock(&inode->i_lock); -- return ret; --} -- --static void inode_incr_space(struct inode *inode, qsize_t number, -- int reserve) --{ -- if (reserve) -- inode_add_rsv_space(inode, number); -- else -- inode_add_bytes(inode, number); --} -- --static void inode_decr_space(struct inode *inode, qsize_t number, int reserve) --{ -- if (reserve) -- inode_sub_rsv_space(inode, number); -- else -- inode_sub_bytes(inode, number); --} -- --/* - * Following four functions update i_blocks+i_bytes fields and - * quota information (together with appropriate checks) - * NOTE: We absolutely rely on the fact that caller dirties -@@ -1469,21 +1405,6 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, - int cnt, ret = QUOTA_OK; - char warntype[MAXQUOTAS]; - -- /* -- * First test before acquiring mutex - solves deadlocks when we -- * re-enter the quota code and are already holding the mutex -- */ -- if (IS_NOQUOTA(inode)) { -- inode_incr_space(inode, number, reserve); -- goto out; -- } -- -- down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); -- if (IS_NOQUOTA(inode)) { -- inode_incr_space(inode, number, reserve); -- goto out_unlock; -- } -- - for (cnt = 0; cnt < MAXQUOTAS; cnt++) - warntype[cnt] = QUOTA_NL_NOWARN; - -@@ -1494,8 +1415,7 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, - if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt) - == NO_QUOTA) { - ret = NO_QUOTA; -- spin_unlock(&dq_data_lock); -- goto out_flush_warn; -+ goto out_unlock; - } - } - for (cnt = 0; cnt < MAXQUOTAS; cnt++) { -@@ -1506,32 +1426,64 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, - else - dquot_incr_space(inode->i_dquot[cnt], number); - } -- inode_incr_space(inode, number, reserve); -+ if (!reserve) -+ inode_add_bytes(inode, number); -+out_unlock: - spin_unlock(&dq_data_lock); -+ flush_warnings(inode->i_dquot, warntype); -+ return ret; -+} -+ 
-+int dquot_alloc_space(struct inode *inode, qsize_t number, int warn) -+{ -+ int cnt, ret = QUOTA_OK; -+ -+ /* -+ * First test before acquiring mutex - solves deadlocks when we -+ * re-enter the quota code and are already holding the mutex -+ */ -+ if (IS_NOQUOTA(inode)) { -+ inode_add_bytes(inode, number); -+ goto out; -+ } -+ -+ down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); -+ if (IS_NOQUOTA(inode)) { -+ inode_add_bytes(inode, number); -+ goto out_unlock; -+ } -+ -+ ret = __dquot_alloc_space(inode, number, warn, 0); -+ if (ret == NO_QUOTA) -+ goto out_unlock; - -- if (reserve) -- goto out_flush_warn; - /* Dirtify all the dquots - this can block when journalling */ - for (cnt = 0; cnt < MAXQUOTAS; cnt++) - if (inode->i_dquot[cnt]) - mark_dquot_dirty(inode->i_dquot[cnt]); --out_flush_warn: -- flush_warnings(inode->i_dquot, warntype); - out_unlock: - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); - out: - return ret; - } -- --int dquot_alloc_space(struct inode *inode, qsize_t number, int warn) --{ -- return __dquot_alloc_space(inode, number, warn, 0); --} - EXPORT_SYMBOL(dquot_alloc_space); - - int dquot_reserve_space(struct inode *inode, qsize_t number, int warn) - { -- return __dquot_alloc_space(inode, number, warn, 1); -+ int ret = QUOTA_OK; -+ -+ if (IS_NOQUOTA(inode)) -+ goto out; -+ -+ down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); -+ if (IS_NOQUOTA(inode)) -+ goto out_unlock; -+ -+ ret = __dquot_alloc_space(inode, number, warn, 1); -+out_unlock: -+ up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); -+out: -+ return ret; - } - EXPORT_SYMBOL(dquot_reserve_space); - -@@ -1588,14 +1540,14 @@ int dquot_claim_space(struct inode *inode, qsize_t number) - int ret = QUOTA_OK; - - if (IS_NOQUOTA(inode)) { -- inode_claim_rsv_space(inode, number); -+ inode_add_bytes(inode, number); - goto out; - } - - down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); - if (IS_NOQUOTA(inode)) { - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); -- inode_claim_rsv_space(inode, number); -+ inode_add_bytes(inode, number); - goto out; - } - -@@ -1607,7 +1559,7 @@ int dquot_claim_space(struct inode *inode, qsize_t number) - number); - } - /* Update inode bytes */ -- inode_claim_rsv_space(inode, number); -+ inode_add_bytes(inode, number); - spin_unlock(&dq_data_lock); - /* Dirtify all the dquots - this can block when journalling */ - for (cnt = 0; cnt < MAXQUOTAS; cnt++) -@@ -1620,9 +1572,38 @@ out: - EXPORT_SYMBOL(dquot_claim_space); - - /* -+ * Release reserved quota space -+ */ -+void dquot_release_reserved_space(struct inode *inode, qsize_t number) -+{ -+ int cnt; -+ -+ if (IS_NOQUOTA(inode)) -+ goto out; -+ -+ down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); -+ if (IS_NOQUOTA(inode)) -+ goto out_unlock; -+ -+ spin_lock(&dq_data_lock); -+ /* Release reserved dquots */ -+ for (cnt = 0; cnt < MAXQUOTAS; cnt++) { -+ if (inode->i_dquot[cnt]) -+ dquot_free_reserved_space(inode->i_dquot[cnt], number); -+ } -+ spin_unlock(&dq_data_lock); -+ -+out_unlock: -+ up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); -+out: -+ return; -+} -+EXPORT_SYMBOL(dquot_release_reserved_space); -+ -+/* - * This operation can block, but only after everything is updated - */ --int __dquot_free_space(struct inode *inode, qsize_t number, int reserve) -+int dquot_free_space(struct inode *inode, qsize_t number) - { - unsigned int cnt; - char warntype[MAXQUOTAS]; -@@ -1631,7 +1612,7 @@ int __dquot_free_space(struct inode *inode, qsize_t number, int reserve) - * re-enter the quota code and are already holding the mutex */ - if (IS_NOQUOTA(inode)) { - out_sub: -- 
inode_decr_space(inode, number, reserve); -+ inode_sub_bytes(inode, number); - return QUOTA_OK; - } - -@@ -1646,43 +1627,21 @@ out_sub: - if (!inode->i_dquot[cnt]) - continue; - warntype[cnt] = info_bdq_free(inode->i_dquot[cnt], number); -- if (reserve) -- dquot_free_reserved_space(inode->i_dquot[cnt], number); -- else -- dquot_decr_space(inode->i_dquot[cnt], number); -+ dquot_decr_space(inode->i_dquot[cnt], number); - } -- inode_decr_space(inode, number, reserve); -+ inode_sub_bytes(inode, number); - spin_unlock(&dq_data_lock); -- -- if (reserve) -- goto out_unlock; - /* Dirtify all the dquots - this can block when journalling */ - for (cnt = 0; cnt < MAXQUOTAS; cnt++) - if (inode->i_dquot[cnt]) - mark_dquot_dirty(inode->i_dquot[cnt]); --out_unlock: - flush_warnings(inode->i_dquot, warntype); - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); - return QUOTA_OK; - } -- --int dquot_free_space(struct inode *inode, qsize_t number) --{ -- return __dquot_free_space(inode, number, 0); --} - EXPORT_SYMBOL(dquot_free_space); - - /* -- * Release reserved quota space -- */ --void dquot_release_reserved_space(struct inode *inode, qsize_t number) --{ -- __dquot_free_space(inode, number, 1); -- --} --EXPORT_SYMBOL(dquot_release_reserved_space); -- --/* - * This operation can block, but only after everything is updated - */ - int dquot_free_inode(const struct inode *inode, qsize_t number) -@@ -1720,6 +1679,19 @@ int dquot_free_inode(const struct inode *inode, qsize_t number) - EXPORT_SYMBOL(dquot_free_inode); - - /* -+ * call back function, get reserved quota space from underlying fs -+ */ -+qsize_t dquot_get_reserved_space(struct inode *inode) -+{ -+ qsize_t reserved_space = 0; -+ -+ if (sb_any_quota_active(inode->i_sb) && -+ inode->i_sb->dq_op->get_reserved_space) -+ reserved_space = inode->i_sb->dq_op->get_reserved_space(inode); -+ return reserved_space; -+} -+ -+/* - * Transfer the number of inode and blocks from one diskquota to an other. - * - * This operation can block, but only after everything is updated -@@ -1762,7 +1734,7 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr) - } - spin_lock(&dq_data_lock); - cur_space = inode_get_bytes(inode); -- rsv_space = inode_get_rsv_space(inode); -+ rsv_space = dquot_get_reserved_space(inode); - space = cur_space + rsv_space; - /* Build the transfer_from list and check the limits */ - for (cnt = 0; cnt < MAXQUOTAS; cnt++) { -diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c -index d240c15..a14d6cd 100644 ---- a/fs/reiserfs/inode.c -+++ b/fs/reiserfs/inode.c -@@ -2531,12 +2531,6 @@ static int reiserfs_writepage(struct page *page, struct writeback_control *wbc) - return reiserfs_write_full_page(page, wbc); - } - --static void reiserfs_truncate_failed_write(struct inode *inode) --{ -- truncate_inode_pages(inode->i_mapping, inode->i_size); -- reiserfs_truncate_file(inode, 0); --} -- - static int reiserfs_write_begin(struct file *file, - struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, -@@ -2603,8 +2597,6 @@ static int reiserfs_write_begin(struct file *file, - if (ret) { - unlock_page(page); - page_cache_release(page); -- /* Truncate allocated blocks */ -- reiserfs_truncate_failed_write(inode); - } - return ret; - } -@@ -2697,7 +2689,8 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping, - ** transaction tracking stuff when the size changes. So, we have - ** to do the i_size updates here. 
- */ -- if (pos + copied > inode->i_size) { -+ pos += copied; -+ if (pos > inode->i_size) { - struct reiserfs_transaction_handle myth; - reiserfs_write_lock(inode->i_sb); - /* If the file have grown beyond the border where it -@@ -2715,7 +2708,7 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping, - goto journal_error; - } - reiserfs_update_inode_transaction(inode); -- inode->i_size = pos + copied; -+ inode->i_size = pos; - /* - * this will just nest into our transaction. It's important - * to use mark_inode_dirty so the inode gets pushed around on the -@@ -2742,10 +2735,6 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping, - out: - unlock_page(page); - page_cache_release(page); -- -- if (pos + len > inode->i_size) -- reiserfs_truncate_failed_write(inode); -- - return ret == 0 ? copied : ret; - - journal_error: -diff --git a/fs/romfs/super.c b/fs/romfs/super.c -index 42d2135..c117fa8 100644 ---- a/fs/romfs/super.c -+++ b/fs/romfs/super.c -@@ -544,7 +544,6 @@ error: - error_rsb_inval: - ret = -EINVAL; - error_rsb: -- kfree(rsb); - return ret; - } - -diff --git a/fs/stat.c b/fs/stat.c -index c4ecd52..075694e 100644 ---- a/fs/stat.c -+++ b/fs/stat.c -@@ -401,9 +401,9 @@ SYSCALL_DEFINE4(fstatat64, int, dfd, char __user *, filename, - } - #endif /* __ARCH_WANT_STAT64 */ - --/* Caller is here responsible for sufficient locking (ie. inode->i_lock) */ --void __inode_add_bytes(struct inode *inode, loff_t bytes) -+void inode_add_bytes(struct inode *inode, loff_t bytes) - { -+ spin_lock(&inode->i_lock); - inode->i_blocks += bytes >> 9; - bytes &= 511; - inode->i_bytes += bytes; -@@ -411,12 +411,6 @@ void __inode_add_bytes(struct inode *inode, loff_t bytes) - inode->i_blocks++; - inode->i_bytes -= 512; - } --} -- --void inode_add_bytes(struct inode *inode, loff_t bytes) --{ -- spin_lock(&inode->i_lock); -- __inode_add_bytes(inode, bytes); - spin_unlock(&inode->i_lock); - } - -diff --git a/fs/super.c b/fs/super.c -index aff046b..19eb70b 100644 ---- a/fs/super.c -+++ b/fs/super.c -@@ -901,9 +901,8 @@ int get_sb_single(struct file_system_type *fs_type, - return error; - } - s->s_flags |= MS_ACTIVE; -- } else { -- do_remount_sb(s, flags, data, 0); - } -+ do_remount_sb(s, flags, data, 0); - simple_set_mnt(mnt, s); - return 0; - } -diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c -index 02a022a..e28cecf 100644 ---- a/fs/sysfs/inode.c -+++ b/fs/sysfs/inode.c -@@ -94,29 +94,30 @@ int sysfs_setattr(struct dentry * dentry, struct iattr * iattr) - if (!sd_attrs) - return -ENOMEM; - sd->s_iattr = sd_attrs; -- } -- /* attributes were changed at least once in past */ -- iattrs = &sd_attrs->ia_iattr; -- -- if (ia_valid & ATTR_UID) -- iattrs->ia_uid = iattr->ia_uid; -- if (ia_valid & ATTR_GID) -- iattrs->ia_gid = iattr->ia_gid; -- if (ia_valid & ATTR_ATIME) -- iattrs->ia_atime = timespec_trunc(iattr->ia_atime, -- inode->i_sb->s_time_gran); -- if (ia_valid & ATTR_MTIME) -- iattrs->ia_mtime = timespec_trunc(iattr->ia_mtime, -- inode->i_sb->s_time_gran); -- if (ia_valid & ATTR_CTIME) -- iattrs->ia_ctime = timespec_trunc(iattr->ia_ctime, -- inode->i_sb->s_time_gran); -- if (ia_valid & ATTR_MODE) { -- umode_t mode = iattr->ia_mode; -- -- if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID)) -- mode &= ~S_ISGID; -- iattrs->ia_mode = sd->s_mode = mode; -+ } else { -+ /* attributes were changed at least once in past */ -+ iattrs = &sd_attrs->ia_iattr; -+ -+ if (ia_valid & ATTR_UID) -+ iattrs->ia_uid = iattr->ia_uid; -+ if (ia_valid & ATTR_GID) -+ iattrs->ia_gid = 
iattr->ia_gid; -+ if (ia_valid & ATTR_ATIME) -+ iattrs->ia_atime = timespec_trunc(iattr->ia_atime, -+ inode->i_sb->s_time_gran); -+ if (ia_valid & ATTR_MTIME) -+ iattrs->ia_mtime = timespec_trunc(iattr->ia_mtime, -+ inode->i_sb->s_time_gran); -+ if (ia_valid & ATTR_CTIME) -+ iattrs->ia_ctime = timespec_trunc(iattr->ia_ctime, -+ inode->i_sb->s_time_gran); -+ if (ia_valid & ATTR_MODE) { -+ umode_t mode = iattr->ia_mode; -+ -+ if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID)) -+ mode &= ~S_ISGID; -+ iattrs->ia_mode = sd->s_mode = mode; -+ } - } - return error; - } -diff --git a/fs/udf/super.c b/fs/udf/super.c -index 1e4543c..9d1b8c2 100644 ---- a/fs/udf/super.c -+++ b/fs/udf/super.c -@@ -1078,39 +1078,21 @@ static int udf_fill_partdesc_info(struct super_block *sb, - return 0; - } - --static void udf_find_vat_block(struct super_block *sb, int p_index, -- int type1_index, sector_t start_block) --{ -- struct udf_sb_info *sbi = UDF_SB(sb); -- struct udf_part_map *map = &sbi->s_partmaps[p_index]; -- sector_t vat_block; -- struct kernel_lb_addr ino; -- -- /* -- * VAT file entry is in the last recorded block. Some broken disks have -- * it a few blocks before so try a bit harder... -- */ -- ino.partitionReferenceNum = type1_index; -- for (vat_block = start_block; -- vat_block >= map->s_partition_root && -- vat_block >= start_block - 3 && -- !sbi->s_vat_inode; vat_block--) { -- ino.logicalBlockNum = vat_block - map->s_partition_root; -- sbi->s_vat_inode = udf_iget(sb, &ino); -- } --} -- - static int udf_load_vat(struct super_block *sb, int p_index, int type1_index) - { - struct udf_sb_info *sbi = UDF_SB(sb); - struct udf_part_map *map = &sbi->s_partmaps[p_index]; -+ struct kernel_lb_addr ino; - struct buffer_head *bh = NULL; - struct udf_inode_info *vati; - uint32_t pos; - struct virtualAllocationTable20 *vat20; - sector_t blocks = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits; - -- udf_find_vat_block(sb, p_index, type1_index, sbi->s_last_block); -+ /* VAT file entry is in the last recorded block */ -+ ino.partitionReferenceNum = type1_index; -+ ino.logicalBlockNum = sbi->s_last_block - map->s_partition_root; -+ sbi->s_vat_inode = udf_iget(sb, &ino); - if (!sbi->s_vat_inode && - sbi->s_last_block != blocks - 1) { - printk(KERN_NOTICE "UDF-fs: Failed to read VAT inode from the" -@@ -1118,7 +1100,9 @@ static int udf_load_vat(struct super_block *sb, int p_index, int type1_index) - "block of the device (%lu).\n", - (unsigned long)sbi->s_last_block, - (unsigned long)blocks - 1); -- udf_find_vat_block(sb, p_index, type1_index, blocks - 1); -+ ino.partitionReferenceNum = type1_index; -+ ino.logicalBlockNum = blocks - 1 - map->s_partition_root; -+ sbi->s_vat_inode = udf_iget(sb, &ino); - } - if (!sbi->s_vat_inode) - return 1; -diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h -index 0946997..9d7febd 100644 ---- a/include/acpi/platform/aclinux.h -+++ b/include/acpi/platform/aclinux.h -@@ -152,7 +152,7 @@ static inline void *acpi_os_acquire_object(acpi_cache_t * cache) - #include - #define ACPI_PREEMPTION_POINT() \ - do { \ -- if (!in_atomic_preempt_off() && !irqs_disabled()) \ -+ if (!in_atomic_preempt_off()) \ - cond_resched(); \ - } while (0) - -diff --git a/include/drm/drmP.h b/include/drm/drmP.h -index 7ad3faa..c8e64bb 100644 ---- a/include/drm/drmP.h -+++ b/include/drm/drmP.h -@@ -1295,7 +1295,6 @@ extern u32 drm_vblank_count(struct drm_device *dev, int crtc); - extern void drm_handle_vblank(struct drm_device *dev, int crtc); - extern int drm_vblank_get(struct 
drm_device *dev, int crtc); - extern void drm_vblank_put(struct drm_device *dev, int crtc); --extern void drm_vblank_off(struct drm_device *dev, int crtc); - extern void drm_vblank_cleanup(struct drm_device *dev); - /* Modesetting support */ - extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc); -@@ -1402,7 +1401,7 @@ extern int drm_ati_pcigart_cleanup(struct drm_device *dev, - struct drm_ati_pcigart_info * gart_info); - - extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size, -- size_t align); -+ size_t align, dma_addr_t maxaddr); - extern void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah); - extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah); - -diff --git a/include/drm/drm_os_linux.h b/include/drm/drm_os_linux.h -index 3933691..26641e9 100644 ---- a/include/drm/drm_os_linux.h -+++ b/include/drm/drm_os_linux.h -@@ -123,5 +123,5 @@ do { \ - remove_wait_queue(&(queue), &entry); \ - } while (0) - --#define DRM_WAKEUP( queue ) wake_up( queue ) -+#define DRM_WAKEUP( queue ) wake_up_interruptible( queue ) - #define DRM_INIT_WAITQUEUE( queue ) init_waitqueue_head( queue ) -diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h -index b199170..6983a7c 100644 ---- a/include/drm/ttm/ttm_memory.h -+++ b/include/drm/ttm/ttm_memory.h -@@ -33,7 +33,6 @@ - #include - #include - #include --#include - - /** - * struct ttm_mem_shrink - callback to shrink TTM memory usage. -diff --git a/include/linux/acpi.h b/include/linux/acpi.h -index c010b94..dfcd920 100644 ---- a/include/linux/acpi.h -+++ b/include/linux/acpi.h -@@ -253,13 +253,6 @@ void __init acpi_old_suspend_ordering(void); - void __init acpi_s4_no_nvs(void); - #endif /* CONFIG_PM_SLEEP */ - --struct acpi_osc_context { -- char *uuid_str; /* uuid string */ -- int rev; -- struct acpi_buffer cap; /* arg2/arg3 */ -- struct acpi_buffer ret; /* free by caller if success */ --}; -- - #define OSC_QUERY_TYPE 0 - #define OSC_SUPPORT_TYPE 1 - #define OSC_CONTROL_TYPE 2 -@@ -272,15 +265,6 @@ struct acpi_osc_context { - #define OSC_INVALID_REVISION_ERROR 8 - #define OSC_CAPABILITIES_MASK_ERROR 16 - --acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context); -- --/* platform-wide _OSC bits */ --#define OSC_SB_PAD_SUPPORT 1 --#define OSC_SB_PPC_OST_SUPPORT 2 --#define OSC_SB_PR3_SUPPORT 4 --#define OSC_SB_CPUHP_OST_SUPPORT 8 --#define OSC_SB_APEI_SUPPORT 16 -- - /* _OSC DW1 Definition (OS Support Fields) */ - #define OSC_EXT_PCI_CONFIG_SUPPORT 1 - #define OSC_ACTIVE_STATE_PWR_SUPPORT 2 -diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h -index 340f441..aece486 100644 ---- a/include/linux/binfmts.h -+++ b/include/linux/binfmts.h -@@ -101,7 +101,6 @@ extern int prepare_binprm(struct linux_binprm *); - extern int __must_check remove_arg_zero(struct linux_binprm *); - extern int search_binary_handler(struct linux_binprm *,struct pt_regs *); - extern int flush_old_exec(struct linux_binprm * bprm); --extern void setup_new_exec(struct linux_binprm * bprm); - - extern int suid_dumpable; - #define SUID_DUMP_DISABLE 0 /* No setuid dumping */ -diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h -index 912b8ff..221cecd 100644 ---- a/include/linux/blkdev.h -+++ b/include/linux/blkdev.h -@@ -942,8 +942,6 @@ extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt); - extern void blk_set_default_limits(struct queue_limits *lim); - extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, - sector_t offset); 
--extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev, -- sector_t offset); - extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, - sector_t offset); - extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b); -@@ -1116,18 +1114,11 @@ static inline int queue_alignment_offset(struct request_queue *q) - return q->limits.alignment_offset; - } - --static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t offset) --{ -- unsigned int granularity = max(lim->physical_block_size, lim->io_min); -- -- offset &= granularity - 1; -- return (granularity + lim->alignment_offset - offset) & (granularity - 1); --} -- - static inline int queue_sector_alignment_offset(struct request_queue *q, - sector_t sector) - { -- return queue_limit_alignment_offset(&q->limits, sector << 9); -+ return ((sector << 9) - q->limits.alignment_offset) -+ & (q->limits.io_min - 1); - } - - static inline int bdev_alignment_offset(struct block_device *bdev) -diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h -index 64b1a4c..83d2fbd 100644 ---- a/include/linux/clocksource.h -+++ b/include/linux/clocksource.h -@@ -151,7 +151,6 @@ extern u64 timecounter_cyc2time(struct timecounter *tc, - * subtraction of non 64 bit counters - * @mult: cycle to nanosecond multiplier - * @shift: cycle to nanosecond divisor (power of two) -- * @max_idle_ns: max idle time permitted by the clocksource (nsecs) - * @flags: flags describing special properties - * @vread: vsyscall based read - * @resume: resume function for the clocksource, if necessary -@@ -169,7 +168,6 @@ struct clocksource { - cycle_t mask; - u32 mult; - u32 shift; -- u64 max_idle_ns; - unsigned long flags; - cycle_t (*vread)(void); - void (*resume)(void); -diff --git a/include/linux/completion.h b/include/linux/completion.h -index 4a6b604..258bec1 100644 ---- a/include/linux/completion.h -+++ b/include/linux/completion.h -@@ -88,6 +88,7 @@ extern bool completion_done(struct completion *x); - - extern void complete(struct completion *); - extern void complete_all(struct completion *); -+extern void complete_n(struct completion *, int n); - - /** - * INIT_COMPLETION: - reinitialize a completion structure -diff --git a/include/linux/connector.h b/include/linux/connector.h -index ecb61c4..3a14615 100644 ---- a/include/linux/connector.h -+++ b/include/linux/connector.h -@@ -24,6 +24,9 @@ - - #include - -+#define CN_IDX_CONNECTOR 0xffffffff -+#define CN_VAL_CONNECTOR 0xffffffff -+ - /* - * Process Events connector unique ids -- used for message routing - */ -@@ -70,6 +73,30 @@ struct cn_msg { - __u8 data[0]; - }; - -+/* -+ * Notify structure - requests notification about -+ * registering/unregistering idx/val in range [first, first+range]. -+ */ -+struct cn_notify_req { -+ __u32 first; -+ __u32 range; -+}; -+ -+/* -+ * Main notification control message -+ * *_notify_num - number of appropriate cn_notify_req structures after -+ * this struct. -+ * group - notification receiver's idx. -+ * len - total length of the attached data. 
-+ */ -+struct cn_ctl_msg { -+ __u32 idx_notify_num; -+ __u32 val_notify_num; -+ __u32 group; -+ __u32 len; -+ __u8 data[0]; -+}; -+ - #ifdef __KERNEL__ - - #include -@@ -122,6 +149,11 @@ struct cn_callback_entry { - u32 seq, group; - }; - -+struct cn_ctl_entry { -+ struct list_head notify_entry; -+ struct cn_ctl_msg *msg; -+}; -+ - struct cn_dev { - struct cb_id id; - -diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h -index d77b547..789cf5f 100644 ---- a/include/linux/cpumask.h -+++ b/include/linux/cpumask.h -@@ -84,7 +84,6 @@ extern const struct cpumask *const cpu_active_mask; - #define num_online_cpus() cpumask_weight(cpu_online_mask) - #define num_possible_cpus() cpumask_weight(cpu_possible_mask) - #define num_present_cpus() cpumask_weight(cpu_present_mask) --#define num_active_cpus() cpumask_weight(cpu_active_mask) - #define cpu_online(cpu) cpumask_test_cpu((cpu), cpu_online_mask) - #define cpu_possible(cpu) cpumask_test_cpu((cpu), cpu_possible_mask) - #define cpu_present(cpu) cpumask_test_cpu((cpu), cpu_present_mask) -@@ -93,7 +92,6 @@ extern const struct cpumask *const cpu_active_mask; - #define num_online_cpus() 1 - #define num_possible_cpus() 1 - #define num_present_cpus() 1 --#define num_active_cpus() 1 - #define cpu_online(cpu) ((cpu) == 0) - #define cpu_possible(cpu) ((cpu) == 0) - #define cpu_present(cpu) ((cpu) == 0) -diff --git a/include/linux/enclosure.h b/include/linux/enclosure.h -index 9a33c5f..90d1c21 100644 ---- a/include/linux/enclosure.h -+++ b/include/linux/enclosure.h -@@ -42,8 +42,6 @@ enum enclosure_status { - ENCLOSURE_STATUS_NOT_INSTALLED, - ENCLOSURE_STATUS_UNKNOWN, - ENCLOSURE_STATUS_UNAVAILABLE, -- /* last element for counting purposes */ -- ENCLOSURE_STATUS_MAX - }; - - /* SFF-8485 activity light settings */ -diff --git a/include/linux/fs.h b/include/linux/fs.h -index 98ea200..5c7e0ff 100644 ---- a/include/linux/fs.h -+++ b/include/linux/fs.h -@@ -15,8 +15,8 @@ - * nr_file rlimit, so it's safe to set up a ridiculously high absolute - * upper limit on files-per-process. - * -- * Some programs (notably those using select()) may have to be -- * recompiled to take full advantage of the new limits.. -+ * Some programs (notably those using select()) may have to be -+ * recompiled to take full advantage of the new limits.. - */ - - /* Fixed constants first: */ -@@ -169,7 +169,7 @@ struct inodes_stat_t { - #define SEL_EX 4 - - /* public flags for file_system_type */ --#define FS_REQUIRES_DEV 1 -+#define FS_REQUIRES_DEV 1 - #define FS_BINARY_MOUNTDATA 2 - #define FS_HAS_SUBTYPE 4 - #define FS_REVAL_DOT 16384 /* Check the paths ".", ".." for staleness */ -@@ -466,7 +466,7 @@ struct iattr { - */ - #include - --/** -+/** - * enum positive_aop_returns - aop return codes with specific semantics - * - * @AOP_WRITEPAGE_ACTIVATE: Informs the caller that page writeback has -@@ -476,7 +476,7 @@ struct iattr { - * be a candidate for writeback again in the near - * future. Other callers must be careful to unlock - * the page if they get this return. Returned by -- * writepage(); -+ * writepage(); - * - * @AOP_TRUNCATED_PAGE: The AOP method that was handed a locked page has - * unlocked it and the page might have been truncated. 
-@@ -715,6 +715,7 @@ static inline int mapping_writably_mapped(struct address_space *mapping) - - struct posix_acl; - #define ACL_NOT_CACHED ((void *)(-1)) -+struct inode_obj_id_table; - - struct inode { - struct hlist_node i_hash; -@@ -783,6 +784,8 @@ struct inode { - struct posix_acl *i_acl; - struct posix_acl *i_default_acl; - #endif -+ struct list_head i_obj_list; -+ struct mutex i_obj_mutex; - void *i_private; /* fs or device private pointer */ - }; - -@@ -995,10 +998,10 @@ static inline int file_check_writeable(struct file *filp) - - #define MAX_NON_LFS ((1UL<<31) - 1) - --/* Page cache limit. The filesystems should put that into their s_maxbytes -- limits, otherwise bad things can happen in VM. */ -+/* Page cache limit. The filesystems should put that into their s_maxbytes -+ limits, otherwise bad things can happen in VM. */ - #if BITS_PER_LONG==32 --#define MAX_LFS_FILESIZE (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1) -+#define MAX_LFS_FILESIZE (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1) - #elif BITS_PER_LONG==64 - #define MAX_LFS_FILESIZE 0x7fffffffffffffffUL - #endif -@@ -2139,7 +2142,7 @@ extern int may_open(struct path *, int, int); - - extern int kernel_read(struct file *, loff_t, char *, unsigned long); - extern struct file * open_exec(const char *); -- -+ - /* fs/dcache.c -- generic fs support functions */ - extern int is_subdir(struct dentry *, struct dentry *); - extern ino_t find_inode_number(struct dentry *, struct qstr *); -@@ -2314,7 +2317,6 @@ extern const struct inode_operations page_symlink_inode_operations; - extern int generic_readlink(struct dentry *, char __user *, int); - extern void generic_fillattr(struct inode *, struct kstat *); - extern int vfs_getattr(struct vfsmount *, struct dentry *, struct kstat *); --void __inode_add_bytes(struct inode *inode, loff_t bytes); - void inode_add_bytes(struct inode *inode, loff_t bytes); - void inode_sub_bytes(struct inode *inode, loff_t bytes); - loff_t inode_get_bytes(struct inode *inode); -diff --git a/include/linux/hid.h b/include/linux/hid.h -index 8709365..10f6284 100644 ---- a/include/linux/hid.h -+++ b/include/linux/hid.h -@@ -312,7 +312,6 @@ struct hid_item { - #define HID_QUIRK_MULTI_INPUT 0x00000040 - #define HID_QUIRK_SKIP_OUTPUT_REPORTS 0x00010000 - #define HID_QUIRK_FULLSPEED_INTERVAL 0x10000000 --#define HID_QUIRK_NO_INIT_REPORTS 0x20000000 - - /* - * This is the global environment of the parser. This information is -diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h -index 9bace4b..b984b94 100644 ---- a/include/linux/hrtimer.h -+++ b/include/linux/hrtimer.h -@@ -166,6 +166,7 @@ struct hrtimer_clock_base { - * event devices whether high resolution mode can be - * activated. 
- * @nr_events: Total number of timer interrupt events -+ * @to_pull: LITMUS^RT list of timers to be pulled on this cpu - */ - struct hrtimer_cpu_base { - spinlock_t lock; -@@ -175,6 +176,26 @@ struct hrtimer_cpu_base { - int hres_active; - unsigned long nr_events; - #endif -+ struct list_head to_pull; -+}; -+ -+#define HRTIMER_START_ON_INACTIVE 0 -+#define HRTIMER_START_ON_QUEUED 1 -+ -+/* -+ * struct hrtimer_start_on_info - save timer info on remote cpu -+ * @list: list of hrtimer_start_on_info on remote cpu (to_pull) -+ * @timer: timer to be triggered on remote cpu -+ * @time: time event -+ * @mode: timer mode -+ * @state: activity flag -+ */ -+struct hrtimer_start_on_info { -+ struct list_head list; -+ struct hrtimer *timer; -+ ktime_t time; -+ enum hrtimer_mode mode; -+ atomic_t state; - }; - - static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time) -@@ -343,6 +364,10 @@ __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, - unsigned long delta_ns, - const enum hrtimer_mode mode, int wakeup); - -+extern int hrtimer_start_on(int cpu, struct hrtimer_start_on_info *info, -+ struct hrtimer *timer, ktime_t time, -+ const enum hrtimer_mode mode); -+ - extern int hrtimer_cancel(struct hrtimer *timer); - extern int hrtimer_try_to_cancel(struct hrtimer *timer); - -@@ -446,7 +471,7 @@ extern void timer_stats_update_stats(void *timer, pid_t pid, void *startf, - - static inline void timer_stats_account_hrtimer(struct hrtimer *timer) - { -- if (likely(!timer_stats_active)) -+ if (likely(!timer->start_site)) - return; - timer_stats_update_stats(timer, timer->start_pid, timer->start_site, - timer->function, timer->start_comm, 0); -@@ -457,6 +482,8 @@ extern void __timer_stats_hrtimer_set_start_info(struct hrtimer *timer, - - static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer) - { -+ if (likely(!timer_stats_active)) -+ return; - __timer_stats_hrtimer_set_start_info(timer, __builtin_return_address(0)); - } - -diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h -index 9cd0bcf..ad27c7d 100644 ---- a/include/linux/inetdevice.h -+++ b/include/linux/inetdevice.h -@@ -83,7 +83,6 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev) - #define IN_DEV_FORWARD(in_dev) IN_DEV_CONF_GET((in_dev), FORWARDING) - #define IN_DEV_MFORWARD(in_dev) IN_DEV_ANDCONF((in_dev), MC_FORWARDING) - #define IN_DEV_RPFILTER(in_dev) IN_DEV_MAXCONF((in_dev), RP_FILTER) --#define IN_DEV_SRC_VMARK(in_dev) IN_DEV_ORCONF((in_dev), SRC_VMARK) - #define IN_DEV_SOURCE_ROUTE(in_dev) IN_DEV_ANDCONF((in_dev), \ - ACCEPT_SOURCE_ROUTE) - #define IN_DEV_BOOTP_RELAY(in_dev) IN_DEV_ANDCONF((in_dev), BOOTP_RELAY) -diff --git a/include/linux/kvm.h b/include/linux/kvm.h -index 0eadd71..f8f8900 100644 ---- a/include/linux/kvm.h -+++ b/include/linux/kvm.h -@@ -116,11 +116,6 @@ struct kvm_run { - __u64 cr8; - __u64 apic_base; - --#ifdef __KVM_S390 -- /* the processor status word for s390 */ -- __u64 psw_mask; /* psw upper half */ -- __u64 psw_addr; /* psw lower half */ --#endif - union { - /* KVM_EXIT_UNKNOWN */ - struct { -@@ -172,6 +167,8 @@ struct kvm_run { - /* KVM_EXIT_S390_SIEIC */ - struct { - __u8 icptcode; -+ __u64 mask; /* psw upper half */ -+ __u64 addr; /* psw lower half */ - __u16 ipa; - __u32 ipb; - } s390_sieic; -@@ -439,7 +436,6 @@ struct kvm_ioeventfd { - #endif - #define KVM_CAP_IOEVENTFD 36 - #define KVM_CAP_SET_IDENTITY_MAP_ADDR 37 --#define KVM_CAP_ADJUST_CLOCK 39 - - #ifdef KVM_CAP_IRQ_ROUTING - -@@ -478,7 +474,6 @@ struct kvm_irq_routing { - }; - - 
#endif --#define KVM_CAP_S390_PSW 42 - - #ifdef KVM_CAP_MCE - /* x86 MCE */ -@@ -502,12 +497,6 @@ struct kvm_irqfd { - __u8 pad[20]; - }; - --struct kvm_clock_data { -- __u64 clock; -- __u32 flags; -- __u32 pad[9]; --}; -- - /* - * ioctls for VM fds - */ -@@ -557,8 +546,6 @@ struct kvm_clock_data { - #define KVM_CREATE_PIT2 _IOW(KVMIO, 0x77, struct kvm_pit_config) - #define KVM_SET_BOOT_CPU_ID _IO(KVMIO, 0x78) - #define KVM_IOEVENTFD _IOW(KVMIO, 0x79, struct kvm_ioeventfd) --#define KVM_SET_CLOCK _IOW(KVMIO, 0x7b, struct kvm_clock_data) --#define KVM_GET_CLOCK _IOR(KVMIO, 0x7c, struct kvm_clock_data) - - /* - * ioctls for vcpu fds -diff --git a/include/linux/libata.h b/include/linux/libata.h -index b0f6d97..8769864 100644 ---- a/include/linux/libata.h -+++ b/include/linux/libata.h -@@ -354,9 +354,6 @@ enum { - /* max tries if error condition is still set after ->error_handler */ - ATA_EH_MAX_TRIES = 5, - -- /* sometimes resuming a link requires several retries */ -- ATA_LINK_RESUME_TRIES = 5, -- - /* how hard are we gonna try to probe/recover devices */ - ATA_PROBE_MAX_TRIES = 3, - ATA_EH_DEV_TRIES = 3, -diff --git a/include/linux/mfd/wm8350/pmic.h b/include/linux/mfd/wm8350/pmic.h -index e786fe9..be3264e 100644 ---- a/include/linux/mfd/wm8350/pmic.h -+++ b/include/linux/mfd/wm8350/pmic.h -@@ -666,20 +666,20 @@ - #define WM8350_ISINK_FLASH_DUR_64MS (1 << 8) - #define WM8350_ISINK_FLASH_DUR_96MS (2 << 8) - #define WM8350_ISINK_FLASH_DUR_1024MS (3 << 8) --#define WM8350_ISINK_FLASH_ON_INSTANT (0 << 0) --#define WM8350_ISINK_FLASH_ON_0_25S (1 << 0) --#define WM8350_ISINK_FLASH_ON_0_50S (2 << 0) --#define WM8350_ISINK_FLASH_ON_1_00S (3 << 0) --#define WM8350_ISINK_FLASH_ON_1_95S (1 << 0) --#define WM8350_ISINK_FLASH_ON_3_91S (2 << 0) --#define WM8350_ISINK_FLASH_ON_7_80S (3 << 0) --#define WM8350_ISINK_FLASH_OFF_INSTANT (0 << 4) --#define WM8350_ISINK_FLASH_OFF_0_25S (1 << 4) --#define WM8350_ISINK_FLASH_OFF_0_50S (2 << 4) --#define WM8350_ISINK_FLASH_OFF_1_00S (3 << 4) --#define WM8350_ISINK_FLASH_OFF_1_95S (1 << 4) --#define WM8350_ISINK_FLASH_OFF_3_91S (2 << 4) --#define WM8350_ISINK_FLASH_OFF_7_80S (3 << 4) -+#define WM8350_ISINK_FLASH_ON_INSTANT (0 << 4) -+#define WM8350_ISINK_FLASH_ON_0_25S (1 << 4) -+#define WM8350_ISINK_FLASH_ON_0_50S (2 << 4) -+#define WM8350_ISINK_FLASH_ON_1_00S (3 << 4) -+#define WM8350_ISINK_FLASH_ON_1_95S (1 << 4) -+#define WM8350_ISINK_FLASH_ON_3_91S (2 << 4) -+#define WM8350_ISINK_FLASH_ON_7_80S (3 << 4) -+#define WM8350_ISINK_FLASH_OFF_INSTANT (0 << 0) -+#define WM8350_ISINK_FLASH_OFF_0_25S (1 << 0) -+#define WM8350_ISINK_FLASH_OFF_0_50S (2 << 0) -+#define WM8350_ISINK_FLASH_OFF_1_00S (3 << 0) -+#define WM8350_ISINK_FLASH_OFF_1_95S (1 << 0) -+#define WM8350_ISINK_FLASH_OFF_3_91S (2 << 0) -+#define WM8350_ISINK_FLASH_OFF_7_80S (3 << 0) - - /* - * Regulator Interrupts. 
-diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h -index 3c62ed4..ed5d750 100644 ---- a/include/linux/pagemap.h -+++ b/include/linux/pagemap.h -@@ -253,8 +253,6 @@ extern struct page * read_cache_page_async(struct address_space *mapping, - extern struct page * read_cache_page(struct address_space *mapping, - pgoff_t index, filler_t *filler, - void *data); --extern struct page * read_cache_page_gfp(struct address_space *mapping, -- pgoff_t index, gfp_t gfp_mask); - extern int read_cache_pages(struct address_space *mapping, - struct list_head *pages, filler_t *filler, void *data); - -diff --git a/include/linux/pci.h b/include/linux/pci.h -index 2547515..f5c7cd3 100644 ---- a/include/linux/pci.h -+++ b/include/linux/pci.h -@@ -564,9 +564,6 @@ void pcibios_align_resource(void *, struct resource *, resource_size_t, - resource_size_t); - void pcibios_update_irq(struct pci_dev *, int irq); - --/* Weak but can be overriden by arch */ --void pci_fixup_cardbus(struct pci_bus *); -- - /* Generic PCI functions used internally */ - - extern struct pci_bus *pci_find_bus(int domain, int busnr); -diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h -index 1b7f2a7..84cf1f3 100644 ---- a/include/linux/pci_ids.h -+++ b/include/linux/pci_ids.h -@@ -2290,20 +2290,6 @@ - #define PCI_DEVICE_ID_MPC8536 0x0051 - #define PCI_DEVICE_ID_P2020E 0x0070 - #define PCI_DEVICE_ID_P2020 0x0071 --#define PCI_DEVICE_ID_P2010E 0x0078 --#define PCI_DEVICE_ID_P2010 0x0079 --#define PCI_DEVICE_ID_P1020E 0x0100 --#define PCI_DEVICE_ID_P1020 0x0101 --#define PCI_DEVICE_ID_P1011E 0x0108 --#define PCI_DEVICE_ID_P1011 0x0109 --#define PCI_DEVICE_ID_P1022E 0x0110 --#define PCI_DEVICE_ID_P1022 0x0111 --#define PCI_DEVICE_ID_P1013E 0x0118 --#define PCI_DEVICE_ID_P1013 0x0119 --#define PCI_DEVICE_ID_P4080E 0x0400 --#define PCI_DEVICE_ID_P4080 0x0401 --#define PCI_DEVICE_ID_P4040E 0x0408 --#define PCI_DEVICE_ID_P4040 0x0409 - #define PCI_DEVICE_ID_MPC8641 0x7010 - #define PCI_DEVICE_ID_MPC8641D 0x7011 - #define PCI_DEVICE_ID_MPC8610 0x7018 -diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h -index 81c9689..9e70126 100644 ---- a/include/linux/perf_event.h -+++ b/include/linux/perf_event.h -@@ -219,7 +219,7 @@ struct perf_event_attr { - #define PERF_EVENT_IOC_DISABLE _IO ('$', 1) - #define PERF_EVENT_IOC_REFRESH _IO ('$', 2) - #define PERF_EVENT_IOC_RESET _IO ('$', 3) --#define PERF_EVENT_IOC_PERIOD _IOW('$', 4, __u64) -+#define PERF_EVENT_IOC_PERIOD _IOW('$', 4, u64) - #define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5) - - enum perf_event_ioc_flags { -diff --git a/include/linux/quota.h b/include/linux/quota.h -index 8fd8efc..78c4889 100644 ---- a/include/linux/quota.h -+++ b/include/linux/quota.h -@@ -313,9 +313,8 @@ struct dquot_operations { - int (*claim_space) (struct inode *, qsize_t); - /* release rsved quota for delayed alloc */ - void (*release_rsv) (struct inode *, qsize_t); -- /* get reserved quota for delayed alloc, value returned is managed by -- * quota code only */ -- qsize_t *(*get_reserved_space) (struct inode *); -+ /* get reserved quota for delayed alloc */ -+ qsize_t (*get_reserved_space) (struct inode *); - }; - - /* Operations handling requests from userspace */ -diff --git a/include/linux/sched.h b/include/linux/sched.h -index e48311e..7248141 100644 ---- a/include/linux/sched.h -+++ b/include/linux/sched.h -@@ -38,6 +38,7 @@ - #define SCHED_BATCH 3 - /* SCHED_ISO: reserved but not implemented yet */ - #define SCHED_IDLE 5 -+#define SCHED_LITMUS 6 - /* Can be ORed in to make sure 
the process is reverted back to SCHED_NORMAL on fork */ - #define SCHED_RESET_ON_FORK 0x40000000 - -@@ -94,6 +95,8 @@ struct sched_param { - - #include - -+#include -+ - struct exec_domain; - struct futex_pi_state; - struct robust_list_head; -@@ -1211,6 +1214,7 @@ struct sched_rt_entity { - }; - - struct rcu_node; -+struct od_table_entry; - - struct task_struct { - volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ -@@ -1293,9 +1297,9 @@ struct task_struct { - unsigned long stack_canary; - #endif - -- /* -+ /* - * pointers to (original) parent process, youngest child, younger sibling, -- * older sibling, respectively. (p->father can be replaced with -+ * older sibling, respectively. (p->father can be replaced with - * p->real_parent->pid) - */ - struct task_struct *real_parent; /* real parent process */ -@@ -1354,7 +1358,7 @@ struct task_struct { - char comm[TASK_COMM_LEN]; /* executable name excluding path - - access with [gs]et_task_comm (which lock - it with task_lock()) -- - initialized normally by setup_new_exec */ -+ - initialized normally by flush_old_exec */ - /* file system info */ - int link_count, total_link_count; - #ifdef CONFIG_SYSVIPC -@@ -1505,6 +1509,13 @@ struct task_struct { - int make_it_fail; - #endif - struct prop_local_single dirties; -+ -+ /* LITMUS RT parameters and state */ -+ struct rt_param rt_param; -+ -+ /* references to PI semaphores, etc. */ -+ struct od_table_entry *od_table; -+ - #ifdef CONFIG_LATENCYTOP - int latency_record_count; - struct latency_record latency_record[LT_SAVECOUNT]; -@@ -2044,7 +2055,7 @@ static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, s - spin_unlock_irqrestore(&tsk->sighand->siglock, flags); - - return ret; --} -+} - - extern void block_all_signals(int (*notifier)(void *priv), void *priv, - sigset_t *mask); -@@ -2086,18 +2097,11 @@ static inline int is_si_special(const struct siginfo *info) - return info <= SEND_SIG_FORCED; - } - --/* -- * True if we are on the alternate signal stack. -- */ -+/* True if we are on the alternate signal stack. 
*/ -+ - static inline int on_sig_stack(unsigned long sp) - { --#ifdef CONFIG_STACK_GROWSUP -- return sp >= current->sas_ss_sp && -- sp - current->sas_ss_sp < current->sas_ss_size; --#else -- return sp > current->sas_ss_sp && -- sp - current->sas_ss_sp <= current->sas_ss_size; --#endif -+ return (sp - current->sas_ss_sp < current->sas_ss_size); - } - - static inline int sas_ss_flags(unsigned long sp) -@@ -2583,28 +2587,6 @@ static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p) - - #define TASK_STATE_TO_CHAR_STR "RSDTtZX" - --static inline unsigned long task_rlimit(const struct task_struct *tsk, -- unsigned int limit) --{ -- return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur); --} -- --static inline unsigned long task_rlimit_max(const struct task_struct *tsk, -- unsigned int limit) --{ -- return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max); --} -- --static inline unsigned long rlimit(unsigned int limit) --{ -- return task_rlimit(current, limit); --} -- --static inline unsigned long rlimit_max(unsigned int limit) --{ -- return task_rlimit_max(current, limit); --} -- - #endif /* __KERNEL__ */ - - #endif -diff --git a/include/linux/security.h b/include/linux/security.h -index d40d23f..239e40d 100644 ---- a/include/linux/security.h -+++ b/include/linux/security.h -@@ -95,13 +95,8 @@ struct seq_file; - extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb); - extern int cap_netlink_recv(struct sk_buff *skb, int cap); - --#ifdef CONFIG_MMU - extern unsigned long mmap_min_addr; - extern unsigned long dac_mmap_min_addr; --#else --#define dac_mmap_min_addr 0UL --#endif -- - /* - * Values used in the task_security_ops calls - */ -@@ -126,7 +121,6 @@ struct request_sock; - #define LSM_UNSAFE_PTRACE 2 - #define LSM_UNSAFE_PTRACE_CAP 4 - --#ifdef CONFIG_MMU - /* - * If a hint addr is less than mmap_min_addr change hint to be as - * low as possible but still greater than mmap_min_addr -@@ -141,7 +135,6 @@ static inline unsigned long round_hint_to_min(unsigned long hint) - } - extern int mmap_min_addr_handler(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos); --#endif - - #ifdef CONFIG_SECURITY - -diff --git a/include/linux/smp.h b/include/linux/smp.h -index 39c64ba..76bb3e4 100644 ---- a/include/linux/smp.h -+++ b/include/linux/smp.h -@@ -77,6 +77,11 @@ void __smp_call_function_single(int cpuid, struct call_single_data *data, - int wait); - - /* -+ * sends a 'pull timer' event to a remote CPU -+ */ -+extern void smp_send_pull_timers(int cpu); -+ -+/* - * Generic and arch helpers - */ - #ifdef CONFIG_USE_GENERIC_SMP_HELPERS -diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h -index 93515c6..a990ace 100644 ---- a/include/linux/syscalls.h -+++ b/include/linux/syscalls.h -@@ -879,8 +879,4 @@ int kernel_execve(const char *filename, char *const argv[], char *const envp[]); - asmlinkage long sys_perf_event_open( - struct perf_event_attr __user *attr_uptr, - pid_t pid, int cpu, int group_fd, unsigned long flags); -- --asmlinkage long sys_mmap_pgoff(unsigned long addr, unsigned long len, -- unsigned long prot, unsigned long flags, -- unsigned long fd, unsigned long pgoff); - #endif -diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h -index 0eb6942..1e4743e 100644 ---- a/include/linux/sysctl.h -+++ b/include/linux/sysctl.h -@@ -490,7 +490,6 @@ enum - NET_IPV4_CONF_PROMOTE_SECONDARIES=20, - NET_IPV4_CONF_ARP_ACCEPT=21, - NET_IPV4_CONF_ARP_NOTIFY=22, -- NET_IPV4_CONF_SRC_VMARK=24, - __NET_IPV4_CONF_MAX - }; - -diff 
--git a/include/linux/tick.h b/include/linux/tick.h -index 0482229..4f9ba05 100644 ---- a/include/linux/tick.h -+++ b/include/linux/tick.h -@@ -71,6 +71,11 @@ extern int tick_is_oneshot_available(void); - extern struct tick_device *tick_get_device(int cpu); - - # ifdef CONFIG_HIGH_RES_TIMERS -+/* LITMUS^RT tick alignment */ -+#define LINUX_DEFAULT_TICKS 0 -+#define LITMUS_ALIGNED_TICKS 1 -+#define LITMUS_STAGGERED_TICKS 2 -+ - extern int tick_init_highres(void); - extern int tick_program_event(ktime_t expires, int force); - extern void tick_setup_sched_timer(void); -diff --git a/include/linux/time.h b/include/linux/time.h -index 6e026e4..fe04e5e 100644 ---- a/include/linux/time.h -+++ b/include/linux/time.h -@@ -148,7 +148,6 @@ extern void monotonic_to_bootbased(struct timespec *ts); - - extern struct timespec timespec_trunc(struct timespec t, unsigned gran); - extern int timekeeping_valid_for_hres(void); --extern u64 timekeeping_max_deferment(void); - extern void update_wall_time(void); - extern void update_xtime_cache(u64 nsec); - extern void timekeeping_leap_insert(int leapsecond); -diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h -index a4b947e..3d15fb9 100644 ---- a/include/linux/usb_usual.h -+++ b/include/linux/usb_usual.h -@@ -56,9 +56,7 @@ - US_FLAG(SANE_SENSE, 0x00008000) \ - /* Sane Sense (> 18 bytes) */ \ - US_FLAG(CAPACITY_OK, 0x00010000) \ -- /* READ CAPACITY response is correct */ \ -- US_FLAG(BAD_SENSE, 0x00020000) \ -- /* Bad Sense (never more than 18 bytes) */ -+ /* READ CAPACITY response is correct */ - - #define US_FLAG(name, value) US_FL_##name = value , - enum { US_DO_ALL_FLAGS }; -diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h -index 3c123c3..227c2a5 100644 ---- a/include/linux/vmalloc.h -+++ b/include/linux/vmalloc.h -@@ -115,11 +115,9 @@ extern rwlock_t vmlist_lock; - extern struct vm_struct *vmlist; - extern __init void vm_area_register_early(struct vm_struct *vm, size_t align); - --#ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA - struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, - const size_t *sizes, int nr_vms, - size_t align, gfp_t gfp_mask); --#endif - - void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms); - -diff --git a/include/litmus/bheap.h b/include/litmus/bheap.h -new file mode 100644 -index 0000000..cf4864a ---- /dev/null -+++ b/include/litmus/bheap.h -@@ -0,0 +1,77 @@ -+/* bheaps.h -- Binomial Heaps -+ * -+ * (c) 2008, 2009 Bjoern Brandenburg -+ */ -+ -+#ifndef BHEAP_H -+#define BHEAP_H -+ -+#define NOT_IN_HEAP UINT_MAX -+ -+struct bheap_node { -+ struct bheap_node* parent; -+ struct bheap_node* next; -+ struct bheap_node* child; -+ -+ unsigned int degree; -+ void* value; -+ struct bheap_node** ref; -+}; -+ -+struct bheap { -+ struct bheap_node* head; -+ /* We cache the minimum of the heap. -+ * This speeds up repeated peek operations. 
-+ */ -+ struct bheap_node* min; -+}; -+ -+typedef int (*bheap_prio_t)(struct bheap_node* a, struct bheap_node* b); -+ -+void bheap_init(struct bheap* heap); -+void bheap_node_init(struct bheap_node** ref_to_bheap_node_ptr, void* value); -+ -+static inline int bheap_node_in_heap(struct bheap_node* h) -+{ -+ return h->degree != NOT_IN_HEAP; -+} -+ -+static inline int bheap_empty(struct bheap* heap) -+{ -+ return heap->head == NULL && heap->min == NULL; -+} -+ -+/* insert (and reinitialize) a node into the heap */ -+void bheap_insert(bheap_prio_t higher_prio, -+ struct bheap* heap, -+ struct bheap_node* node); -+ -+/* merge addition into target */ -+void bheap_union(bheap_prio_t higher_prio, -+ struct bheap* target, -+ struct bheap* addition); -+ -+struct bheap_node* bheap_peek(bheap_prio_t higher_prio, -+ struct bheap* heap); -+ -+struct bheap_node* bheap_take(bheap_prio_t higher_prio, -+ struct bheap* heap); -+ -+void bheap_uncache_min(bheap_prio_t higher_prio, struct bheap* heap); -+int bheap_decrease(bheap_prio_t higher_prio, struct bheap_node* node); -+ -+void bheap_delete(bheap_prio_t higher_prio, -+ struct bheap* heap, -+ struct bheap_node* node); -+ -+/* allocate from memcache */ -+struct bheap_node* bheap_node_alloc(int gfp_flags); -+void bheap_node_free(struct bheap_node* hn); -+ -+/* allocate a heap node for value and insert into the heap */ -+int bheap_add(bheap_prio_t higher_prio, struct bheap* heap, -+ void* value, int gfp_flags); -+ -+void* bheap_take_del(bheap_prio_t higher_prio, -+ struct bheap* heap); -+#endif -diff --git a/include/litmus/edf_common.h b/include/litmus/edf_common.h -new file mode 100644 -index 0000000..80d4321 ---- /dev/null -+++ b/include/litmus/edf_common.h -@@ -0,0 +1,27 @@ -+/* -+ * EDF common data structures and utility functions shared by all EDF -+ * based scheduler plugins -+ */ -+ -+/* CLEANUP: Add comments and make it less messy. -+ * -+ */ -+ -+#ifndef __UNC_EDF_COMMON_H__ -+#define __UNC_EDF_COMMON_H__ -+ -+#include -+ -+void edf_domain_init(rt_domain_t* rt, check_resched_needed_t resched, -+ release_jobs_t release); -+ -+int edf_higher_prio(struct task_struct* first, -+ struct task_struct* second); -+ -+int edf_ready_order(struct bheap_node* a, struct bheap_node* b); -+ -+int edf_preemption_needed(rt_domain_t* rt, struct task_struct *t); -+ -+int edf_set_hp_task(struct pi_semaphore *sem); -+int edf_set_hp_cpu_task(struct pi_semaphore *sem, int cpu); -+#endif -diff --git a/include/litmus/fdso.h b/include/litmus/fdso.h -new file mode 100644 -index 0000000..286e10f ---- /dev/null -+++ b/include/litmus/fdso.h -@@ -0,0 +1,69 @@ -+/* fdso.h - file descriptor attached shared objects -+ * -+ * (c) 2007 B. 
Brandenburg, LITMUS^RT project -+ */ -+ -+#ifndef _LINUX_FDSO_H_ -+#define _LINUX_FDSO_H_ -+ -+#include -+#include -+ -+#include -+ -+#define MAX_OBJECT_DESCRIPTORS 32 -+ -+typedef enum { -+ MIN_OBJ_TYPE = 0, -+ -+ FMLP_SEM = 0, -+ SRP_SEM = 1, -+ -+ MAX_OBJ_TYPE = 1 -+} obj_type_t; -+ -+struct inode_obj_id { -+ struct list_head list; -+ atomic_t count; -+ struct inode* inode; -+ -+ obj_type_t type; -+ void* obj; -+ unsigned int id; -+}; -+ -+ -+struct od_table_entry { -+ unsigned int used; -+ -+ struct inode_obj_id* obj; -+ void* extra; -+}; -+ -+struct fdso_ops { -+ void* (*create) (void); -+ void (*destroy)(void*); -+ int (*open) (struct od_table_entry*, void* __user); -+ int (*close) (struct od_table_entry*); -+}; -+ -+/* translate a userspace supplied od into the raw table entry -+ * returns NULL if od is invalid -+ */ -+struct od_table_entry* __od_lookup(int od); -+ -+/* translate a userspace supplied od into the associated object -+ * returns NULL if od is invalid -+ */ -+static inline void* od_lookup(int od, obj_type_t type) -+{ -+ struct od_table_entry* e = __od_lookup(od); -+ return e && e->obj->type == type ? e->obj->obj : NULL; -+} -+ -+#define lookup_fmlp_sem(od)((struct pi_semaphore*) od_lookup(od, FMLP_SEM)) -+#define lookup_srp_sem(od) ((struct srp_semaphore*) od_lookup(od, SRP_SEM)) -+#define lookup_ics(od) ((struct ics*) od_lookup(od, ICS_ID)) -+ -+ -+#endif -diff --git a/include/litmus/feather_buffer.h b/include/litmus/feather_buffer.h -new file mode 100644 -index 0000000..6c18277 ---- /dev/null -+++ b/include/litmus/feather_buffer.h -@@ -0,0 +1,94 @@ -+#ifndef _FEATHER_BUFFER_H_ -+#define _FEATHER_BUFFER_H_ -+ -+/* requires UINT_MAX and memcpy */ -+ -+#define SLOT_FREE 0 -+#define SLOT_BUSY 1 -+#define SLOT_READY 2 -+ -+struct ft_buffer { -+ unsigned int slot_count; -+ unsigned int slot_size; -+ -+ int free_count; -+ unsigned int write_idx; -+ unsigned int read_idx; ++ int free_count; ++ unsigned int write_idx; ++ unsigned int read_idx; + + char* slots; + void* buffer_mem; @@ -26374,10 +2011,10 @@ index 0000000..5b94d1a +#endif diff --git a/include/litmus/sched_plugin.h b/include/litmus/sched_plugin.h new file mode 100644 -index 0000000..2d856d5 +index 0000000..9c1c9f2 --- /dev/null +++ b/include/litmus/sched_plugin.h -@@ -0,0 +1,159 @@ +@@ -0,0 +1,162 @@ +/* + * Definition of the scheduler plugin interface. 
+ * @@ -26513,6 +2150,9 @@ index 0000000..2d856d5 + +extern struct sched_plugin *litmus; + ++/* cluster size: cache_index = 2 L2, cache_index = 3 L3 */ ++extern int cluster_cache_index; ++ +int register_sched_plugin(struct sched_plugin* plugin); +struct sched_plugin* find_sched_plugin(const char* name); +int print_sched_plugins(char* buf, int max); @@ -26926,441 +2566,6 @@ index 0000000..f0618e7 +__SYSCALL(__NR_null_call, sys_null_call) + +#define NR_litmus_syscalls 14 -diff --git a/include/net/ip.h b/include/net/ip.h -index 69db943..2f47e54 100644 ---- a/include/net/ip.h -+++ b/include/net/ip.h -@@ -342,7 +342,6 @@ enum ip_defrag_users - IP_DEFRAG_CALL_RA_CHAIN, - IP_DEFRAG_CONNTRACK_IN, - IP_DEFRAG_CONNTRACK_OUT, -- IP_DEFRAG_CONNTRACK_BRIDGE_IN, - IP_DEFRAG_VS_IN, - IP_DEFRAG_VS_OUT, - IP_DEFRAG_VS_FWD -diff --git a/include/net/ipv6.h b/include/net/ipv6.h -index 639bbf0..8c31d8a 100644 ---- a/include/net/ipv6.h -+++ b/include/net/ipv6.h -@@ -354,16 +354,8 @@ static inline int ipv6_prefix_equal(const struct in6_addr *a1, - - struct inet_frag_queue; - --enum ip6_defrag_users { -- IP6_DEFRAG_LOCAL_DELIVER, -- IP6_DEFRAG_CONNTRACK_IN, -- IP6_DEFRAG_CONNTRACK_OUT, -- IP6_DEFRAG_CONNTRACK_BRIDGE_IN, --}; -- - struct ip6_create_arg { - __be32 id; -- u32 user; - struct in6_addr *src; - struct in6_addr *dst; - }; -diff --git a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h -index 1ee717e..abc55ad 100644 ---- a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h -+++ b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h -@@ -9,7 +9,7 @@ extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6; - - extern int nf_ct_frag6_init(void); - extern void nf_ct_frag6_cleanup(void); --extern struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user); -+extern struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb); - extern void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb, - struct net_device *in, - struct net_device *out, -diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h -index 63d4498..ba1ba0c 100644 ---- a/include/net/netns/conntrack.h -+++ b/include/net/netns/conntrack.h -@@ -11,8 +11,6 @@ struct nf_conntrack_ecache; - struct netns_ct { - atomic_t count; - unsigned int expect_count; -- unsigned int htable_size; -- struct kmem_cache *nf_conntrack_cachep; - struct hlist_nulls_head *hash; - struct hlist_head *expect_hash; - struct hlist_nulls_head unconfirmed; -@@ -30,6 +28,5 @@ struct netns_ct { - #endif - int hash_vmalloc; - int expect_vmalloc; -- char *slabname; - }; - #endif -diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h -index 9a4b8b7..2eb3814 100644 ---- a/include/net/netns/ipv4.h -+++ b/include/net/netns/ipv4.h -@@ -40,7 +40,6 @@ struct netns_ipv4 { - struct xt_table *iptable_security; - struct xt_table *nat_table; - struct hlist_head *nat_bysource; -- unsigned int nat_htable_size; - int nat_vmalloced; - #endif - -diff --git a/include/net/netrom.h b/include/net/netrom.h -index ab170a6..15696b1 100644 ---- a/include/net/netrom.h -+++ b/include/net/netrom.h -@@ -132,8 +132,6 @@ static __inline__ void nr_node_put(struct nr_node *nr_node) - static __inline__ void nr_neigh_put(struct nr_neigh *nr_neigh) - { - if (atomic_dec_and_test(&nr_neigh->refcount)) { -- if (nr_neigh->ax25) -- ax25_cb_put(nr_neigh->ax25); - kfree(nr_neigh->digipeat); - kfree(nr_neigh); - } -diff --git a/include/net/tcp.h b/include/net/tcp.h -index 842ac4d..03a49c7 100644 ---- a/include/net/tcp.h -+++ b/include/net/tcp.h 
-@@ -1263,20 +1263,14 @@ static inline struct sk_buff *tcp_write_queue_prev(struct sock *sk, struct sk_bu - * TCP connection after "boundary" unsucessful, exponentially backed-off - * retransmissions with an initial RTO of TCP_RTO_MIN. - */ --static inline bool retransmits_timed_out(struct sock *sk, -+static inline bool retransmits_timed_out(const struct sock *sk, - unsigned int boundary) - { - unsigned int timeout, linear_backoff_thresh; -- unsigned int start_ts; - - if (!inet_csk(sk)->icsk_retransmits) - return false; - -- if (unlikely(!tcp_sk(sk)->retrans_stamp)) -- start_ts = TCP_SKB_CB(tcp_write_queue_head(sk))->when; -- else -- start_ts = tcp_sk(sk)->retrans_stamp; -- - linear_backoff_thresh = ilog2(TCP_RTO_MAX/TCP_RTO_MIN); - - if (boundary <= linear_backoff_thresh) -@@ -1285,7 +1279,7 @@ static inline bool retransmits_timed_out(struct sock *sk, - timeout = ((2 << linear_backoff_thresh) - 1) * TCP_RTO_MIN + - (boundary - linear_backoff_thresh) * TCP_RTO_MAX; - -- return (tcp_time_stamp - start_ts) >= timeout; -+ return (tcp_time_stamp - tcp_sk(sk)->retrans_stamp) >= timeout; - } - - static inline struct sk_buff *tcp_send_head(struct sock *sk) -diff --git a/include/scsi/fc_frame.h b/include/scsi/fc_frame.h -index 148126d..c35d238 100644 ---- a/include/scsi/fc_frame.h -+++ b/include/scsi/fc_frame.h -@@ -37,9 +37,6 @@ - #define FC_FRAME_HEADROOM 32 /* headroom for VLAN + FCoE headers */ - #define FC_FRAME_TAILROOM 8 /* trailer space for FCoE */ - --/* Max number of skb frags allowed, reserving one for fcoe_crc_eof page */ --#define FC_FRAME_SG_LEN (MAX_SKB_FRAGS - 1) -- - #define fp_skb(fp) (&((fp)->skb)) - #define fr_hdr(fp) ((fp)->skb.data) - #define fr_len(fp) ((fp)->skb.len) -diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h -index 09a124b..65dc9aa 100644 ---- a/include/scsi/libfc.h -+++ b/include/scsi/libfc.h -@@ -145,7 +145,6 @@ enum fc_rport_state { - RPORT_ST_LOGO, /* port logout sent */ - RPORT_ST_ADISC, /* Discover Address sent */ - RPORT_ST_DELETE, /* port being deleted */ -- RPORT_ST_RESTART, /* remote port being deleted and will restart */ - }; - - /** -diff --git a/include/scsi/osd_protocol.h b/include/scsi/osd_protocol.h -index 6856612..2cc8e8b 100644 ---- a/include/scsi/osd_protocol.h -+++ b/include/scsi/osd_protocol.h -@@ -17,7 +17,6 @@ - #define __OSD_PROTOCOL_H__ - - #include --#include - #include - #include - -diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h -index 0b4baba..47941fc 100644 ---- a/include/scsi/scsi_host.h -+++ b/include/scsi/scsi_host.h -@@ -677,12 +677,6 @@ struct Scsi_Host { - void *shost_data; - - /* -- * Points to the physical bus device we'd use to do DMA -- * Needed just in case we have virtual hosts. -- */ -- struct device *dma_dev; -- -- /* - * We should ensure that this is aligned, both for better performance - * and also because some compilers (m68k) don't automatically force - * alignment to a long boundary. 
-@@ -726,9 +720,7 @@ extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *); - extern void scsi_flush_work(struct Scsi_Host *); - - extern struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *, int); --extern int __must_check scsi_add_host_with_dma(struct Scsi_Host *, -- struct device *, -- struct device *); -+extern int __must_check scsi_add_host(struct Scsi_Host *, struct device *); - extern void scsi_scan_host(struct Scsi_Host *); - extern void scsi_rescan_device(struct device *); - extern void scsi_remove_host(struct Scsi_Host *); -@@ -739,12 +731,6 @@ extern const char *scsi_host_state_name(enum scsi_host_state); - - extern u64 scsi_calculate_bounce_limit(struct Scsi_Host *); - --static inline int __must_check scsi_add_host(struct Scsi_Host *host, -- struct device *dev) --{ -- return scsi_add_host_with_dma(host, dev, dev); --} -- - static inline struct device *scsi_get_device(struct Scsi_Host *shost) - { - return shost->shost_gendev.parent; -diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h -index dacb8ef..cc0d966 100644 ---- a/include/trace/ftrace.h -+++ b/include/trace/ftrace.h -@@ -159,7 +159,7 @@ - #undef __get_str - - #undef TP_printk --#define TP_printk(fmt, args...) "\"%s\", %s\n", fmt, __stringify(args) -+#define TP_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args) - - #undef TP_fast_assign - #define TP_fast_assign(args...) args -diff --git a/kernel/acct.c b/kernel/acct.c -index a6605ca..9a4715a 100644 ---- a/kernel/acct.c -+++ b/kernel/acct.c -@@ -536,8 +536,7 @@ static void do_acct_process(struct bsd_acct_struct *acct, - do_div(elapsed, AHZ); - ac.ac_btime = get_seconds() - elapsed; - /* we really need to bite the bullet and change layout */ -- ac.ac_uid = orig_cred->uid; -- ac.ac_gid = orig_cred->gid; -+ current_uid_gid(&ac.ac_uid, &ac.ac_gid); - #if ACCT_VERSION==2 - ac.ac_ahz = AHZ; - #endif -diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c -index 4b05bd9..2451dc6 100644 ---- a/kernel/audit_tree.c -+++ b/kernel/audit_tree.c -@@ -277,7 +277,7 @@ static void untag_chunk(struct node *p) - owner->root = NULL; - } - -- for (i = j = 0; j <= size; i++, j++) { -+ for (i = j = 0; i < size; i++, j++) { - struct audit_tree *s; - if (&chunk->owners[j] == p) { - list_del_init(&p->list); -@@ -290,7 +290,7 @@ static void untag_chunk(struct node *p) - if (!s) /* result of earlier fallback */ - continue; - get_tree(s); -- list_replace_init(&chunk->owners[j].list, &new->owners[i].list); -+ list_replace_init(&chunk->owners[i].list, &new->owners[j].list); - } - - list_replace_rcu(&chunk->hash, &new->hash); -@@ -373,17 +373,15 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree) - for (n = 0; n < old->count; n++) { - if (old->owners[n].owner == tree) { - spin_unlock(&hash_lock); -- put_inotify_watch(&old->watch); -+ put_inotify_watch(watch); - return 0; - } - } - spin_unlock(&hash_lock); - - chunk = alloc_chunk(old->count + 1); -- if (!chunk) { -- put_inotify_watch(&old->watch); -+ if (!chunk) - return -ENOMEM; -- } - - mutex_lock(&inode->inotify_mutex); - if (inotify_clone_watch(&old->watch, &chunk->watch) < 0) { -@@ -427,8 +425,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree) - spin_unlock(&hash_lock); - inotify_evict_watch(&old->watch); - mutex_unlock(&inode->inotify_mutex); -- put_inotify_watch(&old->watch); /* pair to inotify_find_watch */ -- put_inotify_watch(&old->watch); /* and kill it */ -+ put_inotify_watch(&old->watch); - return 0; - } - -diff --git a/kernel/cgroup.c b/kernel/cgroup.c -index 
1fbcc74..0249f4b 100644 ---- a/kernel/cgroup.c -+++ b/kernel/cgroup.c -@@ -2468,6 +2468,7 @@ static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp, - /* make sure l doesn't vanish out from under us */ - down_write(&l->mutex); - mutex_unlock(&cgrp->pidlist_mutex); -+ l->use_count++; - return l; - } - } -diff --git a/kernel/cpu.c b/kernel/cpu.c -index 291ac58..6ba0f1e 100644 ---- a/kernel/cpu.c -+++ b/kernel/cpu.c -@@ -212,8 +212,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) - err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod, - hcpu, -1, &nr_calls); - if (err == NOTIFY_BAD) { -- set_cpu_active(cpu, true); -- - nr_calls--; - __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod, - hcpu, nr_calls, NULL); -@@ -225,11 +223,11 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) - - /* Ensure that we are not runnable on dying cpu */ - cpumask_copy(old_allowed, ¤t->cpus_allowed); -- set_cpus_allowed_ptr(current, cpu_active_mask); -+ set_cpus_allowed_ptr(current, -+ cpumask_of(cpumask_any_but(cpu_online_mask, cpu))); - - err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu)); - if (err) { -- set_cpu_active(cpu, true); - /* CPU didn't die: tell everyone. Can't complain. */ - if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod, - hcpu) == NOTIFY_BAD) -@@ -294,6 +292,9 @@ int __ref cpu_down(unsigned int cpu) - - err = _cpu_down(cpu, 0); - -+ if (cpu_online(cpu)) -+ set_cpu_active(cpu, true); -+ - out: - cpu_maps_update_done(); - stop_machine_destroy(); -@@ -386,23 +387,15 @@ int disable_nonboot_cpus(void) - * with the userspace trying to use the CPU hotplug at the same time - */ - cpumask_clear(frozen_cpus); -- -- for_each_online_cpu(cpu) { -- if (cpu == first_cpu) -- continue; -- set_cpu_active(cpu, false); -- } -- -- synchronize_sched(); -- - printk("Disabling non-boot CPUs ...\n"); - for_each_online_cpu(cpu) { - if (cpu == first_cpu) - continue; - error = _cpu_down(cpu, 1); -- if (!error) -+ if (!error) { - cpumask_set_cpu(cpu, frozen_cpus); -- else { -+ printk("CPU%d is down\n", cpu); -+ } else { - printk(KERN_ERR "Error taking CPU%d down: %d\n", - cpu, error); - break; -diff --git a/kernel/cpuset.c b/kernel/cpuset.c -index 39e5121..b5cb469 100644 ---- a/kernel/cpuset.c -+++ b/kernel/cpuset.c -@@ -873,7 +873,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, - if (retval < 0) - return retval; - -- if (!cpumask_subset(trialcs->cpus_allowed, cpu_active_mask)) -+ if (!cpumask_subset(trialcs->cpus_allowed, cpu_online_mask)) - return -EINVAL; - } - retval = validate_change(cs, trialcs); -@@ -2011,7 +2011,7 @@ static void scan_for_empty_cpusets(struct cpuset *root) - } - - /* Continue past cpusets with all cpus, mems online */ -- if (cpumask_subset(cp->cpus_allowed, cpu_active_mask) && -+ if (cpumask_subset(cp->cpus_allowed, cpu_online_mask) && - nodes_subset(cp->mems_allowed, node_states[N_HIGH_MEMORY])) - continue; - -@@ -2020,7 +2020,7 @@ static void scan_for_empty_cpusets(struct cpuset *root) - /* Remove offline cpus and mems from this cpuset. 
*/ - mutex_lock(&callback_mutex); - cpumask_and(cp->cpus_allowed, cp->cpus_allowed, -- cpu_active_mask); -+ cpu_online_mask); - nodes_and(cp->mems_allowed, cp->mems_allowed, - node_states[N_HIGH_MEMORY]); - mutex_unlock(&callback_mutex); -@@ -2058,10 +2058,8 @@ static int cpuset_track_online_cpus(struct notifier_block *unused_nb, - switch (phase) { - case CPU_ONLINE: - case CPU_ONLINE_FROZEN: -- case CPU_DOWN_PREPARE: -- case CPU_DOWN_PREPARE_FROZEN: -- case CPU_DOWN_FAILED: -- case CPU_DOWN_FAILED_FROZEN: -+ case CPU_DEAD: -+ case CPU_DEAD_FROZEN: - break; - - default: -@@ -2070,7 +2068,7 @@ static int cpuset_track_online_cpus(struct notifier_block *unused_nb, - - cgroup_lock(); - mutex_lock(&callback_mutex); -- cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask); -+ cpumask_copy(top_cpuset.cpus_allowed, cpu_online_mask); - mutex_unlock(&callback_mutex); - scan_for_empty_cpusets(&top_cpuset); - ndoms = generate_sched_domains(&doms, &attr); -@@ -2117,7 +2115,7 @@ static int cpuset_track_online_nodes(struct notifier_block *self, - - void __init cpuset_init_smp(void) - { -- cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask); -+ cpumask_copy(top_cpuset.cpus_allowed, cpu_online_mask); - top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY]; - - hotcpu_notifier(cpuset_track_online_cpus, 0); -diff --git a/kernel/cred.c b/kernel/cred.c -index 1ed8ca1..dd76cfe 100644 ---- a/kernel/cred.c -+++ b/kernel/cred.c -@@ -224,7 +224,7 @@ struct cred *cred_alloc_blank(void) - #ifdef CONFIG_KEYS - new->tgcred = kzalloc(sizeof(*new->tgcred), GFP_KERNEL); - if (!new->tgcred) { -- kmem_cache_free(cred_jar, new); -+ kfree(new); - return NULL; - } - atomic_set(&new->tgcred->usage, 1); diff --git a/kernel/exit.c b/kernel/exit.c index f7864ac..3da0425 100644 --- a/kernel/exit.c @@ -27415,186 +2620,6 @@ index 166b8c4..9fad346 100644 err = prop_local_init_single(&tsk->dirties); if (err) goto out; -diff --git a/kernel/futex.c b/kernel/futex.c -index 1ad4fa6..fb65e82 100644 ---- a/kernel/futex.c -+++ b/kernel/futex.c -@@ -203,6 +203,8 @@ static void drop_futex_key_refs(union futex_key *key) - * @uaddr: virtual address of the futex - * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED - * @key: address where result is stored. -+ * @rw: mapping needs to be read/write (values: VERIFY_READ, -+ * VERIFY_WRITE) - * - * Returns a negative error code or 0 - * The key words are stored in *key on success. -@@ -214,7 +216,7 @@ static void drop_futex_key_refs(union futex_key *key) - * lock_page() might sleep, the caller should not hold a spinlock. 
- */ - static int --get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key) -+get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw) - { - unsigned long address = (unsigned long)uaddr; - struct mm_struct *mm = current->mm; -@@ -237,7 +239,7 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key) - * but access_ok() should be faster than find_vma() - */ - if (!fshared) { -- if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))) -+ if (unlikely(!access_ok(rw, uaddr, sizeof(u32)))) - return -EFAULT; - key->private.mm = mm; - key->private.address = address; -@@ -246,7 +248,7 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key) - } - - again: -- err = get_user_pages_fast(address, 1, 1, &page); -+ err = get_user_pages_fast(address, 1, rw == VERIFY_WRITE, &page); - if (err < 0) - return err; - -@@ -302,14 +304,8 @@ void put_futex_key(int fshared, union futex_key *key) - */ - static int fault_in_user_writeable(u32 __user *uaddr) - { -- struct mm_struct *mm = current->mm; -- int ret; -- -- down_read(&mm->mmap_sem); -- ret = get_user_pages(current, mm, (unsigned long)uaddr, -- 1, 1, 0, NULL, NULL); -- up_read(&mm->mmap_sem); -- -+ int ret = get_user_pages(current, current->mm, (unsigned long)uaddr, -+ 1, 1, 0, NULL, NULL); - return ret < 0 ? ret : 0; - } - -@@ -530,25 +526,8 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, - return -EINVAL; - - WARN_ON(!atomic_read(&pi_state->refcount)); -- -- /* -- * When pi_state->owner is NULL then the owner died -- * and another waiter is on the fly. pi_state->owner -- * is fixed up by the task which acquires -- * pi_state->rt_mutex. -- * -- * We do not check for pid == 0 which can happen when -- * the owner died and robust_list_exit() cleared the -- * TID. -- */ -- if (pid && pi_state->owner) { -- /* -- * Bail out if user space manipulated the -- * futex value. -- */ -- if (pid != task_pid_vnr(pi_state->owner)) -- return -EINVAL; -- } -+ WARN_ON(pid && pi_state->owner && -+ pi_state->owner->pid != pid); - - atomic_inc(&pi_state->refcount); - *ps = pi_state; -@@ -775,13 +754,6 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this) - if (!pi_state) - return -EINVAL; - -- /* -- * If current does not own the pi_state then the futex is -- * inconsistent and user space fiddled with the futex value. -- */ -- if (pi_state->owner != current) -- return -EINVAL; -- - spin_lock(&pi_state->pi_mutex.wait_lock); - new_owner = rt_mutex_next_owner(&pi_state->pi_mutex); - -@@ -889,7 +861,7 @@ static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset) - if (!bitset) - return -EINVAL; - -- ret = get_futex_key(uaddr, fshared, &key); -+ ret = get_futex_key(uaddr, fshared, &key, VERIFY_READ); - if (unlikely(ret != 0)) - goto out; - -@@ -935,10 +907,10 @@ futex_wake_op(u32 __user *uaddr1, int fshared, u32 __user *uaddr2, - int ret, op_ret; - - retry: -- ret = get_futex_key(uaddr1, fshared, &key1); -+ ret = get_futex_key(uaddr1, fshared, &key1, VERIFY_READ); - if (unlikely(ret != 0)) - goto out; -- ret = get_futex_key(uaddr2, fshared, &key2); -+ ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE); - if (unlikely(ret != 0)) - goto out_put_key1; - -@@ -1197,10 +1169,11 @@ retry: - pi_state = NULL; - } - -- ret = get_futex_key(uaddr1, fshared, &key1); -+ ret = get_futex_key(uaddr1, fshared, &key1, VERIFY_READ); - if (unlikely(ret != 0)) - goto out; -- ret = get_futex_key(uaddr2, fshared, &key2); -+ ret = get_futex_key(uaddr2, fshared, &key2, -+ requeue_pi ? 
VERIFY_WRITE : VERIFY_READ); - if (unlikely(ret != 0)) - goto out_put_key1; - -@@ -1759,7 +1732,7 @@ static int futex_wait_setup(u32 __user *uaddr, u32 val, int fshared, - */ - retry: - q->key = FUTEX_KEY_INIT; -- ret = get_futex_key(uaddr, fshared, &q->key); -+ ret = get_futex_key(uaddr, fshared, &q->key, VERIFY_READ); - if (unlikely(ret != 0)) - return ret; - -@@ -1925,7 +1898,7 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared, - q.requeue_pi_key = NULL; - retry: - q.key = FUTEX_KEY_INIT; -- ret = get_futex_key(uaddr, fshared, &q.key); -+ ret = get_futex_key(uaddr, fshared, &q.key, VERIFY_WRITE); - if (unlikely(ret != 0)) - goto out; - -@@ -1995,7 +1968,7 @@ retry_private: - /* Unqueue and drop the lock */ - unqueue_me_pi(&q); - -- goto out_put_key; -+ goto out; - - out_unlock_put_key: - queue_unlock(&q, hb); -@@ -2044,7 +2017,7 @@ retry: - if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current)) - return -EPERM; - -- ret = get_futex_key(uaddr, fshared, &key); -+ ret = get_futex_key(uaddr, fshared, &key, VERIFY_WRITE); - if (unlikely(ret != 0)) - goto out; - -@@ -2236,7 +2209,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared, - rt_waiter.task = NULL; - - key2 = FUTEX_KEY_INIT; -- ret = get_futex_key(uaddr2, fshared, &key2); -+ ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE); - if (unlikely(ret != 0)) - goto out; - diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 3e1c36e..7b19403 100644 --- a/kernel/hrtimer.c @@ -27686,298 +2711,22 @@ index 3e1c36e..7b19403 100644 + * has done so already + */ + smp_send_pull_timers(cpu); -+ } -+ preempt_enable(); -+ } -+ return in_use; -+} - - /** - * hrtimer_try_to_cancel - try to deactivate a timer -@@ -1597,6 +1678,7 @@ static void __cpuinit init_hrtimers_cpu(int cpu) - cpu_base->clock_base[i].cpu_base = cpu_base; - - hrtimer_init_hres(cpu_base); -+ INIT_LIST_HEAD(&cpu_base->to_pull); - } - - #ifdef CONFIG_HOTPLUG_CPU -diff --git a/kernel/module.c b/kernel/module.c -index dfa33e8..5842a71 100644 ---- a/kernel/module.c -+++ b/kernel/module.c -@@ -1030,23 +1030,11 @@ static int try_to_force_load(struct module *mod, const char *reason) - } - - #ifdef CONFIG_MODVERSIONS --/* If the arch applies (non-zero) relocations to kernel kcrctab, unapply it. 
*/ --static unsigned long maybe_relocated(unsigned long crc, -- const struct module *crc_owner) --{ --#ifdef ARCH_RELOCATES_KCRCTAB -- if (crc_owner == NULL) -- return crc - (unsigned long)reloc_start; --#endif -- return crc; --} -- - static int check_version(Elf_Shdr *sechdrs, - unsigned int versindex, - const char *symname, - struct module *mod, -- const unsigned long *crc, -- const struct module *crc_owner) -+ const unsigned long *crc) - { - unsigned int i, num_versions; - struct modversion_info *versions; -@@ -1067,10 +1055,10 @@ static int check_version(Elf_Shdr *sechdrs, - if (strcmp(versions[i].name, symname) != 0) - continue; - -- if (versions[i].crc == maybe_relocated(*crc, crc_owner)) -+ if (versions[i].crc == *crc) - return 1; - DEBUGP("Found checksum %lX vs module %lX\n", -- maybe_relocated(*crc, crc_owner), versions[i].crc); -+ *crc, versions[i].crc); - goto bad_version; - } - -@@ -1093,8 +1081,7 @@ static inline int check_modstruct_version(Elf_Shdr *sechdrs, - if (!find_symbol(MODULE_SYMBOL_PREFIX "module_layout", NULL, - &crc, true, false)) - BUG(); -- return check_version(sechdrs, versindex, "module_layout", mod, crc, -- NULL); -+ return check_version(sechdrs, versindex, "module_layout", mod, crc); - } - - /* First part is kernel version, which we ignore if module has crcs. */ -@@ -1112,8 +1099,7 @@ static inline int check_version(Elf_Shdr *sechdrs, - unsigned int versindex, - const char *symname, - struct module *mod, -- const unsigned long *crc, -- const struct module *crc_owner) -+ const unsigned long *crc) - { - return 1; - } -@@ -1148,8 +1134,8 @@ static const struct kernel_symbol *resolve_symbol(Elf_Shdr *sechdrs, - /* use_module can fail due to OOM, - or module initialization or unloading */ - if (sym) { -- if (!check_version(sechdrs, versindex, name, mod, crc, owner) -- || !use_module(mod, owner)) -+ if (!check_version(sechdrs, versindex, name, mod, crc) || -+ !use_module(mod, owner)) - sym = NULL; - } - return sym; -@@ -1160,12 +1146,6 @@ static const struct kernel_symbol *resolve_symbol(Elf_Shdr *sechdrs, - * J. Corbet - */ - #if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS) -- --static inline bool sect_empty(const Elf_Shdr *sect) --{ -- return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0; --} -- - struct module_sect_attr - { - struct module_attribute mattr; -@@ -1207,7 +1187,8 @@ static void add_sect_attrs(struct module *mod, unsigned int nsect, - - /* Count loaded sections and allocate structures */ - for (i = 0; i < nsect; i++) -- if (!sect_empty(&sechdrs[i])) -+ if (sechdrs[i].sh_flags & SHF_ALLOC -+ && sechdrs[i].sh_size) - nloaded++; - size[0] = ALIGN(sizeof(*sect_attrs) - + nloaded * sizeof(sect_attrs->attrs[0]), -@@ -1225,7 +1206,9 @@ static void add_sect_attrs(struct module *mod, unsigned int nsect, - sattr = §_attrs->attrs[0]; - gattr = §_attrs->grp.attrs[0]; - for (i = 0; i < nsect; i++) { -- if (sect_empty(&sechdrs[i])) -+ if (! (sechdrs[i].sh_flags & SHF_ALLOC)) -+ continue; -+ if (!sechdrs[i].sh_size) - continue; - sattr->address = sechdrs[i].sh_addr; - sattr->name = kstrdup(secstrings + sechdrs[i].sh_name, -@@ -1309,7 +1292,7 @@ static void add_notes_attrs(struct module *mod, unsigned int nsect, - /* Count notes sections and allocate structures. 
*/ - notes = 0; - for (i = 0; i < nsect; i++) -- if (!sect_empty(&sechdrs[i]) && -+ if ((sechdrs[i].sh_flags & SHF_ALLOC) && - (sechdrs[i].sh_type == SHT_NOTE)) - ++notes; - -@@ -1325,7 +1308,7 @@ static void add_notes_attrs(struct module *mod, unsigned int nsect, - notes_attrs->notes = notes; - nattr = ¬es_attrs->attrs[0]; - for (loaded = i = 0; i < nsect; ++i) { -- if (sect_empty(&sechdrs[i])) -+ if (!(sechdrs[i].sh_flags & SHF_ALLOC)) - continue; - if (sechdrs[i].sh_type == SHT_NOTE) { - nattr->attr.name = mod->sect_attrs->attrs[loaded].name; -diff --git a/kernel/perf_event.c b/kernel/perf_event.c -index 413d101..7f29643 100644 ---- a/kernel/perf_event.c -+++ b/kernel/perf_event.c -@@ -1359,9 +1359,6 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx) - if (event->state != PERF_EVENT_STATE_ACTIVE) - continue; - -- if (event->cpu != -1 && event->cpu != smp_processor_id()) -- continue; -- - hwc = &event->hw; - - interrupts = hwc->interrupts; -@@ -1586,7 +1583,7 @@ static struct perf_event_context *find_get_context(pid_t pid, int cpu) - if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN)) - return ERR_PTR(-EACCES); - -- if (cpu < 0 || cpu >= nr_cpumask_bits) -+ if (cpu < 0 || cpu > num_possible_cpus()) - return ERR_PTR(-EINVAL); - - /* -@@ -2177,7 +2174,6 @@ static void perf_mmap_data_free(struct perf_mmap_data *data) - perf_mmap_free_page((unsigned long)data->user_page); - for (i = 0; i < data->nr_pages; i++) - perf_mmap_free_page((unsigned long)data->data_pages[i]); -- kfree(data); - } - - #else -@@ -2218,7 +2214,6 @@ static void perf_mmap_data_free_work(struct work_struct *work) - perf_mmap_unmark_page(base + (i * PAGE_SIZE)); - - vfree(base); -- kfree(data); - } - - static void perf_mmap_data_free(struct perf_mmap_data *data) -@@ -2324,6 +2319,7 @@ static void perf_mmap_data_free_rcu(struct rcu_head *rcu_head) - - data = container_of(rcu_head, struct perf_mmap_data, rcu_head); - perf_mmap_data_free(data); -+ kfree(data); - } - - static void perf_mmap_data_release(struct perf_event *event) -@@ -3229,12 +3225,6 @@ static void perf_event_task_output(struct perf_event *event, - - static int perf_event_task_match(struct perf_event *event) - { -- if (event->state != PERF_EVENT_STATE_ACTIVE) -- return 0; -- -- if (event->cpu != -1 && event->cpu != smp_processor_id()) -- return 0; -- - if (event->attr.comm || event->attr.mmap || event->attr.task) - return 1; - -@@ -3264,13 +3254,13 @@ static void perf_event_task_event(struct perf_task_event *task_event) - - cpuctx = &get_cpu_var(perf_cpu_context); - perf_event_task_ctx(&cpuctx->ctx, task_event); -+ put_cpu_var(perf_cpu_context); - - rcu_read_lock(); - if (!ctx) - ctx = rcu_dereference(task_event->task->perf_event_ctxp); - if (ctx) - perf_event_task_ctx(ctx, task_event); -- put_cpu_var(perf_cpu_context); - rcu_read_unlock(); - } - -@@ -3347,12 +3337,6 @@ static void perf_event_comm_output(struct perf_event *event, - - static int perf_event_comm_match(struct perf_event *event) - { -- if (event->state != PERF_EVENT_STATE_ACTIVE) -- return 0; -- -- if (event->cpu != -1 && event->cpu != smp_processor_id()) -- return 0; -- - if (event->attr.comm) - return 1; - -@@ -3393,6 +3377,7 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event) ++ } ++ preempt_enable(); ++ } ++ return in_use; ++} - cpuctx = &get_cpu_var(perf_cpu_context); - perf_event_comm_ctx(&cpuctx->ctx, comm_event); -+ put_cpu_var(perf_cpu_context); + /** + * hrtimer_try_to_cancel - try to deactivate a timer +@@ -1597,6 +1678,7 @@ static void __cpuinit 
init_hrtimers_cpu(int cpu) + cpu_base->clock_base[i].cpu_base = cpu_base; - rcu_read_lock(); - /* -@@ -3402,7 +3387,6 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event) - ctx = rcu_dereference(current->perf_event_ctxp); - if (ctx) - perf_event_comm_ctx(ctx, comm_event); -- put_cpu_var(perf_cpu_context); - rcu_read_unlock(); + hrtimer_init_hres(cpu_base); ++ INIT_LIST_HEAD(&cpu_base->to_pull); } -@@ -3477,12 +3461,6 @@ static void perf_event_mmap_output(struct perf_event *event, - static int perf_event_mmap_match(struct perf_event *event, - struct perf_mmap_event *mmap_event) - { -- if (event->state != PERF_EVENT_STATE_ACTIVE) -- return 0; -- -- if (event->cpu != -1 && event->cpu != smp_processor_id()) -- return 0; -- - if (event->attr.mmap) - return 1; - -@@ -3560,6 +3538,7 @@ got_name: - - cpuctx = &get_cpu_var(perf_cpu_context); - perf_event_mmap_ctx(&cpuctx->ctx, mmap_event); -+ put_cpu_var(perf_cpu_context); - - rcu_read_lock(); - /* -@@ -3569,7 +3548,6 @@ got_name: - ctx = rcu_dereference(current->perf_event_ctxp); - if (ctx) - perf_event_mmap_ctx(ctx, mmap_event); -- put_cpu_var(perf_cpu_context); - rcu_read_unlock(); - - kfree(buf); -@@ -3832,9 +3810,6 @@ static int perf_swevent_match(struct perf_event *event, - enum perf_type_id type, - u32 event_id, struct pt_regs *regs) - { -- if (event->cpu != -1 && event->cpu != smp_processor_id()) -- return 0; -- - if (!perf_swevent_is_counting(event)) - return 0; - -@@ -3974,7 +3949,6 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer) - event->pmu->read(event); - - data.addr = 0; -- data.period = event->hw.last_period; - regs = get_irq_regs(); - /* - * In case we exclude kernel IPs or are somehow not in interrupt + #ifdef CONFIG_HOTPLUG_CPU diff --git a/kernel/printk.c b/kernel/printk.c index f38b07f..6712a25 100644 --- a/kernel/printk.c @@ -28024,437 +2773,8 @@ index f38b07f..6712a25 100644 __raw_get_cpu_var(printk_pending) = 1; } -diff --git a/kernel/rcutree.c b/kernel/rcutree.c -index 683c4f3..f3077c0 100644 ---- a/kernel/rcutree.c -+++ b/kernel/rcutree.c -@@ -176,29 +176,9 @@ static struct rcu_node *rcu_get_root(struct rcu_state *rsp) - return &rsp->node[0]; - } - --/* -- * Record the specified "completed" value, which is later used to validate -- * dynticks counter manipulations and CPU-offline checks. Specify -- * "rsp->completed - 1" to unconditionally invalidate any future dynticks -- * manipulations and CPU-offline checks. Such invalidation is useful at -- * the beginning of a grace period. -- */ --static void dyntick_record_completed(struct rcu_state *rsp, long comp) --{ -- rsp->dynticks_completed = comp; --} -- - #ifdef CONFIG_SMP - - /* -- * Recall the previously recorded value of the completion for dynticks. -- */ --static long dyntick_recall_completed(struct rcu_state *rsp) --{ -- return rsp->dynticks_completed; --} -- --/* - * If the specified CPU is offline, tell the caller that it is in - * a quiescent state. Otherwise, whack it with a reschedule IPI. - * Grace periods can end up waiting on an offline CPU when that -@@ -355,9 +335,28 @@ void rcu_irq_exit(void) - set_need_resched(); - } - -+/* -+ * Record the specified "completed" value, which is later used to validate -+ * dynticks counter manipulations. Specify "rsp->completed - 1" to -+ * unconditionally invalidate any future dynticks manipulations (which is -+ * useful at the beginning of a grace period). 
-+ */ -+static void dyntick_record_completed(struct rcu_state *rsp, long comp) -+{ -+ rsp->dynticks_completed = comp; -+} -+ - #ifdef CONFIG_SMP - - /* -+ * Recall the previously recorded value of the completion for dynticks. -+ */ -+static long dyntick_recall_completed(struct rcu_state *rsp) -+{ -+ return rsp->dynticks_completed; -+} -+ -+/* - * Snapshot the specified CPU's dynticks counter so that we can later - * credit them with an implicit quiescent state. Return 1 if this CPU - * is in dynticks idle mode, which is an extended quiescent state. -@@ -420,8 +419,24 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) - - #else /* #ifdef CONFIG_NO_HZ */ - -+static void dyntick_record_completed(struct rcu_state *rsp, long comp) -+{ -+} -+ - #ifdef CONFIG_SMP - -+/* -+ * If there are no dynticks, then the only way that a CPU can passively -+ * be in a quiescent state is to be offline. Unlike dynticks idle, which -+ * is a point in time during the prior (already finished) grace period, -+ * an offline CPU is always in a quiescent state, and thus can be -+ * unconditionally applied. So just return the current value of completed. -+ */ -+static long dyntick_recall_completed(struct rcu_state *rsp) -+{ -+ return rsp->completed; -+} -+ - static int dyntick_save_progress_counter(struct rcu_data *rdp) - { - return 0; -@@ -538,33 +553,13 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp) - /* - * Update CPU-local rcu_data state to record the newly noticed grace period. - * This is used both when we started the grace period and when we notice -- * that someone else started the grace period. The caller must hold the -- * ->lock of the leaf rcu_node structure corresponding to the current CPU, -- * and must have irqs disabled. -+ * that someone else started the grace period. - */ --static void __note_new_gpnum(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp) --{ -- if (rdp->gpnum != rnp->gpnum) { -- rdp->qs_pending = 1; -- rdp->passed_quiesc = 0; -- rdp->gpnum = rnp->gpnum; -- } --} -- - static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp) - { -- unsigned long flags; -- struct rcu_node *rnp; -- -- local_irq_save(flags); -- rnp = rdp->mynode; -- if (rdp->gpnum == ACCESS_ONCE(rnp->gpnum) || /* outside lock. */ -- !spin_trylock(&rnp->lock)) { /* irqs already off, retry later. */ -- local_irq_restore(flags); -- return; -- } -- __note_new_gpnum(rsp, rnp, rdp); -- spin_unlock_irqrestore(&rnp->lock, flags); -+ rdp->qs_pending = 1; -+ rdp->passed_quiesc = 0; -+ rdp->gpnum = rsp->gpnum; - } - - /* -@@ -588,79 +583,6 @@ check_for_new_grace_period(struct rcu_state *rsp, struct rcu_data *rdp) - } - - /* -- * Advance this CPU's callbacks, but only if the current grace period -- * has ended. This may be called only from the CPU to whom the rdp -- * belongs. In addition, the corresponding leaf rcu_node structure's -- * ->lock must be held by the caller, with irqs disabled. -- */ --static void --__rcu_process_gp_end(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp) --{ -- /* Did another grace period end? */ -- if (rdp->completed != rnp->completed) { -- -- /* Advance callbacks. No harm if list empty. */ -- rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[RCU_WAIT_TAIL]; -- rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_READY_TAIL]; -- rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; -- -- /* Remember that we saw this grace-period completion. 
*/ -- rdp->completed = rnp->completed; -- } --} -- --/* -- * Advance this CPU's callbacks, but only if the current grace period -- * has ended. This may be called only from the CPU to whom the rdp -- * belongs. -- */ --static void --rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp) --{ -- unsigned long flags; -- struct rcu_node *rnp; -- -- local_irq_save(flags); -- rnp = rdp->mynode; -- if (rdp->completed == ACCESS_ONCE(rnp->completed) || /* outside lock. */ -- !spin_trylock(&rnp->lock)) { /* irqs already off, retry later. */ -- local_irq_restore(flags); -- return; -- } -- __rcu_process_gp_end(rsp, rnp, rdp); -- spin_unlock_irqrestore(&rnp->lock, flags); --} -- --/* -- * Do per-CPU grace-period initialization for running CPU. The caller -- * must hold the lock of the leaf rcu_node structure corresponding to -- * this CPU. -- */ --static void --rcu_start_gp_per_cpu(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp) --{ -- /* Prior grace period ended, so advance callbacks for current CPU. */ -- __rcu_process_gp_end(rsp, rnp, rdp); -- -- /* -- * Because this CPU just now started the new grace period, we know -- * that all of its callbacks will be covered by this upcoming grace -- * period, even the ones that were registered arbitrarily recently. -- * Therefore, advance all outstanding callbacks to RCU_WAIT_TAIL. -- * -- * Other CPUs cannot be sure exactly when the grace period started. -- * Therefore, their recently registered callbacks must pass through -- * an additional RCU_NEXT_READY stage, so that they will be handled -- * by the next RCU grace period. -- */ -- rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; -- rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; -- -- /* Set state so that this CPU will detect the next quiescent state. */ -- __note_new_gpnum(rsp, rnp, rdp); --} -- --/* - * Start a new RCU grace period if warranted, re-initializing the hierarchy - * in preparation for detecting the next grace period. The caller must hold - * the root node's ->lock, which is released before return. Hard irqs must -@@ -685,15 +607,28 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags) - rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS; - record_gp_stall_check_time(rsp); - dyntick_record_completed(rsp, rsp->completed - 1); -+ note_new_gpnum(rsp, rdp); -+ -+ /* -+ * Because this CPU just now started the new grace period, we know -+ * that all of its callbacks will be covered by this upcoming grace -+ * period, even the ones that were registered arbitrarily recently. -+ * Therefore, advance all outstanding callbacks to RCU_WAIT_TAIL. -+ * -+ * Other CPUs cannot be sure exactly when the grace period started. -+ * Therefore, their recently registered callbacks must pass through -+ * an additional RCU_NEXT_READY stage, so that they will be handled -+ * by the next RCU grace period. -+ */ -+ rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; -+ rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; - - /* Special-case the common single-level case. */ - if (NUM_RCU_NODES == 1) { - rcu_preempt_check_blocked_tasks(rnp); - rnp->qsmask = rnp->qsmaskinit; - rnp->gpnum = rsp->gpnum; -- rnp->completed = rsp->completed; - rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. 
*/ -- rcu_start_gp_per_cpu(rsp, rnp, rdp); - spin_unlock_irqrestore(&rnp->lock, flags); - return; - } -@@ -726,9 +661,6 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags) - rcu_preempt_check_blocked_tasks(rnp); - rnp->qsmask = rnp->qsmaskinit; - rnp->gpnum = rsp->gpnum; -- rnp->completed = rsp->completed; -- if (rnp == rdp->mynode) -- rcu_start_gp_per_cpu(rsp, rnp, rdp); - spin_unlock(&rnp->lock); /* irqs remain disabled. */ - } - -@@ -740,6 +672,34 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags) - } - - /* -+ * Advance this CPU's callbacks, but only if the current grace period -+ * has ended. This may be called only from the CPU to whom the rdp -+ * belongs. -+ */ -+static void -+rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp) -+{ -+ long completed_snap; -+ unsigned long flags; -+ -+ local_irq_save(flags); -+ completed_snap = ACCESS_ONCE(rsp->completed); /* outside of lock. */ -+ -+ /* Did another grace period end? */ -+ if (rdp->completed != completed_snap) { -+ -+ /* Advance callbacks. No harm if list empty. */ -+ rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[RCU_WAIT_TAIL]; -+ rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_READY_TAIL]; -+ rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; -+ -+ /* Remember that we saw this grace-period completion. */ -+ rdp->completed = completed_snap; -+ } -+ local_irq_restore(flags); -+} -+ -+/* - * Clean up after the prior grace period and let rcu_start_gp() start up - * the next grace period if one is needed. Note that the caller must - * hold rnp->lock, as required by rcu_start_gp(), which will release it. -@@ -750,6 +710,7 @@ static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags) - WARN_ON_ONCE(!rcu_gp_in_progress(rsp)); - rsp->completed = rsp->gpnum; - rsp->signaled = RCU_GP_IDLE; -+ rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]); - rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */ - } - -@@ -1183,7 +1144,6 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed) - long lastcomp; - struct rcu_node *rnp = rcu_get_root(rsp); - u8 signaled; -- u8 forcenow; - - if (!rcu_gp_in_progress(rsp)) - return; /* No grace period in progress, nothing to force. */ -@@ -1220,23 +1180,16 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed) - if (rcu_process_dyntick(rsp, lastcomp, - dyntick_save_progress_counter)) - goto unlock_ret; -- /* fall into next case. */ -- -- case RCU_SAVE_COMPLETED: - - /* Update state, record completion counter. */ -- forcenow = 0; - spin_lock(&rnp->lock); - if (lastcomp == rsp->completed && -- rsp->signaled == signaled) { -+ rsp->signaled == RCU_SAVE_DYNTICK) { - rsp->signaled = RCU_FORCE_QS; - dyntick_record_completed(rsp, lastcomp); -- forcenow = signaled == RCU_SAVE_COMPLETED; - } - spin_unlock(&rnp->lock); -- if (!forcenow) -- break; -- /* fall into next case. */ -+ break; - - case RCU_FORCE_QS: - -@@ -1591,16 +1544,21 @@ static void __cpuinit - rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable) - { - unsigned long flags; -+ long lastcomp; - unsigned long mask; - struct rcu_data *rdp = rsp->rda[cpu]; - struct rcu_node *rnp = rcu_get_root(rsp); - - /* Set up local state, ensuring consistent view of global state. */ - spin_lock_irqsave(&rnp->lock, flags); -+ lastcomp = rsp->completed; -+ rdp->completed = lastcomp; -+ rdp->gpnum = lastcomp; - rdp->passed_quiesc = 0; /* We could be racing with new GP, */ - rdp->qs_pending = 1; /* so set up to respond to current GP. 
*/ - rdp->beenonline = 1; /* We have now been online. */ - rdp->preemptable = preemptable; -+ rdp->passed_quiesc_completed = lastcomp - 1; - rdp->qlen_last_fqs_check = 0; - rdp->n_force_qs_snap = rsp->n_force_qs; - rdp->blimit = blimit; -@@ -1622,11 +1580,6 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable) - spin_lock(&rnp->lock); /* irqs already disabled. */ - rnp->qsmaskinit |= mask; - mask = rnp->grpmask; -- if (rnp == rdp->mynode) { -- rdp->gpnum = rnp->completed; /* if GP in progress... */ -- rdp->completed = rnp->completed; -- rdp->passed_quiesc_completed = rnp->completed - 1; -- } - spin_unlock(&rnp->lock); /* irqs already disabled. */ - rnp = rnp->parent; - } while (rnp != NULL && !(rnp->qsmaskinit & mask)); -diff --git a/kernel/rcutree.h b/kernel/rcutree.h -index ddb79ec..1899023 100644 ---- a/kernel/rcutree.h -+++ b/kernel/rcutree.h -@@ -84,9 +84,6 @@ struct rcu_node { - long gpnum; /* Current grace period for this node. */ - /* This will either be equal to or one */ - /* behind the root rcu_node's gpnum. */ -- long completed; /* Last grace period completed for this node. */ -- /* This will either be equal to or one */ -- /* behind the root rcu_node's gpnum. */ - unsigned long qsmask; /* CPUs or groups that need to switch in */ - /* order for current grace period to proceed.*/ - /* In leaf rcu_node, each bit corresponds to */ -@@ -207,12 +204,11 @@ struct rcu_data { - #define RCU_GP_IDLE 0 /* No grace period in progress. */ - #define RCU_GP_INIT 1 /* Grace period being initialized. */ - #define RCU_SAVE_DYNTICK 2 /* Need to scan dyntick state. */ --#define RCU_SAVE_COMPLETED 3 /* Need to save rsp->completed. */ --#define RCU_FORCE_QS 4 /* Need to force quiescent state. */ -+#define RCU_FORCE_QS 3 /* Need to force quiescent state. */ - #ifdef CONFIG_NO_HZ - #define RCU_SIGNAL_INIT RCU_SAVE_DYNTICK - #else /* #ifdef CONFIG_NO_HZ */ --#define RCU_SIGNAL_INIT RCU_SAVE_COMPLETED -+#define RCU_SIGNAL_INIT RCU_FORCE_QS - #endif /* #else #ifdef CONFIG_NO_HZ */ - - #define RCU_JIFFIES_TILL_FORCE_QS 3 /* for rsp->jiffies_force_qs */ -@@ -278,8 +274,9 @@ struct rcu_state { - unsigned long jiffies_stall; /* Time at which to check */ - /* for CPU stalls. */ - #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ -+#ifdef CONFIG_NO_HZ - long dynticks_completed; /* Value of completed @ snap. */ -- /* Protected by fqslock. */ -+#endif /* #ifdef CONFIG_NO_HZ */ - }; - - #ifdef RCU_TREE_NONCORE -@@ -301,7 +298,7 @@ DECLARE_PER_CPU(struct rcu_data, rcu_preempt_data); - #else /* #ifdef RCU_TREE_NONCORE */ - - /* Forward declarations for rcutree_plugin.h */ --static void rcu_bootup_announce(void); -+static inline void rcu_bootup_announce(void); - long rcu_batches_completed(void); - static void rcu_preempt_note_context_switch(int cpu); - static int rcu_preempted_readers(struct rcu_node *rnp); -diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h -index c03edf7..ef2a58c 100644 ---- a/kernel/rcutree_plugin.h -+++ b/kernel/rcutree_plugin.h -@@ -33,7 +33,7 @@ DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data); - /* - * Tell them what RCU they are running. - */ --static void rcu_bootup_announce(void) -+static inline void rcu_bootup_announce(void) - { - printk(KERN_INFO - "Experimental preemptable hierarchical RCU implementation.\n"); -@@ -481,7 +481,7 @@ void exit_rcu(void) - /* - * Tell them what RCU they are running. 
- */ --static void rcu_bootup_announce(void) -+static inline void rcu_bootup_announce(void) - { - printk(KERN_INFO "Hierarchical RCU implementation.\n"); - } diff --git a/kernel/sched.c b/kernel/sched.c -index 60d74cc..1701eae 100644 +index 3c11ae0..adb5e92 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -77,6 +77,9 @@ @@ -28488,30 +2808,7 @@ index 60d74cc..1701eae 100644 #ifdef CONFIG_FAIR_GROUP_SCHED /* list of leaf cfs_rq on this cpu: */ -@@ -591,8 +601,6 @@ struct rq { - - u64 rt_avg; - u64 age_stamp; -- u64 idle_stamp; -- u64 avg_idle; - #endif - - /* calc_load related fields */ -@@ -816,7 +824,6 @@ const_debug unsigned int sysctl_sched_nr_migrate = 32; - * default: 0.25ms - */ - unsigned int sysctl_sched_shares_ratelimit = 250000; --unsigned int normalized_sysctl_sched_shares_ratelimit = 250000; - - /* - * Inject some fuzzyness into changing the per-cpu group shares -@@ -1813,17 +1820,17 @@ static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares) - #endif - - static void calc_load_account_active(struct rq *this_rq); --static void update_sysctl(void); - - #include "sched_stats.h" +@@ -1815,11 +1825,12 @@ static void calc_load_account_active(struct rq *this_rq); #include "sched_idletask.c" #include "sched_fair.c" #include "sched_rt.c" @@ -28525,27 +2822,7 @@ index 60d74cc..1701eae 100644 #define for_each_class(class) \ for (class = sched_class_highest; class; class = class->next) -@@ -2038,9 +2045,6 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd) - { - s64 delta; - -- if (p->sched_class != &fair_sched_class) -- return 0; -- - /* - * Buddy candidates are cache hot: - */ -@@ -2049,6 +2053,9 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd) - &p->se == cfs_rq_of(&p->se)->last)) - return 1; - -+ if (p->sched_class != &fair_sched_class) -+ return 0; -+ - if (sysctl_sched_migration_cost == -1) - return 1; - if (sysctl_sched_migration_cost == 0) -@@ -2347,6 +2354,9 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, +@@ -2343,6 +2354,9 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, unsigned long flags; struct rq *rq, *orig_rq; @@ -28555,7 +2832,7 @@ index 60d74cc..1701eae 100644 if (!sched_feat(SYNC_WAKEUPS)) wake_flags &= ~WF_SYNC; -@@ -2365,7 +2375,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, +@@ -2361,7 +2375,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, orig_cpu = cpu; #ifdef CONFIG_SMP @@ -28564,21 +2841,8 @@ index 60d74cc..1701eae 100644 goto out_activate; /* -@@ -2444,19 +2454,10 @@ out_running: - #ifdef CONFIG_SMP - if (p->sched_class->task_wake_up) +@@ -2442,6 +2456,8 @@ out_running: p->sched_class->task_wake_up(rq, p); -- -- if (unlikely(rq->idle_stamp)) { -- u64 delta = rq->clock - rq->idle_stamp; -- u64 max = 2*sysctl_sched_migration_cost; -- -- if (delta > max) -- rq->avg_idle = max; -- else -- update_avg(&rq->avg_idle, delta); -- rq->idle_stamp = 0; -- } #endif out: + if (is_realtime(p)) @@ -28586,7 +2850,7 @@ index 60d74cc..1701eae 100644 task_rq_unlock(rq, &flags); put_cpu(); -@@ -2765,6 +2766,8 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev) +@@ -2750,6 +2766,8 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev) */ prev_state = prev->state; finish_arch_switch(prev); @@ -28595,7 +2859,7 @@ index 60d74cc..1701eae 100644 perf_event_task_sched_in(current, cpu_of(rq)); finish_lock_switch(rq, prev); -@@ -2788,6 +2791,15 @@ static inline void pre_schedule(struct rq *rq, 
struct task_struct *prev) +@@ -2773,6 +2791,15 @@ static inline void pre_schedule(struct rq *rq, struct task_struct *prev) { if (prev->sched_class->pre_schedule) prev->sched_class->pre_schedule(rq, prev); @@ -28611,69 +2875,7 @@ index 60d74cc..1701eae 100644 } /* rq->lock is NOT held, but preemption is disabled */ -@@ -3179,6 +3191,10 @@ static void pull_task(struct rq *src_rq, struct task_struct *p, - deactivate_task(src_rq, p, 0); - set_task_cpu(p, this_cpu); - activate_task(this_rq, p, 0); -+ /* -+ * Note that idle threads have a prio of MAX_PRIO, for this test -+ * to be always true for them. -+ */ - check_preempt_curr(this_rq, p, 0); - } - -@@ -4137,7 +4153,7 @@ static int load_balance(int this_cpu, struct rq *this_rq, - unsigned long flags; - struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask); - -- cpumask_copy(cpus, cpu_active_mask); -+ cpumask_setall(cpus); - - /* - * When power savings policy is enabled for the parent domain, idle -@@ -4300,7 +4316,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd) - int all_pinned = 0; - struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask); - -- cpumask_copy(cpus, cpu_active_mask); -+ cpumask_setall(cpus); - - /* - * When power savings policy is enabled for the parent domain, idle -@@ -4440,11 +4456,6 @@ static void idle_balance(int this_cpu, struct rq *this_rq) - int pulled_task = 0; - unsigned long next_balance = jiffies + HZ; - -- this_rq->idle_stamp = this_rq->clock; -- -- if (this_rq->avg_idle < sysctl_sched_migration_cost) -- return; -- - for_each_domain(this_cpu, sd) { - unsigned long interval; - -@@ -4459,10 +4470,8 @@ static void idle_balance(int this_cpu, struct rq *this_rq) - interval = msecs_to_jiffies(sd->balance_interval); - if (time_after(next_balance, sd->last_balance + interval)) - next_balance = sd->last_balance + interval; -- if (pulled_task) { -- this_rq->idle_stamp = 0; -+ if (pulled_task) - break; -- } - } - if (pulled_task || time_after(jiffies, this_rq->next_balance)) { - /* -@@ -4697,7 +4706,7 @@ int select_nohz_load_balancer(int stop_tick) - cpumask_set_cpu(cpu, nohz.cpu_mask); - - /* time for ilb owner also to sleep */ -- if (cpumask_weight(nohz.cpu_mask) == num_active_cpus()) { -+ if (cpumask_weight(nohz.cpu_mask) == num_online_cpus()) { - if (atomic_read(&nohz.load_balancer) == cpu) - atomic_set(&nohz.load_balancer, -1); - return 0; -@@ -5250,18 +5259,26 @@ void scheduler_tick(void) +@@ -5232,18 +5259,26 @@ void scheduler_tick(void) sched_clock_tick(); @@ -28701,7 +2903,7 @@ index 60d74cc..1701eae 100644 } notrace unsigned long get_parent_ip(unsigned long addr) -@@ -5404,12 +5421,20 @@ pick_next_task(struct rq *rq) +@@ -5386,12 +5421,20 @@ pick_next_task(struct rq *rq) /* * Optimization: we know that if all tasks are in * the fair class we can call that function directly: @@ -28724,7 +2926,7 @@ index 60d74cc..1701eae 100644 class = sched_class_highest; for ( ; ; ) { -@@ -5444,6 +5469,8 @@ need_resched: +@@ -5426,6 +5469,8 @@ need_resched: release_kernel_lock(prev); need_resched_nonpreemptible: @@ -28733,7 +2935,7 @@ index 60d74cc..1701eae 100644 schedule_debug(prev); -@@ -5478,24 +5505,40 @@ need_resched_nonpreemptible: +@@ -5460,24 +5505,36 @@ need_resched_nonpreemptible: rq->curr = next; ++*switch_count; @@ -28753,20 +2955,16 @@ index 60d74cc..1701eae 100644 spin_unlock_irq(&rq->lock); + } + -+ TS_SCHED2_START(current); + sched_trace_task_switch_to(current); post_schedule(rq); - if (unlikely(reacquire_kernel_lock(current) < 0)) + if 
(unlikely(reacquire_kernel_lock(current) < 0)) { -+ TS_SCHED2_END(current); goto need_resched_nonpreemptible; + } preempt_enable_no_resched(); -+ -+ TS_SCHED2_END(current); + if (need_resched()) goto need_resched; @@ -28776,7 +2974,7 @@ index 60d74cc..1701eae 100644 } EXPORT_SYMBOL(schedule); -@@ -5772,6 +5815,17 @@ void complete_all(struct completion *x) +@@ -5754,6 +5811,17 @@ void complete_all(struct completion *x) } EXPORT_SYMBOL(complete_all); @@ -28794,7 +2992,7 @@ index 60d74cc..1701eae 100644 static inline long __sched do_wait_for_common(struct completion *x, long timeout, int state) { -@@ -6203,6 +6257,9 @@ __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio) +@@ -6185,6 +6253,9 @@ __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio) case SCHED_RR: p->sched_class = &rt_sched_class; break; @@ -28804,7 +3002,7 @@ index 60d74cc..1701eae 100644 } p->rt_priority = prio; -@@ -6250,7 +6307,7 @@ recheck: +@@ -6232,7 +6303,7 @@ recheck: if (policy != SCHED_FIFO && policy != SCHED_RR && policy != SCHED_NORMAL && policy != SCHED_BATCH && @@ -28813,7 +3011,7 @@ index 60d74cc..1701eae 100644 return -EINVAL; } -@@ -6265,6 +6322,8 @@ recheck: +@@ -6247,6 +6318,8 @@ recheck: return -EINVAL; if (rt_policy(policy) != (param->sched_priority != 0)) return -EINVAL; @@ -28822,7 +3020,7 @@ index 60d74cc..1701eae 100644 /* * Allow unprivileged RT tasks to decrease priority: -@@ -6319,6 +6378,12 @@ recheck: +@@ -6301,6 +6374,12 @@ recheck: return retval; } @@ -28835,7 +3033,7 @@ index 60d74cc..1701eae 100644 /* * make sure no PI-waiters arrive (or leave) while we are * changing the priority of the task: -@@ -6346,9 +6411,18 @@ recheck: +@@ -6328,9 +6407,18 @@ recheck: p->sched_reset_on_fork = reset_on_fork; @@ -28854,7 +3052,7 @@ index 60d74cc..1701eae 100644 if (running) p->sched_class->set_curr_task(rq); if (on_rq) { -@@ -6518,10 +6592,11 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) +@@ -6500,10 +6588,11 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) read_lock(&tasklist_lock); p = find_process_by_pid(pid); @@ -28868,341 +3066,11 @@ index 60d74cc..1701eae 100644 } /* -@@ -6980,6 +7055,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu) - __sched_fork(idle); - idle->se.exec_start = sched_clock(); - -+ idle->prio = idle->normal_prio = MAX_PRIO; - cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu)); - __set_task_cpu(idle, cpu); - -@@ -7020,23 +7096,22 @@ cpumask_var_t nohz_cpu_mask; - * - * This idea comes from the SD scheduler of Con Kolivas: - */ --static void update_sysctl(void) -+static inline void sched_init_granularity(void) - { -- unsigned int cpus = min(num_online_cpus(), 8U); -- unsigned int factor = 1 + ilog2(cpus); -+ unsigned int factor = 1 + ilog2(num_online_cpus()); -+ const unsigned long limit = 200000000; - --#define SET_SYSCTL(name) \ -- (sysctl_##name = (factor) * normalized_sysctl_##name) -- SET_SYSCTL(sched_min_granularity); -- SET_SYSCTL(sched_latency); -- SET_SYSCTL(sched_wakeup_granularity); -- SET_SYSCTL(sched_shares_ratelimit); --#undef SET_SYSCTL --} -+ sysctl_sched_min_granularity *= factor; -+ if (sysctl_sched_min_granularity > limit) -+ sysctl_sched_min_granularity = limit; - --static inline void sched_init_granularity(void) --{ -- update_sysctl(); -+ sysctl_sched_latency *= factor; -+ if (sysctl_sched_latency > limit) -+ sysctl_sched_latency = limit; -+ -+ sysctl_sched_wakeup_granularity *= factor; -+ -+ sysctl_sched_shares_ratelimit *= factor; - } - - #ifdef CONFIG_SMP -@@ 
-7073,7 +7148,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) - int ret = 0; - - rq = task_rq_lock(p, &flags); -- if (!cpumask_intersects(new_mask, cpu_active_mask)) { -+ if (!cpumask_intersects(new_mask, cpu_online_mask)) { - ret = -EINVAL; - goto out; - } -@@ -7095,7 +7170,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) - if (cpumask_test_cpu(task_cpu(p), new_mask)) - goto out; - -- if (migrate_task(p, cpumask_any_and(cpu_active_mask, new_mask), &req)) { -+ if (migrate_task(p, cpumask_any_and(cpu_online_mask, new_mask), &req)) { - /* Need help from migration thread: drop lock and wait. */ - struct task_struct *mt = rq->migration_thread; - -@@ -7249,19 +7324,19 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) - - again: - /* Look for allowed, online CPU in same node. */ -- for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask) -+ for_each_cpu_and(dest_cpu, nodemask, cpu_online_mask) - if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) - goto move; - - /* Any allowed, online CPU? */ -- dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask); -+ dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_online_mask); - if (dest_cpu < nr_cpu_ids) - goto move; - - /* No more Mr. Nice Guy. */ - if (dest_cpu >= nr_cpu_ids) { - cpuset_cpus_allowed_locked(p, &p->cpus_allowed); -- dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed); -+ dest_cpu = cpumask_any_and(cpu_online_mask, &p->cpus_allowed); - - /* - * Don't tell them about moving exiting tasks or -@@ -7290,7 +7365,7 @@ move: - */ - static void migrate_nr_uninterruptible(struct rq *rq_src) - { -- struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask)); -+ struct rq *rq_dest = cpu_rq(cpumask_any(cpu_online_mask)); - unsigned long flags; - - local_irq_save(flags); -@@ -7544,7 +7619,7 @@ static ctl_table *sd_alloc_ctl_cpu_table(int cpu) - static struct ctl_table_header *sd_sysctl_header; - static void register_sched_domain_sysctl(void) - { -- int i, cpu_num = num_possible_cpus(); -+ int i, cpu_num = num_online_cpus(); - struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1); - char buf[32]; - -@@ -7554,7 +7629,7 @@ static void register_sched_domain_sysctl(void) - if (entry == NULL) - return; - -- for_each_possible_cpu(i) { -+ for_each_online_cpu(i) { - snprintf(buf, 32, "cpu%d", i); - entry->procname = kstrdup(buf, GFP_KERNEL); - entry->mode = 0555; -@@ -7684,6 +7759,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) - spin_lock_irq(&rq->lock); - update_rq_clock(rq); - deactivate_task(rq, rq->idle, 0); -+ rq->idle->static_prio = MAX_PRIO; - __setscheduler(rq, rq->idle, SCHED_NORMAL, 0); - rq->idle->sched_class = &idle_sched_class; - migrate_dead_tasks(cpu); -@@ -7922,8 +7998,6 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) - - static void free_rootdomain(struct root_domain *rd) - { -- synchronize_sched(); -- - cpupri_cleanup(&rd->cpupri); - - free_cpumask_var(rd->rto_mask); -@@ -8064,7 +8138,6 @@ static cpumask_var_t cpu_isolated_map; - /* Setup the mask of cpus configured for isolated domains */ - static int __init isolated_cpu_setup(char *str) - { -- alloc_bootmem_cpumask_var(&cpu_isolated_map); - cpulist_parse(str, cpu_isolated_map); - return 1; - } -@@ -9042,7 +9115,7 @@ match1: - if (doms_new == NULL) { - ndoms_cur = 0; - doms_new = fallback_doms; -- cpumask_andnot(&doms_new[0], cpu_active_mask, cpu_isolated_map); -+ cpumask_andnot(&doms_new[0], cpu_online_mask, 
cpu_isolated_map); - WARN_ON_ONCE(dattr_new); - } - -@@ -9173,10 +9246,8 @@ static int update_sched_domains(struct notifier_block *nfb, - switch (action) { - case CPU_ONLINE: - case CPU_ONLINE_FROZEN: -- case CPU_DOWN_PREPARE: -- case CPU_DOWN_PREPARE_FROZEN: -- case CPU_DOWN_FAILED: -- case CPU_DOWN_FAILED_FROZEN: -+ case CPU_DEAD: -+ case CPU_DEAD_FROZEN: - partition_sched_domains(1, NULL, NULL); - return NOTIFY_OK; - -@@ -9223,7 +9294,7 @@ void __init sched_init_smp(void) - #endif - get_online_cpus(); - mutex_lock(&sched_domains_mutex); -- arch_init_sched_domains(cpu_active_mask); -+ arch_init_sched_domains(cpu_online_mask); - cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map); - if (cpumask_empty(non_isolated_cpus)) - cpumask_set_cpu(smp_processor_id(), non_isolated_cpus); -@@ -9544,8 +9615,6 @@ void __init sched_init(void) - rq->cpu = i; - rq->online = 0; - rq->migration_thread = NULL; -- rq->idle_stamp = 0; -- rq->avg_idle = 2*sysctl_sched_migration_cost; - INIT_LIST_HEAD(&rq->migration_queue); - rq_attach_root(rq, &def_root_domain); - #endif -@@ -9595,9 +9664,7 @@ void __init sched_init(void) - zalloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT); - alloc_cpumask_var(&nohz.ilb_grp_nohz_mask, GFP_NOWAIT); - #endif -- /* May be allocated at isolcpus cmdline parse time */ -- if (cpu_isolated_map == NULL) -- zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT); -+ zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT); - #endif /* SMP */ - - perf_event_init(); -diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c -index 5b49613..479ce56 100644 ---- a/kernel/sched_clock.c -+++ b/kernel/sched_clock.c -@@ -236,18 +236,6 @@ void sched_clock_idle_wakeup_event(u64 delta_ns) - } - EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event); - --unsigned long long cpu_clock(int cpu) --{ -- unsigned long long clock; -- unsigned long flags; -- -- local_irq_save(flags); -- clock = sched_clock_cpu(cpu); -- local_irq_restore(flags); -- -- return clock; --} -- - #else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */ - - void sched_clock_init(void) -@@ -263,12 +251,17 @@ u64 sched_clock_cpu(int cpu) - return sched_clock(); - } - -+#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */ - - unsigned long long cpu_clock(int cpu) - { -- return sched_clock_cpu(cpu); --} -+ unsigned long long clock; -+ unsigned long flags; - --#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */ -+ local_irq_save(flags); -+ clock = sched_clock_cpu(cpu); -+ local_irq_restore(flags); - -+ return clock; -+} - EXPORT_SYMBOL_GPL(cpu_clock); -diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c -index 6988cf0..efb8440 100644 ---- a/kernel/sched_debug.c -+++ b/kernel/sched_debug.c -@@ -285,16 +285,12 @@ static void print_cpu(struct seq_file *m, int cpu) - - #ifdef CONFIG_SCHEDSTATS - #define P(n) SEQ_printf(m, " .%-30s: %d\n", #n, rq->n); --#define P64(n) SEQ_printf(m, " .%-30s: %Ld\n", #n, rq->n); - - P(yld_count); - - P(sched_switch); - P(sched_count); - P(sched_goidle); --#ifdef CONFIG_SMP -- P64(avg_idle); --#endif - - P(ttwu_count); - P(ttwu_local); diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c -index d80812d..ef43ff9 100644 +index 37087a7..ef43ff9 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c -@@ -35,14 +35,12 @@ - * run vmstat and monitor the context-switches (cs) field) - */ - unsigned int sysctl_sched_latency = 5000000ULL; --unsigned int normalized_sysctl_sched_latency = 5000000ULL; - - /* - * Minimal preemption granularity for CPU-bound tasks: - * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds) - */ - unsigned 
int sysctl_sched_min_granularity = 1000000ULL; --unsigned int normalized_sysctl_sched_min_granularity = 1000000ULL; - - /* - * is kept at sysctl_sched_latency / sysctl_sched_min_granularity -@@ -72,7 +70,6 @@ unsigned int __read_mostly sysctl_sched_compat_yield; - * have immediate wakeup/sleep latencies. - */ - unsigned int sysctl_sched_wakeup_granularity = 1000000UL; --unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL; - - const_debug unsigned int sysctl_sched_migration_cost = 500000UL; - -@@ -1377,9 +1374,6 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag - - rcu_read_lock(); - for_each_domain(cpu, tmp) { -- if (!(tmp->flags & SD_LOAD_BALANCE)) -- continue; -- - /* - * If power savings logic is enabled for a domain, see if we - * are not overloaded, if so, don't balance wider. -@@ -1404,38 +1398,11 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag - want_sd = 0; - } - -- if (want_affine && (tmp->flags & SD_WAKE_AFFINE)) { -- int candidate = -1, i; -- -- if (cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) -- candidate = cpu; -- -- /* -- * Check for an idle shared cache. -- */ -- if (tmp->flags & SD_PREFER_SIBLING) { -- if (candidate == cpu) { -- if (!cpu_rq(prev_cpu)->cfs.nr_running) -- candidate = prev_cpu; -- } -- -- if (candidate == -1 || candidate == cpu) { -- for_each_cpu(i, sched_domain_span(tmp)) { -- if (!cpumask_test_cpu(i, &p->cpus_allowed)) -- continue; -- if (!cpu_rq(i)->cfs.nr_running) { -- candidate = i; -- break; -- } -- } -- } -- } -+ if (want_affine && (tmp->flags & SD_WAKE_AFFINE) && -+ cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) { - -- if (candidate >= 0) { -- affine_sd = tmp; -- want_affine = 0; -- cpu = candidate; -- } -+ affine_sd = tmp; -+ want_affine = 0; - } - - if (!want_sd && !want_affine) -@@ -1631,7 +1598,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ +@@ -1598,7 +1598,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ update_curr(cfs_rq); @@ -29211,303 +3079,24 @@ index d80812d..ef43ff9 100644 resched_task(curr); return; } -@@ -1883,17 +1850,6 @@ move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, - - return 0; - } -- --static void rq_online_fair(struct rq *rq) --{ -- update_sysctl(); --} -- --static void rq_offline_fair(struct rq *rq) --{ -- update_sysctl(); --} -- - #endif /* CONFIG_SMP */ - - /* -@@ -2041,8 +1997,6 @@ static const struct sched_class fair_sched_class = { - - .load_balance = load_balance_fair, - .move_one_task = move_one_task_fair, -- .rq_online = rq_online_fair, -- .rq_offline = rq_offline_fair, - #endif - - .set_curr_task = set_curr_task_fair, diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index a4d790c..f622880 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c -@@ -1004,7 +1004,7 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) - */ - static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags) - { -- if (p->prio < rq->curr->prio) { -+ if (p->prio < rq->curr->prio || p->policy == SCHED_LITMUS) { - resched_task(rq->curr); - return; - } -diff --git a/kernel/signal.c b/kernel/signal.c -index 4d0658d..6705320 100644 ---- a/kernel/signal.c -+++ b/kernel/signal.c -@@ -939,8 +939,7 @@ static void print_fatal_signal(struct pt_regs *regs, int signr) - for (i = 0; i < 16; i++) { - unsigned char insn; - -- if (get_user(insn, (unsigned char *)(regs->ip + i))) -- break; -+ __get_user(insn, (unsigned 
char *)(regs->ip + i)); - printk("%02x ", insn); - } - } -diff --git a/kernel/sysctl.c b/kernel/sysctl.c -index b8bd058..0d949c5 100644 ---- a/kernel/sysctl.c -+++ b/kernel/sysctl.c -@@ -1345,7 +1345,6 @@ static struct ctl_table vm_table[] = { - .strategy = &sysctl_jiffies, - }, - #endif --#ifdef CONFIG_MMU - { - .ctl_name = CTL_UNNUMBERED, - .procname = "mmap_min_addr", -@@ -1354,7 +1353,6 @@ static struct ctl_table vm_table[] = { - .mode = 0644, - .proc_handler = &mmap_min_addr_handler, - }, --#endif - #ifdef CONFIG_NUMA - { - .ctl_name = CTL_UNNUMBERED, -@@ -1607,8 +1605,7 @@ static struct ctl_table debug_table[] = { - .data = &show_unhandled_signals, - .maxlen = sizeof(int), - .mode = 0644, -- .proc_handler = proc_dointvec_minmax, -- .extra1 = &zero, -+ .proc_handler = proc_dointvec - }, - #endif - { .ctl_name = 0 } -diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c -index 469193c..b6e7aae 100644 ---- a/kernel/sysctl_check.c -+++ b/kernel/sysctl_check.c -@@ -220,7 +220,6 @@ static const struct trans_ctl_table trans_net_ipv4_conf_vars_table[] = { - { NET_IPV4_CONF_PROMOTE_SECONDARIES, "promote_secondaries" }, - { NET_IPV4_CONF_ARP_ACCEPT, "arp_accept" }, - { NET_IPV4_CONF_ARP_NOTIFY, "arp_notify" }, -- { NET_IPV4_CONF_SRC_VMARK, "src_valid_mark" }, - {} - }; - -diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c -index 0d809ae..620b58a 100644 ---- a/kernel/time/clockevents.c -+++ b/kernel/time/clockevents.c -@@ -20,8 +20,6 @@ - #include - #include - --#include "tick-internal.h" -- - /* The registered clock event devices */ - static LIST_HEAD(clockevent_devices); - static LIST_HEAD(clockevents_released); -@@ -239,9 +237,8 @@ void clockevents_exchange_device(struct clock_event_device *old, - */ - void clockevents_notify(unsigned long reason, void *arg) - { -- struct clock_event_device *dev, *tmp; -+ struct list_head *node, *tmp; - unsigned long flags; -- int cpu; - - spin_lock_irqsave(&clockevents_lock, flags); - clockevents_do_notify(reason, arg); -@@ -252,20 +249,8 @@ void clockevents_notify(unsigned long reason, void *arg) - * Unregister the clock event devices which were - * released from the users in the notify chain. - */ -- list_for_each_entry_safe(dev, tmp, &clockevents_released, list) -- list_del(&dev->list); -- /* -- * Now check whether the CPU has left unused per cpu devices -- */ -- cpu = *((int *)arg); -- list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) { -- if (cpumask_test_cpu(cpu, dev->cpumask) && -- cpumask_weight(dev->cpumask) == 1 && -- !tick_is_broadcast_device(dev)) { -- BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED); -- list_del(&dev->list); -- } -- } -+ list_for_each_safe(node, tmp, &clockevents_released) -+ list_del(node); - break; - default: - break; -diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c -index ecc7adb..5e18c6a 100644 ---- a/kernel/time/clocksource.c -+++ b/kernel/time/clocksource.c -@@ -413,47 +413,6 @@ void clocksource_touch_watchdog(void) - clocksource_resume_watchdog(); - } - --/** -- * clocksource_max_deferment - Returns max time the clocksource can be deferred -- * @cs: Pointer to clocksource -- * -- */ --static u64 clocksource_max_deferment(struct clocksource *cs) --{ -- u64 max_nsecs, max_cycles; -- -- /* -- * Calculate the maximum number of cycles that we can pass to the -- * cyc2ns function without overflowing a 64-bit signed result. The -- * maximum number of cycles is equal to ULLONG_MAX/cs->mult which -- * is equivalent to the below. 
-- * max_cycles < (2^63)/cs->mult -- * max_cycles < 2^(log2((2^63)/cs->mult)) -- * max_cycles < 2^(log2(2^63) - log2(cs->mult)) -- * max_cycles < 2^(63 - log2(cs->mult)) -- * max_cycles < 1 << (63 - log2(cs->mult)) -- * Please note that we add 1 to the result of the log2 to account for -- * any rounding errors, ensure the above inequality is satisfied and -- * no overflow will occur. -- */ -- max_cycles = 1ULL << (63 - (ilog2(cs->mult) + 1)); -- -- /* -- * The actual maximum number of cycles we can defer the clocksource is -- * determined by the minimum of max_cycles and cs->mask. -- */ -- max_cycles = min_t(u64, max_cycles, (u64) cs->mask); -- max_nsecs = clocksource_cyc2ns(max_cycles, cs->mult, cs->shift); -- -- /* -- * To ensure that the clocksource does not wrap whilst we are idle, -- * limit the time the clocksource can be deferred by 12.5%. Please -- * note a margin of 12.5% is used because this can be computed with -- * a shift, versus say 10% which would require division. -- */ -- return max_nsecs - (max_nsecs >> 5); --} -- - #ifdef CONFIG_GENERIC_TIME - - /** -@@ -552,9 +511,6 @@ static void clocksource_enqueue(struct clocksource *cs) +@@ -1004,7 +1004,7 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) */ - int clocksource_register(struct clocksource *cs) + static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags) { -- /* calculate max idle time permitted for this clocksource */ -- cs->max_idle_ns = clocksource_max_deferment(cs); -- - mutex_lock(&clocksource_mutex); - clocksource_enqueue(cs); - clocksource_select(); +- if (p->prio < rq->curr->prio) { ++ if (p->prio < rq->curr->prio || p->policy == SCHED_LITMUS) { + resched_task(rq->curr); + return; + } diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c -index 44320b1..dcbff75 100644 +index 89aed59..dcbff75 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c -@@ -216,7 +216,6 @@ void tick_nohz_stop_sched_tick(int inidle) - struct tick_sched *ts; - ktime_t last_update, expires, now; - struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev; -- u64 time_delta; - int cpu; - - local_irq_save(flags); -@@ -276,17 +275,6 @@ void tick_nohz_stop_sched_tick(int inidle) - seq = read_seqbegin(&xtime_lock); - last_update = last_jiffies_update; - last_jiffies = jiffies; -- -- /* -- * On SMP we really should only care for the CPU which -- * has the do_timer duty assigned. All other CPUs can -- * sleep as long as they want. -- */ -- if (cpu == tick_do_timer_cpu || -- tick_do_timer_cpu == TICK_DO_TIMER_NONE) -- time_delta = timekeeping_max_deferment(); -- else -- time_delta = KTIME_MAX; - } while (read_seqretry(&xtime_lock, seq)); - - /* Get the next timer wheel timer */ -@@ -306,26 +294,11 @@ void tick_nohz_stop_sched_tick(int inidle) - if ((long)delta_jiffies >= 1) { - - /* -- * calculate the expiry time for the next timer wheel -- * timer. delta_jiffies >= NEXT_TIMER_MAX_DELTA signals -- * that there is no timer pending or at least extremely -- * far into the future (12 days for HZ=1000). In this -- * case we set the expiry to the end of time. -- */ -- if (likely(delta_jiffies < NEXT_TIMER_MAX_DELTA)) { -- /* -- * Calculate the time delta for the next timer event. -- * If the time delta exceeds the maximum time delta -- * permitted by the current clocksource then adjust -- * the time delta accordingly to ensure the -- * clocksource does not wrap. 
-- */ -- time_delta = min_t(u64, time_delta, -- tick_period.tv64 * delta_jiffies); -- expires = ktime_add_ns(last_update, time_delta); -- } else { -- expires.tv64 = KTIME_MAX; -- } -+ * calculate the expiry time for the next timer wheel -+ * timer -+ */ -+ expires = ktime_add_ns(last_update, tick_period.tv64 * -+ delta_jiffies); - - /* - * If this cpu is the one which updates jiffies, then -@@ -369,19 +342,22 @@ void tick_nohz_stop_sched_tick(int inidle) - - ts->idle_sleeps++; - -- /* Mark expires */ -- ts->idle_expires = expires; -- - /* -- * If the expiration time == KTIME_MAX, then -- * in this case we simply stop the tick timer. -+ * delta_jiffies >= NEXT_TIMER_MAX_DELTA signals that -+ * there is no timer pending or at least extremly far -+ * into the future (12 days for HZ=1000). In this case -+ * we simply stop the tick timer: - */ -- if (unlikely(expires.tv64 == KTIME_MAX)) { -+ if (unlikely(delta_jiffies >= NEXT_TIMER_MAX_DELTA)) { -+ ts->idle_expires.tv64 = KTIME_MAX; - if (ts->nohz_mode == NOHZ_MODE_HIGHRES) - hrtimer_cancel(&ts->sched_timer); - goto out; - } - -+ /* Mark expiries */ -+ ts->idle_expires = expires; -+ - if (ts->nohz_mode == NOHZ_MODE_HIGHRES) { - hrtimer_start(&ts->sched_timer, expires, - HRTIMER_MODE_ABS_PINNED); -@@ -710,6 +686,46 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer) +@@ -686,6 +686,46 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer) } /** @@ -29554,7 +3143,7 @@ index 44320b1..dcbff75 100644 * tick_setup_sched_timer - setup the tick emulation timer */ void tick_setup_sched_timer(void) -@@ -726,9 +742,11 @@ void tick_setup_sched_timer(void) +@@ -702,9 +742,11 @@ void tick_setup_sched_timer(void) /* Get the next period (per cpu) */ hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update()); @@ -29569,44 +3158,6 @@ index 44320b1..dcbff75 100644 hrtimer_add_expires_ns(&ts->sched_timer, offset); for (;;) { -diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c -index 8b709de..c3a4e29 100644 ---- a/kernel/time/timekeeping.c -+++ b/kernel/time/timekeeping.c -@@ -488,17 +488,6 @@ int timekeeping_valid_for_hres(void) - } - - /** -- * timekeeping_max_deferment - Returns max time the clocksource can be deferred -- * -- * Caller must observe xtime_lock via read_seqbegin/read_seqretry to -- * ensure that the clocksource does not change! -- */ --u64 timekeeping_max_deferment(void) --{ -- return timekeeper.clock->max_idle_ns; --} -- --/** - * read_persistent_clock - Return time from the persistent clock. - * - * Weak dummy function for arches that do not yet support it. -@@ -845,7 +834,6 @@ void getboottime(struct timespec *ts) - - set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec); - } --EXPORT_SYMBOL_GPL(getboottime); - - /** - * monotonic_to_bootbased - Convert the monotonic time to boot based. 
-@@ -855,7 +843,6 @@ void monotonic_to_bootbased(struct timespec *ts) - { - *ts = timespec_add_safe(*ts, total_sleep_time); - } --EXPORT_SYMBOL_GPL(monotonic_to_bootbased); - - unsigned long get_seconds(void) - { diff --git a/litmus/Kconfig b/litmus/Kconfig new file mode 100644 index 0000000..874794f @@ -31337,10 +4888,10 @@ index 0000000..36e3146 +} diff --git a/litmus/litmus.c b/litmus/litmus.c new file mode 100644 -index 0000000..3cf7cb9 +index 0000000..e43596a --- /dev/null +++ b/litmus/litmus.c -@@ -0,0 +1,699 @@ +@@ -0,0 +1,775 @@ +/* + * litmus.c -- Implementation of the LITMUS syscalls, + * the LITMUS intialization code, @@ -31367,6 +4918,8 @@ index 0000000..3cf7cb9 +/* Number of RT tasks that exist in the system */ +atomic_t rt_task_count = ATOMIC_INIT(0); +static DEFINE_SPINLOCK(task_transition_lock); ++/* synchronize plugin switching */ ++atomic_t cannot_use_plugin = ATOMIC_INIT(0); + +/* Give log messages sequential IDs. */ +atomic_t __log_seq_no = ATOMIC_INIT(0); @@ -31712,13 +5265,17 @@ index 0000000..3cf7cb9 + } +} + ++/* IPI callback to synchronize plugin switching */ ++static void synch_on_plugin_switch(void* info) ++{ ++ while (atomic_read(&cannot_use_plugin)) ++ cpu_relax(); ++} ++ +/* Switching a plugin in use is tricky. + * We must watch out that no real-time tasks exists + * (and that none is created in parallel) and that the plugin is not + * currently in use on any processor (in theory). -+ * -+ * For now, we don't enforce the second part since it is unlikely to cause -+ * any trouble by itself as long as we don't unload modules. + */ +int switch_sched_plugin(struct sched_plugin* plugin) +{ @@ -31727,6 +5284,11 @@ index 0000000..3cf7cb9 + + BUG_ON(!plugin); + ++ /* forbid other cpus to use the plugin */ ++ atomic_set(&cannot_use_plugin, 1); ++ /* send IPI to force other CPUs to synch with us */ ++ smp_call_function(synch_on_plugin_switch, NULL, 0); ++ + /* stop task transitions */ + spin_lock_irqsave(&task_transition_lock, flags); + @@ -31747,6 +5309,7 @@ index 0000000..3cf7cb9 + ret = -EBUSY; +out: + spin_unlock_irqrestore(&task_transition_lock, flags); ++ atomic_set(&cannot_use_plugin, 0); + return ret; +} + @@ -31897,6 +5460,55 @@ index 0000000..3cf7cb9 + return len; +} + ++static int proc_read_cluster_size(char *page, char **start, ++ off_t off, int count, ++ int *eof, void *data) ++{ ++ int len; ++ if (cluster_cache_index == 2) ++ len = snprintf(page, PAGE_SIZE, "L2\n"); ++ else if (cluster_cache_index == 3) ++ len = snprintf(page, PAGE_SIZE, "L3\n"); ++ else /* (cluster_cache_index == 1) */ ++ len = snprintf(page, PAGE_SIZE, "L1\n"); ++ ++ return len; ++} ++ ++static int proc_write_cluster_size(struct file *file, ++ const char *buffer, ++ unsigned long count, ++ void *data) ++{ ++ int len; ++ /* L2, L3 */ ++ char cache_name[33]; ++ ++ if(count > 32) ++ len = 32; ++ else ++ len = count; ++ ++ if(copy_from_user(cache_name, buffer, len)) ++ return -EFAULT; ++ ++ cache_name[len] = '\0'; ++ /* chomp name */ ++ if (len > 1 && cache_name[len - 1] == '\n') ++ cache_name[len - 1] = '\0'; ++ ++ /* do a quick and dirty comparison to find the cluster size */ ++ if (!strcmp(cache_name, "L2")) ++ cluster_cache_index = 2; ++ else if (!strcmp(cache_name, "L3")) ++ cluster_cache_index = 3; ++ else if (!strcmp(cache_name, "L1")) ++ cluster_cache_index = 1; ++ else ++ printk(KERN_INFO "Cluster '%s' is unknown.\n", cache_name); ++ ++ return len; ++} + +static int proc_read_release_master(char *page, char **start, + off_t off, int count, @@ -31952,6 +5564,7 @@ index 
0000000..3cf7cb9 + *curr_file = NULL, + *stat_file = NULL, + *plugs_file = NULL, ++ *clus_cache_idx_file = NULL, + *release_master_file = NULL; + +static int __init init_litmus_proc(void) @@ -31982,6 +5595,16 @@ index 0000000..3cf7cb9 + release_master_file->read_proc = proc_read_release_master; + release_master_file->write_proc = proc_write_release_master; + ++ clus_cache_idx_file = create_proc_entry("cluster_cache", ++ 0644, litmus_dir); ++ if (!clus_cache_idx_file) { ++ printk(KERN_ERR "Could not allocate cluster_cache " ++ "procfs entry.\n"); ++ return -ENOMEM; ++ } ++ clus_cache_idx_file->read_proc = proc_read_cluster_size; ++ clus_cache_idx_file->write_proc = proc_write_cluster_size; ++ + stat_file = create_proc_read_entry("stats", 0444, litmus_dir, + proc_read_stats, NULL); + @@ -31999,6 +5622,10 @@ index 0000000..3cf7cb9 + remove_proc_entry("stats", litmus_dir); + if (curr_file) + remove_proc_entry("active_plugin", litmus_dir); ++ if (clus_cache_idx_file) ++ remove_proc_entry("cluster_cache", litmus_dir); ++ if (release_master_file) ++ remove_proc_entry("release_master", litmus_dir); + if (litmus_dir) + remove_proc_entry("litmus", NULL); +} @@ -32042,10 +5669,10 @@ index 0000000..3cf7cb9 +module_exit(_exit_litmus); diff --git a/litmus/rt_domain.c b/litmus/rt_domain.c new file mode 100644 -index 0000000..0ed6d5c +index 0000000..609ff0f --- /dev/null +++ b/litmus/rt_domain.c -@@ -0,0 +1,306 @@ +@@ -0,0 +1,310 @@ +/* + * litmus/rt_domain.c + * @@ -32349,21 +5976,36 @@ index 0000000..0ed6d5c + task->rt_param.domain = rt; + + /* start release timer */ ++ TS_SCHED2_START(task); ++ + arm_release_timer(rt); ++ ++ TS_SCHED2_END(task); +} + diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c new file mode 100644 -index 0000000..d0767ce +index 0000000..da44b45 --- /dev/null +++ b/litmus/sched_cedf.c -@@ -0,0 +1,727 @@ +@@ -0,0 +1,756 @@ +/* -+ * kernel/sched_cedf.c ++ * litmus/sched_cedf.c + * -+ * Implementation of the Clustered EDF (C-EDF) scheduling algorithm. -+ * Linking is included so that support for synchronization (e.g., through -+ * the implementation of a "CSN-EDF" algorithm) can be added later if desired. ++ * Implementation of the C-EDF scheduling algorithm. ++ * ++ * This implementation is based on G-EDF: ++ * - CPUs are clustered around L2 or L3 caches. ++ * - Clusters topology is automatically detected (this is arch dependent ++ * and is working only on x86 at the moment --- and only with modern ++ * cpus that exports cpuid4 information) ++ * - The plugins _does not_ attempt to put tasks in the right cluster i.e. ++ * the programmer needs to be aware of the topology to place tasks ++ * in the desired cluster ++ * - default clustering is around L2 cache (cache index = 2) ++ * supported clusters are: L1 (private cache: pedf), L2, L3 ++ * ++ * For details on functions, take a look at sched_gsn_edf.c + * + * This version uses the simple approach and serializes all scheduling + * decisions by the use of a queue lock. This is probably not the @@ -32373,92 +6015,36 @@ index 0000000..d0767ce +#include +#include +#include -+#include + +#include +#include +#include +#include +#include ++ +#include + +#include + -+/* Overview of C-EDF operations. -+ * -+ * link_task_to_cpu(T, cpu) - Low-level operation to update the linkage -+ * structure (NOT the actually scheduled -+ * task). If there is another linked task To -+ * already it will set To->linked_on = NO_CPU -+ * (thereby removing its association with this -+ * CPU). 
However, it will not requeue the -+ * previously linked task (if any). It will set -+ * T's state to RT_F_RUNNING and check whether -+ * it is already running somewhere else. If T -+ * is scheduled somewhere else it will link -+ * it to that CPU instead (and pull the linked -+ * task to cpu). T may be NULL. -+ * -+ * unlink(T) - Unlink removes T from all scheduler data -+ * structures. If it is linked to some CPU it -+ * will link NULL to that CPU. If it is -+ * currently queued in the cedf queue for -+ * a partition, it will be removed from -+ * the rt_domain. It is safe to call -+ * unlink(T) if T is not linked. T may not -+ * be NULL. -+ * -+ * requeue(T) - Requeue will insert T into the appropriate -+ * queue. If the system is in real-time mode and -+ * the T is released already, it will go into the -+ * ready queue. If the system is not in -+ * real-time mode is T, then T will go into the -+ * release queue. If T's release time is in the -+ * future, it will go into the release -+ * queue. That means that T's release time/job -+ * no/etc. has to be updated before requeue(T) is -+ * called. It is not safe to call requeue(T) -+ * when T is already queued. T may not be NULL. -+ * -+ * cedf_job_arrival(T) - This is the catch-all function when T enters -+ * the system after either a suspension or at a -+ * job release. It will queue T (which means it -+ * is not safe to call cedf_job_arrival(T) if -+ * T is already queued) and then check whether a -+ * preemption is necessary. If a preemption is -+ * necessary it will update the linkage -+ * accordingly and cause scheduled to be called -+ * (either with an IPI or need_resched). It is -+ * safe to call cedf_job_arrival(T) if T's -+ * next job has not been actually released yet -+ * (release time in the future). T will be put -+ * on the release queue in that case. -+ * -+ * job_completion(T) - Take care of everything that needs to be done -+ * to prepare T for its next release and place -+ * it in the right queue with -+ * cedf_job_arrival(). -+ * -+ * -+ * When we now that T is linked to CPU then link_task_to_cpu(NULL, CPU) is -+ * equivalent to unlink(T). Note that if you unlink a task from a CPU none of -+ * the functions will automatically propagate pending task from the ready queue -+ * to a linked task. This is the job of the calling function ( by means of -+ * __take_ready). -+ */ ++/* forward declaration... a funny thing with C ;) */ ++struct clusterdomain; + +/* cpu_entry_t - maintain the linked and scheduled state ++ * ++ * A cpu also contains a pointer to the cedf_domain_t cluster ++ * that owns it (struct clusterdomain*) + */ +typedef struct { + int cpu; ++ struct clusterdomain* cluster; /* owning cluster */ + struct task_struct* linked; /* only RT tasks */ + struct task_struct* scheduled; /* only RT tasks */ -+ struct list_head list; + atomic_t will_schedule; /* prevent unneeded IPIs */ ++ struct bheap_node* hn; +} cpu_entry_t; -+DEFINE_PER_CPU(cpu_entry_t, cedf_cpu_entries); + -+cpu_entry_t* *cedf_cpu_entries_array; ++/* one cpu_entry_t per CPU */ ++DEFINE_PER_CPU(cpu_entry_t, cedf_cpu_entries); + +#define set_will_schedule() \ + (atomic_set(&__get_cpu_var(cedf_cpu_entries).will_schedule, 1)) @@ -32467,75 +6053,73 @@ index 0000000..d0767ce +#define test_will_schedule(cpu) \ + (atomic_read(&per_cpu(cedf_cpu_entries, cpu).will_schedule)) + -+/* Cluster size -- currently four. This is a variable to allow for -+ * the possibility of changing the cluster size online in the future. 
-+ */ -+int cluster_size = 4; -+ -+int do_cleanup = 1; -+ -+typedef struct { -+ rt_domain_t domain; -+ int first_cpu; -+ int last_cpu; -+ ++/* ++ * In C-EDF there is a cedf domain _per_ cluster ++ * The number of clusters is dynamically determined accordingly to the ++ * total cpu number and the cluster size ++ */ ++typedef struct clusterdomain { ++ /* rt_domain for this cluster */ ++ rt_domain_t domain; ++ /* cpus in this cluster */ ++ cpu_entry_t* *cpus; ++ /* map of this cluster cpus */ ++ cpumask_var_t cpu_map; + /* the cpus queue themselves according to priority in here */ -+ struct list_head cedf_cpu_queue; -+ -+ /* per-partition spinlock: protects the domain and -+ * serializes scheduling decisions -+ */ -+#define slock domain.ready_lock ++ struct bheap_node *heap_node; ++ struct bheap cpu_heap; ++ /* lock for this cluster */ ++#define lock domain.ready_lock +} cedf_domain_t; + -+DEFINE_PER_CPU(cedf_domain_t*, cedf_domains) = NULL; ++/* a cedf_domain per cluster; allocation is done at init/activation time */ ++cedf_domain_t *cedf; + -+cedf_domain_t* *cedf_domains_array; ++#define remote_cluster(cpu) ((cedf_domain_t *) per_cpu(cedf_cpu_entries, cpu).cluster) ++#define task_cpu_cluster(task) remote_cluster(get_partition(task)) + -+ -+/* These are defined similarly to partitioning, except that a -+ * tasks partition is any cpu of the cluster to which it -+ * is assigned, typically the lowest-numbered cpu. ++/* Uncomment WANT_ALL_SCHED_EVENTS if you want to see all scheduling ++ * decisions in the TRACE() log; uncomment VERBOSE_INIT for verbose ++ * information during the initialization of the plugin (e.g., topology) ++#define WANT_ALL_SCHED_EVENTS + */ -+#define local_edf (&__get_cpu_var(cedf_domains)->domain) -+#define local_cedf __get_cpu_var(cedf_domains) -+#define remote_edf(cpu) (&per_cpu(cedf_domains, cpu)->domain) -+#define remote_cedf(cpu) per_cpu(cedf_domains, cpu) -+#define task_edf(task) remote_edf(get_partition(task)) -+#define task_cedf(task) remote_cedf(get_partition(task)) ++#define VERBOSE_INIT ++ ++static int cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b) ++{ ++ cpu_entry_t *a, *b; ++ a = _a->value; ++ b = _b->value; ++ /* Note that a and b are inverted: we want the lowest-priority CPU at ++ * the top of the heap. ++ */ ++ return edf_higher_prio(b->linked, a->linked); ++} + +/* update_cpu_position - Move the cpu entry to the correct place to maintain + * order in the cpu queue. Caller must hold cedf lock. -+ * -+ * This really should be a heap. 
+ */ +static void update_cpu_position(cpu_entry_t *entry) +{ -+ cpu_entry_t *other; -+ struct list_head *cedf_cpu_queue = -+ &(remote_cedf(entry->cpu))->cedf_cpu_queue; -+ struct list_head *pos; ++ cedf_domain_t *cluster = entry->cluster; + -+ BUG_ON(!cedf_cpu_queue); ++ if (likely(bheap_node_in_heap(entry->hn))) ++ bheap_delete(cpu_lower_prio, ++ &cluster->cpu_heap, ++ entry->hn); + -+ if (likely(in_list(&entry->list))) -+ list_del(&entry->list); -+ /* if we do not execute real-time jobs we just move -+ * to the end of the queue -+ */ -+ if (entry->linked) { -+ list_for_each(pos, cedf_cpu_queue) { -+ other = list_entry(pos, cpu_entry_t, list); -+ if (edf_higher_prio(entry->linked, other->linked)) { -+ __list_add(&entry->list, pos->prev, pos); -+ return; -+ } -+ } -+ } -+ /* if we get this far we have the lowest priority job */ -+ list_add_tail(&entry->list, cedf_cpu_queue); ++ bheap_insert(cpu_lower_prio, &cluster->cpu_heap, entry->hn); +} + ++/* caller must hold cedf lock */ ++static cpu_entry_t* lowest_prio_cpu(cedf_domain_t *cluster) ++{ ++ struct bheap_node* hn; ++ hn = bheap_peek(cpu_lower_prio, &cluster->cpu_heap); ++ return hn->value; ++} ++ ++ +/* link_task_to_cpu - Update the link of a CPU. + * Handles the case where the to-be-linked task is already + * scheduled on a different CPU. @@ -32549,9 +6133,6 @@ index 0000000..d0767ce + + BUG_ON(linked && !is_realtime(linked)); + -+ /* Cannot link task to a CPU that doesn't belong to its partition... */ -+ BUG_ON(linked && remote_cedf(entry->cpu) != task_cedf(linked)); -+ + /* Currently linked task is set to be unlinked. */ + if (entry->linked) { + entry->linked->rt_param.linked_on = NO_CPU; @@ -32573,6 +6154,9 @@ index 0000000..d0767ce + * the caller to get things right. + */ + if (entry != sched) { ++ TRACE_TASK(linked, ++ "already scheduled on %d, updating link.\n", ++ sched->cpu); + tmp = sched->linked; + linked->rt_param.linked_on = sched->cpu; + sched->linked = linked; @@ -32584,13 +6168,12 @@ index 0000000..d0767ce + linked->rt_param.linked_on = entry->cpu; + } + entry->linked = linked; -+ -+ if (entry->linked) -+ TRACE_TASK(entry->linked, "linked to CPU %d, state:%d\n", -+ entry->cpu, entry->linked->state); ++#ifdef WANT_ALL_SCHED_EVENTS ++ if (linked) ++ TRACE_TASK(linked, "linked to %d.\n", entry->cpu); + else -+ TRACE("NULL linked to CPU %d\n", entry->cpu); -+ ++ TRACE("NULL linked to %d.\n", entry->cpu); ++#endif + update_cpu_position(entry); +} + @@ -32606,6 +6189,7 @@ index 0000000..d0767ce + return; + } + ++ + if (t->rt_param.linked_on != NO_CPU) { + /* unlink */ + entry = &per_cpu(cedf_cpu_entries, t->rt_param.linked_on); @@ -32618,95 +6202,105 @@ index 0000000..d0767ce + * been relinked to this CPU), thus it must be in some + * queue. We must remove it from the list in this + * case. ++ * ++ * in C-EDF case is should be somewhere in the queue for ++ * its domain, therefore and we can get the domain using ++ * task_cpu_cluster + */ -+ remove(task_edf(t), t); ++ remove(&(task_cpu_cluster(t))->domain, t); + } +} + + +/* preempt - force a CPU to reschedule + */ -+static noinline void preempt(cpu_entry_t *entry) ++static void preempt(cpu_entry_t *entry) +{ + preempt_if_preemptable(entry->scheduled, entry->cpu); +} + -+/* requeue - Put an unlinked task into c-edf domain. ++/* requeue - Put an unlinked task into gsn-edf domain. + * Caller must hold cedf_lock. 
+ */ +static noinline void requeue(struct task_struct* task) +{ -+ cedf_domain_t* cedf; -+ rt_domain_t* edf; -+ ++ cedf_domain_t *cluster = task_cpu_cluster(task); + BUG_ON(!task); -+ /* sanity check rt_list before insertion */ ++ /* sanity check before insertion */ + BUG_ON(is_queued(task)); + -+ /* Get correct real-time domain. */ -+ cedf = task_cedf(task); -+ edf = &cedf->domain; -+ + if (is_released(task, litmus_clock())) -+ __add_ready(edf, task); ++ __add_ready(&cluster->domain, task); + else { + /* it has got to wait */ -+ add_release(edf, task); ++ add_release(&cluster->domain, task); + } +} + -+static void check_for_preemptions(cedf_domain_t* cedf) ++/* check for any necessary preemptions */ ++static void check_for_preemptions(cedf_domain_t *cluster) +{ -+ cpu_entry_t *last; + struct task_struct *task; -+ struct list_head *cedf_cpu_queue; -+ cedf_cpu_queue = &cedf->cedf_cpu_queue; ++ cpu_entry_t* last; + -+ for(last = list_entry(cedf_cpu_queue->prev, cpu_entry_t, list); -+ edf_preemption_needed(&cedf->domain, last->linked); -+ last = list_entry(cedf_cpu_queue->prev, cpu_entry_t, list)) { ++ for(last = lowest_prio_cpu(cluster); ++ edf_preemption_needed(&cluster->domain, last->linked); ++ last = lowest_prio_cpu(cluster)) { + /* preemption necessary */ -+ task = __take_ready(&cedf->domain); -+ TRACE("check_for_preemptions: task %d linked to %d, state:%d\n", -+ task->pid, last->cpu, task->state); ++ task = __take_ready(&cluster->domain); ++ TRACE("check_for_preemptions: attempting to link task %d to %d\n", ++ task->pid, last->cpu); + if (last->linked) + requeue(last->linked); + link_task_to_cpu(task, last); + preempt(last); + } -+ +} + +/* cedf_job_arrival: task is either resumed or released */ +static noinline void cedf_job_arrival(struct task_struct* task) +{ -+ cedf_domain_t* cedf; -+ rt_domain_t* edf; -+ ++ cedf_domain_t *cluster = task_cpu_cluster(task); + BUG_ON(!task); + -+ /* Get correct real-time domain. */ -+ cedf = task_cedf(task); -+ edf = &cedf->domain; -+ -+ /* first queue arriving job */ + requeue(task); -+ -+ /* then check for any necessary preemptions */ -+ check_for_preemptions(cedf); ++ check_for_preemptions(cluster); +} + -+/* check for current job releases */ +static void cedf_release_jobs(rt_domain_t* rt, struct bheap* tasks) +{ -+ cedf_domain_t* cedf = container_of(rt, cedf_domain_t, domain); -+ unsigned long flags; ++ cedf_domain_t* cluster = container_of(rt, cedf_domain_t, domain); ++ unsigned long flags; ++ ++ spin_lock_irqsave(&cluster->lock, flags); ++ ++ __merge_ready(&cluster->domain, tasks); ++ check_for_preemptions(cluster); ++ ++ spin_unlock_irqrestore(&cluster->lock, flags); ++} ++ ++/* caller holds cedf_lock */ ++static noinline void job_completion(struct task_struct *t, int forced) ++{ ++ BUG_ON(!t); ++ ++ sched_trace_task_completion(t, forced); + -+ spin_lock_irqsave(&cedf->slock, flags); ++ TRACE_TASK(t, "job_completion().\n"); + -+ __merge_ready(&cedf->domain, tasks); -+ check_for_preemptions(cedf); -+ spin_unlock_irqrestore(&cedf->slock, flags); ++ /* set flags */ ++ set_rt_flags(t, RT_F_SLEEP); ++ /* prepare for next period */ ++ prepare_for_next_period(t); ++ if (is_released(t, litmus_clock())) ++ sched_trace_task_release(t); ++ /* unlink */ ++ unlink(t); ++ /* requeue ++ * But don't requeue a blocking task. 
*/ ++ if (is_running(t)) ++ cedf_job_arrival(t); +} + +/* cedf_tick - this function is called for every local timer @@ -32717,8 +6311,6 @@ index 0000000..d0767ce + */ +static void cedf_tick(struct task_struct* t) +{ -+ BUG_ON(!t); -+ + if (is_realtime(t) && budget_exhausted(t)) { + if (!is_np(t)) { + /* np tasks will be preempted when they become @@ -32727,38 +6319,17 @@ index 0000000..d0767ce + set_tsk_need_resched(t); + set_will_schedule(); + TRACE("cedf_scheduler_tick: " -+ "%d is preemptable (state:%d) " -+ " => FORCE_RESCHED\n", t->pid, t->state); -+ } else if(is_user_np(t)) { ++ "%d is preemptable " ++ " => FORCE_RESCHED\n", t->pid); ++ } else if (is_user_np(t)) { + TRACE("cedf_scheduler_tick: " -+ "%d is non-preemptable (state:%d), " -+ "preemption delayed.\n", t->pid, t->state); ++ "%d is non-preemptable, " ++ "preemption delayed.\n", t->pid); + request_exit_np(t); + } + } +} + -+/* caller holds cedf_lock */ -+static noinline void job_completion(struct task_struct *t, int forced) -+{ -+ BUG_ON(!t); -+ -+ sched_trace_task_completion(t, forced); -+ -+ TRACE_TASK(t, "job_completion(). [state:%d]\n", t->state); -+ -+ /* set flags */ -+ set_rt_flags(t, RT_F_SLEEP); -+ /* prepare for next period */ -+ prepare_for_next_period(t); -+ /* unlink */ -+ unlink(t); -+ /* requeue -+ * But don't requeue a blocking task. */ -+ if (is_running(t)) -+ cedf_job_arrival(t); -+} -+ +/* Getting schedule() right is a bit tricky. schedule() may not make any + * assumptions on the state of the current task since it may be called for a + * number of reasons. The reasons include a scheduler_tick() determined that it @@ -32782,22 +6353,12 @@ index 0000000..d0767ce + */ +static struct task_struct* cedf_schedule(struct task_struct * prev) +{ -+ cedf_domain_t* cedf = local_cedf; -+ rt_domain_t* edf = &cedf->domain; -+ cpu_entry_t* entry = &__get_cpu_var(cedf_cpu_entries); -+ int out_of_time, sleep, preempt, np, -+ exists, blocks; -+ struct task_struct* next = NULL; -+ -+ BUG_ON(!prev); -+ BUG_ON(!cedf); -+ BUG_ON(!edf); -+ BUG_ON(!entry); -+ BUG_ON(cedf != remote_cedf(entry->cpu)); -+ BUG_ON(is_realtime(prev) && cedf != task_cedf(prev)); -+ -+ /* Will be released in finish_switch. */ -+ spin_lock(&cedf->slock); ++ cpu_entry_t* entry = &__get_cpu_var(cedf_cpu_entries); ++ cedf_domain_t *cluster = entry->cluster; ++ int out_of_time, sleep, preempt, np, exists, blocks; ++ struct task_struct* next = NULL; ++ ++ spin_lock(&cluster->lock); + clear_will_schedule(); + + /* sanity checking */ @@ -32813,6 +6374,21 @@ index 0000000..d0767ce + sleep = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP; + preempt = entry->scheduled != entry->linked; + ++#ifdef WANT_ALL_SCHED_EVENTS ++ TRACE_TASK(prev, "invoked cedf_schedule.\n"); ++#endif ++ ++ if (exists) ++ TRACE_TASK(prev, ++ "blocks:%d out_of_time:%d np:%d sleep:%d preempt:%d " ++ "state:%d sig:%d\n", ++ blocks, out_of_time, np, sleep, preempt, ++ prev->state, signal_pending(prev)); ++ if (entry->linked && preempt) ++ TRACE_TASK(prev, "will be preempted by %s/%d\n", ++ entry->linked->comm, entry->linked->pid); ++ ++ + /* If a task blocks we have no choice but to reschedule. + */ + if (blocks) @@ -32830,8 +6406,8 @@ index 0000000..d0767ce + + /* Any task that is preemptable and either exhausts its execution + * budget or wants to sleep completes. We may have to reschedule after -+ * this. Don't do a job completion if blocks (can't have timers -+ * running for blocked jobs). Preemption go first for the same reason. ++ * this. 
Don't do a job completion if we block (can't have timers running ++ * for blocked jobs). Preemption go first for the same reason. + */ + if (!np && (out_of_time || sleep) && !blocks && !preempt) + job_completion(entry->scheduled, !sleep); @@ -32839,10 +6415,10 @@ index 0000000..d0767ce + /* Link pending task if we became unlinked. + */ + if (!entry->linked) -+ link_task_to_cpu(__take_ready(edf), entry); ++ link_task_to_cpu(__take_ready(&cluster->domain), entry); + + /* The final scheduling decision. Do we need to switch for some reason? -+ * If linked different from scheduled select linked as next. ++ * If linked is different from scheduled, then select linked as next. + */ + if ((!np || blocks) && + entry->linked != entry->scheduled) { @@ -32851,76 +6427,91 @@ index 0000000..d0767ce + entry->linked->rt_param.scheduled_on = entry->cpu; + next = entry->linked; + } -+ if (entry->scheduled) { ++ if (entry->scheduled) { + /* not gonna be scheduled soon */ + entry->scheduled->rt_param.scheduled_on = NO_CPU; -+ TRACE_TASK(entry->scheduled, "cedf_schedule: scheduled_on = NO_CPU\n"); ++ TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n"); + } + } else -+ /* Only override Linux scheduler if we have real-time task ++ /* Only override Linux scheduler if we have a real-time task + * scheduled that needs to continue. + */ + if (exists) + next = prev; + -+ spin_unlock(&cedf->slock); ++ spin_unlock(&cluster->lock); ++ ++#ifdef WANT_ALL_SCHED_EVENTS ++ TRACE("cedf_lock released, next=0x%p\n", next); ++ ++ if (next) ++ TRACE_TASK(next, "scheduled at %llu\n", litmus_clock()); ++ else if (exists && !next) ++ TRACE("becomes idle at %llu.\n", litmus_clock()); ++#endif ++ + + return next; +} + ++ +/* _finish_switch - we just finished the switch away from prev + */ +static void cedf_finish_switch(struct task_struct *prev) +{ -+ cpu_entry_t* entry = &__get_cpu_var(cedf_cpu_entries); -+ -+ BUG_ON(!prev); -+ BUG_ON(!entry); ++ cpu_entry_t* entry = &__get_cpu_var(cedf_cpu_entries); + + entry->scheduled = is_realtime(current) ? 
current : NULL; ++#ifdef WANT_ALL_SCHED_EVENTS ++ TRACE_TASK(prev, "switched away from\n"); ++#endif +} + ++ +/* Prepare a task for running in RT mode + */ -+static void cedf_task_new(struct task_struct *t, int on_rq, int running) ++static void cedf_task_new(struct task_struct * t, int on_rq, int running) +{ + unsigned long flags; -+ cedf_domain_t* cedf = task_cedf(t); + cpu_entry_t* entry; ++ cedf_domain_t* cluster; ++ ++ TRACE("gsn edf: task new %d\n", t->pid); ++ ++ /* the cluster doesn't change even if t is running */ ++ cluster = task_cpu_cluster(t); ++ ++ spin_lock_irqsave(&cluster->domain.ready_lock, flags); + -+ BUG_ON(!cedf); ++ /* setup job params */ ++ release_at(t, litmus_clock()); + -+ spin_lock_irqsave(&cedf->slock, flags); + if (running) { + entry = &per_cpu(cedf_cpu_entries, task_cpu(t)); -+ BUG_ON(!entry); + BUG_ON(entry->scheduled); ++ + entry->scheduled = t; -+ t->rt_param.scheduled_on = task_cpu(t); -+ } else ++ tsk_rt(t)->scheduled_on = task_cpu(t); ++ } else { + t->rt_param.scheduled_on = NO_CPU; -+ t->rt_param.linked_on = NO_CPU; -+ -+ /* setup job params */ -+ release_at(t, litmus_clock()); ++ } ++ t->rt_param.linked_on = NO_CPU; + + cedf_job_arrival(t); -+ spin_unlock_irqrestore(&cedf->slock, flags); ++ spin_unlock_irqrestore(&(cluster->domain.ready_lock), flags); +} + -+ +static void cedf_task_wake_up(struct task_struct *task) +{ -+ unsigned long flags; -+ cedf_domain_t* cedf; -+ lt_t now; ++ unsigned long flags; ++ lt_t now; ++ cedf_domain_t *cluster; + -+ BUG_ON(!task); ++ TRACE_TASK(task, "wake_up at %llu\n", litmus_clock()); + -+ cedf = task_cedf(task); -+ BUG_ON(!cedf); ++ cluster = task_cpu_cluster(task); + -+ spin_lock_irqsave(&cedf->slock, flags); ++ spin_lock_irqsave(&cluster->lock, flags); + /* We need to take suspensions because of semaphores into + * account! If a job resumes after being suspended due to acquiring + * a semaphore, it should never be treated as a new job release. 
@@ -32934,48 +6525,49 @@ index 0000000..d0767ce + release_at(task, now); + sched_trace_task_release(task); + } -+ else if (task->rt.time_slice) -+ /* came back in time before deadline -+ */ -+ set_rt_flags(task, RT_F_RUNNING); ++ else { ++ if (task->rt.time_slice) { ++ /* came back in time before deadline ++ */ ++ set_rt_flags(task, RT_F_RUNNING); ++ } ++ } + } + cedf_job_arrival(task); -+ spin_unlock_irqrestore(&cedf->slock, flags); ++ spin_unlock_irqrestore(&cluster->lock, flags); +} + -+ +static void cedf_task_block(struct task_struct *t) +{ + unsigned long flags; ++ cedf_domain_t *cluster; + -+ BUG_ON(!t); ++ TRACE_TASK(t, "block at %llu\n", litmus_clock()); + -+ /* unlink if necessary */ -+ spin_lock_irqsave(&task_cedf(t)->slock, flags); ++ cluster = task_cpu_cluster(t); + -+ t->rt_param.scheduled_on = NO_CPU; ++ /* unlink if necessary */ ++ spin_lock_irqsave(&cluster->lock, flags); + unlink(t); -+ -+ spin_unlock_irqrestore(&task_cedf(t)->slock, flags); ++ spin_unlock_irqrestore(&cluster->lock, flags); + + BUG_ON(!is_realtime(t)); +} + ++ +static void cedf_task_exit(struct task_struct * t) +{ + unsigned long flags; -+ -+ BUG_ON(!t); ++ cedf_domain_t *cluster = task_cpu_cluster(t); + + /* unlink if necessary */ -+ spin_lock_irqsave(&task_cedf(t)->slock, flags); ++ spin_lock_irqsave(&cluster->lock, flags); + unlink(t); + if (tsk_rt(t)->scheduled_on != NO_CPU) { -+ cedf_cpu_entries_array[tsk_rt(t)->scheduled_on]-> -+ scheduled = NULL; ++ cluster->cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL; + tsk_rt(t)->scheduled_on = NO_CPU; + } -+ spin_unlock_irqrestore(&task_cedf(t)->slock, flags); ++ spin_unlock_irqrestore(&cluster->lock, flags); + + BUG_ON(!is_realtime(t)); + TRACE_TASK(t, "RIP\n"); @@ -32983,108 +6575,176 @@ index 0000000..d0767ce + +static long cedf_admit_task(struct task_struct* tsk) +{ -+ return (task_cpu(tsk) >= task_cedf(tsk)->first_cpu && -+ task_cpu(tsk) <= task_cedf(tsk)->last_cpu) ? 0 : -EINVAL; ++ return task_cpu(tsk) == tsk->rt_param.task_params.cpu ? 0 : -EINVAL; +} + ++/* total number of cluster */ ++static int num_clusters; ++/* we do not support cluster of different sizes */ ++static unsigned int cluster_size; + -+/* Plugin object */ -+static struct sched_plugin cedf_plugin __cacheline_aligned_in_smp = { -+ .plugin_name = "C-EDF", -+ .finish_switch = cedf_finish_switch, -+ .tick = cedf_tick, -+ .task_new = cedf_task_new, -+ .complete_job = complete_job, -+ .task_exit = cedf_task_exit, -+ .schedule = cedf_schedule, -+ .task_wake_up = cedf_task_wake_up, -+ .task_block = cedf_task_block, -+ .admit_task = cedf_admit_task -+}; ++#ifdef VERBOSE_INIT ++static void print_cluster_topology(cpumask_var_t mask, int cpu) ++{ ++ int chk; ++ char buf[255]; + -+static void cedf_domain_init(int first_cpu, int last_cpu) ++ chk = cpulist_scnprintf(buf, 254, mask); ++ buf[chk] = '\0'; ++ printk(KERN_INFO "CPU = %d, shared cpu(s) = %s\n", cpu, buf); ++ ++} ++#endif ++ ++static int clusters_allocated = 0; ++ ++static void cleanup_cedf(void) +{ -+ int cpu; ++ int i; ++ ++ if (clusters_allocated) { ++ for (i = 0; i < num_clusters; i++) { ++ kfree(cedf[i].cpus); ++ kfree(cedf[i].heap_node); ++ free_cpumask_var(cedf[i].cpu_map); ++ } + -+ /* Create new domain for this cluster. */ -+ cedf_domain_t *new_cedf_domain = kmalloc(sizeof(*new_cedf_domain), -+ GFP_KERNEL); -+ -+ /* Initialize cluster domain. 
*/ -+ edf_domain_init(&new_cedf_domain->domain, NULL, -+ cedf_release_jobs); -+ new_cedf_domain->first_cpu = first_cpu; -+ new_cedf_domain->last_cpu = last_cpu; -+ INIT_LIST_HEAD(&new_cedf_domain->cedf_cpu_queue); -+ -+ /* Assign all cpus in cluster to point to this domain. */ -+ for (cpu = first_cpu; cpu <= last_cpu; cpu++) { -+ remote_cedf(cpu) = new_cedf_domain; -+ cedf_domains_array[cpu] = new_cedf_domain; ++ kfree(cedf); + } +} + -+static int __init init_cedf(void) ++static long cedf_activate_plugin(void) +{ -+ int cpu; ++ int i, j, cpu, ccpu, cpu_count; + cpu_entry_t *entry; + -+ /* num_online_cpus() should have been set already -+ * if the number of available cpus is less then the cluster -+ * size (currently 4) then it is pointless trying to use -+ * CEDF, so we disable this plugin -+ */ -+ if(num_online_cpus() < cluster_size) { -+ printk(KERN_INFO "Not registering C-EDF plugin: " -+ "Num Online Cpus (%d) < Min Cluster Size (%d)\n", -+ num_online_cpus(), cluster_size); -+ do_cleanup = 0; -+ return 0; ++ cpumask_var_t mask; ++ int chk = 0; ++ ++ /* de-allocate old clusters, if any */ ++ cleanup_cedf(); ++ ++ printk(KERN_INFO "C-EDF: Activate Plugin, cache index = %d\n", ++ cluster_cache_index); ++ ++ /* need to get cluster_size first */ ++ if(!zalloc_cpumask_var(&mask, GFP_ATOMIC)) ++ return -ENOMEM; ++ ++ chk = get_shared_cpu_map(mask, 0, cluster_cache_index); ++ if (chk) { ++ /* if chk != 0 then it is the max allowed index */ ++ printk(KERN_INFO "C-EDF: Cannot support cache index = %d\n", ++ cluster_cache_index); ++ printk(KERN_INFO "C-EDF: Using cache index = %d\n", ++ chk); ++ cluster_cache_index = chk; + } + -+ /* -+ * initialize short_cut for per-cpu cedf state; -+ * there may be a problem here if someone removes a cpu -+ * while we are doing this initialization... and if cpus -+ * are added / removed later... is it a _real_ problem for cedf? 
-+ */ -+ cedf_cpu_entries_array = kmalloc( -+ sizeof(cpu_entry_t *) * num_online_cpus(), -+ GFP_KERNEL); ++ cluster_size = cpumask_weight(mask); + -+ cedf_domains_array = kmalloc( -+ sizeof(cedf_domain_t *) * num_online_cpus(), -+ GFP_KERNEL); ++ if ((num_online_cpus() % cluster_size) != 0) { ++ /* this can't be right, some cpus are left out */ ++ printk(KERN_ERR "C-EDF: Trying to group %d cpus in %d!\n", ++ num_online_cpus(), cluster_size); ++ return -1; ++ } + -+ /* initialize CPU state */ -+ for (cpu = 0; cpu < num_online_cpus(); cpu++) { -+ entry = &per_cpu(cedf_cpu_entries, cpu); -+ cedf_cpu_entries_array[cpu] = entry; -+ atomic_set(&entry->will_schedule, 0); -+ entry->linked = NULL; -+ entry->scheduled = NULL; -+ entry->cpu = cpu; -+ INIT_LIST_HEAD(&entry->list); ++ num_clusters = num_online_cpus() / cluster_size; ++ printk(KERN_INFO "C-EDF: %d cluster(s) of size = %d\n", ++ num_clusters, cluster_size); ++ ++ /* initialize clusters */ ++ cedf = kmalloc(num_clusters * sizeof(cedf_domain_t), GFP_ATOMIC); ++ for (i = 0; i < num_clusters; i++) { ++ ++ cedf[i].cpus = kmalloc(cluster_size * sizeof(cpu_entry_t), ++ GFP_ATOMIC); ++ cedf[i].heap_node = kmalloc( ++ cluster_size * sizeof(struct bheap_node), ++ GFP_ATOMIC); ++ bheap_init(&(cedf[i].cpu_heap)); ++ edf_domain_init(&(cedf[i].domain), NULL, cedf_release_jobs); ++ ++ if(!zalloc_cpumask_var(&cedf[i].cpu_map, GFP_ATOMIC)) ++ return -ENOMEM; ++ } ++ ++ /* cycle through cluster and add cpus to them */ ++ for (i = 0; i < num_clusters; i++) { ++ ++ for_each_online_cpu(cpu) { ++ /* check if the cpu is already in a cluster */ ++ for (j = 0; j < num_clusters; j++) ++ if (cpumask_test_cpu(cpu, cedf[j].cpu_map)) ++ break; ++ /* if it is in a cluster go to next cpu */ ++ if (cpumask_test_cpu(cpu, cedf[j].cpu_map)) ++ continue; ++ ++ /* this cpu isn't in any cluster */ ++ /* get the shared cpus */ ++ get_shared_cpu_map(mask, cpu, cluster_cache_index); ++ cpumask_copy(cedf[i].cpu_map, mask); ++#ifdef VERBOSE_INIT ++ print_cluster_topology(mask, cpu); ++#endif ++ /* add cpus to current cluster and init cpu_entry_t */ ++ cpu_count = 0; ++ for_each_cpu(ccpu, cedf[i].cpu_map) { ++ ++ entry = &per_cpu(cedf_cpu_entries, ccpu); ++ cedf[i].cpus[cpu_count] = entry; ++ atomic_set(&entry->will_schedule, 0); ++ entry->cpu = ccpu; ++ entry->cluster = &cedf[i]; ++ entry->hn = &(cedf[i].heap_node[cpu_count]); ++ bheap_node_init(&entry->hn, entry); ++ ++ cpu_count++; ++ ++ entry->linked = NULL; ++ entry->scheduled = NULL; ++ update_cpu_position(entry); ++ } ++ /* done with this cluster */ ++ break; ++ } + } + -+ /* initialize all cluster domains */ -+ for (cpu = 0; cpu < num_online_cpus(); cpu += cluster_size) -+ cedf_domain_init(cpu, cpu+cluster_size-1); ++ free_cpumask_var(mask); ++ clusters_allocated = 1; ++ return 0; ++} ++ ++/* Plugin object */ ++static struct sched_plugin cedf_plugin __cacheline_aligned_in_smp = { ++ .plugin_name = "C-EDF", ++ .finish_switch = cedf_finish_switch, ++ .tick = cedf_tick, ++ .task_new = cedf_task_new, ++ .complete_job = complete_job, ++ .task_exit = cedf_task_exit, ++ .schedule = cedf_schedule, ++ .task_wake_up = cedf_task_wake_up, ++ .task_block = cedf_task_block, ++ .admit_task = cedf_admit_task, ++ .activate_plugin = cedf_activate_plugin, ++}; ++ + ++static int __init init_cedf(void) ++{ + return register_sched_plugin(&cedf_plugin); +} + +static void clean_cedf(void) +{ -+ if(do_cleanup) { -+ kfree(cedf_cpu_entries_array); -+ kfree(cedf_domains_array); -+ } ++ cleanup_cedf(); +} + +module_init(init_cedf); 
+module_exit(clean_cedf); -+ diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c new file mode 100644 index 0000000..b9310dd @@ -35147,10 +8807,10 @@ index 0000000..2ea3922 +module_exit(clean_pfair); diff --git a/litmus/sched_plugin.c b/litmus/sched_plugin.c new file mode 100644 -index 0000000..bc7c0e9 +index 0000000..3767b30 --- /dev/null +++ b/litmus/sched_plugin.c -@@ -0,0 +1,257 @@ +@@ -0,0 +1,265 @@ +/* sched_plugin.c -- core infrastructure for the scheduler plugin system + * + * This file includes the initialization of the plugin system, the no-op Linux @@ -35324,6 +8984,14 @@ index 0000000..bc7c0e9 +}; + +/* ++ * The cluster size is needed in C-EDF: it makes sense only to cluster ++ * around L2 or L3, so if cluster_cache_index = 2 (default) we cluster ++ * all the CPUs that shares a L2 cache, while cluster_cache_index = 3 ++ * we cluster all CPs that shares a L3 cache ++ */ ++int cluster_cache_index = 2; ++ ++/* + * The reference to current plugin that is used to schedule tasks within + * the system. It stores references to actual function implementations + * Should be initialized by calling "init_***_plugin()" @@ -36848,2133 +10516,184 @@ index 0000000..bf75fde + */ + ret = wait_for_completion_interruptible(&ts_release); + -+ return ret; -+} -+ -+int count_tasks_waiting_for_release(void) -+{ -+ unsigned long flags; -+ int task_count = 0; -+ struct list_head *pos; -+ -+ spin_lock_irqsave(&ts_release.wait.lock, flags); -+ list_for_each(pos, &ts_release.wait.task_list) { -+ task_count++; -+ } -+ spin_unlock_irqrestore(&ts_release.wait.lock, flags); -+ -+ return task_count; -+} -+ -+static long do_release_ts(lt_t start) -+{ -+ int task_count = 0; -+ unsigned long flags; -+ struct list_head *pos; -+ struct task_struct *t; -+ -+ -+ spin_lock_irqsave(&ts_release.wait.lock, flags); -+ TRACE("<<<<<< synchronous task system release >>>>>>\n"); -+ -+ sched_trace_sys_release(&start); -+ list_for_each(pos, &ts_release.wait.task_list) { -+ t = (struct task_struct*) list_entry(pos, -+ struct __wait_queue, -+ task_list)->private; -+ task_count++; -+ litmus->release_at(t, start + t->rt_param.task_params.phase); -+ sched_trace_task_release(t); -+ } -+ -+ spin_unlock_irqrestore(&ts_release.wait.lock, flags); -+ -+ complete_n(&ts_release, task_count); -+ -+ return task_count; -+} -+ -+ -+asmlinkage long sys_wait_for_ts_release(void) -+{ -+ long ret = -EPERM; -+ struct task_struct *t = current; -+ -+ if (is_realtime(t)) -+ ret = do_wait_for_ts_release(); -+ -+ return ret; -+} -+ -+ -+asmlinkage long sys_release_ts(lt_t __user *__delay) -+{ -+ long ret; -+ lt_t delay; -+ -+ /* FIXME: check capabilities... 
*/ -+ -+ ret = copy_from_user(&delay, __delay, sizeof(delay)); -+ if (ret == 0) -+ ret = do_release_ts(litmus_clock() + delay); -+ -+ return ret; -+} -diff --git a/litmus/trace.c b/litmus/trace.c -new file mode 100644 -index 0000000..4403769 ---- /dev/null -+++ b/litmus/trace.c -@@ -0,0 +1,103 @@ -+#include -+ -+#include -+#include -+#include -+ -+/******************************************************************************/ -+/* Allocation */ -+/******************************************************************************/ -+ -+static struct ftdev overhead_dev; -+ -+#define trace_ts_buf overhead_dev.minor[0].buf -+ -+static unsigned int ts_seq_no = 0; -+ -+static inline void __save_timestamp_cpu(unsigned long event, -+ uint8_t type, uint8_t cpu) -+{ -+ unsigned int seq_no; -+ struct timestamp *ts; -+ seq_no = fetch_and_inc((int *) &ts_seq_no); -+ if (ft_buffer_start_write(trace_ts_buf, (void**) &ts)) { -+ ts->event = event; -+ ts->timestamp = ft_timestamp(); -+ ts->seq_no = seq_no; -+ ts->cpu = cpu; -+ ts->task_type = type; -+ ft_buffer_finish_write(trace_ts_buf, ts); -+ } -+} -+ -+static inline void __save_timestamp(unsigned long event, -+ uint8_t type) -+{ -+ __save_timestamp_cpu(event, type, raw_smp_processor_id()); ++ return ret; +} + -+feather_callback void save_timestamp(unsigned long event) ++int count_tasks_waiting_for_release(void) +{ -+ __save_timestamp(event, TSK_UNKNOWN); -+} ++ unsigned long flags; ++ int task_count = 0; ++ struct list_head *pos; + -+feather_callback void save_timestamp_def(unsigned long event, -+ unsigned long type) -+{ -+ __save_timestamp(event, (uint8_t) type); -+} ++ spin_lock_irqsave(&ts_release.wait.lock, flags); ++ list_for_each(pos, &ts_release.wait.task_list) { ++ task_count++; ++ } ++ spin_unlock_irqrestore(&ts_release.wait.lock, flags); + -+feather_callback void save_timestamp_task(unsigned long event, -+ unsigned long t_ptr) -+{ -+ int rt = is_realtime((struct task_struct *) t_ptr); -+ __save_timestamp(event, rt ? TSK_RT : TSK_BE); ++ return task_count; +} + -+feather_callback void save_timestamp_cpu(unsigned long event, -+ unsigned long cpu) ++static long do_release_ts(lt_t start) +{ -+ __save_timestamp_cpu(event, TSK_UNKNOWN, cpu); -+} -+ -+/******************************************************************************/ -+/* DEVICE FILE DRIVER */ -+/******************************************************************************/ ++ int task_count = 0; ++ unsigned long flags; ++ struct list_head *pos; ++ struct task_struct *t; + -+/* -+ * should be 8M; it is the max we can ask to buddy system allocator (MAX_ORDER) -+ * and we might not get as much -+ */ -+#define NO_TIMESTAMPS (2 << 11) + -+/* set MAJOR to 0 to have it dynamically assigned */ -+#define FT_TRACE_MAJOR 252 ++ spin_lock_irqsave(&ts_release.wait.lock, flags); ++ TRACE("<<<<<< synchronous task system release >>>>>>\n"); + -+static int alloc_timestamp_buffer(struct ftdev* ftdev, unsigned int idx) -+{ -+ unsigned int count = NO_TIMESTAMPS; -+ while (count && !trace_ts_buf) { -+ printk("time stamp buffer: trying to allocate %u time stamps.\n", count); -+ ftdev->minor[idx].buf = alloc_ft_buffer(count, sizeof(struct timestamp)); -+ count /= 2; ++ sched_trace_sys_release(&start); ++ list_for_each(pos, &ts_release.wait.task_list) { ++ t = (struct task_struct*) list_entry(pos, ++ struct __wait_queue, ++ task_list)->private; ++ task_count++; ++ litmus->release_at(t, start + t->rt_param.task_params.phase); ++ sched_trace_task_release(t); + } -+ return ftdev->minor[idx].buf ? 
0 : -ENOMEM; -+} -+ -+static void free_timestamp_buffer(struct ftdev* ftdev, unsigned int idx) -+{ -+ free_ft_buffer(ftdev->minor[idx].buf); -+ ftdev->minor[idx].buf = NULL; -+} + -+static int __init init_ft_overhead_trace(void) -+{ -+ printk("Initializing Feather-Trace overhead tracing device.\n"); -+ ftdev_init(&overhead_dev, THIS_MODULE); -+ overhead_dev.minor_cnt = 1; /* only one buffer */ -+ overhead_dev.alloc = alloc_timestamp_buffer; -+ overhead_dev.free = free_timestamp_buffer; -+ return register_ftdev(&overhead_dev, "ft_trace", FT_TRACE_MAJOR); -+} ++ spin_unlock_irqrestore(&ts_release.wait.lock, flags); + -+module_init(init_ft_overhead_trace); -diff --git a/net/ax25/ax25_out.c b/net/ax25/ax25_out.c -index 1491260..bf706f8 100644 ---- a/net/ax25/ax25_out.c -+++ b/net/ax25/ax25_out.c -@@ -92,12 +92,6 @@ ax25_cb *ax25_send_frame(struct sk_buff *skb, int paclen, ax25_address *src, ax2 - #endif - } - -- /* -- * There is one ref for the state machine; a caller needs -- * one more to put it back, just like with the existing one. -- */ -- ax25_cb_hold(ax25); -- - ax25_cb_add(ax25); - - ax25->state = AX25_STATE_1; -diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c -index 0b7f262..bd1c654 100644 ---- a/net/bridge/netfilter/ebtables.c -+++ b/net/bridge/netfilter/ebtables.c -@@ -1406,9 +1406,6 @@ static int do_ebt_set_ctl(struct sock *sk, - { - int ret; - -- if (!capable(CAP_NET_ADMIN)) -- return -EPERM; -- - switch(cmd) { - case EBT_SO_SET_ENTRIES: - ret = do_replace(sock_net(sk), user, len); -@@ -1428,9 +1425,6 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) - struct ebt_replace tmp; - struct ebt_table *t; - -- if (!capable(CAP_NET_ADMIN)) -- return -EPERM; -- - if (copy_from_user(&tmp, user, sizeof(tmp))) - return -EFAULT; - -diff --git a/net/core/dev.c b/net/core/dev.c -index 584046e..fe10551 100644 ---- a/net/core/dev.c -+++ b/net/core/dev.c -@@ -4860,11 +4860,6 @@ int register_netdevice(struct net_device *dev) - rollback_registered(dev); - dev->reg_state = NETREG_UNREGISTERED; - } -- /* -- * Prevent userspace races by waiting until the network -- * device is fully setup before sending notifications. -- */ -- rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U); - - out: - return ret; -@@ -5403,12 +5398,6 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char - /* Notify protocols, that a new device appeared. */ - call_netdevice_notifiers(NETDEV_REGISTER, dev); - -- /* -- * Prevent userspace races by waiting until the network -- * device is fully setup before sending notifications. 
-- */ -- rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U); -- - synchronize_net(); - err = 0; - out: -diff --git a/net/core/dst.c b/net/core/dst.c -index cb1b348..57bc4d5 100644 ---- a/net/core/dst.c -+++ b/net/core/dst.c -@@ -17,7 +17,6 @@ - #include - #include - #include --#include - - #include - -@@ -80,7 +79,6 @@ loop: - while ((dst = next) != NULL) { - next = dst->next; - prefetch(&next->next); -- cond_resched(); - if (likely(atomic_read(&dst->__refcnt))) { - last->next = dst; - last = dst; -diff --git a/net/core/pktgen.c b/net/core/pktgen.c -index 6a993b1..6e79e96 100644 ---- a/net/core/pktgen.c -+++ b/net/core/pktgen.c -@@ -3516,7 +3516,6 @@ static int pktgen_thread_worker(void *arg) - wait_event_interruptible_timeout(t->queue, - t->control != 0, - HZ/10); -- try_to_freeze(); - continue; - } - -diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c -index d4fd895..eb42873 100644 ---- a/net/core/rtnetlink.c -+++ b/net/core/rtnetlink.c -@@ -1334,11 +1334,13 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi - case NETDEV_UNREGISTER: - rtmsg_ifinfo(RTM_DELLINK, dev, ~0U); - break; -+ case NETDEV_REGISTER: -+ rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U); -+ break; - case NETDEV_UP: - case NETDEV_DOWN: - rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING); - break; -- case NETDEV_REGISTER: - case NETDEV_CHANGE: - case NETDEV_GOING_DOWN: - break; -diff --git a/net/core/sock.c b/net/core/sock.c -index 6605e75..7626b6a 100644 ---- a/net/core/sock.c -+++ b/net/core/sock.c -@@ -1181,10 +1181,6 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority) - - if (newsk->sk_prot->sockets_allocated) - percpu_counter_inc(newsk->sk_prot->sockets_allocated); -- -- if (sock_flag(newsk, SOCK_TIMESTAMP) || -- sock_flag(newsk, SOCK_TIMESTAMPING_RX_SOFTWARE)) -- net_enable_timestamp(); - } - out: - return newsk; -diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c -index 0030e73..5df2f6a 100644 ---- a/net/ipv4/devinet.c -+++ b/net/ipv4/devinet.c -@@ -1450,7 +1450,6 @@ static struct devinet_sysctl_table { - DEVINET_SYSCTL_RW_ENTRY(SEND_REDIRECTS, "send_redirects"), - DEVINET_SYSCTL_RW_ENTRY(ACCEPT_SOURCE_ROUTE, - "accept_source_route"), -- DEVINET_SYSCTL_RW_ENTRY(SRC_VMARK, "src_valid_mark"), - DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP, "proxy_arp"), - DEVINET_SYSCTL_RW_ENTRY(MEDIUM_ID, "medium_id"), - DEVINET_SYSCTL_RW_ENTRY(BOOTP_RELAY, "bootp_relay"), -diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c -index 29391ee..aa00398 100644 ---- a/net/ipv4/fib_frontend.c -+++ b/net/ipv4/fib_frontend.c -@@ -251,8 +251,6 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif, - if (in_dev) { - no_addr = in_dev->ifa_list == NULL; - rpf = IN_DEV_RPFILTER(in_dev); -- if (mark && !IN_DEV_SRC_VMARK(in_dev)) -- fl.mark = 0; - } - rcu_read_unlock(); - -diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c -index 4d50daa..f989518 100644 ---- a/net/ipv4/ip_output.c -+++ b/net/ipv4/ip_output.c -@@ -501,8 +501,8 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) - if (skb->sk) { - frag->sk = skb->sk; - frag->destructor = sock_wfree; -+ truesizes += frag->truesize; - } -- truesizes += frag->truesize; - } - - /* Everything is OK. Generate! 
*/ -diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c -index 98442f3..27774c9 100644 ---- a/net/ipv4/netfilter/arp_tables.c -+++ b/net/ipv4/netfilter/arp_tables.c -@@ -925,10 +925,10 @@ static int get_info(struct net *net, void __user *user, int *len, int compat) - if (t && !IS_ERR(t)) { - struct arpt_getinfo info; - const struct xt_table_info *private = t->private; --#ifdef CONFIG_COMPAT -- struct xt_table_info tmp; - -+#ifdef CONFIG_COMPAT - if (compat) { -+ struct xt_table_info tmp; - ret = compat_table_info(private, &tmp); - xt_compat_flush_offsets(NFPROTO_ARP); - private = &tmp; -diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c -index 62aff31..cde755d 100644 ---- a/net/ipv4/netfilter/ip_tables.c -+++ b/net/ipv4/netfilter/ip_tables.c -@@ -1132,10 +1132,10 @@ static int get_info(struct net *net, void __user *user, int *len, int compat) - if (t && !IS_ERR(t)) { - struct ipt_getinfo info; - const struct xt_table_info *private = t->private; --#ifdef CONFIG_COMPAT -- struct xt_table_info tmp; - -+#ifdef CONFIG_COMPAT - if (compat) { -+ struct xt_table_info tmp; - ret = compat_table_info(private, &tmp); - xt_compat_flush_offsets(AF_INET); - private = &tmp; -diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c -index 1032a15..aa95bb8 100644 ---- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c -+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c -@@ -213,7 +213,7 @@ static ctl_table ip_ct_sysctl_table[] = { - { - .ctl_name = NET_IPV4_NF_CONNTRACK_BUCKETS, - .procname = "ip_conntrack_buckets", -- .data = &init_net.ct.htable_size, -+ .data = &nf_conntrack_htable_size, - .maxlen = sizeof(unsigned int), - .mode = 0444, - .proc_handler = proc_dointvec, -diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c -index 2fb7b76..8668a3d 100644 ---- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c -+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c -@@ -32,7 +32,7 @@ static struct hlist_nulls_node *ct_get_first(struct seq_file *seq) - struct hlist_nulls_node *n; - - for (st->bucket = 0; -- st->bucket < net->ct.htable_size; -+ st->bucket < nf_conntrack_htable_size; - st->bucket++) { - n = rcu_dereference(net->ct.hash[st->bucket].first); - if (!is_a_nulls(n)) -@@ -50,7 +50,7 @@ static struct hlist_nulls_node *ct_get_next(struct seq_file *seq, - head = rcu_dereference(head->next); - while (is_a_nulls(head)) { - if (likely(get_nulls_value(head) == st->bucket)) { -- if (++st->bucket >= net->ct.htable_size) -+ if (++st->bucket >= nf_conntrack_htable_size) - return NULL; - } - head = rcu_dereference(net->ct.hash[st->bucket].first); -diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c -index 331ead3..fa2d6b6 100644 ---- a/net/ipv4/netfilter/nf_defrag_ipv4.c -+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c -@@ -14,7 +14,6 @@ - #include - #include - --#include - #include - #include - -@@ -35,20 +34,6 @@ static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user) - return err; - } - --static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum, -- struct sk_buff *skb) --{ --#ifdef CONFIG_BRIDGE_NETFILTER -- if (skb->nf_bridge && -- skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING) -- return IP_DEFRAG_CONNTRACK_BRIDGE_IN; --#endif -- if (hooknum == NF_INET_PRE_ROUTING) -- return IP_DEFRAG_CONNTRACK_IN; -- else -- return IP_DEFRAG_CONNTRACK_OUT; --} -- - static unsigned int 
ipv4_conntrack_defrag(unsigned int hooknum, - struct sk_buff *skb, - const struct net_device *in, -@@ -65,8 +50,10 @@ static unsigned int ipv4_conntrack_defrag(unsigned int hooknum, - #endif - /* Gather fragments. */ - if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) { -- enum ip_defrag_users user = nf_ct_defrag_user(hooknum, skb); -- if (nf_ct_ipv4_gather_frags(skb, user)) -+ if (nf_ct_ipv4_gather_frags(skb, -+ hooknum == NF_INET_PRE_ROUTING ? -+ IP_DEFRAG_CONNTRACK_IN : -+ IP_DEFRAG_CONNTRACK_OUT)) - return NF_STOLEN; - } - return NF_ACCEPT; -diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c -index 26066a2..fe1a644 100644 ---- a/net/ipv4/netfilter/nf_nat_core.c -+++ b/net/ipv4/netfilter/nf_nat_core.c -@@ -35,6 +35,9 @@ static DEFINE_SPINLOCK(nf_nat_lock); - - static struct nf_conntrack_l3proto *l3proto __read_mostly; - -+/* Calculated at init based on memory size */ -+static unsigned int nf_nat_htable_size __read_mostly; ++ complete_n(&ts_release, task_count); + - #define MAX_IP_NAT_PROTO 256 - static const struct nf_nat_protocol *nf_nat_protos[MAX_IP_NAT_PROTO] - __read_mostly; -@@ -69,7 +72,7 @@ EXPORT_SYMBOL_GPL(nf_nat_proto_put); - - /* We keep an extra hash for each conntrack, for fast searching. */ - static inline unsigned int --hash_by_src(const struct net *net, const struct nf_conntrack_tuple *tuple) -+hash_by_src(const struct nf_conntrack_tuple *tuple) - { - unsigned int hash; - -@@ -77,7 +80,7 @@ hash_by_src(const struct net *net, const struct nf_conntrack_tuple *tuple) - hash = jhash_3words((__force u32)tuple->src.u3.ip, - (__force u32)tuple->src.u.all, - tuple->dst.protonum, 0); -- return ((u64)hash * net->ipv4.nat_htable_size) >> 32; -+ return ((u64)hash * nf_nat_htable_size) >> 32; - } - - /* Is this tuple already taken? (not by us) */ -@@ -144,7 +147,7 @@ find_appropriate_src(struct net *net, - struct nf_conntrack_tuple *result, - const struct nf_nat_range *range) - { -- unsigned int h = hash_by_src(net, tuple); -+ unsigned int h = hash_by_src(tuple); - const struct nf_conn_nat *nat; - const struct nf_conn *ct; - const struct hlist_node *n; -@@ -327,7 +330,7 @@ nf_nat_setup_info(struct nf_conn *ct, - if (have_to_hash) { - unsigned int srchash; - -- srchash = hash_by_src(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); -+ srchash = hash_by_src(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); - spin_lock_bh(&nf_nat_lock); - /* nf_conntrack_alter_reply might re-allocate exntension aera */ - nat = nfct_nat(ct); -@@ -676,10 +679,8 @@ nfnetlink_parse_nat_setup(struct nf_conn *ct, - - static int __net_init nf_nat_net_init(struct net *net) - { -- /* Leave them the same for the moment. */ -- net->ipv4.nat_htable_size = net->ct.htable_size; -- net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&net->ipv4.nat_htable_size, -- &net->ipv4.nat_vmalloced, 0); -+ net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size, -+ &net->ipv4.nat_vmalloced, 0); - if (!net->ipv4.nat_bysource) - return -ENOMEM; - return 0; -@@ -702,7 +703,7 @@ static void __net_exit nf_nat_net_exit(struct net *net) - nf_ct_iterate_cleanup(net, &clean_nat, NULL); - synchronize_rcu(); - nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_vmalloced, -- net->ipv4.nat_htable_size); -+ nf_nat_htable_size); - } - - static struct pernet_operations nf_nat_net_ops = { -@@ -723,6 +724,9 @@ static int __init nf_nat_init(void) - return ret; - } - -+ /* Leave them the same for the moment. 
*/ -+ nf_nat_htable_size = nf_conntrack_htable_size; -+ - ret = register_pernet_subsys(&nf_nat_net_ops); - if (ret < 0) - goto cleanup_extend; -diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c -index 4bac362..df159ff 100644 ---- a/net/ipv6/exthdrs.c -+++ b/net/ipv6/exthdrs.c -@@ -559,11 +559,6 @@ static inline struct inet6_dev *ipv6_skb_idev(struct sk_buff *skb) - return skb_dst(skb) ? ip6_dst_idev(skb_dst(skb)) : __in6_dev_get(skb->dev); - } - --static inline struct net *ipv6_skb_net(struct sk_buff *skb) --{ -- return skb_dst(skb) ? dev_net(skb_dst(skb)->dev) : dev_net(skb->dev); --} -- - /* Router Alert as of RFC 2711 */ - - static int ipv6_hop_ra(struct sk_buff *skb, int optoff) -@@ -585,8 +580,8 @@ static int ipv6_hop_ra(struct sk_buff *skb, int optoff) - static int ipv6_hop_jumbo(struct sk_buff *skb, int optoff) - { - const unsigned char *nh = skb_network_header(skb); -- struct net *net = ipv6_skb_net(skb); - u32 pkt_len; -+ struct net *net = dev_net(skb_dst(skb)->dev); - - if (nh[optoff + 1] != 4 || (optoff & 3) != 2) { - LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n", -diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c -index 1de56fd..cc9f8ef 100644 ---- a/net/ipv6/netfilter/ip6_tables.c -+++ b/net/ipv6/netfilter/ip6_tables.c -@@ -1164,10 +1164,10 @@ static int get_info(struct net *net, void __user *user, int *len, int compat) - if (t && !IS_ERR(t)) { - struct ip6t_getinfo info; - const struct xt_table_info *private = t->private; --#ifdef CONFIG_COMPAT -- struct xt_table_info tmp; - -+#ifdef CONFIG_COMPAT - if (compat) { -+ struct xt_table_info tmp; - ret = compat_table_info(private, &tmp); - xt_compat_flush_offsets(AF_INET6); - private = &tmp; -diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c -index 0956eba..5f2ec20 100644 ---- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c -+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c -@@ -20,7 +20,6 @@ - #include - #include - --#include - #include - #include - #include -@@ -188,21 +187,6 @@ out: - return nf_conntrack_confirm(skb); - } - --static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum, -- struct sk_buff *skb) --{ --#ifdef CONFIG_BRIDGE_NETFILTER -- if (skb->nf_bridge && -- skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING) -- return IP6_DEFRAG_CONNTRACK_BRIDGE_IN; --#endif -- if (hooknum == NF_INET_PRE_ROUTING) -- return IP6_DEFRAG_CONNTRACK_IN; -- else -- return IP6_DEFRAG_CONNTRACK_OUT; -- --} -- - static unsigned int ipv6_defrag(unsigned int hooknum, - struct sk_buff *skb, - const struct net_device *in, -@@ -215,7 +199,8 @@ static unsigned int ipv6_defrag(unsigned int hooknum, - if (skb->nfct) - return NF_ACCEPT; - -- reasm = nf_ct_frag6_gather(skb, nf_ct6_defrag_user(hooknum, skb)); -+ reasm = nf_ct_frag6_gather(skb); -+ - /* queued */ - if (reasm == NULL) - return NF_STOLEN; -diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c -index 4b6a539..f3aba25 100644 ---- a/net/ipv6/netfilter/nf_conntrack_reasm.c -+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c -@@ -170,14 +170,13 @@ out: - /* Creation primitives. 
*/ - - static __inline__ struct nf_ct_frag6_queue * --fq_find(__be32 id, u32 user, struct in6_addr *src, struct in6_addr *dst) -+fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst) - { - struct inet_frag_queue *q; - struct ip6_create_arg arg; - unsigned int hash; - - arg.id = id; -- arg.user = user; - arg.src = src; - arg.dst = dst; - -@@ -562,7 +561,7 @@ find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff) - return 0; - } - --struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user) -+struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb) - { - struct sk_buff *clone; - struct net_device *dev = skb->dev; -@@ -608,7 +607,7 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user) - if (atomic_read(&nf_init_frags.mem) > nf_init_frags.high_thresh) - nf_ct_frag6_evictor(); - -- fq = fq_find(fhdr->identification, user, &hdr->saddr, &hdr->daddr); -+ fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr); - if (fq == NULL) { - pr_debug("Can't find and can't create new queue\n"); - goto ret_orig; -diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c -index 4d18699..da5bd0e 100644 ---- a/net/ipv6/reassembly.c -+++ b/net/ipv6/reassembly.c -@@ -72,7 +72,6 @@ struct frag_queue - struct inet_frag_queue q; - - __be32 id; /* fragment id */ -- u32 user; - struct in6_addr saddr; - struct in6_addr daddr; - -@@ -142,7 +141,7 @@ int ip6_frag_match(struct inet_frag_queue *q, void *a) - struct ip6_create_arg *arg = a; - - fq = container_of(q, struct frag_queue, q); -- return (fq->id == arg->id && fq->user == arg->user && -+ return (fq->id == arg->id && - ipv6_addr_equal(&fq->saddr, arg->src) && - ipv6_addr_equal(&fq->daddr, arg->dst)); - } -@@ -164,7 +163,6 @@ void ip6_frag_init(struct inet_frag_queue *q, void *a) - struct ip6_create_arg *arg = a; - - fq->id = arg->id; -- fq->user = arg->user; - ipv6_addr_copy(&fq->saddr, arg->src); - ipv6_addr_copy(&fq->daddr, arg->dst); - } -@@ -246,7 +244,6 @@ fq_find(struct net *net, __be32 id, struct in6_addr *src, struct in6_addr *dst, - unsigned int hash; - - arg.id = id; -- arg.user = IP6_DEFRAG_LOCAL_DELIVER; - arg.src = src; - arg.dst = dst; - -diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c -index fe2d3f8..7b5131b 100644 ---- a/net/mac80211/cfg.c -+++ b/net/mac80211/cfg.c -@@ -338,8 +338,7 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) - sinfo->rx_packets = sta->rx_packets; - sinfo->tx_packets = sta->tx_packets; - -- if ((sta->local->hw.flags & IEEE80211_HW_SIGNAL_DBM) || -- (sta->local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)) { -+ if (sta->local->hw.flags & IEEE80211_HW_SIGNAL_DBM) { - sinfo->filled |= STATION_INFO_SIGNAL; - sinfo->signal = (s8)sta->last_signal; - } -@@ -1306,9 +1305,6 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, - struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); - struct ieee80211_conf *conf = &local->hw.conf; - -- if (sdata->vif.type != NL80211_IFTYPE_STATION) -- return -EOPNOTSUPP; -- - if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS)) - return -EOPNOTSUPP; - -diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h -index d87645e..37b9051 100644 ---- a/net/mac80211/driver-trace.h -+++ b/net/mac80211/driver-trace.h -@@ -655,7 +655,7 @@ TRACE_EVENT(drv_ampdu_action, - __entry->ret = ret; - __entry->action = action; - __entry->tid = tid; -- __entry->ssn = ssn ? 
*ssn : 0; -+ __entry->ssn = *ssn; - ), - - TP_printk( -diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c -index 07600a6..f1362f3 100644 ---- a/net/mac80211/ibss.c -+++ b/net/mac80211/ibss.c -@@ -455,10 +455,6 @@ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata) - - ieee80211_sta_expire(sdata, IEEE80211_IBSS_INACTIVITY_LIMIT); - -- if (time_before(jiffies, ifibss->last_scan_completed + -- IEEE80211_IBSS_MERGE_INTERVAL)) -- return; -- - if (ieee80211_sta_active_ibss(sdata)) - return; - -@@ -643,7 +639,7 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata, - } - if (pos[1] != 0 && - (pos[1] != ifibss->ssid_len || -- memcmp(pos + 2, ifibss->ssid, ifibss->ssid_len))) { -+ !memcmp(pos + 2, ifibss->ssid, ifibss->ssid_len))) { - /* Ignore ProbeReq for foreign SSID */ - return; - } -diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h -index 5a46164..10d316e 100644 ---- a/net/mac80211/ieee80211_i.h -+++ b/net/mac80211/ieee80211_i.h -@@ -808,7 +808,6 @@ struct ieee80211_local { - unsigned int wmm_acm; /* bit field of ACM bits (BIT(802.1D tag)) */ - - bool pspolling; -- bool scan_ps_enabled; - /* - * PS can only be enabled when we have exactly one managed - * interface (and monitors) in PS, this then points there. -diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c -index 079c500..b8295cb 100644 ---- a/net/mac80211/iface.c -+++ b/net/mac80211/iface.c -@@ -15,14 +15,12 @@ - #include - #include - #include --#include - #include "ieee80211_i.h" - #include "sta_info.h" - #include "debugfs_netdev.h" - #include "mesh.h" - #include "led.h" - #include "driver-ops.h" --#include "wme.h" - - /** - * DOC: Interface list locking -@@ -644,12 +642,6 @@ static void ieee80211_teardown_sdata(struct net_device *dev) - WARN_ON(flushed); - } - --static u16 ieee80211_netdev_select_queue(struct net_device *dev, -- struct sk_buff *skb) --{ -- return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb); --} -- - static const struct net_device_ops ieee80211_dataif_ops = { - .ndo_open = ieee80211_open, - .ndo_stop = ieee80211_stop, -@@ -658,35 +650,8 @@ static const struct net_device_ops ieee80211_dataif_ops = { - .ndo_set_multicast_list = ieee80211_set_multicast_list, - .ndo_change_mtu = ieee80211_change_mtu, - .ndo_set_mac_address = eth_mac_addr, -- .ndo_select_queue = ieee80211_netdev_select_queue, - }; - --static u16 ieee80211_monitor_select_queue(struct net_device *dev, -- struct sk_buff *skb) --{ -- struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); -- struct ieee80211_local *local = sdata->local; -- struct ieee80211_hdr *hdr; -- struct ieee80211_radiotap_header *rtap = (void *)skb->data; -- -- if (local->hw.queues < 4) -- return 0; -- -- if (skb->len < 4 || -- skb->len < le16_to_cpu(rtap->it_len) + 2 /* frame control */) -- return 0; /* doesn't matter, frame will be dropped */ -- -- hdr = (void *)((u8 *)skb->data + le16_to_cpu(rtap->it_len)); -- -- if (!ieee80211_is_data(hdr->frame_control)) { -- skb->priority = 7; -- return ieee802_1d_to_ac[skb->priority]; -- } -- -- skb->priority = 0; -- return ieee80211_downgrade_queue(local, skb); --} -- - static const struct net_device_ops ieee80211_monitorif_ops = { - .ndo_open = ieee80211_open, - .ndo_stop = ieee80211_stop, -@@ -695,7 +660,6 @@ static const struct net_device_ops ieee80211_monitorif_ops = { - .ndo_set_multicast_list = ieee80211_set_multicast_list, - .ndo_change_mtu = ieee80211_change_mtu, - .ndo_set_mac_address = eth_mac_addr, -- .ndo_select_queue = 
ieee80211_monitor_select_queue, - }; - - static void ieee80211_if_setup(struct net_device *dev) -@@ -804,8 +768,8 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name, - - ASSERT_RTNL(); - -- ndev = alloc_netdev_mq(sizeof(*sdata) + local->hw.vif_data_size, -- name, ieee80211_if_setup, local->hw.queues); -+ ndev = alloc_netdev(sizeof(*sdata) + local->hw.vif_data_size, -+ name, ieee80211_if_setup); - if (!ndev) - return -ENOMEM; - dev_net_set(ndev, wiphy_net(local->hw.wiphy)); -diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h -index 010ff2f..dd1c193 100644 ---- a/net/mac80211/mesh.h -+++ b/net/mac80211/mesh.h -@@ -186,9 +186,8 @@ struct mesh_rmc { - */ - #define MESH_PREQ_MIN_INT 10 - #define MESH_DIAM_TRAVERSAL_TIME 50 --/* A path will be refreshed if it is used PATH_REFRESH_TIME milliseconds before -- * timing out. This way it will remain ACTIVE and no data frames will be -- * unnecesarily held in the pending queue. -+/* Paths will be refreshed if they are closer than PATH_REFRESH_TIME to their -+ * expiration - */ - #define MESH_PATH_REFRESH_TIME 1000 - #define MESH_MIN_DISCOVERY_TIMEOUT (2 * MESH_DIAM_TRAVERSAL_TIME) -diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c -index 93c49fc..29b82e9 100644 ---- a/net/mac80211/mesh_hwmp.c -+++ b/net/mac80211/mesh_hwmp.c -@@ -813,7 +813,7 @@ int mesh_nexthop_lookup(struct sk_buff *skb, - } - - if (mpath->flags & MESH_PATH_ACTIVE) { -- if (time_after(jiffies, mpath->exp_time - -+ if (time_after(jiffies, mpath->exp_time + - msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) - && !memcmp(sdata->dev->dev_addr, hdr->addr4, - ETH_ALEN) -diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c -index 6cae295..dc5049d 100644 ---- a/net/mac80211/mlme.c -+++ b/net/mac80211/mlme.c -@@ -904,14 +904,6 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata, - sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL | - IEEE80211_STA_BEACON_POLL); - -- /* -- * Always handle WMM once after association regardless -- * of the first value the AP uses. Setting -1 here has -- * that effect because the AP values is an unsigned -- * 4-bit value. -- */ -- sdata->u.mgd.wmm_last_param_set = -1; -- - ieee80211_led_assoc(local, 1); - - sdata->vif.bss_conf.assoc = 1; -@@ -1953,9 +1945,7 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, - rma = ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len); - break; - case IEEE80211_STYPE_ACTION: -- if (mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT) -- break; -- -+ /* XXX: differentiate, can only happen for CSA now! 
*/ - ieee80211_sta_process_chanswitch(sdata, - &mgmt->u.action.u.chan_switch.sw_elem, - ifmgd->associated); -diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c -index 16c6cdc..7170bf4 100644 ---- a/net/mac80211/rx.c -+++ b/net/mac80211/rx.c -@@ -1514,6 +1514,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) - mpp_path_add(mesh_hdr->eaddr2, hdr->addr4, sdata); - } else { - spin_lock_bh(&mppath->state_lock); -+ mppath->exp_time = jiffies; - if (compare_ether_addr(mppath->mpp, hdr->addr4) != 0) - memcpy(mppath->mpp, hdr->addr4, ETH_ALEN); - spin_unlock_bh(&mppath->state_lock); -@@ -1548,9 +1549,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) - memset(info, 0, sizeof(*info)); - info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; - info->control.vif = &rx->sdata->vif; -- skb_set_queue_mapping(skb, -- ieee80211_select_queue(rx->sdata, fwd_skb)); -- ieee80211_set_qos_hdr(local, skb); -+ ieee80211_select_queue(local, fwd_skb); - if (is_multicast_ether_addr(fwd_hdr->addr1)) - IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh, - fwded_mcast); -@@ -1810,10 +1809,6 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) - } - break; - default: -- /* do not process rejected action frames */ -- if (mgmt->u.action.category & 0x80) -- return RX_DROP_MONITOR; -- - return RX_CONTINUE; - } - -diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c -index 1a41909..71e10ca 100644 ---- a/net/mac80211/scan.c -+++ b/net/mac80211/scan.c -@@ -196,8 +196,7 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) - static void ieee80211_scan_ps_enable(struct ieee80211_sub_if_data *sdata) - { - struct ieee80211_local *local = sdata->local; -- -- local->scan_ps_enabled = false; -+ bool ps = false; - - /* FIXME: what to do when local->pspolling is true? */ - -@@ -205,13 +204,12 @@ static void ieee80211_scan_ps_enable(struct ieee80211_sub_if_data *sdata) - cancel_work_sync(&local->dynamic_ps_enable_work); - - if (local->hw.conf.flags & IEEE80211_CONF_PS) { -- local->scan_ps_enabled = true; -+ ps = true; - local->hw.conf.flags &= ~IEEE80211_CONF_PS; - ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); - } - -- if (!(local->scan_ps_enabled) || -- !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)) -+ if (!ps || !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)) - /* - * If power save was enabled, no need to send a nullfunc - * frame because AP knows that we are sleeping. But if the -@@ -232,7 +230,7 @@ static void ieee80211_scan_ps_disable(struct ieee80211_sub_if_data *sdata) - - if (!local->ps_sdata) - ieee80211_send_nullfunc(local, sdata, 0); -- else if (local->scan_ps_enabled) { -+ else { - /* - * In !IEEE80211_HW_PS_NULLFUNC_STACK case the hardware - * will send a nullfunc frame with the powersave bit set -@@ -248,16 +246,6 @@ static void ieee80211_scan_ps_disable(struct ieee80211_sub_if_data *sdata) - */ - local->hw.conf.flags |= IEEE80211_CONF_PS; - ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); -- } else if (local->hw.conf.dynamic_ps_timeout > 0) { -- /* -- * If IEEE80211_CONF_PS was not set and the dynamic_ps_timer -- * had been running before leaving the operating channel, -- * restart the timer now and send a nullfunc frame to inform -- * the AP that we are awake. 
-- */ -- ieee80211_send_nullfunc(local, sdata, 0); -- mod_timer(&local->dynamic_ps_timer, jiffies + -- msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout)); - } - } - -@@ -276,14 +264,10 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted) - - mutex_lock(&local->scan_mtx); - -- /* -- * It's ok to abort a not-yet-running scan (that -- * we have one at all will be verified by checking -- * local->scan_req next), but not to complete it -- * successfully. -- */ -- if (WARN_ON(!local->scanning && !aborted)) -- aborted = true; -+ if (WARN_ON(!local->scanning)) { -+ mutex_unlock(&local->scan_mtx); -+ return; -+ } - - if (WARN_ON(!local->scan_req)) { - mutex_unlock(&local->scan_mtx); -diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c -index 441f68e..eaa4118 100644 ---- a/net/mac80211/tx.c -+++ b/net/mac80211/tx.c -@@ -1401,7 +1401,6 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, - - if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) && - local->hw.conf.dynamic_ps_timeout > 0 && -- !local->quiescing && - !(local->scanning) && local->ps_sdata) { - if (local->hw.conf.flags & IEEE80211_CONF_PS) { - ieee80211_stop_queues_by_reason(&local->hw, -@@ -1482,7 +1481,7 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, - return; - } - -- ieee80211_set_qos_hdr(local, skb); -+ ieee80211_select_queue(local, skb); - ieee80211_tx(sdata, skb, false); - dev_put(sdata->dev); - } -@@ -2226,9 +2225,6 @@ void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, - if (!encrypt) - info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; - -- /* send all internal mgmt frames on VO */ -- skb_set_queue_mapping(skb, 0); -- - /* - * The other path calling ieee80211_xmit is from the tasklet, - * and while we can handle concurrent transmissions locking -diff --git a/net/mac80211/util.c b/net/mac80211/util.c -index 553cffe..e6c08da 100644 ---- a/net/mac80211/util.c -+++ b/net/mac80211/util.c -@@ -269,7 +269,6 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue, - enum queue_stop_reason reason) - { - struct ieee80211_local *local = hw_to_local(hw); -- struct ieee80211_sub_if_data *sdata; - - if (WARN_ON(queue >= hw->queues)) - return; -@@ -282,11 +281,6 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue, - - if (!skb_queue_empty(&local->pending[queue])) - tasklet_schedule(&local->tx_pending_tasklet); -- -- rcu_read_lock(); -- list_for_each_entry_rcu(sdata, &local->interfaces, list) -- netif_tx_wake_queue(netdev_get_tx_queue(sdata->dev, queue)); -- rcu_read_unlock(); - } - - void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue, -@@ -311,17 +305,11 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue, - enum queue_stop_reason reason) - { - struct ieee80211_local *local = hw_to_local(hw); -- struct ieee80211_sub_if_data *sdata; - - if (WARN_ON(queue >= hw->queues)) - return; - - __set_bit(reason, &local->queue_stop_reasons[queue]); -- -- rcu_read_lock(); -- list_for_each_entry_rcu(sdata, &local->interfaces, list) -- netif_tx_stop_queue(netdev_get_tx_queue(sdata->dev, queue)); -- rcu_read_unlock(); - } - - void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue, -@@ -591,7 +579,7 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len, - if (elen > left) - break; - -- if (calc_crc && id < 64 && (filter & (1ULL << id))) -+ if (calc_crc && id < 64 && (filter & BIT(id))) - crc = crc32_be(crc, pos - 2, elen + 2); - - switch (id) { -@@ -1043,19 +1031,7 @@ int 
ieee80211_reconfig(struct ieee80211_local *local) - - /* restart hardware */ - if (local->open_count) { -- /* -- * Upon resume hardware can sometimes be goofy due to -- * various platform / driver / bus issues, so restarting -- * the device may at times not work immediately. Propagate -- * the error. -- */ - res = drv_start(local); -- if (res) { -- WARN(local->suspended, "Harware became unavailable " -- "upon resume. This is could be a software issue" -- "prior to suspend or a harware issue\n"); -- return res; -- } - - ieee80211_led_radio(local, true); - } -diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c -index 6d32ebf..b19b769 100644 ---- a/net/mac80211/wme.c -+++ b/net/mac80211/wme.c -@@ -44,62 +44,22 @@ static int wme_downgrade_ac(struct sk_buff *skb) - } - - --/* Indicate which queue to use. */ --u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata, -- struct sk_buff *skb) -+/* Indicate which queue to use. */ -+static u16 classify80211(struct ieee80211_local *local, struct sk_buff *skb) - { -- struct ieee80211_local *local = sdata->local; -- struct sta_info *sta = NULL; -- u32 sta_flags = 0; -- const u8 *ra = NULL; -- bool qos = false; -+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; - -- if (local->hw.queues < 4 || skb->len < 6) { -- skb->priority = 0; /* required for correct WPA/11i MIC */ -- return min_t(u16, local->hw.queues - 1, -- ieee802_1d_to_ac[skb->priority]); -- } -- -- rcu_read_lock(); -- switch (sdata->vif.type) { -- case NL80211_IFTYPE_AP_VLAN: -- case NL80211_IFTYPE_AP: -- ra = skb->data; -- break; -- case NL80211_IFTYPE_WDS: -- ra = sdata->u.wds.remote_addr; -- break; --#ifdef CONFIG_MAC80211_MESH -- case NL80211_IFTYPE_MESH_POINT: -- /* -- * XXX: This is clearly broken ... but already was before, -- * because ieee80211_fill_mesh_addresses() would clear A1 -- * except for multicast addresses. -- */ -- break; --#endif -- case NL80211_IFTYPE_STATION: -- ra = sdata->u.mgd.bssid; -- break; -- case NL80211_IFTYPE_ADHOC: -- ra = skb->data; -- break; -- default: -- break; -+ if (!ieee80211_is_data(hdr->frame_control)) { -+ /* management frames go on AC_VO queue, but are sent -+ * without QoS control fields */ -+ return 0; - } - -- if (!sta && ra && !is_multicast_ether_addr(ra)) { -- sta = sta_info_get(local, ra); -- if (sta) -- sta_flags = get_sta_flags(sta); -+ if (0 /* injected */) { -+ /* use AC from radiotap */ - } - -- if (sta_flags & WLAN_STA_WME) -- qos = true; -- -- rcu_read_unlock(); -- -- if (!qos) { -+ if (!ieee80211_is_data_qos(hdr->frame_control)) { - skb->priority = 0; /* required for correct WPA/11i MIC */ - return ieee802_1d_to_ac[skb->priority]; - } -@@ -108,12 +68,6 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata, - * data frame has */ - skb->priority = cfg80211_classify8021d(skb); - -- return ieee80211_downgrade_queue(local, skb); --} -- --u16 ieee80211_downgrade_queue(struct ieee80211_local *local, -- struct sk_buff *skb) --{ - /* in case we are a client verify acm is not set for this ac */ - while (unlikely(local->wmm_acm & BIT(skb->priority))) { - if (wme_downgrade_ac(skb)) { -@@ -131,17 +85,24 @@ u16 ieee80211_downgrade_queue(struct ieee80211_local *local, - return ieee802_1d_to_ac[skb->priority]; - } - --void ieee80211_set_qos_hdr(struct ieee80211_local *local, struct sk_buff *skb) -+void ieee80211_select_queue(struct ieee80211_local *local, struct sk_buff *skb) - { -- struct ieee80211_hdr *hdr = (void *)skb->data; -- -- /* Fill in the QoS header if there is one. 
*/ -+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; -+ u16 queue; -+ u8 tid; ++ return task_count; ++} + -+ queue = classify80211(local, skb); -+ if (unlikely(queue >= local->hw.queues)) -+ queue = local->hw.queues - 1; + -+ /* -+ * Now we know the 1d priority, fill in the QoS header if -+ * there is one (and we haven't done this before). -+ */ - if (ieee80211_is_data_qos(hdr->frame_control)) { - u8 *p = ieee80211_get_qos_ctl(hdr); -- u8 ack_policy = 0, tid; -- -+ u8 ack_policy = 0; - tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; -- - if (unlikely(local->wifi_wme_noack_test)) - ack_policy |= QOS_CONTROL_ACK_POLICY_NOACK << - QOS_CONTROL_ACK_POLICY_SHIFT; -@@ -149,4 +110,6 @@ void ieee80211_set_qos_hdr(struct ieee80211_local *local, struct sk_buff *skb) - *p++ = ack_policy | tid; - *p = 0; - } ++asmlinkage long sys_wait_for_ts_release(void) ++{ ++ long ret = -EPERM; ++ struct task_struct *t = current; + -+ skb_set_queue_mapping(skb, queue); - } -diff --git a/net/mac80211/wme.h b/net/mac80211/wme.h -index 6053b1c..d4fd87c 100644 ---- a/net/mac80211/wme.h -+++ b/net/mac80211/wme.h -@@ -20,11 +20,7 @@ - - extern const int ieee802_1d_to_ac[8]; - --u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata, -- struct sk_buff *skb); --void ieee80211_set_qos_hdr(struct ieee80211_local *local, struct sk_buff *skb); --u16 ieee80211_downgrade_queue(struct ieee80211_local *local, -- struct sk_buff *skb); -- -+void ieee80211_select_queue(struct ieee80211_local *local, -+ struct sk_buff *skb); - - #endif /* _WME_H */ -diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c -index 02b2610..446e9bd 100644 ---- a/net/netfilter/ipvs/ip_vs_ctl.c -+++ b/net/netfilter/ipvs/ip_vs_ctl.c -@@ -2714,8 +2714,6 @@ static int ip_vs_genl_parse_service(struct ip_vs_service_user_kern *usvc, - if (!(nla_af && (nla_fwmark || (nla_port && nla_protocol && nla_addr)))) - return -EINVAL; - -- memset(usvc, 0, sizeof(*usvc)); -- - usvc->af = nla_get_u16(nla_af); - #ifdef CONFIG_IP_VS_IPV6 - if (usvc->af != AF_INET && usvc->af != AF_INET6) -@@ -2903,8 +2901,6 @@ static int ip_vs_genl_parse_dest(struct ip_vs_dest_user_kern *udest, - if (!(nla_addr && nla_port)) - return -EINVAL; - -- memset(udest, 0, sizeof(*udest)); -- - nla_memcpy(&udest->addr, nla_addr, sizeof(udest->addr)); - udest->port = nla_get_u16(nla_port); - -diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c -index 1374179..b9168c1 100644 ---- a/net/netfilter/nf_conntrack_core.c -+++ b/net/netfilter/nf_conntrack_core.c -@@ -30,7 +30,6 @@ - #include - #include - #include --#include - #include - - #include -@@ -64,6 +63,8 @@ EXPORT_SYMBOL_GPL(nf_conntrack_max); - struct nf_conn nf_conntrack_untracked __read_mostly; - EXPORT_SYMBOL_GPL(nf_conntrack_untracked); - -+static struct kmem_cache *nf_conntrack_cachep __read_mostly; ++ if (is_realtime(t)) ++ ret = do_wait_for_ts_release(); + - static int nf_conntrack_hash_rnd_initted; - static unsigned int nf_conntrack_hash_rnd; - -@@ -85,10 +86,9 @@ static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple, - return ((u64)h * size) >> 32; - } - --static inline u_int32_t hash_conntrack(const struct net *net, -- const struct nf_conntrack_tuple *tuple) -+static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple) - { -- return __hash_conntrack(tuple, net->ct.htable_size, -+ return __hash_conntrack(tuple, nf_conntrack_htable_size, - nf_conntrack_hash_rnd); - } - -@@ -296,7 +296,7 @@ __nf_conntrack_find(struct net *net, 
const struct nf_conntrack_tuple *tuple) - { - struct nf_conntrack_tuple_hash *h; - struct hlist_nulls_node *n; -- unsigned int hash = hash_conntrack(net, tuple); -+ unsigned int hash = hash_conntrack(tuple); - - /* Disable BHs the entire time since we normally need to disable them - * at least once for the stats anyway. -@@ -366,11 +366,10 @@ static void __nf_conntrack_hash_insert(struct nf_conn *ct, - - void nf_conntrack_hash_insert(struct nf_conn *ct) - { -- struct net *net = nf_ct_net(ct); - unsigned int hash, repl_hash; - -- hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); -- repl_hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_REPLY].tuple); -+ hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); -+ repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); - - __nf_conntrack_hash_insert(ct, hash, repl_hash); - } -@@ -398,8 +397,8 @@ __nf_conntrack_confirm(struct sk_buff *skb) - if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) - return NF_ACCEPT; - -- hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); -- repl_hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_REPLY].tuple); -+ hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); -+ repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); - - /* We're not in hash table, and we refuse to set up related - connections for unconfirmed conns. But packet copies and -@@ -469,7 +468,7 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple, - struct net *net = nf_ct_net(ignored_conntrack); - struct nf_conntrack_tuple_hash *h; - struct hlist_nulls_node *n; -- unsigned int hash = hash_conntrack(net, tuple); -+ unsigned int hash = hash_conntrack(tuple); - - /* Disable BHs the entire time since we need to disable them at - * least once for the stats anyway. -@@ -504,7 +503,7 @@ static noinline int early_drop(struct net *net, unsigned int hash) - int dropped = 0; - - rcu_read_lock(); -- for (i = 0; i < net->ct.htable_size; i++) { -+ for (i = 0; i < nf_conntrack_htable_size; i++) { - hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], - hnnode) { - tmp = nf_ct_tuplehash_to_ctrack(h); -@@ -518,8 +517,7 @@ static noinline int early_drop(struct net *net, unsigned int hash) - ct = NULL; - if (ct || cnt >= NF_CT_EVICTION_RANGE) - break; -- -- hash = (hash + 1) % net->ct.htable_size; -+ hash = (hash + 1) % nf_conntrack_htable_size; - } - rcu_read_unlock(); - -@@ -553,7 +551,7 @@ struct nf_conn *nf_conntrack_alloc(struct net *net, - - if (nf_conntrack_max && - unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) { -- unsigned int hash = hash_conntrack(net, orig); -+ unsigned int hash = hash_conntrack(orig); - if (!early_drop(net, hash)) { - atomic_dec(&net->ct.count); - if (net_ratelimit()) -@@ -568,7 +566,7 @@ struct nf_conn *nf_conntrack_alloc(struct net *net, - * Do not use kmem_cache_zalloc(), as this cache uses - * SLAB_DESTROY_BY_RCU. 
- */ -- ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp); -+ ct = kmem_cache_alloc(nf_conntrack_cachep, gfp); - if (ct == NULL) { - pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n"); - atomic_dec(&net->ct.count); -@@ -607,7 +605,7 @@ void nf_conntrack_free(struct nf_conn *ct) - nf_ct_ext_destroy(ct); - atomic_dec(&net->ct.count); - nf_ct_ext_free(ct); -- kmem_cache_free(net->ct.nf_conntrack_cachep, ct); -+ kmem_cache_free(nf_conntrack_cachep, ct); - } - EXPORT_SYMBOL_GPL(nf_conntrack_free); - -@@ -1010,7 +1008,7 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data), - struct hlist_nulls_node *n; - - spin_lock_bh(&nf_conntrack_lock); -- for (; *bucket < net->ct.htable_size; (*bucket)++) { -+ for (; *bucket < nf_conntrack_htable_size; (*bucket)++) { - hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) { - ct = nf_ct_tuplehash_to_ctrack(h); - if (iter(ct, data)) -@@ -1109,12 +1107,9 @@ static void nf_ct_release_dying_list(struct net *net) - - static void nf_conntrack_cleanup_init_net(void) - { -- /* wait until all references to nf_conntrack_untracked are dropped */ -- while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1) -- schedule(); -- - nf_conntrack_helper_fini(); - nf_conntrack_proto_fini(); -+ kmem_cache_destroy(nf_conntrack_cachep); - } - - static void nf_conntrack_cleanup_net(struct net *net) -@@ -1126,14 +1121,15 @@ static void nf_conntrack_cleanup_net(struct net *net) - schedule(); - goto i_see_dead_people; - } -+ /* wait until all references to nf_conntrack_untracked are dropped */ -+ while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1) -+ schedule(); - - nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc, -- net->ct.htable_size); -+ nf_conntrack_htable_size); - nf_conntrack_ecache_fini(net); - nf_conntrack_acct_fini(net); - nf_conntrack_expect_fini(net); -- kmem_cache_destroy(net->ct.nf_conntrack_cachep); -- kfree(net->ct.slabname); - free_percpu(net->ct.stat); - } - -@@ -1188,12 +1184,10 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp) - { - int i, bucket, vmalloced, old_vmalloced; - unsigned int hashsize, old_size; -+ int rnd; - struct hlist_nulls_head *hash, *old_hash; - struct nf_conntrack_tuple_hash *h; - -- if (current->nsproxy->net_ns != &init_net) -- return -EOPNOTSUPP; -- - /* On boot, we can set this without any fancy locking. */ - if (!nf_conntrack_htable_size) - return param_set_uint(val, kp); -@@ -1206,29 +1200,33 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp) - if (!hash) - return -ENOMEM; - -+ /* We have to rehahs for the new table anyway, so we also can -+ * use a newrandom seed */ -+ get_random_bytes(&rnd, sizeof(rnd)); -+ - /* Lookups in the old hash might happen in parallel, which means we - * might get false negatives during connection lookup. New connections - * created because of a false negative won't make it into the hash - * though since that required taking the lock. 
- */ - spin_lock_bh(&nf_conntrack_lock); -- for (i = 0; i < init_net.ct.htable_size; i++) { -+ for (i = 0; i < nf_conntrack_htable_size; i++) { - while (!hlist_nulls_empty(&init_net.ct.hash[i])) { - h = hlist_nulls_entry(init_net.ct.hash[i].first, - struct nf_conntrack_tuple_hash, hnnode); - hlist_nulls_del_rcu(&h->hnnode); -- bucket = __hash_conntrack(&h->tuple, hashsize, -- nf_conntrack_hash_rnd); -+ bucket = __hash_conntrack(&h->tuple, hashsize, rnd); - hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]); - } - } -- old_size = init_net.ct.htable_size; -+ old_size = nf_conntrack_htable_size; - old_vmalloced = init_net.ct.hash_vmalloc; - old_hash = init_net.ct.hash; - -- init_net.ct.htable_size = nf_conntrack_htable_size = hashsize; -+ nf_conntrack_htable_size = hashsize; - init_net.ct.hash_vmalloc = vmalloced; - init_net.ct.hash = hash; -+ nf_conntrack_hash_rnd = rnd; - spin_unlock_bh(&nf_conntrack_lock); - - nf_ct_free_hashtable(old_hash, old_vmalloced, old_size); -@@ -1267,6 +1265,15 @@ static int nf_conntrack_init_init_net(void) - NF_CONNTRACK_VERSION, nf_conntrack_htable_size, - nf_conntrack_max); - -+ nf_conntrack_cachep = kmem_cache_create("nf_conntrack", -+ sizeof(struct nf_conn), -+ 0, SLAB_DESTROY_BY_RCU, NULL); -+ if (!nf_conntrack_cachep) { -+ printk(KERN_ERR "Unable to create nf_conn slab cache\n"); -+ ret = -ENOMEM; -+ goto err_cache; -+ } ++ return ret; ++} + - ret = nf_conntrack_proto_init(); - if (ret < 0) - goto err_proto; -@@ -1275,19 +1282,13 @@ static int nf_conntrack_init_init_net(void) - if (ret < 0) - goto err_helper; - -- /* Set up fake conntrack: to never be deleted, not in any hashes */ --#ifdef CONFIG_NET_NS -- nf_conntrack_untracked.ct_net = &init_net; --#endif -- atomic_set(&nf_conntrack_untracked.ct_general.use, 1); -- /* - and look it like as a confirmed connection */ -- set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status); -- - return 0; - - err_helper: - nf_conntrack_proto_fini(); - err_proto: -+ kmem_cache_destroy(nf_conntrack_cachep); -+err_cache: - return ret; - } - -@@ -1309,24 +1310,7 @@ static int nf_conntrack_init_net(struct net *net) - ret = -ENOMEM; - goto err_stat; - } -- -- net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net); -- if (!net->ct.slabname) { -- ret = -ENOMEM; -- goto err_slabname; -- } -- -- net->ct.nf_conntrack_cachep = kmem_cache_create(net->ct.slabname, -- sizeof(struct nf_conn), 0, -- SLAB_DESTROY_BY_RCU, NULL); -- if (!net->ct.nf_conntrack_cachep) { -- printk(KERN_ERR "Unable to create nf_conn slab cache\n"); -- ret = -ENOMEM; -- goto err_cache; -- } -- -- net->ct.htable_size = nf_conntrack_htable_size; -- net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size, -+ net->ct.hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size, - &net->ct.hash_vmalloc, 1); - if (!net->ct.hash) { - ret = -ENOMEM; -@@ -1343,6 +1327,15 @@ static int nf_conntrack_init_net(struct net *net) - if (ret < 0) - goto err_ecache; - -+ /* Set up fake conntrack: -+ - to never be deleted, not in any hashes */ -+#ifdef CONFIG_NET_NS -+ nf_conntrack_untracked.ct_net = &init_net; -+#endif -+ atomic_set(&nf_conntrack_untracked.ct_general.use, 1); -+ /* - and look it like as a confirmed connection */ -+ set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status); + - return 0; - - err_ecache: -@@ -1351,12 +1344,8 @@ err_acct: - nf_conntrack_expect_fini(net); - err_expect: - nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc, -- net->ct.htable_size); -+ nf_conntrack_htable_size); - err_hash: -- 
kmem_cache_destroy(net->ct.nf_conntrack_cachep); --err_cache: -- kfree(net->ct.slabname); --err_slabname: - free_percpu(net->ct.stat); - err_stat: - return ret; -diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c -index e73eb04..2032dfe 100644 ---- a/net/netfilter/nf_conntrack_expect.c -+++ b/net/netfilter/nf_conntrack_expect.c -@@ -569,7 +569,7 @@ static void exp_proc_remove(struct net *net) - #endif /* CONFIG_PROC_FS */ - } - --module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400); -+module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0600); - - int nf_conntrack_expect_init(struct net *net) - { -@@ -577,7 +577,7 @@ int nf_conntrack_expect_init(struct net *net) - - if (net_eq(net, &init_net)) { - if (!nf_ct_expect_hsize) { -- nf_ct_expect_hsize = net->ct.htable_size / 256; -+ nf_ct_expect_hsize = nf_conntrack_htable_size / 256; - if (!nf_ct_expect_hsize) - nf_ct_expect_hsize = 1; - } -diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c -index 7dfd469..5509dd1 100644 ---- a/net/netfilter/nf_conntrack_ftp.c -+++ b/net/netfilter/nf_conntrack_ftp.c -@@ -323,24 +323,24 @@ static void update_nl_seq(struct nf_conn *ct, u32 nl_seq, - struct nf_ct_ftp_master *info, int dir, - struct sk_buff *skb) - { -- unsigned int i, oldest; -+ unsigned int i, oldest = NUM_SEQ_TO_REMEMBER; - - /* Look for oldest: if we find exact match, we're done. */ - for (i = 0; i < info->seq_aft_nl_num[dir]; i++) { - if (info->seq_aft_nl[dir][i] == nl_seq) - return; -+ -+ if (oldest == info->seq_aft_nl_num[dir] || -+ before(info->seq_aft_nl[dir][i], -+ info->seq_aft_nl[dir][oldest])) -+ oldest = i; - } - - if (info->seq_aft_nl_num[dir] < NUM_SEQ_TO_REMEMBER) { - info->seq_aft_nl[dir][info->seq_aft_nl_num[dir]++] = nl_seq; -- } else { -- if (before(info->seq_aft_nl[dir][0], info->seq_aft_nl[dir][1])) -- oldest = 0; -- else -- oldest = 1; -- -- if (after(nl_seq, info->seq_aft_nl[dir][oldest])) -- info->seq_aft_nl[dir][oldest] = nl_seq; -+ } else if (oldest != NUM_SEQ_TO_REMEMBER && -+ after(nl_seq, info->seq_aft_nl[dir][oldest])) { -+ info->seq_aft_nl[dir][oldest] = nl_seq; - } - } - -diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c -index 4b1a56b..65c2a7b 100644 ---- a/net/netfilter/nf_conntrack_helper.c -+++ b/net/netfilter/nf_conntrack_helper.c -@@ -192,7 +192,7 @@ static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me, - /* Get rid of expecteds, set helpers to NULL. 
*/ - hlist_nulls_for_each_entry(h, nn, &net->ct.unconfirmed, hnnode) - unhelp(h, me); -- for (i = 0; i < net->ct.htable_size; i++) { -+ for (i = 0; i < nf_conntrack_htable_size; i++) { - hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode) - unhelp(h, me); - } -diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c -index d521718..59d8064 100644 ---- a/net/netfilter/nf_conntrack_netlink.c -+++ b/net/netfilter/nf_conntrack_netlink.c -@@ -594,7 +594,7 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb) - - rcu_read_lock(); - last = (struct nf_conn *)cb->args[1]; -- for (; cb->args[0] < init_net.ct.htable_size; cb->args[0]++) { -+ for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) { - restart: - hlist_nulls_for_each_entry_rcu(h, n, &init_net.ct.hash[cb->args[0]], - hnnode) { -diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c -index 1a84bf6..1935153 100644 ---- a/net/netfilter/nf_conntrack_standalone.c -+++ b/net/netfilter/nf_conntrack_standalone.c -@@ -51,7 +51,7 @@ static struct hlist_nulls_node *ct_get_first(struct seq_file *seq) - struct hlist_nulls_node *n; - - for (st->bucket = 0; -- st->bucket < net->ct.htable_size; -+ st->bucket < nf_conntrack_htable_size; - st->bucket++) { - n = rcu_dereference(net->ct.hash[st->bucket].first); - if (!is_a_nulls(n)) -@@ -69,7 +69,7 @@ static struct hlist_nulls_node *ct_get_next(struct seq_file *seq, - head = rcu_dereference(head->next); - while (is_a_nulls(head)) { - if (likely(get_nulls_value(head) == st->bucket)) { -- if (++st->bucket >= net->ct.htable_size) -+ if (++st->bucket >= nf_conntrack_htable_size) - return NULL; - } - head = rcu_dereference(net->ct.hash[st->bucket].first); -@@ -358,7 +358,7 @@ static ctl_table nf_ct_sysctl_table[] = { - { - .ctl_name = NET_NF_CONNTRACK_BUCKETS, - .procname = "nf_conntrack_buckets", -- .data = &init_net.ct.htable_size, -+ .data = &nf_conntrack_htable_size, - .maxlen = sizeof(unsigned int), - .mode = 0444, - .proc_handler = proc_dointvec, -@@ -429,7 +429,6 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net) - goto out_kmemdup; - - table[1].data = &net->ct.count; -- table[2].data = &net->ct.htable_size; - table[3].data = &net->ct.sysctl_checksum; - table[4].data = &net->ct.sysctl_log_invalid; - -diff --git a/net/netfilter/xt_conntrack.c b/net/netfilter/xt_conntrack.c -index ae66305..6dc4652 100644 ---- a/net/netfilter/xt_conntrack.c -+++ b/net/netfilter/xt_conntrack.c -@@ -113,8 +113,7 @@ ct_proto_port_check(const struct xt_conntrack_mtinfo2 *info, - } - - static bool --conntrack_mt(const struct sk_buff *skb, const struct xt_match_param *par, -- u16 state_mask, u16 status_mask) -+conntrack_mt(const struct sk_buff *skb, const struct xt_match_param *par) - { - const struct xt_conntrack_mtinfo2 *info = par->matchinfo; - enum ip_conntrack_info ctinfo; -@@ -137,7 +136,7 @@ conntrack_mt(const struct sk_buff *skb, const struct xt_match_param *par, - if (test_bit(IPS_DST_NAT_BIT, &ct->status)) - statebit |= XT_CONNTRACK_STATE_DNAT; - } -- if (!!(state_mask & statebit) ^ -+ if (!!(info->state_mask & statebit) ^ - !(info->invert_flags & XT_CONNTRACK_STATE)) - return false; - } -@@ -173,7 +172,7 @@ conntrack_mt(const struct sk_buff *skb, const struct xt_match_param *par, - return false; - - if ((info->match_flags & XT_CONNTRACK_STATUS) && -- (!!(status_mask & ct->status) ^ -+ (!!(info->status_mask & ct->status) ^ - !(info->invert_flags & XT_CONNTRACK_STATUS))) - return false; - -@@ -193,17 
+192,11 @@ conntrack_mt(const struct sk_buff *skb, const struct xt_match_param *par, - static bool - conntrack_mt_v1(const struct sk_buff *skb, const struct xt_match_param *par) - { -- const struct xt_conntrack_mtinfo1 *info = par->matchinfo; -+ const struct xt_conntrack_mtinfo2 *const *info = par->matchinfo; -+ struct xt_match_param newpar = *par; - -- return conntrack_mt(skb, par, info->state_mask, info->status_mask); --} -- --static bool --conntrack_mt_v2(const struct sk_buff *skb, const struct xt_match_param *par) --{ -- const struct xt_conntrack_mtinfo2 *info = par->matchinfo; -- -- return conntrack_mt(skb, par, info->state_mask, info->status_mask); -+ newpar.matchinfo = *info; -+ return conntrack_mt(skb, &newpar); - } - - static bool conntrack_mt_check(const struct xt_mtchk_param *par) -@@ -216,11 +209,45 @@ static bool conntrack_mt_check(const struct xt_mtchk_param *par) - return true; - } - -+static bool conntrack_mt_check_v1(const struct xt_mtchk_param *par) ++asmlinkage long sys_release_ts(lt_t __user *__delay) +{ -+ struct xt_conntrack_mtinfo1 *info = par->matchinfo; -+ struct xt_conntrack_mtinfo2 *up; -+ int ret = conntrack_mt_check(par); ++ long ret; ++ lt_t delay; + -+ if (ret < 0) -+ return ret; ++ /* FIXME: check capabilities... */ + -+ up = kmalloc(sizeof(*up), GFP_KERNEL); -+ if (up == NULL) { -+ nf_ct_l3proto_module_put(par->family); -+ return -ENOMEM; ++ ret = copy_from_user(&delay, __delay, sizeof(delay)); ++ if (ret == 0) ++ ret = do_release_ts(litmus_clock() + delay); ++ ++ return ret; ++} +diff --git a/litmus/trace.c b/litmus/trace.c +new file mode 100644 +index 0000000..4403769 +--- /dev/null ++++ b/litmus/trace.c +@@ -0,0 +1,103 @@ ++#include ++ ++#include ++#include ++#include ++ ++/******************************************************************************/ ++/* Allocation */ ++/******************************************************************************/ ++ ++static struct ftdev overhead_dev; ++ ++#define trace_ts_buf overhead_dev.minor[0].buf ++ ++static unsigned int ts_seq_no = 0; ++ ++static inline void __save_timestamp_cpu(unsigned long event, ++ uint8_t type, uint8_t cpu) ++{ ++ unsigned int seq_no; ++ struct timestamp *ts; ++ seq_no = fetch_and_inc((int *) &ts_seq_no); ++ if (ft_buffer_start_write(trace_ts_buf, (void**) &ts)) { ++ ts->event = event; ++ ts->timestamp = ft_timestamp(); ++ ts->seq_no = seq_no; ++ ts->cpu = cpu; ++ ts->task_type = type; ++ ft_buffer_finish_write(trace_ts_buf, ts); + } ++} + -+ /* -+ * The strategy here is to minimize the overhead of v1 matching, -+ * by prebuilding a v2 struct and putting the pointer into the -+ * v1 dataspace. 
-+ */ -+ memcpy(up, info, offsetof(typeof(*info), state_mask)); -+ up->state_mask = info->state_mask; -+ up->status_mask = info->status_mask; -+ *(void **)info = up; -+ return true; ++static inline void __save_timestamp(unsigned long event, ++ uint8_t type) ++{ ++ __save_timestamp_cpu(event, type, raw_smp_processor_id()); +} + - static void conntrack_mt_destroy(const struct xt_mtdtor_param *par) - { - nf_ct_l3proto_module_put(par->family); - } - -+static void conntrack_mt_destroy_v1(const struct xt_mtdtor_param *par) -+{ -+ struct xt_conntrack_mtinfo2 **info = par->matchinfo; -+ kfree(*info); -+ conntrack_mt_destroy(par); -+} -+ - static struct xt_match conntrack_mt_reg[] __read_mostly = { - { - .name = "conntrack", -@@ -228,8 +255,8 @@ static struct xt_match conntrack_mt_reg[] __read_mostly = { - .family = NFPROTO_UNSPEC, - .matchsize = sizeof(struct xt_conntrack_mtinfo1), - .match = conntrack_mt_v1, -- .checkentry = conntrack_mt_check, -- .destroy = conntrack_mt_destroy, -+ .checkentry = conntrack_mt_check_v1, -+ .destroy = conntrack_mt_destroy_v1, - .me = THIS_MODULE, - }, - { -@@ -237,7 +264,7 @@ static struct xt_match conntrack_mt_reg[] __read_mostly = { - .revision = 2, - .family = NFPROTO_UNSPEC, - .matchsize = sizeof(struct xt_conntrack_mtinfo2), -- .match = conntrack_mt_v2, -+ .match = conntrack_mt, - .checkentry = conntrack_mt_check, - .destroy = conntrack_mt_destroy, - .me = THIS_MODULE, -diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c -index 850ffc0..4eb1ac9 100644 ---- a/net/netrom/nr_route.c -+++ b/net/netrom/nr_route.c -@@ -842,13 +842,12 @@ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25) - dptr = skb_push(skb, 1); - *dptr = AX25_P_NETROM; - -- ax25s = nr_neigh->ax25; -- nr_neigh->ax25 = ax25_send_frame(skb, 256, -- (ax25_address *)dev->dev_addr, -- &nr_neigh->callsign, -- nr_neigh->digipeat, nr_neigh->dev); -- if (ax25s) -+ ax25s = ax25_send_frame(skb, 256, (ax25_address *)dev->dev_addr, &nr_neigh->callsign, nr_neigh->digipeat, nr_neigh->dev); -+ if (nr_neigh->ax25 && ax25s) { -+ /* We were already holding this ax25_cb */ - ax25_cb_put(ax25s); ++feather_callback void save_timestamp(unsigned long event) ++{ ++ __save_timestamp(event, TSK_UNKNOWN); ++} ++ ++feather_callback void save_timestamp_def(unsigned long event, ++ unsigned long type) ++{ ++ __save_timestamp(event, (uint8_t) type); ++} ++ ++feather_callback void save_timestamp_task(unsigned long event, ++ unsigned long t_ptr) ++{ ++ int rt = is_realtime((struct task_struct *) t_ptr); ++ __save_timestamp(event, rt ? 
TSK_RT : TSK_BE); ++} ++ ++feather_callback void save_timestamp_cpu(unsigned long event, ++ unsigned long cpu) ++{ ++ __save_timestamp_cpu(event, TSK_UNKNOWN, cpu); ++} ++ ++/******************************************************************************/ ++/* DEVICE FILE DRIVER */ ++/******************************************************************************/ ++ ++/* ++ * should be 8M; it is the max we can ask to buddy system allocator (MAX_ORDER) ++ * and we might not get as much ++ */ ++#define NO_TIMESTAMPS (2 << 11) ++ ++/* set MAJOR to 0 to have it dynamically assigned */ ++#define FT_TRACE_MAJOR 252 ++ ++static int alloc_timestamp_buffer(struct ftdev* ftdev, unsigned int idx) ++{ ++ unsigned int count = NO_TIMESTAMPS; ++ while (count && !trace_ts_buf) { ++ printk("time stamp buffer: trying to allocate %u time stamps.\n", count); ++ ftdev->minor[idx].buf = alloc_ft_buffer(count, sizeof(struct timestamp)); ++ count /= 2; + } -+ nr_neigh->ax25 = ax25s; - - dev_put(dev); - ret = (nr_neigh->ax25 != NULL); -diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c -index 41866eb..f2d116a 100644 ---- a/net/packet/af_packet.c -+++ b/net/packet/af_packet.c -@@ -1028,20 +1028,8 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) - - status = TP_STATUS_SEND_REQUEST; - err = dev_queue_xmit(skb); -- if (unlikely(err > 0)) { -- err = net_xmit_errno(err); -- if (err && __packet_get_status(po, ph) == -- TP_STATUS_AVAILABLE) { -- /* skb was destructed already */ -- skb = NULL; -- goto out_status; -- } -- /* -- * skb was dropped but not destructed yet; -- * let's treat it like congestion or err < 0 -- */ -- err = 0; -- } -+ if (unlikely(err > 0 && (err = net_xmit_errno(err)) != 0)) -+ goto out_xmit; - packet_increment_head(&po->tx_ring); - len_sum += tp_len; - } while (likely((ph != NULL) || ((!(msg->msg_flags & MSG_DONTWAIT)) -@@ -1051,6 +1039,9 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) - err = len_sum; - goto out_put; - -+out_xmit: -+ skb->destructor = sock_wfree; -+ atomic_dec(&po->tx_ring.pending); - out_status: - __packet_set_status(po, ph, status); - kfree_skb(skb); -diff --git a/net/rose/rose_link.c b/net/rose/rose_link.c -index 5ef5f69..bd86a63 100644 ---- a/net/rose/rose_link.c -+++ b/net/rose/rose_link.c -@@ -101,17 +101,13 @@ static void rose_t0timer_expiry(unsigned long param) - static int rose_send_frame(struct sk_buff *skb, struct rose_neigh *neigh) - { - ax25_address *rose_call; -- ax25_cb *ax25s; - - if (ax25cmp(&rose_callsign, &null_ax25_address) == 0) - rose_call = (ax25_address *)neigh->dev->dev_addr; - else - rose_call = &rose_callsign; - -- ax25s = neigh->ax25; - neigh->ax25 = ax25_send_frame(skb, 260, rose_call, &neigh->callsign, neigh->digipeat, neigh->dev); -- if (ax25s) -- ax25_cb_put(ax25s); - - return (neigh->ax25 != NULL); - } -@@ -124,17 +120,13 @@ static int rose_send_frame(struct sk_buff *skb, struct rose_neigh *neigh) - static int rose_link_up(struct rose_neigh *neigh) - { - ax25_address *rose_call; -- ax25_cb *ax25s; - - if (ax25cmp(&rose_callsign, &null_ax25_address) == 0) - rose_call = (ax25_address *)neigh->dev->dev_addr; - else - rose_call = &rose_callsign; - -- ax25s = neigh->ax25; - neigh->ax25 = ax25_find_cb(rose_call, &neigh->callsign, neigh->digipeat, neigh->dev); -- if (ax25s) -- ax25_cb_put(ax25s); - - return (neigh->ax25 != NULL); - } -diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c -index 08230fa..f3e2198 100644 ---- a/net/rose/rose_route.c -+++ b/net/rose/rose_route.c -@@ -234,8 +234,6 @@ 
static void rose_remove_neigh(struct rose_neigh *rose_neigh) - - if ((s = rose_neigh_list) == rose_neigh) { - rose_neigh_list = rose_neigh->next; -- if (rose_neigh->ax25) -- ax25_cb_put(rose_neigh->ax25); - kfree(rose_neigh->digipeat); - kfree(rose_neigh); - return; -@@ -244,8 +242,6 @@ static void rose_remove_neigh(struct rose_neigh *rose_neigh) - while (s != NULL && s->next != NULL) { - if (s->next == rose_neigh) { - s->next = rose_neigh->next; -- if (rose_neigh->ax25) -- ax25_cb_put(rose_neigh->ax25); - kfree(rose_neigh->digipeat); - kfree(rose_neigh); - return; -@@ -814,7 +810,6 @@ void rose_link_failed(ax25_cb *ax25, int reason) - - if (rose_neigh != NULL) { - rose_neigh->ax25 = NULL; -- ax25_cb_put(ax25); - - rose_del_route_by_neigh(rose_neigh); - rose_kill_by_neigh(rose_neigh); -diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c -index 9c5a19d..fc6a43c 100644 ---- a/net/sunrpc/auth_gss/auth_gss.c -+++ b/net/sunrpc/auth_gss/auth_gss.c -@@ -485,7 +485,7 @@ gss_refresh_upcall(struct rpc_task *task) - dprintk("RPC: %5u gss_refresh_upcall for uid %u\n", task->tk_pid, - cred->cr_uid); - gss_msg = gss_setup_upcall(task->tk_client, gss_auth, cred); -- if (PTR_ERR(gss_msg) == -EAGAIN) { -+ if (IS_ERR(gss_msg) == -EAGAIN) { - /* XXX: warning on the first, under the assumption we - * shouldn't normally hit this case on a refresh. */ - warn_gssd(); -@@ -644,22 +644,7 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) - p = gss_fill_context(p, end, ctx, gss_msg->auth->mech); - if (IS_ERR(p)) { - err = PTR_ERR(p); -- switch (err) { -- case -EACCES: -- gss_msg->msg.errno = err; -- err = mlen; -- break; -- case -EFAULT: -- case -ENOMEM: -- case -EINVAL: -- case -ENOSYS: -- gss_msg->msg.errno = -EAGAIN; -- break; -- default: -- printk(KERN_CRIT "%s: bad return from " -- "gss_fill_context: %ld\n", __func__, err); -- BUG(); -- } -+ gss_msg->msg.errno = (err == -EAGAIN) ? 
-EAGAIN : -EACCES; - goto err_release_msg; - } - gss_msg->ctx = gss_get_ctx(ctx); -diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c -index 2deb0ed..ef45eba 100644 ---- a/net/sunrpc/auth_gss/gss_krb5_mech.c -+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c -@@ -131,10 +131,8 @@ gss_import_sec_context_kerberos(const void *p, - struct krb5_ctx *ctx; - int tmp; - -- if (!(ctx = kzalloc(sizeof(*ctx), GFP_NOFS))) { -- p = ERR_PTR(-ENOMEM); -+ if (!(ctx = kzalloc(sizeof(*ctx), GFP_NOFS))) - goto out_err; -- } - - p = simple_get_bytes(p, end, &ctx->initiate, sizeof(ctx->initiate)); - if (IS_ERR(p)) -diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c -index 76e4c6f..6efbb0c 100644 ---- a/net/sunrpc/auth_gss/gss_mech_switch.c -+++ b/net/sunrpc/auth_gss/gss_mech_switch.c -@@ -252,7 +252,7 @@ gss_import_sec_context(const void *input_token, size_t bufsize, - struct gss_ctx **ctx_id) - { - if (!(*ctx_id = kzalloc(sizeof(**ctx_id), GFP_KERNEL))) -- return -ENOMEM; -+ return GSS_S_FAILURE; - (*ctx_id)->mech_type = gss_mech_get(mech); - - return mech->gm_ops -diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c -index 0266cca..df124f7 100644 ---- a/net/sunrpc/svc_xprt.c -+++ b/net/sunrpc/svc_xprt.c -@@ -711,8 +711,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) - spin_unlock_bh(&pool->sp_lock); - - len = 0; -- if (test_bit(XPT_LISTENER, &xprt->xpt_flags) && -- !test_bit(XPT_CLOSE, &xprt->xpt_flags)) { -+ if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) { - struct svc_xprt *newxpt; - newxpt = xprt->xpt_ops->xpo_accept(xprt); - if (newxpt) { -diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c -index 0d86248..0a6b7a0 100644 ---- a/net/wireless/mlme.c -+++ b/net/wireless/mlme.c -@@ -94,18 +94,7 @@ void cfg80211_send_rx_assoc(struct net_device *dev, const u8 *buf, size_t len) - } - } - -- /* -- * We might be coming here because the driver reported -- * a successful association at the same time as the -- * user requested a deauth. In that case, we will have -- * removed the BSS from the auth_bsses list due to the -- * deauth request when the assoc response makes it. If -- * the two code paths acquire the lock the other way -- * around, that's just the standard situation of a -- * deauth being requested while connected. -- */ -- if (!bss) -- goto out; -+ WARN_ON(!bss); - } else if (wdev->conn) { - cfg80211_sme_failed_assoc(wdev); - need_connect_result = false; -diff --git a/net/wireless/reg.c b/net/wireless/reg.c -index efd24a7..f256dff 100644 ---- a/net/wireless/reg.c -+++ b/net/wireless/reg.c -@@ -1714,7 +1714,7 @@ int regulatory_hint_user(const char *alpha2) - request->wiphy_idx = WIPHY_IDX_STALE; - request->alpha2[0] = alpha2[0]; - request->alpha2[1] = alpha2[1]; -- request->initiator = NL80211_REGDOM_SET_BY_USER; -+ request->initiator = NL80211_REGDOM_SET_BY_USER, - - queue_regulatory_request(request); - -diff --git a/net/wireless/sme.c b/net/wireless/sme.c -index b2930e3..9f0b280 100644 ---- a/net/wireless/sme.c -+++ b/net/wireless/sme.c -@@ -655,7 +655,6 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie, - memset(&wrqu, 0, sizeof(wrqu)); - wrqu.ap_addr.sa_family = ARPHRD_ETHER; - wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); -- wdev->wext.connect.ssid_len = 0; - #endif - } - ++ return ftdev->minor[idx].buf ? 
0 : -ENOMEM; ++} ++ ++static void free_timestamp_buffer(struct ftdev* ftdev, unsigned int idx) ++{ ++ free_ft_buffer(ftdev->minor[idx].buf); ++ ftdev->minor[idx].buf = NULL; ++} ++ ++static int __init init_ft_overhead_trace(void) ++{ ++ printk("Initializing Feather-Trace overhead tracing device.\n"); ++ ftdev_init(&overhead_dev, THIS_MODULE); ++ overhead_dev.minor_cnt = 1; /* only one buffer */ ++ overhead_dev.alloc = alloc_timestamp_buffer; ++ overhead_dev.free = free_timestamp_buffer; ++ return register_ftdev(&overhead_dev, "ft_trace", FT_TRACE_MAJOR); ++} ++ ++module_init(init_ft_overhead_trace); -- cgit v1.2.2