author     Thomas Gleixner <tglx@linutronix.de>   2010-04-27 05:11:07 -0400
committer  Thomas Gleixner <tglx@linutronix.de>   2010-04-27 05:11:07 -0400
commit     846bcf3923056965c3fc0c9ba811c10d25af75e5 (patch)
tree       c39bd4273a006da2fb1f727ebb7eb1108f5526c8
parent     863c3ad87b10617464fc52c21fc7e31987910559 (diff)
parent     3e7ad8ed979ce9e3ad897dd87c50bf577966f89c (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-2.6.33.y into rt/2.6.33
Conflicts:
Makefile
arch/x86/include/asm/rwsem.h
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
144 files changed, 1492 insertions, 681 deletions
diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801
index 81c0c59a60ea..e1bb5b261693 100644
--- a/Documentation/i2c/busses/i2c-i801
+++ b/Documentation/i2c/busses/i2c-i801
@@ -15,7 +15,8 @@ Supported adapters:
   * Intel 82801I (ICH9)
   * Intel EP80579 (Tolapai)
   * Intel 82801JI (ICH10)
-  * Intel PCH
+  * Intel 3400/5 Series (PCH)
+  * Intel Cougar Point (PCH)
    Datasheets: Publicly available at the Intel website
 
 Authors:
diff --git a/Makefile b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 33
-EXTRAVERSION = .2-rt13
+EXTRAVERSION = .3-rt13
 NAME = Man-Eating Seals of Antiquity
 
 # *DOCUMENTATION*
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 6b84a0471171..cbeb6e0de5cd 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -172,7 +172,7 @@ not_angel:
 		adr	r0, LC0
  ARM(		ldmia	r0, {r1, r2, r3, r4, r5, r6, r11, ip, sp})
  THUMB(		ldmia	r0, {r1, r2, r3, r4, r5, r6, r11, ip}	)
- THUMB(		ldr	sp, [r0, #28]	)
+ THUMB(		ldr	sp, [r0, #32]	)
 		subs	r0, r0, r1		@ calculate the delta offset
 
 						@ if delta is zero, we are
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 5fdeec5fddcf..d76279aaaea1 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -1794,7 +1794,8 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
 {
 	struct kvm_memory_slot *memslot;
 	int r, i;
-	long n, base;
+	long base;
+	unsigned long n;
 	unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base +
 			offsetof(struct kvm_vm_data, kvm_mem_dirty_log));
 
@@ -1807,7 +1808,7 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
 	if (!memslot->dirty_bitmap)
 		goto out;
 
-	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+	n = kvm_dirty_bitmap_bytes(memslot);
 	base = memslot->base_gfn / BITS_PER_LONG;
 
 	for (i = 0; i < n/sizeof(long); ++i) {
@@ -1823,7 +1824,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 		struct kvm_dirty_log *log)
 {
 	int r;
-	int n;
+	unsigned long n;
 	struct kvm_memory_slot *memslot;
 	int is_dirty = 0;
 
@@ -1841,7 +1842,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 	if (is_dirty) {
 		kvm_flush_remote_tlbs(kvm);
 		memslot = &kvm->memslots[log->slot];
-		n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+		n = kvm_dirty_bitmap_bytes(memslot);
 		memset(memslot->dirty_bitmap, 0, n);
 	}
 	r = 0;
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 3e294bd9b8c6..e6dc59558fc1 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -848,7 +848,8 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 	struct kvm_vcpu *vcpu;
 	ulong ga, ga_end;
 	int is_dirty = 0;
-	int r, n;
+	int r;
+	unsigned long n;
 
 	down_write(&kvm->slots_lock);
 
@@ -866,7 +867,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 		kvm_for_each_vcpu(n, vcpu, kvm)
 			kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);
 
-		n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+		n = kvm_dirty_bitmap_bytes(memslot);
 		memset(memslot->dirty_bitmap, 0, n);
 	}
 
diff --git a/arch/powerpc/platforms/pseries/offline_states.h b/arch/powerpc/platforms/pseries/offline_states.h
index 22574e0d9d91..202d8692c6a2 100644
--- a/arch/powerpc/platforms/pseries/offline_states.h
+++ b/arch/powerpc/platforms/pseries/offline_states.h
@@ -9,10 +9,30 @@ enum cpu_state_vals {
 	CPU_MAX_OFFLINE_STATES
 };
 
+#ifdef CONFIG_HOTPLUG_CPU
 extern enum cpu_state_vals get_cpu_current_state(int cpu);
 extern void set_cpu_current_state(int cpu, enum cpu_state_vals state);
-extern enum cpu_state_vals get_preferred_offline_state(int cpu);
 extern void set_preferred_offline_state(int cpu, enum cpu_state_vals state);
 extern void set_default_offline_state(int cpu);
+#else
+static inline enum cpu_state_vals get_cpu_current_state(int cpu)
+{
+	return CPU_STATE_ONLINE;
+}
+
+static inline void set_cpu_current_state(int cpu, enum cpu_state_vals state)
+{
+}
+
+static inline void set_preferred_offline_state(int cpu, enum cpu_state_vals state)
+{
+}
+
+static inline void set_default_offline_state(int cpu)
+{
+}
+#endif
+
+extern enum cpu_state_vals get_preferred_offline_state(int cpu);
 extern int start_secondary(void);
 #endif
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 300ab012b0fd..5f91a38d7592 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -70,12 +70,8 @@ static pte_t __ref *vmem_pte_alloc(void)
 		pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
 	if (!pte)
 		return NULL;
-	if (MACHINE_HAS_HPAGE)
-		clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY | _PAGE_CO,
-			    PTRS_PER_PTE * sizeof(pte_t));
-	else
-		clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
-			    PTRS_PER_PTE * sizeof(pte_t));
+	clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
+		    PTRS_PER_PTE * sizeof(pte_t));
 	return pte;
 }
 
@@ -116,8 +112,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 		if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) &&
 		    (address + HPAGE_SIZE <= start + size) &&
 		    (address >= HPAGE_SIZE)) {
-			pte_val(pte) |= _SEGMENT_ENTRY_LARGE |
-					_SEGMENT_ENTRY_CO;
+			pte_val(pte) |= _SEGMENT_ENTRY_LARGE;
 			pmd_val(*pm_dir) = pte_val(pte);
 			address += HPAGE_SIZE - PAGE_SIZE;
 			continue;
diff --git a/arch/sh/include/asm/elf.h b/arch/sh/include/asm/elf.h
index ac04255022b6..ce830faeebbf 100644
--- a/arch/sh/include/asm/elf.h
+++ b/arch/sh/include/asm/elf.h
@@ -211,7 +211,9 @@ extern void __kernel_vsyscall;
 
 #define VSYSCALL_AUX_ENT					\
 	if (vdso_enabled)					\
-		NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE);
+		NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE);	\
+	else							\
+		NEW_AUX_ENT(AT_IGNORE, 0);
 #else
 #define VSYSCALL_AUX_ENT
 #endif /* CONFIG_VSYSCALL */
@@ -219,7 +221,7 @@ extern void __kernel_vsyscall;
 #ifdef CONFIG_SH_FPU
 #define FPU_AUX_ENT	NEW_AUX_ENT(AT_FPUCW, FPSCR_INIT)
 #else
-#define FPU_AUX_ENT
+#define FPU_AUX_ENT	NEW_AUX_ENT(AT_IGNORE, 0)
 #endif
 
 extern int l1i_cache_shape, l1d_cache_shape, l2_cache_shape;
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index 983e0792d5f3..1d19c199c73d 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -69,6 +69,7 @@ asmlinkage void __cpuinit start_secondary(void)
 	unsigned int cpu;
 	struct mm_struct *mm = &init_mm;
 
+	enable_mmu();
 	atomic_inc(&mm->mm_count);
 	atomic_inc(&mm->mm_users);
 	current->active_mm = mm;
diff --git a/arch/sparc/kernel/ptrace_32.c b/arch/sparc/kernel/ptrace_32.c
index 7e3dfd9bb97e..e608f397e11f 100644
--- a/arch/sparc/kernel/ptrace_32.c
+++ b/arch/sparc/kernel/ptrace_32.c
@@ -65,6 +65,7 @@ static int genregs32_get(struct task_struct *target,
 			*k++ = regs->u_regs[pos++];
 
 		reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
+		reg_window -= 16;
 		for (; count > 0 && pos < 32; count--) {
 			if (get_user(*k++, &reg_window[pos++]))
 				return -EFAULT;
@@ -76,6 +77,7 @@ static int genregs32_get(struct task_struct *target,
 		}
 
 		reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
+		reg_window -= 16;
 		for (; count > 0 && pos < 32; count--) {
 			if (get_user(reg, &reg_window[pos++]) ||
 			    put_user(reg, u++))
@@ -141,6 +143,7 @@ static int genregs32_set(struct task_struct *target,
 			regs->u_regs[pos++] = *k++;
 
 		reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
+		reg_window -= 16;
 		for (; count > 0 && pos < 32; count--) {
 			if (put_user(*k++, &reg_window[pos++]))
 				return -EFAULT;
@@ -153,6 +156,7 @@ static int genregs32_set(struct task_struct *target,
 		}
 
 		reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
+		reg_window -= 16;
 		for (; count > 0 && pos < 32; count--) {
 			if (get_user(reg, u++) ||
 			    put_user(reg, &reg_window[pos++]))
diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
index 2f6524d1a817..aa90da08bf61 100644
--- a/arch/sparc/kernel/ptrace_64.c
+++ b/arch/sparc/kernel/ptrace_64.c
@@ -492,6 +492,7 @@ static int genregs32_get(struct task_struct *target,
 			*k++ = regs->u_regs[pos++];
 
 		reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
+		reg_window -= 16;
 		if (target == current) {
 			for (; count > 0 && pos < 32; count--) {
 				if (get_user(*k++, &reg_window[pos++]))
@@ -516,6 +517,7 @@ static int genregs32_get(struct task_struct *target,
 		}
 
 		reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
+		reg_window -= 16;
 		if (target == current) {
 			for (; count > 0 && pos < 32; count--) {
 				if (get_user(reg, &reg_window[pos++]) ||
@@ -599,6 +601,7 @@ static int genregs32_set(struct task_struct *target,
 			regs->u_regs[pos++] = *k++;
 
 		reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
+		reg_window -= 16;
 		if (target == current) {
 			for (; count > 0 && pos < 32; count--) {
 				if (put_user(*k++, &reg_window[pos++]))
@@ -625,6 +628,7 @@ static int genregs32_set(struct task_struct *target,
 		}
 
 		reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
+		reg_window -= 16;
 		if (target == current) {
 			for (; count > 0 && pos < 32; count--) {
 				if (get_user(reg, u++) ||
diff --git a/arch/um/sys-x86_64/Makefile b/arch/um/sys-x86_64/Makefile
index 2201e9c20e4a..c1ea9eb04466 100644
--- a/arch/um/sys-x86_64/Makefile
+++ b/arch/um/sys-x86_64/Makefile
@@ -8,7 +8,8 @@ obj-y = bug.o bugs.o delay.o fault.o ldt.o mem.o ptrace.o ptrace_user.o \
 	setjmp.o signal.o stub.o stub_segv.o syscalls.o syscall_table.o \
 	sysrq.o ksyms.o tls.o
 
-subarch-obj-y = lib/csum-partial_64.o lib/memcpy_64.o lib/thunk_64.o
+subarch-obj-y = lib/csum-partial_64.o lib/memcpy_64.o lib/thunk_64.o \
+		lib/rwsem_64.o
 subarch-obj-$(CONFIG_MODULES) += kernel/module.o
 
 ldt-y = ../sys-i386/ldt.o
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index f20ddf84a893..a19829374e6a 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -319,7 +319,7 @@ config X86_L1_CACHE_SHIFT
 
 config X86_XADD
 	def_bool y
-	depends on X86_32 && !M386
+	depends on X86_64 || !M386
 
 config X86_PPRO_FENCE
 	bool "PentiumPro memory ordering errata workaround"
diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
index 605191838e02..42f8a37db6fb 100644
--- a/arch/x86/include/asm/rwsem.h
+++ b/arch/x86/include/asm/rwsem.h
@@ -41,6 +41,7 @@
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <linux/lockdep.h>
+#include <asm/asm.h>
 
 struct rwsem_waiter;
 
@@ -55,17 +56,28 @@ extern asmregparm struct rw_anon_semaphore *
 
 /*
  * the semaphore definition
+ *
+ * The bias values and the counter type limits the number of
+ * potential readers/writers to 32767 for 32 bits and 2147483647
+ * for 64 bits.
  */
 
-#define RWSEM_UNLOCKED_VALUE		0x00000000
-#define RWSEM_ACTIVE_BIAS		0x00000001
-#define RWSEM_ACTIVE_MASK		0x0000ffff
-#define RWSEM_WAITING_BIAS		(-0x00010000)
+#ifdef CONFIG_X86_64
+# define RWSEM_ACTIVE_MASK		0xffffffffL
+#else
+# define RWSEM_ACTIVE_MASK		0x0000ffffL
+#endif
+
+#define RWSEM_UNLOCKED_VALUE		0x00000000L
+#define RWSEM_ACTIVE_BIAS		0x00000001L
+#define RWSEM_WAITING_BIAS		(-RWSEM_ACTIVE_MASK-1)
 #define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
 #define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 
+typedef signed long rwsem_count_t;
+
 struct rw_anon_semaphore {
-	signed long		count;
+	rwsem_count_t		count;
 	raw_spinlock_t		wait_lock;
 	struct list_head	wait_list;
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -104,7 +116,7 @@ do { \
 static inline void __down_read(struct rw_anon_semaphore *sem)
 {
 	asm volatile("# beginning down_read\n\t"
-		     LOCK_PREFIX " incl (%%eax)\n\t"
+		     LOCK_PREFIX _ASM_INC "(%1)\n\t"
 		     /* adds 0x00000001, returns the old value */
 		     " jns 1f\n"
 		     " call call_rwsem_down_read_failed\n"
@@ -120,14 +132,14 @@ static inline void __down_read(struct rw_anon_semaphore *sem)
  */
 static inline int __down_read_trylock(struct rw_anon_semaphore *sem)
 {
-	__s32 result, tmp;
+	rwsem_count_t result, tmp;
 	asm volatile("# beginning __down_read_trylock\n\t"
-		     " movl %0,%1\n\t"
+		     " mov %0,%1\n\t"
 		     "1:\n\t"
-		     " movl %1,%2\n\t"
-		     " addl %3,%2\n\t"
+		     " mov %1,%2\n\t"
+		     " add %3,%2\n\t"
 		     " jle 2f\n\t"
-		     LOCK_PREFIX " cmpxchgl %2,%0\n\t"
+		     LOCK_PREFIX " cmpxchg %2,%0\n\t"
 		     " jnz 1b\n\t"
 		     "2:\n\t"
 		     "# ending __down_read_trylock\n\t"
@@ -143,13 +155,13 @@ static inline int __down_read_trylock(struct rw_anon_semaphore *sem)
 static inline void
 __down_write_nested(struct rw_anon_semaphore *sem, int subclass)
 {
-	int tmp;
+	rwsem_count_t tmp;
 
 	tmp = RWSEM_ACTIVE_WRITE_BIAS;
 	asm volatile("# beginning down_write\n\t"
-		     LOCK_PREFIX " xadd %%edx,(%%eax)\n\t"
+		     LOCK_PREFIX " xadd %1,(%2)\n\t"
 		     /* subtract 0x0000ffff, returns the old value */
-		     " testl %%edx,%%edx\n\t"
+		     " test %1,%1\n\t"
 		     /* was the count 0 before? */
 		     " jz 1f\n"
 		     " call call_rwsem_down_write_failed\n"
@@ -170,9 +182,9 @@ static inline void __down_write(struct rw_anon_semaphore *sem)
  */
 static inline int __down_write_trylock(struct rw_anon_semaphore *sem)
 {
-	signed long ret = cmpxchg(&sem->count,
+	rwsem_count_t ret = cmpxchg(&sem->count,
				    RWSEM_UNLOCKED_VALUE,
				    RWSEM_ACTIVE_WRITE_BIAS);
 	if (ret == RWSEM_UNLOCKED_VALUE)
 		return 1;
 	return 0;
@@ -183,9 +195,9 @@ static inline int __down_write_trylock(struct rw_anon_semaphore *sem)
  */
 static inline void __up_read(struct rw_anon_semaphore *sem)
 {
-	__s32 tmp = -RWSEM_ACTIVE_READ_BIAS;
+	rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
 	asm volatile("# beginning __up_read\n\t"
-		     LOCK_PREFIX " xadd %%edx,(%%eax)\n\t"
+		     LOCK_PREFIX " xadd %1,(%2)\n\t"
 		     /* subtracts 1, returns the old value */
 		     " jns 1f\n\t"
 		     " call call_rwsem_wake\n"
@@ -201,18 +213,18 @@ static inline void __up_read(struct rw_anon_semaphore *sem)
  */
 static inline void __up_write(struct rw_anon_semaphore *sem)
 {
+	rwsem_count_t tmp;
 	asm volatile("# beginning __up_write\n\t"
-		     " movl %2,%%edx\n\t"
-		     LOCK_PREFIX " xaddl %%edx,(%%eax)\n\t"
+		     LOCK_PREFIX " xadd %1,(%2)\n\t"
 		     /* tries to transition
			0xffff0001 -> 0x00000000 */
 		     " jz 1f\n"
 		     " call call_rwsem_wake\n"
 		     "1:\n\t"
 		     "# ending __up_write\n"
-		     : "+m" (sem->count)
-		     : "a" (sem), "i" (-RWSEM_ACTIVE_WRITE_BIAS)
-		     : "memory", "cc", "edx");
+		     : "+m" (sem->count), "=d" (tmp)
+		     : "a" (sem), "1" (-RWSEM_ACTIVE_WRITE_BIAS)
+		     : "memory", "cc");
 }
 
 /*
@@ -221,33 +233,38 @@ static inline void __up_write(struct rw_anon_semaphore *sem)
 static inline void __downgrade_write(struct rw_anon_semaphore *sem)
 {
 	asm volatile("# beginning __downgrade_write\n\t"
-		     LOCK_PREFIX " addl %2,(%%eax)\n\t"
-		     /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
+		     LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
+		     /*
+		      * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
+		      *     0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
+		      */
 		     " jns 1f\n\t"
 		     " call call_rwsem_downgrade_wake\n"
 		     "1:\n\t"
 		     "# ending __downgrade_write\n"
 		     : "+m" (sem->count)
-		     : "a" (sem), "i" (-RWSEM_WAITING_BIAS)
+		     : "a" (sem), "er" (-RWSEM_WAITING_BIAS)
 		     : "memory", "cc");
 }
 
 /*
  * implement atomic add functionality
  */
-static inline void rwsem_atomic_add(int delta, struct rw_anon_semaphore *sem)
+static inline void rwsem_atomic_add(rwsem_count_t delta,
+				    struct rw_anon_semaphore *sem)
 {
-	asm volatile(LOCK_PREFIX "addl %1,%0"
+	asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
		     : "+m" (sem->count)
-		     : "ir" (delta));
+		     : "er" (delta));
 }
 
 /*
  * implement exchange and add functionality
  */
-static inline int rwsem_atomic_update(int delta, struct rw_anon_semaphore *sem)
+static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta,
+						struct rw_anon_semaphore *sem)
 {
-	int tmp = delta;
+	rwsem_count_t tmp = delta;
 
 	asm volatile(LOCK_PREFIX "xadd %0,%1"
 		     : "+r" (tmp), "+m" (sem->count)
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 1e796782cd7b..4cfc90824068 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -135,6 +135,8 @@ int native_cpu_disable(void);
 void native_cpu_die(unsigned int cpu);
 void native_play_dead(void);
 void play_dead_common(void);
+void wbinvd_on_cpu(int cpu);
+int wbinvd_on_all_cpus(void);
 
 void native_send_call_func_ipi(const struct cpumask *mask);
 void native_send_call_func_single_ipi(int cpu);
@@ -147,6 +149,13 @@ static inline int num_booting_cpus(void)
 {
 	return cpumask_weight(cpu_callout_mask);
 }
+#else /* !CONFIG_SMP */
+#define wbinvd_on_cpu(cpu)	wbinvd()
+static inline int wbinvd_on_all_cpus(void)
+{
+	wbinvd();
+	return 0;
+}
 #endif /* CONFIG_SMP */
 
 extern unsigned disabled_cpus __cpuinitdata;
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index adb0ba025702..2e775169bdf0 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -2298,7 +2298,7 @@ static void cleanup_domain(struct protection_domain *domain)
 	list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) {
 		struct device *dev = dev_data->dev;
 
-		do_detach(dev);
+		__detach_device(dev);
 		atomic_set(&dev_data->bind, 0);
 	}
 
@@ -2379,9 +2379,7 @@ static void amd_iommu_domain_destroy(struct iommu_domain *dom)
 
 	free_pagetable(domain);
 
-	domain_id_free(domain->id);
-
-	kfree(domain);
+	protection_domain_free(domain);
 
 	dom->priv = NULL;
 }
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 9dc91b431470..883d61990623 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -1288,6 +1288,8 @@ static int __init amd_iommu_init(void)
 	if (ret)
 		goto free;
 
+	enable_iommus();
+
 	if (iommu_pass_through)
 		ret = amd_iommu_init_passthrough();
 	else
@@ -1300,8 +1302,6 @@ static int __init amd_iommu_init(void)
 
 	amd_iommu_init_notifier();
 
-	enable_iommus();
-
 	if (iommu_pass_through)
 		goto out;
 
@@ -1315,6 +1315,7 @@ out:
 	return ret;
 
 free:
+	disable_iommus();
 
 	amd_iommu_uninit_devices();
 
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index f147a95fd84a..19f2c703638e 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -394,6 +394,7 @@ void __init gart_iommu_hole_init(void)
 	for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
 		int bus;
 		int dev_base, dev_limit;
+		u32 ctl;
 
 		bus = bus_dev_ranges[i].bus;
 		dev_base = bus_dev_ranges[i].dev_base;
@@ -407,7 +408,19 @@ void __init gart_iommu_hole_init(void)
 			gart_iommu_aperture = 1;
 			x86_init.iommu.iommu_init = gart_iommu_init;
 
-			aper_order = (read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL) >> 1) & 7;
+			ctl = read_pci_config(bus, slot, 3,
					      AMD64_GARTAPERTURECTL);
+
+			/*
+			 * Before we do anything else disable the GART. It may
+			 * still be enabled if we boot into a crash-kernel here.
+			 * Reconfiguring the GART while it is enabled could have
+			 * unknown side-effects.
+			 */
+			ctl &= ~GARTEN;
+			write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, ctl);
+
+			aper_order = (ctl >> 1) & 7;
 			aper_size = (32 * 1024 * 1024) << aper_order;
 			aper_base = read_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE) & 0x7fff;
 			aper_base <<= 25;
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index dfca210f6a10..d4df51725290 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1640,8 +1640,10 @@ int __init APIC_init_uniprocessor(void)
 	}
 #endif
 
+#ifndef CONFIG_SMP
 	enable_IR_x2apic();
 	default_setup_apic_routing();
+#endif
 
 	verify_local_APIC();
 	connect_bsp_APIC();
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index fc6c8ef92dcc..d440123c556f 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -18,6 +18,7 @@
 #include <asm/processor.h>
 #include <linux/smp.h>
 #include <asm/k8.h>
+#include <asm/smp.h>
 
 #define LVL_1_INST	1
 #define LVL_1_DATA	2
@@ -150,7 +151,8 @@ struct _cpuid4_info {
 	union _cpuid4_leaf_ebx ebx;
 	union _cpuid4_leaf_ecx ecx;
 	unsigned long size;
-	unsigned long can_disable;
+	bool can_disable;
+	unsigned int l3_indices;
 	DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
 };
 
@@ -160,7 +162,8 @@ struct _cpuid4_info_regs {
 	union _cpuid4_leaf_ebx ebx;
 	union _cpuid4_leaf_ecx ecx;
 	unsigned long size;
-	unsigned long can_disable;
+	bool can_disable;
+	unsigned int l3_indices;
 };
 
 unsigned short			num_cache_leaves;
@@ -290,6 +293,36 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
 		(ebx->split.ways_of_associativity + 1) - 1;
 }
 
+struct _cache_attr {
+	struct attribute attr;
+	ssize_t (*show)(struct _cpuid4_info *, char *);
+	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
+};
+
+#ifdef CONFIG_CPU_SUP_AMD
+static unsigned int __cpuinit amd_calc_l3_indices(void)
+{
+	/*
+	 * We're called over smp_call_function_single() and therefore
+	 * are on the correct cpu.
+	 */
+	int cpu = smp_processor_id();
+	int node = cpu_to_node(cpu);
+	struct pci_dev *dev = node_to_k8_nb_misc(node);
+	unsigned int sc0, sc1, sc2, sc3;
+	u32 val = 0;
+
+	pci_read_config_dword(dev, 0x1C4, &val);
+
+	/* calculate subcache sizes */
+	sc0 = !(val & BIT(0));
+	sc1 = !(val & BIT(4));
+	sc2 = !(val & BIT(8)) + !(val & BIT(9));
+	sc3 = !(val & BIT(12)) + !(val & BIT(13));
+
+	return (max(max(max(sc0, sc1), sc2), sc3) << 10) - 1;
+}
+
 static void __cpuinit
 amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
 {
@@ -299,12 +332,103 @@ amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
 	if (boot_cpu_data.x86 == 0x11)
 		return;
 
-	/* see erratum #382 */
-	if ((boot_cpu_data.x86 == 0x10) && (boot_cpu_data.x86_model < 0x8))
+	/* see errata #382 and #388 */
+	if ((boot_cpu_data.x86 == 0x10) &&
+	    ((boot_cpu_data.x86_model < 0x8) ||
+	     (boot_cpu_data.x86_mask < 0x1)))
 		return;
 
-	this_leaf->can_disable = 1;
+	this_leaf->can_disable = true;
+	this_leaf->l3_indices = amd_calc_l3_indices();
+}
+
+static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
+				  unsigned int index)
+{
+	int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
+	int node = amd_get_nb_id(cpu);
+	struct pci_dev *dev = node_to_k8_nb_misc(node);
+	unsigned int reg = 0;
+
+	if (!this_leaf->can_disable)
+		return -EINVAL;
+
+	if (!dev)
+		return -EINVAL;
+
+	pci_read_config_dword(dev, 0x1BC + index * 4, &reg);
+	return sprintf(buf, "0x%08x\n", reg);
+}
+
+#define SHOW_CACHE_DISABLE(index)					\
+static ssize_t								\
+show_cache_disable_##index(struct _cpuid4_info *this_leaf, char *buf)	\
+{									\
+	return show_cache_disable(this_leaf, buf, index);		\
+}
+SHOW_CACHE_DISABLE(0)
+SHOW_CACHE_DISABLE(1)
+
+static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
+	const char *buf, size_t count, unsigned int index)
+{
+	int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
+	int node = amd_get_nb_id(cpu);
+	struct pci_dev *dev = node_to_k8_nb_misc(node);
+	unsigned long val = 0;
+
+#define SUBCACHE_MASK	(3UL << 20)
+#define SUBCACHE_INDEX	0xfff
+
+	if (!this_leaf->can_disable)
+		return -EINVAL;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	if (!dev)
+		return -EINVAL;
+
+	if (strict_strtoul(buf, 10, &val) < 0)
+		return -EINVAL;
+
+	/* do not allow writes outside of allowed bits */
+	if ((val & ~(SUBCACHE_MASK | SUBCACHE_INDEX)) ||
+	    ((val & SUBCACHE_INDEX) > this_leaf->l3_indices))
+		return -EINVAL;
+
+	val |= BIT(30);
+	pci_write_config_dword(dev, 0x1BC + index * 4, val);
+	/*
+	 * We need to WBINVD on a core on the node containing the L3 cache which
+	 * indices we disable therefore a simple wbinvd() is not sufficient.
+	 */
+	wbinvd_on_cpu(cpu);
+	pci_write_config_dword(dev, 0x1BC + index * 4, val | BIT(31));
+	return count;
+}
+
+#define STORE_CACHE_DISABLE(index)					\
+static ssize_t								\
+store_cache_disable_##index(struct _cpuid4_info *this_leaf,		\
+			    const char *buf, size_t count)		\
+{									\
+	return store_cache_disable(this_leaf, buf, count, index);	\
 }
+STORE_CACHE_DISABLE(0)
+STORE_CACHE_DISABLE(1)
+
+static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
+		show_cache_disable_0, store_cache_disable_0);
+static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
+		show_cache_disable_1, store_cache_disable_1);
+
+#else /* CONFIG_CPU_SUP_AMD */
+static void __cpuinit
+amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
+{
+};
+#endif /* CONFIG_CPU_SUP_AMD */
 
 static int
 __cpuinit cpuid4_cache_lookup_regs(int index,
@@ -711,82 +835,6 @@ static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf)
 #define to_object(k)	container_of(k, struct _index_kobject, kobj)
 #define to_attr(a)	container_of(a, struct _cache_attr, attr)
 
-static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
-				  unsigned int index)
-{
-	int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
-	int node = cpu_to_node(cpu);
-	struct pci_dev *dev = node_to_k8_nb_misc(node);
-	unsigned int reg = 0;
-
-	if (!this_leaf->can_disable)
-		return -EINVAL;
-
-	if (!dev)
-		return -EINVAL;
-
-	pci_read_config_dword(dev, 0x1BC + index * 4, &reg);
-	return sprintf(buf, "%x\n", reg);
-}
-
-#define SHOW_CACHE_DISABLE(index)					\
-static ssize_t								\
-show_cache_disable_##index(struct _cpuid4_info *this_leaf, char *buf)	\
-{									\
-	return show_cache_disable(this_leaf, buf, index);		\
-}
-SHOW_CACHE_DISABLE(0)
-SHOW_CACHE_DISABLE(1)
-
-static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
-	const char *buf, size_t count, unsigned int index)
-{
-	int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
-	int node = cpu_to_node(cpu);
-	struct pci_dev *dev = node_to_k8_nb_misc(node);
-	unsigned long val = 0;
-	unsigned int scrubber = 0;
-
-	if (!this_leaf->can_disable)
-		return -EINVAL;
-
-	if (!capable(CAP_SYS_ADMIN))
-		return -EPERM;
-
-	if (!dev)
-		return -EINVAL;
-
-	if (strict_strtoul(buf, 10, &val) < 0)
-		return -EINVAL;
-
-	val |= 0xc0000000;
-
-	pci_read_config_dword(dev, 0x58, &scrubber);
-	scrubber &= ~0x1f000000;
-	pci_write_config_dword(dev, 0x58, scrubber);
-
-	pci_write_config_dword(dev, 0x1BC + index * 4, val & ~0x40000000);
-	wbinvd();
-	pci_write_config_dword(dev, 0x1BC + index * 4, val);
-	return count;
-}
-
-#define STORE_CACHE_DISABLE(index)					\
-static ssize_t								\
-store_cache_disable_##index(struct _cpuid4_info *this_leaf,		\
-			    const char *buf, size_t count)		\
-{									\
-	return store_cache_disable(this_leaf, buf, count, index);	\
-}
-STORE_CACHE_DISABLE(0)
-STORE_CACHE_DISABLE(1)
-
-struct _cache_attr {
-	struct attribute attr;
-	ssize_t (*show)(struct _cpuid4_info *, char *);
-	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
-};
-
 #define define_one_ro(_name) \
 static struct _cache_attr _name = \
 	__ATTR(_name, 0444, show_##_name, NULL)
@@ -801,23 +849,28 @@ define_one_ro(size);
 define_one_ro(shared_cpu_map);
 define_one_ro(shared_cpu_list);
 
-static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
-		show_cache_disable_0, store_cache_disable_0);
-static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
-		show_cache_disable_1, store_cache_disable_1);
+#define DEFAULT_SYSFS_CACHE_ATTRS	\
+	&type.attr,			\
+	&level.attr,			\
+	&coherency_line_size.attr,	\
+	&physical_line_partition.attr,	\
+	&ways_of_associativity.attr,	\
+	&number_of_sets.attr,		\
+	&size.attr,			\
+	&shared_cpu_map.attr,		\
+	&shared_cpu_list.attr
 
 static struct attribute *default_attrs[] = {
-	&type.attr,
-	&level.attr,
-	&coherency_line_size.attr,
-	&physical_line_partition.attr,
-	&ways_of_associativity.attr,
-	&number_of_sets.attr,
-	&size.attr,
-	&shared_cpu_map.attr,
-	&shared_cpu_list.attr,
+	DEFAULT_SYSFS_CACHE_ATTRS,
+	NULL
+};
+
+static struct attribute *default_l3_attrs[] = {
+	DEFAULT_SYSFS_CACHE_ATTRS,
+#ifdef CONFIG_CPU_SUP_AMD
 	&cache_disable_0.attr,
 	&cache_disable_1.attr,
+#endif
 	NULL
 };
 
@@ -908,6 +961,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 	unsigned int cpu = sys_dev->id;
 	unsigned long i, j;
 	struct _index_kobject *this_object;
+	struct _cpuid4_info *this_leaf;
 	int retval;
 
 	retval = cpuid4_cache_sysfs_init(cpu);
@@ -926,6 +980,14 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 		this_object = INDEX_KOBJECT_PTR(cpu, i);
 		this_object->cpu = cpu;
 		this_object->index = i;
+
+		this_leaf = CPUID4_INFO_IDX(cpu, i);
+
+		if (this_leaf->can_disable)
+			ktype_cache.default_attrs = default_l3_attrs;
+		else
+			ktype_cache.default_attrs = default_attrs;
+
 		retval = kobject_init_and_add(&(this_object->kobj),
					      &ktype_cache,
					      per_cpu(ici_cache_kobject, cpu),
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 98819b32bb5f..c7ca8e26aa60 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -245,6 +245,97 @@ static u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
 
+static const u64 westmere_hw_cache_event_ids
+				[PERF_COUNT_HW_CACHE_MAX]
+				[PERF_COUNT_HW_CACHE_OP_MAX]
+				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
+{
+ [ C(L1D) ] = {
+	[ C(OP_READ) ] = {
+		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
+		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL                     */
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETURED.STORES      */
+		[ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL                   */
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */
+		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS            */
+	},
+ },
+ [ C(L1I ) ] = {
+	[ C(OP_READ) ] = {
+		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                    */
+		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = -1,
+		[ C(RESULT_MISS)   ] = -1,
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = 0x0,
+		[ C(RESULT_MISS)   ] = 0x0,
+	},
+ },
+ [ C(LL  ) ] = {
+	[ C(OP_READ) ] = {
+		[ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS               */
+		[ C(RESULT_MISS)   ] = 0x0224, /* L2_RQSTS.LD_MISS             */
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS                */
+		[ C(RESULT_MISS)   ] = 0x0824, /* L2_RQSTS.RFO_MISS            */
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference                */
+		[ C(RESULT_MISS)   ] = 0x412e, /* LLC Misses                   */
+	},
+ },
+ [ C(DTLB) ] = {
+	[ C(OP_READ) ] = {
+		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
+		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY         */
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETURED.STORES      */
+		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS  */
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = 0x0,
+		[ C(RESULT_MISS)   ] = 0x0,
+	},
+ },
+ [ C(ITLB) ] = {
+	[ C(OP_READ) ] = {
+		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P           */
+		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.ANY              */
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = -1,
+		[ C(RESULT_MISS)   ] = -1,
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = -1,
+		[ C(RESULT_MISS)   ] = -1,
+	},
+ },
+ [ C(BPU ) ] = {
+	[ C(OP_READ) ] = {
+		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
+		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY               */
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = -1,
+		[ C(RESULT_MISS)   ] = -1,
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = -1,
+		[ C(RESULT_MISS)   ] = -1,
+	},
+ },
+};
+
 static __initconst u64 nehalem_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
@@ -2118,6 +2209,7 @@ static __init int intel_pmu_init(void)
 	 * Install the hw-cache-events table:
 	 */
 	switch (boot_cpu_data.x86_model) {
+
 	case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
 	case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
 	case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
@@ -2129,7 +2221,9 @@ static __init int intel_pmu_init(void)
 		event_constraints = intel_core_event_constraints;
 		break;
 	default:
-	case 26:
+	case 26: /* 45 nm nehalem, "Bloomfield" */
+	case 30: /* 45 nm nehalem, "Lynnfield" */
+	case 46: /* 45 nm nehalem-ex, "Beckton" */
 		memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
 
@@ -2142,6 +2236,14 @@
 
 		pr_cont("Atom events, ");
 		break;
+
+	case 37: /* 32 nm nehalem, "Clarkdale" */
+	case 44: /* 32 nm nehalem, "Gulftown" */
+		memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
+		       sizeof(hw_cache_event_ids));
+
+		pr_cont("Westmere events, ");
+		break;
 	}
 	return 0;
 }
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index a4849c10a77e..ebd4c51d096a 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -27,7 +27,6 @@
 #include <asm/cpu.h>
 #include <asm/reboot.h>
 #include <asm/virtext.h>
-#include <asm/x86_init.h>
 
 #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
 
@@ -103,10 +102,5 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
 #ifdef CONFIG_HPET_TIMER
 	hpet_disable();
 #endif
-
-#ifdef CONFIG_X86_64
-	x86_platform.iommu_shutdown();
-#endif
-
 	crash_save_cpu(regs, safe_smp_processor_id());
 }
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index ad80a1c718c6..773afc9274a1 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -399,9 +399,15 @@ static int hpet_next_event(unsigned long delta,
	 * then we might have a real hardware problem. We can not do
	 * much about it here, but at least alert the user/admin with
	 * a prominent warning.
+	 * An erratum on some chipsets (ICH9,..), results in comparator read
+	 * immediately following a write returning old value. Workaround
+	 * for this is to read this value second time, when first
+	 * read returns old value.
	 */
-	WARN_ONCE(hpet_readl(HPET_Tn_CMP(timer)) != cnt,
+	if (unlikely((u32)hpet_readl(HPET_Tn_CMP(timer)) != cnt)) {
+		WARN_ONCE(hpet_readl(HPET_Tn_CMP(timer)) != cnt,
		  KERN_WARNING "hpet: compare register read back failed.\n");
+	}
 
 	return (s32)(hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0;
 }
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index bfba6019d762..b2258ca91003 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -618,8 +618,8 @@ int kgdb_arch_init(void)
	 * portion of kgdb because this operation requires mutexs to
	 * complete.
	 */
+	hw_breakpoint_init(&attr);
 	attr.bp_addr = (unsigned long)kgdb_arch_init;
-	attr.type = PERF_TYPE_BREAKPOINT;
 	attr.bp_len = HW_BREAKPOINT_LEN_1;
 	attr.bp_type = HW_BREAKPOINT_W;
 	attr.disabled = 1;
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index a2c1edd2d3ac..e81030f71a8f 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -664,7 +664,7 @@ static void __init smp_reserve_memory(struct mpf_intel *mpf)
 {
 	unsigned long size = get_mpc_size(mpf->physptr);
 
-	reserve_early(mpf->physptr, mpf->physptr+size, "MP-table mpc");
+	reserve_early_overlap_ok(mpf->physptr, mpf->physptr+size, "MP-table mpc");
 }
 
 static int __init smp_scan_config(unsigned long base, unsigned long length)
@@ -693,7 +693,7 @@ static int __init smp_scan_config(unsigned long base, unsigned long length)
			       mpf, (u64)virt_to_phys(mpf));
 
			mem = virt_to_phys(mpf);
-			reserve_early(mem, mem + sizeof(*mpf), "MP-table mpf");
+			reserve_early_overlap_ok(mem, mem + sizeof(*mpf), "MP-table mpf");
			if (mpf->physptr)
				smp_reserve_memory(mpf);
 
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 34de53b46f87..4f41b29fde98 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -564,6 +564,9 @@ static void enable_gart_translations(void)
 
 		enable_gart_translation(dev, __pa(agp_gatt_table));
 	}
+
+	/* Flush the GART-TLB to remove stale entries */
+	k8_flush_garts();
 }
 
 /*
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 89a49fb46a27..28c3d814c092 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
@@ -1502,8 +1502,8 @@ static int mmu_zap_unsync_children(struct kvm *kvm, | |||
1502 | for_each_sp(pages, sp, parents, i) { | 1502 | for_each_sp(pages, sp, parents, i) { |
1503 | kvm_mmu_zap_page(kvm, sp); | 1503 | kvm_mmu_zap_page(kvm, sp); |
1504 | mmu_pages_clear_parents(&parents); | 1504 | mmu_pages_clear_parents(&parents); |
1505 | zapped++; | ||
1505 | } | 1506 | } |
1506 | zapped += pages.nr; | ||
1507 | kvm_mmu_pages_init(parent, &parents, &pages); | 1507 | kvm_mmu_pages_init(parent, &parents, &pages); |
1508 | } | 1508 | } |
1509 | 1509 | ||
@@ -1554,14 +1554,16 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages) | |||
1554 | */ | 1554 | */ |
1555 | 1555 | ||
1556 | if (used_pages > kvm_nr_mmu_pages) { | 1556 | if (used_pages > kvm_nr_mmu_pages) { |
1557 | while (used_pages > kvm_nr_mmu_pages) { | 1557 | while (used_pages > kvm_nr_mmu_pages && |
1558 | !list_empty(&kvm->arch.active_mmu_pages)) { | ||
1558 | struct kvm_mmu_page *page; | 1559 | struct kvm_mmu_page *page; |
1559 | 1560 | ||
1560 | page = container_of(kvm->arch.active_mmu_pages.prev, | 1561 | page = container_of(kvm->arch.active_mmu_pages.prev, |
1561 | struct kvm_mmu_page, link); | 1562 | struct kvm_mmu_page, link); |
1562 | kvm_mmu_zap_page(kvm, page); | 1563 | used_pages -= kvm_mmu_zap_page(kvm, page); |
1563 | used_pages--; | 1564 | used_pages--; |
1564 | } | 1565 | } |
1566 | kvm_nr_mmu_pages = used_pages; | ||
1565 | kvm->arch.n_free_mmu_pages = 0; | 1567 | kvm->arch.n_free_mmu_pages = 0; |
1566 | } | 1568 | } |
1567 | else | 1569 | else |
@@ -1608,7 +1610,8 @@ static void mmu_unshadow(struct kvm *kvm, gfn_t gfn) | |||
1608 | && !sp->role.invalid) { | 1610 | && !sp->role.invalid) { |
1609 | pgprintk("%s: zap %lx %x\n", | 1611 | pgprintk("%s: zap %lx %x\n", |
1610 | __func__, gfn, sp->role.word); | 1612 | __func__, gfn, sp->role.word); |
1611 | kvm_mmu_zap_page(kvm, sp); | 1613 | if (kvm_mmu_zap_page(kvm, sp)) |
1614 | nn = bucket->first; | ||
1612 | } | 1615 | } |
1613 | } | 1616 | } |
1614 | } | 1617 | } |
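The mmu.c changes make the shrink loop account for how many pages a zap actually frees (the zap function's return value) and stop early if the active list empties. A user-space sketch of that accounting pattern, assuming a simplified page list where zapping one entry can free several pages at once:

#include <stdio.h>
#include <stdlib.h>

struct page {
	struct page *next;
	int nr_children;          /* pages that go away together with this one */
};

static struct page *active;
static unsigned int used_pages;

static void add_page(int nr_children)
{
	struct page *p = malloc(sizeof(*p));
	if (!p)
		exit(1);
	p->nr_children = nr_children;
	p->next = active;
	active = p;
	used_pages += 1 + nr_children;
}

/* Returns how many pages were actually freed (itself plus children),
 * in the spirit of kvm_mmu_zap_page()'s return value. */
static unsigned int zap_page(void)
{
	struct page *p = active;
	unsigned int freed = 1 + p->nr_children;

	active = p->next;
	free(p);
	return freed;
}

/* Shrink toward 'limit', accounting for every page a zap removes and
 * bailing out if the list empties first. */
static void change_mmu_pages(unsigned int limit)
{
	while (used_pages > limit && active != NULL)
		used_pages -= zap_page();
}

int main(void)
{
	add_page(0);
	add_page(3);   /* zapping this one frees four pages at once */
	add_page(0);
	change_mmu_pages(2);
	printf("used_pages=%u\n", used_pages);
	return 0;
}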
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 1d9b33843c80..d42e191b17fa 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
@@ -698,29 +698,28 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id) | |||
698 | if (err) | 698 | if (err) |
699 | goto free_svm; | 699 | goto free_svm; |
700 | 700 | ||
701 | err = -ENOMEM; | ||
701 | page = alloc_page(GFP_KERNEL); | 702 | page = alloc_page(GFP_KERNEL); |
702 | if (!page) { | 703 | if (!page) |
703 | err = -ENOMEM; | ||
704 | goto uninit; | 704 | goto uninit; |
705 | } | ||
706 | 705 | ||
707 | err = -ENOMEM; | ||
708 | msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER); | 706 | msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER); |
709 | if (!msrpm_pages) | 707 | if (!msrpm_pages) |
710 | goto uninit; | 708 | goto free_page1; |
711 | 709 | ||
712 | nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER); | 710 | nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER); |
713 | if (!nested_msrpm_pages) | 711 | if (!nested_msrpm_pages) |
714 | goto uninit; | 712 | goto free_page2; |
715 | |||
716 | svm->msrpm = page_address(msrpm_pages); | ||
717 | svm_vcpu_init_msrpm(svm->msrpm); | ||
718 | 713 | ||
719 | hsave_page = alloc_page(GFP_KERNEL); | 714 | hsave_page = alloc_page(GFP_KERNEL); |
720 | if (!hsave_page) | 715 | if (!hsave_page) |
721 | goto uninit; | 716 | goto free_page3; |
717 | |||
722 | svm->nested.hsave = page_address(hsave_page); | 718 | svm->nested.hsave = page_address(hsave_page); |
723 | 719 | ||
720 | svm->msrpm = page_address(msrpm_pages); | ||
721 | svm_vcpu_init_msrpm(svm->msrpm); | ||
722 | |||
724 | svm->nested.msrpm = page_address(nested_msrpm_pages); | 723 | svm->nested.msrpm = page_address(nested_msrpm_pages); |
725 | 724 | ||
726 | svm->vmcb = page_address(page); | 725 | svm->vmcb = page_address(page); |
@@ -737,6 +736,12 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id) | |||
737 | 736 | ||
738 | return &svm->vcpu; | 737 | return &svm->vcpu; |
739 | 738 | ||
739 | free_page3: | ||
740 | __free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER); | ||
741 | free_page2: | ||
742 | __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER); | ||
743 | free_page1: | ||
744 | __free_page(page); | ||
740 | uninit: | 745 | uninit: |
741 | kvm_vcpu_uninit(&svm->vcpu); | 746 | kvm_vcpu_uninit(&svm->vcpu); |
742 | free_svm: | 747 | free_svm: |
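The svm_create_vcpu() rework replaces "goto uninit" on every allocation failure with staged labels, so each label frees exactly what was allocated before the failing step. A minimal, self-contained sketch of that goto-based unwinding idiom using plain malloc/free:

#include <stdlib.h>
#include <stdio.h>

struct vcpu_res {
	void *vmcb_page;
	void *msrpm;
	void *nested_msrpm;
	void *hsave_page;
};

/* Staged allocation with goto-based unwinding: each failure label frees
 * what was successfully allocated before it, in reverse order. */
static struct vcpu_res *vcpu_res_create(void)
{
	struct vcpu_res *r = malloc(sizeof(*r));
	if (!r)
		return NULL;

	r->vmcb_page = malloc(4096);
	if (!r->vmcb_page)
		goto free_res;

	r->msrpm = malloc(2 * 4096);
	if (!r->msrpm)
		goto free_page1;

	r->nested_msrpm = malloc(2 * 4096);
	if (!r->nested_msrpm)
		goto free_page2;

	r->hsave_page = malloc(4096);
	if (!r->hsave_page)
		goto free_page3;

	return r;

free_page3:
	free(r->nested_msrpm);
free_page2:
	free(r->msrpm);
free_page1:
	free(r->vmcb_page);
free_res:
	free(r);
	return NULL;
}

int main(void)
{
	struct vcpu_res *r = vcpu_res_create();

	printf("%s\n", r ? "allocated" : "failed");
	if (r) {
		free(r->hsave_page);
		free(r->nested_msrpm);
		free(r->msrpm);
		free(r->vmcb_page);
		free(r);
	}
	return 0;
}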
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 8a8e13965076..3acbe194e525 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -61,6 +61,8 @@ module_param_named(unrestricted_guest, | |||
61 | static int __read_mostly emulate_invalid_guest_state = 0; | 61 | static int __read_mostly emulate_invalid_guest_state = 0; |
62 | module_param(emulate_invalid_guest_state, bool, S_IRUGO); | 62 | module_param(emulate_invalid_guest_state, bool, S_IRUGO); |
63 | 63 | ||
64 | #define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM)) | ||
65 | |||
64 | /* | 66 | /* |
65 | * These 2 parameters are used to config the controls for Pause-Loop Exiting: | 67 | * These 2 parameters are used to config the controls for Pause-Loop Exiting: |
66 | * ple_gap: upper bound on the amount of time between two successive | 68 | * ple_gap: upper bound on the amount of time between two successive |
@@ -115,7 +117,7 @@ struct vcpu_vmx { | |||
115 | } host_state; | 117 | } host_state; |
116 | struct { | 118 | struct { |
117 | int vm86_active; | 119 | int vm86_active; |
118 | u8 save_iopl; | 120 | ulong save_rflags; |
119 | struct kvm_save_segment { | 121 | struct kvm_save_segment { |
120 | u16 selector; | 122 | u16 selector; |
121 | unsigned long base; | 123 | unsigned long base; |
@@ -787,18 +789,23 @@ static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu) | |||
787 | 789 | ||
788 | static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu) | 790 | static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu) |
789 | { | 791 | { |
790 | unsigned long rflags; | 792 | unsigned long rflags, save_rflags; |
791 | 793 | ||
792 | rflags = vmcs_readl(GUEST_RFLAGS); | 794 | rflags = vmcs_readl(GUEST_RFLAGS); |
793 | if (to_vmx(vcpu)->rmode.vm86_active) | 795 | if (to_vmx(vcpu)->rmode.vm86_active) { |
794 | rflags &= ~(unsigned long)(X86_EFLAGS_IOPL | X86_EFLAGS_VM); | 796 | rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS; |
797 | save_rflags = to_vmx(vcpu)->rmode.save_rflags; | ||
798 | rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS; | ||
799 | } | ||
795 | return rflags; | 800 | return rflags; |
796 | } | 801 | } |
797 | 802 | ||
798 | static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) | 803 | static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) |
799 | { | 804 | { |
800 | if (to_vmx(vcpu)->rmode.vm86_active) | 805 | if (to_vmx(vcpu)->rmode.vm86_active) { |
806 | to_vmx(vcpu)->rmode.save_rflags = rflags; | ||
801 | rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; | 807 | rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; |
808 | } | ||
802 | vmcs_writel(GUEST_RFLAGS, rflags); | 809 | vmcs_writel(GUEST_RFLAGS, rflags); |
803 | } | 810 | } |
804 | 811 | ||
@@ -1431,8 +1438,8 @@ static void enter_pmode(struct kvm_vcpu *vcpu) | |||
1431 | vmcs_write32(GUEST_TR_AR_BYTES, vmx->rmode.tr.ar); | 1438 | vmcs_write32(GUEST_TR_AR_BYTES, vmx->rmode.tr.ar); |
1432 | 1439 | ||
1433 | flags = vmcs_readl(GUEST_RFLAGS); | 1440 | flags = vmcs_readl(GUEST_RFLAGS); |
1434 | flags &= ~(X86_EFLAGS_IOPL | X86_EFLAGS_VM); | 1441 | flags &= RMODE_GUEST_OWNED_EFLAGS_BITS; |
1435 | flags |= (vmx->rmode.save_iopl << IOPL_SHIFT); | 1442 | flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS; |
1436 | vmcs_writel(GUEST_RFLAGS, flags); | 1443 | vmcs_writel(GUEST_RFLAGS, flags); |
1437 | 1444 | ||
1438 | vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) | | 1445 | vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) | |
@@ -1501,8 +1508,7 @@ static void enter_rmode(struct kvm_vcpu *vcpu) | |||
1501 | vmcs_write32(GUEST_TR_AR_BYTES, 0x008b); | 1508 | vmcs_write32(GUEST_TR_AR_BYTES, 0x008b); |
1502 | 1509 | ||
1503 | flags = vmcs_readl(GUEST_RFLAGS); | 1510 | flags = vmcs_readl(GUEST_RFLAGS); |
1504 | vmx->rmode.save_iopl | 1511 | vmx->rmode.save_rflags = flags; |
1505 | = (flags & X86_EFLAGS_IOPL) >> IOPL_SHIFT; | ||
1506 | 1512 | ||
1507 | flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; | 1513 | flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; |
1508 | 1514 | ||
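The vmx hunks above stop reconstructing IOPL alone and instead save the guest's full RFLAGS while vm86 emulation forces IOPL and VM in hardware, merging the two views through a guest-owned-bits mask on read. A user-space sketch of that save/merge pattern (hw_rflags and save_rflags stand in for the VMCS field and the rmode save slot):

#include <stdio.h>

#define X86_EFLAGS_IOPL  0x3000UL
#define X86_EFLAGS_VM    0x20000UL
/* Bits the guest really owns while vm86 emulation is active. */
#define RMODE_GUEST_OWNED_BITS  (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))

static unsigned long hw_rflags;     /* what the (emulated) VMCS holds */
static unsigned long save_rflags;   /* guest's IOPL/VM, kept in software */

static void set_rflags(unsigned long rflags, int vm86_active)
{
	if (vm86_active) {
		save_rflags = rflags;                       /* remember everything */
		rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;  /* force for emulation */
	}
	hw_rflags = rflags;
}

static unsigned long get_rflags(int vm86_active)
{
	unsigned long rflags = hw_rflags;

	if (vm86_active) {
		rflags &= RMODE_GUEST_OWNED_BITS;                /* drop forced bits */
		rflags |= save_rflags & ~RMODE_GUEST_OWNED_BITS; /* restore guest's */
	}
	return rflags;
}

int main(void)
{
	unsigned long guest = 0x0202;          /* IF set, IOPL 0, VM clear */

	set_rflags(guest, 1);
	printf("hw=%#lx guest sees=%#lx\n", hw_rflags, get_rflags(1));
	return 0;
}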
@@ -2719,6 +2725,12 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu, | |||
2719 | kvm_queue_exception(vcpu, vec); | 2725 | kvm_queue_exception(vcpu, vec); |
2720 | return 1; | 2726 | return 1; |
2721 | case BP_VECTOR: | 2727 | case BP_VECTOR: |
2728 | /* | ||
2729 | * Update instruction length as we may reinject the exception | ||
2730 | * from user space while in guest debugging mode. | ||
2731 | */ | ||
2732 | to_vmx(vcpu)->vcpu.arch.event_exit_inst_len = | ||
2733 | vmcs_read32(VM_EXIT_INSTRUCTION_LEN); | ||
2722 | if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) | 2734 | if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) |
2723 | return 0; | 2735 | return 0; |
2724 | /* fall through */ | 2736 | /* fall through */ |
@@ -2841,6 +2853,13 @@ static int handle_exception(struct kvm_vcpu *vcpu) | |||
2841 | kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7); | 2853 | kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7); |
2842 | /* fall through */ | 2854 | /* fall through */ |
2843 | case BP_VECTOR: | 2855 | case BP_VECTOR: |
2856 | /* | ||
2857 | * Update instruction length as we may reinject #BP from | ||
2858 | * user space while in guest debugging mode. Reading it for | ||
2859 | * #DB as well causes no harm, it is not used in that case. | ||
2860 | */ | ||
2861 | vmx->vcpu.arch.event_exit_inst_len = | ||
2862 | vmcs_read32(VM_EXIT_INSTRUCTION_LEN); | ||
2844 | kvm_run->exit_reason = KVM_EXIT_DEBUG; | 2863 | kvm_run->exit_reason = KVM_EXIT_DEBUG; |
2845 | kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip; | 2864 | kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip; |
2846 | kvm_run->debug.arch.exception = ex_no; | 2865 | kvm_run->debug.arch.exception = ex_no; |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index d325c6345bab..55e20f8c518f 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -384,21 +384,16 @@ out: | |||
384 | void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) | 384 | void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) |
385 | { | 385 | { |
386 | if (cr0 & CR0_RESERVED_BITS) { | 386 | if (cr0 & CR0_RESERVED_BITS) { |
387 | printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n", | ||
388 | cr0, vcpu->arch.cr0); | ||
389 | kvm_inject_gp(vcpu, 0); | 387 | kvm_inject_gp(vcpu, 0); |
390 | return; | 388 | return; |
391 | } | 389 | } |
392 | 390 | ||
393 | if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) { | 391 | if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) { |
394 | printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n"); | ||
395 | kvm_inject_gp(vcpu, 0); | 392 | kvm_inject_gp(vcpu, 0); |
396 | return; | 393 | return; |
397 | } | 394 | } |
398 | 395 | ||
399 | if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) { | 396 | if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) { |
400 | printk(KERN_DEBUG "set_cr0: #GP, set PG flag " | ||
401 | "and a clear PE flag\n"); | ||
402 | kvm_inject_gp(vcpu, 0); | 397 | kvm_inject_gp(vcpu, 0); |
403 | return; | 398 | return; |
404 | } | 399 | } |
@@ -409,15 +404,11 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) | |||
409 | int cs_db, cs_l; | 404 | int cs_db, cs_l; |
410 | 405 | ||
411 | if (!is_pae(vcpu)) { | 406 | if (!is_pae(vcpu)) { |
412 | printk(KERN_DEBUG "set_cr0: #GP, start paging " | ||
413 | "in long mode while PAE is disabled\n"); | ||
414 | kvm_inject_gp(vcpu, 0); | 407 | kvm_inject_gp(vcpu, 0); |
415 | return; | 408 | return; |
416 | } | 409 | } |
417 | kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); | 410 | kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); |
418 | if (cs_l) { | 411 | if (cs_l) { |
419 | printk(KERN_DEBUG "set_cr0: #GP, start paging " | ||
420 | "in long mode while CS.L == 1\n"); | ||
421 | kvm_inject_gp(vcpu, 0); | 412 | kvm_inject_gp(vcpu, 0); |
422 | return; | 413 | return; |
423 | 414 | ||
@@ -425,8 +416,6 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) | |||
425 | } else | 416 | } else |
426 | #endif | 417 | #endif |
427 | if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) { | 418 | if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) { |
428 | printk(KERN_DEBUG "set_cr0: #GP, pdptrs " | ||
429 | "reserved bits\n"); | ||
430 | kvm_inject_gp(vcpu, 0); | 419 | kvm_inject_gp(vcpu, 0); |
431 | return; | 420 | return; |
432 | } | 421 | } |
@@ -453,28 +442,23 @@ void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) | |||
453 | unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE; | 442 | unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE; |
454 | 443 | ||
455 | if (cr4 & CR4_RESERVED_BITS) { | 444 | if (cr4 & CR4_RESERVED_BITS) { |
456 | printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n"); | ||
457 | kvm_inject_gp(vcpu, 0); | 445 | kvm_inject_gp(vcpu, 0); |
458 | return; | 446 | return; |
459 | } | 447 | } |
460 | 448 | ||
461 | if (is_long_mode(vcpu)) { | 449 | if (is_long_mode(vcpu)) { |
462 | if (!(cr4 & X86_CR4_PAE)) { | 450 | if (!(cr4 & X86_CR4_PAE)) { |
463 | printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while " | ||
464 | "in long mode\n"); | ||
465 | kvm_inject_gp(vcpu, 0); | 451 | kvm_inject_gp(vcpu, 0); |
466 | return; | 452 | return; |
467 | } | 453 | } |
468 | } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE) | 454 | } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE) |
469 | && ((cr4 ^ old_cr4) & pdptr_bits) | 455 | && ((cr4 ^ old_cr4) & pdptr_bits) |
470 | && !load_pdptrs(vcpu, vcpu->arch.cr3)) { | 456 | && !load_pdptrs(vcpu, vcpu->arch.cr3)) { |
471 | printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n"); | ||
472 | kvm_inject_gp(vcpu, 0); | 457 | kvm_inject_gp(vcpu, 0); |
473 | return; | 458 | return; |
474 | } | 459 | } |
475 | 460 | ||
476 | if (cr4 & X86_CR4_VMXE) { | 461 | if (cr4 & X86_CR4_VMXE) { |
477 | printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n"); | ||
478 | kvm_inject_gp(vcpu, 0); | 462 | kvm_inject_gp(vcpu, 0); |
479 | return; | 463 | return; |
480 | } | 464 | } |
@@ -495,21 +479,16 @@ void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) | |||
495 | 479 | ||
496 | if (is_long_mode(vcpu)) { | 480 | if (is_long_mode(vcpu)) { |
497 | if (cr3 & CR3_L_MODE_RESERVED_BITS) { | 481 | if (cr3 & CR3_L_MODE_RESERVED_BITS) { |
498 | printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n"); | ||
499 | kvm_inject_gp(vcpu, 0); | 482 | kvm_inject_gp(vcpu, 0); |
500 | return; | 483 | return; |
501 | } | 484 | } |
502 | } else { | 485 | } else { |
503 | if (is_pae(vcpu)) { | 486 | if (is_pae(vcpu)) { |
504 | if (cr3 & CR3_PAE_RESERVED_BITS) { | 487 | if (cr3 & CR3_PAE_RESERVED_BITS) { |
505 | printk(KERN_DEBUG | ||
506 | "set_cr3: #GP, reserved bits\n"); | ||
507 | kvm_inject_gp(vcpu, 0); | 488 | kvm_inject_gp(vcpu, 0); |
508 | return; | 489 | return; |
509 | } | 490 | } |
510 | if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) { | 491 | if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) { |
511 | printk(KERN_DEBUG "set_cr3: #GP, pdptrs " | ||
512 | "reserved bits\n"); | ||
513 | kvm_inject_gp(vcpu, 0); | 492 | kvm_inject_gp(vcpu, 0); |
514 | return; | 493 | return; |
515 | } | 494 | } |
@@ -541,7 +520,6 @@ EXPORT_SYMBOL_GPL(kvm_set_cr3); | |||
541 | void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8) | 520 | void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8) |
542 | { | 521 | { |
543 | if (cr8 & CR8_RESERVED_BITS) { | 522 | if (cr8 & CR8_RESERVED_BITS) { |
544 | printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8); | ||
545 | kvm_inject_gp(vcpu, 0); | 523 | kvm_inject_gp(vcpu, 0); |
546 | return; | 524 | return; |
547 | } | 525 | } |
@@ -595,15 +573,12 @@ static u32 emulated_msrs[] = { | |||
595 | static void set_efer(struct kvm_vcpu *vcpu, u64 efer) | 573 | static void set_efer(struct kvm_vcpu *vcpu, u64 efer) |
596 | { | 574 | { |
597 | if (efer & efer_reserved_bits) { | 575 | if (efer & efer_reserved_bits) { |
598 | printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n", | ||
599 | efer); | ||
600 | kvm_inject_gp(vcpu, 0); | 576 | kvm_inject_gp(vcpu, 0); |
601 | return; | 577 | return; |
602 | } | 578 | } |
603 | 579 | ||
604 | if (is_paging(vcpu) | 580 | if (is_paging(vcpu) |
605 | && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME)) { | 581 | && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME)) { |
606 | printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n"); | ||
607 | kvm_inject_gp(vcpu, 0); | 582 | kvm_inject_gp(vcpu, 0); |
608 | return; | 583 | return; |
609 | } | 584 | } |
@@ -613,7 +588,6 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer) | |||
613 | 588 | ||
614 | feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); | 589 | feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); |
615 | if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) { | 590 | if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) { |
616 | printk(KERN_DEBUG "set_efer: #GP, enable FFXSR w/o CPUID capability\n"); | ||
617 | kvm_inject_gp(vcpu, 0); | 591 | kvm_inject_gp(vcpu, 0); |
618 | return; | 592 | return; |
619 | } | 593 | } |
@@ -624,7 +598,6 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer) | |||
624 | 598 | ||
625 | feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); | 599 | feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); |
626 | if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) { | 600 | if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) { |
627 | printk(KERN_DEBUG "set_efer: #GP, enable SVM w/o SVM\n"); | ||
628 | kvm_inject_gp(vcpu, 0); | 601 | kvm_inject_gp(vcpu, 0); |
629 | return; | 602 | return; |
630 | } | 603 | } |
@@ -913,9 +886,13 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data) | |||
913 | if (msr >= MSR_IA32_MC0_CTL && | 886 | if (msr >= MSR_IA32_MC0_CTL && |
914 | msr < MSR_IA32_MC0_CTL + 4 * bank_num) { | 887 | msr < MSR_IA32_MC0_CTL + 4 * bank_num) { |
915 | u32 offset = msr - MSR_IA32_MC0_CTL; | 888 | u32 offset = msr - MSR_IA32_MC0_CTL; |
916 | /* only 0 or all 1s can be written to IA32_MCi_CTL */ | 889 | /* only 0 or all 1s can be written to IA32_MCi_CTL |
890 | * some Linux kernels, though, clear bit 10 in bank 4 to work | ||
891 | * around a BIOS/GART TLB issue on AMD K8s; ignore this to | ||
892 | * avoid an uncaught #GP in the guest | ||
893 | */ | ||
917 | if ((offset & 0x3) == 0 && | 894 | if ((offset & 0x3) == 0 && |
918 | data != 0 && data != ~(u64)0) | 895 | data != 0 && (data | (1 << 10)) != ~(u64)0) |
919 | return -1; | 896 | return -1; |
920 | vcpu->arch.mce_banks[offset] = data; | 897 | vcpu->arch.mce_banks[offset] = data; |
921 | break; | 898 | break; |
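The MCE hunk relaxes the IA32_MCi_CTL write check so the value some kernels write with bit 10 cleared is still accepted. A small sketch of that validation, with the bit-10 exception folded into the all-ones comparison:

#include <stdint.h>
#include <stdio.h>

/* Valid IA32_MCi_CTL writes are 0 or all 1s; additionally accept the
 * value written with bit 10 cleared (the K8 GART TLB workaround) by
 * ignoring bit 10 in the all-ones comparison. */
static int mci_ctl_write_ok(uint64_t data)
{
	if (data == 0)
		return 1;
	return (data | (1ULL << 10)) == ~(uint64_t)0;
}

int main(void)
{
	printf("%d %d %d\n",
	       mci_ctl_write_ok(0),                           /* 1 */
	       mci_ctl_write_ok(~(uint64_t)0 ^ (1ULL << 10)), /* 1 */
	       mci_ctl_write_ok(0x10));                       /* 0 */
	return 0;
}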
@@ -2366,7 +2343,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, | |||
2366 | struct kvm_dirty_log *log) | 2343 | struct kvm_dirty_log *log) |
2367 | { | 2344 | { |
2368 | int r; | 2345 | int r; |
2369 | int n; | 2346 | unsigned long n; |
2370 | struct kvm_memory_slot *memslot; | 2347 | struct kvm_memory_slot *memslot; |
2371 | int is_dirty = 0; | 2348 | int is_dirty = 0; |
2372 | 2349 | ||
@@ -2382,7 +2359,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, | |||
2382 | kvm_mmu_slot_remove_write_access(kvm, log->slot); | 2359 | kvm_mmu_slot_remove_write_access(kvm, log->slot); |
2383 | spin_unlock(&kvm->mmu_lock); | 2360 | spin_unlock(&kvm->mmu_lock); |
2384 | memslot = &kvm->memslots[log->slot]; | 2361 | memslot = &kvm->memslots[log->slot]; |
2385 | n = ALIGN(memslot->npages, BITS_PER_LONG) / 8; | 2362 | n = kvm_dirty_bitmap_bytes(memslot); |
2386 | memset(memslot->dirty_bitmap, 0, n); | 2363 | memset(memslot->dirty_bitmap, 0, n); |
2387 | } | 2364 | } |
2388 | r = 0; | 2365 | r = 0; |
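The dirty-log paths now take the bitmap size from kvm_dirty_bitmap_bytes() and keep it in an unsigned long. A sketch of the underlying computation -- one dirty bit per page, rounded up to whole longs, expressed in bytes (the helper name mirrors the kernel one, but the body here is an assumption for illustration):

#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

/* One dirty bit per page, rounded up to whole unsigned longs,
 * expressed in bytes. */
static unsigned long dirty_bitmap_bytes(unsigned long npages)
{
	unsigned long nlongs = (npages + BITS_PER_LONG - 1) / BITS_PER_LONG;

	return nlongs * sizeof(unsigned long);
}

int main(void)
{
	/* 8M pages (a 32GB slot with 4K pages) needs a 1MB bitmap; larger
	 * slots are why the byte count wants an unsigned long, not an int. */
	printf("%lu\n", dirty_bitmap_bytes(8UL << 20));
	return 0;
}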
@@ -4599,6 +4576,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason) | |||
4599 | int ret = 0; | 4576 | int ret = 0; |
4600 | u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR); | 4577 | u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR); |
4601 | u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR); | 4578 | u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR); |
4579 | u32 desc_limit; | ||
4602 | 4580 | ||
4603 | old_tss_base = vcpu->arch.mmu.gva_to_gpa(vcpu, old_tss_base); | 4581 | old_tss_base = vcpu->arch.mmu.gva_to_gpa(vcpu, old_tss_base); |
4604 | 4582 | ||
@@ -4621,7 +4599,10 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason) | |||
4621 | } | 4599 | } |
4622 | } | 4600 | } |
4623 | 4601 | ||
4624 | if (!nseg_desc.p || get_desc_limit(&nseg_desc) < 0x67) { | 4602 | desc_limit = get_desc_limit(&nseg_desc); |
4603 | if (!nseg_desc.p || | ||
4604 | ((desc_limit < 0x67 && (nseg_desc.type & 8)) || | ||
4605 | desc_limit < 0x2b)) { | ||
4625 | kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc); | 4606 | kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc); |
4626 | return 1; | 4607 | return 1; |
4627 | } | 4608 | } |
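The task-switch hunk checks the TSS descriptor limit against the size the descriptor type implies: at least 0x67 bytes for a 32-bit TSS (type bit 3 set), at least 0x2b for a 16-bit one. A simplified sketch of just that check (the real code validates more of the descriptor elsewhere):

#include <stdio.h>

/* Minimal TSS descriptor sanity check: present, and the limit large
 * enough for the TSS size the type implies. */
static int tss_desc_ok(int present, unsigned type, unsigned limit)
{
	if (!present)
		return 0;
	if ((type & 8) && limit < 0x67)   /* 32-bit TSS */
		return 0;
	if (limit < 0x2b)                 /* any TSS */
		return 0;
	return 1;
}

int main(void)
{
	printf("%d %d %d\n",
	       tss_desc_ok(1, 0x9, 0x67),   /* 32-bit, big enough: 1 */
	       tss_desc_ok(1, 0x9, 0x2b),   /* 32-bit, too small:  0 */
	       tss_desc_ok(1, 0x1, 0x2b));  /* 16-bit, ok:         1 */
	return 0;
}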
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile index cffd754f3039..ddef409c2816 100644 --- a/arch/x86/lib/Makefile +++ b/arch/x86/lib/Makefile | |||
@@ -14,7 +14,7 @@ $(obj)/inat.o: $(obj)/inat-tables.c | |||
14 | 14 | ||
15 | clean-files := inat-tables.c | 15 | clean-files := inat-tables.c |
16 | 16 | ||
17 | obj-$(CONFIG_SMP) += msr-smp.o | 17 | obj-$(CONFIG_SMP) += msr-smp.o cache-smp.o |
18 | 18 | ||
19 | lib-y := delay.o | 19 | lib-y := delay.o |
20 | lib-y += thunk_$(BITS).o | 20 | lib-y += thunk_$(BITS).o |
@@ -39,4 +39,5 @@ else | |||
39 | lib-y += thunk_64.o clear_page_64.o copy_page_64.o | 39 | lib-y += thunk_64.o clear_page_64.o copy_page_64.o |
40 | lib-y += memmove_64.o memset_64.o | 40 | lib-y += memmove_64.o memset_64.o |
41 | lib-y += copy_user_64.o rwlock_64.o copy_user_nocache_64.o | 41 | lib-y += copy_user_64.o rwlock_64.o copy_user_nocache_64.o |
42 | lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem_64.o | ||
42 | endif | 43 | endif |
diff --git a/arch/x86/lib/cache-smp.c b/arch/x86/lib/cache-smp.c new file mode 100644 index 000000000000..a3c668875038 --- /dev/null +++ b/arch/x86/lib/cache-smp.c | |||
@@ -0,0 +1,19 @@ | |||
1 | #include <linux/smp.h> | ||
2 | #include <linux/module.h> | ||
3 | |||
4 | static void __wbinvd(void *dummy) | ||
5 | { | ||
6 | wbinvd(); | ||
7 | } | ||
8 | |||
9 | void wbinvd_on_cpu(int cpu) | ||
10 | { | ||
11 | smp_call_function_single(cpu, __wbinvd, NULL, 1); | ||
12 | } | ||
13 | EXPORT_SYMBOL(wbinvd_on_cpu); | ||
14 | |||
15 | int wbinvd_on_all_cpus(void) | ||
16 | { | ||
17 | return on_each_cpu(__wbinvd, NULL, 1); | ||
18 | } | ||
19 | EXPORT_SYMBOL(wbinvd_on_all_cpus); | ||
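The new cache-smp.c helpers run wbinvd on one or all CPUs via IPIs (smp_call_function_single/on_each_cpu). As a rough user-space analogue -- an assumption for illustration only, since wbinvd itself is privileged and the kernel path uses IPIs rather than scheduling -- the same "run this function on each CPU" shape can be sketched with sched_setaffinity:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical per-CPU work: here we just report where we ran. */
static void do_work(int cpu)
{
	printf("requested cpu %d, ran on cpu %d\n", cpu, sched_getcpu());
}

static int run_on_cpu(int cpu, void (*fn)(int))
{
	cpu_set_t set, old;

	if (sched_getaffinity(0, sizeof(old), &old))
		return -1;
	CPU_ZERO(&set);
	CPU_SET(cpu, &set);
	if (sched_setaffinity(0, sizeof(set), &set))
		return -1;                       /* CPU may be offline */
	fn(cpu);
	return sched_setaffinity(0, sizeof(old), &old);
}

int main(void)
{
	long ncpus = sysconf(_SC_NPROCESSORS_ONLN);

	for (long cpu = 0; cpu < ncpus; cpu++)
		run_on_cpu((int)cpu, do_work);
	return 0;
}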
diff --git a/arch/x86/lib/rwsem_64.S b/arch/x86/lib/rwsem_64.S new file mode 100644 index 000000000000..15acecf0d7aa --- /dev/null +++ b/arch/x86/lib/rwsem_64.S | |||
@@ -0,0 +1,81 @@ | |||
1 | /* | ||
2 | * x86-64 rwsem wrappers | ||
3 | * | ||
4 | * This interfaces the inline asm code to the slow-path | ||
5 | * C routines. We need to save the call-clobbered regs | ||
6 | * that the asm does not mark as clobbered, and move the | ||
7 | * argument from %rax to %rdi. | ||
8 | * | ||
9 | * NOTE! We don't need to save %rax, because the functions | ||
10 | * will always return the semaphore pointer in %rax (which | ||
11 | * is also the input argument to these helpers) | ||
12 | * | ||
13 | * The following can clobber %rdx because the asm clobbers it: | ||
14 | * call_rwsem_down_write_failed | ||
15 | * call_rwsem_wake | ||
16 | * but %rdi, %rsi, %rcx, %r8-r11 always need saving. | ||
17 | */ | ||
18 | |||
19 | #include <linux/linkage.h> | ||
20 | #include <asm/rwlock.h> | ||
21 | #include <asm/alternative-asm.h> | ||
22 | #include <asm/frame.h> | ||
23 | #include <asm/dwarf2.h> | ||
24 | |||
25 | #define save_common_regs \ | ||
26 | pushq %rdi; \ | ||
27 | pushq %rsi; \ | ||
28 | pushq %rcx; \ | ||
29 | pushq %r8; \ | ||
30 | pushq %r9; \ | ||
31 | pushq %r10; \ | ||
32 | pushq %r11 | ||
33 | |||
34 | #define restore_common_regs \ | ||
35 | popq %r11; \ | ||
36 | popq %r10; \ | ||
37 | popq %r9; \ | ||
38 | popq %r8; \ | ||
39 | popq %rcx; \ | ||
40 | popq %rsi; \ | ||
41 | popq %rdi | ||
42 | |||
43 | /* Fix up special calling conventions */ | ||
44 | ENTRY(call_rwsem_down_read_failed) | ||
45 | save_common_regs | ||
46 | pushq %rdx | ||
47 | movq %rax,%rdi | ||
48 | call rwsem_down_read_failed | ||
49 | popq %rdx | ||
50 | restore_common_regs | ||
51 | ret | ||
52 | ENDPROC(call_rwsem_down_read_failed) | ||
53 | |||
54 | ENTRY(call_rwsem_down_write_failed) | ||
55 | save_common_regs | ||
56 | movq %rax,%rdi | ||
57 | call rwsem_down_write_failed | ||
58 | restore_common_regs | ||
59 | ret | ||
60 | ENDPROC(call_rwsem_down_write_failed) | ||
61 | |||
62 | ENTRY(call_rwsem_wake) | ||
63 | decw %dx /* do nothing if still outstanding active readers */ | ||
64 | jnz 1f | ||
65 | save_common_regs | ||
66 | movq %rax,%rdi | ||
67 | call rwsem_wake | ||
68 | restore_common_regs | ||
69 | 1: ret | ||
70 | ENDPROC(call_rwsem_wake) | ||
71 | |||
72 | /* Fix up special calling conventions */ | ||
73 | ENTRY(call_rwsem_downgrade_wake) | ||
74 | save_common_regs | ||
75 | pushq %rdx | ||
76 | movq %rax,%rdi | ||
77 | call rwsem_downgrade_wake | ||
78 | popq %rdx | ||
79 | restore_common_regs | ||
80 | ret | ||
81 | ENDPROC(call_rwsem_downgrade_wake) | ||
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c index 0696d506c4ad..b02f6d8ac922 100644 --- a/arch/x86/pci/irq.c +++ b/arch/x86/pci/irq.c | |||
@@ -590,6 +590,8 @@ static __init int intel_router_probe(struct irq_router *r, struct pci_dev *route | |||
590 | case PCI_DEVICE_ID_INTEL_ICH10_1: | 590 | case PCI_DEVICE_ID_INTEL_ICH10_1: |
591 | case PCI_DEVICE_ID_INTEL_ICH10_2: | 591 | case PCI_DEVICE_ID_INTEL_ICH10_2: |
592 | case PCI_DEVICE_ID_INTEL_ICH10_3: | 592 | case PCI_DEVICE_ID_INTEL_ICH10_3: |
593 | case PCI_DEVICE_ID_INTEL_CPT_LPC1: | ||
594 | case PCI_DEVICE_ID_INTEL_CPT_LPC2: | ||
593 | r->name = "PIIX/ICH"; | 595 | r->name = "PIIX/ICH"; |
594 | r->get = pirq_piix_get; | 596 | r->get = pirq_piix_get; |
595 | r->set = pirq_piix_set; | 597 | r->set = pirq_piix_set; |
diff --git a/arch/x86/power/hibernate_asm_32.S b/arch/x86/power/hibernate_asm_32.S index b641388d8286..ad47daeafa4e 100644 --- a/arch/x86/power/hibernate_asm_32.S +++ b/arch/x86/power/hibernate_asm_32.S | |||
@@ -27,10 +27,17 @@ ENTRY(swsusp_arch_suspend) | |||
27 | ret | 27 | ret |
28 | 28 | ||
29 | ENTRY(restore_image) | 29 | ENTRY(restore_image) |
30 | movl mmu_cr4_features, %ecx | ||
30 | movl resume_pg_dir, %eax | 31 | movl resume_pg_dir, %eax |
31 | subl $__PAGE_OFFSET, %eax | 32 | subl $__PAGE_OFFSET, %eax |
32 | movl %eax, %cr3 | 33 | movl %eax, %cr3 |
33 | 34 | ||
35 | jecxz 1f # cr4 Pentium and higher, skip if zero | ||
36 | andl $~(X86_CR4_PGE), %ecx | ||
37 | movl %ecx, %cr4; # turn off PGE | ||
38 | movl %cr3, %eax; # flush TLB | ||
39 | movl %eax, %cr3 | ||
40 | 1: | ||
34 | movl restore_pblist, %edx | 41 | movl restore_pblist, %edx |
35 | .p2align 4,,7 | 42 | .p2align 4,,7 |
36 | 43 | ||
@@ -54,16 +61,8 @@ done: | |||
54 | movl $swapper_pg_dir, %eax | 61 | movl $swapper_pg_dir, %eax |
55 | subl $__PAGE_OFFSET, %eax | 62 | subl $__PAGE_OFFSET, %eax |
56 | movl %eax, %cr3 | 63 | movl %eax, %cr3 |
57 | /* Flush TLB, including "global" things (vmalloc) */ | ||
58 | movl mmu_cr4_features, %ecx | 64 | movl mmu_cr4_features, %ecx |
59 | jecxz 1f # cr4 Pentium and higher, skip if zero | 65 | jecxz 1f # cr4 Pentium and higher, skip if zero |
60 | movl %ecx, %edx | ||
61 | andl $~(X86_CR4_PGE), %edx | ||
62 | movl %edx, %cr4; # turn off PGE | ||
63 | 1: | ||
64 | movl %cr3, %eax; # flush TLB | ||
65 | movl %eax, %cr3 | ||
66 | jecxz 1f # cr4 Pentium and higher, skip if zero | ||
67 | movl %ecx, %cr4; # turn PGE back on | 66 | movl %ecx, %cr4; # turn PGE back on |
68 | 1: | 67 | 1: |
69 | 68 | ||
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c index 52fec07064f0..83b62521d8d3 100644 --- a/drivers/acpi/acpica/exprep.c +++ b/drivers/acpi/acpica/exprep.c | |||
@@ -468,6 +468,23 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info) | |||
468 | 468 | ||
469 | acpi_ut_add_reference(obj_desc->field.region_obj); | 469 | acpi_ut_add_reference(obj_desc->field.region_obj); |
470 | 470 | ||
471 | /* allow full data read from EC address space */ | ||
472 | if (obj_desc->field.region_obj->region.space_id == | ||
473 | ACPI_ADR_SPACE_EC) { | ||
474 | if (obj_desc->common_field.bit_length > 8) { | ||
475 | unsigned width = | ||
476 | ACPI_ROUND_BITS_UP_TO_BYTES( | ||
477 | obj_desc->common_field.bit_length); | ||
478 | // access_bit_width is u8, don't overflow it | ||
479 | if (width > 8) | ||
480 | width = 8; | ||
481 | obj_desc->common_field.access_byte_width = | ||
482 | width; | ||
483 | obj_desc->common_field.access_bit_width = | ||
484 | 8 * width; | ||
485 | } | ||
486 | } | ||
487 | |||
471 | ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, | 488 | ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, |
472 | "RegionField: BitOff %X, Off %X, Gran %X, Region %p\n", | 489 | "RegionField: BitOff %X, Off %X, Gran %X, Region %p\n", |
473 | obj_desc->field.start_field_bit_offset, | 490 | obj_desc->field.start_field_bit_offset, |
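The exprep.c change widens EC field accesses to whole bytes but clamps the derived byte width to 8, keeping the resulting bit width within a 64-bit access and the u8 access_bit_width member that stores it. A small sketch of that round-up-and-clamp step:

#include <stdio.h>

#define ROUND_BITS_UP_TO_BYTES(b)  (((b) + 7) / 8)

/* Round the field's bit length up to bytes, then clamp to 8 bytes so
 * the derived bit width (8 * width) stays within a 64-bit access. */
static unsigned ec_access_byte_width(unsigned bit_length)
{
	unsigned width = ROUND_BITS_UP_TO_BYTES(bit_length);

	if (width > 8)
		width = 8;
	return width;
}

int main(void)
{
	printf("%u %u %u\n",
	       ec_access_byte_width(8),     /* 1 */
	       ec_access_byte_width(20),    /* 3 */
	       ec_access_byte_width(128));  /* clamped to 8 */
	return 0;
}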
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index e497639abfa7..e29382508718 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c | |||
@@ -601,12 +601,12 @@ static u32 acpi_ec_gpe_handler(void *data) | |||
601 | 601 | ||
602 | static acpi_status | 602 | static acpi_status |
603 | acpi_ec_space_handler(u32 function, acpi_physical_address address, | 603 | acpi_ec_space_handler(u32 function, acpi_physical_address address, |
604 | u32 bits, acpi_integer *value, | 604 | u32 bits, acpi_integer *value64, |
605 | void *handler_context, void *region_context) | 605 | void *handler_context, void *region_context) |
606 | { | 606 | { |
607 | struct acpi_ec *ec = handler_context; | 607 | struct acpi_ec *ec = handler_context; |
608 | int result = 0, i; | 608 | int result = 0, i, bytes = bits / 8; |
609 | u8 temp = 0; | 609 | u8 *value = (u8 *)value64; |
610 | 610 | ||
611 | if ((address > 0xFF) || !value || !handler_context) | 611 | if ((address > 0xFF) || !value || !handler_context) |
612 | return AE_BAD_PARAMETER; | 612 | return AE_BAD_PARAMETER; |
@@ -614,32 +614,15 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address, | |||
614 | if (function != ACPI_READ && function != ACPI_WRITE) | 614 | if (function != ACPI_READ && function != ACPI_WRITE) |
615 | return AE_BAD_PARAMETER; | 615 | return AE_BAD_PARAMETER; |
616 | 616 | ||
617 | if (bits != 8 && acpi_strict) | 617 | if (EC_FLAGS_MSI || bits > 8) |
618 | return AE_BAD_PARAMETER; | ||
619 | |||
620 | if (EC_FLAGS_MSI) | ||
621 | acpi_ec_burst_enable(ec); | 618 | acpi_ec_burst_enable(ec); |
622 | 619 | ||
623 | if (function == ACPI_READ) { | 620 | for (i = 0; i < bytes; ++i, ++address, ++value) |
624 | result = acpi_ec_read(ec, address, &temp); | 621 | result = (function == ACPI_READ) ? |
625 | *value = temp; | 622 | acpi_ec_read(ec, address, value) : |
626 | } else { | 623 | acpi_ec_write(ec, address, *value); |
627 | temp = 0xff & (*value); | ||
628 | result = acpi_ec_write(ec, address, temp); | ||
629 | } | ||
630 | |||
631 | for (i = 8; unlikely(bits - i > 0); i += 8) { | ||
632 | ++address; | ||
633 | if (function == ACPI_READ) { | ||
634 | result = acpi_ec_read(ec, address, &temp); | ||
635 | (*value) |= ((acpi_integer)temp) << i; | ||
636 | } else { | ||
637 | temp = 0xff & ((*value) >> i); | ||
638 | result = acpi_ec_write(ec, address, temp); | ||
639 | } | ||
640 | } | ||
641 | 624 | ||
642 | if (EC_FLAGS_MSI) | 625 | if (EC_FLAGS_MSI || bits > 8) |
643 | acpi_ec_burst_disable(ec); | 626 | acpi_ec_burst_disable(ec); |
644 | 627 | ||
645 | switch (result) { | 628 | switch (result) { |
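The EC space handler now treats the 64-bit value as a raw byte buffer and transfers one byte per iteration instead of shifting bytes in and out of the integer. A user-space sketch of that loop, assuming a toy 256-byte EC register file in place of acpi_ec_read()/acpi_ec_write():

#include <stdint.h>
#include <stdio.h>

/* Toy EC register file standing in for the real acpi_ec_read/write. */
static uint8_t ec_space[256];
static int ec_read(uint8_t addr, uint8_t *val)  { *val = ec_space[addr]; return 0; }
static int ec_write(uint8_t addr, uint8_t val)  { ec_space[addr] = val;  return 0; }

/* Byte-wise transfer over the value, viewed as a raw byte buffer in
 * host order -- the shape the reworked handler uses instead of
 * shifting each byte in and out of the 64-bit integer. */
static int ec_space_handler(int write, uint8_t address, unsigned bits,
			    uint64_t *value64)
{
	uint8_t *value = (uint8_t *)value64;
	int result = 0;

	for (unsigned i = 0; i < bits / 8; ++i, ++address, ++value)
		result = write ? ec_write(address, *value)
			       : ec_read(address, value);
	return result;
}

int main(void)
{
	uint64_t v = 0x1122334455667788ULL, back = 0;

	ec_space_handler(1, 0x10, 64, &v);     /* write 8 bytes */
	ec_space_handler(0, 0x10, 64, &back);  /* read them back */
	printf("%#llx\n", (unsigned long long)back);
	return 0;
}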
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 9e2feb6ce241..462200d93d71 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c | |||
@@ -570,6 +570,12 @@ static const struct pci_device_id ahci_pci_tbl[] = { | |||
570 | { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */ | 570 | { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */ |
571 | { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */ | 571 | { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */ |
572 | { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */ | 572 | { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */ |
573 | { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */ | ||
574 | { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */ | ||
575 | { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */ | ||
576 | { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */ | ||
577 | { PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */ | ||
578 | { PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */ | ||
573 | 579 | ||
574 | /* JMicron 360/1/3/5/6, match class to avoid IDE function */ | 580 | /* JMicron 360/1/3/5/6, match class to avoid IDE function */ |
575 | { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, | 581 | { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, |
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c index 6f3f2257d0f0..b5f614b9c245 100644 --- a/drivers/ata/ata_piix.c +++ b/drivers/ata/ata_piix.c | |||
@@ -291,6 +291,14 @@ static const struct pci_device_id piix_pci_tbl[] = { | |||
291 | { 0x8086, 0x3b2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, | 291 | { 0x8086, 0x3b2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, |
292 | /* SATA Controller IDE (PCH) */ | 292 | /* SATA Controller IDE (PCH) */ |
293 | { 0x8086, 0x3b2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata }, | 293 | { 0x8086, 0x3b2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata }, |
294 | /* SATA Controller IDE (CPT) */ | ||
295 | { 0x8086, 0x1c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata }, | ||
296 | /* SATA Controller IDE (CPT) */ | ||
297 | { 0x8086, 0x1c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata }, | ||
298 | /* SATA Controller IDE (CPT) */ | ||
299 | { 0x8086, 0x1c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, | ||
300 | /* SATA Controller IDE (CPT) */ | ||
301 | { 0x8086, 0x1c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, | ||
294 | { } /* terminate list */ | 302 | { } /* terminate list */ |
295 | }; | 303 | }; |
296 | 304 | ||
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 6728328f3bea..2401c9cf31b7 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
@@ -4348,6 +4348,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { | |||
4348 | { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, }, | 4348 | { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, }, |
4349 | { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, }, | 4349 | { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, }, |
4350 | 4350 | ||
4351 | /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */ | ||
4352 | { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, }, | ||
4353 | |||
4351 | /* devices which puke on READ_NATIVE_MAX */ | 4354 | /* devices which puke on READ_NATIVE_MAX */ |
4352 | { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, }, | 4355 | { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, }, |
4353 | { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA }, | 4356 | { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA }, |
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c index be7c39552980..ad647502d35c 100644 --- a/drivers/ata/pata_via.c +++ b/drivers/ata/pata_via.c | |||
@@ -697,6 +697,7 @@ static const struct pci_device_id via[] = { | |||
697 | { PCI_VDEVICE(VIA, 0x3164), }, | 697 | { PCI_VDEVICE(VIA, 0x3164), }, |
698 | { PCI_VDEVICE(VIA, 0x5324), }, | 698 | { PCI_VDEVICE(VIA, 0x5324), }, |
699 | { PCI_VDEVICE(VIA, 0xC409), VIA_IDFLAG_SINGLE }, | 699 | { PCI_VDEVICE(VIA, 0xC409), VIA_IDFLAG_SINGLE }, |
700 | { PCI_VDEVICE(VIA, 0x9001), VIA_IDFLAG_SINGLE }, | ||
700 | 701 | ||
701 | { }, | 702 | { }, |
702 | }; | 703 | }; |
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c index 3999a5f25f38..8a713f1e9653 100644 --- a/drivers/char/agp/intel-agp.c +++ b/drivers/char/agp/intel-agp.c | |||
@@ -8,6 +8,7 @@ | |||
8 | #include <linux/kernel.h> | 8 | #include <linux/kernel.h> |
9 | #include <linux/pagemap.h> | 9 | #include <linux/pagemap.h> |
10 | #include <linux/agp_backend.h> | 10 | #include <linux/agp_backend.h> |
11 | #include <asm/smp.h> | ||
11 | #include "agp.h" | 12 | #include "agp.h" |
12 | 13 | ||
13 | /* | 14 | /* |
@@ -815,12 +816,6 @@ static void intel_i830_setup_flush(void) | |||
815 | intel_i830_fini_flush(); | 816 | intel_i830_fini_flush(); |
816 | } | 817 | } |
817 | 818 | ||
818 | static void | ||
819 | do_wbinvd(void *null) | ||
820 | { | ||
821 | wbinvd(); | ||
822 | } | ||
823 | |||
824 | /* The chipset_flush interface needs to get data that has already been | 819 | /* The chipset_flush interface needs to get data that has already been |
825 | * flushed out of the CPU all the way out to main memory, because the GPU | 820 | * flushed out of the CPU all the way out to main memory, because the GPU |
826 | * doesn't snoop those buffers. | 821 | * doesn't snoop those buffers. |
@@ -837,12 +832,10 @@ static void intel_i830_chipset_flush(struct agp_bridge_data *bridge) | |||
837 | 832 | ||
838 | memset(pg, 0, 1024); | 833 | memset(pg, 0, 1024); |
839 | 834 | ||
840 | if (cpu_has_clflush) { | 835 | if (cpu_has_clflush) |
841 | clflush_cache_range(pg, 1024); | 836 | clflush_cache_range(pg, 1024); |
842 | } else { | 837 | else if (wbinvd_on_all_cpus() != 0) |
843 | if (on_each_cpu(do_wbinvd, NULL, 1) != 0) | 838 | printk(KERN_ERR "Timed out waiting for cache flush.\n"); |
844 | printk(KERN_ERR "Timed out waiting for cache flush.\n"); | ||
845 | } | ||
846 | } | 839 | } |
847 | 840 | ||
848 | /* The intel i830 automatically initializes the agp aperture during POST. | 841 | /* The intel i830 automatically initializes the agp aperture during POST. |
diff --git a/drivers/char/raw.c b/drivers/char/raw.c index 64acd05f71c8..9abc3a19d53a 100644 --- a/drivers/char/raw.c +++ b/drivers/char/raw.c | |||
@@ -247,6 +247,7 @@ static const struct file_operations raw_fops = { | |||
247 | .aio_read = generic_file_aio_read, | 247 | .aio_read = generic_file_aio_read, |
248 | .write = do_sync_write, | 248 | .write = do_sync_write, |
249 | .aio_write = blkdev_aio_write, | 249 | .aio_write = blkdev_aio_write, |
250 | .fsync = block_fsync, | ||
250 | .open = raw_open, | 251 | .open = raw_open, |
251 | .release= raw_release, | 252 | .release= raw_release, |
252 | .ioctl = raw_ioctl, | 253 | .ioctl = raw_ioctl, |
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c index dcb9083ecde0..76253cf27028 100644 --- a/drivers/char/tty_io.c +++ b/drivers/char/tty_io.c | |||
@@ -1423,6 +1423,8 @@ static void release_one_tty(struct work_struct *work) | |||
1423 | list_del_init(&tty->tty_files); | 1423 | list_del_init(&tty->tty_files); |
1424 | file_list_unlock(); | 1424 | file_list_unlock(); |
1425 | 1425 | ||
1426 | put_pid(tty->pgrp); | ||
1427 | put_pid(tty->session); | ||
1426 | free_tty_struct(tty); | 1428 | free_tty_struct(tty); |
1427 | } | 1429 | } |
1428 | 1430 | ||
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index 7d0f00a935fa..99907c32ea29 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c | |||
@@ -104,6 +104,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, | |||
104 | if (connector->status == connector_status_disconnected) { | 104 | if (connector->status == connector_status_disconnected) { |
105 | DRM_DEBUG_KMS("%s is disconnected\n", | 105 | DRM_DEBUG_KMS("%s is disconnected\n", |
106 | drm_get_connector_name(connector)); | 106 | drm_get_connector_name(connector)); |
107 | drm_mode_connector_update_edid_property(connector, NULL); | ||
107 | goto prune; | 108 | goto prune; |
108 | } | 109 | } |
109 | 110 | ||
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index ab6c97330412..bfd0e4acfee0 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c | |||
@@ -85,6 +85,8 @@ static struct edid_quirk { | |||
85 | 85 | ||
86 | /* Envision Peripherals, Inc. EN-7100e */ | 86 | /* Envision Peripherals, Inc. EN-7100e */ |
87 | { "EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH }, | 87 | { "EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH }, |
88 | /* Envision EN2028 */ | ||
89 | { "EPI", 8232, EDID_QUIRK_PREFER_LARGE_60 }, | ||
88 | 90 | ||
89 | /* Funai Electronics PM36B */ | 91 | /* Funai Electronics PM36B */ |
90 | { "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 | | 92 | { "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 | |
@@ -707,15 +709,6 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev, | |||
707 | mode->vsync_end = mode->vsync_start + vsync_pulse_width; | 709 | mode->vsync_end = mode->vsync_start + vsync_pulse_width; |
708 | mode->vtotal = mode->vdisplay + vblank; | 710 | mode->vtotal = mode->vdisplay + vblank; |
709 | 711 | ||
710 | /* perform the basic check for the detailed timing */ | ||
711 | if (mode->hsync_end > mode->htotal || | ||
712 | mode->vsync_end > mode->vtotal) { | ||
713 | drm_mode_destroy(dev, mode); | ||
714 | DRM_DEBUG_KMS("Incorrect detailed timing. " | ||
715 | "Sync is beyond the blank.\n"); | ||
716 | return NULL; | ||
717 | } | ||
718 | |||
719 | /* Some EDIDs have bogus h/vtotal values */ | 712 | /* Some EDIDs have bogus h/vtotal values */ |
720 | if (mode->hsync_end > mode->htotal) | 713 | if (mode->hsync_end > mode->htotal) |
721 | mode->htotal = mode->hsync_end + 1; | 714 | mode->htotal = mode->hsync_end + 1; |
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 08d14df3bb42..4804872f8b19 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c | |||
@@ -140,14 +140,16 @@ int drm_open(struct inode *inode, struct file *filp) | |||
140 | spin_unlock(&dev->count_lock); | 140 | spin_unlock(&dev->count_lock); |
141 | } | 141 | } |
142 | out: | 142 | out: |
143 | mutex_lock(&dev->struct_mutex); | 143 | if (!retcode) { |
144 | if (minor->type == DRM_MINOR_LEGACY) { | 144 | mutex_lock(&dev->struct_mutex); |
145 | BUG_ON((dev->dev_mapping != NULL) && | 145 | if (minor->type == DRM_MINOR_LEGACY) { |
146 | (dev->dev_mapping != inode->i_mapping)); | 146 | if (dev->dev_mapping == NULL) |
147 | if (dev->dev_mapping == NULL) | 147 | dev->dev_mapping = inode->i_mapping; |
148 | dev->dev_mapping = inode->i_mapping; | 148 | else if (dev->dev_mapping != inode->i_mapping) |
149 | retcode = -ENODEV; | ||
150 | } | ||
151 | mutex_unlock(&dev->struct_mutex); | ||
149 | } | 152 | } |
150 | mutex_unlock(&dev->struct_mutex); | ||
151 | 153 | ||
152 | return retcode; | 154 | return retcode; |
153 | } | 155 | } |
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 93031a75d112..1238bc981bb4 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -899,6 +899,14 @@ static const struct dmi_system_id intel_no_lvds[] = { | |||
899 | DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"), | 899 | DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"), |
900 | }, | 900 | }, |
901 | }, | 901 | }, |
902 | { | ||
903 | .callback = intel_no_lvds_dmi_callback, | ||
904 | .ident = "Clientron U800", | ||
905 | .matches = { | ||
906 | DMI_MATCH(DMI_SYS_VENDOR, "Clientron"), | ||
907 | DMI_MATCH(DMI_PRODUCT_NAME, "U800"), | ||
908 | }, | ||
909 | }, | ||
902 | 910 | ||
903 | { } /* terminating entry */ | 911 | { } /* terminating entry */ |
904 | }; | 912 | }; |
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c index d75788feac6c..b1f929d4e1aa 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c | |||
@@ -881,11 +881,16 @@ static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg) | |||
881 | uint8_t attr = U8((*ptr)++), shift; | 881 | uint8_t attr = U8((*ptr)++), shift; |
882 | uint32_t saved, dst; | 882 | uint32_t saved, dst; |
883 | int dptr = *ptr; | 883 | int dptr = *ptr; |
884 | uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3]; | ||
884 | SDEBUG(" dst: "); | 885 | SDEBUG(" dst: "); |
885 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); | 886 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); |
887 | /* the op needs the full dst value */ | ||
888 | dst = saved; | ||
886 | shift = atom_get_src(ctx, attr, ptr); | 889 | shift = atom_get_src(ctx, attr, ptr); |
887 | SDEBUG(" shift: %d\n", shift); | 890 | SDEBUG(" shift: %d\n", shift); |
888 | dst <<= shift; | 891 | dst <<= shift; |
892 | dst &= atom_arg_mask[dst_align]; | ||
893 | dst >>= atom_arg_shift[dst_align]; | ||
889 | SDEBUG(" dst: "); | 894 | SDEBUG(" dst: "); |
890 | atom_put_dst(ctx, arg, attr, &dptr, dst, saved); | 895 | atom_put_dst(ctx, arg, attr, &dptr, dst, saved); |
891 | } | 896 | } |
@@ -895,11 +900,16 @@ static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg) | |||
895 | uint8_t attr = U8((*ptr)++), shift; | 900 | uint8_t attr = U8((*ptr)++), shift; |
896 | uint32_t saved, dst; | 901 | uint32_t saved, dst; |
897 | int dptr = *ptr; | 902 | int dptr = *ptr; |
903 | uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3]; | ||
898 | SDEBUG(" dst: "); | 904 | SDEBUG(" dst: "); |
899 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); | 905 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); |
906 | /* op needs to full dst value */ | ||
907 | dst = saved; | ||
900 | shift = atom_get_src(ctx, attr, ptr); | 908 | shift = atom_get_src(ctx, attr, ptr); |
901 | SDEBUG(" shift: %d\n", shift); | 909 | SDEBUG(" shift: %d\n", shift); |
902 | dst >>= shift; | 910 | dst >>= shift; |
911 | dst &= atom_arg_mask[dst_align]; | ||
912 | dst >>= atom_arg_shift[dst_align]; | ||
903 | SDEBUG(" dst: "); | 913 | SDEBUG(" dst: "); |
904 | atom_put_dst(ctx, arg, attr, &dptr, dst, saved); | 914 | atom_put_dst(ctx, arg, attr, &dptr, dst, saved); |
905 | } | 915 | } |
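The atom_op_shl/shr fix shifts the full saved register value and only then masks and re-aligns the result to the destination field. A sketch of that order of operations for a single 16-bit field inside a 32-bit register (the field mask and shift are illustrative, not taken from the atom argument tables):

#include <stdint.h>
#include <stdio.h>

/* A destination field inside a 32-bit register, described the way the
 * atom interpreter does: a mask selecting the bytes and the shift that
 * aligns them (here: the middle 16 bits). */
#define FIELD_MASK   0x00ffff00u
#define FIELD_SHIFT  8

static uint32_t get_field(uint32_t reg)
{
	return (reg & FIELD_MASK) >> FIELD_SHIFT;
}

static uint32_t put_field(uint32_t reg, uint32_t v)
{
	return (reg & ~FIELD_MASK) | ((v << FIELD_SHIFT) & FIELD_MASK);
}

/* Shift-left on the destination: operate on the full register value,
 * then reduce the result back to the field's mask and alignment before
 * storing -- the ordering the atom_op_shl fix establishes. */
static uint32_t op_shl(uint32_t reg, unsigned shift)
{
	uint32_t dst = reg;                 /* full value, not just the field */

	dst <<= shift;
	dst &= FIELD_MASK;                  /* keep only the destination bytes */
	dst >>= FIELD_SHIFT;                /* re-align to the field */
	return put_field(reg, dst);
}

int main(void)
{
	uint32_t reg = 0x12345678;

	printf("field before: %#x\n", get_field(reg));  /* 0x3456 */
	reg = op_shl(reg, 4);
	printf("field after:  %#x\n", get_field(reg));  /* 0x4567 */
	return 0;
}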
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 43b55a030b4d..5bdfaf2780d8 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c | |||
@@ -364,11 +364,12 @@ void r300_gpu_init(struct radeon_device *rdev) | |||
364 | 364 | ||
365 | r100_hdp_reset(rdev); | 365 | r100_hdp_reset(rdev); |
366 | /* FIXME: rv380 one pipes ? */ | 366 | /* FIXME: rv380 one pipes ? */ |
367 | if ((rdev->family == CHIP_R300) || (rdev->family == CHIP_R350)) { | 367 | if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) || |
368 | (rdev->family == CHIP_R350)) { | ||
368 | /* r300,r350 */ | 369 | /* r300,r350 */ |
369 | rdev->num_gb_pipes = 2; | 370 | rdev->num_gb_pipes = 2; |
370 | } else { | 371 | } else { |
371 | /* rv350,rv370,rv380 */ | 372 | /* rv350,rv370,rv380,r300 AD */ |
372 | rdev->num_gb_pipes = 1; | 373 | rdev->num_gb_pipes = 1; |
373 | } | 374 | } |
374 | rdev->num_z_pipes = 1; | 375 | rdev->num_z_pipes = 1; |
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index e7b19440102e..81b832eec095 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c | |||
@@ -670,7 +670,9 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct | |||
670 | dac = RBIOS8(dac_info + 0x3) & 0xf; | 670 | dac = RBIOS8(dac_info + 0x3) & 0xf; |
671 | p_dac->ps2_pdac_adj = (bg << 8) | (dac); | 671 | p_dac->ps2_pdac_adj = (bg << 8) | (dac); |
672 | } | 672 | } |
673 | found = 1; | 673 | /* if the values are all zeros, use the table */ |
674 | if (p_dac->ps2_pdac_adj) | ||
675 | found = 1; | ||
674 | } | 676 | } |
675 | 677 | ||
676 | out: | 678 | out: |
@@ -812,7 +814,9 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct | |||
812 | bg = RBIOS8(dac_info + 0x10) & 0xf; | 814 | bg = RBIOS8(dac_info + 0x10) & 0xf; |
813 | dac = RBIOS8(dac_info + 0x11) & 0xf; | 815 | dac = RBIOS8(dac_info + 0x11) & 0xf; |
814 | tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20); | 816 | tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20); |
815 | found = 1; | 817 | /* if the values are all zeros, use the table */ |
818 | if (tv_dac->ps2_tvdac_adj) | ||
819 | found = 1; | ||
816 | } else if (rev > 1) { | 820 | } else if (rev > 1) { |
817 | bg = RBIOS8(dac_info + 0xc) & 0xf; | 821 | bg = RBIOS8(dac_info + 0xc) & 0xf; |
818 | dac = (RBIOS8(dac_info + 0xc) >> 4) & 0xf; | 822 | dac = (RBIOS8(dac_info + 0xc) >> 4) & 0xf; |
@@ -825,7 +829,9 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct | |||
825 | bg = RBIOS8(dac_info + 0xe) & 0xf; | 829 | bg = RBIOS8(dac_info + 0xe) & 0xf; |
826 | dac = (RBIOS8(dac_info + 0xe) >> 4) & 0xf; | 830 | dac = (RBIOS8(dac_info + 0xe) >> 4) & 0xf; |
827 | tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20); | 831 | tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20); |
828 | found = 1; | 832 | /* if the values are all zeros, use the table */ |
833 | if (tv_dac->ps2_tvdac_adj) | ||
834 | found = 1; | ||
829 | } | 835 | } |
830 | tv_dac->tv_std = radeon_combios_get_tv_info(rdev); | 836 | tv_dac->tv_std = radeon_combios_get_tv_info(rdev); |
831 | } | 837 | } |
@@ -842,7 +848,9 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct | |||
842 | (bg << 16) | (dac << 20); | 848 | (bg << 16) | (dac << 20); |
843 | tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj; | 849 | tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj; |
844 | tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj; | 850 | tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj; |
845 | found = 1; | 851 | /* if the values are all zeros, use the table */ |
852 | if (tv_dac->ps2_tvdac_adj) | ||
853 | found = 1; | ||
846 | } else { | 854 | } else { |
847 | bg = RBIOS8(dac_info + 0x4) & 0xf; | 855 | bg = RBIOS8(dac_info + 0x4) & 0xf; |
848 | dac = RBIOS8(dac_info + 0x5) & 0xf; | 856 | dac = RBIOS8(dac_info + 0x5) & 0xf; |
@@ -850,7 +858,9 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct | |||
850 | (bg << 16) | (dac << 20); | 858 | (bg << 16) | (dac << 20); |
851 | tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj; | 859 | tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj; |
852 | tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj; | 860 | tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj; |
853 | found = 1; | 861 | /* if the values are all zeros, use the table */ |
862 | if (tv_dac->ps2_tvdac_adj) | ||
863 | found = 1; | ||
854 | } | 864 | } |
855 | } else { | 865 | } else { |
856 | DRM_INFO("No TV DAC info found in BIOS\n"); | 866 | DRM_INFO("No TV DAC info found in BIOS\n"); |
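The combios changes treat an all-zero DAC adjustment from the BIOS as "no usable data" so the driver falls back to its default table. A tiny sketch of that validate-or-fall-back pattern (the default value used here is made up for the example):

#include <stdio.h>

struct dac_adj { unsigned ps2; unsigned pal; unsigned ntsc; };

/* Hypothetical built-in default, used when the BIOS entry is empty. */
static const struct dac_adj default_table = { 0x00880088, 0x00880088, 0x00880088 };

/* Accept the BIOS-provided adjustment only if it is non-zero; an
 * all-zero entry is treated as "no usable data". */
static struct dac_adj pick_dac_adj(const struct dac_adj *from_bios)
{
	if (from_bios && from_bios->ps2)
		return *from_bios;
	return default_table;
}

int main(void)
{
	struct dac_adj empty = { 0, 0, 0 };
	struct dac_adj good  = { 0x00660066, 0x00660066, 0x00660066 };

	printf("%#x %#x\n", pick_dac_adj(&empty).ps2, pick_dac_adj(&good).ps2);
	return 0;
}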
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 65f81942f399..2bdfbcd6d0e6 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c | |||
@@ -162,12 +162,14 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector, | |||
162 | { | 162 | { |
163 | struct drm_device *dev = connector->dev; | 163 | struct drm_device *dev = connector->dev; |
164 | struct drm_connector *conflict; | 164 | struct drm_connector *conflict; |
165 | struct radeon_connector *radeon_conflict; | ||
165 | int i; | 166 | int i; |
166 | 167 | ||
167 | list_for_each_entry(conflict, &dev->mode_config.connector_list, head) { | 168 | list_for_each_entry(conflict, &dev->mode_config.connector_list, head) { |
168 | if (conflict == connector) | 169 | if (conflict == connector) |
169 | continue; | 170 | continue; |
170 | 171 | ||
172 | radeon_conflict = to_radeon_connector(conflict); | ||
171 | for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { | 173 | for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { |
172 | if (conflict->encoder_ids[i] == 0) | 174 | if (conflict->encoder_ids[i] == 0) |
173 | break; | 175 | break; |
@@ -177,6 +179,9 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector, | |||
177 | if (conflict->status != connector_status_connected) | 179 | if (conflict->status != connector_status_connected) |
178 | continue; | 180 | continue; |
179 | 181 | ||
182 | if (radeon_conflict->use_digital) | ||
183 | continue; | ||
184 | |||
180 | if (priority == true) { | 185 | if (priority == true) { |
181 | DRM_INFO("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict)); | 186 | DRM_INFO("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict)); |
182 | DRM_INFO("in favor of %s\n", drm_get_connector_name(connector)); | 187 | DRM_INFO("in favor of %s\n", drm_get_connector_name(connector)); |
@@ -315,7 +320,7 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr | |||
315 | radeon_encoder = to_radeon_encoder(encoder); | 320 | radeon_encoder = to_radeon_encoder(encoder); |
316 | if (!radeon_encoder->enc_priv) | 321 | if (!radeon_encoder->enc_priv) |
317 | return 0; | 322 | return 0; |
318 | if (rdev->is_atom_bios) { | 323 | if (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom) { |
319 | struct radeon_encoder_atom_dac *dac_int; | 324 | struct radeon_encoder_atom_dac *dac_int; |
320 | dac_int = radeon_encoder->enc_priv; | 325 | dac_int = radeon_encoder->enc_priv; |
321 | dac_int->tv_std = val; | 326 | dac_int->tv_std = val; |
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c index 06123ba31d31..f129bbb8bbc8 100644 --- a/drivers/gpu/drm/radeon/radeon_cp.c +++ b/drivers/gpu/drm/radeon/radeon_cp.c | |||
@@ -417,8 +417,9 @@ static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv) | |||
417 | return -EBUSY; | 417 | return -EBUSY; |
418 | } | 418 | } |
419 | 419 | ||
420 | static void radeon_init_pipes(drm_radeon_private_t *dev_priv) | 420 | static void radeon_init_pipes(struct drm_device *dev) |
421 | { | 421 | { |
422 | drm_radeon_private_t *dev_priv = dev->dev_private; | ||
422 | uint32_t gb_tile_config, gb_pipe_sel = 0; | 423 | uint32_t gb_tile_config, gb_pipe_sel = 0; |
423 | 424 | ||
424 | if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530) { | 425 | if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530) { |
@@ -436,11 +437,12 @@ static void radeon_init_pipes(drm_radeon_private_t *dev_priv) | |||
436 | dev_priv->num_gb_pipes = ((gb_pipe_sel >> 12) & 0x3) + 1; | 437 | dev_priv->num_gb_pipes = ((gb_pipe_sel >> 12) & 0x3) + 1; |
437 | } else { | 438 | } else { |
438 | /* R3xx */ | 439 | /* R3xx */ |
439 | if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300) || | 440 | if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300 && |
441 | dev->pdev->device != 0x4144) || | ||
440 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350)) { | 442 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350)) { |
441 | dev_priv->num_gb_pipes = 2; | 443 | dev_priv->num_gb_pipes = 2; |
442 | } else { | 444 | } else { |
443 | /* R3Vxx */ | 445 | /* RV3xx/R300 AD */ |
444 | dev_priv->num_gb_pipes = 1; | 446 | dev_priv->num_gb_pipes = 1; |
445 | } | 447 | } |
446 | } | 448 | } |
@@ -736,7 +738,7 @@ static int radeon_do_engine_reset(struct drm_device * dev) | |||
736 | 738 | ||
737 | /* setup the raster pipes */ | 739 | /* setup the raster pipes */ |
738 | if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R300) | 740 | if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R300) |
739 | radeon_init_pipes(dev_priv); | 741 | radeon_init_pipes(dev); |
740 | 742 | ||
741 | /* Reset the CP ring */ | 743 | /* Reset the CP ring */ |
742 | radeon_do_cp_reset(dev_priv); | 744 | radeon_do_cp_reset(dev_priv); |
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index e9d085021c1f..9933c2c70648 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c | |||
@@ -193,11 +193,13 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error) | |||
193 | radeon_bo_list_fence(&parser->validated, parser->ib->fence); | 193 | radeon_bo_list_fence(&parser->validated, parser->ib->fence); |
194 | } | 194 | } |
195 | radeon_bo_list_unreserve(&parser->validated); | 195 | radeon_bo_list_unreserve(&parser->validated); |
196 | for (i = 0; i < parser->nrelocs; i++) { | 196 | if (parser->relocs != NULL) { |
197 | if (parser->relocs[i].gobj) { | 197 | for (i = 0; i < parser->nrelocs; i++) { |
198 | mutex_lock(&parser->rdev->ddev->struct_mutex); | 198 | if (parser->relocs[i].gobj) { |
199 | drm_gem_object_unreference(parser->relocs[i].gobj); | 199 | mutex_lock(&parser->rdev->ddev->struct_mutex); |
200 | mutex_unlock(&parser->rdev->ddev->struct_mutex); | 200 | drm_gem_object_unreference(parser->relocs[i].gobj); |
201 | mutex_unlock(&parser->rdev->ddev->struct_mutex); | ||
202 | } | ||
201 | } | 203 | } |
202 | } | 204 | } |
203 | kfree(parser->track); | 205 | kfree(parser->track); |
@@ -246,7 +248,8 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
246 | } | 248 | } |
247 | r = radeon_cs_parser_relocs(&parser); | 249 | r = radeon_cs_parser_relocs(&parser); |
248 | if (r) { | 250 | if (r) { |
249 | DRM_ERROR("Failed to parse relocation !\n"); | 251 | if (r != -ERESTARTSYS) |
252 | DRM_ERROR("Failed to parse relocation %d!\n", r); | ||
250 | radeon_cs_parser_fini(&parser, r); | 253 | radeon_cs_parser_fini(&parser, r); |
251 | mutex_unlock(&rdev->cs_mutex); | 254 | mutex_unlock(&rdev->cs_mutex); |
252 | return r; | 255 | return r; |
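The cleanup fix above guards against a relocation array that was never allocated and stops logging -ERESTARTSYS as a real failure. A small userspace sketch of the same two rules, using illustrative names rather than the driver's API (ERESTARTSYS is defined locally because it is kernel-internal):

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

#ifndef ERESTARTSYS
#define ERESTARTSYS 512	/* kernel-internal "restart the syscall" code */
#endif

struct reloc { void *gobj; };

struct parser {
	struct reloc *relocs;
	int nrelocs;
};

static void parser_fini(struct parser *p, int error)
{
	int i;

	/* relocs may still be NULL if parsing failed before allocation */
	if (p->relocs != NULL) {
		for (i = 0; i < p->nrelocs; i++) {
			if (p->relocs[i].gobj)
				free(p->relocs[i].gobj); /* drop the reference */
		}
	}
	free(p->relocs);
	p->relocs = NULL;

	/* an interrupted syscall is routine, not an error worth logging */
	if (error && error != -ERESTARTSYS)
		fprintf(stderr, "parse failed: %d\n", error);
}

int main(void)
{
	struct parser p = { .relocs = NULL, .nrelocs = 0 };

	parser_fini(&p, -ERESTARTSYS);	/* silent */
	parser_fini(&p, -EINVAL);	/* logged */
	return 0;
}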
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index f76ae34878d7..727a4622be93 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -655,6 +655,14 @@ int radeon_device_init(struct radeon_device *rdev, | |||
655 | return r; | 655 | return r; |
656 | radeon_check_arguments(rdev); | 656 | radeon_check_arguments(rdev); |
657 | 657 | ||
658 | /* all of the newer IGP chips have an internal gart | ||
659 | * However some rs4xx report as AGP, so remove that here. | ||
660 | */ | ||
661 | if ((rdev->family >= CHIP_RS400) && | ||
662 | (rdev->flags & RADEON_IS_IGP)) { | ||
663 | rdev->flags &= ~RADEON_IS_AGP; | ||
664 | } | ||
665 | |||
658 | if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) { | 666 | if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) { |
659 | radeon_agp_disable(rdev); | 667 | radeon_agp_disable(rdev); |
660 | } | 668 | } |
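The added block clears a bogus AGP flag on IGP chips once, early in init, before any code acts on it. A compact sketch of that "sanitize contradictory flags up front" idea, with stand-in flag and family names:

#include <stdio.h>

#define FLAG_IS_AGP (1u << 0)
#define FLAG_IS_IGP (1u << 1)

enum family { CHIP_R300 = 1, CHIP_RS400, CHIP_RS480, CHIP_RS690 };

struct rdev_stub {
	enum family family;
	unsigned int flags;
};

static void sanitize_flags(struct rdev_stub *rdev)
{
	/* newer IGPs have an internal gart; some rs4xx still report AGP,
	 * so drop the AGP flag before anything branches on it */
	if (rdev->family >= CHIP_RS400 && (rdev->flags & FLAG_IS_IGP))
		rdev->flags &= ~FLAG_IS_AGP;
}

int main(void)
{
	struct rdev_stub rdev = { CHIP_RS480, FLAG_IS_AGP | FLAG_IS_IGP };

	sanitize_flags(&rdev);
	printf("AGP still set: %s\n",
	       (rdev.flags & FLAG_IS_AGP) ? "yes" : "no"); /* prints "no" */
	return 0;
}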
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 3c91724457ca..7626bd501ffc 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c | |||
@@ -1276,8 +1276,12 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, | |||
1276 | case ENCODER_OBJECT_ID_INTERNAL_DAC2: | 1276 | case ENCODER_OBJECT_ID_INTERNAL_DAC2: |
1277 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: | 1277 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: |
1278 | atombios_dac_setup(encoder, ATOM_ENABLE); | 1278 | atombios_dac_setup(encoder, ATOM_ENABLE); |
1279 | if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) | 1279 | if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) { |
1280 | atombios_tv_setup(encoder, ATOM_ENABLE); | 1280 | if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) |
1281 | atombios_tv_setup(encoder, ATOM_ENABLE); | ||
1282 | else | ||
1283 | atombios_tv_setup(encoder, ATOM_DISABLE); | ||
1284 | } | ||
1281 | break; | 1285 | break; |
1282 | } | 1286 | } |
1283 | atombios_apply_encoder_quirks(encoder, adjusted_mode); | 1287 | atombios_apply_encoder_quirks(encoder, adjusted_mode); |
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_tv.c b/drivers/gpu/drm/radeon/radeon_legacy_tv.c index 417684daef4c..f2ed27c8055b 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_tv.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_tv.c | |||
@@ -57,6 +57,10 @@ | |||
57 | #define NTSC_TV_PLL_N_14 693 | 57 | #define NTSC_TV_PLL_N_14 693 |
58 | #define NTSC_TV_PLL_P_14 7 | 58 | #define NTSC_TV_PLL_P_14 7 |
59 | 59 | ||
60 | #define PAL_TV_PLL_M_14 19 | ||
61 | #define PAL_TV_PLL_N_14 353 | ||
62 | #define PAL_TV_PLL_P_14 5 | ||
63 | |||
60 | #define VERT_LEAD_IN_LINES 2 | 64 | #define VERT_LEAD_IN_LINES 2 |
61 | #define FRAC_BITS 0xe | 65 | #define FRAC_BITS 0xe |
62 | #define FRAC_MASK 0x3fff | 66 | #define FRAC_MASK 0x3fff |
@@ -205,9 +209,24 @@ static const struct radeon_tv_mode_constants available_tv_modes[] = { | |||
205 | 630627, /* defRestart */ | 209 | 630627, /* defRestart */ |
206 | 347, /* crtcPLL_N */ | 210 | 347, /* crtcPLL_N */ |
207 | 14, /* crtcPLL_M */ | 211 | 14, /* crtcPLL_M */ |
208 | 8, /* crtcPLL_postDiv */ | 212 | 8, /* crtcPLL_postDiv */ |
209 | 1022, /* pixToTV */ | 213 | 1022, /* pixToTV */ |
210 | }, | 214 | }, |
215 | { /* PAL timing for 14 Mhz ref clk */ | ||
216 | 800, /* horResolution */ | ||
217 | 600, /* verResolution */ | ||
218 | TV_STD_PAL, /* standard */ | ||
219 | 1131, /* horTotal */ | ||
220 | 742, /* verTotal */ | ||
221 | 813, /* horStart */ | ||
222 | 840, /* horSyncStart */ | ||
223 | 633, /* verSyncStart */ | ||
224 | 708369, /* defRestart */ | ||
225 | 211, /* crtcPLL_N */ | ||
226 | 9, /* crtcPLL_M */ | ||
227 | 8, /* crtcPLL_postDiv */ | ||
228 | 759, /* pixToTV */ | ||
229 | }, | ||
211 | }; | 230 | }; |
212 | 231 | ||
213 | #define N_AVAILABLE_MODES ARRAY_SIZE(available_tv_modes) | 232 | #define N_AVAILABLE_MODES ARRAY_SIZE(available_tv_modes) |
@@ -242,7 +261,7 @@ static const struct radeon_tv_mode_constants *radeon_legacy_tv_get_std_mode(stru | |||
242 | if (pll->reference_freq == 2700) | 261 | if (pll->reference_freq == 2700) |
243 | const_ptr = &available_tv_modes[1]; | 262 | const_ptr = &available_tv_modes[1]; |
244 | else | 263 | else |
245 | const_ptr = &available_tv_modes[1]; /* FIX ME */ | 264 | const_ptr = &available_tv_modes[3]; |
246 | } | 265 | } |
247 | return const_ptr; | 266 | return const_ptr; |
248 | } | 267 | } |
@@ -685,9 +704,9 @@ void radeon_legacy_tv_mode_set(struct drm_encoder *encoder, | |||
685 | n = PAL_TV_PLL_N_27; | 704 | n = PAL_TV_PLL_N_27; |
686 | p = PAL_TV_PLL_P_27; | 705 | p = PAL_TV_PLL_P_27; |
687 | } else { | 706 | } else { |
688 | m = PAL_TV_PLL_M_27; | 707 | m = PAL_TV_PLL_M_14; |
689 | n = PAL_TV_PLL_N_27; | 708 | n = PAL_TV_PLL_N_14; |
690 | p = PAL_TV_PLL_P_27; | 709 | p = PAL_TV_PLL_P_14; |
691 | } | 710 | } |
692 | } | 711 | } |
693 | 712 | ||
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index c3818562a13e..a27c09f33f86 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
@@ -175,7 +175,7 @@ void rs600_gart_tlb_flush(struct radeon_device *rdev) | |||
175 | WREG32_MC(R_000100_MC_PT0_CNTL, tmp); | 175 | WREG32_MC(R_000100_MC_PT0_CNTL, tmp); |
176 | 176 | ||
177 | tmp = RREG32_MC(R_000100_MC_PT0_CNTL); | 177 | tmp = RREG32_MC(R_000100_MC_PT0_CNTL); |
178 | tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) & S_000100_INVALIDATE_L2_CACHE(1); | 178 | tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) | S_000100_INVALIDATE_L2_CACHE(1); |
179 | WREG32_MC(R_000100_MC_PT0_CNTL, tmp); | 179 | WREG32_MC(R_000100_MC_PT0_CNTL, tmp); |
180 | 180 | ||
181 | tmp = RREG32_MC(R_000100_MC_PT0_CNTL); | 181 | tmp = RREG32_MC(R_000100_MC_PT0_CNTL); |
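The rs600 one-liner is a classic bitmask slip: two single-bit field macros combined with & intersect to zero, so the OR-assign set nothing. A tiny standalone demonstration (the macro shapes are assumed and the register is reduced to a plain variable):

#include <stdio.h>
#include <stdint.h>

#define S_INVALIDATE_ALL_L1_TLBS(x)  (((x) & 0x1) << 0)
#define S_INVALIDATE_L2_CACHE(x)     (((x) & 0x1) << 1)

int main(void)
{
	uint32_t reg = 0;

	/* & of two disjoint single-bit masks is 0: nothing gets set */
	uint32_t wrong = S_INVALIDATE_ALL_L1_TLBS(1) & S_INVALIDATE_L2_CACHE(1);
	/* | is what actually combines the two bits */
	uint32_t right = S_INVALIDATE_ALL_L1_TLBS(1) | S_INVALIDATE_L2_CACHE(1);

	printf("wrong mask: 0x%x\n", reg | wrong);  /* 0x0: no bit set */
	printf("right mask: 0x%x\n", reg | right);  /* 0x3: both bits set */
	return 0;
}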
diff --git a/drivers/hid/hid-gyration.c b/drivers/hid/hid-gyration.c index cab13e8c7d29..62416e6baeca 100644 --- a/drivers/hid/hid-gyration.c +++ b/drivers/hid/hid-gyration.c | |||
@@ -53,10 +53,13 @@ static int gyration_input_mapping(struct hid_device *hdev, struct hid_input *hi, | |||
53 | static int gyration_event(struct hid_device *hdev, struct hid_field *field, | 53 | static int gyration_event(struct hid_device *hdev, struct hid_field *field, |
54 | struct hid_usage *usage, __s32 value) | 54 | struct hid_usage *usage, __s32 value) |
55 | { | 55 | { |
56 | struct input_dev *input = field->hidinput->input; | 56 | |
57 | if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput) | ||
58 | return 0; | ||
57 | 59 | ||
58 | if ((usage->hid & HID_USAGE_PAGE) == HID_UP_GENDESK && | 60 | if ((usage->hid & HID_USAGE_PAGE) == HID_UP_GENDESK && |
59 | (usage->hid & 0xff) == 0x82) { | 61 | (usage->hid & 0xff) == 0x82) { |
62 | struct input_dev *input = field->hidinput->input; | ||
60 | input_event(input, usage->type, usage->code, 1); | 63 | input_event(input, usage->type, usage->code, 1); |
61 | input_sync(input); | 64 | input_sync(input); |
62 | input_event(input, usage->type, usage->code, 0); | 65 | input_event(input, usage->type, usage->code, 0); |
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c index 864a371f6eb9..fbc997ee67d9 100644 --- a/drivers/hwmon/sht15.c +++ b/drivers/hwmon/sht15.c | |||
@@ -302,13 +302,13 @@ error_ret: | |||
302 | **/ | 302 | **/ |
303 | static inline int sht15_calc_temp(struct sht15_data *data) | 303 | static inline int sht15_calc_temp(struct sht15_data *data) |
304 | { | 304 | { |
305 | int d1 = 0; | 305 | int d1 = temppoints[0].d1; |
306 | int i; | 306 | int i; |
307 | 307 | ||
308 | for (i = 1; i < ARRAY_SIZE(temppoints); i++) | 308 | for (i = ARRAY_SIZE(temppoints) - 1; i > 0; i--) |
309 | /* Find pointer to interpolate */ | 309 | /* Find pointer to interpolate */ |
310 | if (data->supply_uV > temppoints[i - 1].vdd) { | 310 | if (data->supply_uV > temppoints[i - 1].vdd) { |
311 | d1 = (data->supply_uV/1000 - temppoints[i - 1].vdd) | 311 | d1 = (data->supply_uV - temppoints[i - 1].vdd) |
312 | * (temppoints[i].d1 - temppoints[i - 1].d1) | 312 | * (temppoints[i].d1 - temppoints[i - 1].d1) |
313 | / (temppoints[i].vdd - temppoints[i - 1].vdd) | 313 | / (temppoints[i].vdd - temppoints[i - 1].vdd) |
314 | + temppoints[i - 1].d1; | 314 | + temppoints[i - 1].d1; |
@@ -541,7 +541,12 @@ static int __devinit sht15_probe(struct platform_device *pdev) | |||
541 | /* If a regulator is available, query what the supply voltage actually is!*/ | 541 | /* If a regulator is available, query what the supply voltage actually is!*/ |
542 | data->reg = regulator_get(data->dev, "vcc"); | 542 | data->reg = regulator_get(data->dev, "vcc"); |
543 | if (!IS_ERR(data->reg)) { | 543 | if (!IS_ERR(data->reg)) { |
544 | data->supply_uV = regulator_get_voltage(data->reg); | 544 | int voltage; |
545 | |||
546 | voltage = regulator_get_voltage(data->reg); | ||
547 | if (voltage) | ||
548 | data->supply_uV = voltage; | ||
549 | |||
545 | regulator_enable(data->reg); | 550 | regulator_enable(data->reg); |
546 | /* setup a notifier block to update this if another device | 551 | /* setup a notifier block to update this if another device |
547 | * causes the voltage to change */ | 552 | * causes the voltage to change */ |
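The sht15 change fixes the interpolation fallback (first table entry instead of 0), walks the table from the top, keeps the arithmetic in microvolts throughout, and only overwrites the assumed supply voltage when the regulator reports a nonzero reading. A self-contained sketch of the corrected interpolation; the table values below are invented for illustration:

#include <stdio.h>

struct vdd_point { int vdd_uV; int d1; };

static const struct vdd_point temppoints[] = {
	{ 2500000, -3940 },
	{ 3000000, -3960 },
	{ 3500000, -3970 },
	{ 4000000, -3980 },
	{ 5000000, -4010 },
};
#define N_POINTS (int)(sizeof(temppoints) / sizeof(temppoints[0]))

static int calc_d1(int supply_uV)
{
	int d1 = temppoints[0].d1;	/* sane default for very low supplies */
	int i;

	for (i = N_POINTS - 1; i > 0; i--) {
		if (supply_uV > temppoints[i - 1].vdd_uV) {
			/* linear interpolation, every term in microvolts */
			d1 = (supply_uV - temppoints[i - 1].vdd_uV)
				* (temppoints[i].d1 - temppoints[i - 1].d1)
				/ (temppoints[i].vdd_uV - temppoints[i - 1].vdd_uV)
				+ temppoints[i - 1].d1;
			break;
		}
	}
	return d1;
}

int main(void)
{
	printf("d1 @ 3.3V: %d\n", calc_d1(3300000));
	printf("d1 @ 2.0V: %d\n", calc_d1(2000000)); /* falls back to first entry */
	return 0;
}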
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 5f318ce29770..cb9f95cd51b1 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig | |||
@@ -77,7 +77,7 @@ config I2C_AMD8111 | |||
77 | will be called i2c-amd8111. | 77 | will be called i2c-amd8111. |
78 | 78 | ||
79 | config I2C_I801 | 79 | config I2C_I801 |
80 | tristate "Intel 82801 (ICH)" | 80 | tristate "Intel 82801 (ICH/PCH)" |
81 | depends on PCI | 81 | depends on PCI |
82 | help | 82 | help |
83 | If you say yes to this option, support will be included for the Intel | 83 | If you say yes to this option, support will be included for the Intel |
@@ -97,7 +97,8 @@ config I2C_I801 | |||
97 | ICH9 | 97 | ICH9 |
98 | Tolapai | 98 | Tolapai |
99 | ICH10 | 99 | ICH10 |
100 | PCH | 100 | 3400/5 Series (PCH) |
101 | Cougar Point (PCH) | ||
101 | 102 | ||
102 | This driver can also be built as a module. If so, the module | 103 | This driver can also be built as a module. If so, the module |
103 | will be called i2c-i801. | 104 | will be called i2c-i801. |
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index 5574be2ae6f9..e361da73b986 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c | |||
@@ -41,7 +41,8 @@ | |||
41 | Tolapai 0x5032 32 hard yes yes yes | 41 | Tolapai 0x5032 32 hard yes yes yes |
42 | ICH10 0x3a30 32 hard yes yes yes | 42 | ICH10 0x3a30 32 hard yes yes yes |
43 | ICH10 0x3a60 32 hard yes yes yes | 43 | ICH10 0x3a60 32 hard yes yes yes |
44 | PCH 0x3b30 32 hard yes yes yes | 44 | 3400/5 Series (PCH) 0x3b30 32 hard yes yes yes |
45 | Cougar Point (PCH) 0x1c22 32 hard yes yes yes | ||
45 | 46 | ||
46 | Features supported by this driver: | 47 | Features supported by this driver: |
47 | Software PEC no | 48 | Software PEC no |
@@ -580,6 +581,7 @@ static struct pci_device_id i801_ids[] = { | |||
580 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_4) }, | 581 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_4) }, |
581 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_5) }, | 582 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_5) }, |
582 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PCH_SMBUS) }, | 583 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PCH_SMBUS) }, |
584 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CPT_SMBUS) }, | ||
583 | { 0, } | 585 | { 0, } |
584 | }; | 586 | }; |
585 | 587 | ||
@@ -709,6 +711,7 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id | |||
709 | case PCI_DEVICE_ID_INTEL_ICH10_4: | 711 | case PCI_DEVICE_ID_INTEL_ICH10_4: |
710 | case PCI_DEVICE_ID_INTEL_ICH10_5: | 712 | case PCI_DEVICE_ID_INTEL_ICH10_5: |
711 | case PCI_DEVICE_ID_INTEL_PCH_SMBUS: | 713 | case PCI_DEVICE_ID_INTEL_PCH_SMBUS: |
714 | case PCI_DEVICE_ID_INTEL_CPT_SMBUS: | ||
712 | i801_features |= FEATURE_I2C_BLOCK_READ; | 715 | i801_features |= FEATURE_I2C_BLOCK_READ; |
713 | /* fall through */ | 716 | /* fall through */ |
714 | case PCI_DEVICE_ID_INTEL_82801DB_3: | 717 | case PCI_DEVICE_ID_INTEL_82801DB_3: |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 30bdf427ee6d..f8302c267743 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c | |||
@@ -752,6 +752,8 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_ | |||
752 | if (++priv->tx_outstanding == ipoib_sendq_size) { | 752 | if (++priv->tx_outstanding == ipoib_sendq_size) { |
753 | ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n", | 753 | ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n", |
754 | tx->qp->qp_num); | 754 | tx->qp->qp_num); |
755 | if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP)) | ||
756 | ipoib_warn(priv, "request notify on send CQ failed\n"); | ||
755 | netif_stop_queue(dev); | 757 | netif_stop_queue(dev); |
756 | } | 758 | } |
757 | } | 759 | } |
diff --git a/drivers/input/sparse-keymap.c b/drivers/input/sparse-keymap.c index fbd3987af57f..e8d65b372034 100644 --- a/drivers/input/sparse-keymap.c +++ b/drivers/input/sparse-keymap.c | |||
@@ -161,7 +161,7 @@ int sparse_keymap_setup(struct input_dev *dev, | |||
161 | return 0; | 161 | return 0; |
162 | 162 | ||
163 | err_out: | 163 | err_out: |
164 | kfree(keymap); | 164 | kfree(map); |
165 | return error; | 165 | return error; |
166 | 166 | ||
167 | } | 167 | } |
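The sparse-keymap fix frees the copy the function itself allocated (map), not the caller-owned template (keymap). A minimal sketch of that ownership rule in an error path, with simplified types:

#include <stdlib.h>
#include <string.h>
#include <errno.h>

struct key_entry { int code; };

static int setup_keymap(const struct key_entry *keymap, size_t n,
			struct key_entry **out)
{
	struct key_entry *map;
	size_t i;

	map = calloc(n, sizeof(*map));
	if (!map)
		return -ENOMEM;
	memcpy(map, keymap, n * sizeof(*map));

	for (i = 0; i < n; i++) {
		if (map[i].code < 0) {		/* pretend registration failed */
			free(map);		/* our copy, never the caller's */
			return -EINVAL;
		}
	}

	*out = map;
	return 0;
}

int main(void)
{
	const struct key_entry template[] = { { 30 }, { -1 } };
	struct key_entry *live = NULL;

	return setup_keymap(template, 2, &live) == -EINVAL ? 0 : 1;
}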
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c index 072f33b3b2b0..e53ddc5d0fa1 100644 --- a/drivers/input/tablet/wacom_sys.c +++ b/drivers/input/tablet/wacom_sys.c | |||
@@ -644,13 +644,15 @@ static int wacom_resume(struct usb_interface *intf) | |||
644 | int rv; | 644 | int rv; |
645 | 645 | ||
646 | mutex_lock(&wacom->lock); | 646 | mutex_lock(&wacom->lock); |
647 | if (wacom->open) { | 647 | |
648 | /* switch to wacom mode first */ | ||
649 | wacom_query_tablet_data(intf, features); | ||
650 | |||
651 | if (wacom->open) | ||
648 | rv = usb_submit_urb(wacom->irq, GFP_NOIO); | 652 | rv = usb_submit_urb(wacom->irq, GFP_NOIO); |
649 | /* switch to wacom mode if needed */ | 653 | else |
650 | if (!wacom_retrieve_hid_descriptor(intf, features)) | ||
651 | wacom_query_tablet_data(intf, features); | ||
652 | } else | ||
653 | rv = 0; | 654 | rv = 0; |
655 | |||
654 | mutex_unlock(&wacom->lock); | 656 | mutex_unlock(&wacom->lock); |
655 | 657 | ||
656 | return rv; | 658 | return rv; |
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index e3cf5686d0aa..d7500e1c26f2 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c | |||
@@ -285,7 +285,8 @@ retry: | |||
285 | up_write(&_hash_lock); | 285 | up_write(&_hash_lock); |
286 | } | 286 | } |
287 | 287 | ||
288 | static int dm_hash_rename(uint32_t cookie, const char *old, const char *new) | 288 | static int dm_hash_rename(uint32_t cookie, uint32_t *flags, const char *old, |
289 | const char *new) | ||
289 | { | 290 | { |
290 | char *new_name, *old_name; | 291 | char *new_name, *old_name; |
291 | struct hash_cell *hc; | 292 | struct hash_cell *hc; |
@@ -344,7 +345,8 @@ static int dm_hash_rename(uint32_t cookie, const char *old, const char *new) | |||
344 | dm_table_put(table); | 345 | dm_table_put(table); |
345 | } | 346 | } |
346 | 347 | ||
347 | dm_kobject_uevent(hc->md, KOBJ_CHANGE, cookie); | 348 | if (!dm_kobject_uevent(hc->md, KOBJ_CHANGE, cookie)) |
349 | *flags |= DM_UEVENT_GENERATED_FLAG; | ||
348 | 350 | ||
349 | dm_put(hc->md); | 351 | dm_put(hc->md); |
350 | up_write(&_hash_lock); | 352 | up_write(&_hash_lock); |
@@ -736,10 +738,10 @@ static int dev_remove(struct dm_ioctl *param, size_t param_size) | |||
736 | __hash_remove(hc); | 738 | __hash_remove(hc); |
737 | up_write(&_hash_lock); | 739 | up_write(&_hash_lock); |
738 | 740 | ||
739 | dm_kobject_uevent(md, KOBJ_REMOVE, param->event_nr); | 741 | if (!dm_kobject_uevent(md, KOBJ_REMOVE, param->event_nr)) |
742 | param->flags |= DM_UEVENT_GENERATED_FLAG; | ||
740 | 743 | ||
741 | dm_put(md); | 744 | dm_put(md); |
742 | param->data_size = 0; | ||
743 | return 0; | 745 | return 0; |
744 | } | 746 | } |
745 | 747 | ||
@@ -773,7 +775,9 @@ static int dev_rename(struct dm_ioctl *param, size_t param_size) | |||
773 | return r; | 775 | return r; |
774 | 776 | ||
775 | param->data_size = 0; | 777 | param->data_size = 0; |
776 | return dm_hash_rename(param->event_nr, param->name, new_name); | 778 | |
779 | return dm_hash_rename(param->event_nr, ¶m->flags, param->name, | ||
780 | new_name); | ||
777 | } | 781 | } |
778 | 782 | ||
779 | static int dev_set_geometry(struct dm_ioctl *param, size_t param_size) | 783 | static int dev_set_geometry(struct dm_ioctl *param, size_t param_size) |
@@ -899,8 +903,8 @@ static int do_resume(struct dm_ioctl *param) | |||
899 | 903 | ||
900 | if (dm_suspended_md(md)) { | 904 | if (dm_suspended_md(md)) { |
901 | r = dm_resume(md); | 905 | r = dm_resume(md); |
902 | if (!r) | 906 | if (!r && !dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr)) |
903 | dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr); | 907 | param->flags |= DM_UEVENT_GENERATED_FLAG; |
904 | } | 908 | } |
905 | 909 | ||
906 | if (old_map) | 910 | if (old_map) |
@@ -1477,6 +1481,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param) | |||
1477 | { | 1481 | { |
1478 | /* Always clear this flag */ | 1482 | /* Always clear this flag */ |
1479 | param->flags &= ~DM_BUFFER_FULL_FLAG; | 1483 | param->flags &= ~DM_BUFFER_FULL_FLAG; |
1484 | param->flags &= ~DM_UEVENT_GENERATED_FLAG; | ||
1480 | 1485 | ||
1481 | /* Ignores parameters */ | 1486 | /* Ignores parameters */ |
1482 | if (cmd == DM_REMOVE_ALL_CMD || | 1487 | if (cmd == DM_REMOVE_ALL_CMD || |
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index fa786b9d8ba7..fe8889e5e355 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -2618,18 +2618,19 @@ out: | |||
2618 | /*----------------------------------------------------------------- | 2618 | /*----------------------------------------------------------------- |
2619 | * Event notification. | 2619 | * Event notification. |
2620 | *---------------------------------------------------------------*/ | 2620 | *---------------------------------------------------------------*/ |
2621 | void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, | 2621 | int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, |
2622 | unsigned cookie) | 2622 | unsigned cookie) |
2623 | { | 2623 | { |
2624 | char udev_cookie[DM_COOKIE_LENGTH]; | 2624 | char udev_cookie[DM_COOKIE_LENGTH]; |
2625 | char *envp[] = { udev_cookie, NULL }; | 2625 | char *envp[] = { udev_cookie, NULL }; |
2626 | 2626 | ||
2627 | if (!cookie) | 2627 | if (!cookie) |
2628 | kobject_uevent(&disk_to_dev(md->disk)->kobj, action); | 2628 | return kobject_uevent(&disk_to_dev(md->disk)->kobj, action); |
2629 | else { | 2629 | else { |
2630 | snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u", | 2630 | snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u", |
2631 | DM_COOKIE_ENV_VAR_NAME, cookie); | 2631 | DM_COOKIE_ENV_VAR_NAME, cookie); |
2632 | kobject_uevent_env(&disk_to_dev(md->disk)->kobj, action, envp); | 2632 | return kobject_uevent_env(&disk_to_dev(md->disk)->kobj, |
2633 | action, envp); | ||
2633 | } | 2634 | } |
2634 | } | 2635 | } |
2635 | 2636 | ||
diff --git a/drivers/md/dm.h b/drivers/md/dm.h index 8dadaa5bc396..bad1724d4869 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h | |||
@@ -125,8 +125,8 @@ void dm_stripe_exit(void); | |||
125 | int dm_open_count(struct mapped_device *md); | 125 | int dm_open_count(struct mapped_device *md); |
126 | int dm_lock_for_deletion(struct mapped_device *md); | 126 | int dm_lock_for_deletion(struct mapped_device *md); |
127 | 127 | ||
128 | void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, | 128 | int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, |
129 | unsigned cookie); | 129 | unsigned cookie); |
130 | 130 | ||
131 | int dm_io_init(void); | 131 | int dm_io_init(void); |
132 | void dm_io_exit(void); | 132 | void dm_io_exit(void); |
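The dm series turns dm_kobject_uevent() from void into a status return so callers set DM_UEVENT_GENERATED_FLAG only when the uevent was actually emitted, after validate_params() has cleared any stale copy of the flag. A standalone model of that flow, with simplified names and the uevent emission stubbed out:

#include <stdio.h>
#include <stdbool.h>

#define UEVENT_GENERATED_FLAG (1u << 0)

/* stand-in for uevent emission; 0 means the event really went out */
static int emit_uevent(bool fail)
{
	return fail ? -1 : 0;
}

struct ioctl_param { unsigned int flags; };

static void do_rename(struct ioctl_param *param, bool uevent_fails)
{
	/* always start from a clean slate, as validate_params() now does */
	param->flags &= ~UEVENT_GENERATED_FLAG;

	if (!emit_uevent(uevent_fails))
		param->flags |= UEVENT_GENERATED_FLAG;
}

int main(void)
{
	struct ioctl_param p = { 0 };

	do_rename(&p, false);
	printf("generated: %u\n", p.flags & UEVENT_GENERATED_FLAG); /* 1 */
	do_rename(&p, true);
	printf("generated: %u\n", p.flags & UEVENT_GENERATED_FLAG); /* 0 */
	return 0;
}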
diff --git a/drivers/md/linear.c b/drivers/md/linear.c index 00435bd20699..001317b50034 100644 --- a/drivers/md/linear.c +++ b/drivers/md/linear.c | |||
@@ -172,12 +172,14 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks) | |||
172 | disk_stack_limits(mddev->gendisk, rdev->bdev, | 172 | disk_stack_limits(mddev->gendisk, rdev->bdev, |
173 | rdev->data_offset << 9); | 173 | rdev->data_offset << 9); |
174 | /* as we don't honour merge_bvec_fn, we must never risk | 174 | /* as we don't honour merge_bvec_fn, we must never risk |
175 | * violating it, so limit ->max_sector to one PAGE, as | 175 | * violating it, so limit max_phys_segments to 1 lying within |
176 | * a one page request is never in violation. | 176 | * a single page. |
177 | */ | 177 | */ |
178 | if (rdev->bdev->bd_disk->queue->merge_bvec_fn && | 178 | if (rdev->bdev->bd_disk->queue->merge_bvec_fn) { |
179 | queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9)) | 179 | blk_queue_max_phys_segments(mddev->queue, 1); |
180 | blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); | 180 | blk_queue_segment_boundary(mddev->queue, |
181 | PAGE_CACHE_SIZE - 1); | ||
182 | } | ||
181 | 183 | ||
182 | conf->array_sectors += rdev->sectors; | 184 | conf->array_sectors += rdev->sectors; |
183 | cnt++; | 185 | cnt++; |
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index 32a662fc55c9..f9ee99f9a94c 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c | |||
@@ -301,14 +301,16 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) | |||
301 | rdev->data_offset << 9); | 301 | rdev->data_offset << 9); |
302 | 302 | ||
303 | /* as we don't honour merge_bvec_fn, we must never risk | 303 | /* as we don't honour merge_bvec_fn, we must never risk |
304 | * violating it, so limit ->max_sector to one PAGE, as | 304 | * violating it, so limit ->max_phys_segments to one, lying |
305 | * a one page request is never in violation. | 305 | * within a single page. |
306 | * (Note: it is very unlikely that a device with | 306 | * (Note: it is very unlikely that a device with |
307 | * merge_bvec_fn will be involved in multipath.) | 307 | * merge_bvec_fn will be involved in multipath.) |
308 | */ | 308 | */ |
309 | if (q->merge_bvec_fn && | 309 | if (q->merge_bvec_fn) { |
310 | queue_max_sectors(q) > (PAGE_SIZE>>9)) | 310 | blk_queue_max_phys_segments(mddev->queue, 1); |
311 | blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); | 311 | blk_queue_segment_boundary(mddev->queue, |
312 | PAGE_CACHE_SIZE - 1); | ||
313 | } | ||
312 | 314 | ||
313 | conf->working_disks++; | 315 | conf->working_disks++; |
314 | mddev->degraded--; | 316 | mddev->degraded--; |
@@ -476,9 +478,11 @@ static int multipath_run (mddev_t *mddev) | |||
476 | /* as we don't honour merge_bvec_fn, we must never risk | 478 | /* as we don't honour merge_bvec_fn, we must never risk |
477 | * violating it, not that we ever expect a device with | 479 | * violating it, not that we ever expect a device with |
478 | * a merge_bvec_fn to be involved in multipath */ | 480 | * a merge_bvec_fn to be involved in multipath */ |
479 | if (rdev->bdev->bd_disk->queue->merge_bvec_fn && | 481 | if (rdev->bdev->bd_disk->queue->merge_bvec_fn) { |
480 | queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9)) | 482 | blk_queue_max_phys_segments(mddev->queue, 1); |
481 | blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); | 483 | blk_queue_segment_boundary(mddev->queue, |
484 | PAGE_CACHE_SIZE - 1); | ||
485 | } | ||
482 | 486 | ||
483 | if (!test_bit(Faulty, &rdev->flags)) | 487 | if (!test_bit(Faulty, &rdev->flags)) |
484 | conf->working_disks++; | 488 | conf->working_disks++; |
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index 77605cdceaf1..41ee9deed250 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c | |||
@@ -176,14 +176,15 @@ static int create_strip_zones(mddev_t *mddev) | |||
176 | disk_stack_limits(mddev->gendisk, rdev1->bdev, | 176 | disk_stack_limits(mddev->gendisk, rdev1->bdev, |
177 | rdev1->data_offset << 9); | 177 | rdev1->data_offset << 9); |
178 | /* as we don't honour merge_bvec_fn, we must never risk | 178 | /* as we don't honour merge_bvec_fn, we must never risk |
179 | * violating it, so limit ->max_sector to one PAGE, as | 179 | * violating it, so limit ->max_phys_segments to 1, lying within |
180 | * a one page request is never in violation. | 180 | * a single page. |
181 | */ | 181 | */ |
182 | 182 | ||
183 | if (rdev1->bdev->bd_disk->queue->merge_bvec_fn && | 183 | if (rdev1->bdev->bd_disk->queue->merge_bvec_fn) { |
184 | queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9)) | 184 | blk_queue_max_phys_segments(mddev->queue, 1); |
185 | blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); | 185 | blk_queue_segment_boundary(mddev->queue, |
186 | 186 | PAGE_CACHE_SIZE - 1); | |
187 | } | ||
187 | if (!smallest || (rdev1->sectors < smallest->sectors)) | 188 | if (!smallest || (rdev1->sectors < smallest->sectors)) |
188 | smallest = rdev1; | 189 | smallest = rdev1; |
189 | cnt++; | 190 | cnt++; |
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index d119b7b75e71..047c468e3d28 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
@@ -1155,13 +1155,17 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) | |||
1155 | 1155 | ||
1156 | disk_stack_limits(mddev->gendisk, rdev->bdev, | 1156 | disk_stack_limits(mddev->gendisk, rdev->bdev, |
1157 | rdev->data_offset << 9); | 1157 | rdev->data_offset << 9); |
1158 | /* as we don't honour merge_bvec_fn, we must never risk | 1158 | /* as we don't honour merge_bvec_fn, we must |
1159 | * violating it, so limit ->max_sector to one PAGE, as | 1159 | * never risk violating it, so limit |
1160 | * a one page request is never in violation. | 1160 | * ->max_phys_segments to one lying with a single |
1161 | * page, as a one page request is never in | ||
1162 | * violation. | ||
1161 | */ | 1163 | */ |
1162 | if (rdev->bdev->bd_disk->queue->merge_bvec_fn && | 1164 | if (rdev->bdev->bd_disk->queue->merge_bvec_fn) { |
1163 | queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9)) | 1165 | blk_queue_max_phys_segments(mddev->queue, 1); |
1164 | blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); | 1166 | blk_queue_segment_boundary(mddev->queue, |
1167 | PAGE_CACHE_SIZE - 1); | ||
1168 | } | ||
1165 | 1169 | ||
1166 | p->head_position = 0; | 1170 | p->head_position = 0; |
1167 | rdev->raid_disk = mirror; | 1171 | rdev->raid_disk = mirror; |
@@ -2255,12 +2259,14 @@ static int run(mddev_t *mddev) | |||
2255 | disk_stack_limits(mddev->gendisk, rdev->bdev, | 2259 | disk_stack_limits(mddev->gendisk, rdev->bdev, |
2256 | rdev->data_offset << 9); | 2260 | rdev->data_offset << 9); |
2257 | /* as we don't honour merge_bvec_fn, we must never risk | 2261 | /* as we don't honour merge_bvec_fn, we must never risk |
2258 | * violating it, so limit ->max_sector to one PAGE, as | 2262 | * violating it, so limit max_phys_segments to 1 lying |
2259 | * a one page request is never in violation. | 2263 | * within a single page. |
2260 | */ | 2264 | */ |
2261 | if (rdev->bdev->bd_disk->queue->merge_bvec_fn && | 2265 | if (rdev->bdev->bd_disk->queue->merge_bvec_fn) { |
2262 | queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9)) | 2266 | blk_queue_max_phys_segments(mddev->queue, 1); |
2263 | blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); | 2267 | blk_queue_segment_boundary(mddev->queue, |
2268 | PAGE_CACHE_SIZE - 1); | ||
2269 | } | ||
2264 | 2270 | ||
2265 | disk->head_position = 0; | 2271 | disk->head_position = 0; |
2266 | } | 2272 | } |
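The same three-line change is applied to linear, multipath, raid0 and raid10: when a member device has a merge_bvec_fn the array cannot honour, requests are confined to one physical segment within a single page instead of capping max_sectors. A sketch of that policy expressed as a shared helper, with the block-layer queue reduced to a stub struct:

#include <stdio.h>
#include <stdbool.h>

#define PAGE_SIZE_STUB 4096u

struct queue_stub {
	unsigned int max_phys_segments;
	unsigned long segment_boundary;
	bool has_merge_bvec_fn;
};

static void limit_to_one_page(struct queue_stub *array_q,
			      const struct queue_stub *member_q)
{
	if (member_q->has_merge_bvec_fn) {
		/* one segment, and that segment may not cross a page */
		array_q->max_phys_segments = 1;
		array_q->segment_boundary = PAGE_SIZE_STUB - 1;
	}
}

int main(void)
{
	struct queue_stub array  = { 128, ~0ul, false };
	struct queue_stub member = { 128, ~0ul, true };

	limit_to_one_page(&array, &member);
	printf("segments=%u boundary=0x%lx\n",
	       array.max_phys_segments, array.segment_boundary);
	return 0;
}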
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index 57f149b75fbe..4d353d25d60b 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c | |||
@@ -660,6 +660,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter) | |||
660 | i = 0; | 660 | i = 0; |
661 | } | 661 | } |
662 | 662 | ||
663 | if (i == tx_ring->next_to_use) | ||
664 | break; | ||
663 | eop = tx_ring->buffer_info[i].next_to_watch; | 665 | eop = tx_ring->buffer_info[i].next_to_watch; |
664 | eop_desc = E1000_TX_DESC(*tx_ring, eop); | 666 | eop_desc = E1000_TX_DESC(*tx_ring, eop); |
665 | } | 667 | } |
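The e1000e fix stops the TX cleanup loop once the consumer index reaches next_to_use, so descriptors beyond what the producer posted are never reclaimed. A small ring-buffer model of that guard, with the layout and names simplified:

#include <stdio.h>

#define RING_SIZE 8

struct ring {
	int next_to_clean;	/* consumer index */
	int next_to_use;	/* producer index (next free slot) */
	int done[RING_SIZE];	/* 1 once the descriptor completed */
};

static int clean_ring(struct ring *r)
{
	int cleaned = 0;
	int i = r->next_to_clean;

	while (r->done[i]) {
		r->done[i] = 0;
		cleaned++;
		i = (i + 1) % RING_SIZE;

		/* never run past what the producer actually posted */
		if (i == r->next_to_use)
			break;
	}
	r->next_to_clean = i;
	return cleaned;
}

int main(void)
{
	struct ring r = { .next_to_clean = 0, .next_to_use = 3,
			  .done = { 1, 1, 1, 1, 1, 0, 0, 0 } };

	/* only 3 descriptors were posted, so only 3 are reclaimed even
	 * though stale 'done' bits exist beyond next_to_use */
	printf("cleaned %d\n", clean_ring(&r));
	return 0;
}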
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 67d414b061d7..3db85daa3671 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c | |||
@@ -3255,8 +3255,8 @@ static void rtl8169_set_rxbufsize(struct rtl8169_private *tp, | |||
3255 | unsigned int max_frame = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; | 3255 | unsigned int max_frame = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; |
3256 | 3256 | ||
3257 | if (max_frame != 16383) | 3257 | if (max_frame != 16383) |
3258 | printk(KERN_WARNING "WARNING! Changing of MTU on this NIC" | 3258 | printk(KERN_WARNING PFX "WARNING! Changing of MTU on this " |
3259 | "May lead to frame reception errors!\n"); | 3259 | "NIC may lead to frame reception errors!\n"); |
3260 | 3260 | ||
3261 | tp->rx_buf_sz = (max_frame > RX_BUF_SIZE) ? max_frame : RX_BUF_SIZE; | 3261 | tp->rx_buf_sz = (max_frame > RX_BUF_SIZE) ? max_frame : RX_BUF_SIZE; |
3262 | } | 3262 | } |
diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c index e0799d924057..0387658d1e14 100644 --- a/drivers/net/wireless/ath/ar9170/usb.c +++ b/drivers/net/wireless/ath/ar9170/usb.c | |||
@@ -414,7 +414,7 @@ static int ar9170_usb_exec_cmd(struct ar9170 *ar, enum ar9170_cmd cmd, | |||
414 | spin_unlock_irqrestore(&aru->common.cmdlock, flags); | 414 | spin_unlock_irqrestore(&aru->common.cmdlock, flags); |
415 | 415 | ||
416 | usb_fill_int_urb(urb, aru->udev, | 416 | usb_fill_int_urb(urb, aru->udev, |
417 | usb_sndbulkpipe(aru->udev, AR9170_EP_CMD), | 417 | usb_sndintpipe(aru->udev, AR9170_EP_CMD), |
418 | aru->common.cmdbuf, plen + 4, | 418 | aru->common.cmdbuf, plen + 4, |
419 | ar9170_usb_tx_urb_complete, NULL, 1); | 419 | ar9170_usb_tx_urb_complete, NULL, 1); |
420 | 420 | ||
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 33a10716af8b..7b1eab4d85cb 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c | |||
@@ -2721,8 +2721,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed) | |||
2721 | all_wiphys_idle = ath9k_all_wiphys_idle(sc); | 2721 | all_wiphys_idle = ath9k_all_wiphys_idle(sc); |
2722 | ath9k_set_wiphy_idle(aphy, idle); | 2722 | ath9k_set_wiphy_idle(aphy, idle); |
2723 | 2723 | ||
2724 | if (!idle && all_wiphys_idle) | 2724 | enable_radio = (!idle && all_wiphys_idle); |
2725 | enable_radio = true; | ||
2726 | 2725 | ||
2727 | /* | 2726 | /* |
2728 | * After we unlock here its possible another wiphy | 2727 | * After we unlock here its possible another wiphy |
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig index 64c12e1bced3..0a00d42642cd 100644 --- a/drivers/net/wireless/b43/Kconfig +++ b/drivers/net/wireless/b43/Kconfig | |||
@@ -78,11 +78,11 @@ config B43_SDIO | |||
78 | 78 | ||
79 | If unsure, say N. | 79 | If unsure, say N. |
80 | 80 | ||
81 | # Data transfers to the device via PIO | 81 | #Data transfers to the device via PIO. We want it as a fallback even |
82 | # This is only needed on PCMCIA and SDIO devices. All others can do DMA properly. | 82 | # if we can do DMA. |
83 | config B43_PIO | 83 | config B43_PIO |
84 | bool | 84 | bool |
85 | depends on B43 && (B43_SDIO || B43_PCMCIA || B43_FORCE_PIO) | 85 | depends on B43 |
86 | select SSB_BLOCKIO | 86 | select SSB_BLOCKIO |
87 | default y | 87 | default y |
88 | 88 | ||
diff --git a/drivers/net/wireless/b43/Makefile b/drivers/net/wireless/b43/Makefile index 84772a2542dc..5e83b6f0a3a0 100644 --- a/drivers/net/wireless/b43/Makefile +++ b/drivers/net/wireless/b43/Makefile | |||
@@ -12,7 +12,7 @@ b43-y += xmit.o | |||
12 | b43-y += lo.o | 12 | b43-y += lo.o |
13 | b43-y += wa.o | 13 | b43-y += wa.o |
14 | b43-y += dma.o | 14 | b43-y += dma.o |
15 | b43-$(CONFIG_B43_PIO) += pio.o | 15 | b43-y += pio.o |
16 | b43-y += rfkill.o | 16 | b43-y += rfkill.o |
17 | b43-$(CONFIG_B43_LEDS) += leds.o | 17 | b43-$(CONFIG_B43_LEDS) += leds.o |
18 | b43-$(CONFIG_B43_PCMCIA) += pcmcia.o | 18 | b43-$(CONFIG_B43_PCMCIA) += pcmcia.o |
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h index c484cc253892..7df822e84da0 100644 --- a/drivers/net/wireless/b43/b43.h +++ b/drivers/net/wireless/b43/b43.h | |||
@@ -694,6 +694,7 @@ struct b43_wldev { | |||
694 | bool radio_hw_enable; /* saved state of radio hardware enabled state */ | 694 | bool radio_hw_enable; /* saved state of radio hardware enabled state */ |
695 | bool qos_enabled; /* TRUE, if QoS is used. */ | 695 | bool qos_enabled; /* TRUE, if QoS is used. */ |
696 | bool hwcrypto_enabled; /* TRUE, if HW crypto acceleration is enabled. */ | 696 | bool hwcrypto_enabled; /* TRUE, if HW crypto acceleration is enabled. */ |
697 | bool use_pio; /* TRUE if next init should use PIO */ | ||
697 | 698 | ||
698 | /* PHY/Radio device. */ | 699 | /* PHY/Radio device. */ |
699 | struct b43_phy phy; | 700 | struct b43_phy phy; |
@@ -822,11 +823,9 @@ struct b43_wl { | |||
822 | /* The device LEDs. */ | 823 | /* The device LEDs. */ |
823 | struct b43_leds leds; | 824 | struct b43_leds leds; |
824 | 825 | ||
825 | #ifdef CONFIG_B43_PIO | ||
826 | /* Kmalloc'ed scratch space for PIO TX/RX. Protected by wl->mutex. */ | 826 | /* Kmalloc'ed scratch space for PIO TX/RX. Protected by wl->mutex. */ |
827 | u8 pio_scratchspace[110] __attribute__((__aligned__(8))); | 827 | u8 pio_scratchspace[110] __attribute__((__aligned__(8))); |
828 | u8 pio_tailspace[4] __attribute__((__aligned__(8))); | 828 | u8 pio_tailspace[4] __attribute__((__aligned__(8))); |
829 | #endif /* CONFIG_B43_PIO */ | ||
830 | }; | 829 | }; |
831 | 830 | ||
832 | static inline struct b43_wl *hw_to_b43_wl(struct ieee80211_hw *hw) | 831 | static inline struct b43_wl *hw_to_b43_wl(struct ieee80211_hw *hw) |
@@ -877,20 +876,15 @@ static inline void b43_write32(struct b43_wldev *dev, u16 offset, u32 value) | |||
877 | 876 | ||
878 | static inline bool b43_using_pio_transfers(struct b43_wldev *dev) | 877 | static inline bool b43_using_pio_transfers(struct b43_wldev *dev) |
879 | { | 878 | { |
880 | #ifdef CONFIG_B43_PIO | ||
881 | return dev->__using_pio_transfers; | 879 | return dev->__using_pio_transfers; |
882 | #else | ||
883 | return 0; | ||
884 | #endif | ||
885 | } | 880 | } |
886 | 881 | ||
887 | #ifdef CONFIG_B43_FORCE_PIO | 882 | #ifdef CONFIG_B43_FORCE_PIO |
888 | # define B43_FORCE_PIO 1 | 883 | # define B43_PIO_DEFAULT 1 |
889 | #else | 884 | #else |
890 | # define B43_FORCE_PIO 0 | 885 | # define B43_PIO_DEFAULT 0 |
891 | #endif | 886 | #endif |
892 | 887 | ||
893 | |||
894 | /* Message printing */ | 888 | /* Message printing */ |
895 | void b43info(struct b43_wl *wl, const char *fmt, ...) | 889 | void b43info(struct b43_wl *wl, const char *fmt, ...) |
896 | __attribute__ ((format(printf, 2, 3))); | 890 | __attribute__ ((format(printf, 2, 3))); |
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c index 88d1fd02d40a..615af22c49fd 100644 --- a/drivers/net/wireless/b43/dma.c +++ b/drivers/net/wireless/b43/dma.c | |||
@@ -1653,7 +1653,6 @@ void b43_dma_tx_resume(struct b43_wldev *dev) | |||
1653 | b43_power_saving_ctl_bits(dev, 0); | 1653 | b43_power_saving_ctl_bits(dev, 0); |
1654 | } | 1654 | } |
1655 | 1655 | ||
1656 | #ifdef CONFIG_B43_PIO | ||
1657 | static void direct_fifo_rx(struct b43_wldev *dev, enum b43_dmatype type, | 1656 | static void direct_fifo_rx(struct b43_wldev *dev, enum b43_dmatype type, |
1658 | u16 mmio_base, bool enable) | 1657 | u16 mmio_base, bool enable) |
1659 | { | 1658 | { |
@@ -1687,4 +1686,3 @@ void b43_dma_direct_fifo_rx(struct b43_wldev *dev, | |||
1687 | mmio_base = b43_dmacontroller_base(type, engine_index); | 1686 | mmio_base = b43_dmacontroller_base(type, engine_index); |
1688 | direct_fifo_rx(dev, type, mmio_base, enable); | 1687 | direct_fifo_rx(dev, type, mmio_base, enable); |
1689 | } | 1688 | } |
1690 | #endif /* CONFIG_B43_PIO */ | ||
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index 629c166cc512..9eb4f5ead6ff 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c | |||
@@ -102,6 +102,9 @@ int b43_modparam_verbose = B43_VERBOSITY_DEFAULT; | |||
102 | module_param_named(verbose, b43_modparam_verbose, int, 0644); | 102 | module_param_named(verbose, b43_modparam_verbose, int, 0644); |
103 | MODULE_PARM_DESC(verbose, "Log message verbosity: 0=error, 1=warn, 2=info(default), 3=debug"); | 103 | MODULE_PARM_DESC(verbose, "Log message verbosity: 0=error, 1=warn, 2=info(default), 3=debug"); |
104 | 104 | ||
105 | int b43_modparam_pio = B43_PIO_DEFAULT; | ||
106 | module_param_named(pio, b43_modparam_pio, int, 0644); | ||
107 | MODULE_PARM_DESC(pio, "Use PIO accesses by default: 0=DMA, 1=PIO"); | ||
105 | 108 | ||
106 | static const struct ssb_device_id b43_ssb_tbl[] = { | 109 | static const struct ssb_device_id b43_ssb_tbl[] = { |
107 | SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 5), | 110 | SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 5), |
@@ -1790,8 +1793,9 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev) | |||
1790 | dma_reason[4], dma_reason[5]); | 1793 | dma_reason[4], dma_reason[5]); |
1791 | b43err(dev->wl, "This device does not support DMA " | 1794 | b43err(dev->wl, "This device does not support DMA " |
1792 | "on your system. Please use PIO instead.\n"); | 1795 | "on your system. Please use PIO instead.\n"); |
1793 | b43err(dev->wl, "CONFIG_B43_FORCE_PIO must be set in " | 1796 | /* Fall back to PIO transfers if we get fatal DMA errors! */ |
1794 | "your kernel configuration.\n"); | 1797 | dev->use_pio = 1; |
1798 | b43_controller_restart(dev, "DMA error"); | ||
1795 | return; | 1799 | return; |
1796 | } | 1800 | } |
1797 | if (merged_dma_reason & B43_DMAIRQ_NONFATALMASK) { | 1801 | if (merged_dma_reason & B43_DMAIRQ_NONFATALMASK) { |
@@ -4358,7 +4362,7 @@ static int b43_wireless_core_init(struct b43_wldev *dev) | |||
4358 | 4362 | ||
4359 | if ((dev->dev->bus->bustype == SSB_BUSTYPE_PCMCIA) || | 4363 | if ((dev->dev->bus->bustype == SSB_BUSTYPE_PCMCIA) || |
4360 | (dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) || | 4364 | (dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) || |
4361 | B43_FORCE_PIO) { | 4365 | dev->use_pio) { |
4362 | dev->__using_pio_transfers = 1; | 4366 | dev->__using_pio_transfers = 1; |
4363 | err = b43_pio_init(dev); | 4367 | err = b43_pio_init(dev); |
4364 | } else { | 4368 | } else { |
@@ -4826,6 +4830,7 @@ static int b43_one_core_attach(struct ssb_device *dev, struct b43_wl *wl) | |||
4826 | if (!wldev) | 4830 | if (!wldev) |
4827 | goto out; | 4831 | goto out; |
4828 | 4832 | ||
4833 | wldev->use_pio = b43_modparam_pio; | ||
4829 | wldev->dev = dev; | 4834 | wldev->dev = dev; |
4830 | wldev->wl = wl; | 4835 | wldev->wl = wl; |
4831 | b43_set_status(wldev, B43_STAT_UNINIT); | 4836 | b43_set_status(wldev, B43_STAT_UNINIT); |
diff --git a/drivers/net/wireless/b43/pio.h b/drivers/net/wireless/b43/pio.h index 7dd649c9ddad..7b3c42f93a16 100644 --- a/drivers/net/wireless/b43/pio.h +++ b/drivers/net/wireless/b43/pio.h | |||
@@ -55,8 +55,6 @@ | |||
55 | #define B43_PIO_MAX_NR_TXPACKETS 32 | 55 | #define B43_PIO_MAX_NR_TXPACKETS 32 |
56 | 56 | ||
57 | 57 | ||
58 | #ifdef CONFIG_B43_PIO | ||
59 | |||
60 | struct b43_pio_txpacket { | 58 | struct b43_pio_txpacket { |
61 | /* Pointer to the TX queue we belong to. */ | 59 | /* Pointer to the TX queue we belong to. */ |
62 | struct b43_pio_txqueue *queue; | 60 | struct b43_pio_txqueue *queue; |
@@ -169,42 +167,4 @@ void b43_pio_rx(struct b43_pio_rxqueue *q); | |||
169 | void b43_pio_tx_suspend(struct b43_wldev *dev); | 167 | void b43_pio_tx_suspend(struct b43_wldev *dev); |
170 | void b43_pio_tx_resume(struct b43_wldev *dev); | 168 | void b43_pio_tx_resume(struct b43_wldev *dev); |
171 | 169 | ||
172 | |||
173 | #else /* CONFIG_B43_PIO */ | ||
174 | |||
175 | |||
176 | static inline int b43_pio_init(struct b43_wldev *dev) | ||
177 | { | ||
178 | return 0; | ||
179 | } | ||
180 | static inline void b43_pio_free(struct b43_wldev *dev) | ||
181 | { | ||
182 | } | ||
183 | static inline void b43_pio_stop(struct b43_wldev *dev) | ||
184 | { | ||
185 | } | ||
186 | static inline int b43_pio_tx(struct b43_wldev *dev, | ||
187 | struct sk_buff *skb) | ||
188 | { | ||
189 | return 0; | ||
190 | } | ||
191 | static inline void b43_pio_handle_txstatus(struct b43_wldev *dev, | ||
192 | const struct b43_txstatus *status) | ||
193 | { | ||
194 | } | ||
195 | static inline void b43_pio_get_tx_stats(struct b43_wldev *dev, | ||
196 | struct ieee80211_tx_queue_stats *stats) | ||
197 | { | ||
198 | } | ||
199 | static inline void b43_pio_rx(struct b43_pio_rxqueue *q) | ||
200 | { | ||
201 | } | ||
202 | static inline void b43_pio_tx_suspend(struct b43_wldev *dev) | ||
203 | { | ||
204 | } | ||
205 | static inline void b43_pio_tx_resume(struct b43_wldev *dev) | ||
206 | { | ||
207 | } | ||
208 | |||
209 | #endif /* CONFIG_B43_PIO */ | ||
210 | #endif /* B43_PIO_H_ */ | 170 | #endif /* B43_PIO_H_ */ |
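Taken together, the b43 changes always build PIO support, let a module parameter pick the default transfer mode, and fall back to PIO at runtime on fatal DMA errors instead of asking the user to rebuild the kernel. A loose standalone model of that policy; none of the types or helpers below are the driver's:

#include <stdio.h>
#include <stdbool.h>

static int modparam_pio = 0;	/* 0 = DMA by default, 1 = PIO by default */

struct wldev_stub {
	bool use_pio;		/* what the next init should use */
	bool using_pio;		/* what the current init actually chose */
};

static void core_init(struct wldev_stub *dev)
{
	dev->using_pio = dev->use_pio;
	printf("init: %s mode\n", dev->using_pio ? "PIO" : "DMA");
}

static void fatal_dma_error(struct wldev_stub *dev)
{
	/* fall back to PIO and restart rather than giving up */
	dev->use_pio = true;
	core_init(dev);
}

int main(void)
{
	struct wldev_stub dev = { .use_pio = modparam_pio != 0 };

	core_init(&dev);	/* DMA, per the default parameter */
	fatal_dma_error(&dev);	/* restarts in PIO */
	return 0;
}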
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c index 31462813bac0..3b4c5a4610af 100644 --- a/drivers/net/wireless/iwlwifi/iwl-4965.c +++ b/drivers/net/wireless/iwlwifi/iwl-4965.c | |||
@@ -581,6 +581,8 @@ static int iwl4965_alive_notify(struct iwl_priv *priv) | |||
581 | 581 | ||
582 | iwl4965_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0); | 582 | iwl4965_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0); |
583 | 583 | ||
584 | /* reset to 0 to enable all the queue first */ | ||
585 | priv->txq_ctx_active_msk = 0; | ||
584 | /* Map each Tx/cmd queue to its corresponding fifo */ | 586 | /* Map each Tx/cmd queue to its corresponding fifo */ |
585 | for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) { | 587 | for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) { |
586 | int ac = default_queue_to_tx_fifo[i]; | 588 | int ac = default_queue_to_tx_fifo[i]; |
@@ -2008,7 +2010,9 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv, | |||
2008 | IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn " | 2010 | IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn " |
2009 | "%d index %d\n", scd_ssn , index); | 2011 | "%d index %d\n", scd_ssn , index); |
2010 | freed = iwl_tx_queue_reclaim(priv, txq_id, index); | 2012 | freed = iwl_tx_queue_reclaim(priv, txq_id, index); |
2011 | iwl_free_tfds_in_queue(priv, sta_id, tid, freed); | 2013 | if (qc) |
2014 | iwl_free_tfds_in_queue(priv, sta_id, | ||
2015 | tid, freed); | ||
2012 | 2016 | ||
2013 | if (priv->mac80211_registered && | 2017 | if (priv->mac80211_registered && |
2014 | (iwl_queue_space(&txq->q) > txq->q.low_mark) && | 2018 | (iwl_queue_space(&txq->q) > txq->q.low_mark) && |
@@ -2035,13 +2039,14 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv, | |||
2035 | 2039 | ||
2036 | freed = iwl_tx_queue_reclaim(priv, txq_id, index); | 2040 | freed = iwl_tx_queue_reclaim(priv, txq_id, index); |
2037 | if (qc && likely(sta_id != IWL_INVALID_STATION)) | 2041 | if (qc && likely(sta_id != IWL_INVALID_STATION)) |
2038 | priv->stations[sta_id].tid[tid].tfds_in_queue -= freed; | 2042 | iwl_free_tfds_in_queue(priv, sta_id, tid, freed); |
2043 | else if (sta_id == IWL_INVALID_STATION) | ||
2044 | IWL_DEBUG_TX_REPLY(priv, "Station not known\n"); | ||
2039 | 2045 | ||
2040 | if (priv->mac80211_registered && | 2046 | if (priv->mac80211_registered && |
2041 | (iwl_queue_space(&txq->q) > txq->q.low_mark)) | 2047 | (iwl_queue_space(&txq->q) > txq->q.low_mark)) |
2042 | iwl_wake_queue(priv, txq_id); | 2048 | iwl_wake_queue(priv, txq_id); |
2043 | } | 2049 | } |
2044 | |||
2045 | if (qc && likely(sta_id != IWL_INVALID_STATION)) | 2050 | if (qc && likely(sta_id != IWL_INVALID_STATION)) |
2046 | iwl_txq_check_empty(priv, sta_id, tid, txq_id); | 2051 | iwl_txq_check_empty(priv, sta_id, tid, txq_id); |
2047 | 2052 | ||
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c index cffaae772d51..c610e5fbd718 100644 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c | |||
@@ -657,6 +657,8 @@ int iwl5000_alive_notify(struct iwl_priv *priv) | |||
657 | 657 | ||
658 | iwl5000_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0); | 658 | iwl5000_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0); |
659 | 659 | ||
660 | /* reset to 0 to enable all the queue first */ | ||
661 | priv->txq_ctx_active_msk = 0; | ||
660 | /* map qos queues to fifos one-to-one */ | 662 | /* map qos queues to fifos one-to-one */ |
661 | for (i = 0; i < ARRAY_SIZE(iwl5000_default_queue_to_tx_fifo); i++) { | 663 | for (i = 0; i < ARRAY_SIZE(iwl5000_default_queue_to_tx_fifo); i++) { |
662 | int ac = iwl5000_default_queue_to_tx_fifo[i]; | 664 | int ac = iwl5000_default_queue_to_tx_fifo[i]; |
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c index 1c9866daf815..5622a55a939e 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c | |||
@@ -2461,7 +2461,7 @@ static int iwl_setup_mac(struct iwl_priv *priv) | |||
2461 | BIT(NL80211_IFTYPE_STATION) | | 2461 | BIT(NL80211_IFTYPE_STATION) | |
2462 | BIT(NL80211_IFTYPE_ADHOC); | 2462 | BIT(NL80211_IFTYPE_ADHOC); |
2463 | 2463 | ||
2464 | hw->wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY | | 2464 | hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY | |
2465 | WIPHY_FLAG_DISABLE_BEACON_HINTS; | 2465 | WIPHY_FLAG_DISABLE_BEACON_HINTS; |
2466 | 2466 | ||
2467 | /* | 2467 | /* |
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c index fa1c89ba6459..8f1b8509cd66 100644 --- a/drivers/net/wireless/iwlwifi/iwl-scan.c +++ b/drivers/net/wireless/iwlwifi/iwl-scan.c | |||
@@ -404,21 +404,6 @@ EXPORT_SYMBOL(iwl_init_scan_params); | |||
404 | 404 | ||
405 | static int iwl_scan_initiate(struct iwl_priv *priv) | 405 | static int iwl_scan_initiate(struct iwl_priv *priv) |
406 | { | 406 | { |
407 | if (!iwl_is_ready_rf(priv)) { | ||
408 | IWL_DEBUG_SCAN(priv, "Aborting scan due to not ready.\n"); | ||
409 | return -EIO; | ||
410 | } | ||
411 | |||
412 | if (test_bit(STATUS_SCANNING, &priv->status)) { | ||
413 | IWL_DEBUG_SCAN(priv, "Scan already in progress.\n"); | ||
414 | return -EAGAIN; | ||
415 | } | ||
416 | |||
417 | if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { | ||
418 | IWL_DEBUG_SCAN(priv, "Scan request while abort pending\n"); | ||
419 | return -EAGAIN; | ||
420 | } | ||
421 | |||
422 | IWL_DEBUG_INFO(priv, "Starting scan...\n"); | 407 | IWL_DEBUG_INFO(priv, "Starting scan...\n"); |
423 | set_bit(STATUS_SCANNING, &priv->status); | 408 | set_bit(STATUS_SCANNING, &priv->status); |
424 | priv->scan_start = jiffies; | 409 | priv->scan_start = jiffies; |
@@ -449,6 +434,18 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw, | |||
449 | goto out_unlock; | 434 | goto out_unlock; |
450 | } | 435 | } |
451 | 436 | ||
437 | if (test_bit(STATUS_SCANNING, &priv->status)) { | ||
438 | IWL_DEBUG_SCAN(priv, "Scan already in progress.\n"); | ||
439 | ret = -EAGAIN; | ||
440 | goto out_unlock; | ||
441 | } | ||
442 | |||
443 | if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { | ||
444 | IWL_DEBUG_SCAN(priv, "Scan request while abort pending\n"); | ||
445 | ret = -EAGAIN; | ||
446 | goto out_unlock; | ||
447 | } | ||
448 | |||
452 | /* We don't schedule scan within next_scan_jiffies period. | 449 | /* We don't schedule scan within next_scan_jiffies period. |
453 | * Avoid scanning during possible EAPOL exchange, return | 450 | * Avoid scanning during possible EAPOL exchange, return |
454 | * success immediately. | 451 | * success immediately. |
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c index f29786521bc3..adbb3ea4d865 100644 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c | |||
@@ -1926,7 +1926,7 @@ static void iwl3945_init_hw_rates(struct iwl_priv *priv, | |||
1926 | { | 1926 | { |
1927 | int i; | 1927 | int i; |
1928 | 1928 | ||
1929 | for (i = 0; i < IWL_RATE_COUNT; i++) { | 1929 | for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) { |
1930 | rates[i].bitrate = iwl3945_rates[i].ieee * 5; | 1930 | rates[i].bitrate = iwl3945_rates[i].ieee * 5; |
1931 | rates[i].hw_value = i; /* Rate scaling will work on indexes */ | 1931 | rates[i].hw_value = i; /* Rate scaling will work on indexes */ |
1932 | rates[i].hw_value_short = i; | 1932 | rates[i].hw_value_short = i; |
@@ -3903,7 +3903,7 @@ static int iwl3945_setup_mac(struct iwl_priv *priv) | |||
3903 | BIT(NL80211_IFTYPE_STATION) | | 3903 | BIT(NL80211_IFTYPE_STATION) | |
3904 | BIT(NL80211_IFTYPE_ADHOC); | 3904 | BIT(NL80211_IFTYPE_ADHOC); |
3905 | 3905 | ||
3906 | hw->wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY | | 3906 | hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY | |
3907 | WIPHY_FLAG_DISABLE_BEACON_HINTS; | 3907 | WIPHY_FLAG_DISABLE_BEACON_HINTS; |
3908 | 3908 | ||
3909 | hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945; | 3909 | hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945; |
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 8fb70082b45a..be792519adcd 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
@@ -2612,6 +2612,23 @@ int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type) | |||
2612 | return 0; | 2612 | return 0; |
2613 | } | 2613 | } |
2614 | 2614 | ||
2615 | /* Some architectures require additional programming to enable VGA */ | ||
2616 | static arch_set_vga_state_t arch_set_vga_state; | ||
2617 | |||
2618 | void __init pci_register_set_vga_state(arch_set_vga_state_t func) | ||
2619 | { | ||
2620 | arch_set_vga_state = func; /* NULL disables */ | ||
2621 | } | ||
2622 | |||
2623 | static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode, | ||
2624 | unsigned int command_bits, bool change_bridge) | ||
2625 | { | ||
2626 | if (arch_set_vga_state) | ||
2627 | return arch_set_vga_state(dev, decode, command_bits, | ||
2628 | change_bridge); | ||
2629 | return 0; | ||
2630 | } | ||
2631 | |||
2615 | /** | 2632 | /** |
2616 | * pci_set_vga_state - set VGA decode state on device and parents if requested | 2633 | * pci_set_vga_state - set VGA decode state on device and parents if requested |
2617 | * @dev: the PCI device | 2634 | * @dev: the PCI device |
@@ -2625,9 +2642,15 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode, | |||
2625 | struct pci_bus *bus; | 2642 | struct pci_bus *bus; |
2626 | struct pci_dev *bridge; | 2643 | struct pci_dev *bridge; |
2627 | u16 cmd; | 2644 | u16 cmd; |
2645 | int rc; | ||
2628 | 2646 | ||
2629 | WARN_ON(command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)); | 2647 | WARN_ON(command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)); |
2630 | 2648 | ||
2649 | /* ARCH specific VGA enables */ | ||
2650 | rc = pci_set_vga_state_arch(dev, decode, command_bits, change_bridge); | ||
2651 | if (rc) | ||
2652 | return rc; | ||
2653 | |||
2631 | pci_read_config_word(dev, PCI_COMMAND, &cmd); | 2654 | pci_read_config_word(dev, PCI_COMMAND, &cmd); |
2632 | if (decode == true) | 2655 | if (decode == true) |
2633 | cmd |= command_bits; | 2656 | cmd |= command_bits; |
@@ -2874,4 +2897,3 @@ EXPORT_SYMBOL(pci_target_state); | |||
2874 | EXPORT_SYMBOL(pci_prepare_to_sleep); | 2897 | EXPORT_SYMBOL(pci_prepare_to_sleep); |
2875 | EXPORT_SYMBOL(pci_back_from_sleep); | 2898 | EXPORT_SYMBOL(pci_back_from_sleep); |
2876 | EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state); | 2899 | EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state); |
2877 | |||
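The pci.c addition introduces an optional per-architecture hook for VGA enablement: an arch registers one callback, and the generic path calls it only when present and bails out if it fails. A simplified sketch of that hook pattern with a reduced signature:

#include <stdio.h>
#include <stdbool.h>

struct dev_stub { const char *name; };

typedef int (*set_vga_state_t)(struct dev_stub *dev, bool decode);

static set_vga_state_t arch_set_vga_state;	/* NULL = no arch hook */

static void register_set_vga_state(set_vga_state_t func)
{
	arch_set_vga_state = func;	/* NULL disables the hook */
}

static int set_vga_state(struct dev_stub *dev, bool decode)
{
	if (arch_set_vga_state) {
		int rc = arch_set_vga_state(dev, decode);
		if (rc)
			return rc;	/* arch failure stops the generic path */
	}
	printf("%s: generic VGA %s\n", dev->name, decode ? "on" : "off");
	return 0;
}

static int example_arch_hook(struct dev_stub *dev, bool decode)
{
	printf("%s: arch-specific VGA setup (%d)\n", dev->name, decode);
	return 0;
}

int main(void)
{
	struct dev_stub dev = { "gpu0" };

	set_vga_state(&dev, true);		/* generic path only */
	register_set_vga_state(example_arch_hook);
	return set_vga_state(&dev, true);	/* arch hook, then generic */
}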
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index c28a712fd4db..e6b67f2d3229 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c | |||
@@ -3027,14 +3027,15 @@ static void iscsi_start_session_recovery(struct iscsi_session *session, | |||
3027 | session->state = ISCSI_STATE_TERMINATE; | 3027 | session->state = ISCSI_STATE_TERMINATE; |
3028 | else if (conn->stop_stage != STOP_CONN_RECOVER) | 3028 | else if (conn->stop_stage != STOP_CONN_RECOVER) |
3029 | session->state = ISCSI_STATE_IN_RECOVERY; | 3029 | session->state = ISCSI_STATE_IN_RECOVERY; |
3030 | |||
3031 | old_stop_stage = conn->stop_stage; | ||
3032 | conn->stop_stage = flag; | ||
3030 | spin_unlock_bh(&session->lock); | 3033 | spin_unlock_bh(&session->lock); |
3031 | 3034 | ||
3032 | del_timer_sync(&conn->transport_timer); | 3035 | del_timer_sync(&conn->transport_timer); |
3033 | iscsi_suspend_tx(conn); | 3036 | iscsi_suspend_tx(conn); |
3034 | 3037 | ||
3035 | spin_lock_bh(&session->lock); | 3038 | spin_lock_bh(&session->lock); |
3036 | old_stop_stage = conn->stop_stage; | ||
3037 | conn->stop_stage = flag; | ||
3038 | conn->c_stage = ISCSI_CONN_STOPPED; | 3039 | conn->c_stage = ISCSI_CONN_STOPPED; |
3039 | spin_unlock_bh(&session->lock); | 3040 | spin_unlock_bh(&session->lock); |
3040 | 3041 | ||
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 34d4eb98829e..db6b07136ea3 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c | |||
@@ -170,6 +170,7 @@ static void acm_write_done(struct acm *acm, struct acm_wb *wb) | |||
170 | { | 170 | { |
171 | wb->use = 0; | 171 | wb->use = 0; |
172 | acm->transmitting--; | 172 | acm->transmitting--; |
173 | usb_autopm_put_interface_async(acm->control); | ||
173 | } | 174 | } |
174 | 175 | ||
175 | /* | 176 | /* |
@@ -211,9 +212,12 @@ static int acm_write_start(struct acm *acm, int wbn) | |||
211 | } | 212 | } |
212 | 213 | ||
213 | dbg("%s susp_count: %d", __func__, acm->susp_count); | 214 | dbg("%s susp_count: %d", __func__, acm->susp_count); |
215 | usb_autopm_get_interface_async(acm->control); | ||
214 | if (acm->susp_count) { | 216 | if (acm->susp_count) { |
215 | acm->delayed_wb = wb; | 217 | if (!acm->delayed_wb) |
216 | schedule_work(&acm->waker); | 218 | acm->delayed_wb = wb; |
219 | else | ||
220 | usb_autopm_put_interface_async(acm->control); | ||
217 | spin_unlock_irqrestore(&acm->write_lock, flags); | 221 | spin_unlock_irqrestore(&acm->write_lock, flags); |
218 | return 0; /* A white lie */ | 222 | return 0; /* A white lie */ |
219 | } | 223 | } |
@@ -534,23 +538,6 @@ static void acm_softint(struct work_struct *work) | |||
534 | tty_kref_put(tty); | 538 | tty_kref_put(tty); |
535 | } | 539 | } |
536 | 540 | ||
537 | static void acm_waker(struct work_struct *waker) | ||
538 | { | ||
539 | struct acm *acm = container_of(waker, struct acm, waker); | ||
540 | int rv; | ||
541 | |||
542 | rv = usb_autopm_get_interface(acm->control); | ||
543 | if (rv < 0) { | ||
544 | dev_err(&acm->dev->dev, "Autopm failure in %s\n", __func__); | ||
545 | return; | ||
546 | } | ||
547 | if (acm->delayed_wb) { | ||
548 | acm_start_wb(acm, acm->delayed_wb); | ||
549 | acm->delayed_wb = NULL; | ||
550 | } | ||
551 | usb_autopm_put_interface(acm->control); | ||
552 | } | ||
553 | |||
554 | /* | 541 | /* |
555 | * TTY handlers | 542 | * TTY handlers |
556 | */ | 543 | */ |
@@ -1178,7 +1165,6 @@ made_compressed_probe: | |||
1178 | acm->urb_task.func = acm_rx_tasklet; | 1165 | acm->urb_task.func = acm_rx_tasklet; |
1179 | acm->urb_task.data = (unsigned long) acm; | 1166 | acm->urb_task.data = (unsigned long) acm; |
1180 | INIT_WORK(&acm->work, acm_softint); | 1167 | INIT_WORK(&acm->work, acm_softint); |
1181 | INIT_WORK(&acm->waker, acm_waker); | ||
1182 | init_waitqueue_head(&acm->drain_wait); | 1168 | init_waitqueue_head(&acm->drain_wait); |
1183 | spin_lock_init(&acm->throttle_lock); | 1169 | spin_lock_init(&acm->throttle_lock); |
1184 | spin_lock_init(&acm->write_lock); | 1170 | spin_lock_init(&acm->write_lock); |
@@ -1343,7 +1329,6 @@ static void stop_data_traffic(struct acm *acm) | |||
1343 | tasklet_enable(&acm->urb_task); | 1329 | tasklet_enable(&acm->urb_task); |
1344 | 1330 | ||
1345 | cancel_work_sync(&acm->work); | 1331 | cancel_work_sync(&acm->work); |
1346 | cancel_work_sync(&acm->waker); | ||
1347 | } | 1332 | } |
1348 | 1333 | ||
1349 | static void acm_disconnect(struct usb_interface *intf) | 1334 | static void acm_disconnect(struct usb_interface *intf) |
@@ -1435,6 +1420,7 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message) | |||
1435 | static int acm_resume(struct usb_interface *intf) | 1420 | static int acm_resume(struct usb_interface *intf) |
1436 | { | 1421 | { |
1437 | struct acm *acm = usb_get_intfdata(intf); | 1422 | struct acm *acm = usb_get_intfdata(intf); |
1423 | struct acm_wb *wb; | ||
1438 | int rv = 0; | 1424 | int rv = 0; |
1439 | int cnt; | 1425 | int cnt; |
1440 | 1426 | ||
@@ -1449,6 +1435,21 @@ static int acm_resume(struct usb_interface *intf) | |||
1449 | mutex_lock(&acm->mutex); | 1435 | mutex_lock(&acm->mutex); |
1450 | if (acm->port.count) { | 1436 | if (acm->port.count) { |
1451 | rv = usb_submit_urb(acm->ctrlurb, GFP_NOIO); | 1437 | rv = usb_submit_urb(acm->ctrlurb, GFP_NOIO); |
1438 | |||
1439 | spin_lock_irq(&acm->write_lock); | ||
1440 | if (acm->delayed_wb) { | ||
1441 | wb = acm->delayed_wb; | ||
1442 | acm->delayed_wb = NULL; | ||
1443 | spin_unlock_irq(&acm->write_lock); | ||
1444 | acm_start_wb(acm, wb); | ||
1445 | } else { | ||
1446 | spin_unlock_irq(&acm->write_lock); | ||
1447 | } | ||
1448 | |||
1449 | /* | ||
1450 | * delayed error checking because we must | ||
1451 | * do the write path at all cost | ||
1452 | */ | ||
1452 | if (rv < 0) | 1453 | if (rv < 0) |
1453 | goto err_out; | 1454 | goto err_out; |
1454 | 1455 | ||
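Note: the cdc-acm hunks above drop the dedicated waker work item and instead park at most one delayed write buffer while the device is suspended, handing it back to the write path from acm_resume() under acm->write_lock. A rough user-space sketch of that "stash one item under a lock, drain it on resume" shape; all names here are stand-ins, not the driver's API.

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct dev {
	pthread_mutex_t write_lock;
	int susp_count;
	int *delayed_wb;	/* at most one parked write buffer */
};

static void start_wb(int *wb) { printf("submitting wb %d\n", *wb); }

/* Write path: park the buffer if the device is suspended. */
static int write_start(struct dev *d, int *wb)
{
	pthread_mutex_lock(&d->write_lock);
	if (d->susp_count) {
		if (!d->delayed_wb)
			d->delayed_wb = wb;	/* keep only the first one */
		pthread_mutex_unlock(&d->write_lock);
		return 0;			/* a white lie, as in the driver */
	}
	pthread_mutex_unlock(&d->write_lock);
	start_wb(wb);
	return 0;
}

/* Resume path: hand any parked buffer back to the write machinery. */
static void resume(struct dev *d)
{
	int *wb;

	pthread_mutex_lock(&d->write_lock);
	d->susp_count = 0;
	wb = d->delayed_wb;
	d->delayed_wb = NULL;
	pthread_mutex_unlock(&d->write_lock);
	if (wb)
		start_wb(wb);
}

int main(void)
{
	struct dev d = { PTHREAD_MUTEX_INITIALIZER, 1, NULL };
	int wb = 42;

	write_start(&d, &wb);	/* parked: device suspended */
	resume(&d);		/* prints: submitting wb 42 */
	return 0;
}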
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h index c4a0ee8ffccf..519eb638b6e9 100644 --- a/drivers/usb/class/cdc-acm.h +++ b/drivers/usb/class/cdc-acm.h | |||
@@ -112,7 +112,6 @@ struct acm { | |||
112 | struct mutex mutex; | 112 | struct mutex mutex; |
113 | struct usb_cdc_line_coding line; /* bits, stop, parity */ | 113 | struct usb_cdc_line_coding line; /* bits, stop, parity */ |
114 | struct work_struct work; /* work queue entry for line discipline waking up */ | 114 | struct work_struct work; /* work queue entry for line discipline waking up */ |
115 | struct work_struct waker; | ||
116 | wait_queue_head_t drain_wait; /* close processing */ | 115 | wait_queue_head_t drain_wait; /* close processing */ |
117 | struct tasklet_struct urb_task; /* rx processing */ | 116 | struct tasklet_struct urb_task; /* rx processing */ |
118 | spinlock_t throttle_lock; /* synchronize throtteling and read callback */ | 117 | spinlock_t throttle_lock; /* synchronize throtteling and read callback */ |
diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c index 2e78b0784bdc..9804ee9a79d8 100644 --- a/drivers/video/backlight/mbp_nvidia_bl.c +++ b/drivers/video/backlight/mbp_nvidia_bl.c | |||
@@ -139,6 +139,51 @@ static int mbp_dmi_match(const struct dmi_system_id *id) | |||
139 | static const struct dmi_system_id __initdata mbp_device_table[] = { | 139 | static const struct dmi_system_id __initdata mbp_device_table[] = { |
140 | { | 140 | { |
141 | .callback = mbp_dmi_match, | 141 | .callback = mbp_dmi_match, |
142 | .ident = "MacBook 1,1", | ||
143 | .matches = { | ||
144 | DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), | ||
145 | DMI_MATCH(DMI_PRODUCT_NAME, "MacBook1,1"), | ||
146 | }, | ||
147 | .driver_data = (void *)&intel_chipset_data, | ||
148 | }, | ||
149 | { | ||
150 | .callback = mbp_dmi_match, | ||
151 | .ident = "MacBook 2,1", | ||
152 | .matches = { | ||
153 | DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), | ||
154 | DMI_MATCH(DMI_PRODUCT_NAME, "MacBook2,1"), | ||
155 | }, | ||
156 | .driver_data = (void *)&intel_chipset_data, | ||
157 | }, | ||
158 | { | ||
159 | .callback = mbp_dmi_match, | ||
160 | .ident = "MacBook 3,1", | ||
161 | .matches = { | ||
162 | DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), | ||
163 | DMI_MATCH(DMI_PRODUCT_NAME, "MacBook3,1"), | ||
164 | }, | ||
165 | .driver_data = (void *)&intel_chipset_data, | ||
166 | }, | ||
167 | { | ||
168 | .callback = mbp_dmi_match, | ||
169 | .ident = "MacBook 4,1", | ||
170 | .matches = { | ||
171 | DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), | ||
172 | DMI_MATCH(DMI_PRODUCT_NAME, "MacBook4,1"), | ||
173 | }, | ||
174 | .driver_data = (void *)&intel_chipset_data, | ||
175 | }, | ||
176 | { | ||
177 | .callback = mbp_dmi_match, | ||
178 | .ident = "MacBook 4,2", | ||
179 | .matches = { | ||
180 | DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), | ||
181 | DMI_MATCH(DMI_PRODUCT_NAME, "MacBook4,2"), | ||
182 | }, | ||
183 | .driver_data = (void *)&intel_chipset_data, | ||
184 | }, | ||
185 | { | ||
186 | .callback = mbp_dmi_match, | ||
142 | .ident = "MacBookPro 3,1", | 187 | .ident = "MacBookPro 3,1", |
143 | .matches = { | 188 | .matches = { |
144 | DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), | 189 | DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), |
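Note: the backlight hunk only extends the DMI table with additional MacBook models, all pointing at the same intel_chipset_data. As a rough illustration of how such a table-driven match works, a simplified, hypothetical stand-in for dmi_check_system() (not the kernel API):

#include <stdio.h>
#include <string.h>

struct dmi_id {
	const char *vendor;
	const char *product;
	const char *driver_data;
};

/* Entries mirror the ones added by the patch. */
static const struct dmi_id table[] = {
	{ "Apple Inc.", "MacBook1,1", "intel_chipset_data" },
	{ "Apple Inc.", "MacBook2,1", "intel_chipset_data" },
	{ "Apple Inc.", "MacBook3,1", "intel_chipset_data" },
	{ NULL, NULL, NULL }
};

/* Walk the table until an entry matches the firmware strings. */
static const char *dmi_match(const char *vendor, const char *product)
{
	const struct dmi_id *id;

	for (id = table; id->vendor; id++)
		if (!strcmp(id->vendor, vendor) && !strcmp(id->product, product))
			return id->driver_data;
	return NULL;
}

int main(void)
{
	printf("%s\n", dmi_match("Apple Inc.", "MacBook2,1"));
	return 0;
}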
diff --git a/drivers/video/sunxvr500.c b/drivers/video/sunxvr500.c index 4cd50497264d..3803745d6eee 100644 --- a/drivers/video/sunxvr500.c +++ b/drivers/video/sunxvr500.c | |||
@@ -242,11 +242,27 @@ static int __devinit e3d_set_fbinfo(struct e3d_info *ep) | |||
242 | static int __devinit e3d_pci_register(struct pci_dev *pdev, | 242 | static int __devinit e3d_pci_register(struct pci_dev *pdev, |
243 | const struct pci_device_id *ent) | 243 | const struct pci_device_id *ent) |
244 | { | 244 | { |
245 | struct device_node *of_node; | ||
246 | const char *device_type; | ||
245 | struct fb_info *info; | 247 | struct fb_info *info; |
246 | struct e3d_info *ep; | 248 | struct e3d_info *ep; |
247 | unsigned int line_length; | 249 | unsigned int line_length; |
248 | int err; | 250 | int err; |
249 | 251 | ||
252 | of_node = pci_device_to_OF_node(pdev); | ||
253 | if (!of_node) { | ||
254 | printk(KERN_ERR "e3d: Cannot find OF node of %s\n", | ||
255 | pci_name(pdev)); | ||
256 | return -ENODEV; | ||
257 | } | ||
258 | |||
259 | device_type = of_get_property(of_node, "device_type", NULL); | ||
260 | if (!device_type) { | ||
261 | printk(KERN_INFO "e3d: Ignoring secondary output device " | ||
262 | "at %s\n", pci_name(pdev)); | ||
263 | return -ENODEV; | ||
264 | } | ||
265 | |||
250 | err = pci_enable_device(pdev); | 266 | err = pci_enable_device(pdev); |
251 | if (err < 0) { | 267 | if (err < 0) { |
252 | printk(KERN_ERR "e3d: Cannot enable PCI device %s\n", | 268 | printk(KERN_ERR "e3d: Cannot enable PCI device %s\n", |
@@ -265,13 +281,7 @@ static int __devinit e3d_pci_register(struct pci_dev *pdev, | |||
265 | ep->info = info; | 281 | ep->info = info; |
266 | ep->pdev = pdev; | 282 | ep->pdev = pdev; |
267 | spin_lock_init(&ep->lock); | 283 | spin_lock_init(&ep->lock); |
268 | ep->of_node = pci_device_to_OF_node(pdev); | 284 | ep->of_node = of_node; |
269 | if (!ep->of_node) { | ||
270 | printk(KERN_ERR "e3d: Cannot find OF node of %s\n", | ||
271 | pci_name(pdev)); | ||
272 | err = -ENODEV; | ||
273 | goto err_release_fb; | ||
274 | } | ||
275 | 285 | ||
276 | /* Read the PCI base register of the frame buffer, which we | 286 | /* Read the PCI base register of the frame buffer, which we |
277 | * need in order to interpret the RAMDAC_VID_*FB* values in | 287 | * need in order to interpret the RAMDAC_VID_*FB* values in |
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c index a6c5674c78e6..0b9190754e24 100644 --- a/drivers/watchdog/hpwdt.c +++ b/drivers/watchdog/hpwdt.c | |||
@@ -443,7 +443,7 @@ static void hpwdt_ping(void) | |||
443 | static int hpwdt_change_timer(int new_margin) | 443 | static int hpwdt_change_timer(int new_margin) |
444 | { | 444 | { |
445 | /* Arbitrary, can't find the card's limits */ | 445 | /* Arbitrary, can't find the card's limits */ |
446 | if (new_margin < 30 || new_margin > 600) { | 446 | if (new_margin < 5 || new_margin > 600) { |
447 | printk(KERN_WARNING | 447 | printk(KERN_WARNING |
448 | "hpwdt: New value passed in is invalid: %d seconds.\n", | 448 | "hpwdt: New value passed in is invalid: %d seconds.\n", |
449 | new_margin); | 449 | new_margin); |
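Note: the hpwdt change only lowers the minimum accepted margin from 30 to 5 seconds; the upper bound stays at 600. A trivial standalone sketch of that bounds check (values from the patch, function name invented):

#include <stdio.h>

/* Accept a new watchdog margin only inside the arbitrary 5..600s window. */
static int hpwdt_margin_ok(int new_margin)
{
	return new_margin >= 5 && new_margin <= 600;
}

int main(void)
{
	printf("10s:  %s\n", hpwdt_margin_ok(10) ? "ok" : "rejected");
	printf("700s: %s\n", hpwdt_margin_ok(700) ? "ok" : "rejected");
	return 0;
}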
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c index 4bdb7f1a9077..e2ebe084986b 100644 --- a/drivers/watchdog/iTCO_wdt.c +++ b/drivers/watchdog/iTCO_wdt.c | |||
@@ -115,8 +115,37 @@ enum iTCO_chipsets { | |||
115 | TCO_3420, /* 3420 */ | 115 | TCO_3420, /* 3420 */ |
116 | TCO_3450, /* 3450 */ | 116 | TCO_3450, /* 3450 */ |
117 | TCO_EP80579, /* EP80579 */ | 117 | TCO_EP80579, /* EP80579 */ |
118 | TCO_CPTD, /* CPT Desktop */ | 118 | TCO_CPT1, /* Cougar Point */ |
119 | TCO_CPTM, /* CPT Mobile */ | 119 | TCO_CPT2, /* Cougar Point Desktop */ |
120 | TCO_CPT3, /* Cougar Point Mobile */ | ||
121 | TCO_CPT4, /* Cougar Point */ | ||
122 | TCO_CPT5, /* Cougar Point */ | ||
123 | TCO_CPT6, /* Cougar Point */ | ||
124 | TCO_CPT7, /* Cougar Point */ | ||
125 | TCO_CPT8, /* Cougar Point */ | ||
126 | TCO_CPT9, /* Cougar Point */ | ||
127 | TCO_CPT10, /* Cougar Point */ | ||
128 | TCO_CPT11, /* Cougar Point */ | ||
129 | TCO_CPT12, /* Cougar Point */ | ||
130 | TCO_CPT13, /* Cougar Point */ | ||
131 | TCO_CPT14, /* Cougar Point */ | ||
132 | TCO_CPT15, /* Cougar Point */ | ||
133 | TCO_CPT16, /* Cougar Point */ | ||
134 | TCO_CPT17, /* Cougar Point */ | ||
135 | TCO_CPT18, /* Cougar Point */ | ||
136 | TCO_CPT19, /* Cougar Point */ | ||
137 | TCO_CPT20, /* Cougar Point */ | ||
138 | TCO_CPT21, /* Cougar Point */ | ||
139 | TCO_CPT22, /* Cougar Point */ | ||
140 | TCO_CPT23, /* Cougar Point */ | ||
141 | TCO_CPT24, /* Cougar Point */ | ||
142 | TCO_CPT25, /* Cougar Point */ | ||
143 | TCO_CPT26, /* Cougar Point */ | ||
144 | TCO_CPT27, /* Cougar Point */ | ||
145 | TCO_CPT28, /* Cougar Point */ | ||
146 | TCO_CPT29, /* Cougar Point */ | ||
147 | TCO_CPT30, /* Cougar Point */ | ||
148 | TCO_CPT31, /* Cougar Point */ | ||
120 | }; | 149 | }; |
121 | 150 | ||
122 | static struct { | 151 | static struct { |
@@ -173,8 +202,37 @@ static struct { | |||
173 | {"3420", 2}, | 202 | {"3420", 2}, |
174 | {"3450", 2}, | 203 | {"3450", 2}, |
175 | {"EP80579", 2}, | 204 | {"EP80579", 2}, |
176 | {"CPT Desktop", 2}, | 205 | {"Cougar Point", 2}, |
177 | {"CPT Mobile", 2}, | 206 | {"Cougar Point", 2}, |
207 | {"Cougar Point", 2}, | ||
208 | {"Cougar Point", 2}, | ||
209 | {"Cougar Point", 2}, | ||
210 | {"Cougar Point", 2}, | ||
211 | {"Cougar Point", 2}, | ||
212 | {"Cougar Point", 2}, | ||
213 | {"Cougar Point", 2}, | ||
214 | {"Cougar Point", 2}, | ||
215 | {"Cougar Point", 2}, | ||
216 | {"Cougar Point", 2}, | ||
217 | {"Cougar Point", 2}, | ||
218 | {"Cougar Point", 2}, | ||
219 | {"Cougar Point", 2}, | ||
220 | {"Cougar Point", 2}, | ||
221 | {"Cougar Point", 2}, | ||
222 | {"Cougar Point", 2}, | ||
223 | {"Cougar Point", 2}, | ||
224 | {"Cougar Point", 2}, | ||
225 | {"Cougar Point", 2}, | ||
226 | {"Cougar Point", 2}, | ||
227 | {"Cougar Point", 2}, | ||
228 | {"Cougar Point", 2}, | ||
229 | {"Cougar Point", 2}, | ||
230 | {"Cougar Point", 2}, | ||
231 | {"Cougar Point", 2}, | ||
232 | {"Cougar Point", 2}, | ||
233 | {"Cougar Point", 2}, | ||
234 | {"Cougar Point", 2}, | ||
235 | {"Cougar Point", 2}, | ||
178 | {NULL, 0} | 236 | {NULL, 0} |
179 | }; | 237 | }; |
180 | 238 | ||
@@ -259,8 +317,37 @@ static struct pci_device_id iTCO_wdt_pci_tbl[] = { | |||
259 | { ITCO_PCI_DEVICE(0x3b14, TCO_3420)}, | 317 | { ITCO_PCI_DEVICE(0x3b14, TCO_3420)}, |
260 | { ITCO_PCI_DEVICE(0x3b16, TCO_3450)}, | 318 | { ITCO_PCI_DEVICE(0x3b16, TCO_3450)}, |
261 | { ITCO_PCI_DEVICE(0x5031, TCO_EP80579)}, | 319 | { ITCO_PCI_DEVICE(0x5031, TCO_EP80579)}, |
262 | { ITCO_PCI_DEVICE(0x1c42, TCO_CPTD)}, | 320 | { ITCO_PCI_DEVICE(0x1c41, TCO_CPT1)}, |
263 | { ITCO_PCI_DEVICE(0x1c43, TCO_CPTM)}, | 321 | { ITCO_PCI_DEVICE(0x1c42, TCO_CPT2)}, |
322 | { ITCO_PCI_DEVICE(0x1c43, TCO_CPT3)}, | ||
323 | { ITCO_PCI_DEVICE(0x1c44, TCO_CPT4)}, | ||
324 | { ITCO_PCI_DEVICE(0x1c45, TCO_CPT5)}, | ||
325 | { ITCO_PCI_DEVICE(0x1c46, TCO_CPT6)}, | ||
326 | { ITCO_PCI_DEVICE(0x1c47, TCO_CPT7)}, | ||
327 | { ITCO_PCI_DEVICE(0x1c48, TCO_CPT8)}, | ||
328 | { ITCO_PCI_DEVICE(0x1c49, TCO_CPT9)}, | ||
329 | { ITCO_PCI_DEVICE(0x1c4a, TCO_CPT10)}, | ||
330 | { ITCO_PCI_DEVICE(0x1c4b, TCO_CPT11)}, | ||
331 | { ITCO_PCI_DEVICE(0x1c4c, TCO_CPT12)}, | ||
332 | { ITCO_PCI_DEVICE(0x1c4d, TCO_CPT13)}, | ||
333 | { ITCO_PCI_DEVICE(0x1c4e, TCO_CPT14)}, | ||
334 | { ITCO_PCI_DEVICE(0x1c4f, TCO_CPT15)}, | ||
335 | { ITCO_PCI_DEVICE(0x1c50, TCO_CPT16)}, | ||
336 | { ITCO_PCI_DEVICE(0x1c51, TCO_CPT17)}, | ||
337 | { ITCO_PCI_DEVICE(0x1c52, TCO_CPT18)}, | ||
338 | { ITCO_PCI_DEVICE(0x1c53, TCO_CPT19)}, | ||
339 | { ITCO_PCI_DEVICE(0x1c54, TCO_CPT20)}, | ||
340 | { ITCO_PCI_DEVICE(0x1c55, TCO_CPT21)}, | ||
341 | { ITCO_PCI_DEVICE(0x1c56, TCO_CPT22)}, | ||
342 | { ITCO_PCI_DEVICE(0x1c57, TCO_CPT23)}, | ||
343 | { ITCO_PCI_DEVICE(0x1c58, TCO_CPT24)}, | ||
344 | { ITCO_PCI_DEVICE(0x1c59, TCO_CPT25)}, | ||
345 | { ITCO_PCI_DEVICE(0x1c5a, TCO_CPT26)}, | ||
346 | { ITCO_PCI_DEVICE(0x1c5b, TCO_CPT27)}, | ||
347 | { ITCO_PCI_DEVICE(0x1c5c, TCO_CPT28)}, | ||
348 | { ITCO_PCI_DEVICE(0x1c5d, TCO_CPT29)}, | ||
349 | { ITCO_PCI_DEVICE(0x1c5e, TCO_CPT30)}, | ||
350 | { ITCO_PCI_DEVICE(0x1c5f, TCO_CPT31)}, | ||
264 | { 0, }, /* End of list */ | 351 | { 0, }, /* End of list */ |
265 | }; | 352 | }; |
266 | MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl); | 353 | MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl); |
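Note: the iTCO_wdt hunks add one enum entry, one name-table entry and one PCI-ID entry per Cougar Point LPC device, keeping the three tables index-aligned, with device IDs running contiguously from 0x1c41 to 0x1c5f. A small sketch that prints that ID range to make the correspondence explicit (the table entries themselves are written by hand in the driver):

#include <stdio.h>

int main(void)
{
	unsigned int id;

	/* Cougar Point TCO PCI device IDs added by the patch: 0x1c41..0x1c5f */
	for (id = 0x1c41; id <= 0x1c5f; id++)
		printf("{ ITCO_PCI_DEVICE(0x%04x, TCO_CPT%u)},\n", id, id - 0x1c40);
	return 0;
}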
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c index 74a0461a9ac0..92f9590429b2 100644 --- a/fs/9p/vfs_file.c +++ b/fs/9p/vfs_file.c | |||
@@ -114,7 +114,7 @@ static int v9fs_file_lock(struct file *filp, int cmd, struct file_lock *fl) | |||
114 | P9_DPRINTK(P9_DEBUG_VFS, "filp: %p lock: %p\n", filp, fl); | 114 | P9_DPRINTK(P9_DEBUG_VFS, "filp: %p lock: %p\n", filp, fl); |
115 | 115 | ||
116 | /* No mandatory locks */ | 116 | /* No mandatory locks */ |
117 | if (__mandatory_lock(inode)) | 117 | if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK) |
118 | return -ENOLCK; | 118 | return -ENOLCK; |
119 | 119 | ||
120 | if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) { | 120 | if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) { |
diff --git a/fs/block_dev.c b/fs/block_dev.c index d11d0289f3d2..8db62b2b6df8 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c | |||
@@ -404,7 +404,7 @@ static loff_t block_llseek(struct file *file, loff_t offset, int origin) | |||
404 | * NULL first argument is nfsd_sync_dir() and that's not a directory. | 404 | * NULL first argument is nfsd_sync_dir() and that's not a directory. |
405 | */ | 405 | */ |
406 | 406 | ||
407 | static int block_fsync(struct file *filp, struct dentry *dentry, int datasync) | 407 | int block_fsync(struct file *filp, struct dentry *dentry, int datasync) |
408 | { | 408 | { |
409 | struct block_device *bdev = I_BDEV(filp->f_mapping->host); | 409 | struct block_device *bdev = I_BDEV(filp->f_mapping->host); |
410 | int error; | 410 | int error; |
@@ -418,6 +418,7 @@ static int block_fsync(struct file *filp, struct dentry *dentry, int datasync) | |||
418 | error = 0; | 418 | error = 0; |
419 | return error; | 419 | return error; |
420 | } | 420 | } |
421 | EXPORT_SYMBOL(block_fsync); | ||
421 | 422 | ||
422 | /* | 423 | /* |
423 | * pseudo-fs | 424 | * pseudo-fs |
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 63de4d6a8756..bc3114cb10de 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c | |||
@@ -1430,6 +1430,8 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon, | |||
1430 | __u32 bytes_sent; | 1430 | __u32 bytes_sent; |
1431 | __u16 byte_count; | 1431 | __u16 byte_count; |
1432 | 1432 | ||
1433 | *nbytes = 0; | ||
1434 | |||
1433 | /* cFYI(1, ("write at %lld %d bytes", offset, count));*/ | 1435 | /* cFYI(1, ("write at %lld %d bytes", offset, count));*/ |
1434 | if (tcon->ses == NULL) | 1436 | if (tcon->ses == NULL) |
1435 | return -ECONNABORTED; | 1437 | return -ECONNABORTED; |
@@ -1512,11 +1514,18 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon, | |||
1512 | cifs_stats_inc(&tcon->num_writes); | 1514 | cifs_stats_inc(&tcon->num_writes); |
1513 | if (rc) { | 1515 | if (rc) { |
1514 | cFYI(1, ("Send error in write = %d", rc)); | 1516 | cFYI(1, ("Send error in write = %d", rc)); |
1515 | *nbytes = 0; | ||
1516 | } else { | 1517 | } else { |
1517 | *nbytes = le16_to_cpu(pSMBr->CountHigh); | 1518 | *nbytes = le16_to_cpu(pSMBr->CountHigh); |
1518 | *nbytes = (*nbytes) << 16; | 1519 | *nbytes = (*nbytes) << 16; |
1519 | *nbytes += le16_to_cpu(pSMBr->Count); | 1520 | *nbytes += le16_to_cpu(pSMBr->Count); |
1521 | |||
1522 | /* | ||
1523 | * Mask off high 16 bits when bytes written as returned by the | ||
1524 | * server is greater than bytes requested by the client. Some | ||
1525 | * OS/2 servers are known to set incorrect CountHigh values. | ||
1526 | */ | ||
1527 | if (*nbytes > count) | ||
1528 | *nbytes &= 0xFFFF; | ||
1520 | } | 1529 | } |
1521 | 1530 | ||
1522 | cifs_buf_release(pSMB); | 1531 | cifs_buf_release(pSMB); |
@@ -1605,6 +1614,14 @@ CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon, | |||
1605 | *nbytes = le16_to_cpu(pSMBr->CountHigh); | 1614 | *nbytes = le16_to_cpu(pSMBr->CountHigh); |
1606 | *nbytes = (*nbytes) << 16; | 1615 | *nbytes = (*nbytes) << 16; |
1607 | *nbytes += le16_to_cpu(pSMBr->Count); | 1616 | *nbytes += le16_to_cpu(pSMBr->Count); |
1617 | |||
1618 | /* | ||
1619 | * Mask off high 16 bits when bytes written as returned by the | ||
1620 | * server is greater than bytes requested by the client. OS/2 | ||
1621 | * servers are known to set incorrect CountHigh values. | ||
1622 | */ | ||
1623 | if (*nbytes > count) | ||
1624 | *nbytes &= 0xFFFF; | ||
1608 | } | 1625 | } |
1609 | 1626 | ||
1610 | /* cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */ | 1627 | /* cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */ |
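Note: both CIFS write paths now initialize *nbytes up front and clamp the server-reported byte count: some OS/2 servers return garbage in CountHigh, so when the combined value exceeds what the client asked to write, only the low 16 bits are trusted. A standalone sketch of that clamping (helper name invented):

#include <stdio.h>
#include <stdint.h>

/* Combine CountHigh/Count from the SMB write response; if the result
 * exceeds the requested length, assume CountHigh is bogus (as seen on
 * some OS/2 servers) and keep only the low 16 bits. */
static unsigned int smb_write_nbytes(uint16_t count_high, uint16_t count,
				     unsigned int requested)
{
	unsigned int nbytes = ((unsigned int)count_high << 16) + count;

	if (nbytes > requested)
		nbytes &= 0xFFFF;
	return nbytes;
}

int main(void)
{
	/* Bogus CountHigh for a 512-byte request: 0x00010200 -> 512 */
	printf("%u\n", smb_write_nbytes(0x0001, 0x0200, 512));
	return 0;
}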
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index 4a430ab4115c..23dc2af56dc6 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c | |||
@@ -647,38 +647,17 @@ out_lock: | |||
647 | return rc; | 647 | return rc; |
648 | } | 648 | } |
649 | 649 | ||
650 | static int | 650 | static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf, |
651 | ecryptfs_readlink(struct dentry *dentry, char __user *buf, int bufsiz) | 651 | size_t *bufsiz) |
652 | { | 652 | { |
653 | struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry); | ||
653 | char *lower_buf; | 654 | char *lower_buf; |
654 | size_t lower_bufsiz; | 655 | size_t lower_bufsiz = PATH_MAX; |
655 | struct dentry *lower_dentry; | ||
656 | struct ecryptfs_mount_crypt_stat *mount_crypt_stat; | ||
657 | char *plaintext_name; | ||
658 | size_t plaintext_name_size; | ||
659 | mm_segment_t old_fs; | 656 | mm_segment_t old_fs; |
660 | int rc; | 657 | int rc; |
661 | 658 | ||
662 | lower_dentry = ecryptfs_dentry_to_lower(dentry); | ||
663 | if (!lower_dentry->d_inode->i_op->readlink) { | ||
664 | rc = -EINVAL; | ||
665 | goto out; | ||
666 | } | ||
667 | mount_crypt_stat = &ecryptfs_superblock_to_private( | ||
668 | dentry->d_sb)->mount_crypt_stat; | ||
669 | /* | ||
670 | * If the lower filename is encrypted, it will result in a significantly | ||
671 | * longer name. If needed, truncate the name after decode and decrypt. | ||
672 | */ | ||
673 | if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) | ||
674 | lower_bufsiz = PATH_MAX; | ||
675 | else | ||
676 | lower_bufsiz = bufsiz; | ||
677 | /* Released in this function */ | ||
678 | lower_buf = kmalloc(lower_bufsiz, GFP_KERNEL); | 659 | lower_buf = kmalloc(lower_bufsiz, GFP_KERNEL); |
679 | if (lower_buf == NULL) { | 660 | if (!lower_buf) { |
680 | printk(KERN_ERR "%s: Out of memory whilst attempting to " | ||
681 | "kmalloc [%zd] bytes\n", __func__, lower_bufsiz); | ||
682 | rc = -ENOMEM; | 661 | rc = -ENOMEM; |
683 | goto out; | 662 | goto out; |
684 | } | 663 | } |
@@ -688,29 +667,31 @@ ecryptfs_readlink(struct dentry *dentry, char __user *buf, int bufsiz) | |||
688 | (char __user *)lower_buf, | 667 | (char __user *)lower_buf, |
689 | lower_bufsiz); | 668 | lower_bufsiz); |
690 | set_fs(old_fs); | 669 | set_fs(old_fs); |
691 | if (rc >= 0) { | 670 | if (rc < 0) |
692 | rc = ecryptfs_decode_and_decrypt_filename(&plaintext_name, | 671 | goto out; |
693 | &plaintext_name_size, | 672 | lower_bufsiz = rc; |
694 | dentry, lower_buf, | 673 | rc = ecryptfs_decode_and_decrypt_filename(buf, bufsiz, dentry, |
695 | rc); | 674 | lower_buf, lower_bufsiz); |
696 | if (rc) { | 675 | out: |
697 | printk(KERN_ERR "%s: Error attempting to decode and " | ||
698 | "decrypt filename; rc = [%d]\n", __func__, | ||
699 | rc); | ||
700 | goto out_free_lower_buf; | ||
701 | } | ||
702 | /* Check for bufsiz <= 0 done in sys_readlinkat() */ | ||
703 | rc = copy_to_user(buf, plaintext_name, | ||
704 | min((size_t) bufsiz, plaintext_name_size)); | ||
705 | if (rc) | ||
706 | rc = -EFAULT; | ||
707 | else | ||
708 | rc = plaintext_name_size; | ||
709 | kfree(plaintext_name); | ||
710 | fsstack_copy_attr_atime(dentry->d_inode, lower_dentry->d_inode); | ||
711 | } | ||
712 | out_free_lower_buf: | ||
713 | kfree(lower_buf); | 676 | kfree(lower_buf); |
677 | return rc; | ||
678 | } | ||
679 | |||
680 | static int | ||
681 | ecryptfs_readlink(struct dentry *dentry, char __user *buf, int bufsiz) | ||
682 | { | ||
683 | char *kbuf; | ||
684 | size_t kbufsiz, copied; | ||
685 | int rc; | ||
686 | |||
687 | rc = ecryptfs_readlink_lower(dentry, &kbuf, &kbufsiz); | ||
688 | if (rc) | ||
689 | goto out; | ||
690 | copied = min_t(size_t, bufsiz, kbufsiz); | ||
691 | rc = copy_to_user(buf, kbuf, copied) ? -EFAULT : copied; | ||
692 | kfree(kbuf); | ||
693 | fsstack_copy_attr_atime(dentry->d_inode, | ||
694 | ecryptfs_dentry_to_lower(dentry)->d_inode); | ||
714 | out: | 695 | out: |
715 | return rc; | 696 | return rc; |
716 | } | 697 | } |
@@ -1015,6 +996,28 @@ out: | |||
1015 | return rc; | 996 | return rc; |
1016 | } | 997 | } |
1017 | 998 | ||
999 | int ecryptfs_getattr_link(struct vfsmount *mnt, struct dentry *dentry, | ||
1000 | struct kstat *stat) | ||
1001 | { | ||
1002 | struct ecryptfs_mount_crypt_stat *mount_crypt_stat; | ||
1003 | int rc = 0; | ||
1004 | |||
1005 | mount_crypt_stat = &ecryptfs_superblock_to_private( | ||
1006 | dentry->d_sb)->mount_crypt_stat; | ||
1007 | generic_fillattr(dentry->d_inode, stat); | ||
1008 | if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) { | ||
1009 | char *target; | ||
1010 | size_t targetsiz; | ||
1011 | |||
1012 | rc = ecryptfs_readlink_lower(dentry, &target, &targetsiz); | ||
1013 | if (!rc) { | ||
1014 | kfree(target); | ||
1015 | stat->size = targetsiz; | ||
1016 | } | ||
1017 | } | ||
1018 | return rc; | ||
1019 | } | ||
1020 | |||
1018 | int ecryptfs_getattr(struct vfsmount *mnt, struct dentry *dentry, | 1021 | int ecryptfs_getattr(struct vfsmount *mnt, struct dentry *dentry, |
1019 | struct kstat *stat) | 1022 | struct kstat *stat) |
1020 | { | 1023 | { |
@@ -1039,7 +1042,7 @@ ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value, | |||
1039 | 1042 | ||
1040 | lower_dentry = ecryptfs_dentry_to_lower(dentry); | 1043 | lower_dentry = ecryptfs_dentry_to_lower(dentry); |
1041 | if (!lower_dentry->d_inode->i_op->setxattr) { | 1044 | if (!lower_dentry->d_inode->i_op->setxattr) { |
1042 | rc = -ENOSYS; | 1045 | rc = -EOPNOTSUPP; |
1043 | goto out; | 1046 | goto out; |
1044 | } | 1047 | } |
1045 | mutex_lock(&lower_dentry->d_inode->i_mutex); | 1048 | mutex_lock(&lower_dentry->d_inode->i_mutex); |
@@ -1057,7 +1060,7 @@ ecryptfs_getxattr_lower(struct dentry *lower_dentry, const char *name, | |||
1057 | int rc = 0; | 1060 | int rc = 0; |
1058 | 1061 | ||
1059 | if (!lower_dentry->d_inode->i_op->getxattr) { | 1062 | if (!lower_dentry->d_inode->i_op->getxattr) { |
1060 | rc = -ENOSYS; | 1063 | rc = -EOPNOTSUPP; |
1061 | goto out; | 1064 | goto out; |
1062 | } | 1065 | } |
1063 | mutex_lock(&lower_dentry->d_inode->i_mutex); | 1066 | mutex_lock(&lower_dentry->d_inode->i_mutex); |
@@ -1084,7 +1087,7 @@ ecryptfs_listxattr(struct dentry *dentry, char *list, size_t size) | |||
1084 | 1087 | ||
1085 | lower_dentry = ecryptfs_dentry_to_lower(dentry); | 1088 | lower_dentry = ecryptfs_dentry_to_lower(dentry); |
1086 | if (!lower_dentry->d_inode->i_op->listxattr) { | 1089 | if (!lower_dentry->d_inode->i_op->listxattr) { |
1087 | rc = -ENOSYS; | 1090 | rc = -EOPNOTSUPP; |
1088 | goto out; | 1091 | goto out; |
1089 | } | 1092 | } |
1090 | mutex_lock(&lower_dentry->d_inode->i_mutex); | 1093 | mutex_lock(&lower_dentry->d_inode->i_mutex); |
@@ -1101,7 +1104,7 @@ static int ecryptfs_removexattr(struct dentry *dentry, const char *name) | |||
1101 | 1104 | ||
1102 | lower_dentry = ecryptfs_dentry_to_lower(dentry); | 1105 | lower_dentry = ecryptfs_dentry_to_lower(dentry); |
1103 | if (!lower_dentry->d_inode->i_op->removexattr) { | 1106 | if (!lower_dentry->d_inode->i_op->removexattr) { |
1104 | rc = -ENOSYS; | 1107 | rc = -EOPNOTSUPP; |
1105 | goto out; | 1108 | goto out; |
1106 | } | 1109 | } |
1107 | mutex_lock(&lower_dentry->d_inode->i_mutex); | 1110 | mutex_lock(&lower_dentry->d_inode->i_mutex); |
@@ -1132,6 +1135,7 @@ const struct inode_operations ecryptfs_symlink_iops = { | |||
1132 | .put_link = ecryptfs_put_link, | 1135 | .put_link = ecryptfs_put_link, |
1133 | .permission = ecryptfs_permission, | 1136 | .permission = ecryptfs_permission, |
1134 | .setattr = ecryptfs_setattr, | 1137 | .setattr = ecryptfs_setattr, |
1138 | .getattr = ecryptfs_getattr_link, | ||
1135 | .setxattr = ecryptfs_setxattr, | 1139 | .setxattr = ecryptfs_setxattr, |
1136 | .getxattr = ecryptfs_getxattr, | 1140 | .getxattr = ecryptfs_getxattr, |
1137 | .listxattr = ecryptfs_listxattr, | 1141 | .listxattr = ecryptfs_listxattr, |
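Note: the eCryptfs readlink path is split so ecryptfs_readlink_lower() returns the decrypted target in a kernel buffer, ecryptfs_readlink() then copies at most bufsiz bytes to user space, and the new ecryptfs_getattr_link() reuses the same helper to report the decrypted target length in stat->size. A rough user-space sketch of that split (read into a kernel-side buffer, then bounded copy), with invented names standing in for the kernel helpers:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for the "lower" step: produce the decrypted target in a
 * freshly allocated buffer and report its length. */
static int readlink_lower(char **buf, size_t *bufsiz)
{
	const char *target = "decrypted/target/path";

	*bufsiz = strlen(target);
	*buf = malloc(*bufsiz);
	if (!*buf)
		return -1;
	memcpy(*buf, target, *bufsiz);
	return 0;
}

/* Stand-in for the user-facing step: bounded copy of the kernel buffer. */
static long do_readlink(char *user_buf, size_t user_size)
{
	char *kbuf;
	size_t kbufsiz, copied;

	if (readlink_lower(&kbuf, &kbufsiz))
		return -1;
	copied = user_size < kbufsiz ? user_size : kbufsiz;
	memcpy(user_buf, kbuf, copied);		/* copy_to_user() in eCryptfs */
	free(kbuf);
	return (long)copied;
}

int main(void)
{
	char buf[8];
	long n = do_readlink(buf, sizeof(buf));

	printf("copied %ld bytes: %.*s\n", n, (int)n, buf);
	return 0;
}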
diff --git a/fs/ecryptfs/super.c b/fs/ecryptfs/super.c index b15a43a80ab7..1a037f77aa52 100644 --- a/fs/ecryptfs/super.c +++ b/fs/ecryptfs/super.c | |||
@@ -85,7 +85,6 @@ static void ecryptfs_destroy_inode(struct inode *inode) | |||
85 | if (lower_dentry->d_inode) { | 85 | if (lower_dentry->d_inode) { |
86 | fput(inode_info->lower_file); | 86 | fput(inode_info->lower_file); |
87 | inode_info->lower_file = NULL; | 87 | inode_info->lower_file = NULL; |
88 | d_drop(lower_dentry); | ||
89 | } | 88 | } |
90 | } | 89 | } |
91 | ecryptfs_destroy_crypt_stat(&inode_info->crypt_stat); | 90 | ecryptfs_destroy_crypt_stat(&inode_info->crypt_stat); |
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 874d169a193e..602d5ad6f5e7 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h | |||
@@ -139,8 +139,8 @@ typedef struct ext4_io_end { | |||
139 | struct inode *inode; /* file being written to */ | 139 | struct inode *inode; /* file being written to */ |
140 | unsigned int flag; /* unwritten or not */ | 140 | unsigned int flag; /* unwritten or not */ |
141 | int error; /* I/O error code */ | 141 | int error; /* I/O error code */ |
142 | ext4_lblk_t offset; /* offset in the file */ | 142 | loff_t offset; /* offset in the file */ |
143 | size_t size; /* size of the extent */ | 143 | ssize_t size; /* size of the extent */ |
144 | struct work_struct work; /* data work queue */ | 144 | struct work_struct work; /* data work queue */ |
145 | } ext4_io_end_t; | 145 | } ext4_io_end_t; |
146 | 146 | ||
@@ -1744,7 +1744,7 @@ extern void ext4_ext_release(struct super_block *); | |||
1744 | extern long ext4_fallocate(struct inode *inode, int mode, loff_t offset, | 1744 | extern long ext4_fallocate(struct inode *inode, int mode, loff_t offset, |
1745 | loff_t len); | 1745 | loff_t len); |
1746 | extern int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset, | 1746 | extern int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset, |
1747 | loff_t len); | 1747 | ssize_t len); |
1748 | extern int ext4_get_blocks(handle_t *handle, struct inode *inode, | 1748 | extern int ext4_get_blocks(handle_t *handle, struct inode *inode, |
1749 | sector_t block, unsigned int max_blocks, | 1749 | sector_t block, unsigned int max_blocks, |
1750 | struct buffer_head *bh, int flags); | 1750 | struct buffer_head *bh, int flags); |
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 765a4826b118..c56877972b0e 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c | |||
@@ -3603,7 +3603,7 @@ retry: | |||
3603 | * Returns 0 on success. | 3603 | * Returns 0 on success. |
3604 | */ | 3604 | */ |
3605 | int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset, | 3605 | int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset, |
3606 | loff_t len) | 3606 | ssize_t len) |
3607 | { | 3607 | { |
3608 | handle_t *handle; | 3608 | handle_t *handle; |
3609 | ext4_lblk_t block; | 3609 | ext4_lblk_t block; |
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 89dbb3874943..c16c3b92d8da 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c | |||
@@ -3551,7 +3551,7 @@ static int ext4_end_aio_dio_nolock(ext4_io_end_t *io) | |||
3551 | { | 3551 | { |
3552 | struct inode *inode = io->inode; | 3552 | struct inode *inode = io->inode; |
3553 | loff_t offset = io->offset; | 3553 | loff_t offset = io->offset; |
3554 | size_t size = io->size; | 3554 | ssize_t size = io->size; |
3555 | int ret = 0; | 3555 | int ret = 0; |
3556 | 3556 | ||
3557 | ext4_debug("end_aio_dio_onlock: io 0x%p from inode %lu,list->next 0x%p," | 3557 | ext4_debug("end_aio_dio_onlock: io 0x%p from inode %lu,list->next 0x%p," |
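Note: the ext4 hunks change the io_end offset from ext4_lblk_t (a 32-bit logical block number type) to loff_t and the size from size_t to ssize_t, so large byte offsets are no longer truncated when an unwritten-extent conversion is scheduled. A tiny demonstration of the truncation a 32-bit field allows, using stdint equivalents and an illustrative offset:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	long long file_offset = 5LL * 1024 * 1024 * 1024;	/* 5 GiB offset */

	uint32_t as_lblk = (uint32_t)file_offset;	/* old 32-bit field */
	int64_t  as_loff = (int64_t)file_offset;	/* new loff_t-sized field */

	printf("stored in 32 bits: %u\n", as_lblk);	/* truncated */
	printf("stored in 64 bits: %lld\n", (long long)as_loff);
	return 0;
}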
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c index f565f24019b5..72646e2c0f48 100644 --- a/fs/fat/namei_vfat.c +++ b/fs/fat/namei_vfat.c | |||
@@ -309,7 +309,7 @@ static int vfat_create_shortname(struct inode *dir, struct nls_table *nls, | |||
309 | { | 309 | { |
310 | struct fat_mount_options *opts = &MSDOS_SB(dir->i_sb)->options; | 310 | struct fat_mount_options *opts = &MSDOS_SB(dir->i_sb)->options; |
311 | wchar_t *ip, *ext_start, *end, *name_start; | 311 | wchar_t *ip, *ext_start, *end, *name_start; |
312 | unsigned char base[9], ext[4], buf[8], *p; | 312 | unsigned char base[9], ext[4], buf[5], *p; |
313 | unsigned char charbuf[NLS_MAX_CHARSET_SIZE]; | 313 | unsigned char charbuf[NLS_MAX_CHARSET_SIZE]; |
314 | int chl, chi; | 314 | int chl, chi; |
315 | int sz = 0, extlen, baselen, i, numtail_baselen, numtail2_baselen; | 315 | int sz = 0, extlen, baselen, i, numtail_baselen, numtail2_baselen; |
@@ -467,7 +467,7 @@ static int vfat_create_shortname(struct inode *dir, struct nls_table *nls, | |||
467 | return 0; | 467 | return 0; |
468 | } | 468 | } |
469 | 469 | ||
470 | i = jiffies & 0xffff; | 470 | i = jiffies; |
471 | sz = (jiffies >> 16) & 0x7; | 471 | sz = (jiffies >> 16) & 0x7; |
472 | if (baselen > 2) { | 472 | if (baselen > 2) { |
473 | baselen = numtail2_baselen; | 473 | baselen = numtail2_baselen; |
@@ -476,7 +476,7 @@ static int vfat_create_shortname(struct inode *dir, struct nls_table *nls, | |||
476 | name_res[baselen + 4] = '~'; | 476 | name_res[baselen + 4] = '~'; |
477 | name_res[baselen + 5] = '1' + sz; | 477 | name_res[baselen + 5] = '1' + sz; |
478 | while (1) { | 478 | while (1) { |
479 | sprintf(buf, "%04X", i); | 479 | snprintf(buf, sizeof(buf), "%04X", i & 0xffff); |
480 | memcpy(&name_res[baselen], buf, 4); | 480 | memcpy(&name_res[baselen], buf, 4); |
481 | if (vfat_find_form(dir, name_res) < 0) | 481 | if (vfat_find_form(dir, name_res) < 0) |
482 | break; | 482 | break; |
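Note: the vfat shortname hunk shrinks buf to 5 bytes and switches to snprintf with an explicit 16-bit mask, so formatting the numeric tail can never write past the buffer even when i carries more than 16 significant bits. A standalone sketch of the fixed formatting step:

#include <stdio.h>

int main(void)
{
	unsigned long i = 0x1234abcdUL;	/* e.g. a raw jiffies value */
	unsigned char buf[5];		/* "XXXX" plus NUL, as in the patch */

	/* Mask to 16 bits and bound the write: at most 4 hex digits + NUL. */
	snprintf((char *)buf, sizeof(buf), "%04X", (unsigned int)(i & 0xffff));
	printf("%s\n", buf);		/* prints ABCD */
	return 0;
}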
diff --git a/fs/nfs/client.c b/fs/nfs/client.c index ee77713ce68b..bd39abc51508 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c | |||
@@ -1293,7 +1293,8 @@ static int nfs4_init_server(struct nfs_server *server, | |||
1293 | 1293 | ||
1294 | /* Initialise the client representation from the mount data */ | 1294 | /* Initialise the client representation from the mount data */ |
1295 | server->flags = data->flags; | 1295 | server->flags = data->flags; |
1296 | server->caps |= NFS_CAP_ATOMIC_OPEN|NFS_CAP_CHANGE_ATTR; | 1296 | server->caps |= NFS_CAP_ATOMIC_OPEN|NFS_CAP_CHANGE_ATTR| |
1297 | NFS_CAP_POSIX_LOCK; | ||
1297 | server->options = data->options; | 1298 | server->options = data->options; |
1298 | 1299 | ||
1299 | /* Get a client record */ | 1300 | /* Get a client record */ |
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 8b5382efd454..af6948d6faf2 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c | |||
@@ -1025,12 +1025,12 @@ static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry | |||
1025 | res = NULL; | 1025 | res = NULL; |
1026 | goto out; | 1026 | goto out; |
1027 | /* This turned out not to be a regular file */ | 1027 | /* This turned out not to be a regular file */ |
1028 | case -EISDIR: | ||
1028 | case -ENOTDIR: | 1029 | case -ENOTDIR: |
1029 | goto no_open; | 1030 | goto no_open; |
1030 | case -ELOOP: | 1031 | case -ELOOP: |
1031 | if (!(nd->intent.open.flags & O_NOFOLLOW)) | 1032 | if (!(nd->intent.open.flags & O_NOFOLLOW)) |
1032 | goto no_open; | 1033 | goto no_open; |
1033 | /* case -EISDIR: */ | ||
1034 | /* case -EINVAL: */ | 1034 | /* case -EINVAL: */ |
1035 | default: | 1035 | default: |
1036 | goto out; | 1036 | goto out; |
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 375f0fae2c6a..ecf660204755 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c | |||
@@ -1520,6 +1520,8 @@ static int _nfs4_proc_open(struct nfs4_opendata *data) | |||
1520 | nfs_post_op_update_inode(dir, o_res->dir_attr); | 1520 | nfs_post_op_update_inode(dir, o_res->dir_attr); |
1521 | } else | 1521 | } else |
1522 | nfs_refresh_inode(dir, o_res->dir_attr); | 1522 | nfs_refresh_inode(dir, o_res->dir_attr); |
1523 | if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0) | ||
1524 | server->caps &= ~NFS_CAP_POSIX_LOCK; | ||
1523 | if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { | 1525 | if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { |
1524 | status = _nfs4_proc_open_confirm(data); | 1526 | status = _nfs4_proc_open_confirm(data); |
1525 | if (status != 0) | 1527 | if (status != 0) |
@@ -1660,7 +1662,7 @@ static int _nfs4_do_open(struct inode *dir, struct path *path, fmode_t fmode, in | |||
1660 | status = PTR_ERR(state); | 1662 | status = PTR_ERR(state); |
1661 | if (IS_ERR(state)) | 1663 | if (IS_ERR(state)) |
1662 | goto err_opendata_put; | 1664 | goto err_opendata_put; |
1663 | if ((opendata->o_res.rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) != 0) | 1665 | if (server->caps & NFS_CAP_POSIX_LOCK) |
1664 | set_bit(NFS_STATE_POSIX_LOCKS, &state->flags); | 1666 | set_bit(NFS_STATE_POSIX_LOCKS, &state->flags); |
1665 | nfs4_opendata_put(opendata); | 1667 | nfs4_opendata_put(opendata); |
1666 | nfs4_put_state_owner(sp); | 1668 | nfs4_put_state_owner(sp); |
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index a8587e90fd5a..bbf72d8f9fc0 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c | |||
@@ -2121,9 +2121,15 @@ out_acl: | |||
2121 | * and this is the root of a cross-mounted filesystem. | 2121 | * and this is the root of a cross-mounted filesystem. |
2122 | */ | 2122 | */ |
2123 | if (ignore_crossmnt == 0 && | 2123 | if (ignore_crossmnt == 0 && |
2124 | exp->ex_path.mnt->mnt_root->d_inode == dentry->d_inode) { | 2124 | dentry == exp->ex_path.mnt->mnt_root) { |
2125 | err = vfs_getattr(exp->ex_path.mnt->mnt_parent, | 2125 | struct path path = exp->ex_path; |
2126 | exp->ex_path.mnt->mnt_mountpoint, &stat); | 2126 | path_get(&path); |
2127 | while (follow_up(&path)) { | ||
2128 | if (path.dentry != path.mnt->mnt_root) | ||
2129 | break; | ||
2130 | } | ||
2131 | err = vfs_getattr(path.mnt, path.dentry, &stat); | ||
2132 | path_put(&path); | ||
2127 | if (err) | 2133 | if (err) |
2128 | goto out_nfserr; | 2134 | goto out_nfserr; |
2129 | } | 2135 | } |
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c index 0501974bedd0..8ccf0f8c9cc8 100644 --- a/fs/ocfs2/acl.c +++ b/fs/ocfs2/acl.c | |||
@@ -30,6 +30,8 @@ | |||
30 | #include "alloc.h" | 30 | #include "alloc.h" |
31 | #include "dlmglue.h" | 31 | #include "dlmglue.h" |
32 | #include "file.h" | 32 | #include "file.h" |
33 | #include "inode.h" | ||
34 | #include "journal.h" | ||
33 | #include "ocfs2_fs.h" | 35 | #include "ocfs2_fs.h" |
34 | 36 | ||
35 | #include "xattr.h" | 37 | #include "xattr.h" |
@@ -166,6 +168,60 @@ static struct posix_acl *ocfs2_get_acl(struct inode *inode, int type) | |||
166 | } | 168 | } |
167 | 169 | ||
168 | /* | 170 | /* |
171 | * Helper function to set i_mode in memory and disk. Some call paths | ||
172 | * will not have di_bh or a journal handle to pass, in which case it | ||
173 | * will create its own. | ||
174 | */ | ||
175 | static int ocfs2_acl_set_mode(struct inode *inode, struct buffer_head *di_bh, | ||
176 | handle_t *handle, umode_t new_mode) | ||
177 | { | ||
178 | int ret, commit_handle = 0; | ||
179 | struct ocfs2_dinode *di; | ||
180 | |||
181 | if (di_bh == NULL) { | ||
182 | ret = ocfs2_read_inode_block(inode, &di_bh); | ||
183 | if (ret) { | ||
184 | mlog_errno(ret); | ||
185 | goto out; | ||
186 | } | ||
187 | } else | ||
188 | get_bh(di_bh); | ||
189 | |||
190 | if (handle == NULL) { | ||
191 | handle = ocfs2_start_trans(OCFS2_SB(inode->i_sb), | ||
192 | OCFS2_INODE_UPDATE_CREDITS); | ||
193 | if (IS_ERR(handle)) { | ||
194 | ret = PTR_ERR(handle); | ||
195 | mlog_errno(ret); | ||
196 | goto out_brelse; | ||
197 | } | ||
198 | |||
199 | commit_handle = 1; | ||
200 | } | ||
201 | |||
202 | di = (struct ocfs2_dinode *)di_bh->b_data; | ||
203 | ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh, | ||
204 | OCFS2_JOURNAL_ACCESS_WRITE); | ||
205 | if (ret) { | ||
206 | mlog_errno(ret); | ||
207 | goto out_commit; | ||
208 | } | ||
209 | |||
210 | inode->i_mode = new_mode; | ||
211 | di->i_mode = cpu_to_le16(inode->i_mode); | ||
212 | |||
213 | ocfs2_journal_dirty(handle, di_bh); | ||
214 | |||
215 | out_commit: | ||
216 | if (commit_handle) | ||
217 | ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle); | ||
218 | out_brelse: | ||
219 | brelse(di_bh); | ||
220 | out: | ||
221 | return ret; | ||
222 | } | ||
223 | |||
224 | /* | ||
169 | * Set the access or default ACL of an inode. | 225 | * Set the access or default ACL of an inode. |
170 | */ | 226 | */ |
171 | static int ocfs2_set_acl(handle_t *handle, | 227 | static int ocfs2_set_acl(handle_t *handle, |
@@ -193,9 +249,14 @@ static int ocfs2_set_acl(handle_t *handle, | |||
193 | if (ret < 0) | 249 | if (ret < 0) |
194 | return ret; | 250 | return ret; |
195 | else { | 251 | else { |
196 | inode->i_mode = mode; | ||
197 | if (ret == 0) | 252 | if (ret == 0) |
198 | acl = NULL; | 253 | acl = NULL; |
254 | |||
255 | ret = ocfs2_acl_set_mode(inode, di_bh, | ||
256 | handle, mode); | ||
257 | if (ret) | ||
258 | return ret; | ||
259 | |||
199 | } | 260 | } |
200 | } | 261 | } |
201 | break; | 262 | break; |
@@ -283,6 +344,7 @@ int ocfs2_init_acl(handle_t *handle, | |||
283 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); | 344 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); |
284 | struct posix_acl *acl = NULL; | 345 | struct posix_acl *acl = NULL; |
285 | int ret = 0; | 346 | int ret = 0; |
347 | mode_t mode; | ||
286 | 348 | ||
287 | if (!S_ISLNK(inode->i_mode)) { | 349 | if (!S_ISLNK(inode->i_mode)) { |
288 | if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) { | 350 | if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) { |
@@ -291,12 +353,17 @@ int ocfs2_init_acl(handle_t *handle, | |||
291 | if (IS_ERR(acl)) | 353 | if (IS_ERR(acl)) |
292 | return PTR_ERR(acl); | 354 | return PTR_ERR(acl); |
293 | } | 355 | } |
294 | if (!acl) | 356 | if (!acl) { |
295 | inode->i_mode &= ~current_umask(); | 357 | mode = inode->i_mode & ~current_umask(); |
358 | ret = ocfs2_acl_set_mode(inode, di_bh, handle, mode); | ||
359 | if (ret) { | ||
360 | mlog_errno(ret); | ||
361 | goto cleanup; | ||
362 | } | ||
363 | } | ||
296 | } | 364 | } |
297 | if ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) && acl) { | 365 | if ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) && acl) { |
298 | struct posix_acl *clone; | 366 | struct posix_acl *clone; |
299 | mode_t mode; | ||
300 | 367 | ||
301 | if (S_ISDIR(inode->i_mode)) { | 368 | if (S_ISDIR(inode->i_mode)) { |
302 | ret = ocfs2_set_acl(handle, inode, di_bh, | 369 | ret = ocfs2_set_acl(handle, inode, di_bh, |
@@ -313,7 +380,7 @@ int ocfs2_init_acl(handle_t *handle, | |||
313 | mode = inode->i_mode; | 380 | mode = inode->i_mode; |
314 | ret = posix_acl_create_masq(clone, &mode); | 381 | ret = posix_acl_create_masq(clone, &mode); |
315 | if (ret >= 0) { | 382 | if (ret >= 0) { |
316 | inode->i_mode = mode; | 383 | ret = ocfs2_acl_set_mode(inode, di_bh, handle, mode); |
317 | if (ret > 0) { | 384 | if (ret > 0) { |
318 | ret = ocfs2_set_acl(handle, inode, | 385 | ret = ocfs2_set_acl(handle, inode, |
319 | di_bh, ACL_TYPE_ACCESS, | 386 | di_bh, ACL_TYPE_ACCESS, |
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c index c30b644d9572..79b5dacf9312 100644 --- a/fs/ocfs2/suballoc.c +++ b/fs/ocfs2/suballoc.c | |||
@@ -152,7 +152,7 @@ static u32 ocfs2_bits_per_group(struct ocfs2_chain_list *cl) | |||
152 | 152 | ||
153 | #define do_error(fmt, ...) \ | 153 | #define do_error(fmt, ...) \ |
154 | do{ \ | 154 | do{ \ |
155 | if (clean_error) \ | 155 | if (resize) \ |
156 | mlog(ML_ERROR, fmt "\n", ##__VA_ARGS__); \ | 156 | mlog(ML_ERROR, fmt "\n", ##__VA_ARGS__); \ |
157 | else \ | 157 | else \ |
158 | ocfs2_error(sb, fmt, ##__VA_ARGS__); \ | 158 | ocfs2_error(sb, fmt, ##__VA_ARGS__); \ |
@@ -160,7 +160,7 @@ static u32 ocfs2_bits_per_group(struct ocfs2_chain_list *cl) | |||
160 | 160 | ||
161 | static int ocfs2_validate_gd_self(struct super_block *sb, | 161 | static int ocfs2_validate_gd_self(struct super_block *sb, |
162 | struct buffer_head *bh, | 162 | struct buffer_head *bh, |
163 | int clean_error) | 163 | int resize) |
164 | { | 164 | { |
165 | struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data; | 165 | struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data; |
166 | 166 | ||
@@ -211,7 +211,7 @@ static int ocfs2_validate_gd_self(struct super_block *sb, | |||
211 | static int ocfs2_validate_gd_parent(struct super_block *sb, | 211 | static int ocfs2_validate_gd_parent(struct super_block *sb, |
212 | struct ocfs2_dinode *di, | 212 | struct ocfs2_dinode *di, |
213 | struct buffer_head *bh, | 213 | struct buffer_head *bh, |
214 | int clean_error) | 214 | int resize) |
215 | { | 215 | { |
216 | unsigned int max_bits; | 216 | unsigned int max_bits; |
217 | struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data; | 217 | struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data; |
@@ -233,8 +233,11 @@ static int ocfs2_validate_gd_parent(struct super_block *sb, | |||
233 | return -EINVAL; | 233 | return -EINVAL; |
234 | } | 234 | } |
235 | 235 | ||
236 | if (le16_to_cpu(gd->bg_chain) >= | 236 | /* In resize, we may meet the case bg_chain == cl_next_free_rec. */ |
237 | le16_to_cpu(di->id2.i_chain.cl_next_free_rec)) { | 237 | if ((le16_to_cpu(gd->bg_chain) > |
238 | le16_to_cpu(di->id2.i_chain.cl_next_free_rec)) || | ||
239 | ((le16_to_cpu(gd->bg_chain) == | ||
240 | le16_to_cpu(di->id2.i_chain.cl_next_free_rec)) && !resize)) { | ||
238 | do_error("Group descriptor #%llu has bad chain %u", | 241 | do_error("Group descriptor #%llu has bad chain %u", |
239 | (unsigned long long)bh->b_blocknr, | 242 | (unsigned long long)bh->b_blocknr, |
240 | le16_to_cpu(gd->bg_chain)); | 243 | le16_to_cpu(gd->bg_chain)); |
diff --git a/fs/proc/base.c b/fs/proc/base.c index 58324c299165..3cd449d23352 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c | |||
@@ -442,12 +442,13 @@ static const struct file_operations proc_lstats_operations = { | |||
442 | unsigned long badness(struct task_struct *p, unsigned long uptime); | 442 | unsigned long badness(struct task_struct *p, unsigned long uptime); |
443 | static int proc_oom_score(struct task_struct *task, char *buffer) | 443 | static int proc_oom_score(struct task_struct *task, char *buffer) |
444 | { | 444 | { |
445 | unsigned long points; | 445 | unsigned long points = 0; |
446 | struct timespec uptime; | 446 | struct timespec uptime; |
447 | 447 | ||
448 | do_posix_clock_monotonic_gettime(&uptime); | 448 | do_posix_clock_monotonic_gettime(&uptime); |
449 | read_lock(&tasklist_lock); | 449 | read_lock(&tasklist_lock); |
450 | points = badness(task->group_leader, uptime.tv_sec); | 450 | if (pid_alive(task)) |
451 | points = badness(task, uptime.tv_sec); | ||
451 | read_unlock(&tasklist_lock); | 452 | read_unlock(&tasklist_lock); |
452 | return sprintf(buffer, "%lu\n", points); | 453 | return sprintf(buffer, "%lu\n", points); |
453 | } | 454 | } |
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 6e722c11ce13..6c9da00ddda2 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c | |||
@@ -2321,34 +2321,34 @@ static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di) | |||
2321 | if (di->dqb_valid & QIF_SPACE) { | 2321 | if (di->dqb_valid & QIF_SPACE) { |
2322 | dm->dqb_curspace = di->dqb_curspace - dm->dqb_rsvspace; | 2322 | dm->dqb_curspace = di->dqb_curspace - dm->dqb_rsvspace; |
2323 | check_blim = 1; | 2323 | check_blim = 1; |
2324 | __set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags); | 2324 | set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags); |
2325 | } | 2325 | } |
2326 | if (di->dqb_valid & QIF_BLIMITS) { | 2326 | if (di->dqb_valid & QIF_BLIMITS) { |
2327 | dm->dqb_bsoftlimit = qbtos(di->dqb_bsoftlimit); | 2327 | dm->dqb_bsoftlimit = qbtos(di->dqb_bsoftlimit); |
2328 | dm->dqb_bhardlimit = qbtos(di->dqb_bhardlimit); | 2328 | dm->dqb_bhardlimit = qbtos(di->dqb_bhardlimit); |
2329 | check_blim = 1; | 2329 | check_blim = 1; |
2330 | __set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags); | 2330 | set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags); |
2331 | } | 2331 | } |
2332 | if (di->dqb_valid & QIF_INODES) { | 2332 | if (di->dqb_valid & QIF_INODES) { |
2333 | dm->dqb_curinodes = di->dqb_curinodes; | 2333 | dm->dqb_curinodes = di->dqb_curinodes; |
2334 | check_ilim = 1; | 2334 | check_ilim = 1; |
2335 | __set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags); | 2335 | set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags); |
2336 | } | 2336 | } |
2337 | if (di->dqb_valid & QIF_ILIMITS) { | 2337 | if (di->dqb_valid & QIF_ILIMITS) { |
2338 | dm->dqb_isoftlimit = di->dqb_isoftlimit; | 2338 | dm->dqb_isoftlimit = di->dqb_isoftlimit; |
2339 | dm->dqb_ihardlimit = di->dqb_ihardlimit; | 2339 | dm->dqb_ihardlimit = di->dqb_ihardlimit; |
2340 | check_ilim = 1; | 2340 | check_ilim = 1; |
2341 | __set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags); | 2341 | set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags); |
2342 | } | 2342 | } |
2343 | if (di->dqb_valid & QIF_BTIME) { | 2343 | if (di->dqb_valid & QIF_BTIME) { |
2344 | dm->dqb_btime = di->dqb_btime; | 2344 | dm->dqb_btime = di->dqb_btime; |
2345 | check_blim = 1; | 2345 | check_blim = 1; |
2346 | __set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags); | 2346 | set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags); |
2347 | } | 2347 | } |
2348 | if (di->dqb_valid & QIF_ITIME) { | 2348 | if (di->dqb_valid & QIF_ITIME) { |
2349 | dm->dqb_itime = di->dqb_itime; | 2349 | dm->dqb_itime = di->dqb_itime; |
2350 | check_ilim = 1; | 2350 | check_ilim = 1; |
2351 | __set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags); | 2351 | set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags); |
2352 | } | 2352 | } |
2353 | 2353 | ||
2354 | if (check_blim) { | 2354 | if (check_blim) { |
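Note: the quota hunk replaces __set_bit() with set_bit(), so the DQ_LASTSET_B flag updates are atomic read-modify-write operations rather than plain loads and stores that can race with concurrent flag changes on the same word. A user-space sketch of the difference using C11 atomics (the helper names are stand-ins, not the kernel bitops API):

#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong dq_flags;

/* Non-atomic flavour: separate load and store, can lose concurrent updates. */
static void nonatomic_set_bit(int nr, atomic_ulong *addr)
{
	unsigned long v = atomic_load_explicit(addr, memory_order_relaxed);
	atomic_store_explicit(addr, v | (1UL << nr), memory_order_relaxed);
}

/* Atomic flavour: one read-modify-write, like the kernel's set_bit(). */
static void atomic_set_bit(int nr, atomic_ulong *addr)
{
	atomic_fetch_or(addr, 1UL << nr);
}

int main(void)
{
	nonatomic_set_bit(0, &dq_flags);
	atomic_set_bit(3, &dq_flags);
	printf("flags: %#lx\n", (unsigned long)atomic_load(&dq_flags));
	return 0;
}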
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index b4a7dd03bdb9..33bc410c6689 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c | |||
@@ -1619,10 +1619,8 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent) | |||
1619 | save_mount_options(s, data); | 1619 | save_mount_options(s, data); |
1620 | 1620 | ||
1621 | sbi = kzalloc(sizeof(struct reiserfs_sb_info), GFP_KERNEL); | 1621 | sbi = kzalloc(sizeof(struct reiserfs_sb_info), GFP_KERNEL); |
1622 | if (!sbi) { | 1622 | if (!sbi) |
1623 | errval = -ENOMEM; | 1623 | return -ENOMEM; |
1624 | goto error_alloc; | ||
1625 | } | ||
1626 | s->s_fs_info = sbi; | 1624 | s->s_fs_info = sbi; |
1627 | /* Set default values for options: non-aggressive tails, RO on errors */ | 1625 | /* Set default values for options: non-aggressive tails, RO on errors */ |
1628 | REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_SMALLTAIL); | 1626 | REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_SMALLTAIL); |
@@ -1879,12 +1877,12 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent) | |||
1879 | return (0); | 1877 | return (0); |
1880 | 1878 | ||
1881 | error: | 1879 | error: |
1882 | reiserfs_write_unlock(s); | ||
1883 | error_alloc: | ||
1884 | if (jinit_done) { /* kill the commit thread, free journal ram */ | 1880 | if (jinit_done) { /* kill the commit thread, free journal ram */ |
1885 | journal_release_error(NULL, s); | 1881 | journal_release_error(NULL, s); |
1886 | } | 1882 | } |
1887 | 1883 | ||
1884 | reiserfs_write_unlock(s); | ||
1885 | |||
1888 | reiserfs_free_bitmap_cache(s); | 1886 | reiserfs_free_bitmap_cache(s); |
1889 | if (SB_BUFFER_WITH_SB(s)) | 1887 | if (SB_BUFFER_WITH_SB(s)) |
1890 | brelse(SB_BUFFER_WITH_SB(s)); | 1888 | brelse(SB_BUFFER_WITH_SB(s)); |
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c index 66abe36c1213..1c65a2b3f4ee 100644 --- a/fs/xfs/linux-2.6/xfs_aops.c +++ b/fs/xfs/linux-2.6/xfs_aops.c | |||
@@ -163,14 +163,17 @@ xfs_ioend_new_eof( | |||
163 | } | 163 | } |
164 | 164 | ||
165 | /* | 165 | /* |
166 | * Update on-disk file size now that data has been written to disk. | 166 | * Update on-disk file size now that data has been written to disk. The |
167 | * The current in-memory file size is i_size. If a write is beyond | 167 | * current in-memory file size is i_size. If a write is beyond eof i_new_size |
168 | * eof i_new_size will be the intended file size until i_size is | 168 | * will be the intended file size until i_size is updated. If this write does |
169 | * updated. If this write does not extend all the way to the valid | 169 | * not extend all the way to the valid file size then restrict this update to |
170 | * file size then restrict this update to the end of the write. | 170 | * the end of the write. |
171 | * | ||
172 | * This function does not block as blocking on the inode lock in IO completion | ||
173 | * can lead to IO completion order dependency deadlocks.. If it can't get the | ||
174 | * inode ilock it will return EAGAIN. Callers must handle this. | ||
171 | */ | 175 | */ |
172 | 176 | STATIC int | |
173 | STATIC void | ||
174 | xfs_setfilesize( | 177 | xfs_setfilesize( |
175 | xfs_ioend_t *ioend) | 178 | xfs_ioend_t *ioend) |
176 | { | 179 | { |
@@ -181,9 +184,11 @@ xfs_setfilesize( | |||
181 | ASSERT(ioend->io_type != IOMAP_READ); | 184 | ASSERT(ioend->io_type != IOMAP_READ); |
182 | 185 | ||
183 | if (unlikely(ioend->io_error)) | 186 | if (unlikely(ioend->io_error)) |
184 | return; | 187 | return 0; |
188 | |||
189 | if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) | ||
190 | return EAGAIN; | ||
185 | 191 | ||
186 | xfs_ilock(ip, XFS_ILOCK_EXCL); | ||
187 | isize = xfs_ioend_new_eof(ioend); | 192 | isize = xfs_ioend_new_eof(ioend); |
188 | if (isize) { | 193 | if (isize) { |
189 | ip->i_d.di_size = isize; | 194 | ip->i_d.di_size = isize; |
@@ -191,6 +196,28 @@ xfs_setfilesize( | |||
191 | } | 196 | } |
192 | 197 | ||
193 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | 198 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
199 | return 0; | ||
200 | } | ||
201 | |||
202 | /* | ||
203 | * Schedule IO completion handling on a xfsdatad if this was | ||
204 | * the final hold on this ioend. If we are asked to wait, | ||
205 | * flush the workqueue. | ||
206 | */ | ||
207 | STATIC void | ||
208 | xfs_finish_ioend( | ||
209 | xfs_ioend_t *ioend, | ||
210 | int wait) | ||
211 | { | ||
212 | if (atomic_dec_and_test(&ioend->io_remaining)) { | ||
213 | struct workqueue_struct *wq; | ||
214 | |||
215 | wq = (ioend->io_type == IOMAP_UNWRITTEN) ? | ||
216 | xfsconvertd_workqueue : xfsdatad_workqueue; | ||
217 | queue_work(wq, &ioend->io_work); | ||
218 | if (wait) | ||
219 | flush_workqueue(wq); | ||
220 | } | ||
194 | } | 221 | } |
195 | 222 | ||
196 | /* | 223 | /* |
@@ -198,11 +225,11 @@ xfs_setfilesize( | |||
198 | */ | 225 | */ |
199 | STATIC void | 226 | STATIC void |
200 | xfs_end_io( | 227 | xfs_end_io( |
201 | struct work_struct *work) | 228 | struct work_struct *work) |
202 | { | 229 | { |
203 | xfs_ioend_t *ioend = | 230 | xfs_ioend_t *ioend = container_of(work, xfs_ioend_t, io_work); |
204 | container_of(work, xfs_ioend_t, io_work); | 231 | struct xfs_inode *ip = XFS_I(ioend->io_inode); |
205 | struct xfs_inode *ip = XFS_I(ioend->io_inode); | 232 | int error; |
206 | 233 | ||
207 | /* | 234 | /* |
208 | * For unwritten extents we need to issue transactions to convert a | 235 | * For unwritten extents we need to issue transactions to convert a |
@@ -210,7 +237,6 @@ xfs_end_io( | |||
210 | */ | 237 | */ |
211 | if (ioend->io_type == IOMAP_UNWRITTEN && | 238 | if (ioend->io_type == IOMAP_UNWRITTEN && |
212 | likely(!ioend->io_error && !XFS_FORCED_SHUTDOWN(ip->i_mount))) { | 239 | likely(!ioend->io_error && !XFS_FORCED_SHUTDOWN(ip->i_mount))) { |
213 | int error; | ||
214 | 240 | ||
215 | error = xfs_iomap_write_unwritten(ip, ioend->io_offset, | 241 | error = xfs_iomap_write_unwritten(ip, ioend->io_offset, |
216 | ioend->io_size); | 242 | ioend->io_size); |
@@ -222,30 +248,23 @@ xfs_end_io( | |||
222 | * We might have to update the on-disk file size after extending | 248 | * We might have to update the on-disk file size after extending |
223 | * writes. | 249 | * writes. |
224 | */ | 250 | */ |
225 | if (ioend->io_type != IOMAP_READ) | 251 | if (ioend->io_type != IOMAP_READ) { |
226 | xfs_setfilesize(ioend); | 252 | error = xfs_setfilesize(ioend); |
227 | xfs_destroy_ioend(ioend); | 253 | ASSERT(!error || error == EAGAIN); |
228 | } | ||
229 | |||
230 | /* | ||
231 | * Schedule IO completion handling on a xfsdatad if this was | ||
232 | * the final hold on this ioend. If we are asked to wait, | ||
233 | * flush the workqueue. | ||
234 | */ | ||
235 | STATIC void | ||
236 | xfs_finish_ioend( | ||
237 | xfs_ioend_t *ioend, | ||
238 | int wait) | ||
239 | { | ||
240 | if (atomic_dec_and_test(&ioend->io_remaining)) { | ||
241 | struct workqueue_struct *wq; | ||
242 | |||
243 | wq = (ioend->io_type == IOMAP_UNWRITTEN) ? | ||
244 | xfsconvertd_workqueue : xfsdatad_workqueue; | ||
245 | queue_work(wq, &ioend->io_work); | ||
246 | if (wait) | ||
247 | flush_workqueue(wq); | ||
248 | } | 254 | } |
255 | |||
256 | /* | ||
257 | * If we didn't complete processing of the ioend, requeue it to the | ||
258 | * tail of the workqueue for another attempt later. Otherwise destroy | ||
259 | * it. | ||
260 | */ | ||
261 | if (error == EAGAIN) { | ||
262 | atomic_inc(&ioend->io_remaining); | ||
263 | xfs_finish_ioend(ioend, 0); | ||
264 | /* ensure we don't spin on blocked ioends */ | ||
265 | delay(1); | ||
266 | } else | ||
267 | xfs_destroy_ioend(ioend); | ||
249 | } | 268 | } |
250 | 269 | ||
251 | /* | 270 | /* |
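Note: xfs_setfilesize() now uses xfs_ilock_nowait() and returns EAGAIN instead of blocking in I/O completion, and xfs_end_io() reacts by bumping io_remaining and requeueing the ioend so the size update is retried later. A simplified, single-threaded sketch of that "trylock or requeue" completion pattern (names invented; no real workqueue involved):

#include <stdio.h>

#define EAGAIN 11

static int lock_busy = 1;	/* pretend the inode lock is held elsewhere */

/* Try to update the on-disk size; never block in completion context. */
static int setfilesize(void)
{
	if (lock_busy)
		return EAGAIN;	/* caller must retry later */
	printf("size updated\n");
	return 0;
}

static void end_io(void)
{
	int error = setfilesize();

	if (error == EAGAIN) {
		/* Requeue instead of destroying the ioend; retried next pass. */
		printf("requeueing ioend\n");
		lock_busy = 0;	/* lock released in the meantime */
		end_io();
	} else {
		printf("ioend destroyed\n");
	}
}

int main(void)
{
	end_io();
	return 0;
}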
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index 1f5e4bb5e970..6b6b39416ad3 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c | |||
@@ -613,7 +613,8 @@ xfssyncd( | |||
613 | set_freezable(); | 613 | set_freezable(); |
614 | timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10); | 614 | timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10); |
615 | for (;;) { | 615 | for (;;) { |
616 | timeleft = schedule_timeout_interruptible(timeleft); | 616 | if (list_empty(&mp->m_sync_list)) |
617 | timeleft = schedule_timeout_interruptible(timeleft); | ||
617 | /* swsusp */ | 618 | /* swsusp */ |
618 | try_to_freeze(); | 619 | try_to_freeze(); |
619 | if (kthread_should_stop() && list_empty(&mp->m_sync_list)) | 620 | if (kthread_should_stop() && list_empty(&mp->m_sync_list)) |
@@ -633,8 +634,7 @@ xfssyncd( | |||
633 | list_add_tail(&mp->m_sync_work.w_list, | 634 | list_add_tail(&mp->m_sync_work.w_list, |
634 | &mp->m_sync_list); | 635 | &mp->m_sync_list); |
635 | } | 636 | } |
636 | list_for_each_entry_safe(work, n, &mp->m_sync_list, w_list) | 637 | list_splice_init(&mp->m_sync_list, &tmp); |
637 | list_move(&work->w_list, &tmp); | ||
638 | spin_unlock(&mp->m_sync_lock); | 638 | spin_unlock(&mp->m_sync_lock); |
639 | 639 | ||
640 | list_for_each_entry_safe(work, n, &tmp, w_list) { | 640 | list_for_each_entry_safe(work, n, &tmp, w_list) { |
@@ -693,12 +693,12 @@ xfs_inode_set_reclaim_tag( | |||
693 | xfs_mount_t *mp = ip->i_mount; | 693 | xfs_mount_t *mp = ip->i_mount; |
694 | xfs_perag_t *pag = xfs_get_perag(mp, ip->i_ino); | 694 | xfs_perag_t *pag = xfs_get_perag(mp, ip->i_ino); |
695 | 695 | ||
696 | read_lock(&pag->pag_ici_lock); | 696 | write_lock(&pag->pag_ici_lock); |
697 | spin_lock(&ip->i_flags_lock); | 697 | spin_lock(&ip->i_flags_lock); |
698 | __xfs_inode_set_reclaim_tag(pag, ip); | 698 | __xfs_inode_set_reclaim_tag(pag, ip); |
699 | __xfs_iflags_set(ip, XFS_IRECLAIMABLE); | 699 | __xfs_iflags_set(ip, XFS_IRECLAIMABLE); |
700 | spin_unlock(&ip->i_flags_lock); | 700 | spin_unlock(&ip->i_flags_lock); |
701 | read_unlock(&pag->pag_ici_lock); | 701 | write_unlock(&pag->pag_ici_lock); |
702 | xfs_put_perag(mp, pag); | 702 | xfs_put_perag(mp, pag); |
703 | } | 703 | } |
704 | 704 | ||
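Note: the xfssyncd hunk replaces a per-entry list_move() loop with list_splice_init(), detaching the whole pending list while the spinlock is held and walking it afterwards with the lock dropped. A minimal userspace illustration of the same pattern, using a hand-rolled singly linked list and a pthread mutex rather than the kernel's list.h and spinlocks:

  #include <pthread.h>
  #include <stdio.h>
  #include <stdlib.h>

  struct work { struct work *next; int id; };

  static struct work *pending;                    /* protected by lock */
  static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

  static void queue_work_item(int id)
  {
          struct work *w = malloc(sizeof(*w));
          w->id = id;
          pthread_mutex_lock(&lock);
          w->next = pending;                      /* push under the lock */
          pending = w;
          pthread_mutex_unlock(&lock);
  }

  static void run_pending(void)
  {
          struct work *batch, *w;

          pthread_mutex_lock(&lock);
          batch = pending;                        /* "splice": steal the whole list ... */
          pending = NULL;                         /* ... leaving an empty list behind */
          pthread_mutex_unlock(&lock);

          while ((w = batch)) {                   /* process outside the lock */
                  batch = w->next;
                  printf("running work %d\n", w->id);
                  free(w);
          }
  }

  int main(void)
  {
          queue_work_item(1);
          queue_work_item(2);
          run_pending();
          return 0;
  }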
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index 155e798f30a1..fd21160a6238 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c | |||
@@ -190,13 +190,12 @@ xfs_iget_cache_hit( | |||
190 | trace_xfs_iget_reclaim(ip); | 190 | trace_xfs_iget_reclaim(ip); |
191 | 191 | ||
192 | /* | 192 | /* |
193 | * We need to set XFS_INEW atomically with clearing the | 193 | * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode |
194 | * reclaimable tag so that we do have an indicator of the | 194 | * from stomping over us while we recycle the inode. We can't |
195 | * inode still being initialized. | 195 | * clear the radix tree reclaimable tag yet as it requires |
196 | * pag_ici_lock to be held exclusive. | ||
196 | */ | 197 | */ |
197 | ip->i_flags |= XFS_INEW; | 198 | ip->i_flags |= XFS_IRECLAIM; |
198 | ip->i_flags &= ~XFS_IRECLAIMABLE; | ||
199 | __xfs_inode_clear_reclaim_tag(mp, pag, ip); | ||
200 | 199 | ||
201 | spin_unlock(&ip->i_flags_lock); | 200 | spin_unlock(&ip->i_flags_lock); |
202 | read_unlock(&pag->pag_ici_lock); | 201 | read_unlock(&pag->pag_ici_lock); |
@@ -216,7 +215,15 @@ xfs_iget_cache_hit( | |||
216 | trace_xfs_iget_reclaim(ip); | 215 | trace_xfs_iget_reclaim(ip); |
217 | goto out_error; | 216 | goto out_error; |
218 | } | 217 | } |
218 | |||
219 | write_lock(&pag->pag_ici_lock); | ||
220 | spin_lock(&ip->i_flags_lock); | ||
221 | ip->i_flags &= ~(XFS_IRECLAIMABLE | XFS_IRECLAIM); | ||
222 | ip->i_flags |= XFS_INEW; | ||
223 | __xfs_inode_clear_reclaim_tag(mp, pag, ip); | ||
219 | inode->i_state = I_NEW; | 224 | inode->i_state = I_NEW; |
225 | spin_unlock(&ip->i_flags_lock); | ||
226 | write_unlock(&pag->pag_ici_lock); | ||
220 | } else { | 227 | } else { |
221 | /* If the VFS inode is being torn down, pause and try again. */ | 228 | /* If the VFS inode is being torn down, pause and try again. */ |
222 | if (!igrab(inode)) { | 229 | if (!igrab(inode)) { |
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index e6f3b120f51a..0cbdccc4003d 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h | |||
@@ -6,6 +6,7 @@ | |||
6 | {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ | 6 | {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ |
7 | {0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | 7 | {0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
8 | {0x1002, 0x3154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | 8 | {0x1002, 0x3154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
9 | {0x1002, 0x3155, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | ||
9 | {0x1002, 0x3E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ | 10 | {0x1002, 0x3E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ |
10 | {0x1002, 0x3E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ | 11 | {0x1002, 0x3E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ |
11 | {0x1002, 0x4136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP}, \ | 12 | {0x1002, 0x4136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP}, \ |
@@ -375,6 +376,7 @@ | |||
375 | {0x1002, 0x9712, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 376 | {0x1002, 0x9712, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
376 | {0x1002, 0x9713, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 377 | {0x1002, 0x9713, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
377 | {0x1002, 0x9714, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 378 | {0x1002, 0x9714, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
379 | {0x1002, 0x9715, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
378 | {0, 0, 0} | 380 | {0, 0, 0} |
379 | 381 | ||
380 | #define r128_PCI_IDS \ | 382 | #define r128_PCI_IDS \ |
diff --git a/include/linux/dm-ioctl.h b/include/linux/dm-ioctl.h index aa95508d2f95..2c445e113790 100644 --- a/include/linux/dm-ioctl.h +++ b/include/linux/dm-ioctl.h | |||
@@ -266,9 +266,9 @@ enum { | |||
266 | #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl) | 266 | #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl) |
267 | 267 | ||
268 | #define DM_VERSION_MAJOR 4 | 268 | #define DM_VERSION_MAJOR 4 |
269 | #define DM_VERSION_MINOR 16 | 269 | #define DM_VERSION_MINOR 17 |
270 | #define DM_VERSION_PATCHLEVEL 0 | 270 | #define DM_VERSION_PATCHLEVEL 0 |
271 | #define DM_VERSION_EXTRA "-ioctl (2009-11-05)" | 271 | #define DM_VERSION_EXTRA "-ioctl (2010-03-05)" |
272 | 272 | ||
273 | /* Status bits */ | 273 | /* Status bits */ |
274 | #define DM_READONLY_FLAG (1 << 0) /* In/Out */ | 274 | #define DM_READONLY_FLAG (1 << 0) /* In/Out */ |
@@ -316,4 +316,9 @@ enum { | |||
316 | */ | 316 | */ |
317 | #define DM_QUERY_INACTIVE_TABLE_FLAG (1 << 12) /* In */ | 317 | #define DM_QUERY_INACTIVE_TABLE_FLAG (1 << 12) /* In */ |
318 | 318 | ||
319 | /* | ||
320 | * If set, a uevent was generated for which the caller may need to wait. | ||
321 | */ | ||
322 | #define DM_UEVENT_GENERATED_FLAG (1 << 13) /* Out */ | ||
323 | |||
319 | #endif /* _LINUX_DM_IOCTL_H */ | 324 | #endif /* _LINUX_DM_IOCTL_H */ |
diff --git a/include/linux/freezer.h b/include/linux/freezer.h index 5a361f85cfec..da7e52b099f3 100644 --- a/include/linux/freezer.h +++ b/include/linux/freezer.h | |||
@@ -64,9 +64,12 @@ extern bool freeze_task(struct task_struct *p, bool sig_only); | |||
64 | extern void cancel_freezing(struct task_struct *p); | 64 | extern void cancel_freezing(struct task_struct *p); |
65 | 65 | ||
66 | #ifdef CONFIG_CGROUP_FREEZER | 66 | #ifdef CONFIG_CGROUP_FREEZER |
67 | extern int cgroup_frozen(struct task_struct *task); | 67 | extern int cgroup_freezing_or_frozen(struct task_struct *task); |
68 | #else /* !CONFIG_CGROUP_FREEZER */ | 68 | #else /* !CONFIG_CGROUP_FREEZER */ |
69 | static inline int cgroup_frozen(struct task_struct *task) { return 0; } | 69 | static inline int cgroup_freezing_or_frozen(struct task_struct *task) |
70 | { | ||
71 | return 0; | ||
72 | } | ||
70 | #endif /* !CONFIG_CGROUP_FREEZER */ | 73 | #endif /* !CONFIG_CGROUP_FREEZER */ |
71 | 74 | ||
72 | /* | 75 | /* |
diff --git a/include/linux/fs.h b/include/linux/fs.h index 917508a27ac7..5191f49c2fec 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -2214,6 +2214,7 @@ extern int generic_segment_checks(const struct iovec *iov, | |||
2214 | /* fs/block_dev.c */ | 2214 | /* fs/block_dev.c */ |
2215 | extern ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov, | 2215 | extern ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov, |
2216 | unsigned long nr_segs, loff_t pos); | 2216 | unsigned long nr_segs, loff_t pos); |
2217 | extern int block_fsync(struct file *filp, struct dentry *dentry, int datasync); | ||
2217 | 2218 | ||
2218 | /* fs/splice.c */ | 2219 | /* fs/splice.c */ |
2219 | extern ssize_t generic_file_splice_read(struct file *, loff_t *, | 2220 | extern ssize_t generic_file_splice_read(struct file *, loff_t *, |
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h index ece0b1c33816..e117b1aee69c 100644 --- a/include/linux/kfifo.h +++ b/include/linux/kfifo.h | |||
@@ -86,7 +86,8 @@ union { \ | |||
86 | */ | 86 | */ |
87 | #define INIT_KFIFO(name) \ | 87 | #define INIT_KFIFO(name) \ |
88 | name = __kfifo_initializer(sizeof(name##kfifo_buffer) - \ | 88 | name = __kfifo_initializer(sizeof(name##kfifo_buffer) - \ |
89 | sizeof(struct kfifo), name##kfifo_buffer) | 89 | sizeof(struct kfifo), \ |
90 | name##kfifo_buffer + sizeof(struct kfifo)) | ||
90 | 91 | ||
91 | /** | 92 | /** |
92 | * DEFINE_KFIFO - macro to define and initialize a kfifo | 93 | * DEFINE_KFIFO - macro to define and initialize a kfifo |
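Note: the INIT_KFIFO hunk fixes the buffer pointer passed to the initializer: the declared storage holds a struct kfifo header immediately followed by the data area, so both the usable size and the data pointer must skip sizeof(struct kfifo). A small stand-alone illustration of that layout arithmetic (the struct and names below are invented for the sketch, not the kfifo API):

  #include <stdio.h>
  #include <stddef.h>

  struct header { unsigned int in, out, size; };

  /* one combined allocation: header followed by the data area,
   * mirroring how the kfifo macros lay out their backing buffer */
  static unsigned char storage[sizeof(struct header) + 128];

  int main(void)
  {
          size_t usable = sizeof(storage) - sizeof(struct header);
          void  *data   = storage + sizeof(struct header);

          printf("total %zu, header %zu, usable %zu, data at offset %td\n",
                 sizeof(storage), sizeof(struct header), usable,
                 (unsigned char *)data - storage);
          return 0;
  }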
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index bd5a616d9373..1fe293e98a08 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h | |||
@@ -53,7 +53,7 @@ extern struct kmem_cache *kvm_vcpu_cache; | |||
53 | */ | 53 | */ |
54 | struct kvm_io_bus { | 54 | struct kvm_io_bus { |
55 | int dev_count; | 55 | int dev_count; |
56 | #define NR_IOBUS_DEVS 6 | 56 | #define NR_IOBUS_DEVS 200 |
57 | struct kvm_io_device *devs[NR_IOBUS_DEVS]; | 57 | struct kvm_io_device *devs[NR_IOBUS_DEVS]; |
58 | }; | 58 | }; |
59 | 59 | ||
@@ -116,6 +116,11 @@ struct kvm_memory_slot { | |||
116 | int user_alloc; | 116 | int user_alloc; |
117 | }; | 117 | }; |
118 | 118 | ||
119 | static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot) | ||
120 | { | ||
121 | return ALIGN(memslot->npages, BITS_PER_LONG) / 8; | ||
122 | } | ||
123 | |||
119 | struct kvm_kernel_irq_routing_entry { | 124 | struct kvm_kernel_irq_routing_entry { |
120 | u32 gsi; | 125 | u32 gsi; |
121 | u32 type; | 126 | u32 type; |
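Note: the new kvm_dirty_bitmap_bytes() helper centralises a size calculation that several callers previously open-coded: round the page count up to a multiple of BITS_PER_LONG, then divide by 8 to get bytes. A quick stand-alone check of that arithmetic:

  #include <stdio.h>

  #define BITS_PER_LONG (8 * sizeof(unsigned long))
  #define ALIGN(x, a)   (((x) + (a) - 1) / (a) * (a))

  /* same computation as kvm_dirty_bitmap_bytes(), applied to a raw page count */
  static unsigned long dirty_bitmap_bytes(unsigned long npages)
  {
          return ALIGN(npages, BITS_PER_LONG) / 8;
  }

  int main(void)
  {
          /* e.g. 1000 pages on a 64-bit host -> 1024 bits -> 128 bytes */
          printf("%lu\n", dirty_bitmap_bytes(1000));
          return 0;
  }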
diff --git a/include/linux/module.h b/include/linux/module.h index 6cb1a3cab5d3..bd465d47216a 100644 --- a/include/linux/module.h +++ b/include/linux/module.h | |||
@@ -457,7 +457,7 @@ void symbol_put_addr(void *addr); | |||
457 | static inline local_t *__module_ref_addr(struct module *mod, int cpu) | 457 | static inline local_t *__module_ref_addr(struct module *mod, int cpu) |
458 | { | 458 | { |
459 | #ifdef CONFIG_SMP | 459 | #ifdef CONFIG_SMP |
460 | return (local_t *) (mod->refptr + per_cpu_offset(cpu)); | 460 | return (local_t *) per_cpu_ptr(mod->refptr, cpu); |
461 | #else | 461 | #else |
462 | return &mod->ref; | 462 | return &mod->ref; |
463 | #endif | 463 | #endif |
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 34fc6be5bfcf..ebc48090c2a7 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h | |||
@@ -176,6 +176,7 @@ struct nfs_server { | |||
176 | #define NFS_CAP_ATIME (1U << 11) | 176 | #define NFS_CAP_ATIME (1U << 11) |
177 | #define NFS_CAP_CTIME (1U << 12) | 177 | #define NFS_CAP_CTIME (1U << 12) |
178 | #define NFS_CAP_MTIME (1U << 13) | 178 | #define NFS_CAP_MTIME (1U << 13) |
179 | #define NFS_CAP_POSIX_LOCK (1U << 14) | ||
179 | 180 | ||
180 | 181 | ||
181 | /* maximum number of slots to use */ | 182 | /* maximum number of slots to use */ |
diff --git a/include/linux/pci.h b/include/linux/pci.h index c1968f464c38..0afb5272d859 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h | |||
@@ -959,6 +959,11 @@ static inline int pci_proc_domain(struct pci_bus *bus) | |||
959 | } | 959 | } |
960 | #endif /* CONFIG_PCI_DOMAINS */ | 960 | #endif /* CONFIG_PCI_DOMAINS */ |
961 | 961 | ||
962 | /* some architectures require additional setup to direct VGA traffic */ | ||
963 | typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode, | ||
964 | unsigned int command_bits, bool change_bridge); | ||
965 | extern void pci_register_set_vga_state(arch_set_vga_state_t func); | ||
966 | |||
962 | #else /* CONFIG_PCI is not enabled */ | 967 | #else /* CONFIG_PCI is not enabled */ |
963 | 968 | ||
964 | /* | 969 | /* |
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index cca8a044e2b6..0be824320580 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
@@ -2417,6 +2417,9 @@ | |||
2417 | #define PCI_DEVICE_ID_INTEL_82840_HB 0x1a21 | 2417 | #define PCI_DEVICE_ID_INTEL_82840_HB 0x1a21 |
2418 | #define PCI_DEVICE_ID_INTEL_82845_HB 0x1a30 | 2418 | #define PCI_DEVICE_ID_INTEL_82845_HB 0x1a30 |
2419 | #define PCI_DEVICE_ID_INTEL_IOAT 0x1a38 | 2419 | #define PCI_DEVICE_ID_INTEL_IOAT 0x1a38 |
2420 | #define PCI_DEVICE_ID_INTEL_CPT_SMBUS 0x1c22 | ||
2421 | #define PCI_DEVICE_ID_INTEL_CPT_LPC1 0x1c42 | ||
2422 | #define PCI_DEVICE_ID_INTEL_CPT_LPC2 0x1c43 | ||
2420 | #define PCI_DEVICE_ID_INTEL_82801AA_0 0x2410 | 2423 | #define PCI_DEVICE_ID_INTEL_82801AA_0 0x2410 |
2421 | #define PCI_DEVICE_ID_INTEL_82801AA_1 0x2411 | 2424 | #define PCI_DEVICE_ID_INTEL_82801AA_1 0x2411 |
2422 | #define PCI_DEVICE_ID_INTEL_82801AA_3 0x2413 | 2425 | #define PCI_DEVICE_ID_INTEL_82801AA_3 0x2413 |
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c index 59e9ef6aab40..eb3f34d57419 100644 --- a/kernel/cgroup_freezer.c +++ b/kernel/cgroup_freezer.c | |||
@@ -47,17 +47,20 @@ static inline struct freezer *task_freezer(struct task_struct *task) | |||
47 | struct freezer, css); | 47 | struct freezer, css); |
48 | } | 48 | } |
49 | 49 | ||
50 | int cgroup_frozen(struct task_struct *task) | 50 | int cgroup_freezing_or_frozen(struct task_struct *task) |
51 | { | 51 | { |
52 | struct freezer *freezer; | 52 | struct freezer *freezer; |
53 | enum freezer_state state; | 53 | enum freezer_state state; |
54 | 54 | ||
55 | task_lock(task); | 55 | task_lock(task); |
56 | freezer = task_freezer(task); | 56 | freezer = task_freezer(task); |
57 | state = freezer->state; | 57 | if (!freezer->css.cgroup->parent) |
58 | state = CGROUP_THAWED; /* root cgroup can't be frozen */ | ||
59 | else | ||
60 | state = freezer->state; | ||
58 | task_unlock(task); | 61 | task_unlock(task); |
59 | 62 | ||
60 | return state == CGROUP_FROZEN; | 63 | return (state == CGROUP_FREEZING) || (state == CGROUP_FROZEN); |
61 | } | 64 | } |
62 | 65 | ||
63 | /* | 66 | /* |
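Note: the renamed helper now reports both the FREEZING and FROZEN states and special-cases the root cgroup, which can never be frozen; thaw_tasks() (see the kernel/power/process.c hunk below) uses it to skip tasks whose cgroup freezer still owns them. The predicate boils down to the following (is_root and state stand in for freezer->css.cgroup->parent == NULL and freezer->state):

  #include <stdbool.h>
  #include <stdio.h>

  enum freezer_state { CGROUP_THAWED, CGROUP_FREEZING, CGROUP_FROZEN };

  static bool freezing_or_frozen(bool is_root, enum freezer_state state)
  {
          if (is_root)
                  state = CGROUP_THAWED;  /* root cgroup can't be frozen */
          return state == CGROUP_FREEZING || state == CGROUP_FROZEN;
  }

  int main(void)
  {
          printf("%d %d %d\n",
                 freezing_or_frozen(true,  CGROUP_FROZEN),   /* 0: root is ignored */
                 freezing_or_frozen(false, CGROUP_FREEZING), /* 1 */
                 freezing_or_frozen(false, CGROUP_THAWED));  /* 0 */
          return 0;
  }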
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 8f6284f9e620..b75d3d2d71f8 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c | |||
@@ -850,6 +850,16 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
850 | if (new->flags & IRQF_ONESHOT) | 850 | if (new->flags & IRQF_ONESHOT) |
851 | desc->status |= IRQ_ONESHOT; | 851 | desc->status |= IRQ_ONESHOT; |
852 | 852 | ||
853 | /* | ||
854 | * Force MSI interrupts to run with interrupts | ||
855 | * disabled. The multi vector cards can cause stack | ||
856 | * overflows due to nested interrupts when enough of | ||
857 | * them are directed to a core and fire at the same | ||
858 | * time. | ||
859 | */ | ||
860 | if (desc->msi_desc) | ||
861 | new->flags |= IRQF_DISABLED; | ||
862 | |||
853 | if (!(desc->status & IRQ_NOAUTOEN)) { | 863 | if (!(desc->status & IRQ_NOAUTOEN)) { |
854 | desc->depth = 0; | 864 | desc->depth = 0; |
855 | desc->status &= ~IRQ_DISABLED; | 865 | desc->status &= ~IRQ_DISABLED; |
diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 1199bdaab194..8627512a2b8f 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c | |||
@@ -600,9 +600,9 @@ static int static_obj(void *obj) | |||
600 | * percpu var? | 600 | * percpu var? |
601 | */ | 601 | */ |
602 | for_each_possible_cpu(i) { | 602 | for_each_possible_cpu(i) { |
603 | start = (unsigned long) &__per_cpu_start + per_cpu_offset(i); | 603 | start = (unsigned long) per_cpu_ptr(&__per_cpu_start, i); |
604 | end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM | 604 | end = (unsigned long) per_cpu_ptr(&__per_cpu_start, i) |
605 | + per_cpu_offset(i); | 605 | + PERCPU_ENOUGH_ROOM; |
606 | 606 | ||
607 | if ((addr >= start) && (addr < end)) | 607 | if ((addr >= start) && (addr < end)) |
608 | return 1; | 608 | return 1; |
diff --git a/kernel/module.c b/kernel/module.c index f82386bd9ee9..5b6ce39e5f65 100644 --- a/kernel/module.c +++ b/kernel/module.c | |||
@@ -405,7 +405,7 @@ static void percpu_modcopy(void *pcpudest, const void *from, unsigned long size) | |||
405 | int cpu; | 405 | int cpu; |
406 | 406 | ||
407 | for_each_possible_cpu(cpu) | 407 | for_each_possible_cpu(cpu) |
408 | memcpy(pcpudest + per_cpu_offset(cpu), from, size); | 408 | memcpy(per_cpu_ptr(pcpudest, cpu), from, size); |
409 | } | 409 | } |
410 | 410 | ||
411 | #else /* ... !CONFIG_SMP */ | 411 | #else /* ... !CONFIG_SMP */ |
diff --git a/kernel/power/process.c b/kernel/power/process.c index 5ade1bdcf366..de5301557774 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c | |||
@@ -145,7 +145,7 @@ static void thaw_tasks(bool nosig_only) | |||
145 | if (nosig_only && should_send_signal(p)) | 145 | if (nosig_only && should_send_signal(p)) |
146 | continue; | 146 | continue; |
147 | 147 | ||
148 | if (cgroup_frozen(p)) | 148 | if (cgroup_freezing_or_frozen(p)) |
149 | continue; | 149 | continue; |
150 | 150 | ||
151 | thaw_process(p); | 151 | thaw_process(p); |
diff --git a/kernel/sched.c b/kernel/sched.c index e6b1ac499313..21c1cf2e27aa 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -7050,7 +7050,9 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, | |||
7050 | int ret; | 7050 | int ret; |
7051 | cpumask_var_t mask; | 7051 | cpumask_var_t mask; |
7052 | 7052 | ||
7053 | if (len < cpumask_size()) | 7053 | if ((len * BITS_PER_BYTE) < nr_cpu_ids) |
7054 | return -EINVAL; | ||
7055 | if (len & (sizeof(unsigned long)-1)) | ||
7054 | return -EINVAL; | 7056 | return -EINVAL; |
7055 | 7057 | ||
7056 | if (!alloc_cpumask_var(&mask, GFP_KERNEL)) | 7058 | if (!alloc_cpumask_var(&mask, GFP_KERNEL)) |
@@ -7058,10 +7060,12 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, | |||
7058 | 7060 | ||
7059 | ret = sched_getaffinity(pid, mask); | 7061 | ret = sched_getaffinity(pid, mask); |
7060 | if (ret == 0) { | 7062 | if (ret == 0) { |
7061 | if (copy_to_user(user_mask_ptr, mask, cpumask_size())) | 7063 | size_t retlen = min_t(size_t, len, cpumask_size()); |
7064 | |||
7065 | if (copy_to_user(user_mask_ptr, mask, retlen)) | ||
7062 | ret = -EFAULT; | 7066 | ret = -EFAULT; |
7063 | else | 7067 | else |
7064 | ret = cpumask_size(); | 7068 | ret = retlen; |
7065 | } | 7069 | } |
7066 | free_cpumask_var(mask); | 7070 | free_cpumask_var(mask); |
7067 | 7071 | ||
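Note: with these checks sched_getaffinity() rejects buffers that cannot hold nr_cpu_ids bits or whose length is not a multiple of sizeof(unsigned long), and copies back at most min(len, cpumask_size()) bytes. From user space, the usual way to size the buffer correctly is the glibc CPU_ALLOC helpers, e.g.:

  #define _GNU_SOURCE
  #include <sched.h>
  #include <stdio.h>

  int main(void)
  {
          int ncpus = 1024;                        /* generous upper bound on possible CPUs */
          cpu_set_t *set = CPU_ALLOC(ncpus);
          size_t size = CPU_ALLOC_SIZE(ncpus);     /* rounded to whole unsigned long words */

          if (!set)
                  return 1;
          if (sched_getaffinity(0, size, set) == 0)
                  printf("CPU 0 in affinity mask: %d\n", CPU_ISSET_S(0, size, set));
          else
                  perror("sched_getaffinity");
          CPU_FREE(set);
          return 0;
  }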
diff --git a/mm/readahead.c b/mm/readahead.c index 337b20e946f6..fe1a069fb595 100644 --- a/mm/readahead.c +++ b/mm/readahead.c | |||
@@ -502,7 +502,7 @@ void page_cache_sync_readahead(struct address_space *mapping, | |||
502 | return; | 502 | return; |
503 | 503 | ||
504 | /* be dumb */ | 504 | /* be dumb */ |
505 | if (filp->f_mode & FMODE_RANDOM) { | 505 | if (filp && (filp->f_mode & FMODE_RANDOM)) { |
506 | force_page_cache_readahead(mapping, filp, offset, req_size); | 506 | force_page_cache_readahead(mapping, filp, offset, req_size); |
507 | return; | 507 | return; |
508 | } | 508 | } |
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 6a4331429598..ba1fadb01192 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c | |||
@@ -749,9 +749,6 @@ ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) | |||
749 | 749 | ||
750 | switch (fc & IEEE80211_FCTL_STYPE) { | 750 | switch (fc & IEEE80211_FCTL_STYPE) { |
751 | case IEEE80211_STYPE_ACTION: | 751 | case IEEE80211_STYPE_ACTION: |
752 | if (skb->len < IEEE80211_MIN_ACTION_SIZE) | ||
753 | return RX_DROP_MONITOR; | ||
754 | /* fall through */ | ||
755 | case IEEE80211_STYPE_PROBE_RESP: | 752 | case IEEE80211_STYPE_PROBE_RESP: |
756 | case IEEE80211_STYPE_BEACON: | 753 | case IEEE80211_STYPE_BEACON: |
757 | skb_queue_tail(&ifmsh->skb_queue, skb); | 754 | skb_queue_tail(&ifmsh->skb_queue, skb); |
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index d28acb6b1f81..4eed81bde1a6 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c | |||
@@ -391,7 +391,7 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata, | |||
391 | if (SN_GT(mpath->sn, orig_sn) || | 391 | if (SN_GT(mpath->sn, orig_sn) || |
392 | (mpath->sn == orig_sn && | 392 | (mpath->sn == orig_sn && |
393 | action == MPATH_PREQ && | 393 | action == MPATH_PREQ && |
394 | new_metric > mpath->metric)) { | 394 | new_metric >= mpath->metric)) { |
395 | process = false; | 395 | process = false; |
396 | fresh_info = false; | 396 | fresh_info = false; |
397 | } | 397 | } |
@@ -611,7 +611,7 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata, | |||
611 | 611 | ||
612 | mesh_path_sel_frame_tx(MPATH_PREP, flags, orig_addr, | 612 | mesh_path_sel_frame_tx(MPATH_PREP, flags, orig_addr, |
613 | cpu_to_le32(orig_sn), 0, target_addr, | 613 | cpu_to_le32(orig_sn), 0, target_addr, |
614 | cpu_to_le32(target_sn), mpath->next_hop->sta.addr, hopcount, | 614 | cpu_to_le32(target_sn), next_hop, hopcount, |
615 | ttl, cpu_to_le32(lifetime), cpu_to_le32(metric), | 615 | ttl, cpu_to_le32(lifetime), cpu_to_le32(metric), |
616 | 0, sdata); | 616 | 0, sdata); |
617 | rcu_read_unlock(); | 617 | rcu_read_unlock(); |
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 3e1ea43d5be6..a71c32c2778c 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
@@ -2355,6 +2355,11 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata, | |||
2355 | /* should never get here */ | 2355 | /* should never get here */ |
2356 | WARN_ON(1); | 2356 | WARN_ON(1); |
2357 | break; | 2357 | break; |
2358 | case MESH_PLINK_CATEGORY: | ||
2359 | case MESH_PATH_SEL_CATEGORY: | ||
2360 | if (ieee80211_vif_is_mesh(&sdata->vif)) | ||
2361 | return ieee80211_mesh_rx_mgmt(sdata, rx->skb); | ||
2362 | break; | ||
2358 | } | 2363 | } |
2359 | 2364 | ||
2360 | return 1; | 2365 | return 1; |
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 70c79c3013fa..1fdc0a562b47 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
@@ -1945,6 +1945,7 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local, | |||
1945 | void ieee80211_tx_pending(unsigned long data) | 1945 | void ieee80211_tx_pending(unsigned long data) |
1946 | { | 1946 | { |
1947 | struct ieee80211_local *local = (struct ieee80211_local *)data; | 1947 | struct ieee80211_local *local = (struct ieee80211_local *)data; |
1948 | struct ieee80211_sub_if_data *sdata; | ||
1948 | unsigned long flags; | 1949 | unsigned long flags; |
1949 | int i; | 1950 | int i; |
1950 | bool txok; | 1951 | bool txok; |
@@ -1983,6 +1984,11 @@ void ieee80211_tx_pending(unsigned long data) | |||
1983 | if (!txok) | 1984 | if (!txok) |
1984 | break; | 1985 | break; |
1985 | } | 1986 | } |
1987 | |||
1988 | if (skb_queue_empty(&local->pending[i])) | ||
1989 | list_for_each_entry_rcu(sdata, &local->interfaces, list) | ||
1990 | netif_tx_wake_queue( | ||
1991 | netdev_get_tx_queue(sdata->dev, i)); | ||
1986 | } | 1992 | } |
1987 | spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); | 1993 | spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); |
1988 | 1994 | ||
diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 3848140313f5..27212e8fcaf4 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c | |||
@@ -280,13 +280,13 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue, | |||
280 | /* someone still has this queue stopped */ | 280 | /* someone still has this queue stopped */ |
281 | return; | 281 | return; |
282 | 282 | ||
283 | if (!skb_queue_empty(&local->pending[queue])) | 283 | if (skb_queue_empty(&local->pending[queue])) { |
284 | rcu_read_lock(); | ||
285 | list_for_each_entry_rcu(sdata, &local->interfaces, list) | ||
286 | netif_tx_wake_queue(netdev_get_tx_queue(sdata->dev, queue)); | ||
287 | rcu_read_unlock(); | ||
288 | } else | ||
284 | tasklet_schedule(&local->tx_pending_tasklet); | 289 | tasklet_schedule(&local->tx_pending_tasklet); |
285 | |||
286 | rcu_read_lock(); | ||
287 | list_for_each_entry_rcu(sdata, &local->interfaces, list) | ||
288 | netif_tx_wake_queue(netdev_get_tx_queue(sdata->dev, queue)); | ||
289 | rcu_read_unlock(); | ||
290 | } | 290 | } |
291 | 291 | ||
292 | void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue, | 292 | void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue, |
@@ -1145,6 +1145,14 @@ int ieee80211_reconfig(struct ieee80211_local *local) | |||
1145 | } | 1145 | } |
1146 | } | 1146 | } |
1147 | 1147 | ||
1148 | rcu_read_lock(); | ||
1149 | if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) { | ||
1150 | list_for_each_entry_rcu(sta, &local->sta_list, list) { | ||
1151 | ieee80211_sta_tear_down_BA_sessions(sta); | ||
1152 | } | ||
1153 | } | ||
1154 | rcu_read_unlock(); | ||
1155 | |||
1148 | /* add back keys */ | 1156 | /* add back keys */ |
1149 | list_for_each_entry(sdata, &local->interfaces, list) | 1157 | list_for_each_entry(sdata, &local->interfaces, list) |
1150 | if (netif_running(sdata->dev)) | 1158 | if (netif_running(sdata->dev)) |
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 9ace8eb0207c..062a8b051b83 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c | |||
@@ -125,6 +125,7 @@ MODULE_SUPPORTED_DEVICE("{{Intel, ICH6}," | |||
125 | "{Intel, ICH9}," | 125 | "{Intel, ICH9}," |
126 | "{Intel, ICH10}," | 126 | "{Intel, ICH10}," |
127 | "{Intel, PCH}," | 127 | "{Intel, PCH}," |
128 | "{Intel, CPT}," | ||
128 | "{Intel, SCH}," | 129 | "{Intel, SCH}," |
129 | "{ATI, SB450}," | 130 | "{ATI, SB450}," |
130 | "{ATI, SB600}," | 131 | "{ATI, SB600}," |
@@ -449,6 +450,7 @@ struct azx { | |||
449 | /* driver types */ | 450 | /* driver types */ |
450 | enum { | 451 | enum { |
451 | AZX_DRIVER_ICH, | 452 | AZX_DRIVER_ICH, |
453 | AZX_DRIVER_PCH, | ||
452 | AZX_DRIVER_SCH, | 454 | AZX_DRIVER_SCH, |
453 | AZX_DRIVER_ATI, | 455 | AZX_DRIVER_ATI, |
454 | AZX_DRIVER_ATIHDMI, | 456 | AZX_DRIVER_ATIHDMI, |
@@ -463,6 +465,7 @@ enum { | |||
463 | 465 | ||
464 | static char *driver_short_names[] __devinitdata = { | 466 | static char *driver_short_names[] __devinitdata = { |
465 | [AZX_DRIVER_ICH] = "HDA Intel", | 467 | [AZX_DRIVER_ICH] = "HDA Intel", |
468 | [AZX_DRIVER_PCH] = "HDA Intel PCH", | ||
466 | [AZX_DRIVER_SCH] = "HDA Intel MID", | 469 | [AZX_DRIVER_SCH] = "HDA Intel MID", |
467 | [AZX_DRIVER_ATI] = "HDA ATI SB", | 470 | [AZX_DRIVER_ATI] = "HDA ATI SB", |
468 | [AZX_DRIVER_ATIHDMI] = "HDA ATI HDMI", | 471 | [AZX_DRIVER_ATIHDMI] = "HDA ATI HDMI", |
@@ -1065,6 +1068,7 @@ static void azx_init_pci(struct azx *chip) | |||
1065 | 0x01, NVIDIA_HDA_ENABLE_COHBIT); | 1068 | 0x01, NVIDIA_HDA_ENABLE_COHBIT); |
1066 | break; | 1069 | break; |
1067 | case AZX_DRIVER_SCH: | 1070 | case AZX_DRIVER_SCH: |
1071 | case AZX_DRIVER_PCH: | ||
1068 | pci_read_config_word(chip->pci, INTEL_SCH_HDA_DEVC, &snoop); | 1072 | pci_read_config_word(chip->pci, INTEL_SCH_HDA_DEVC, &snoop); |
1069 | if (snoop & INTEL_SCH_HDA_DEVC_NOSNOOP) { | 1073 | if (snoop & INTEL_SCH_HDA_DEVC_NOSNOOP) { |
1070 | pci_write_config_word(chip->pci, INTEL_SCH_HDA_DEVC, | 1074 | pci_write_config_word(chip->pci, INTEL_SCH_HDA_DEVC, |
@@ -2268,6 +2272,7 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = { | |||
2268 | SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB), | 2272 | SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB), |
2269 | SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB), | 2273 | SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB), |
2270 | SND_PCI_QUIRK(0x1565, 0x820f, "Biostar Microtech", POS_FIX_LPIB), | 2274 | SND_PCI_QUIRK(0x1565, 0x820f, "Biostar Microtech", POS_FIX_LPIB), |
2275 | SND_PCI_QUIRK(0x1565, 0x8218, "Biostar Microtech", POS_FIX_LPIB), | ||
2271 | SND_PCI_QUIRK(0x8086, 0xd601, "eMachines T5212", POS_FIX_LPIB), | 2276 | SND_PCI_QUIRK(0x8086, 0xd601, "eMachines T5212", POS_FIX_LPIB), |
2272 | {} | 2277 | {} |
2273 | }; | 2278 | }; |
@@ -2357,6 +2362,8 @@ static struct snd_pci_quirk msi_black_list[] __devinitdata = { | |||
2357 | SND_PCI_QUIRK(0x1043, 0x81f2, "ASUS", 0), /* Athlon64 X2 + nvidia */ | 2362 | SND_PCI_QUIRK(0x1043, 0x81f2, "ASUS", 0), /* Athlon64 X2 + nvidia */ |
2358 | SND_PCI_QUIRK(0x1043, 0x81f6, "ASUS", 0), /* nvidia */ | 2363 | SND_PCI_QUIRK(0x1043, 0x81f6, "ASUS", 0), /* nvidia */ |
2359 | SND_PCI_QUIRK(0x1043, 0x822d, "ASUS", 0), /* Athlon64 X2 + nvidia MCP55 */ | 2364 | SND_PCI_QUIRK(0x1043, 0x822d, "ASUS", 0), /* Athlon64 X2 + nvidia MCP55 */ |
2365 | SND_PCI_QUIRK(0x1849, 0x0888, "ASRock", 0), /* Athlon64 X2 + nvidia */ | ||
2366 | SND_PCI_QUIRK(0xa0a0, 0x0575, "Aopen MZ915-M", 0), /* ICH6 */ | ||
2360 | {} | 2367 | {} |
2361 | }; | 2368 | }; |
2362 | 2369 | ||
@@ -2431,6 +2438,7 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci, | |||
2431 | if (bdl_pos_adj[dev] < 0) { | 2438 | if (bdl_pos_adj[dev] < 0) { |
2432 | switch (chip->driver_type) { | 2439 | switch (chip->driver_type) { |
2433 | case AZX_DRIVER_ICH: | 2440 | case AZX_DRIVER_ICH: |
2441 | case AZX_DRIVER_PCH: | ||
2434 | bdl_pos_adj[dev] = 1; | 2442 | bdl_pos_adj[dev] = 1; |
2435 | break; | 2443 | break; |
2436 | default: | 2444 | default: |
@@ -2709,6 +2717,8 @@ static struct pci_device_id azx_ids[] = { | |||
2709 | { PCI_DEVICE(0x8086, 0x3a6e), .driver_data = AZX_DRIVER_ICH }, | 2717 | { PCI_DEVICE(0x8086, 0x3a6e), .driver_data = AZX_DRIVER_ICH }, |
2710 | /* PCH */ | 2718 | /* PCH */ |
2711 | { PCI_DEVICE(0x8086, 0x3b56), .driver_data = AZX_DRIVER_ICH }, | 2719 | { PCI_DEVICE(0x8086, 0x3b56), .driver_data = AZX_DRIVER_ICH }, |
2720 | /* CPT */ | ||
2721 | { PCI_DEVICE(0x8086, 0x1c20), .driver_data = AZX_DRIVER_PCH }, | ||
2712 | /* SCH */ | 2722 | /* SCH */ |
2713 | { PCI_DEVICE(0x8086, 0x811b), .driver_data = AZX_DRIVER_SCH }, | 2723 | { PCI_DEVICE(0x8086, 0x811b), .driver_data = AZX_DRIVER_SCH }, |
2714 | /* ATI SB 450/600 */ | 2724 | /* ATI SB 450/600 */ |
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c index 706944126d11..263bf3bd4793 100644 --- a/sound/pci/hda/patch_analog.c +++ b/sound/pci/hda/patch_analog.c | |||
@@ -1805,6 +1805,14 @@ static int patch_ad1981(struct hda_codec *codec) | |||
1805 | case AD1981_THINKPAD: | 1805 | case AD1981_THINKPAD: |
1806 | spec->mixers[0] = ad1981_thinkpad_mixers; | 1806 | spec->mixers[0] = ad1981_thinkpad_mixers; |
1807 | spec->input_mux = &ad1981_thinkpad_capture_source; | 1807 | spec->input_mux = &ad1981_thinkpad_capture_source; |
1808 | /* set the upper-limit for mixer amp to 0dB for avoiding the | ||
1809 | * possible damage by overloading | ||
1810 | */ | ||
1811 | snd_hda_override_amp_caps(codec, 0x11, HDA_INPUT, | ||
1812 | (0x17 << AC_AMPCAP_OFFSET_SHIFT) | | ||
1813 | (0x17 << AC_AMPCAP_NUM_STEPS_SHIFT) | | ||
1814 | (0x05 << AC_AMPCAP_STEP_SIZE_SHIFT) | | ||
1815 | (1 << AC_AMPCAP_MUTE_SHIFT)); | ||
1808 | break; | 1816 | break; |
1809 | case AD1981_TOSHIBA: | 1817 | case AD1981_TOSHIBA: |
1810 | spec->mixers[0] = ad1981_hp_mixers; | 1818 | spec->mixers[0] = ad1981_hp_mixers; |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index a79f84119967..bd8a567c9367 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -9074,6 +9074,7 @@ static struct snd_pci_quirk alc882_cfg_tbl[] = { | |||
9074 | SND_PCI_QUIRK(0x1462, 0xaa08, "MSI", ALC883_TARGA_2ch_DIG), | 9074 | SND_PCI_QUIRK(0x1462, 0xaa08, "MSI", ALC883_TARGA_2ch_DIG), |
9075 | 9075 | ||
9076 | SND_PCI_QUIRK(0x147b, 0x1083, "Abit IP35-PRO", ALC883_6ST_DIG), | 9076 | SND_PCI_QUIRK(0x147b, 0x1083, "Abit IP35-PRO", ALC883_6ST_DIG), |
9077 | SND_PCI_QUIRK(0x1558, 0x0571, "Clevo laptop M570U", ALC883_3ST_6ch_DIG), | ||
9077 | SND_PCI_QUIRK(0x1558, 0x0721, "Clevo laptop M720R", ALC883_CLEVO_M720), | 9078 | SND_PCI_QUIRK(0x1558, 0x0721, "Clevo laptop M720R", ALC883_CLEVO_M720), |
9078 | SND_PCI_QUIRK(0x1558, 0x0722, "Clevo laptop M720SR", ALC883_CLEVO_M720), | 9079 | SND_PCI_QUIRK(0x1558, 0x0722, "Clevo laptop M720SR", ALC883_CLEVO_M720), |
9079 | SND_PCI_QUIRK(0x1558, 0x5409, "Clevo laptop M540R", ALC883_CLEVO_M540R), | 9080 | SND_PCI_QUIRK(0x1558, 0x5409, "Clevo laptop M540R", ALC883_CLEVO_M540R), |
diff --git a/sound/pci/mixart/mixart.c b/sound/pci/mixart/mixart.c index a83d1968a845..32f98535c0b4 100644 --- a/sound/pci/mixart/mixart.c +++ b/sound/pci/mixart/mixart.c | |||
@@ -1161,13 +1161,15 @@ static long snd_mixart_BA0_read(struct snd_info_entry *entry, void *file_private | |||
1161 | unsigned long count, unsigned long pos) | 1161 | unsigned long count, unsigned long pos) |
1162 | { | 1162 | { |
1163 | struct mixart_mgr *mgr = entry->private_data; | 1163 | struct mixart_mgr *mgr = entry->private_data; |
1164 | unsigned long maxsize; | ||
1164 | 1165 | ||
1165 | count = count & ~3; /* make sure the read size is a multiple of 4 bytes */ | 1166 | if (pos >= MIXART_BA0_SIZE) |
1166 | if(count <= 0) | ||
1167 | return 0; | 1167 | return 0; |
1168 | if(pos + count > MIXART_BA0_SIZE) | 1168 | maxsize = MIXART_BA0_SIZE - pos; |
1169 | count = (long)(MIXART_BA0_SIZE - pos); | 1169 | if (count > maxsize) |
1170 | if(copy_to_user_fromio(buf, MIXART_MEM( mgr, pos ), count)) | 1170 | count = maxsize; |
1171 | count = count & ~3; /* make sure the read size is a multiple of 4 bytes */ | ||
1172 | if (copy_to_user_fromio(buf, MIXART_MEM(mgr, pos), count)) | ||
1171 | return -EFAULT; | 1173 | return -EFAULT; |
1172 | return count; | 1174 | return count; |
1173 | } | 1175 | } |
@@ -1180,13 +1182,15 @@ static long snd_mixart_BA1_read(struct snd_info_entry *entry, void *file_private | |||
1180 | unsigned long count, unsigned long pos) | 1182 | unsigned long count, unsigned long pos) |
1181 | { | 1183 | { |
1182 | struct mixart_mgr *mgr = entry->private_data; | 1184 | struct mixart_mgr *mgr = entry->private_data; |
1185 | unsigned long maxsize; | ||
1183 | 1186 | ||
1184 | count = count & ~3; /* make sure the read size is a multiple of 4 bytes */ | 1187 | if (pos > MIXART_BA1_SIZE) |
1185 | if(count <= 0) | ||
1186 | return 0; | 1188 | return 0; |
1187 | if(pos + count > MIXART_BA1_SIZE) | 1189 | maxsize = MIXART_BA1_SIZE - pos; |
1188 | count = (long)(MIXART_BA1_SIZE - pos); | 1190 | if (count > maxsize) |
1189 | if(copy_to_user_fromio(buf, MIXART_REG( mgr, pos ), count)) | 1191 | count = maxsize; |
1192 | count = count & ~3; /* make sure the read size is a multiple of 4 bytes */ | ||
1193 | if (copy_to_user_fromio(buf, MIXART_REG(mgr, pos), count)) | ||
1190 | return -EFAULT; | 1194 | return -EFAULT; |
1191 | return count; | 1195 | return count; |
1192 | } | 1196 | } |
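Note: both BA0 and BA1 proc reads now validate pos against the region size before computing the remaining length, clamp count to that remainder, and only then round it down to a 4-byte multiple, so a large pos can no longer slip past the old pos + count arithmetic. The clamping logic in isolation:

  #include <stdio.h>

  /* returns how many bytes may safely be copied from a region of
   * region_size bytes, starting at pos, when the caller asked for count */
  static unsigned long clamp_read(unsigned long region_size,
                                  unsigned long pos, unsigned long count)
  {
          unsigned long maxsize;

          if (pos >= region_size)
                  return 0;
          maxsize = region_size - pos;
          if (count > maxsize)
                  count = maxsize;
          return count & ~3UL;            /* keep the read a multiple of 4 bytes */
  }

  int main(void)
  {
          printf("%lu\n", clamp_read(4096, 4090, 100));   /* 4: 6 bytes left, rounded down */
          printf("%lu\n", clamp_read(4096, 5000, 100));   /* 0: pos beyond the region */
          return 0;
  }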
diff --git a/sound/usb/usbmidi.c b/sound/usb/usbmidi.c index b2da478a0fae..c7cb207963f5 100644 --- a/sound/usb/usbmidi.c +++ b/sound/usb/usbmidi.c | |||
@@ -984,6 +984,8 @@ static void snd_usbmidi_output_drain(struct snd_rawmidi_substream *substream) | |||
984 | DEFINE_WAIT(wait); | 984 | DEFINE_WAIT(wait); |
985 | long timeout = msecs_to_jiffies(50); | 985 | long timeout = msecs_to_jiffies(50); |
986 | 986 | ||
987 | if (ep->umidi->disconnected) | ||
988 | return; | ||
987 | /* | 989 | /* |
988 | * The substream buffer is empty, but some data might still be in the | 990 | * The substream buffer is empty, but some data might still be in the |
989 | * currently active URBs, so we have to wait for those to complete. | 991 | * currently active URBs, so we have to wait for those to complete. |
@@ -1121,14 +1123,21 @@ static int snd_usbmidi_in_endpoint_create(struct snd_usb_midi* umidi, | |||
1121 | * Frees an output endpoint. | 1123 | * Frees an output endpoint. |
1122 | * May be called when ep hasn't been initialized completely. | 1124 | * May be called when ep hasn't been initialized completely. |
1123 | */ | 1125 | */ |
1124 | static void snd_usbmidi_out_endpoint_delete(struct snd_usb_midi_out_endpoint* ep) | 1126 | static void snd_usbmidi_out_endpoint_clear(struct snd_usb_midi_out_endpoint *ep) |
1125 | { | 1127 | { |
1126 | unsigned int i; | 1128 | unsigned int i; |
1127 | 1129 | ||
1128 | for (i = 0; i < OUTPUT_URBS; ++i) | 1130 | for (i = 0; i < OUTPUT_URBS; ++i) |
1129 | if (ep->urbs[i].urb) | 1131 | if (ep->urbs[i].urb) { |
1130 | free_urb_and_buffer(ep->umidi, ep->urbs[i].urb, | 1132 | free_urb_and_buffer(ep->umidi, ep->urbs[i].urb, |
1131 | ep->max_transfer); | 1133 | ep->max_transfer); |
1134 | ep->urbs[i].urb = NULL; | ||
1135 | } | ||
1136 | } | ||
1137 | |||
1138 | static void snd_usbmidi_out_endpoint_delete(struct snd_usb_midi_out_endpoint *ep) | ||
1139 | { | ||
1140 | snd_usbmidi_out_endpoint_clear(ep); | ||
1132 | kfree(ep); | 1141 | kfree(ep); |
1133 | } | 1142 | } |
1134 | 1143 | ||
@@ -1260,15 +1269,18 @@ void snd_usbmidi_disconnect(struct list_head* p) | |||
1260 | usb_kill_urb(ep->out->urbs[j].urb); | 1269 | usb_kill_urb(ep->out->urbs[j].urb); |
1261 | if (umidi->usb_protocol_ops->finish_out_endpoint) | 1270 | if (umidi->usb_protocol_ops->finish_out_endpoint) |
1262 | umidi->usb_protocol_ops->finish_out_endpoint(ep->out); | 1271 | umidi->usb_protocol_ops->finish_out_endpoint(ep->out); |
1272 | ep->out->active_urbs = 0; | ||
1273 | if (ep->out->drain_urbs) { | ||
1274 | ep->out->drain_urbs = 0; | ||
1275 | wake_up(&ep->out->drain_wait); | ||
1276 | } | ||
1263 | } | 1277 | } |
1264 | if (ep->in) | 1278 | if (ep->in) |
1265 | for (j = 0; j < INPUT_URBS; ++j) | 1279 | for (j = 0; j < INPUT_URBS; ++j) |
1266 | usb_kill_urb(ep->in->urbs[j]); | 1280 | usb_kill_urb(ep->in->urbs[j]); |
1267 | /* free endpoints here; later call can result in Oops */ | 1281 | /* free endpoints here; later call can result in Oops */ |
1268 | if (ep->out) { | 1282 | if (ep->out) |
1269 | snd_usbmidi_out_endpoint_delete(ep->out); | 1283 | snd_usbmidi_out_endpoint_clear(ep->out); |
1270 | ep->out = NULL; | ||
1271 | } | ||
1272 | if (ep->in) { | 1284 | if (ep->in) { |
1273 | snd_usbmidi_in_endpoint_delete(ep->in); | 1285 | snd_usbmidi_in_endpoint_delete(ep->in); |
1274 | ep->in = NULL; | 1286 | ep->in = NULL; |
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 6d0e484b40f3..bb06fca047f2 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
@@ -638,7 +638,7 @@ skip_lpage: | |||
638 | 638 | ||
639 | /* Allocate page dirty bitmap if needed */ | 639 | /* Allocate page dirty bitmap if needed */ |
640 | if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) { | 640 | if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) { |
641 | unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8; | 641 | unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(&new); |
642 | 642 | ||
643 | new.dirty_bitmap = vmalloc(dirty_bytes); | 643 | new.dirty_bitmap = vmalloc(dirty_bytes); |
644 | if (!new.dirty_bitmap) | 644 | if (!new.dirty_bitmap) |
@@ -721,7 +721,7 @@ int kvm_get_dirty_log(struct kvm *kvm, | |||
721 | { | 721 | { |
722 | struct kvm_memory_slot *memslot; | 722 | struct kvm_memory_slot *memslot; |
723 | int r, i; | 723 | int r, i; |
724 | int n; | 724 | unsigned long n; |
725 | unsigned long any = 0; | 725 | unsigned long any = 0; |
726 | 726 | ||
727 | r = -EINVAL; | 727 | r = -EINVAL; |
@@ -733,7 +733,7 @@ int kvm_get_dirty_log(struct kvm *kvm, | |||
733 | if (!memslot->dirty_bitmap) | 733 | if (!memslot->dirty_bitmap) |
734 | goto out; | 734 | goto out; |
735 | 735 | ||
736 | n = ALIGN(memslot->npages, BITS_PER_LONG) / 8; | 736 | n = kvm_dirty_bitmap_bytes(memslot); |
737 | 737 | ||
738 | for (i = 0; !any && i < n/sizeof(long); ++i) | 738 | for (i = 0; !any && i < n/sizeof(long); ++i) |
739 | any = memslot->dirty_bitmap[i]; | 739 | any = memslot->dirty_bitmap[i]; |
@@ -1075,10 +1075,13 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn) | |||
1075 | memslot = gfn_to_memslot_unaliased(kvm, gfn); | 1075 | memslot = gfn_to_memslot_unaliased(kvm, gfn); |
1076 | if (memslot && memslot->dirty_bitmap) { | 1076 | if (memslot && memslot->dirty_bitmap) { |
1077 | unsigned long rel_gfn = gfn - memslot->base_gfn; | 1077 | unsigned long rel_gfn = gfn - memslot->base_gfn; |
1078 | unsigned long *p = memslot->dirty_bitmap + | ||
1079 | rel_gfn / BITS_PER_LONG; | ||
1080 | int offset = rel_gfn % BITS_PER_LONG; | ||
1078 | 1081 | ||
1079 | /* avoid RMW */ | 1082 | /* avoid RMW */ |
1080 | if (!generic_test_le_bit(rel_gfn, memslot->dirty_bitmap)) | 1083 | if (!generic_test_le_bit(offset, p)) |
1081 | generic___set_le_bit(rel_gfn, memslot->dirty_bitmap); | 1084 | generic___set_le_bit(offset, p); |
1082 | } | 1085 | } |
1083 | } | 1086 | } |
1084 | 1087 | ||