138 files changed, 1187 insertions, 687 deletions
diff --git a/Documentation/filesystems/f2fs.txt b/Documentation/filesystems/f2fs.txt index 8fbd8b46ee34..dcf338e62b71 100644 --- a/Documentation/filesystems/f2fs.txt +++ b/Documentation/filesystems/f2fs.txt | |||
@@ -175,9 +175,9 @@ consists of multiple segments as described below. | |||
175 | align with the zone size <-| | 175 | align with the zone size <-| |
176 | |-> align with the segment size | 176 | |-> align with the segment size |
177 | _________________________________________________________________________ | 177 | _________________________________________________________________________ |
178 | | | | Node | Segment | Segment | | | 178 | | | | Segment | Node | Segment | | |
179 | | Superblock | Checkpoint | Address | Info. | Summary | Main | | 179 | | Superblock | Checkpoint | Info. | Address | Summary | Main | |
180 | | (SB) | (CP) | Table (NAT) | Table (SIT) | Area (SSA) | | | 180 | | (SB) | (CP) | Table (SIT) | Table (NAT) | Area (SSA) | | |
181 | |____________|_____2______|______N______|______N______|______N_____|__N___| | 181 | |____________|_____2______|______N______|______N______|______N_____|__N___| |
182 | . . | 182 | . . |
183 | . . | 183 | . . |
@@ -200,14 +200,14 @@ consists of multiple segments as described below. | |||
200 | : It contains file system information, bitmaps for valid NAT/SIT sets, orphan | 200 | : It contains file system information, bitmaps for valid NAT/SIT sets, orphan |
201 | inode lists, and summary entries of current active segments. | 201 | inode lists, and summary entries of current active segments. |
202 | 202 | ||
203 | - Node Address Table (NAT) | ||
204 | : It is composed of a block address table for all the node blocks stored in | ||
205 | Main area. | ||
206 | |||
207 | - Segment Information Table (SIT) | 203 | - Segment Information Table (SIT) |
208 | : It contains segment information such as valid block count and bitmap for the | 204 | : It contains segment information such as valid block count and bitmap for the |
209 | validity of all the blocks. | 205 | validity of all the blocks. |
210 | 206 | ||
207 | - Node Address Table (NAT) | ||
208 | : It is composed of a block address table for all the node blocks stored in | ||
209 | Main area. | ||
210 | |||
211 | - Segment Summary Area (SSA) | 211 | - Segment Summary Area (SSA) |
212 | : It contains summary entries which contains the owner information of all the | 212 | : It contains summary entries which contains the owner information of all the |
213 | data and node blocks stored in Main area. | 213 | data and node blocks stored in Main area. |
@@ -236,13 +236,13 @@ For file system consistency, each CP points to which NAT and SIT copies are | |||
236 | valid, as shown as below. | 236 | valid, as shown as below. |
237 | 237 | ||
238 | +--------+----------+---------+ | 238 | +--------+----------+---------+ |
239 | | CP | NAT | SIT | | 239 | | CP | SIT | NAT | |
240 | +--------+----------+---------+ | 240 | +--------+----------+---------+ |
241 | . . . . | 241 | . . . . |
242 | . . . . | 242 | . . . . |
243 | . . . . | 243 | . . . . |
244 | +-------+-------+--------+--------+--------+--------+ | 244 | +-------+-------+--------+--------+--------+--------+ |
245 | | CP #0 | CP #1 | NAT #0 | NAT #1 | SIT #0 | SIT #1 | | 245 | | CP #0 | CP #1 | SIT #0 | SIT #1 | NAT #0 | NAT #1 | |
246 | +-------+-------+--------+--------+--------+--------+ | 246 | +-------+-------+--------+--------+--------+--------+ |
247 | | ^ ^ | 247 | | ^ ^ |
248 | | | | | 248 | | | | |
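As an illustration of the two-copy scheme described above (not part of this patch; the flag names below are hypothetical and not taken from the f2fs sources), a reader of the CP area might select the valid SIT/NAT copy roughly like this:

    /* Illustrative sketch only: pick metadata copy #0 or #1 based on
     * hypothetical version bits carried in the current checkpoint. */
    #define CP_SIT_COPY_1  0x1   /* set => SIT copy #1 is the valid one */
    #define CP_NAT_COPY_1  0x2   /* set => NAT copy #1 is the valid one */

    static inline int valid_copy(unsigned int cp_flags, unsigned int copy_bit)
    {
            return (cp_flags & copy_bit) ? 1 : 0;
    }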
diff --git a/MAINTAINERS b/MAINTAINERS index 3105c4868c4e..8ae709e34523 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -6585,7 +6585,7 @@ F: drivers/media/platform/s3c-camif/ | |||
6585 | F: include/media/s3c_camif.h | 6585 | F: include/media/s3c_camif.h |
6586 | 6586 | ||
6587 | SERIAL DRIVERS | 6587 | SERIAL DRIVERS |
6588 | M: Alan Cox <alan@linux.intel.com> | 6588 | M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
6589 | L: linux-serial@vger.kernel.org | 6589 | L: linux-serial@vger.kernel.org |
6590 | S: Maintained | 6590 | S: Maintained |
6591 | F: drivers/tty/serial | 6591 | F: drivers/tty/serial |
diff --git a/Makefile b/Makefile --- a/Makefile +++ b/Makefile
@@ -169,7 +169,7 @@ SUBARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \ | |||
169 | -e s/arm.*/arm/ -e s/sa110/arm/ \ | 169 | -e s/arm.*/arm/ -e s/sa110/arm/ \ |
170 | -e s/s390x/s390/ -e s/parisc64/parisc/ \ | 170 | -e s/s390x/s390/ -e s/parisc64/parisc/ \ |
171 | -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \ | 171 | -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \ |
172 | -e s/sh[234].*/sh/ ) | 172 | -e s/sh[234].*/sh/ -e s/aarch64.*/arm64/ ) |
173 | 173 | ||
174 | # Cross compiling and selecting different set of gcc/bin-utils | 174 | # Cross compiling and selecting different set of gcc/bin-utils |
175 | # --------------------------------------------------------------------------- | 175 | # --------------------------------------------------------------------------- |
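With the added mapping, a native build on an AArch64 host works without setting ARCH explicitly: uname -m reports "aarch64", which the sed expression above now rewrites to "arm64", so SUBARCH (and hence the default ARCH) selects arch/arm64.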
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h index 07fea290d7c1..fe32c0e4ac01 100644 --- a/arch/arm64/include/asm/elf.h +++ b/arch/arm64/include/asm/elf.h | |||
@@ -26,7 +26,10 @@ | |||
26 | 26 | ||
27 | typedef unsigned long elf_greg_t; | 27 | typedef unsigned long elf_greg_t; |
28 | 28 | ||
29 | #define ELF_NGREG (sizeof (struct pt_regs) / sizeof(elf_greg_t)) | 29 | #define ELF_NGREG (sizeof(struct user_pt_regs) / sizeof(elf_greg_t)) |
30 | #define ELF_CORE_COPY_REGS(dest, regs) \ | ||
31 | *(struct user_pt_regs *)&(dest) = (regs)->user_regs; | ||
32 | |||
30 | typedef elf_greg_t elf_gregset_t[ELF_NGREG]; | 33 | typedef elf_greg_t elf_gregset_t[ELF_NGREG]; |
31 | typedef struct user_fpsimd_state elf_fpregset_t; | 34 | typedef struct user_fpsimd_state elf_fpregset_t; |
32 | 35 | ||
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c index 4265ff64219b..b7a5fffe0924 100644 --- a/arch/ia64/kernel/ptrace.c +++ b/arch/ia64/kernel/ptrace.c | |||
@@ -672,33 +672,6 @@ ptrace_attach_sync_user_rbs (struct task_struct *child) | |||
672 | read_unlock(&tasklist_lock); | 672 | read_unlock(&tasklist_lock); |
673 | } | 673 | } |
674 | 674 | ||
675 | static inline int | ||
676 | thread_matches (struct task_struct *thread, unsigned long addr) | ||
677 | { | ||
678 | unsigned long thread_rbs_end; | ||
679 | struct pt_regs *thread_regs; | ||
680 | |||
681 | if (ptrace_check_attach(thread, 0) < 0) | ||
682 | /* | ||
683 | * If the thread is not in an attachable state, we'll | ||
684 | * ignore it. The net effect is that if ADDR happens | ||
685 | * to overlap with the portion of the thread's | ||
686 | * register backing store that is currently residing | ||
687 | * on the thread's kernel stack, then ptrace() may end | ||
688 | * up accessing a stale value. But if the thread | ||
689 | * isn't stopped, that's a problem anyhow, so we're | ||
690 | * doing as well as we can... | ||
691 | */ | ||
692 | return 0; | ||
693 | |||
694 | thread_regs = task_pt_regs(thread); | ||
695 | thread_rbs_end = ia64_get_user_rbs_end(thread, thread_regs, NULL); | ||
696 | if (!on_kernel_rbs(addr, thread_regs->ar_bspstore, thread_rbs_end)) | ||
697 | return 0; | ||
698 | |||
699 | return 1; /* looks like we've got a winner */ | ||
700 | } | ||
701 | |||
702 | /* | 675 | /* |
703 | * Write f32-f127 back to task->thread.fph if it has been modified. | 676 | * Write f32-f127 back to task->thread.fph if it has been modified. |
704 | */ | 677 | */ |
diff --git a/arch/m68k/include/asm/dma-mapping.h b/arch/m68k/include/asm/dma-mapping.h index 17f7a45948ea..3e6b8445af6a 100644 --- a/arch/m68k/include/asm/dma-mapping.h +++ b/arch/m68k/include/asm/dma-mapping.h | |||
@@ -21,6 +21,22 @@ extern void *dma_alloc_coherent(struct device *, size_t, | |||
21 | extern void dma_free_coherent(struct device *, size_t, | 21 | extern void dma_free_coherent(struct device *, size_t, |
22 | void *, dma_addr_t); | 22 | void *, dma_addr_t); |
23 | 23 | ||
24 | static inline void *dma_alloc_attrs(struct device *dev, size_t size, | ||
25 | dma_addr_t *dma_handle, gfp_t flag, | ||
26 | struct dma_attrs *attrs) | ||
27 | { | ||
28 | /* attrs is not supported and ignored */ | ||
29 | return dma_alloc_coherent(dev, size, dma_handle, flag); | ||
30 | } | ||
31 | |||
32 | static inline void dma_free_attrs(struct device *dev, size_t size, | ||
33 | void *cpu_addr, dma_addr_t dma_handle, | ||
34 | struct dma_attrs *attrs) | ||
35 | { | ||
36 | /* attrs is not supported and ignored */ | ||
37 | dma_free_coherent(dev, size, cpu_addr, dma_handle); | ||
38 | } | ||
39 | |||
24 | static inline void *dma_alloc_noncoherent(struct device *dev, size_t size, | 40 | static inline void *dma_alloc_noncoherent(struct device *dev, size_t size, |
25 | dma_addr_t *handle, gfp_t flag) | 41 | dma_addr_t *handle, gfp_t flag) |
26 | { | 42 | { |
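For context, the wrappers above simply forward to the coherent allocator. A minimal, illustrative use from driver code (dev here stands for whatever struct device * the caller holds; attrs is passed as NULL since the hunk ignores it) might look like:

    /* Illustrative only: allocate and release a coherent buffer through
     * the new attrs wrappers; the NULL attrs matches the "ignored" case. */
    dma_addr_t dma_handle;
    void *buf = dma_alloc_attrs(dev, PAGE_SIZE, &dma_handle, GFP_KERNEL, NULL);

    if (buf)
            dma_free_attrs(dev, PAGE_SIZE, buf, dma_handle, NULL);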
diff --git a/arch/m68k/include/asm/pgtable_no.h b/arch/m68k/include/asm/pgtable_no.h index bf86b29fe64a..037028f4ab70 100644 --- a/arch/m68k/include/asm/pgtable_no.h +++ b/arch/m68k/include/asm/pgtable_no.h | |||
@@ -64,6 +64,8 @@ extern unsigned int kobjsize(const void *objp); | |||
64 | */ | 64 | */ |
65 | #define VMALLOC_START 0 | 65 | #define VMALLOC_START 0 |
66 | #define VMALLOC_END 0xffffffff | 66 | #define VMALLOC_END 0xffffffff |
67 | #define KMAP_START 0 | ||
68 | #define KMAP_END 0xffffffff | ||
67 | 69 | ||
68 | #include <asm-generic/pgtable.h> | 70 | #include <asm-generic/pgtable.h> |
69 | 71 | ||
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h index 847994ce6804..f9337f614660 100644 --- a/arch/m68k/include/asm/unistd.h +++ b/arch/m68k/include/asm/unistd.h | |||
@@ -4,7 +4,7 @@ | |||
4 | #include <uapi/asm/unistd.h> | 4 | #include <uapi/asm/unistd.h> |
5 | 5 | ||
6 | 6 | ||
7 | #define NR_syscalls 348 | 7 | #define NR_syscalls 349 |
8 | 8 | ||
9 | #define __ARCH_WANT_OLD_READDIR | 9 | #define __ARCH_WANT_OLD_READDIR |
10 | #define __ARCH_WANT_OLD_STAT | 10 | #define __ARCH_WANT_OLD_STAT |
diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h index b94bfbf90705..625f321001dc 100644 --- a/arch/m68k/include/uapi/asm/unistd.h +++ b/arch/m68k/include/uapi/asm/unistd.h | |||
@@ -353,5 +353,6 @@ | |||
353 | #define __NR_process_vm_readv 345 | 353 | #define __NR_process_vm_readv 345 |
354 | #define __NR_process_vm_writev 346 | 354 | #define __NR_process_vm_writev 346 |
355 | #define __NR_kcmp 347 | 355 | #define __NR_kcmp 347 |
356 | #define __NR_finit_module 348 | ||
356 | 357 | ||
357 | #endif /* _UAPI_ASM_M68K_UNISTD_H_ */ | 358 | #endif /* _UAPI_ASM_M68K_UNISTD_H_ */ |
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S index c30da5b3f2db..3f04ea0ab802 100644 --- a/arch/m68k/kernel/syscalltable.S +++ b/arch/m68k/kernel/syscalltable.S | |||
@@ -368,4 +368,5 @@ ENTRY(sys_call_table) | |||
368 | .long sys_process_vm_readv /* 345 */ | 368 | .long sys_process_vm_readv /* 345 */ |
369 | .long sys_process_vm_writev | 369 | .long sys_process_vm_writev |
370 | .long sys_kcmp | 370 | .long sys_kcmp |
371 | .long sys_finit_module | ||
371 | 372 | ||
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c index f0e05bce92f2..afd8106fd83b 100644 --- a/arch/m68k/mm/init.c +++ b/arch/m68k/mm/init.c | |||
@@ -39,6 +39,11 @@ | |||
39 | void *empty_zero_page; | 39 | void *empty_zero_page; |
40 | EXPORT_SYMBOL(empty_zero_page); | 40 | EXPORT_SYMBOL(empty_zero_page); |
41 | 41 | ||
42 | #if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE) | ||
43 | extern void init_pointer_table(unsigned long ptable); | ||
44 | extern pmd_t *zero_pgtable; | ||
45 | #endif | ||
46 | |||
42 | #ifdef CONFIG_MMU | 47 | #ifdef CONFIG_MMU |
43 | 48 | ||
44 | pg_data_t pg_data_map[MAX_NUMNODES]; | 49 | pg_data_t pg_data_map[MAX_NUMNODES]; |
@@ -69,9 +74,6 @@ void __init m68k_setup_node(int node) | |||
69 | node_set_online(node); | 74 | node_set_online(node); |
70 | } | 75 | } |
71 | 76 | ||
72 | extern void init_pointer_table(unsigned long ptable); | ||
73 | extern pmd_t *zero_pgtable; | ||
74 | |||
75 | #else /* CONFIG_MMU */ | 77 | #else /* CONFIG_MMU */ |
76 | 78 | ||
77 | /* | 79 | /* |
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index bfb44247d7a7..eb7850b46c25 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S | |||
@@ -1865,7 +1865,7 @@ syscall_restore: | |||
1865 | 1865 | ||
1866 | /* Are we being ptraced? */ | 1866 | /* Are we being ptraced? */ |
1867 | ldw TASK_FLAGS(%r1),%r19 | 1867 | ldw TASK_FLAGS(%r1),%r19 |
1868 | ldi (_TIF_SINGLESTEP|_TIF_BLOCKSTEP),%r2 | 1868 | ldi _TIF_SYSCALL_TRACE_MASK,%r2 |
1869 | and,COND(=) %r19,%r2,%r0 | 1869 | and,COND(=) %r19,%r2,%r0 |
1870 | b,n syscall_restore_rfi | 1870 | b,n syscall_restore_rfi |
1871 | 1871 | ||
@@ -1978,15 +1978,23 @@ syscall_restore_rfi: | |||
1978 | /* sr2 should be set to zero for userspace syscalls */ | 1978 | /* sr2 should be set to zero for userspace syscalls */ |
1979 | STREG %r0,TASK_PT_SR2(%r1) | 1979 | STREG %r0,TASK_PT_SR2(%r1) |
1980 | 1980 | ||
1981 | pt_regs_ok: | ||
1982 | LDREG TASK_PT_GR31(%r1),%r2 | 1981 | LDREG TASK_PT_GR31(%r1),%r2 |
1983 | depi 3,31,2,%r2 /* ensure return to user mode. */ | 1982 | depi 3,31,2,%r2 /* ensure return to user mode. */ |
1984 | STREG %r2,TASK_PT_IAOQ0(%r1) | 1983 | STREG %r2,TASK_PT_IAOQ0(%r1) |
1985 | ldo 4(%r2),%r2 | 1984 | ldo 4(%r2),%r2 |
1986 | STREG %r2,TASK_PT_IAOQ1(%r1) | 1985 | STREG %r2,TASK_PT_IAOQ1(%r1) |
1986 | b intr_restore | ||
1987 | copy %r25,%r16 | 1987 | copy %r25,%r16 |
1988 | |||
1989 | pt_regs_ok: | ||
1990 | LDREG TASK_PT_IAOQ0(%r1),%r2 | ||
1991 | depi 3,31,2,%r2 /* ensure return to user mode. */ | ||
1992 | STREG %r2,TASK_PT_IAOQ0(%r1) | ||
1993 | LDREG TASK_PT_IAOQ1(%r1),%r2 | ||
1994 | depi 3,31,2,%r2 | ||
1995 | STREG %r2,TASK_PT_IAOQ1(%r1) | ||
1988 | b intr_restore | 1996 | b intr_restore |
1989 | nop | 1997 | copy %r25,%r16 |
1990 | 1998 | ||
1991 | .import schedule,code | 1999 | .import schedule,code |
1992 | syscall_do_resched: | 2000 | syscall_do_resched: |
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c index c0b1affc06a8..0299d63cd112 100644 --- a/arch/parisc/kernel/irq.c +++ b/arch/parisc/kernel/irq.c | |||
@@ -410,11 +410,13 @@ void __init init_IRQ(void) | |||
410 | { | 410 | { |
411 | local_irq_disable(); /* PARANOID - should already be disabled */ | 411 | local_irq_disable(); /* PARANOID - should already be disabled */ |
412 | mtctl(~0UL, 23); /* EIRR : clear all pending external intr */ | 412 | mtctl(~0UL, 23); /* EIRR : clear all pending external intr */ |
413 | claim_cpu_irqs(); | ||
414 | #ifdef CONFIG_SMP | 413 | #ifdef CONFIG_SMP |
415 | if (!cpu_eiem) | 414 | if (!cpu_eiem) { |
415 | claim_cpu_irqs(); | ||
416 | cpu_eiem = EIEM_MASK(IPI_IRQ) | EIEM_MASK(TIMER_IRQ); | 416 | cpu_eiem = EIEM_MASK(IPI_IRQ) | EIEM_MASK(TIMER_IRQ); |
417 | } | ||
417 | #else | 418 | #else |
419 | claim_cpu_irqs(); | ||
418 | cpu_eiem = EIEM_MASK(TIMER_IRQ); | 420 | cpu_eiem = EIEM_MASK(TIMER_IRQ); |
419 | #endif | 421 | #endif |
420 | set_eiem(cpu_eiem); /* EIEM : enable all external intr */ | 422 | set_eiem(cpu_eiem); /* EIEM : enable all external intr */ |
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c index 857c2f545470..534abd4936e1 100644 --- a/arch/parisc/kernel/ptrace.c +++ b/arch/parisc/kernel/ptrace.c | |||
@@ -26,7 +26,7 @@ | |||
26 | #include <asm/asm-offsets.h> | 26 | #include <asm/asm-offsets.h> |
27 | 27 | ||
28 | /* PSW bits we allow the debugger to modify */ | 28 | /* PSW bits we allow the debugger to modify */ |
29 | #define USER_PSW_BITS (PSW_N | PSW_V | PSW_CB) | 29 | #define USER_PSW_BITS (PSW_N | PSW_B | PSW_V | PSW_CB) |
30 | 30 | ||
31 | /* | 31 | /* |
32 | * Called by kernel/ptrace.c when detaching.. | 32 | * Called by kernel/ptrace.c when detaching.. |
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c index 537996955998..fd051705a407 100644 --- a/arch/parisc/kernel/signal.c +++ b/arch/parisc/kernel/signal.c | |||
@@ -190,8 +190,10 @@ get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size) | |||
190 | DBG(1,"get_sigframe: ka = %#lx, sp = %#lx, frame_size = %#lx\n", | 190 | DBG(1,"get_sigframe: ka = %#lx, sp = %#lx, frame_size = %#lx\n", |
191 | (unsigned long)ka, sp, frame_size); | 191 | (unsigned long)ka, sp, frame_size); |
192 | 192 | ||
193 | /* Align alternate stack and reserve 64 bytes for the signal | ||
194 | handler's frame marker. */ | ||
193 | if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! sas_ss_flags(sp)) | 195 | if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! sas_ss_flags(sp)) |
194 | sp = current->sas_ss_sp; /* Stacks grow up! */ | 196 | sp = (current->sas_ss_sp + 0x7f) & ~0x3f; /* Stacks grow up! */ |
195 | 197 | ||
196 | DBG(1,"get_sigframe: Returning sp = %#lx\n", (unsigned long)sp); | 198 | DBG(1,"get_sigframe: Returning sp = %#lx\n", (unsigned long)sp); |
197 | return (void __user *) sp; /* Stacks grow up. Fun. */ | 199 | return (void __user *) sp; /* Stacks grow up. Fun. */ |
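A quick worked example of the new expression (not from the patch): with sas_ss_sp at 0x1000, (0x1000 + 0x7f) & ~0x3f yields 0x1040, i.e. the returned sp is 64-byte aligned with 64 bytes reserved below it for the frame marker, matching the comment added above.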
diff --git a/arch/parisc/math-emu/cnv_float.h b/arch/parisc/math-emu/cnv_float.h index 9071e093164a..933423fa5144 100644 --- a/arch/parisc/math-emu/cnv_float.h +++ b/arch/parisc/math-emu/cnv_float.h | |||
@@ -347,16 +347,15 @@ | |||
347 | Sgl_isinexact_to_fix(sgl_value,exponent) | 347 | Sgl_isinexact_to_fix(sgl_value,exponent) |
348 | 348 | ||
349 | #define Duint_from_sgl_mantissa(sgl_value,exponent,dresultA,dresultB) \ | 349 | #define Duint_from_sgl_mantissa(sgl_value,exponent,dresultA,dresultB) \ |
350 | {Sall(sgl_value) <<= SGL_EXP_LENGTH; /* left-justify */ \ | 350 | {unsigned int val = Sall(sgl_value) << SGL_EXP_LENGTH; \ |
351 | if (exponent <= 31) { \ | 351 | if (exponent <= 31) { \ |
352 | Dintp1(dresultA) = 0; \ | 352 | Dintp1(dresultA) = 0; \ |
353 | Dintp2(dresultB) = (unsigned)Sall(sgl_value) >> (31 - exponent); \ | 353 | Dintp2(dresultB) = val >> (31 - exponent); \ |
354 | } \ | 354 | } \ |
355 | else { \ | 355 | else { \ |
356 | Dintp1(dresultA) = Sall(sgl_value) >> (63 - exponent); \ | 356 | Dintp1(dresultA) = val >> (63 - exponent); \ |
357 | Dintp2(dresultB) = Sall(sgl_value) << (exponent - 31); \ | 357 | Dintp2(dresultB) = exponent <= 62 ? val << (exponent - 31) : 0; \ |
358 | } \ | 358 | } \ |
359 | Sall(sgl_value) >>= SGL_EXP_LENGTH; /* return to original */ \ | ||
360 | } | 359 | } |
361 | 360 | ||
362 | #define Duint_setzero(dresultA,dresultB) \ | 361 | #define Duint_setzero(dresultA,dresultB) \ |
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 4428fd178bce..6774c17a5576 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c | |||
@@ -340,9 +340,6 @@ int x86_setup_perfctr(struct perf_event *event) | |||
340 | /* BTS is currently only allowed for user-mode. */ | 340 | /* BTS is currently only allowed for user-mode. */ |
341 | if (!attr->exclude_kernel) | 341 | if (!attr->exclude_kernel) |
342 | return -EOPNOTSUPP; | 342 | return -EOPNOTSUPP; |
343 | |||
344 | if (!attr->exclude_guest) | ||
345 | return -EOPNOTSUPP; | ||
346 | } | 343 | } |
347 | 344 | ||
348 | hwc->config |= config; | 345 | hwc->config |= config; |
@@ -385,9 +382,6 @@ int x86_pmu_hw_config(struct perf_event *event) | |||
385 | if (event->attr.precise_ip) { | 382 | if (event->attr.precise_ip) { |
386 | int precise = 0; | 383 | int precise = 0; |
387 | 384 | ||
388 | if (!event->attr.exclude_guest) | ||
389 | return -EOPNOTSUPP; | ||
390 | |||
391 | /* Support for constant skid */ | 385 | /* Support for constant skid */ |
392 | if (x86_pmu.pebs_active && !x86_pmu.pebs_broken) { | 386 | if (x86_pmu.pebs_active && !x86_pmu.pebs_broken) { |
393 | precise++; | 387 | precise++; |
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index ff84d5469d77..6ed91d9980e2 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S | |||
@@ -1065,7 +1065,6 @@ ENTRY(xen_failsafe_callback) | |||
1065 | lea 16(%esp),%esp | 1065 | lea 16(%esp),%esp |
1066 | CFI_ADJUST_CFA_OFFSET -16 | 1066 | CFI_ADJUST_CFA_OFFSET -16 |
1067 | jz 5f | 1067 | jz 5f |
1068 | addl $16,%esp | ||
1069 | jmp iret_exc | 1068 | jmp iret_exc |
1070 | 5: pushl_cfi $-1 /* orig_ax = -1 => not a system call */ | 1069 | 5: pushl_cfi $-1 /* orig_ax = -1 => not a system call */ |
1071 | SAVE_ALL | 1070 | SAVE_ALL |
diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c index cd3b2438a980..9b4d51d0c0d0 100644 --- a/arch/x86/kernel/step.c +++ b/arch/x86/kernel/step.c | |||
@@ -165,10 +165,11 @@ void set_task_blockstep(struct task_struct *task, bool on) | |||
165 | * Ensure irq/preemption can't change debugctl in between. | 165 | * Ensure irq/preemption can't change debugctl in between. |
166 | * Note also that both TIF_BLOCKSTEP and debugctl should | 166 | * Note also that both TIF_BLOCKSTEP and debugctl should |
167 | * be changed atomically wrt preemption. | 167 | * be changed atomically wrt preemption. |
168 | * FIXME: this means that set/clear TIF_BLOCKSTEP is simply | 168 | * |
169 | * wrong if task != current, SIGKILL can wakeup the stopped | 169 | * NOTE: this means that set/clear TIF_BLOCKSTEP is only safe if |
170 | * tracee and set/clear can play with the running task, this | 170 | * task is current or it can't be running, otherwise we can race |
171 | * can confuse the next __switch_to_xtra(). | 171 | * with __switch_to_xtra(). We rely on ptrace_freeze_traced() but |
172 | * PTRACE_KILL is not safe. | ||
172 | */ | 173 | */ |
173 | local_irq_disable(); | 174 | local_irq_disable(); |
174 | debugctl = get_debugctlmsr(); | 175 | debugctl = get_debugctlmsr(); |
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index 4f7d2599b484..34bc4cee8887 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c | |||
@@ -432,13 +432,6 @@ static void __cpuinit xen_play_dead(void) /* used only with HOTPLUG_CPU */ | |||
432 | play_dead_common(); | 432 | play_dead_common(); |
433 | HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL); | 433 | HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL); |
434 | cpu_bringup(); | 434 | cpu_bringup(); |
435 | /* | ||
436 | * Balance out the preempt calls - as we are running in cpu_idle | ||
437 | * loop which has been called at bootup from cpu_bringup_and_idle. | ||
438 | * The cpucpu_bringup_and_idle called cpu_bringup which made a | ||
439 | * preempt_disable() So this preempt_enable will balance it out. | ||
440 | */ | ||
441 | preempt_enable(); | ||
442 | } | 435 | } |
443 | 436 | ||
444 | #else /* !CONFIG_HOTPLUG_CPU */ | 437 | #else /* !CONFIG_HOTPLUG_CPU */ |
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 7862d17976b7..497912732566 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c | |||
@@ -53,6 +53,7 @@ | |||
53 | 53 | ||
54 | enum { | 54 | enum { |
55 | AHCI_PCI_BAR_STA2X11 = 0, | 55 | AHCI_PCI_BAR_STA2X11 = 0, |
56 | AHCI_PCI_BAR_ENMOTUS = 2, | ||
56 | AHCI_PCI_BAR_STANDARD = 5, | 57 | AHCI_PCI_BAR_STANDARD = 5, |
57 | }; | 58 | }; |
58 | 59 | ||
@@ -410,6 +411,9 @@ static const struct pci_device_id ahci_pci_tbl[] = { | |||
410 | { PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci }, /* ASM1061 */ | 411 | { PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci }, /* ASM1061 */ |
411 | { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */ | 412 | { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */ |
412 | 413 | ||
414 | /* Enmotus */ | ||
415 | { PCI_DEVICE(0x1c44, 0x8000), board_ahci }, | ||
416 | |||
413 | /* Generic, PCI class code for AHCI */ | 417 | /* Generic, PCI class code for AHCI */ |
414 | { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, | 418 | { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, |
415 | PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci }, | 419 | PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci }, |
@@ -1098,9 +1102,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1098 | dev_info(&pdev->dev, | 1102 | dev_info(&pdev->dev, |
1099 | "PDC42819 can only drive SATA devices with this driver\n"); | 1103 | "PDC42819 can only drive SATA devices with this driver\n"); |
1100 | 1104 | ||
1101 | /* The Connext uses non-standard BAR */ | 1105 | /* Both Connext and Enmotus devices use non-standard BARs */ |
1102 | if (pdev->vendor == PCI_VENDOR_ID_STMICRO && pdev->device == 0xCC06) | 1106 | if (pdev->vendor == PCI_VENDOR_ID_STMICRO && pdev->device == 0xCC06) |
1103 | ahci_pci_bar = AHCI_PCI_BAR_STA2X11; | 1107 | ahci_pci_bar = AHCI_PCI_BAR_STA2X11; |
1108 | else if (pdev->vendor == 0x1c44 && pdev->device == 0x8000) | ||
1109 | ahci_pci_bar = AHCI_PCI_BAR_ENMOTUS; | ||
1104 | 1110 | ||
1105 | /* acquire resources */ | 1111 | /* acquire resources */ |
1106 | rc = pcim_enable_device(pdev); | 1112 | rc = pcim_enable_device(pdev); |
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index 320712a7b9ea..6cd7805e47ca 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c | |||
@@ -1951,13 +1951,13 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep) | |||
1951 | /* Use the nominal value 10 ms if the read MDAT is zero, | 1951 | /* Use the nominal value 10 ms if the read MDAT is zero, |
1952 | * the nominal value of DETO is 20 ms. | 1952 | * the nominal value of DETO is 20 ms. |
1953 | */ | 1953 | */ |
1954 | if (dev->sata_settings[ATA_LOG_DEVSLP_VALID] & | 1954 | if (dev->devslp_timing[ATA_LOG_DEVSLP_VALID] & |
1955 | ATA_LOG_DEVSLP_VALID_MASK) { | 1955 | ATA_LOG_DEVSLP_VALID_MASK) { |
1956 | mdat = dev->sata_settings[ATA_LOG_DEVSLP_MDAT] & | 1956 | mdat = dev->devslp_timing[ATA_LOG_DEVSLP_MDAT] & |
1957 | ATA_LOG_DEVSLP_MDAT_MASK; | 1957 | ATA_LOG_DEVSLP_MDAT_MASK; |
1958 | if (!mdat) | 1958 | if (!mdat) |
1959 | mdat = 10; | 1959 | mdat = 10; |
1960 | deto = dev->sata_settings[ATA_LOG_DEVSLP_DETO]; | 1960 | deto = dev->devslp_timing[ATA_LOG_DEVSLP_DETO]; |
1961 | if (!deto) | 1961 | if (!deto) |
1962 | deto = 20; | 1962 | deto = 20; |
1963 | } else { | 1963 | } else { |
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 9e8b99af400d..46cd3f4c6aaa 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
@@ -2325,24 +2325,28 @@ int ata_dev_configure(struct ata_device *dev) | |||
2325 | } | 2325 | } |
2326 | } | 2326 | } |
2327 | 2327 | ||
2328 | /* check and mark DevSlp capability */ | 2328 | /* Check and mark DevSlp capability. Get DevSlp timing variables |
2329 | if (ata_id_has_devslp(dev->id)) | 2329 | * from SATA Settings page of Identify Device Data Log. |
2330 | dev->flags |= ATA_DFLAG_DEVSLP; | ||
2331 | |||
2332 | /* Obtain SATA Settings page from Identify Device Data Log, | ||
2333 | * which contains DevSlp timing variables etc. | ||
2334 | * Exclude old devices with ata_id_has_ncq() | ||
2335 | */ | 2330 | */ |
2336 | if (ata_id_has_ncq(dev->id)) { | 2331 | if (ata_id_has_devslp(dev->id)) { |
2332 | u8 sata_setting[ATA_SECT_SIZE]; | ||
2333 | int i, j; | ||
2334 | |||
2335 | dev->flags |= ATA_DFLAG_DEVSLP; | ||
2337 | err_mask = ata_read_log_page(dev, | 2336 | err_mask = ata_read_log_page(dev, |
2338 | ATA_LOG_SATA_ID_DEV_DATA, | 2337 | ATA_LOG_SATA_ID_DEV_DATA, |
2339 | ATA_LOG_SATA_SETTINGS, | 2338 | ATA_LOG_SATA_SETTINGS, |
2340 | dev->sata_settings, | 2339 | sata_setting, |
2341 | 1); | 2340 | 1); |
2342 | if (err_mask) | 2341 | if (err_mask) |
2343 | ata_dev_dbg(dev, | 2342 | ata_dev_dbg(dev, |
2344 | "failed to get Identify Device Data, Emask 0x%x\n", | 2343 | "failed to get Identify Device Data, Emask 0x%x\n", |
2345 | err_mask); | 2344 | err_mask); |
2345 | else | ||
2346 | for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) { | ||
2347 | j = ATA_LOG_DEVSLP_OFFSET + i; | ||
2348 | dev->devslp_timing[i] = sata_setting[j]; | ||
2349 | } | ||
2346 | } | 2350 | } |
2347 | 2351 | ||
2348 | dev->cdb_len = 16; | 2352 | dev->cdb_len = 16; |
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index bf039b0e97b7..bcf4437214f5 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c | |||
@@ -2094,7 +2094,7 @@ static unsigned int ata_eh_speed_down(struct ata_device *dev, | |||
2094 | */ | 2094 | */ |
2095 | static inline int ata_eh_worth_retry(struct ata_queued_cmd *qc) | 2095 | static inline int ata_eh_worth_retry(struct ata_queued_cmd *qc) |
2096 | { | 2096 | { |
2097 | if (qc->flags & AC_ERR_MEDIA) | 2097 | if (qc->err_mask & AC_ERR_MEDIA) |
2098 | return 0; /* don't retry media errors */ | 2098 | return 0; /* don't retry media errors */ |
2099 | if (qc->flags & ATA_QCFLAG_IO) | 2099 | if (qc->flags & ATA_QCFLAG_IO) |
2100 | return 1; /* otherwise retry anything from fs stack */ | 2100 | return 1; /* otherwise retry anything from fs stack */ |
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 9d8409c02082..8ad21a25bc0d 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c | |||
@@ -889,6 +889,7 @@ static void virtblk_remove(struct virtio_device *vdev) | |||
889 | { | 889 | { |
890 | struct virtio_blk *vblk = vdev->priv; | 890 | struct virtio_blk *vblk = vdev->priv; |
891 | int index = vblk->index; | 891 | int index = vblk->index; |
892 | int refc; | ||
892 | 893 | ||
893 | /* Prevent config work handler from accessing the device. */ | 894 | /* Prevent config work handler from accessing the device. */ |
894 | mutex_lock(&vblk->config_lock); | 895 | mutex_lock(&vblk->config_lock); |
@@ -903,11 +904,15 @@ static void virtblk_remove(struct virtio_device *vdev) | |||
903 | 904 | ||
904 | flush_work(&vblk->config_work); | 905 | flush_work(&vblk->config_work); |
905 | 906 | ||
907 | refc = atomic_read(&disk_to_dev(vblk->disk)->kobj.kref.refcount); | ||
906 | put_disk(vblk->disk); | 908 | put_disk(vblk->disk); |
907 | mempool_destroy(vblk->pool); | 909 | mempool_destroy(vblk->pool); |
908 | vdev->config->del_vqs(vdev); | 910 | vdev->config->del_vqs(vdev); |
909 | kfree(vblk); | 911 | kfree(vblk); |
910 | ida_simple_remove(&vd_index_ida, index); | 912 | |
913 | /* Only free device id if we don't have any users */ | ||
914 | if (refc == 1) | ||
915 | ida_simple_remove(&vd_index_ida, index); | ||
911 | } | 916 | } |
912 | 917 | ||
913 | #ifdef CONFIG_PM | 918 | #ifdef CONFIG_PM |
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c index 7d9bd94be8d2..6819d63cb167 100644 --- a/drivers/gpio/gpio-mvebu.c +++ b/drivers/gpio/gpio-mvebu.c | |||
@@ -547,7 +547,6 @@ static int mvebu_gpio_probe(struct platform_device *pdev) | |||
547 | mvchip->membase = devm_request_and_ioremap(&pdev->dev, res); | 547 | mvchip->membase = devm_request_and_ioremap(&pdev->dev, res); |
548 | if (! mvchip->membase) { | 548 | if (! mvchip->membase) { |
549 | dev_err(&pdev->dev, "Cannot ioremap\n"); | 549 | dev_err(&pdev->dev, "Cannot ioremap\n"); |
550 | kfree(mvchip->chip.label); | ||
551 | return -ENOMEM; | 550 | return -ENOMEM; |
552 | } | 551 | } |
553 | 552 | ||
@@ -557,14 +556,12 @@ static int mvebu_gpio_probe(struct platform_device *pdev) | |||
557 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | 556 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); |
558 | if (! res) { | 557 | if (! res) { |
559 | dev_err(&pdev->dev, "Cannot get memory resource\n"); | 558 | dev_err(&pdev->dev, "Cannot get memory resource\n"); |
560 | kfree(mvchip->chip.label); | ||
561 | return -ENODEV; | 559 | return -ENODEV; |
562 | } | 560 | } |
563 | 561 | ||
564 | mvchip->percpu_membase = devm_request_and_ioremap(&pdev->dev, res); | 562 | mvchip->percpu_membase = devm_request_and_ioremap(&pdev->dev, res); |
565 | if (! mvchip->percpu_membase) { | 563 | if (! mvchip->percpu_membase) { |
566 | dev_err(&pdev->dev, "Cannot ioremap\n"); | 564 | dev_err(&pdev->dev, "Cannot ioremap\n"); |
567 | kfree(mvchip->chip.label); | ||
568 | return -ENOMEM; | 565 | return -ENOMEM; |
569 | } | 566 | } |
570 | } | 567 | } |
@@ -625,7 +622,6 @@ static int mvebu_gpio_probe(struct platform_device *pdev) | |||
625 | mvchip->irqbase = irq_alloc_descs(-1, 0, ngpios, -1); | 622 | mvchip->irqbase = irq_alloc_descs(-1, 0, ngpios, -1); |
626 | if (mvchip->irqbase < 0) { | 623 | if (mvchip->irqbase < 0) { |
627 | dev_err(&pdev->dev, "no irqs\n"); | 624 | dev_err(&pdev->dev, "no irqs\n"); |
628 | kfree(mvchip->chip.label); | ||
629 | return -ENOMEM; | 625 | return -ENOMEM; |
630 | } | 626 | } |
631 | 627 | ||
@@ -633,7 +629,6 @@ static int mvebu_gpio_probe(struct platform_device *pdev) | |||
633 | mvchip->membase, handle_level_irq); | 629 | mvchip->membase, handle_level_irq); |
634 | if (! gc) { | 630 | if (! gc) { |
635 | dev_err(&pdev->dev, "Cannot allocate generic irq_chip\n"); | 631 | dev_err(&pdev->dev, "Cannot allocate generic irq_chip\n"); |
636 | kfree(mvchip->chip.label); | ||
637 | return -ENOMEM; | 632 | return -ENOMEM; |
638 | } | 633 | } |
639 | 634 | ||
@@ -668,7 +663,6 @@ static int mvebu_gpio_probe(struct platform_device *pdev) | |||
668 | irq_remove_generic_chip(gc, IRQ_MSK(ngpios), IRQ_NOREQUEST, | 663 | irq_remove_generic_chip(gc, IRQ_MSK(ngpios), IRQ_NOREQUEST, |
669 | IRQ_LEVEL | IRQ_NOPROBE); | 664 | IRQ_LEVEL | IRQ_NOPROBE); |
670 | kfree(gc); | 665 | kfree(gc); |
671 | kfree(mvchip->chip.label); | ||
672 | return -ENODEV; | 666 | return -ENODEV; |
673 | } | 667 | } |
674 | 668 | ||
diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c index 01f7fe955590..76be7eed79de 100644 --- a/drivers/gpio/gpio-samsung.c +++ b/drivers/gpio/gpio-samsung.c | |||
@@ -32,7 +32,6 @@ | |||
32 | 32 | ||
33 | #include <mach/hardware.h> | 33 | #include <mach/hardware.h> |
34 | #include <mach/map.h> | 34 | #include <mach/map.h> |
35 | #include <mach/regs-clock.h> | ||
36 | #include <mach/regs-gpio.h> | 35 | #include <mach/regs-gpio.h> |
37 | 36 | ||
38 | #include <plat/cpu.h> | 37 | #include <plat/cpu.h> |
@@ -446,7 +445,7 @@ static struct samsung_gpio_cfg s3c24xx_gpiocfg_banka = { | |||
446 | }; | 445 | }; |
447 | #endif | 446 | #endif |
448 | 447 | ||
449 | #if defined(CONFIG_ARCH_EXYNOS4) || defined(CONFIG_ARCH_EXYNOS5) | 448 | #if defined(CONFIG_ARCH_EXYNOS4) || defined(CONFIG_SOC_EXYNOS5250) |
450 | static struct samsung_gpio_cfg exynos_gpio_cfg = { | 449 | static struct samsung_gpio_cfg exynos_gpio_cfg = { |
451 | .set_pull = exynos_gpio_setpull, | 450 | .set_pull = exynos_gpio_setpull, |
452 | .get_pull = exynos_gpio_getpull, | 451 | .get_pull = exynos_gpio_getpull, |
@@ -2446,7 +2445,7 @@ static struct samsung_gpio_chip exynos4_gpios_3[] = { | |||
2446 | }; | 2445 | }; |
2447 | #endif | 2446 | #endif |
2448 | 2447 | ||
2449 | #ifdef CONFIG_ARCH_EXYNOS5 | 2448 | #ifdef CONFIG_SOC_EXYNOS5250 |
2450 | static struct samsung_gpio_chip exynos5_gpios_1[] = { | 2449 | static struct samsung_gpio_chip exynos5_gpios_1[] = { |
2451 | { | 2450 | { |
2452 | .chip = { | 2451 | .chip = { |
@@ -2614,7 +2613,7 @@ static struct samsung_gpio_chip exynos5_gpios_1[] = { | |||
2614 | }; | 2613 | }; |
2615 | #endif | 2614 | #endif |
2616 | 2615 | ||
2617 | #ifdef CONFIG_ARCH_EXYNOS5 | 2616 | #ifdef CONFIG_SOC_EXYNOS5250 |
2618 | static struct samsung_gpio_chip exynos5_gpios_2[] = { | 2617 | static struct samsung_gpio_chip exynos5_gpios_2[] = { |
2619 | { | 2618 | { |
2620 | .chip = { | 2619 | .chip = { |
@@ -2675,7 +2674,7 @@ static struct samsung_gpio_chip exynos5_gpios_2[] = { | |||
2675 | }; | 2674 | }; |
2676 | #endif | 2675 | #endif |
2677 | 2676 | ||
2678 | #ifdef CONFIG_ARCH_EXYNOS5 | 2677 | #ifdef CONFIG_SOC_EXYNOS5250 |
2679 | static struct samsung_gpio_chip exynos5_gpios_3[] = { | 2678 | static struct samsung_gpio_chip exynos5_gpios_3[] = { |
2680 | { | 2679 | { |
2681 | .chip = { | 2680 | .chip = { |
@@ -2711,7 +2710,7 @@ static struct samsung_gpio_chip exynos5_gpios_3[] = { | |||
2711 | }; | 2710 | }; |
2712 | #endif | 2711 | #endif |
2713 | 2712 | ||
2714 | #ifdef CONFIG_ARCH_EXYNOS5 | 2713 | #ifdef CONFIG_SOC_EXYNOS5250 |
2715 | static struct samsung_gpio_chip exynos5_gpios_4[] = { | 2714 | static struct samsung_gpio_chip exynos5_gpios_4[] = { |
2716 | { | 2715 | { |
2717 | .chip = { | 2716 | .chip = { |
@@ -3010,7 +3009,7 @@ static __init int samsung_gpiolib_init(void) | |||
3010 | int i, nr_chips; | 3009 | int i, nr_chips; |
3011 | int group = 0; | 3010 | int group = 0; |
3012 | 3011 | ||
3013 | #ifdef CONFIG_PINCTRL_SAMSUNG | 3012 | #if defined(CONFIG_PINCTRL_EXYNOS) || defined(CONFIG_PINCTRL_EXYNOS5440) |
3014 | /* | 3013 | /* |
3015 | * This gpio driver includes support for device tree support and there | 3014 | * This gpio driver includes support for device tree support and there |
3016 | * are platforms using it. In order to maintain compatibility with those | 3015 | * are platforms using it. In order to maintain compatibility with those |
@@ -3026,6 +3025,7 @@ static __init int samsung_gpiolib_init(void) | |||
3026 | static const struct of_device_id exynos_pinctrl_ids[] = { | 3025 | static const struct of_device_id exynos_pinctrl_ids[] = { |
3027 | { .compatible = "samsung,pinctrl-exynos4210", }, | 3026 | { .compatible = "samsung,pinctrl-exynos4210", }, |
3028 | { .compatible = "samsung,pinctrl-exynos4x12", }, | 3027 | { .compatible = "samsung,pinctrl-exynos4x12", }, |
3028 | { .compatible = "samsung,pinctrl-exynos5440", }, | ||
3029 | }; | 3029 | }; |
3030 | for_each_matching_node(pctrl_np, exynos_pinctrl_ids) | 3030 | for_each_matching_node(pctrl_np, exynos_pinctrl_ids) |
3031 | if (pctrl_np && of_device_is_available(pctrl_np)) | 3031 | if (pctrl_np && of_device_is_available(pctrl_np)) |
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index e6a11ca85eaf..7944d301518a 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c | |||
@@ -641,6 +641,7 @@ static void i915_ring_error_state(struct seq_file *m, | |||
641 | seq_printf(m, "%s command stream:\n", ring_str(ring)); | 641 | seq_printf(m, "%s command stream:\n", ring_str(ring)); |
642 | seq_printf(m, " HEAD: 0x%08x\n", error->head[ring]); | 642 | seq_printf(m, " HEAD: 0x%08x\n", error->head[ring]); |
643 | seq_printf(m, " TAIL: 0x%08x\n", error->tail[ring]); | 643 | seq_printf(m, " TAIL: 0x%08x\n", error->tail[ring]); |
644 | seq_printf(m, " CTL: 0x%08x\n", error->ctl[ring]); | ||
644 | seq_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]); | 645 | seq_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]); |
645 | seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]); | 646 | seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]); |
646 | seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]); | 647 | seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]); |
@@ -693,6 +694,8 @@ static int i915_error_state(struct seq_file *m, void *unused) | |||
693 | seq_printf(m, "EIR: 0x%08x\n", error->eir); | 694 | seq_printf(m, "EIR: 0x%08x\n", error->eir); |
694 | seq_printf(m, "IER: 0x%08x\n", error->ier); | 695 | seq_printf(m, "IER: 0x%08x\n", error->ier); |
695 | seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er); | 696 | seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er); |
697 | seq_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake); | ||
698 | seq_printf(m, "DERRMR: 0x%08x\n", error->derrmr); | ||
696 | seq_printf(m, "CCID: 0x%08x\n", error->ccid); | 699 | seq_printf(m, "CCID: 0x%08x\n", error->ccid); |
697 | 700 | ||
698 | for (i = 0; i < dev_priv->num_fence_regs; i++) | 701 | for (i = 0; i < dev_priv->num_fence_regs; i++) |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index ed3059575576..12ab3bdea54d 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -188,10 +188,13 @@ struct drm_i915_error_state { | |||
188 | u32 pgtbl_er; | 188 | u32 pgtbl_er; |
189 | u32 ier; | 189 | u32 ier; |
190 | u32 ccid; | 190 | u32 ccid; |
191 | u32 derrmr; | ||
192 | u32 forcewake; | ||
191 | bool waiting[I915_NUM_RINGS]; | 193 | bool waiting[I915_NUM_RINGS]; |
192 | u32 pipestat[I915_MAX_PIPES]; | 194 | u32 pipestat[I915_MAX_PIPES]; |
193 | u32 tail[I915_NUM_RINGS]; | 195 | u32 tail[I915_NUM_RINGS]; |
194 | u32 head[I915_NUM_RINGS]; | 196 | u32 head[I915_NUM_RINGS]; |
197 | u32 ctl[I915_NUM_RINGS]; | ||
195 | u32 ipeir[I915_NUM_RINGS]; | 198 | u32 ipeir[I915_NUM_RINGS]; |
196 | u32 ipehr[I915_NUM_RINGS]; | 199 | u32 ipehr[I915_NUM_RINGS]; |
197 | u32 instdone[I915_NUM_RINGS]; | 200 | u32 instdone[I915_NUM_RINGS]; |
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index d6a994a07393..26d08bb58218 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c | |||
@@ -539,6 +539,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, | |||
539 | total = 0; | 539 | total = 0; |
540 | for (i = 0; i < count; i++) { | 540 | for (i = 0; i < count; i++) { |
541 | struct drm_i915_gem_relocation_entry __user *user_relocs; | 541 | struct drm_i915_gem_relocation_entry __user *user_relocs; |
542 | u64 invalid_offset = (u64)-1; | ||
543 | int j; | ||
542 | 544 | ||
543 | user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr; | 545 | user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr; |
544 | 546 | ||
@@ -549,6 +551,25 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, | |||
549 | goto err; | 551 | goto err; |
550 | } | 552 | } |
551 | 553 | ||
554 | /* As we do not update the known relocation offsets after | ||
555 | * relocating (due to the complexities in lock handling), | ||
556 | * we need to mark them as invalid now so that we force the | ||
557 | * relocation processing next time. Just in case the target | ||
558 | * object is evicted and then rebound into its old | ||
559 | * presumed_offset before the next execbuffer - if that | ||
560 | * happened we would make the mistake of assuming that the | ||
561 | * relocations were valid. | ||
562 | */ | ||
563 | for (j = 0; j < exec[i].relocation_count; j++) { | ||
564 | if (copy_to_user(&user_relocs[j].presumed_offset, | ||
565 | &invalid_offset, | ||
566 | sizeof(invalid_offset))) { | ||
567 | ret = -EFAULT; | ||
568 | mutex_lock(&dev->struct_mutex); | ||
569 | goto err; | ||
570 | } | ||
571 | } | ||
572 | |||
552 | reloc_offset[i] = total; | 573 | reloc_offset[i] = total; |
553 | total += exec[i].relocation_count; | 574 | total += exec[i].relocation_count; |
554 | } | 575 | } |
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 2220dec3e5d9..fe843389c7b4 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -1157,6 +1157,7 @@ static void i915_record_ring_state(struct drm_device *dev, | |||
1157 | error->acthd[ring->id] = intel_ring_get_active_head(ring); | 1157 | error->acthd[ring->id] = intel_ring_get_active_head(ring); |
1158 | error->head[ring->id] = I915_READ_HEAD(ring); | 1158 | error->head[ring->id] = I915_READ_HEAD(ring); |
1159 | error->tail[ring->id] = I915_READ_TAIL(ring); | 1159 | error->tail[ring->id] = I915_READ_TAIL(ring); |
1160 | error->ctl[ring->id] = I915_READ_CTL(ring); | ||
1160 | 1161 | ||
1161 | error->cpu_ring_head[ring->id] = ring->head; | 1162 | error->cpu_ring_head[ring->id] = ring->head; |
1162 | error->cpu_ring_tail[ring->id] = ring->tail; | 1163 | error->cpu_ring_tail[ring->id] = ring->tail; |
@@ -1251,6 +1252,16 @@ static void i915_capture_error_state(struct drm_device *dev) | |||
1251 | else | 1252 | else |
1252 | error->ier = I915_READ(IER); | 1253 | error->ier = I915_READ(IER); |
1253 | 1254 | ||
1255 | if (INTEL_INFO(dev)->gen >= 6) | ||
1256 | error->derrmr = I915_READ(DERRMR); | ||
1257 | |||
1258 | if (IS_VALLEYVIEW(dev)) | ||
1259 | error->forcewake = I915_READ(FORCEWAKE_VLV); | ||
1260 | else if (INTEL_INFO(dev)->gen >= 7) | ||
1261 | error->forcewake = I915_READ(FORCEWAKE_MT); | ||
1262 | else if (INTEL_INFO(dev)->gen == 6) | ||
1263 | error->forcewake = I915_READ(FORCEWAKE); | ||
1264 | |||
1254 | for_each_pipe(pipe) | 1265 | for_each_pipe(pipe) |
1255 | error->pipestat[pipe] = I915_READ(PIPESTAT(pipe)); | 1266 | error->pipestat[pipe] = I915_READ(PIPESTAT(pipe)); |
1256 | 1267 | ||
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 186ee5c85b51..b401788e1791 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -512,6 +512,8 @@ | |||
512 | #define GEN7_ERR_INT 0x44040 | 512 | #define GEN7_ERR_INT 0x44040 |
513 | #define ERR_INT_MMIO_UNCLAIMED (1<<13) | 513 | #define ERR_INT_MMIO_UNCLAIMED (1<<13) |
514 | 514 | ||
515 | #define DERRMR 0x44050 | ||
516 | |||
515 | /* GM45+ chicken bits -- debug workaround bits that may be required | 517 | /* GM45+ chicken bits -- debug workaround bits that may be required |
516 | * for various sorts of correct behavior. The top 16 bits of each are | 518 | * for various sorts of correct behavior. The top 16 bits of each are |
517 | * the enables for writing to the corresponding low bit. | 519 | * the enables for writing to the corresponding low bit. |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 1b63d55318a0..fb3715b4b09d 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -2579,7 +2579,8 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect | |||
2579 | 2579 | ||
2580 | static void | 2580 | static void |
2581 | intel_dp_init_panel_power_sequencer(struct drm_device *dev, | 2581 | intel_dp_init_panel_power_sequencer(struct drm_device *dev, |
2582 | struct intel_dp *intel_dp) | 2582 | struct intel_dp *intel_dp, |
2583 | struct edp_power_seq *out) | ||
2583 | { | 2584 | { |
2584 | struct drm_i915_private *dev_priv = dev->dev_private; | 2585 | struct drm_i915_private *dev_priv = dev->dev_private; |
2585 | struct edp_power_seq cur, vbt, spec, final; | 2586 | struct edp_power_seq cur, vbt, spec, final; |
@@ -2650,16 +2651,35 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev, | |||
2650 | intel_dp->panel_power_cycle_delay = get_delay(t11_t12); | 2651 | intel_dp->panel_power_cycle_delay = get_delay(t11_t12); |
2651 | #undef get_delay | 2652 | #undef get_delay |
2652 | 2653 | ||
2654 | DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n", | ||
2655 | intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay, | ||
2656 | intel_dp->panel_power_cycle_delay); | ||
2657 | |||
2658 | DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n", | ||
2659 | intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); | ||
2660 | |||
2661 | if (out) | ||
2662 | *out = final; | ||
2663 | } | ||
2664 | |||
2665 | static void | ||
2666 | intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, | ||
2667 | struct intel_dp *intel_dp, | ||
2668 | struct edp_power_seq *seq) | ||
2669 | { | ||
2670 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2671 | u32 pp_on, pp_off, pp_div; | ||
2672 | |||
2653 | /* And finally store the new values in the power sequencer. */ | 2673 | /* And finally store the new values in the power sequencer. */ |
2654 | pp_on = (final.t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) | | 2674 | pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) | |
2655 | (final.t8 << PANEL_LIGHT_ON_DELAY_SHIFT); | 2675 | (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT); |
2656 | pp_off = (final.t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) | | 2676 | pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) | |
2657 | (final.t10 << PANEL_POWER_DOWN_DELAY_SHIFT); | 2677 | (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT); |
2658 | /* Compute the divisor for the pp clock, simply match the Bspec | 2678 | /* Compute the divisor for the pp clock, simply match the Bspec |
2659 | * formula. */ | 2679 | * formula. */ |
2660 | pp_div = ((100 * intel_pch_rawclk(dev))/2 - 1) | 2680 | pp_div = ((100 * intel_pch_rawclk(dev))/2 - 1) |
2661 | << PP_REFERENCE_DIVIDER_SHIFT; | 2681 | << PP_REFERENCE_DIVIDER_SHIFT; |
2662 | pp_div |= (DIV_ROUND_UP(final.t11_t12, 1000) | 2682 | pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000) |
2663 | << PANEL_POWER_CYCLE_DELAY_SHIFT); | 2683 | << PANEL_POWER_CYCLE_DELAY_SHIFT); |
2664 | 2684 | ||
2665 | /* Haswell doesn't have any port selection bits for the panel | 2685 | /* Haswell doesn't have any port selection bits for the panel |
@@ -2675,14 +2695,6 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev, | |||
2675 | I915_WRITE(PCH_PP_OFF_DELAYS, pp_off); | 2695 | I915_WRITE(PCH_PP_OFF_DELAYS, pp_off); |
2676 | I915_WRITE(PCH_PP_DIVISOR, pp_div); | 2696 | I915_WRITE(PCH_PP_DIVISOR, pp_div); |
2677 | 2697 | ||
2678 | |||
2679 | DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n", | ||
2680 | intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay, | ||
2681 | intel_dp->panel_power_cycle_delay); | ||
2682 | |||
2683 | DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n", | ||
2684 | intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); | ||
2685 | |||
2686 | DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n", | 2698 | DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n", |
2687 | I915_READ(PCH_PP_ON_DELAYS), | 2699 | I915_READ(PCH_PP_ON_DELAYS), |
2688 | I915_READ(PCH_PP_OFF_DELAYS), | 2700 | I915_READ(PCH_PP_OFF_DELAYS), |
@@ -2699,6 +2711,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, | |||
2699 | struct drm_device *dev = intel_encoder->base.dev; | 2711 | struct drm_device *dev = intel_encoder->base.dev; |
2700 | struct drm_i915_private *dev_priv = dev->dev_private; | 2712 | struct drm_i915_private *dev_priv = dev->dev_private; |
2701 | struct drm_display_mode *fixed_mode = NULL; | 2713 | struct drm_display_mode *fixed_mode = NULL; |
2714 | struct edp_power_seq power_seq = { 0 }; | ||
2702 | enum port port = intel_dig_port->port; | 2715 | enum port port = intel_dig_port->port; |
2703 | const char *name = NULL; | 2716 | const char *name = NULL; |
2704 | int type; | 2717 | int type; |
@@ -2771,7 +2784,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, | |||
2771 | } | 2784 | } |
2772 | 2785 | ||
2773 | if (is_edp(intel_dp)) | 2786 | if (is_edp(intel_dp)) |
2774 | intel_dp_init_panel_power_sequencer(dev, intel_dp); | 2787 | intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq); |
2775 | 2788 | ||
2776 | intel_dp_i2c_init(intel_dp, intel_connector, name); | 2789 | intel_dp_i2c_init(intel_dp, intel_connector, name); |
2777 | 2790 | ||
@@ -2798,6 +2811,10 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, | |||
2798 | return; | 2811 | return; |
2799 | } | 2812 | } |
2800 | 2813 | ||
2814 | /* We now know it's not a ghost, init power sequence regs. */ | ||
2815 | intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, | ||
2816 | &power_seq); | ||
2817 | |||
2801 | ironlake_edp_panel_vdd_on(intel_dp); | 2818 | ironlake_edp_panel_vdd_on(intel_dp); |
2802 | edid = drm_get_edid(connector, &intel_dp->adapter); | 2819 | edid = drm_get_edid(connector, &intel_dp->adapter); |
2803 | if (edid) { | 2820 | if (edid) { |
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index e83a11794172..3280cffe50f4 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
@@ -4250,7 +4250,8 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) | |||
4250 | static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv) | 4250 | static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv) |
4251 | { | 4251 | { |
4252 | I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff)); | 4252 | I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff)); |
4253 | POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ | 4253 | /* something from same cacheline, but !FORCEWAKE_MT */ |
4254 | POSTING_READ(ECOBUS); | ||
4254 | } | 4255 | } |
4255 | 4256 | ||
4256 | static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) | 4257 | static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) |
@@ -4267,7 +4268,8 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) | |||
4267 | DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); | 4268 | DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); |
4268 | 4269 | ||
4269 | I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); | 4270 | I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); |
4270 | POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ | 4271 | /* something from same cacheline, but !FORCEWAKE_MT */ |
4272 | POSTING_READ(ECOBUS); | ||
4271 | 4273 | ||
4272 | if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1), | 4274 | if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1), |
4273 | FORCEWAKE_ACK_TIMEOUT_MS)) | 4275 | FORCEWAKE_ACK_TIMEOUT_MS)) |
@@ -4304,14 +4306,16 @@ void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv) | |||
4304 | static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) | 4306 | static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) |
4305 | { | 4307 | { |
4306 | I915_WRITE_NOTRACE(FORCEWAKE, 0); | 4308 | I915_WRITE_NOTRACE(FORCEWAKE, 0); |
4307 | /* gen6_gt_check_fifodbg doubles as the POSTING_READ */ | 4309 | /* something from same cacheline, but !FORCEWAKE */ |
4310 | POSTING_READ(ECOBUS); | ||
4308 | gen6_gt_check_fifodbg(dev_priv); | 4311 | gen6_gt_check_fifodbg(dev_priv); |
4309 | } | 4312 | } |
4310 | 4313 | ||
4311 | static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv) | 4314 | static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv) |
4312 | { | 4315 | { |
4313 | I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); | 4316 | I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); |
4314 | /* gen6_gt_check_fifodbg doubles as the POSTING_READ */ | 4317 | /* something from same cacheline, but !FORCEWAKE_MT */ |
4318 | POSTING_READ(ECOBUS); | ||
4315 | gen6_gt_check_fifodbg(dev_priv); | 4319 | gen6_gt_check_fifodbg(dev_priv); |
4316 | } | 4320 | } |
4317 | 4321 | ||
@@ -4351,6 +4355,8 @@ int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) | |||
4351 | static void vlv_force_wake_reset(struct drm_i915_private *dev_priv) | 4355 | static void vlv_force_wake_reset(struct drm_i915_private *dev_priv) |
4352 | { | 4356 | { |
4353 | I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff)); | 4357 | I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff)); |
4358 | /* something from same cacheline, but !FORCEWAKE_VLV */ | ||
4359 | POSTING_READ(FORCEWAKE_ACK_VLV); | ||
4354 | } | 4360 | } |
4355 | 4361 | ||
4356 | static void vlv_force_wake_get(struct drm_i915_private *dev_priv) | 4362 | static void vlv_force_wake_get(struct drm_i915_private *dev_priv) |
@@ -4371,7 +4377,8 @@ static void vlv_force_wake_get(struct drm_i915_private *dev_priv) | |||
4371 | static void vlv_force_wake_put(struct drm_i915_private *dev_priv) | 4377 | static void vlv_force_wake_put(struct drm_i915_private *dev_priv) |
4372 | { | 4378 | { |
4373 | I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); | 4379 | I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); |
4374 | /* The below doubles as a POSTING_READ */ | 4380 | /* something from same cacheline, but !FORCEWAKE_VLV */ |
4381 | POSTING_READ(FORCEWAKE_ACK_VLV); | ||
4375 | gen6_gt_check_fifodbg(dev_priv); | 4382 | gen6_gt_check_fifodbg(dev_priv); |
4376 | } | 4383 | } |
4377 | 4384 | ||
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 061fa0a28900..4d0e60adbc6d 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
@@ -2401,6 +2401,12 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask) | |||
2401 | { | 2401 | { |
2402 | struct evergreen_mc_save save; | 2402 | struct evergreen_mc_save save; |
2403 | 2403 | ||
2404 | if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) | ||
2405 | reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE); | ||
2406 | |||
2407 | if (RREG32(DMA_STATUS_REG) & DMA_IDLE) | ||
2408 | reset_mask &= ~RADEON_RESET_DMA; | ||
2409 | |||
2404 | if (reset_mask == 0) | 2410 | if (reset_mask == 0) |
2405 | return 0; | 2411 | return 0; |
2406 | 2412 | ||
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 896f1cbc58a5..59acabb45c9b 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c | |||
@@ -1409,6 +1409,12 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask) | |||
1409 | { | 1409 | { |
1410 | struct evergreen_mc_save save; | 1410 | struct evergreen_mc_save save; |
1411 | 1411 | ||
1412 | if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) | ||
1413 | reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE); | ||
1414 | |||
1415 | if (RREG32(DMA_STATUS_REG) & DMA_IDLE) | ||
1416 | reset_mask &= ~RADEON_RESET_DMA; | ||
1417 | |||
1412 | if (reset_mask == 0) | 1418 | if (reset_mask == 0) |
1413 | return 0; | 1419 | return 0; |
1414 | 1420 | ||
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 537e259b3837..3cb9d6089373 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
@@ -1378,6 +1378,12 @@ static int r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask) | |||
1378 | { | 1378 | { |
1379 | struct rv515_mc_save save; | 1379 | struct rv515_mc_save save; |
1380 | 1380 | ||
1381 | if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) | ||
1382 | reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE); | ||
1383 | |||
1384 | if (RREG32(DMA_STATUS_REG) & DMA_IDLE) | ||
1385 | reset_mask &= ~RADEON_RESET_DMA; | ||
1386 | |||
1381 | if (reset_mask == 0) | 1387 | if (reset_mask == 0) |
1382 | return 0; | 1388 | return 0; |
1383 | 1389 | ||
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 34e52304a525..a08f657329a0 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -324,7 +324,6 @@ struct radeon_bo { | |||
324 | struct list_head list; | 324 | struct list_head list; |
325 | /* Protected by tbo.reserved */ | 325 | /* Protected by tbo.reserved */ |
326 | u32 placements[3]; | 326 | u32 placements[3]; |
327 | u32 busy_placements[3]; | ||
328 | struct ttm_placement placement; | 327 | struct ttm_placement placement; |
329 | struct ttm_buffer_object tbo; | 328 | struct ttm_buffer_object tbo; |
330 | struct ttm_bo_kmap_obj kmap; | 329 | struct ttm_bo_kmap_obj kmap; |
@@ -654,6 +653,8 @@ struct radeon_ring { | |||
654 | u32 ptr_reg_mask; | 653 | u32 ptr_reg_mask; |
655 | u32 nop; | 654 | u32 nop; |
656 | u32 idx; | 655 | u32 idx; |
656 | u64 last_semaphore_signal_addr; | ||
657 | u64 last_semaphore_wait_addr; | ||
657 | }; | 658 | }; |
658 | 659 | ||
659 | /* | 660 | /* |
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index dff6cf77f953..d9bf96ee299a 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c | |||
@@ -69,9 +69,10 @@ | |||
69 | * 2.26.0 - r600-eg: fix htile size computation | 69 | * 2.26.0 - r600-eg: fix htile size computation |
70 | * 2.27.0 - r600-SI: Add CS ioctl support for async DMA | 70 | * 2.27.0 - r600-SI: Add CS ioctl support for async DMA |
71 | * 2.28.0 - r600-eg: Add MEM_WRITE packet support | 71 | * 2.28.0 - r600-eg: Add MEM_WRITE packet support |
72 | * 2.29.0 - R500 FP16 color clear registers | ||
72 | */ | 73 | */ |
73 | #define KMS_DRIVER_MAJOR 2 | 74 | #define KMS_DRIVER_MAJOR 2 |
74 | #define KMS_DRIVER_MINOR 28 | 75 | #define KMS_DRIVER_MINOR 29 |
75 | #define KMS_DRIVER_PATCHLEVEL 0 | 76 | #define KMS_DRIVER_PATCHLEVEL 0 |
76 | int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); | 77 | int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); |
77 | int radeon_driver_unload_kms(struct drm_device *dev); | 78 | int radeon_driver_unload_kms(struct drm_device *dev); |
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 883c95d8d90f..d3aface2d12d 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c | |||
@@ -84,6 +84,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain) | |||
84 | rbo->placement.fpfn = 0; | 84 | rbo->placement.fpfn = 0; |
85 | rbo->placement.lpfn = 0; | 85 | rbo->placement.lpfn = 0; |
86 | rbo->placement.placement = rbo->placements; | 86 | rbo->placement.placement = rbo->placements; |
87 | rbo->placement.busy_placement = rbo->placements; | ||
87 | if (domain & RADEON_GEM_DOMAIN_VRAM) | 88 | if (domain & RADEON_GEM_DOMAIN_VRAM) |
88 | rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | | 89 | rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | |
89 | TTM_PL_FLAG_VRAM; | 90 | TTM_PL_FLAG_VRAM; |
@@ -104,14 +105,6 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain) | |||
104 | if (!c) | 105 | if (!c) |
105 | rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; | 106 | rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; |
106 | rbo->placement.num_placement = c; | 107 | rbo->placement.num_placement = c; |
107 | |||
108 | c = 0; | ||
109 | rbo->placement.busy_placement = rbo->busy_placements; | ||
110 | if (rbo->rdev->flags & RADEON_IS_AGP) { | ||
111 | rbo->busy_placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT; | ||
112 | } else { | ||
113 | rbo->busy_placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT; | ||
114 | } | ||
115 | rbo->placement.num_busy_placement = c; | 108 | rbo->placement.num_busy_placement = c; |
116 | } | 109 | } |
117 | 110 | ||
@@ -357,6 +350,7 @@ int radeon_bo_list_validate(struct list_head *head) | |||
357 | { | 350 | { |
358 | struct radeon_bo_list *lobj; | 351 | struct radeon_bo_list *lobj; |
359 | struct radeon_bo *bo; | 352 | struct radeon_bo *bo; |
353 | u32 domain; | ||
360 | int r; | 354 | int r; |
361 | 355 | ||
362 | r = ttm_eu_reserve_buffers(head); | 356 | r = ttm_eu_reserve_buffers(head); |
@@ -366,9 +360,17 @@ int radeon_bo_list_validate(struct list_head *head) | |||
366 | list_for_each_entry(lobj, head, tv.head) { | 360 | list_for_each_entry(lobj, head, tv.head) { |
367 | bo = lobj->bo; | 361 | bo = lobj->bo; |
368 | if (!bo->pin_count) { | 362 | if (!bo->pin_count) { |
363 | domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain; | ||
364 | |||
365 | retry: | ||
366 | radeon_ttm_placement_from_domain(bo, domain); | ||
369 | r = ttm_bo_validate(&bo->tbo, &bo->placement, | 367 | r = ttm_bo_validate(&bo->tbo, &bo->placement, |
370 | true, false); | 368 | true, false); |
371 | if (unlikely(r)) { | 369 | if (unlikely(r)) { |
370 | if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) { | ||
371 | domain |= RADEON_GEM_DOMAIN_GTT; | ||
372 | goto retry; | ||
373 | } | ||
372 | return r; | 374 | return r; |
373 | } | 375 | } |
374 | } | 376 | } |
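
radeon_bo_list_validate() now recomputes the placement from the buffer's requested domain on every pass and, when validating a VRAM-only placement fails with anything other than -ERESTARTSYS, widens the domain to VRAM|GTT and retries instead of failing the whole submission. A small standalone sketch of the retry-with-wider-domain flow; validate() and the domain flags are stand-ins:

#include <errno.h>
#include <stdio.h>

#define DOMAIN_VRAM (1u << 0)
#define DOMAIN_GTT  (1u << 1)

/* pretend VRAM is full: only placements that allow GTT succeed */
static int validate(unsigned domain)
{
	return (domain & DOMAIN_GTT) ? 0 : -ENOMEM;
}

static int validate_with_fallback(unsigned domain)
{
	int r;

retry:
	r = validate(domain);
	if (r) {
		if (r != -ERESTARTSYS && domain == DOMAIN_VRAM) {
			domain |= DOMAIN_GTT;   /* fall back to GTT as well */
			goto retry;
		}
		return r;
	}
	return 0;
}

int main(void)
{
	printf("result: %d\n", validate_with_fallback(DOMAIN_VRAM));
	return 0;
}
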
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 141f2b6a9cf2..2430d80b1871 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c | |||
@@ -784,6 +784,8 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data) | |||
784 | } | 784 | } |
785 | seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n", ring->wptr, ring->wptr); | 785 | seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n", ring->wptr, ring->wptr); |
786 | seq_printf(m, "driver's copy of the rptr: 0x%08x [%5d]\n", ring->rptr, ring->rptr); | 786 | seq_printf(m, "driver's copy of the rptr: 0x%08x [%5d]\n", ring->rptr, ring->rptr); |
787 | seq_printf(m, "last semaphore signal addr : 0x%016llx\n", ring->last_semaphore_signal_addr); | ||
788 | seq_printf(m, "last semaphore wait addr : 0x%016llx\n", ring->last_semaphore_wait_addr); | ||
787 | seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw); | 789 | seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw); |
788 | seq_printf(m, "%u dwords in ring\n", count); | 790 | seq_printf(m, "%u dwords in ring\n", count); |
789 | /* print 8 dw before current rptr as often it's the last executed | 791 | /* print 8 dw before current rptr as often it's the last executed |
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c index 97f3ece81cd2..8dcc20f53d73 100644 --- a/drivers/gpu/drm/radeon/radeon_semaphore.c +++ b/drivers/gpu/drm/radeon/radeon_semaphore.c | |||
@@ -95,6 +95,10 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev, | |||
95 | /* we assume caller has already allocated space on waiters ring */ | 95 | /* we assume caller has already allocated space on waiters ring */ |
96 | radeon_semaphore_emit_wait(rdev, waiter, semaphore); | 96 | radeon_semaphore_emit_wait(rdev, waiter, semaphore); |
97 | 97 | ||
98 | /* for debugging lockup only, used by sysfs debug files */ | ||
99 | rdev->ring[signaler].last_semaphore_signal_addr = semaphore->gpu_addr; | ||
100 | rdev->ring[waiter].last_semaphore_wait_addr = semaphore->gpu_addr; | ||
101 | |||
98 | return 0; | 102 | return 0; |
99 | } | 103 | } |
100 | 104 | ||
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rv515 b/drivers/gpu/drm/radeon/reg_srcs/rv515 index 911a8fbd32bb..78d5e99d759d 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/rv515 +++ b/drivers/gpu/drm/radeon/reg_srcs/rv515 | |||
@@ -324,6 +324,8 @@ rv515 0x6d40 | |||
324 | 0x46AC US_OUT_FMT_2 | 324 | 0x46AC US_OUT_FMT_2 |
325 | 0x46B0 US_OUT_FMT_3 | 325 | 0x46B0 US_OUT_FMT_3 |
326 | 0x46B4 US_W_FMT | 326 | 0x46B4 US_W_FMT |
327 | 0x46C0 RB3D_COLOR_CLEAR_VALUE_AR | ||
328 | 0x46C4 RB3D_COLOR_CLEAR_VALUE_GB | ||
327 | 0x4BC0 FG_FOG_BLEND | 329 | 0x4BC0 FG_FOG_BLEND |
328 | 0x4BC4 FG_FOG_FACTOR | 330 | 0x4BC4 FG_FOG_FACTOR |
329 | 0x4BC8 FG_FOG_COLOR_R | 331 | 0x4BC8 FG_FOG_COLOR_R |
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 3240a3d64f30..ae8b48205a6c 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c | |||
@@ -2215,6 +2215,12 @@ static int si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask) | |||
2215 | { | 2215 | { |
2216 | struct evergreen_mc_save save; | 2216 | struct evergreen_mc_save save; |
2217 | 2217 | ||
2218 | if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) | ||
2219 | reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE); | ||
2220 | |||
2221 | if (RREG32(DMA_STATUS_REG) & DMA_IDLE) | ||
2222 | reset_mask &= ~RADEON_RESET_DMA; | ||
2223 | |||
2218 | if (reset_mask == 0) | 2224 | if (reset_mask == 0) |
2219 | return 0; | 2225 | return 0; |
2220 | 2226 | ||
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 33d20be87db5..52b20b12c83a 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c | |||
@@ -434,6 +434,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, | |||
434 | bo->mem = tmp_mem; | 434 | bo->mem = tmp_mem; |
435 | bdev->driver->move_notify(bo, mem); | 435 | bdev->driver->move_notify(bo, mem); |
436 | bo->mem = *mem; | 436 | bo->mem = *mem; |
437 | *mem = tmp_mem; | ||
437 | } | 438 | } |
438 | 439 | ||
439 | goto out_err; | 440 | goto out_err; |
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index d73d6e3e17b2..44420fca7dfa 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c | |||
@@ -344,8 +344,12 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, | |||
344 | 344 | ||
345 | if (ttm->state == tt_unpopulated) { | 345 | if (ttm->state == tt_unpopulated) { |
346 | ret = ttm->bdev->driver->ttm_tt_populate(ttm); | 346 | ret = ttm->bdev->driver->ttm_tt_populate(ttm); |
347 | if (ret) | 347 | if (ret) { |
348 | /* if we fail here don't nuke the mm node | ||
349 | * as the bo still owns it */ | ||
350 | old_copy.mm_node = NULL; | ||
348 | goto out1; | 351 | goto out1; |
352 | } | ||
349 | } | 353 | } |
350 | 354 | ||
351 | add = 0; | 355 | add = 0; |
@@ -371,8 +375,11 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, | |||
371 | prot); | 375 | prot); |
372 | } else | 376 | } else |
373 | ret = ttm_copy_io_page(new_iomap, old_iomap, page); | 377 | ret = ttm_copy_io_page(new_iomap, old_iomap, page); |
374 | if (ret) | 378 | if (ret) { |
379 | /* failing here, means keep old copy as-is */ | ||
380 | old_copy.mm_node = NULL; | ||
375 | goto out1; | 381 | goto out1; |
382 | } | ||
376 | } | 383 | } |
377 | mb(); | 384 | mb(); |
378 | out2: | 385 | out2: |
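
Both new error paths in ttm_bo_move_memcpy() clear old_copy.mm_node before jumping to the common out1 label, because on those paths the buffer object still owns the old memory node and the shared cleanup must not free it. A tiny illustration of nulling the reference you do not own before a common exit; the structure and function names are invented for the example:

#include <stdio.h>

struct mem_node { int id; };

/* the caller keeps ownership of *old_node_ref unless the move fully succeeds */
static int move_memcpy(struct mem_node **old_node_ref, int simulate_failure)
{
	struct mem_node *old_copy = *old_node_ref;
	int ret = 0;

	if (simulate_failure) {
		/* failing here means the caller still owns the old node, so
		 * forget our reference before the shared cleanup below */
		old_copy = NULL;
		ret = -1;
		goto out;
	}

	/* success: the old node really is ours to release now */
	*old_node_ref = NULL;

out:
	if (old_copy)
		printf("releasing node %d\n", old_copy->id);
	return ret;
}

int main(void)
{
	struct mem_node node = { 42 };
	struct mem_node *ref = &node;

	move_memcpy(&ref, 1);   /* failure: node 42 must not be released */
	move_memcpy(&ref, 0);   /* success: node 42 is released */
	return 0;
}
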
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c index f6c0011a0337..dd289fd179ca 100644 --- a/drivers/hv/hv_balloon.c +++ b/drivers/hv/hv_balloon.c | |||
@@ -403,7 +403,7 @@ struct dm_info_header { | |||
403 | */ | 403 | */ |
404 | 404 | ||
405 | struct dm_info_msg { | 405 | struct dm_info_msg { |
406 | struct dm_info_header header; | 406 | struct dm_header hdr; |
407 | __u32 reserved; | 407 | __u32 reserved; |
408 | __u32 info_size; | 408 | __u32 info_size; |
409 | __u8 info[]; | 409 | __u8 info[]; |
@@ -503,13 +503,17 @@ static void hot_add_req(struct hv_dynmem_device *dm, struct dm_hot_add *msg) | |||
503 | 503 | ||
504 | static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg) | 504 | static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg) |
505 | { | 505 | { |
506 | switch (msg->header.type) { | 506 | struct dm_info_header *info_hdr; |
507 | |||
508 | info_hdr = (struct dm_info_header *)msg->info; | ||
509 | |||
510 | switch (info_hdr->type) { | ||
507 | case INFO_TYPE_MAX_PAGE_CNT: | 511 | case INFO_TYPE_MAX_PAGE_CNT: |
508 | pr_info("Received INFO_TYPE_MAX_PAGE_CNT\n"); | 512 | pr_info("Received INFO_TYPE_MAX_PAGE_CNT\n"); |
509 | pr_info("Data Size is %d\n", msg->header.data_size); | 513 | pr_info("Data Size is %d\n", info_hdr->data_size); |
510 | break; | 514 | break; |
511 | default: | 515 | default: |
512 | pr_info("Received Unknown type: %d\n", msg->header.type); | 516 | pr_info("Received Unknown type: %d\n", info_hdr->type); |
513 | } | 517 | } |
514 | } | 518 | } |
515 | 519 | ||
@@ -879,7 +883,7 @@ static int balloon_probe(struct hv_device *dev, | |||
879 | balloon_onchannelcallback, dev); | 883 | balloon_onchannelcallback, dev); |
880 | 884 | ||
881 | if (ret) | 885 | if (ret) |
882 | return ret; | 886 | goto probe_error0; |
883 | 887 | ||
884 | dm_device.dev = dev; | 888 | dm_device.dev = dev; |
885 | dm_device.state = DM_INITIALIZING; | 889 | dm_device.state = DM_INITIALIZING; |
@@ -891,7 +895,7 @@ static int balloon_probe(struct hv_device *dev, | |||
891 | kthread_run(dm_thread_func, &dm_device, "hv_balloon"); | 895 | kthread_run(dm_thread_func, &dm_device, "hv_balloon"); |
892 | if (IS_ERR(dm_device.thread)) { | 896 | if (IS_ERR(dm_device.thread)) { |
893 | ret = PTR_ERR(dm_device.thread); | 897 | ret = PTR_ERR(dm_device.thread); |
894 | goto probe_error0; | 898 | goto probe_error1; |
895 | } | 899 | } |
896 | 900 | ||
897 | hv_set_drvdata(dev, &dm_device); | 901 | hv_set_drvdata(dev, &dm_device); |
@@ -914,12 +918,12 @@ static int balloon_probe(struct hv_device *dev, | |||
914 | VM_PKT_DATA_INBAND, | 918 | VM_PKT_DATA_INBAND, |
915 | VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); | 919 | VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); |
916 | if (ret) | 920 | if (ret) |
917 | goto probe_error1; | 921 | goto probe_error2; |
918 | 922 | ||
919 | t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ); | 923 | t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ); |
920 | if (t == 0) { | 924 | if (t == 0) { |
921 | ret = -ETIMEDOUT; | 925 | ret = -ETIMEDOUT; |
922 | goto probe_error1; | 926 | goto probe_error2; |
923 | } | 927 | } |
924 | 928 | ||
925 | /* | 929 | /* |
@@ -928,7 +932,7 @@ static int balloon_probe(struct hv_device *dev, | |||
928 | */ | 932 | */ |
929 | if (dm_device.state == DM_INIT_ERROR) { | 933 | if (dm_device.state == DM_INIT_ERROR) { |
930 | ret = -ETIMEDOUT; | 934 | ret = -ETIMEDOUT; |
931 | goto probe_error1; | 935 | goto probe_error2; |
932 | } | 936 | } |
933 | /* | 937 | /* |
934 | * Now submit our capabilities to the host. | 938 | * Now submit our capabilities to the host. |
@@ -961,12 +965,12 @@ static int balloon_probe(struct hv_device *dev, | |||
961 | VM_PKT_DATA_INBAND, | 965 | VM_PKT_DATA_INBAND, |
962 | VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); | 966 | VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); |
963 | if (ret) | 967 | if (ret) |
964 | goto probe_error1; | 968 | goto probe_error2; |
965 | 969 | ||
966 | t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ); | 970 | t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ); |
967 | if (t == 0) { | 971 | if (t == 0) { |
968 | ret = -ETIMEDOUT; | 972 | ret = -ETIMEDOUT; |
969 | goto probe_error1; | 973 | goto probe_error2; |
970 | } | 974 | } |
971 | 975 | ||
972 | /* | 976 | /* |
@@ -975,18 +979,20 @@ static int balloon_probe(struct hv_device *dev, | |||
975 | */ | 979 | */ |
976 | if (dm_device.state == DM_INIT_ERROR) { | 980 | if (dm_device.state == DM_INIT_ERROR) { |
977 | ret = -ETIMEDOUT; | 981 | ret = -ETIMEDOUT; |
978 | goto probe_error1; | 982 | goto probe_error2; |
979 | } | 983 | } |
980 | 984 | ||
981 | dm_device.state = DM_INITIALIZED; | 985 | dm_device.state = DM_INITIALIZED; |
982 | 986 | ||
983 | return 0; | 987 | return 0; |
984 | 988 | ||
985 | probe_error1: | 989 | probe_error2: |
986 | kthread_stop(dm_device.thread); | 990 | kthread_stop(dm_device.thread); |
987 | 991 | ||
988 | probe_error0: | 992 | probe_error1: |
989 | vmbus_close(dev->channel); | 993 | vmbus_close(dev->channel); |
994 | probe_error0: | ||
995 | kfree(send_buffer); | ||
990 | return ret; | 996 | return ret; |
991 | } | 997 | } |
992 | 998 | ||
@@ -999,6 +1005,7 @@ static int balloon_remove(struct hv_device *dev) | |||
999 | 1005 | ||
1000 | vmbus_close(dev->channel); | 1006 | vmbus_close(dev->channel); |
1001 | kthread_stop(dm->thread); | 1007 | kthread_stop(dm->thread); |
1008 | kfree(send_buffer); | ||
1002 | 1009 | ||
1003 | return 0; | 1010 | return 0; |
1004 | } | 1011 | } |
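
The hv_balloon error labels are renumbered so each one undoes exactly what succeeded before the failure and then falls through to the next: probe_error2 stops the kthread, probe_error1 closes the VMBus channel, probe_error0 frees the send buffer, and balloon_remove() gains the matching kfree(send_buffer). A reduced sketch of that cascading-unwind style for a probe routine; acquire()/release() and the resource names are placeholders:

#include <stdio.h>

static int acquire(const char *what, int fail)
{
	if (fail) {
		printf("failed to acquire %s\n", what);
		return -1;
	}
	printf("acquired %s\n", what);
	return 0;
}

static void release(const char *what)
{
	printf("released %s\n", what);
}

static int probe(int fail_at)
{
	int ret;

	ret = acquire("buffer", fail_at == 0);          /* e.g. kzalloc */
	if (ret)
		return ret;                             /* nothing to undo yet */

	ret = acquire("channel", fail_at == 1);         /* e.g. vmbus_open */
	if (ret)
		goto err_buffer;

	ret = acquire("thread", fail_at == 2);          /* e.g. kthread_run */
	if (ret)
		goto err_channel;

	return 0;

	/* labels unwind in strict reverse order of acquisition */
err_channel:
	release("channel");
err_buffer:
	release("buffer");
	return ret;
}

int main(void)
{
	return probe(2) ? 1 : 0;
}
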
diff --git a/drivers/media/i2c/m5mols/m5mols_core.c b/drivers/media/i2c/m5mols/m5mols_core.c index 8a8d42fe2633..d4e7567b367c 100644 --- a/drivers/media/i2c/m5mols/m5mols_core.c +++ b/drivers/media/i2c/m5mols/m5mols_core.c | |||
@@ -556,7 +556,7 @@ static int m5mols_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh, | |||
556 | mutex_lock(&info->lock); | 556 | mutex_lock(&info->lock); |
557 | 557 | ||
558 | format = __find_format(info, fh, fmt->which, info->res_type); | 558 | format = __find_format(info, fh, fmt->which, info->res_type); |
559 | if (!format) | 559 | if (format) |
560 | fmt->format = *format; | 560 | fmt->format = *format; |
561 | else | 561 | else |
562 | ret = -EINVAL; | 562 | ret = -EINVAL; |
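
The m5mols fix inverts a test that had it backwards: the old code dereferenced the format pointer only when the lookup had returned NULL. With the corrected condition the structure is copied when the lookup succeeds and -EINVAL is returned when it does not. The same lookup-then-copy shape in a standalone form; find_format() and struct fmt are invented for the example:

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

struct fmt { int width, height; };

static struct fmt *find_format(int want_valid)
{
	static struct fmt f = { 640, 480 };
	return want_valid ? &f : NULL;
}

static int get_fmt(int want_valid, struct fmt *out)
{
	struct fmt *format = find_format(want_valid);

	if (format)             /* only dereference when the lookup succeeded */
		*out = *format;
	else
		return -EINVAL;
	return 0;
}

int main(void)
{
	struct fmt out;

	printf("%d %d\n", get_fmt(1, &out), get_fmt(0, &out));
	return 0;
}
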
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c index e0d73a642186..8dac17511e61 100644 --- a/drivers/media/platform/omap3isp/ispvideo.c +++ b/drivers/media/platform/omap3isp/ispvideo.c | |||
@@ -35,9 +35,6 @@ | |||
35 | #include <linux/vmalloc.h> | 35 | #include <linux/vmalloc.h> |
36 | #include <media/v4l2-dev.h> | 36 | #include <media/v4l2-dev.h> |
37 | #include <media/v4l2-ioctl.h> | 37 | #include <media/v4l2-ioctl.h> |
38 | #include <plat/iommu.h> | ||
39 | #include <plat/iovmm.h> | ||
40 | #include <plat/omap-pm.h> | ||
41 | 38 | ||
42 | #include "ispvideo.h" | 39 | #include "ispvideo.h" |
43 | #include "isp.h" | 40 | #include "isp.h" |
diff --git a/drivers/media/platform/s5p-fimc/fimc-mdevice.c b/drivers/media/platform/s5p-fimc/fimc-mdevice.c index 4ab99f3a7b09..b4a68ecf0ca7 100644 --- a/drivers/media/platform/s5p-fimc/fimc-mdevice.c +++ b/drivers/media/platform/s5p-fimc/fimc-mdevice.c | |||
@@ -593,7 +593,7 @@ static int __fimc_md_create_flite_source_links(struct fimc_md *fmd) | |||
593 | { | 593 | { |
594 | struct media_entity *source, *sink; | 594 | struct media_entity *source, *sink; |
595 | unsigned int flags = MEDIA_LNK_FL_ENABLED; | 595 | unsigned int flags = MEDIA_LNK_FL_ENABLED; |
596 | int i, ret; | 596 | int i, ret = 0; |
597 | 597 | ||
598 | for (i = 0; i < FIMC_LITE_MAX_DEVS; i++) { | 598 | for (i = 0; i < FIMC_LITE_MAX_DEVS; i++) { |
599 | struct fimc_lite *fimc = fmd->fimc_lite[i]; | 599 | struct fimc_lite *fimc = fmd->fimc_lite[i]; |
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c index 379f57433711..681bc6ba149d 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c | |||
@@ -412,62 +412,48 @@ leave_handle_frame: | |||
412 | } | 412 | } |
413 | 413 | ||
414 | /* Error handling for interrupt */ | 414 | /* Error handling for interrupt */ |
415 | static void s5p_mfc_handle_error(struct s5p_mfc_ctx *ctx, | 415 | static void s5p_mfc_handle_error(struct s5p_mfc_dev *dev, |
416 | unsigned int reason, unsigned int err) | 416 | struct s5p_mfc_ctx *ctx, unsigned int reason, unsigned int err) |
417 | { | 417 | { |
418 | struct s5p_mfc_dev *dev; | ||
419 | unsigned long flags; | 418 | unsigned long flags; |
420 | 419 | ||
421 | /* If no context is available then all necessary | ||
422 | * processing has been done. */ | ||
423 | if (ctx == NULL) | ||
424 | return; | ||
425 | |||
426 | dev = ctx->dev; | ||
427 | mfc_err("Interrupt Error: %08x\n", err); | 420 | mfc_err("Interrupt Error: %08x\n", err); |
428 | s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev); | ||
429 | wake_up_dev(dev, reason, err); | ||
430 | 421 | ||
431 | /* Error recovery is dependent on the state of context */ | 422 | if (ctx != NULL) { |
432 | switch (ctx->state) { | 423 | /* Error recovery is dependent on the state of context */ |
433 | case MFCINST_INIT: | 424 | switch (ctx->state) { |
434 | /* This error had to happen while acquiring instance */ | 425 | case MFCINST_RES_CHANGE_INIT: |
435 | case MFCINST_GOT_INST: | 426 | case MFCINST_RES_CHANGE_FLUSH: |
436 | /* This error had to happen while parsing the header */ | 427 | case MFCINST_RES_CHANGE_END: |
437 | case MFCINST_HEAD_PARSED: | 428 | case MFCINST_FINISHING: |
438 | /* This error had to happen while setting dst buffers */ | 429 | case MFCINST_FINISHED: |
439 | case MFCINST_RETURN_INST: | 430 | case MFCINST_RUNNING: |
440 | /* This error had to happen while releasing instance */ | 431 | /* It is highly probable that an error occurred |
441 | clear_work_bit(ctx); | 432 | * while decoding a frame */ |
442 | wake_up_ctx(ctx, reason, err); | 433 | clear_work_bit(ctx); |
443 | if (test_and_clear_bit(0, &dev->hw_lock) == 0) | 434 | ctx->state = MFCINST_ERROR; |
444 | BUG(); | 435 | /* Mark all dst buffers as having an error */ |
445 | s5p_mfc_clock_off(); | 436 | spin_lock_irqsave(&dev->irqlock, flags); |
446 | ctx->state = MFCINST_ERROR; | 437 | s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue, |
447 | break; | 438 | &ctx->dst_queue, &ctx->vq_dst); |
448 | case MFCINST_FINISHING: | 439 | /* Mark all src buffers as having an error */ |
449 | case MFCINST_FINISHED: | 440 | s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue, |
450 | case MFCINST_RUNNING: | 441 | &ctx->src_queue, &ctx->vq_src); |
451 | /* It is highly probable that an error occurred | 442 | spin_unlock_irqrestore(&dev->irqlock, flags); |
452 | * while decoding a frame */ | 443 | wake_up_ctx(ctx, reason, err); |
453 | clear_work_bit(ctx); | 444 | break; |
454 | ctx->state = MFCINST_ERROR; | 445 | default: |
455 | /* Mark all dst buffers as having an error */ | 446 | clear_work_bit(ctx); |
456 | spin_lock_irqsave(&dev->irqlock, flags); | 447 | ctx->state = MFCINST_ERROR; |
457 | s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue, &ctx->dst_queue, | 448 | wake_up_ctx(ctx, reason, err); |
458 | &ctx->vq_dst); | 449 | break; |
459 | /* Mark all src buffers as having an error */ | 450 | } |
460 | s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue, &ctx->src_queue, | ||
461 | &ctx->vq_src); | ||
462 | spin_unlock_irqrestore(&dev->irqlock, flags); | ||
463 | if (test_and_clear_bit(0, &dev->hw_lock) == 0) | ||
464 | BUG(); | ||
465 | s5p_mfc_clock_off(); | ||
466 | break; | ||
467 | default: | ||
468 | mfc_err("Encountered an error interrupt which had not been handled\n"); | ||
469 | break; | ||
470 | } | 451 | } |
452 | if (test_and_clear_bit(0, &dev->hw_lock) == 0) | ||
453 | BUG(); | ||
454 | s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev); | ||
455 | s5p_mfc_clock_off(); | ||
456 | wake_up_dev(dev, reason, err); | ||
471 | return; | 457 | return; |
472 | } | 458 | } |
473 | 459 | ||
@@ -632,7 +618,7 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv) | |||
632 | dev->warn_start) | 618 | dev->warn_start) |
633 | s5p_mfc_handle_frame(ctx, reason, err); | 619 | s5p_mfc_handle_frame(ctx, reason, err); |
634 | else | 620 | else |
635 | s5p_mfc_handle_error(ctx, reason, err); | 621 | s5p_mfc_handle_error(dev, ctx, reason, err); |
636 | clear_bit(0, &dev->enter_suspend); | 622 | clear_bit(0, &dev->enter_suspend); |
637 | break; | 623 | break; |
638 | 624 | ||
diff --git a/drivers/media/usb/gspca/kinect.c b/drivers/media/usb/gspca/kinect.c index 40ad6687ee5d..3773a8a745df 100644 --- a/drivers/media/usb/gspca/kinect.c +++ b/drivers/media/usb/gspca/kinect.c | |||
@@ -381,6 +381,7 @@ static const struct sd_desc sd_desc = { | |||
381 | /* -- module initialisation -- */ | 381 | /* -- module initialisation -- */ |
382 | static const struct usb_device_id device_table[] = { | 382 | static const struct usb_device_id device_table[] = { |
383 | {USB_DEVICE(0x045e, 0x02ae)}, | 383 | {USB_DEVICE(0x045e, 0x02ae)}, |
384 | {USB_DEVICE(0x045e, 0x02bf)}, | ||
384 | {} | 385 | {} |
385 | }; | 386 | }; |
386 | 387 | ||
diff --git a/drivers/media/usb/gspca/sonixb.c b/drivers/media/usb/gspca/sonixb.c index 70511d5f9538..1220340e7602 100644 --- a/drivers/media/usb/gspca/sonixb.c +++ b/drivers/media/usb/gspca/sonixb.c | |||
@@ -496,7 +496,7 @@ static void reg_w(struct gspca_dev *gspca_dev, | |||
496 | } | 496 | } |
497 | } | 497 | } |
498 | 498 | ||
499 | static void i2c_w(struct gspca_dev *gspca_dev, const __u8 *buffer) | 499 | static void i2c_w(struct gspca_dev *gspca_dev, const u8 *buf) |
500 | { | 500 | { |
501 | int retry = 60; | 501 | int retry = 60; |
502 | 502 | ||
@@ -504,16 +504,19 @@ static void i2c_w(struct gspca_dev *gspca_dev, const __u8 *buffer) | |||
504 | return; | 504 | return; |
505 | 505 | ||
506 | /* is i2c ready */ | 506 | /* is i2c ready */ |
507 | reg_w(gspca_dev, 0x08, buffer, 8); | 507 | reg_w(gspca_dev, 0x08, buf, 8); |
508 | while (retry--) { | 508 | while (retry--) { |
509 | if (gspca_dev->usb_err < 0) | 509 | if (gspca_dev->usb_err < 0) |
510 | return; | 510 | return; |
511 | msleep(10); | 511 | msleep(1); |
512 | reg_r(gspca_dev, 0x08); | 512 | reg_r(gspca_dev, 0x08); |
513 | if (gspca_dev->usb_buf[0] & 0x04) { | 513 | if (gspca_dev->usb_buf[0] & 0x04) { |
514 | if (gspca_dev->usb_buf[0] & 0x08) { | 514 | if (gspca_dev->usb_buf[0] & 0x08) { |
515 | dev_err(gspca_dev->v4l2_dev.dev, | 515 | dev_err(gspca_dev->v4l2_dev.dev, |
516 | "i2c write error\n"); | 516 | "i2c error writing %02x %02x %02x %02x" |
517 | " %02x %02x %02x %02x\n", | ||
518 | buf[0], buf[1], buf[2], buf[3], | ||
519 | buf[4], buf[5], buf[6], buf[7]); | ||
517 | gspca_dev->usb_err = -EIO; | 520 | gspca_dev->usb_err = -EIO; |
518 | } | 521 | } |
519 | return; | 522 | return; |
@@ -530,7 +533,7 @@ static void i2c_w_vector(struct gspca_dev *gspca_dev, | |||
530 | for (;;) { | 533 | for (;;) { |
531 | if (gspca_dev->usb_err < 0) | 534 | if (gspca_dev->usb_err < 0) |
532 | return; | 535 | return; |
533 | reg_w(gspca_dev, 0x08, *buffer, 8); | 536 | i2c_w(gspca_dev, *buffer); |
534 | len -= 8; | 537 | len -= 8; |
535 | if (len <= 0) | 538 | if (len <= 0) |
536 | break; | 539 | break; |
diff --git a/drivers/media/usb/gspca/sonixj.c b/drivers/media/usb/gspca/sonixj.c index 5a86047b846f..36307a9028a9 100644 --- a/drivers/media/usb/gspca/sonixj.c +++ b/drivers/media/usb/gspca/sonixj.c | |||
@@ -1550,6 +1550,7 @@ static void i2c_w1(struct gspca_dev *gspca_dev, u8 reg, u8 val) | |||
1550 | 0, | 1550 | 0, |
1551 | gspca_dev->usb_buf, 8, | 1551 | gspca_dev->usb_buf, 8, |
1552 | 500); | 1552 | 500); |
1553 | msleep(2); | ||
1553 | if (ret < 0) { | 1554 | if (ret < 0) { |
1554 | pr_err("i2c_w1 err %d\n", ret); | 1555 | pr_err("i2c_w1 err %d\n", ret); |
1555 | gspca_dev->usb_err = ret; | 1556 | gspca_dev->usb_err = ret; |
diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c index 2bb7613ddebb..d5baab17a5ef 100644 --- a/drivers/media/usb/uvc/uvc_ctrl.c +++ b/drivers/media/usb/uvc/uvc_ctrl.c | |||
@@ -1431,8 +1431,10 @@ int uvc_ctrl_set(struct uvc_video_chain *chain, | |||
1431 | int ret; | 1431 | int ret; |
1432 | 1432 | ||
1433 | ctrl = uvc_find_control(chain, xctrl->id, &mapping); | 1433 | ctrl = uvc_find_control(chain, xctrl->id, &mapping); |
1434 | if (ctrl == NULL || (ctrl->info.flags & UVC_CTRL_FLAG_SET_CUR) == 0) | 1434 | if (ctrl == NULL) |
1435 | return -EINVAL; | 1435 | return -EINVAL; |
1436 | if (!(ctrl->info.flags & UVC_CTRL_FLAG_SET_CUR)) | ||
1437 | return -EACCES; | ||
1436 | 1438 | ||
1437 | /* Clamp out of range values. */ | 1439 | /* Clamp out of range values. */ |
1438 | switch (mapping->v4l2_type) { | 1440 | switch (mapping->v4l2_type) { |
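
uvc_ctrl_set() now distinguishes the two failure cases it used to lump together: an unknown control id still yields -EINVAL, while a control that exists but lacks the SET_CUR capability yields -EACCES, telling the caller "present, but not writable". The same lookup-then-permission ordering in a standalone sketch; find_ctrl() and the flag name are invented:

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

#define CTRL_FLAG_WRITABLE (1u << 0)

struct ctrl { unsigned id; unsigned flags; };

static struct ctrl table[] = {
	{ .id = 1, .flags = CTRL_FLAG_WRITABLE },
	{ .id = 2, .flags = 0 },                 /* read-only control */
};

static struct ctrl *find_ctrl(unsigned id)
{
	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].id == id)
			return &table[i];
	return NULL;
}

static int ctrl_set(unsigned id, int value)
{
	struct ctrl *c = find_ctrl(id);

	if (c == NULL)
		return -EINVAL;                  /* no such control */
	if (!(c->flags & CTRL_FLAG_WRITABLE))
		return -EACCES;                  /* exists, but not writable */

	printf("ctrl %u set to %d\n", id, value);
	return 0;
}

int main(void)
{
	printf("%d %d %d\n", ctrl_set(1, 5), ctrl_set(2, 5), ctrl_set(3, 5));
	return 0;
}
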
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c index f2ee8c6b0d8d..68d59b527492 100644 --- a/drivers/media/usb/uvc/uvc_v4l2.c +++ b/drivers/media/usb/uvc/uvc_v4l2.c | |||
@@ -657,8 +657,7 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg) | |||
657 | ret = uvc_ctrl_get(chain, ctrl); | 657 | ret = uvc_ctrl_get(chain, ctrl); |
658 | if (ret < 0) { | 658 | if (ret < 0) { |
659 | uvc_ctrl_rollback(handle); | 659 | uvc_ctrl_rollback(handle); |
660 | ctrls->error_idx = ret == -ENOENT | 660 | ctrls->error_idx = i; |
661 | ? ctrls->count : i; | ||
662 | return ret; | 661 | return ret; |
663 | } | 662 | } |
664 | } | 663 | } |
@@ -686,8 +685,7 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg) | |||
686 | ret = uvc_ctrl_set(chain, ctrl); | 685 | ret = uvc_ctrl_set(chain, ctrl); |
687 | if (ret < 0) { | 686 | if (ret < 0) { |
688 | uvc_ctrl_rollback(handle); | 687 | uvc_ctrl_rollback(handle); |
689 | ctrls->error_idx = (ret == -ENOENT && | 688 | ctrls->error_idx = cmd == VIDIOC_S_EXT_CTRLS |
690 | cmd == VIDIOC_S_EXT_CTRLS) | ||
691 | ? ctrls->count : i; | 689 | ? ctrls->count : i; |
692 | return ret; | 690 | return ret; |
693 | } | 691 | } |
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c index 9f81be23a81f..e02c4797b1c6 100644 --- a/drivers/media/v4l2-core/videobuf2-core.c +++ b/drivers/media/v4l2-core/videobuf2-core.c | |||
@@ -921,8 +921,10 @@ static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b | |||
921 | * In videobuf we use our internal V4l2_planes struct for | 921 | * In videobuf we use our internal V4l2_planes struct for |
922 | * single-planar buffers as well, for simplicity. | 922 | * single-planar buffers as well, for simplicity. |
923 | */ | 923 | */ |
924 | if (V4L2_TYPE_IS_OUTPUT(b->type)) | 924 | if (V4L2_TYPE_IS_OUTPUT(b->type)) { |
925 | v4l2_planes[0].bytesused = b->bytesused; | 925 | v4l2_planes[0].bytesused = b->bytesused; |
926 | v4l2_planes[0].data_offset = 0; | ||
927 | } | ||
926 | 928 | ||
927 | if (b->memory == V4L2_MEMORY_USERPTR) { | 929 | if (b->memory == V4L2_MEMORY_USERPTR) { |
928 | v4l2_planes[0].m.userptr = b->m.userptr; | 930 | v4l2_planes[0].m.userptr = b->m.userptr; |
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c index 9ff942a346ed..83269f1d16e3 100644 --- a/drivers/misc/ti-st/st_kim.c +++ b/drivers/misc/ti-st/st_kim.c | |||
@@ -468,6 +468,11 @@ long st_kim_start(void *kim_data) | |||
468 | if (pdata->chip_enable) | 468 | if (pdata->chip_enable) |
469 | pdata->chip_enable(kim_gdata); | 469 | pdata->chip_enable(kim_gdata); |
470 | 470 | ||
471 | /* Configure BT nShutdown to HIGH state */ | ||
472 | gpio_set_value(kim_gdata->nshutdown, GPIO_LOW); | ||
473 | mdelay(5); /* FIXME: a proper toggle */ | ||
474 | gpio_set_value(kim_gdata->nshutdown, GPIO_HIGH); | ||
475 | mdelay(100); | ||
471 | /* re-initialize the completion */ | 476 | /* re-initialize the completion */ |
472 | INIT_COMPLETION(kim_gdata->ldisc_installed); | 477 | INIT_COMPLETION(kim_gdata->ldisc_installed); |
473 | /* send notification to UIM */ | 478 | /* send notification to UIM */ |
@@ -509,7 +514,8 @@ long st_kim_start(void *kim_data) | |||
509 | * (b) upon failure to either install ldisc or download firmware. | 514 | * (b) upon failure to either install ldisc or download firmware. |
510 | * The function is responsible to (a) notify UIM about un-installation, | 515 | * The function is responsible to (a) notify UIM about un-installation, |
511 | * (b) flush UART if the ldisc was installed. | 516 | * (b) flush UART if the ldisc was installed. |
512 | * (c) invoke platform's chip disabling routine. | 517 | * (c) reset BT_EN - pull down nshutdown at the end. |
518 | * (d) invoke platform's chip disabling routine. | ||
513 | */ | 519 | */ |
514 | long st_kim_stop(void *kim_data) | 520 | long st_kim_stop(void *kim_data) |
515 | { | 521 | { |
@@ -541,6 +547,13 @@ long st_kim_stop(void *kim_data) | |||
541 | err = -ETIMEDOUT; | 547 | err = -ETIMEDOUT; |
542 | } | 548 | } |
543 | 549 | ||
550 | /* By default configure BT nShutdown to LOW state */ | ||
551 | gpio_set_value(kim_gdata->nshutdown, GPIO_LOW); | ||
552 | mdelay(1); | ||
553 | gpio_set_value(kim_gdata->nshutdown, GPIO_HIGH); | ||
554 | mdelay(1); | ||
555 | gpio_set_value(kim_gdata->nshutdown, GPIO_LOW); | ||
556 | |||
544 | /* platform specific disable */ | 557 | /* platform specific disable */ |
545 | if (pdata->chip_disable) | 558 | if (pdata->chip_disable) |
546 | pdata->chip_disable(kim_gdata); | 559 | pdata->chip_disable(kim_gdata); |
@@ -733,6 +746,20 @@ static int kim_probe(struct platform_device *pdev) | |||
733 | /* refer to itself */ | 746 | /* refer to itself */ |
734 | kim_gdata->core_data->kim_data = kim_gdata; | 747 | kim_gdata->core_data->kim_data = kim_gdata; |
735 | 748 | ||
749 | /* Claim the chip enable nShutdown gpio from the system */ | ||
750 | kim_gdata->nshutdown = pdata->nshutdown_gpio; | ||
751 | err = gpio_request(kim_gdata->nshutdown, "kim"); | ||
752 | if (unlikely(err)) { | ||
753 | pr_err(" gpio %ld request failed ", kim_gdata->nshutdown); | ||
754 | return err; | ||
755 | } | ||
756 | |||
757 | /* Configure nShutdown GPIO as output=0 */ | ||
758 | err = gpio_direction_output(kim_gdata->nshutdown, 0); | ||
759 | if (unlikely(err)) { | ||
760 | pr_err(" unable to configure gpio %ld", kim_gdata->nshutdown); | ||
761 | return err; | ||
762 | } | ||
736 | /* get reference of pdev for request_firmware | 763 | /* get reference of pdev for request_firmware |
737 | */ | 764 | */ |
738 | kim_gdata->kim_pdev = pdev; | 765 | kim_gdata->kim_pdev = pdev; |
@@ -779,10 +806,18 @@ err_core_init: | |||
779 | 806 | ||
780 | static int kim_remove(struct platform_device *pdev) | 807 | static int kim_remove(struct platform_device *pdev) |
781 | { | 808 | { |
809 | /* free the GPIOs requested */ | ||
810 | struct ti_st_plat_data *pdata = pdev->dev.platform_data; | ||
782 | struct kim_data_s *kim_gdata; | 811 | struct kim_data_s *kim_gdata; |
783 | 812 | ||
784 | kim_gdata = dev_get_drvdata(&pdev->dev); | 813 | kim_gdata = dev_get_drvdata(&pdev->dev); |
785 | 814 | ||
815 | /* Free the Bluetooth/FM/GPIO | ||
816 | * nShutdown gpio from the system | ||
817 | */ | ||
818 | gpio_free(pdata->nshutdown_gpio); | ||
819 | pr_info("nshutdown GPIO Freed"); | ||
820 | |||
786 | debugfs_remove_recursive(kim_debugfs_dir); | 821 | debugfs_remove_recursive(kim_debugfs_dir); |
787 | sysfs_remove_group(&pdev->dev.kobj, &uim_attr_grp); | 822 | sysfs_remove_group(&pdev->dev.kobj, &uim_attr_grp); |
788 | pr_info("sysfs entries removed"); | 823 | pr_info("sysfs entries removed"); |
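
st_kim now drives the BT nShutdown line itself: kim_probe() claims the GPIO and configures it as an output held low, st_kim_start() pulses it low then high to power the chip up, st_kim_stop() leaves it low, and kim_remove() frees it again. A reduced request/configure/toggle/free sequence modelled with printf stubs; the *_stub helpers stand in for the legacy gpiolib calls and the GPIO number is hypothetical:

#include <stdio.h>

/* printf stand-ins for gpio_request()/gpio_direction_output()/
 * gpio_set_value()/gpio_free() as used in the hunks above */
static int gpio_request_stub(long gpio, const char *label)
{
	printf("request gpio %ld for %s\n", gpio, label);
	return 0;
}

static int gpio_direction_output_stub(long gpio, int value)
{
	printf("gpio %ld -> output, initial level %d\n", gpio, value);
	return 0;
}

static void gpio_set_value_stub(long gpio, int value)
{
	printf("gpio %ld = %d\n", gpio, value);
}

static void gpio_free_stub(long gpio)
{
	printf("free gpio %ld\n", gpio);
}

int main(void)
{
	long nshutdown = 12;                    /* hypothetical GPIO number */

	if (gpio_request_stub(nshutdown, "kim"))
		return 1;
	if (gpio_direction_output_stub(nshutdown, 0))
		return 1;

	/* power-up: pulse low, then hold high (st_kim_start) */
	gpio_set_value_stub(nshutdown, 0);
	gpio_set_value_stub(nshutdown, 1);

	/* power-down: leave the line low (st_kim_stop) */
	gpio_set_value_stub(nshutdown, 0);

	gpio_free_stub(nshutdown);
	return 0;
}
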
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h index 26ffd3e3fb74..2c113de94323 100644 --- a/drivers/pci/hotplug/pciehp.h +++ b/drivers/pci/hotplug/pciehp.h | |||
@@ -44,7 +44,6 @@ extern bool pciehp_poll_mode; | |||
44 | extern int pciehp_poll_time; | 44 | extern int pciehp_poll_time; |
45 | extern bool pciehp_debug; | 45 | extern bool pciehp_debug; |
46 | extern bool pciehp_force; | 46 | extern bool pciehp_force; |
47 | extern struct workqueue_struct *pciehp_wq; | ||
48 | 47 | ||
49 | #define dbg(format, arg...) \ | 48 | #define dbg(format, arg...) \ |
50 | do { \ | 49 | do { \ |
@@ -78,6 +77,7 @@ struct slot { | |||
78 | struct hotplug_slot *hotplug_slot; | 77 | struct hotplug_slot *hotplug_slot; |
79 | struct delayed_work work; /* work for button event */ | 78 | struct delayed_work work; /* work for button event */ |
80 | struct mutex lock; | 79 | struct mutex lock; |
80 | struct workqueue_struct *wq; | ||
81 | }; | 81 | }; |
82 | 82 | ||
83 | struct event_info { | 83 | struct event_info { |
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c index 916bf4f53aba..939bd1d4b5b1 100644 --- a/drivers/pci/hotplug/pciehp_core.c +++ b/drivers/pci/hotplug/pciehp_core.c | |||
@@ -42,7 +42,6 @@ bool pciehp_debug; | |||
42 | bool pciehp_poll_mode; | 42 | bool pciehp_poll_mode; |
43 | int pciehp_poll_time; | 43 | int pciehp_poll_time; |
44 | bool pciehp_force; | 44 | bool pciehp_force; |
45 | struct workqueue_struct *pciehp_wq; | ||
46 | 45 | ||
47 | #define DRIVER_VERSION "0.4" | 46 | #define DRIVER_VERSION "0.4" |
48 | #define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>" | 47 | #define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>" |
@@ -340,18 +339,13 @@ static int __init pcied_init(void) | |||
340 | { | 339 | { |
341 | int retval = 0; | 340 | int retval = 0; |
342 | 341 | ||
343 | pciehp_wq = alloc_workqueue("pciehp", 0, 0); | ||
344 | if (!pciehp_wq) | ||
345 | return -ENOMEM; | ||
346 | |||
347 | pciehp_firmware_init(); | 342 | pciehp_firmware_init(); |
348 | retval = pcie_port_service_register(&hpdriver_portdrv); | 343 | retval = pcie_port_service_register(&hpdriver_portdrv); |
349 | dbg("pcie_port_service_register = %d\n", retval); | 344 | dbg("pcie_port_service_register = %d\n", retval); |
350 | info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); | 345 | info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); |
351 | if (retval) { | 346 | if (retval) |
352 | destroy_workqueue(pciehp_wq); | ||
353 | dbg("Failure to register service\n"); | 347 | dbg("Failure to register service\n"); |
354 | } | 348 | |
355 | return retval; | 349 | return retval; |
356 | } | 350 | } |
357 | 351 | ||
@@ -359,7 +353,6 @@ static void __exit pcied_cleanup(void) | |||
359 | { | 353 | { |
360 | dbg("unload_pciehpd()\n"); | 354 | dbg("unload_pciehpd()\n"); |
361 | pcie_port_service_unregister(&hpdriver_portdrv); | 355 | pcie_port_service_unregister(&hpdriver_portdrv); |
362 | destroy_workqueue(pciehp_wq); | ||
363 | info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n"); | 356 | info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n"); |
364 | } | 357 | } |
365 | 358 | ||
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c index 27f44295a657..38f018679175 100644 --- a/drivers/pci/hotplug/pciehp_ctrl.c +++ b/drivers/pci/hotplug/pciehp_ctrl.c | |||
@@ -49,7 +49,7 @@ static int queue_interrupt_event(struct slot *p_slot, u32 event_type) | |||
49 | info->p_slot = p_slot; | 49 | info->p_slot = p_slot; |
50 | INIT_WORK(&info->work, interrupt_event_handler); | 50 | INIT_WORK(&info->work, interrupt_event_handler); |
51 | 51 | ||
52 | queue_work(pciehp_wq, &info->work); | 52 | queue_work(p_slot->wq, &info->work); |
53 | 53 | ||
54 | return 0; | 54 | return 0; |
55 | } | 55 | } |
@@ -344,7 +344,7 @@ void pciehp_queue_pushbutton_work(struct work_struct *work) | |||
344 | kfree(info); | 344 | kfree(info); |
345 | goto out; | 345 | goto out; |
346 | } | 346 | } |
347 | queue_work(pciehp_wq, &info->work); | 347 | queue_work(p_slot->wq, &info->work); |
348 | out: | 348 | out: |
349 | mutex_unlock(&p_slot->lock); | 349 | mutex_unlock(&p_slot->lock); |
350 | } | 350 | } |
@@ -377,7 +377,7 @@ static void handle_button_press_event(struct slot *p_slot) | |||
377 | if (ATTN_LED(ctrl)) | 377 | if (ATTN_LED(ctrl)) |
378 | pciehp_set_attention_status(p_slot, 0); | 378 | pciehp_set_attention_status(p_slot, 0); |
379 | 379 | ||
380 | queue_delayed_work(pciehp_wq, &p_slot->work, 5*HZ); | 380 | queue_delayed_work(p_slot->wq, &p_slot->work, 5*HZ); |
381 | break; | 381 | break; |
382 | case BLINKINGOFF_STATE: | 382 | case BLINKINGOFF_STATE: |
383 | case BLINKINGON_STATE: | 383 | case BLINKINGON_STATE: |
@@ -439,7 +439,7 @@ static void handle_surprise_event(struct slot *p_slot) | |||
439 | else | 439 | else |
440 | p_slot->state = POWERON_STATE; | 440 | p_slot->state = POWERON_STATE; |
441 | 441 | ||
442 | queue_work(pciehp_wq, &info->work); | 442 | queue_work(p_slot->wq, &info->work); |
443 | } | 443 | } |
444 | 444 | ||
445 | static void interrupt_event_handler(struct work_struct *work) | 445 | static void interrupt_event_handler(struct work_struct *work) |
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 13b2eaf7ba43..5127f3f41821 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c | |||
@@ -773,23 +773,32 @@ static void pcie_shutdown_notification(struct controller *ctrl) | |||
773 | static int pcie_init_slot(struct controller *ctrl) | 773 | static int pcie_init_slot(struct controller *ctrl) |
774 | { | 774 | { |
775 | struct slot *slot; | 775 | struct slot *slot; |
776 | char name[32]; | ||
776 | 777 | ||
777 | slot = kzalloc(sizeof(*slot), GFP_KERNEL); | 778 | slot = kzalloc(sizeof(*slot), GFP_KERNEL); |
778 | if (!slot) | 779 | if (!slot) |
779 | return -ENOMEM; | 780 | return -ENOMEM; |
780 | 781 | ||
782 | snprintf(name, sizeof(name), "pciehp-%u", PSN(ctrl)); | ||
783 | slot->wq = alloc_workqueue(name, 0, 0); | ||
784 | if (!slot->wq) | ||
785 | goto abort; | ||
786 | |||
781 | slot->ctrl = ctrl; | 787 | slot->ctrl = ctrl; |
782 | mutex_init(&slot->lock); | 788 | mutex_init(&slot->lock); |
783 | INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work); | 789 | INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work); |
784 | ctrl->slot = slot; | 790 | ctrl->slot = slot; |
785 | return 0; | 791 | return 0; |
792 | abort: | ||
793 | kfree(slot); | ||
794 | return -ENOMEM; | ||
786 | } | 795 | } |
787 | 796 | ||
788 | static void pcie_cleanup_slot(struct controller *ctrl) | 797 | static void pcie_cleanup_slot(struct controller *ctrl) |
789 | { | 798 | { |
790 | struct slot *slot = ctrl->slot; | 799 | struct slot *slot = ctrl->slot; |
791 | cancel_delayed_work(&slot->work); | 800 | cancel_delayed_work(&slot->work); |
792 | flush_workqueue(pciehp_wq); | 801 | destroy_workqueue(slot->wq); |
793 | kfree(slot); | 802 | kfree(slot); |
794 | } | 803 | } |
795 | 804 | ||
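
pciehp (and shpchp, in the hunks that follow) stops sharing one module-wide workqueue: pcie_init_slot() allocates a queue named after the slot, button and interrupt events are queued on p_slot->wq, and pcie_cleanup_slot() destroys only that slot's queue, so the module init/exit paths no longer create or flush a global queue. A reduced sketch of tying the work context to the object it serves; struct slot, alloc_queue() and destroy_queue() are stand-ins, not the kernel workqueue API:

#include <stdio.h>
#include <stdlib.h>

/* stand-ins for alloc_workqueue()/destroy_workqueue() */
struct workqueue { char name[32]; };

static struct workqueue *alloc_queue(const char *name)
{
	struct workqueue *wq = malloc(sizeof(*wq));

	if (wq)
		snprintf(wq->name, sizeof(wq->name), "%s", name);
	return wq;
}

static void destroy_queue(struct workqueue *wq)
{
	free(wq);
}

struct slot {
	unsigned number;
	struct workqueue *wq;   /* per-slot queue instead of a module-wide one */
};

static int slot_init(struct slot *slot, unsigned number)
{
	char name[32];

	slot->number = number;
	snprintf(name, sizeof(name), "hp-slot-%u", number);
	slot->wq = alloc_queue(name);
	return slot->wq ? 0 : -1;
}

static void slot_cleanup(struct slot *slot)
{
	destroy_queue(slot->wq);        /* only this slot's work is affected */
	slot->wq = NULL;
}

int main(void)
{
	struct slot s;

	if (slot_init(&s, 3) == 0) {
		printf("slot %u queues its events on \"%s\"\n", s.number, s.wq->name);
		slot_cleanup(&s);
	}
	return 0;
}
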
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h index ca64932e658b..b849f995075a 100644 --- a/drivers/pci/hotplug/shpchp.h +++ b/drivers/pci/hotplug/shpchp.h | |||
@@ -46,8 +46,6 @@ | |||
46 | extern bool shpchp_poll_mode; | 46 | extern bool shpchp_poll_mode; |
47 | extern int shpchp_poll_time; | 47 | extern int shpchp_poll_time; |
48 | extern bool shpchp_debug; | 48 | extern bool shpchp_debug; |
49 | extern struct workqueue_struct *shpchp_wq; | ||
50 | extern struct workqueue_struct *shpchp_ordered_wq; | ||
51 | 49 | ||
52 | #define dbg(format, arg...) \ | 50 | #define dbg(format, arg...) \ |
53 | do { \ | 51 | do { \ |
@@ -91,6 +89,7 @@ struct slot { | |||
91 | struct list_head slot_list; | 89 | struct list_head slot_list; |
92 | struct delayed_work work; /* work for button event */ | 90 | struct delayed_work work; /* work for button event */ |
93 | struct mutex lock; | 91 | struct mutex lock; |
92 | struct workqueue_struct *wq; | ||
94 | u8 hp_slot; | 93 | u8 hp_slot; |
95 | }; | 94 | }; |
96 | 95 | ||
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c index b6de307248e4..3100c52c837c 100644 --- a/drivers/pci/hotplug/shpchp_core.c +++ b/drivers/pci/hotplug/shpchp_core.c | |||
@@ -39,8 +39,6 @@ | |||
39 | bool shpchp_debug; | 39 | bool shpchp_debug; |
40 | bool shpchp_poll_mode; | 40 | bool shpchp_poll_mode; |
41 | int shpchp_poll_time; | 41 | int shpchp_poll_time; |
42 | struct workqueue_struct *shpchp_wq; | ||
43 | struct workqueue_struct *shpchp_ordered_wq; | ||
44 | 42 | ||
45 | #define DRIVER_VERSION "0.4" | 43 | #define DRIVER_VERSION "0.4" |
46 | #define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>" | 44 | #define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>" |
@@ -129,6 +127,14 @@ static int init_slots(struct controller *ctrl) | |||
129 | slot->device = ctrl->slot_device_offset + i; | 127 | slot->device = ctrl->slot_device_offset + i; |
130 | slot->hpc_ops = ctrl->hpc_ops; | 128 | slot->hpc_ops = ctrl->hpc_ops; |
131 | slot->number = ctrl->first_slot + (ctrl->slot_num_inc * i); | 129 | slot->number = ctrl->first_slot + (ctrl->slot_num_inc * i); |
130 | |||
131 | snprintf(name, sizeof(name), "shpchp-%d", slot->number); | ||
132 | slot->wq = alloc_workqueue(name, 0, 0); | ||
133 | if (!slot->wq) { | ||
134 | retval = -ENOMEM; | ||
135 | goto error_info; | ||
136 | } | ||
137 | |||
132 | mutex_init(&slot->lock); | 138 | mutex_init(&slot->lock); |
133 | INIT_DELAYED_WORK(&slot->work, shpchp_queue_pushbutton_work); | 139 | INIT_DELAYED_WORK(&slot->work, shpchp_queue_pushbutton_work); |
134 | 140 | ||
@@ -148,7 +154,7 @@ static int init_slots(struct controller *ctrl) | |||
148 | if (retval) { | 154 | if (retval) { |
149 | ctrl_err(ctrl, "pci_hp_register failed with error %d\n", | 155 | ctrl_err(ctrl, "pci_hp_register failed with error %d\n", |
150 | retval); | 156 | retval); |
151 | goto error_info; | 157 | goto error_slotwq; |
152 | } | 158 | } |
153 | 159 | ||
154 | get_power_status(hotplug_slot, &info->power_status); | 160 | get_power_status(hotplug_slot, &info->power_status); |
@@ -160,6 +166,8 @@ static int init_slots(struct controller *ctrl) | |||
160 | } | 166 | } |
161 | 167 | ||
162 | return 0; | 168 | return 0; |
169 | error_slotwq: | ||
170 | destroy_workqueue(slot->wq); | ||
163 | error_info: | 171 | error_info: |
164 | kfree(info); | 172 | kfree(info); |
165 | error_hpslot: | 173 | error_hpslot: |
@@ -180,8 +188,7 @@ void cleanup_slots(struct controller *ctrl) | |||
180 | slot = list_entry(tmp, struct slot, slot_list); | 188 | slot = list_entry(tmp, struct slot, slot_list); |
181 | list_del(&slot->slot_list); | 189 | list_del(&slot->slot_list); |
182 | cancel_delayed_work(&slot->work); | 190 | cancel_delayed_work(&slot->work); |
183 | flush_workqueue(shpchp_wq); | 191 | destroy_workqueue(slot->wq); |
184 | flush_workqueue(shpchp_ordered_wq); | ||
185 | pci_hp_deregister(slot->hotplug_slot); | 192 | pci_hp_deregister(slot->hotplug_slot); |
186 | } | 193 | } |
187 | } | 194 | } |
@@ -364,25 +371,12 @@ static struct pci_driver shpc_driver = { | |||
364 | 371 | ||
365 | static int __init shpcd_init(void) | 372 | static int __init shpcd_init(void) |
366 | { | 373 | { |
367 | int retval = 0; | 374 | int retval; |
368 | |||
369 | shpchp_wq = alloc_ordered_workqueue("shpchp", 0); | ||
370 | if (!shpchp_wq) | ||
371 | return -ENOMEM; | ||
372 | |||
373 | shpchp_ordered_wq = alloc_ordered_workqueue("shpchp_ordered", 0); | ||
374 | if (!shpchp_ordered_wq) { | ||
375 | destroy_workqueue(shpchp_wq); | ||
376 | return -ENOMEM; | ||
377 | } | ||
378 | 375 | ||
379 | retval = pci_register_driver(&shpc_driver); | 376 | retval = pci_register_driver(&shpc_driver); |
380 | dbg("%s: pci_register_driver = %d\n", __func__, retval); | 377 | dbg("%s: pci_register_driver = %d\n", __func__, retval); |
381 | info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); | 378 | info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); |
382 | if (retval) { | 379 | |
383 | destroy_workqueue(shpchp_ordered_wq); | ||
384 | destroy_workqueue(shpchp_wq); | ||
385 | } | ||
386 | return retval; | 380 | return retval; |
387 | } | 381 | } |
388 | 382 | ||
@@ -390,8 +384,6 @@ static void __exit shpcd_cleanup(void) | |||
390 | { | 384 | { |
391 | dbg("unload_shpchpd()\n"); | 385 | dbg("unload_shpchpd()\n"); |
392 | pci_unregister_driver(&shpc_driver); | 386 | pci_unregister_driver(&shpc_driver); |
393 | destroy_workqueue(shpchp_ordered_wq); | ||
394 | destroy_workqueue(shpchp_wq); | ||
395 | info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n"); | 387 | info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n"); |
396 | } | 388 | } |
397 | 389 | ||
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c index f9b5a52e4115..58499277903a 100644 --- a/drivers/pci/hotplug/shpchp_ctrl.c +++ b/drivers/pci/hotplug/shpchp_ctrl.c | |||
@@ -51,7 +51,7 @@ static int queue_interrupt_event(struct slot *p_slot, u32 event_type) | |||
51 | info->p_slot = p_slot; | 51 | info->p_slot = p_slot; |
52 | INIT_WORK(&info->work, interrupt_event_handler); | 52 | INIT_WORK(&info->work, interrupt_event_handler); |
53 | 53 | ||
54 | queue_work(shpchp_wq, &info->work); | 54 | queue_work(p_slot->wq, &info->work); |
55 | 55 | ||
56 | return 0; | 56 | return 0; |
57 | } | 57 | } |
@@ -453,7 +453,7 @@ void shpchp_queue_pushbutton_work(struct work_struct *work) | |||
453 | kfree(info); | 453 | kfree(info); |
454 | goto out; | 454 | goto out; |
455 | } | 455 | } |
456 | queue_work(shpchp_ordered_wq, &info->work); | 456 | queue_work(p_slot->wq, &info->work); |
457 | out: | 457 | out: |
458 | mutex_unlock(&p_slot->lock); | 458 | mutex_unlock(&p_slot->lock); |
459 | } | 459 | } |
@@ -501,7 +501,7 @@ static void handle_button_press_event(struct slot *p_slot) | |||
501 | p_slot->hpc_ops->green_led_blink(p_slot); | 501 | p_slot->hpc_ops->green_led_blink(p_slot); |
502 | p_slot->hpc_ops->set_attention_status(p_slot, 0); | 502 | p_slot->hpc_ops->set_attention_status(p_slot, 0); |
503 | 503 | ||
504 | queue_delayed_work(shpchp_wq, &p_slot->work, 5*HZ); | 504 | queue_delayed_work(p_slot->wq, &p_slot->work, 5*HZ); |
505 | break; | 505 | break; |
506 | case BLINKINGOFF_STATE: | 506 | case BLINKINGOFF_STATE: |
507 | case BLINKINGON_STATE: | 507 | case BLINKINGON_STATE: |
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig index 6c8bc5809787..fde4a32a0295 100644 --- a/drivers/pci/pcie/Kconfig +++ b/drivers/pci/pcie/Kconfig | |||
@@ -82,4 +82,4 @@ endchoice | |||
82 | 82 | ||
83 | config PCIE_PME | 83 | config PCIE_PME |
84 | def_bool y | 84 | def_bool y |
85 | depends on PCIEPORTBUS && PM_RUNTIME && EXPERIMENTAL && ACPI | 85 | depends on PCIEPORTBUS && PM_RUNTIME && ACPI |
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c index 421bbc5fee32..564d97f94b6c 100644 --- a/drivers/pci/pcie/aer/aerdrv_core.c +++ b/drivers/pci/pcie/aer/aerdrv_core.c | |||
@@ -630,6 +630,7 @@ static void aer_recover_work_func(struct work_struct *work) | |||
630 | continue; | 630 | continue; |
631 | } | 631 | } |
632 | do_recovery(pdev, entry.severity); | 632 | do_recovery(pdev, entry.severity); |
633 | pci_dev_put(pdev); | ||
633 | } | 634 | } |
634 | } | 635 | } |
635 | #endif | 636 | #endif |
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index b52630b8eada..8474b6a4fc9b 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c | |||
@@ -771,6 +771,9 @@ void pcie_clear_aspm(struct pci_bus *bus) | |||
771 | { | 771 | { |
772 | struct pci_dev *child; | 772 | struct pci_dev *child; |
773 | 773 | ||
774 | if (aspm_force) | ||
775 | return; | ||
776 | |||
774 | /* | 777 | /* |
775 | * Clear any ASPM setup that the firmware has carried out on this bus | 778 | * Clear any ASPM setup that the firmware has carried out on this bus |
776 | */ | 779 | */ |
diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c index fb31b457a56a..c5ceb9d90ea8 100644 --- a/drivers/staging/iio/adc/mxs-lradc.c +++ b/drivers/staging/iio/adc/mxs-lradc.c | |||
@@ -239,7 +239,7 @@ static irqreturn_t mxs_lradc_trigger_handler(int irq, void *p) | |||
239 | struct mxs_lradc *lradc = iio_priv(iio); | 239 | struct mxs_lradc *lradc = iio_priv(iio); |
240 | const uint32_t chan_value = LRADC_CH_ACCUMULATE | | 240 | const uint32_t chan_value = LRADC_CH_ACCUMULATE | |
241 | ((LRADC_DELAY_TIMER_LOOP - 1) << LRADC_CH_NUM_SAMPLES_OFFSET); | 241 | ((LRADC_DELAY_TIMER_LOOP - 1) << LRADC_CH_NUM_SAMPLES_OFFSET); |
242 | int i, j = 0; | 242 | unsigned int i, j = 0; |
243 | 243 | ||
244 | for_each_set_bit(i, iio->active_scan_mask, iio->masklength) { | 244 | for_each_set_bit(i, iio->active_scan_mask, iio->masklength) { |
245 | lradc->buffer[j] = readl(lradc->base + LRADC_CH(j)); | 245 | lradc->buffer[j] = readl(lradc->base + LRADC_CH(j)); |
diff --git a/drivers/staging/iio/gyro/adis16080_core.c b/drivers/staging/iio/gyro/adis16080_core.c index 3525a68d6a75..41d7350d030f 100644 --- a/drivers/staging/iio/gyro/adis16080_core.c +++ b/drivers/staging/iio/gyro/adis16080_core.c | |||
@@ -69,7 +69,7 @@ static int adis16080_spi_read(struct iio_dev *indio_dev, | |||
69 | ret = spi_read(st->us, st->buf, 2); | 69 | ret = spi_read(st->us, st->buf, 2); |
70 | 70 | ||
71 | if (ret == 0) | 71 | if (ret == 0) |
72 | *val = ((st->buf[0] & 0xF) << 8) | st->buf[1]; | 72 | *val = sign_extend32(((st->buf[0] & 0xF) << 8) | st->buf[1], 11); |
73 | mutex_unlock(&st->buf_lock); | 73 | mutex_unlock(&st->buf_lock); |
74 | 74 | ||
75 | return ret; | 75 | return ret; |
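
The adis16080 read now feeds the raw 12-bit field through sign_extend32(..., 11), so bit 11 is treated as the sign bit and negative gyro samples come back as negative integers rather than large positive ones. A standalone equivalent of that helper, using the usual shift-up/arithmetic-shift-down idiom (the kernel's own definition may differ in detail):

#include <stdint.h>
#include <stdio.h>

/* treat bit 'sign_bit' as the sign bit of 'value' and extend it to 32 bits */
static int32_t sign_extend_32(uint32_t value, unsigned sign_bit)
{
	unsigned shift = 31 - sign_bit;

	return (int32_t)(value << shift) >> shift;
}

int main(void)
{
	/* 12-bit two's-complement samples: 0xFFF is -1, 0x800 is -2048 */
	printf("%d %d %d\n",
	       sign_extend_32(0xFFF, 11),
	       sign_extend_32(0x800, 11),
	       sign_extend_32(0x7FF, 11));      /* 2047 stays positive */
	return 0;
}
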
diff --git a/drivers/staging/sb105x/sb_pci_mp.c b/drivers/staging/sb105x/sb_pci_mp.c index 131afd0c460c..9464f3874346 100644 --- a/drivers/staging/sb105x/sb_pci_mp.c +++ b/drivers/staging/sb105x/sb_pci_mp.c | |||
@@ -3054,7 +3054,7 @@ static int init_mp_dev(struct pci_dev *pcidev, mppcibrd_t brd) | |||
3054 | sbdev->nr_ports = ((portnum_hex/16)*10) + (portnum_hex % 16); | 3054 | sbdev->nr_ports = ((portnum_hex/16)*10) + (portnum_hex % 16); |
3055 | } | 3055 | } |
3056 | break; | 3056 | break; |
3057 | #ifdef CONFIG_PARPORT | 3057 | #ifdef CONFIG_PARPORT_PC |
3058 | case PCI_DEVICE_ID_MP2S1P : | 3058 | case PCI_DEVICE_ID_MP2S1P : |
3059 | sbdev->nr_ports = 2; | 3059 | sbdev->nr_ports = 2; |
3060 | 3060 | ||
diff --git a/drivers/staging/vt6656/bssdb.h b/drivers/staging/vt6656/bssdb.h index 6b2ec390e775..806cbf72fb59 100644 --- a/drivers/staging/vt6656/bssdb.h +++ b/drivers/staging/vt6656/bssdb.h | |||
@@ -90,7 +90,6 @@ typedef struct tagSRSNCapObject { | |||
90 | } SRSNCapObject, *PSRSNCapObject; | 90 | } SRSNCapObject, *PSRSNCapObject; |
91 | 91 | ||
92 | // BSS info(AP) | 92 | // BSS info(AP) |
93 | #pragma pack(1) | ||
94 | typedef struct tagKnownBSS { | 93 | typedef struct tagKnownBSS { |
95 | // BSS info | 94 | // BSS info |
96 | BOOL bActive; | 95 | BOOL bActive; |
diff --git a/drivers/staging/vt6656/int.h b/drivers/staging/vt6656/int.h index 5d8faf9f96ec..e0d2b07ba608 100644 --- a/drivers/staging/vt6656/int.h +++ b/drivers/staging/vt6656/int.h | |||
@@ -34,7 +34,6 @@ | |||
34 | #include "device.h" | 34 | #include "device.h" |
35 | 35 | ||
36 | /*--------------------- Export Definitions -------------------------*/ | 36 | /*--------------------- Export Definitions -------------------------*/ |
37 | #pragma pack(1) | ||
38 | typedef struct tagSINTData { | 37 | typedef struct tagSINTData { |
39 | BYTE byTSR0; | 38 | BYTE byTSR0; |
40 | BYTE byPkt0; | 39 | BYTE byPkt0; |
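
The two vt6656 hunks above remove the file-wide #pragma pack(1) directives; in the iocmd.h hunk that follows, each ioctl structure instead carries the __packed attribute directly, so the no-padding layout is requested per structure rather than relying on a pragma whose effect extends past the declarations it was meant for. A short demonstration of what packing changes; the struct names are invented and the exact size difference depends on the ABI:

#include <stdint.h>
#include <stdio.h>

/* without the attribute the compiler may pad between members */
struct unpacked_sample {
	uint8_t  type;
	uint32_t value;
};

/* __attribute__((packed)) (spelled __packed in the kernel) removes the
 * padding, which fixed ioctl/wire layouts rely on */
struct packed_sample {
	uint8_t  type;
	uint32_t value;
} __attribute__((packed));

int main(void)
{
	printf("unpacked: %zu bytes, packed: %zu bytes\n",
	       sizeof(struct unpacked_sample), sizeof(struct packed_sample));
	/* typically prints "unpacked: 8 bytes, packed: 5 bytes" */
	return 0;
}
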
diff --git a/drivers/staging/vt6656/iocmd.h b/drivers/staging/vt6656/iocmd.h index 22710cef751d..ae6e2d237b20 100644 --- a/drivers/staging/vt6656/iocmd.h +++ b/drivers/staging/vt6656/iocmd.h | |||
@@ -95,13 +95,12 @@ typedef enum tagWZONETYPE { | |||
95 | // Ioctl interface structure | 95 | // Ioctl interface structure |
96 | // Command structure | 96 | // Command structure |
97 | // | 97 | // |
98 | #pragma pack(1) | ||
99 | typedef struct tagSCmdRequest { | 98 | typedef struct tagSCmdRequest { |
100 | u8 name[16]; | 99 | u8 name[16]; |
101 | void *data; | 100 | void *data; |
102 | u16 wResult; | 101 | u16 wResult; |
103 | u16 wCmdCode; | 102 | u16 wCmdCode; |
104 | } SCmdRequest, *PSCmdRequest; | 103 | } __packed SCmdRequest, *PSCmdRequest; |
105 | 104 | ||
106 | // | 105 | // |
107 | // Scan | 106 | // Scan |
@@ -111,7 +110,7 @@ typedef struct tagSCmdScan { | |||
111 | 110 | ||
112 | u8 ssid[SSID_MAXLEN + 2]; | 111 | u8 ssid[SSID_MAXLEN + 2]; |
113 | 112 | ||
114 | } SCmdScan, *PSCmdScan; | 113 | } __packed SCmdScan, *PSCmdScan; |
115 | 114 | ||
116 | // | 115 | // |
117 | // BSS Join | 116 | // BSS Join |
@@ -126,7 +125,7 @@ typedef struct tagSCmdBSSJoin { | |||
126 | BOOL bPSEnable; | 125 | BOOL bPSEnable; |
127 | BOOL bShareKeyAuth; | 126 | BOOL bShareKeyAuth; |
128 | 127 | ||
129 | } SCmdBSSJoin, *PSCmdBSSJoin; | 128 | } __packed SCmdBSSJoin, *PSCmdBSSJoin; |
130 | 129 | ||
131 | // | 130 | // |
132 | // Zonetype Setting | 131 | // Zonetype Setting |
@@ -137,7 +136,7 @@ typedef struct tagSCmdZoneTypeSet { | |||
137 | BOOL bWrite; | 136 | BOOL bWrite; |
138 | WZONETYPE ZoneType; | 137 | WZONETYPE ZoneType; |
139 | 138 | ||
140 | } SCmdZoneTypeSet, *PSCmdZoneTypeSet; | 139 | } __packed SCmdZoneTypeSet, *PSCmdZoneTypeSet; |
141 | 140 | ||
142 | typedef struct tagSWPAResult { | 141 | typedef struct tagSWPAResult { |
143 | char ifname[100]; | 142 | char ifname[100]; |
@@ -145,7 +144,7 @@ typedef struct tagSWPAResult { | |||
145 | u8 key_mgmt; | 144 | u8 key_mgmt; |
146 | u8 eap_type; | 145 | u8 eap_type; |
147 | BOOL authenticated; | 146 | BOOL authenticated; |
148 | } SWPAResult, *PSWPAResult; | 147 | } __packed SWPAResult, *PSWPAResult; |
149 | 148 | ||
150 | typedef struct tagSCmdStartAP { | 149 | typedef struct tagSCmdStartAP { |
151 | 150 | ||
@@ -157,7 +156,7 @@ typedef struct tagSCmdStartAP { | |||
157 | BOOL bShareKeyAuth; | 156 | BOOL bShareKeyAuth; |
158 | u8 byBasicRate; | 157 | u8 byBasicRate; |
159 | 158 | ||
160 | } SCmdStartAP, *PSCmdStartAP; | 159 | } __packed SCmdStartAP, *PSCmdStartAP; |
161 | 160 | ||
162 | typedef struct tagSCmdSetWEP { | 161 | typedef struct tagSCmdSetWEP { |
163 | 162 | ||
@@ -167,7 +166,7 @@ typedef struct tagSCmdSetWEP { | |||
167 | BOOL bWepKeyAvailable[WEP_NKEYS]; | 166 | BOOL bWepKeyAvailable[WEP_NKEYS]; |
168 | u32 auWepKeyLength[WEP_NKEYS]; | 167 | u32 auWepKeyLength[WEP_NKEYS]; |
169 | 168 | ||
170 | } SCmdSetWEP, *PSCmdSetWEP; | 169 | } __packed SCmdSetWEP, *PSCmdSetWEP; |
171 | 170 | ||
172 | typedef struct tagSBSSIDItem { | 171 | typedef struct tagSBSSIDItem { |
173 | 172 | ||
@@ -180,14 +179,14 @@ typedef struct tagSBSSIDItem { | |||
180 | BOOL bWEPOn; | 179 | BOOL bWEPOn; |
181 | u32 uRSSI; | 180 | u32 uRSSI; |
182 | 181 | ||
183 | } SBSSIDItem; | 182 | } __packed SBSSIDItem; |
184 | 183 | ||
185 | 184 | ||
186 | typedef struct tagSBSSIDList { | 185 | typedef struct tagSBSSIDList { |
187 | 186 | ||
188 | u32 uItem; | 187 | u32 uItem; |
189 | SBSSIDItem sBSSIDList[0]; | 188 | SBSSIDItem sBSSIDList[0]; |
190 | } SBSSIDList, *PSBSSIDList; | 189 | } __packed SBSSIDList, *PSBSSIDList; |
191 | 190 | ||
192 | 191 | ||
193 | typedef struct tagSNodeItem { | 192 | typedef struct tagSNodeItem { |
@@ -208,7 +207,7 @@ typedef struct tagSNodeItem { | |||
208 | u32 uTxAttempts; | 207 | u32 uTxAttempts; |
209 | u16 wFailureRatio; | 208 | u16 wFailureRatio; |
210 | 209 | ||
211 | } SNodeItem; | 210 | } __packed SNodeItem; |
212 | 211 | ||
213 | 212 | ||
214 | typedef struct tagSNodeList { | 213 | typedef struct tagSNodeList { |
@@ -216,7 +215,7 @@ typedef struct tagSNodeList { | |||
216 | u32 uItem; | 215 | u32 uItem; |
217 | SNodeItem sNodeList[0]; | 216 | SNodeItem sNodeList[0]; |
218 | 217 | ||
219 | } SNodeList, *PSNodeList; | 218 | } __packed SNodeList, *PSNodeList; |
220 | 219 | ||
221 | 220 | ||
222 | typedef struct tagSCmdLinkStatus { | 221 | typedef struct tagSCmdLinkStatus { |
@@ -229,7 +228,7 @@ typedef struct tagSCmdLinkStatus { | |||
229 | u32 uChannel; | 228 | u32 uChannel; |
230 | u32 uLinkRate; | 229 | u32 uLinkRate; |
231 | 230 | ||
232 | } SCmdLinkStatus, *PSCmdLinkStatus; | 231 | } __packed SCmdLinkStatus, *PSCmdLinkStatus; |
233 | 232 | ||
234 | // | 233 | // |
235 | // 802.11 counter | 234 | // 802.11 counter |
@@ -247,7 +246,7 @@ typedef struct tagSDot11MIBCount { | |||
247 | u32 ReceivedFragmentCount; | 246 | u32 ReceivedFragmentCount; |
248 | u32 MulticastReceivedFrameCount; | 247 | u32 MulticastReceivedFrameCount; |
249 | u32 FCSErrorCount; | 248 | u32 FCSErrorCount; |
250 | } SDot11MIBCount, *PSDot11MIBCount; | 249 | } __packed SDot11MIBCount, *PSDot11MIBCount; |
251 | 250 | ||
252 | 251 | ||
253 | 252 | ||
@@ -355,13 +354,13 @@ typedef struct tagSStatMIBCount { | |||
355 | u32 ullTxBroadcastBytes[2]; | 354 | u32 ullTxBroadcastBytes[2]; |
356 | u32 ullTxMulticastBytes[2]; | 355 | u32 ullTxMulticastBytes[2]; |
357 | u32 ullTxDirectedBytes[2]; | 356 | u32 ullTxDirectedBytes[2]; |
358 | } SStatMIBCount, *PSStatMIBCount; | 357 | } __packed SStatMIBCount, *PSStatMIBCount; |
359 | 358 | ||
360 | typedef struct tagSCmdValue { | 359 | typedef struct tagSCmdValue { |
361 | 360 | ||
362 | u32 dwValue; | 361 | u32 dwValue; |
363 | 362 | ||
364 | } SCmdValue, *PSCmdValue; | 363 | } __packed SCmdValue, *PSCmdValue; |
365 | 364 | ||
366 | // | 365 | // |
367 | // hostapd & viawget ioctl related | 366 | // hostapd & viawget ioctl related |
@@ -431,7 +430,7 @@ struct viawget_hostapd_param { | |||
431 | u8 ssid[32]; | 430 | u8 ssid[32]; |
432 | } scan_req; | 431 | } scan_req; |
433 | } u; | 432 | } u; |
434 | }; | 433 | } __packed; |
435 | 434 | ||
436 | /*--------------------- Export Classes ----------------------------*/ | 435 | /*--------------------- Export Classes ----------------------------*/ |
437 | 436 | ||
diff --git a/drivers/staging/vt6656/iowpa.h b/drivers/staging/vt6656/iowpa.h index 959c8868f6e2..2522ddec718d 100644 --- a/drivers/staging/vt6656/iowpa.h +++ b/drivers/staging/vt6656/iowpa.h | |||
@@ -67,12 +67,11 @@ enum { | |||
67 | 67 | ||
68 | 68 | ||
69 | 69 | ||
70 | #pragma pack(1) | ||
71 | typedef struct viawget_wpa_header { | 70 | typedef struct viawget_wpa_header { |
72 | u8 type; | 71 | u8 type; |
73 | u16 req_ie_len; | 72 | u16 req_ie_len; |
74 | u16 resp_ie_len; | 73 | u16 resp_ie_len; |
75 | } viawget_wpa_header; | 74 | } __packed viawget_wpa_header; |
76 | 75 | ||
77 | struct viawget_wpa_param { | 76 | struct viawget_wpa_param { |
78 | u32 cmd; | 77 | u32 cmd; |
@@ -113,9 +112,8 @@ struct viawget_wpa_param { | |||
113 | u8 *buf; | 112 | u8 *buf; |
114 | } scan_results; | 113 | } scan_results; |
115 | } u; | 114 | } u; |
116 | }; | 115 | } __packed; |
117 | 116 | ||
118 | #pragma pack(1) | ||
119 | struct viawget_scan_result { | 117 | struct viawget_scan_result { |
120 | u8 bssid[6]; | 118 | u8 bssid[6]; |
121 | u8 ssid[32]; | 119 | u8 ssid[32]; |
@@ -130,7 +128,7 @@ struct viawget_scan_result { | |||
130 | int noise; | 128 | int noise; |
131 | int level; | 129 | int level; |
132 | int maxrate; | 130 | int maxrate; |
133 | }; | 131 | } __packed; |
134 | 132 | ||
135 | /*--------------------- Export Classes ----------------------------*/ | 133 | /*--------------------- Export Classes ----------------------------*/ |
136 | 134 | ||
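Editor's note: the vt6656 hunks above replace a file-wide #pragma pack(1) with a per-struct __packed annotation, which confines the packing to the structures that actually need a fixed ioctl/wire layout instead of silently affecting everything declared after the pragma. A minimal userspace sketch of the difference; the struct name and fields mirror viawget_wpa_header, and the #define only stands in for the kernel's __packed macro:

#include <stdio.h>
#include <stdint.h>

#define __packed __attribute__((packed))        /* the kernel defines this */

struct wpa_header_natural {     /* natural alignment: padding after 'type' */
        uint8_t  type;
        uint16_t req_ie_len;
        uint16_t resp_ie_len;
};

struct wpa_header_packed {      /* fixed 5-byte layout for the ioctl ABI */
        uint8_t  type;
        uint16_t req_ie_len;
        uint16_t resp_ie_len;
} __packed;

int main(void)
{
        printf("natural: %zu bytes, packed: %zu bytes\n",
               sizeof(struct wpa_header_natural),
               sizeof(struct wpa_header_packed));
        return 0;
}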
diff --git a/drivers/staging/wlan-ng/prism2mgmt.c b/drivers/staging/wlan-ng/prism2mgmt.c index 4efa9bc0fcf0..89bfd858bb28 100644 --- a/drivers/staging/wlan-ng/prism2mgmt.c +++ b/drivers/staging/wlan-ng/prism2mgmt.c | |||
@@ -406,7 +406,7 @@ int prism2mgmt_scan_results(wlandevice_t *wlandev, void *msgp) | |||
406 | /* SSID */ | 406 | /* SSID */ |
407 | req->ssid.status = P80211ENUM_msgitem_status_data_ok; | 407 | req->ssid.status = P80211ENUM_msgitem_status_data_ok; |
408 | req->ssid.data.len = le16_to_cpu(item->ssid.len); | 408 | req->ssid.data.len = le16_to_cpu(item->ssid.len); |
409 | req->ssid.data.len = min_t(u16, req->ssid.data.len, WLAN_BSSID_LEN); | 409 | req->ssid.data.len = min_t(u16, req->ssid.data.len, WLAN_SSID_MAXLEN); |
410 | memcpy(req->ssid.data.data, item->ssid.data, req->ssid.data.len); | 410 | memcpy(req->ssid.data.data, item->ssid.data, req->ssid.data.len); |
411 | 411 | ||
412 | /* supported rates */ | 412 | /* supported rates */ |
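Editor's note: the one-line prism2mgmt change clamps the reported SSID length against the SSID buffer size rather than the 6-byte BSSID length, so an over-long length field in a beacon can no longer overrun req->ssid.data.data. A userspace sketch of the intent; the 32-byte constant is restated here and assumed to match the p80211 WLAN_SSID_MAXLEN:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define WLAN_SSID_MAXLEN 32     /* assumed to match the p80211 definition */

static size_t copy_ssid(uint8_t *dst, const uint8_t *src, size_t reported_len)
{
        size_t len = reported_len < WLAN_SSID_MAXLEN ?
                     reported_len : WLAN_SSID_MAXLEN;

        memcpy(dst, src, len);          /* never more than the dst buffer */
        return len;
}

int main(void)
{
        uint8_t dst[WLAN_SSID_MAXLEN];
        const uint8_t src[64] = "a-very-long-ssid-from-a-hostile-beacon";

        printf("copied %zu bytes\n", copy_ssid(dst, src, sizeof(src)));
        return 0;
}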
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c index be6a373601b7..79ff3a5e925d 100644 --- a/drivers/tty/pty.c +++ b/drivers/tty/pty.c | |||
@@ -441,6 +441,8 @@ static int pty_bsd_ioctl(struct tty_struct *tty, | |||
441 | return pty_get_pktmode(tty, (int __user *)arg); | 441 | return pty_get_pktmode(tty, (int __user *)arg); |
442 | case TIOCSIG: /* Send signal to other side of pty */ | 442 | case TIOCSIG: /* Send signal to other side of pty */ |
443 | return pty_signal(tty, (int) arg); | 443 | return pty_signal(tty, (int) arg); |
444 | case TIOCGPTN: /* TTY returns ENOTTY, but glibc expects EINVAL here */ | ||
445 | return -EINVAL; | ||
444 | } | 446 | } |
445 | return -ENOIOCTLCMD; | 447 | return -ENOIOCTLCMD; |
446 | } | 448 | } |
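Editor's note: the pty change matters because glibc's ptsname()/ptsname_r() probe TIOCGPTN and treat EINVAL, not ENOTTY, as the signal to fall back to legacy BSD pty naming. A small userspace sketch of that probe, assuming a Linux/glibc environment where <sys/ioctl.h> exposes TIOCGPTN:

#include <stdio.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

int main(void)
{
        int fd = open("/dev/ptmx", O_RDWR | O_NOCTTY);
        unsigned int pty_nr;

        if (fd < 0)
                return 1;
        if (ioctl(fd, TIOCGPTN, &pty_nr) == 0)
                printf("Unix98 pty, slave is /dev/pts/%u\n", pty_nr);
        else if (errno == EINVAL)
                printf("not a Unix98 pty, fall back to legacy naming\n");
        else
                perror("TIOCGPTN");
        close(fd);
        return 0;
}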
diff --git a/drivers/tty/serial/8250/8250.c b/drivers/tty/serial/8250/8250.c index d085e3a8ec06..f9320437a649 100644 --- a/drivers/tty/serial/8250/8250.c +++ b/drivers/tty/serial/8250/8250.c | |||
@@ -300,6 +300,12 @@ static const struct serial8250_config uart_config[] = { | |||
300 | UART_FCR_R_TRIG_00 | UART_FCR_T_TRIG_00, | 300 | UART_FCR_R_TRIG_00 | UART_FCR_T_TRIG_00, |
301 | .flags = UART_CAP_FIFO, | 301 | .flags = UART_CAP_FIFO, |
302 | }, | 302 | }, |
303 | [PORT_BRCM_TRUMANAGE] = { | ||
304 | .name = "TruManage", | ||
305 | .fifo_size = 1, | ||
306 | .tx_loadsz = 1024, | ||
307 | .flags = UART_CAP_HFIFO, | ||
308 | }, | ||
303 | [PORT_8250_CIR] = { | 309 | [PORT_8250_CIR] = { |
304 | .name = "CIR port" | 310 | .name = "CIR port" |
305 | } | 311 | } |
@@ -1490,6 +1496,11 @@ void serial8250_tx_chars(struct uart_8250_port *up) | |||
1490 | port->icount.tx++; | 1496 | port->icount.tx++; |
1491 | if (uart_circ_empty(xmit)) | 1497 | if (uart_circ_empty(xmit)) |
1492 | break; | 1498 | break; |
1499 | if (up->capabilities & UART_CAP_HFIFO) { | ||
1500 | if ((serial_port_in(port, UART_LSR) & BOTH_EMPTY) != | ||
1501 | BOTH_EMPTY) | ||
1502 | break; | ||
1503 | } | ||
1493 | } while (--count > 0); | 1504 | } while (--count > 0); |
1494 | 1505 | ||
1495 | if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) | 1506 | if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) |
diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h index 3b4ea84898c2..12caa1292b75 100644 --- a/drivers/tty/serial/8250/8250.h +++ b/drivers/tty/serial/8250/8250.h | |||
@@ -40,6 +40,7 @@ struct serial8250_config { | |||
40 | #define UART_CAP_AFE (1 << 11) /* MCR-based hw flow control */ | 40 | #define UART_CAP_AFE (1 << 11) /* MCR-based hw flow control */ |
41 | #define UART_CAP_UUE (1 << 12) /* UART needs IER bit 6 set (Xscale) */ | 41 | #define UART_CAP_UUE (1 << 12) /* UART needs IER bit 6 set (Xscale) */ |
42 | #define UART_CAP_RTOIE (1 << 13) /* UART needs IER bit 4 set (Xscale, Tegra) */ | 42 | #define UART_CAP_RTOIE (1 << 13) /* UART needs IER bit 4 set (Xscale, Tegra) */ |
43 | #define UART_CAP_HFIFO (1 << 14) /* UART has a "hidden" FIFO */ | ||
43 | 44 | ||
44 | #define UART_BUG_QUOT (1 << 0) /* UART has buggy quot LSB */ | 45 | #define UART_BUG_QUOT (1 << 0) /* UART has buggy quot LSB */ |
45 | #define UART_BUG_TXEN (1 << 1) /* UART has buggy TX IIR status */ | 46 | #define UART_BUG_TXEN (1 << 1) /* UART has buggy TX IIR status */ |
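Editor's note: the TruManage entry advertises a 1-byte FIFO but a 1024-byte tx_loadsz, and UART_CAP_HFIFO tells the TX path to trust the line status register rather than the advertised FIFO depth. A sketch (not the driver's exact structure) of the gating condition added to serial8250_tx_chars(); BOTH_EMPTY is the 8250 code's existing shorthand for TEMT and THRE both set:

#include <linux/serial_reg.h>   /* UART_LSR, UART_LSR_TEMT, UART_LSR_THRE */
#include <linux/serial_8250.h>  /* struct uart_8250_port */
#include "8250.h"               /* serial_port_in(), UART_CAP_HFIFO */

#define BOTH_EMPTY      (UART_LSR_TEMT | UART_LSR_THRE)

/* Keep loading the "hidden" FIFO only while the UART still claims both the
 * transmitter and the holding register are empty. */
static bool hfifo_room_left(struct uart_8250_port *up)
{
        if (!(up->capabilities & UART_CAP_HFIFO))
                return true;
        return (serial_port_in(&up->port, UART_LSR) & BOTH_EMPTY) == BOTH_EMPTY;
}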
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c index 1d0dba2d562d..096d2ef48b32 100644 --- a/drivers/tty/serial/8250/8250_dw.c +++ b/drivers/tty/serial/8250/8250_dw.c | |||
@@ -79,7 +79,7 @@ static int dw8250_handle_irq(struct uart_port *p) | |||
79 | } else if ((iir & UART_IIR_BUSY) == UART_IIR_BUSY) { | 79 | } else if ((iir & UART_IIR_BUSY) == UART_IIR_BUSY) { |
80 | /* Clear the USR and write the LCR again. */ | 80 | /* Clear the USR and write the LCR again. */ |
81 | (void)p->serial_in(p, UART_USR); | 81 | (void)p->serial_in(p, UART_USR); |
82 | p->serial_out(p, d->last_lcr, UART_LCR); | 82 | p->serial_out(p, UART_LCR, d->last_lcr); |
83 | 83 | ||
84 | return 1; | 84 | return 1; |
85 | } | 85 | } |
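Editor's note: the dw8250 fix is a plain argument-order bug; the uart_port serial_out hook takes the register offset before the value, so the old call wrote the cached LCR value to whatever register that value happened to name. A minimal sketch of the corrected busy-interrupt handling, assuming the usual serial_core accessors:

#include <linux/serial_core.h>  /* struct uart_port, ->serial_in/->serial_out */
#include <linux/serial_reg.h>   /* UART_LCR, UART_USR */

static void dw8250_rewrite_lcr(struct uart_port *p, unsigned int last_lcr)
{
        (void)p->serial_in(p, UART_USR);        /* clear the busy indication */
        p->serial_out(p, UART_LCR, last_lcr);   /* offset first, then value  */
}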
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index 26b9dc012ed0..a27a98e1b066 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c | |||
@@ -1085,6 +1085,18 @@ pci_omegapci_setup(struct serial_private *priv, | |||
1085 | return setup_port(priv, port, 2, idx * 8, 0); | 1085 | return setup_port(priv, port, 2, idx * 8, 0); |
1086 | } | 1086 | } |
1087 | 1087 | ||
1088 | static int | ||
1089 | pci_brcm_trumanage_setup(struct serial_private *priv, | ||
1090 | const struct pciserial_board *board, | ||
1091 | struct uart_8250_port *port, int idx) | ||
1092 | { | ||
1093 | int ret = pci_default_setup(priv, board, port, idx); | ||
1094 | |||
1095 | port->port.type = PORT_BRCM_TRUMANAGE; | ||
1096 | port->port.flags = (port->port.flags | UPF_FIXED_PORT | UPF_FIXED_TYPE); | ||
1097 | return ret; | ||
1098 | } | ||
1099 | |||
1088 | static int skip_tx_en_setup(struct serial_private *priv, | 1100 | static int skip_tx_en_setup(struct serial_private *priv, |
1089 | const struct pciserial_board *board, | 1101 | const struct pciserial_board *board, |
1090 | struct uart_8250_port *port, int idx) | 1102 | struct uart_8250_port *port, int idx) |
@@ -1301,9 +1313,10 @@ pci_wch_ch353_setup(struct serial_private *priv, | |||
1301 | #define PCI_VENDOR_ID_AGESTAR 0x5372 | 1313 | #define PCI_VENDOR_ID_AGESTAR 0x5372 |
1302 | #define PCI_DEVICE_ID_AGESTAR_9375 0x6872 | 1314 | #define PCI_DEVICE_ID_AGESTAR_9375 0x6872 |
1303 | #define PCI_VENDOR_ID_ASIX 0x9710 | 1315 | #define PCI_VENDOR_ID_ASIX 0x9710 |
1304 | #define PCI_DEVICE_ID_COMMTECH_4222PCIE 0x0019 | ||
1305 | #define PCI_DEVICE_ID_COMMTECH_4224PCIE 0x0020 | 1316 | #define PCI_DEVICE_ID_COMMTECH_4224PCIE 0x0020 |
1306 | #define PCI_DEVICE_ID_COMMTECH_4228PCIE 0x0021 | 1317 | #define PCI_DEVICE_ID_COMMTECH_4228PCIE 0x0021 |
1318 | #define PCI_DEVICE_ID_COMMTECH_4222PCIE 0x0022 | ||
1319 | #define PCI_DEVICE_ID_BROADCOM_TRUMANAGE 0x160a | ||
1307 | 1320 | ||
1308 | 1321 | ||
1309 | /* Unknown vendors/cards - this should not be in linux/pci_ids.h */ | 1322 | /* Unknown vendors/cards - this should not be in linux/pci_ids.h */ |
@@ -1954,6 +1967,17 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = { | |||
1954 | .setup = pci_xr17v35x_setup, | 1967 | .setup = pci_xr17v35x_setup, |
1955 | }, | 1968 | }, |
1956 | /* | 1969 | /* |
1970 | * Broadcom TruManage (NetXtreme) | ||
1971 | */ | ||
1972 | { | ||
1973 | .vendor = PCI_VENDOR_ID_BROADCOM, | ||
1974 | .device = PCI_DEVICE_ID_BROADCOM_TRUMANAGE, | ||
1975 | .subvendor = PCI_ANY_ID, | ||
1976 | .subdevice = PCI_ANY_ID, | ||
1977 | .setup = pci_brcm_trumanage_setup, | ||
1978 | }, | ||
1979 | |||
1980 | /* | ||
1957 | * Default "match everything" terminator entry | 1981 | * Default "match everything" terminator entry |
1958 | */ | 1982 | */ |
1959 | { | 1983 | { |
@@ -2148,6 +2172,7 @@ enum pci_board_num_t { | |||
2148 | pbn_ce4100_1_115200, | 2172 | pbn_ce4100_1_115200, |
2149 | pbn_omegapci, | 2173 | pbn_omegapci, |
2150 | pbn_NETMOS9900_2s_115200, | 2174 | pbn_NETMOS9900_2s_115200, |
2175 | pbn_brcm_trumanage, | ||
2151 | }; | 2176 | }; |
2152 | 2177 | ||
2153 | /* | 2178 | /* |
@@ -2246,7 +2271,7 @@ static struct pciserial_board pci_boards[] = { | |||
2246 | 2271 | ||
2247 | [pbn_b0_8_1152000_200] = { | 2272 | [pbn_b0_8_1152000_200] = { |
2248 | .flags = FL_BASE0, | 2273 | .flags = FL_BASE0, |
2249 | .num_ports = 2, | 2274 | .num_ports = 8, |
2250 | .base_baud = 1152000, | 2275 | .base_baud = 1152000, |
2251 | .uart_offset = 0x200, | 2276 | .uart_offset = 0x200, |
2252 | }, | 2277 | }, |
@@ -2892,6 +2917,12 @@ static struct pciserial_board pci_boards[] = { | |||
2892 | .num_ports = 2, | 2917 | .num_ports = 2, |
2893 | .base_baud = 115200, | 2918 | .base_baud = 115200, |
2894 | }, | 2919 | }, |
2920 | [pbn_brcm_trumanage] = { | ||
2921 | .flags = FL_BASE0, | ||
2922 | .num_ports = 1, | ||
2923 | .reg_shift = 2, | ||
2924 | .base_baud = 115200, | ||
2925 | }, | ||
2895 | }; | 2926 | }; |
2896 | 2927 | ||
2897 | static const struct pci_device_id blacklist[] = { | 2928 | static const struct pci_device_id blacklist[] = { |
@@ -4471,6 +4502,13 @@ static struct pci_device_id serial_pci_tbl[] = { | |||
4471 | pbn_omegapci }, | 4502 | pbn_omegapci }, |
4472 | 4503 | ||
4473 | /* | 4504 | /* |
4505 | * Broadcom TruManage | ||
4506 | */ | ||
4507 | { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BROADCOM_TRUMANAGE, | ||
4508 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
4509 | pbn_brcm_trumanage }, | ||
4510 | |||
4511 | /* | ||
4474 | * AgeStar as-prs2-009 | 4512 | * AgeStar as-prs2-009 |
4475 | */ | 4513 | */ |
4476 | { PCI_VENDOR_ID_AGESTAR, PCI_DEVICE_ID_AGESTAR_9375, | 4514 | { PCI_VENDOR_ID_AGESTAR, PCI_DEVICE_ID_AGESTAR_9375, |
diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c index 675d94ab0aff..8cb6d8d66a13 100644 --- a/drivers/tty/serial/ifx6x60.c +++ b/drivers/tty/serial/ifx6x60.c | |||
@@ -637,6 +637,7 @@ static void ifx_port_shutdown(struct tty_port *port) | |||
637 | 637 | ||
638 | clear_bit(IFX_SPI_STATE_IO_AVAILABLE, &ifx_dev->flags); | 638 | clear_bit(IFX_SPI_STATE_IO_AVAILABLE, &ifx_dev->flags); |
639 | mrdy_set_low(ifx_dev); | 639 | mrdy_set_low(ifx_dev); |
640 | del_timer(&ifx_dev->spi_timer); | ||
640 | clear_bit(IFX_SPI_STATE_TIMER_PENDING, &ifx_dev->flags); | 641 | clear_bit(IFX_SPI_STATE_TIMER_PENDING, &ifx_dev->flags); |
641 | tasklet_kill(&ifx_dev->io_work_tasklet); | 642 | tasklet_kill(&ifx_dev->io_work_tasklet); |
642 | } | 643 | } |
@@ -810,7 +811,8 @@ static void ifx_spi_io(unsigned long data) | |||
810 | ifx_dev->spi_xfer.cs_change = 0; | 811 | ifx_dev->spi_xfer.cs_change = 0; |
811 | ifx_dev->spi_xfer.speed_hz = ifx_dev->spi_dev->max_speed_hz; | 812 | ifx_dev->spi_xfer.speed_hz = ifx_dev->spi_dev->max_speed_hz; |
812 | /* ifx_dev->spi_xfer.speed_hz = 390625; */ | 813 | /* ifx_dev->spi_xfer.speed_hz = 390625; */ |
813 | ifx_dev->spi_xfer.bits_per_word = spi_bpw; | 814 | ifx_dev->spi_xfer.bits_per_word = |
815 | ifx_dev->spi_dev->bits_per_word; | ||
814 | 816 | ||
815 | ifx_dev->spi_xfer.tx_buf = ifx_dev->tx_buffer; | 817 | ifx_dev->spi_xfer.tx_buf = ifx_dev->tx_buffer; |
816 | ifx_dev->spi_xfer.rx_buf = ifx_dev->rx_buffer; | 818 | ifx_dev->spi_xfer.rx_buf = ifx_dev->rx_buffer; |
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c index 6db23b035efe..e55615eb34ad 100644 --- a/drivers/tty/serial/mxs-auart.c +++ b/drivers/tty/serial/mxs-auart.c | |||
@@ -253,7 +253,7 @@ static void mxs_auart_tx_chars(struct mxs_auart_port *s) | |||
253 | struct circ_buf *xmit = &s->port.state->xmit; | 253 | struct circ_buf *xmit = &s->port.state->xmit; |
254 | 254 | ||
255 | if (auart_dma_enabled(s)) { | 255 | if (auart_dma_enabled(s)) { |
256 | int i = 0; | 256 | u32 i = 0; |
257 | int size; | 257 | int size; |
258 | void *buffer = s->tx_dma_buf; | 258 | void *buffer = s->tx_dma_buf; |
259 | 259 | ||
@@ -412,10 +412,12 @@ static void mxs_auart_set_mctrl(struct uart_port *u, unsigned mctrl) | |||
412 | 412 | ||
413 | u32 ctrl = readl(u->membase + AUART_CTRL2); | 413 | u32 ctrl = readl(u->membase + AUART_CTRL2); |
414 | 414 | ||
415 | ctrl &= ~AUART_CTRL2_RTSEN; | 415 | ctrl &= ~(AUART_CTRL2_RTSEN | AUART_CTRL2_RTS); |
416 | if (mctrl & TIOCM_RTS) { | 416 | if (mctrl & TIOCM_RTS) { |
417 | if (tty_port_cts_enabled(&u->state->port)) | 417 | if (tty_port_cts_enabled(&u->state->port)) |
418 | ctrl |= AUART_CTRL2_RTSEN; | 418 | ctrl |= AUART_CTRL2_RTSEN; |
419 | else | ||
420 | ctrl |= AUART_CTRL2_RTS; | ||
419 | } | 421 | } |
420 | 422 | ||
421 | s->ctrl = mctrl; | 423 | s->ctrl = mctrl; |
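Editor's note: with the mxs-auart change, TIOCM_RTS now drives the RTS line directly whenever automatic RTS/CTS flow control is off, instead of being ignored. A userspace sketch of the operation this enables; the /dev/ttyAPP0 name is an assumption about how the port is registered:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

int main(void)
{
        int fd = open("/dev/ttyAPP0", O_RDWR | O_NOCTTY);
        int rts = TIOCM_RTS;

        if (fd < 0)
                return 1;
        ioctl(fd, TIOCMBIS, &rts);      /* assert RTS by hand */
        usleep(1000);
        ioctl(fd, TIOCMBIC, &rts);      /* deassert it again  */
        close(fd);
        return 0;
}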
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c index 12e5249d053e..e514b3a4dc57 100644 --- a/drivers/tty/serial/samsung.c +++ b/drivers/tty/serial/samsung.c | |||
@@ -1006,7 +1006,6 @@ static void s3c24xx_serial_resetport(struct uart_port *port, | |||
1006 | 1006 | ||
1007 | ucon &= ucon_mask; | 1007 | ucon &= ucon_mask; |
1008 | wr_regl(port, S3C2410_UCON, ucon | cfg->ucon); | 1008 | wr_regl(port, S3C2410_UCON, ucon | cfg->ucon); |
1009 | wr_regl(port, S3C2410_ULCON, cfg->ulcon); | ||
1010 | 1009 | ||
1011 | /* reset both fifos */ | 1010 | /* reset both fifos */ |
1012 | wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH); | 1011 | wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH); |
diff --git a/drivers/tty/serial/vt8500_serial.c b/drivers/tty/serial/vt8500_serial.c index 8fd181436a6b..d5ed9f613005 100644 --- a/drivers/tty/serial/vt8500_serial.c +++ b/drivers/tty/serial/vt8500_serial.c | |||
@@ -604,7 +604,7 @@ static int vt8500_serial_probe(struct platform_device *pdev) | |||
604 | vt8500_port->uart.flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF; | 604 | vt8500_port->uart.flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF; |
605 | 605 | ||
606 | vt8500_port->clk = of_clk_get(pdev->dev.of_node, 0); | 606 | vt8500_port->clk = of_clk_get(pdev->dev.of_node, 0); |
607 | if (vt8500_port->clk) { | 607 | if (!IS_ERR(vt8500_port->clk)) { |
608 | vt8500_port->uart.uartclk = clk_get_rate(vt8500_port->clk); | 608 | vt8500_port->uart.uartclk = clk_get_rate(vt8500_port->clk); |
609 | } else { | 609 | } else { |
610 | /* use the default of 24Mhz if not specified and warn */ | 610 | /* use the default of 24Mhz if not specified and warn */ |
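Editor's note: the vt8500 fix is the classic ERR_PTR pitfall; of_clk_get() reports failure with an encoded error pointer, never NULL, so the old `if (vt8500_port->clk)` always took the success path. A kernel-style sketch of the idiom, with the 24 MHz fallback from the surrounding code:

#include <linux/err.h>
#include <linux/clk.h>
#include <linux/of.h>

static unsigned long uartclk_or_default(struct device_node *np)
{
        struct clk *clk = of_clk_get(np, 0);

        if (IS_ERR(clk))                /* not: if (!clk) */
                return 24000000;        /* fall back to the 24 MHz default */
        return clk_get_rate(clk);
}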
diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c index 4362d9e7baa3..f72323ef618f 100644 --- a/drivers/vfio/pci/vfio_pci_rdwr.c +++ b/drivers/vfio/pci/vfio_pci_rdwr.c | |||
@@ -240,17 +240,17 @@ ssize_t vfio_pci_mem_readwrite(struct vfio_pci_device *vdev, char __user *buf, | |||
240 | filled = 1; | 240 | filled = 1; |
241 | } else { | 241 | } else { |
242 | /* Drop writes, fill reads with FF */ | 242 | /* Drop writes, fill reads with FF */ |
243 | filled = min((size_t)(x_end - pos), count); | ||
243 | if (!iswrite) { | 244 | if (!iswrite) { |
244 | char val = 0xFF; | 245 | char val = 0xFF; |
245 | size_t i; | 246 | size_t i; |
246 | 247 | ||
247 | for (i = 0; i < x_end - pos; i++) { | 248 | for (i = 0; i < filled; i++) { |
248 | if (put_user(val, buf + i)) | 249 | if (put_user(val, buf + i)) |
249 | goto out; | 250 | goto out; |
250 | } | 251 | } |
251 | } | 252 | } |
252 | 253 | ||
253 | filled = x_end - pos; | ||
254 | } | 254 | } |
255 | 255 | ||
256 | count -= filled; | 256 | count -= filled; |
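Editor's note: the vfio change computes `filled` with min() before it is written to the user buffer and subtracted from the unsigned `count`, so a gap larger than the requested read can no longer overrun the caller's buffer or make `count` wrap around. A userspace sketch of the failure mode:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
        size_t count = 3;               /* bytes the caller asked for       */
        size_t gap = 8;                 /* bytes left in the unreadable gap */
        size_t filled_bad = gap;        /* old code: may exceed count       */
        size_t filled_good = gap < count ? gap : count;

        printf("unclamped: count becomes %zu\n", count - filled_bad);
        printf("clamped:   count becomes %zu\n", count - filled_good);
        return 0;
}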
diff --git a/drivers/xen/cpu_hotplug.c b/drivers/xen/cpu_hotplug.c index 4dcfced107f5..084041d42c9a 100644 --- a/drivers/xen/cpu_hotplug.c +++ b/drivers/xen/cpu_hotplug.c | |||
@@ -25,10 +25,10 @@ static void disable_hotplug_cpu(int cpu) | |||
25 | static int vcpu_online(unsigned int cpu) | 25 | static int vcpu_online(unsigned int cpu) |
26 | { | 26 | { |
27 | int err; | 27 | int err; |
28 | char dir[32], state[32]; | 28 | char dir[16], state[16]; |
29 | 29 | ||
30 | sprintf(dir, "cpu/%u", cpu); | 30 | sprintf(dir, "cpu/%u", cpu); |
31 | err = xenbus_scanf(XBT_NIL, dir, "availability", "%s", state); | 31 | err = xenbus_scanf(XBT_NIL, dir, "availability", "%15s", state); |
32 | if (err != 1) { | 32 | if (err != 1) { |
33 | if (!xen_initial_domain()) | 33 | if (!xen_initial_domain()) |
34 | printk(KERN_ERR "XENBUS: Unable to read cpu state\n"); | 34 | printk(KERN_ERR "XENBUS: Unable to read cpu state\n"); |
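Editor's note: xenbus_scanf() takes a scanf-style format, so the unbounded %s could write past the now 16-byte state buffer; "%15s" caps the copy at 15 characters plus the terminator. The same rule in a plain userspace sscanf sketch (an analogy, not the xenbus call itself):

#include <stdio.h>

int main(void)
{
        char state[16];
        const char *xenstore_value = "online-but-unexpectedly-long";

        sscanf(xenstore_value, "%15s", state);  /* at most 15 chars + NUL */
        printf("state=%s\n", state);
        return 0;
}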
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index 2e22df2f7a3f..3c8803feba26 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c | |||
@@ -56,10 +56,15 @@ MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped by " | |||
56 | static atomic_t pages_mapped = ATOMIC_INIT(0); | 56 | static atomic_t pages_mapped = ATOMIC_INIT(0); |
57 | 57 | ||
58 | static int use_ptemod; | 58 | static int use_ptemod; |
59 | #define populate_freeable_maps use_ptemod | ||
59 | 60 | ||
60 | struct gntdev_priv { | 61 | struct gntdev_priv { |
62 | /* maps with visible offsets in the file descriptor */ | ||
61 | struct list_head maps; | 63 | struct list_head maps; |
62 | /* lock protects maps from concurrent changes */ | 64 | /* maps that are not visible; will be freed on munmap. |
65 | * Only populated if populate_freeable_maps == 1 */ | ||
66 | struct list_head freeable_maps; | ||
67 | /* lock protects maps and freeable_maps */ | ||
63 | spinlock_t lock; | 68 | spinlock_t lock; |
64 | struct mm_struct *mm; | 69 | struct mm_struct *mm; |
65 | struct mmu_notifier mn; | 70 | struct mmu_notifier mn; |
@@ -193,7 +198,7 @@ static struct grant_map *gntdev_find_map_index(struct gntdev_priv *priv, | |||
193 | return NULL; | 198 | return NULL; |
194 | } | 199 | } |
195 | 200 | ||
196 | static void gntdev_put_map(struct grant_map *map) | 201 | static void gntdev_put_map(struct gntdev_priv *priv, struct grant_map *map) |
197 | { | 202 | { |
198 | if (!map) | 203 | if (!map) |
199 | return; | 204 | return; |
@@ -208,6 +213,12 @@ static void gntdev_put_map(struct grant_map *map) | |||
208 | evtchn_put(map->notify.event); | 213 | evtchn_put(map->notify.event); |
209 | } | 214 | } |
210 | 215 | ||
216 | if (populate_freeable_maps && priv) { | ||
217 | spin_lock(&priv->lock); | ||
218 | list_del(&map->next); | ||
219 | spin_unlock(&priv->lock); | ||
220 | } | ||
221 | |||
211 | if (map->pages && !use_ptemod) | 222 | if (map->pages && !use_ptemod) |
212 | unmap_grant_pages(map, 0, map->count); | 223 | unmap_grant_pages(map, 0, map->count); |
213 | gntdev_free_map(map); | 224 | gntdev_free_map(map); |
@@ -301,17 +312,10 @@ static int __unmap_grant_pages(struct grant_map *map, int offset, int pages) | |||
301 | 312 | ||
302 | if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) { | 313 | if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) { |
303 | int pgno = (map->notify.addr >> PAGE_SHIFT); | 314 | int pgno = (map->notify.addr >> PAGE_SHIFT); |
304 | if (pgno >= offset && pgno < offset + pages && use_ptemod) { | 315 | if (pgno >= offset && pgno < offset + pages) { |
305 | void __user *tmp = (void __user *) | 316 | /* No need for kmap, pages are in lowmem */ |
306 | map->vma->vm_start + map->notify.addr; | 317 | uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno])); |
307 | err = copy_to_user(tmp, &err, 1); | ||
308 | if (err) | ||
309 | return -EFAULT; | ||
310 | map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE; | ||
311 | } else if (pgno >= offset && pgno < offset + pages) { | ||
312 | uint8_t *tmp = kmap(map->pages[pgno]); | ||
313 | tmp[map->notify.addr & (PAGE_SIZE-1)] = 0; | 318 | tmp[map->notify.addr & (PAGE_SIZE-1)] = 0; |
314 | kunmap(map->pages[pgno]); | ||
315 | map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE; | 319 | map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE; |
316 | } | 320 | } |
317 | } | 321 | } |
@@ -376,11 +380,24 @@ static void gntdev_vma_open(struct vm_area_struct *vma) | |||
376 | static void gntdev_vma_close(struct vm_area_struct *vma) | 380 | static void gntdev_vma_close(struct vm_area_struct *vma) |
377 | { | 381 | { |
378 | struct grant_map *map = vma->vm_private_data; | 382 | struct grant_map *map = vma->vm_private_data; |
383 | struct file *file = vma->vm_file; | ||
384 | struct gntdev_priv *priv = file->private_data; | ||
379 | 385 | ||
380 | pr_debug("gntdev_vma_close %p\n", vma); | 386 | pr_debug("gntdev_vma_close %p\n", vma); |
381 | map->vma = NULL; | 387 | if (use_ptemod) { |
388 | /* It is possible that an mmu notifier could be running | ||
389 | * concurrently, so take priv->lock to ensure that the vma won't | ||
390 | * vanish during the unmap_grant_pages call, since we will | ||
391 | * spin here until that completes. Such a concurrent call will | ||
392 | * not do any unmapping, since that has been done prior to | ||
393 | * closing the vma, but it may still iterate the unmap_ops list. | ||
394 | */ | ||
395 | spin_lock(&priv->lock); | ||
396 | map->vma = NULL; | ||
397 | spin_unlock(&priv->lock); | ||
398 | } | ||
382 | vma->vm_private_data = NULL; | 399 | vma->vm_private_data = NULL; |
383 | gntdev_put_map(map); | 400 | gntdev_put_map(priv, map); |
384 | } | 401 | } |
385 | 402 | ||
386 | static struct vm_operations_struct gntdev_vmops = { | 403 | static struct vm_operations_struct gntdev_vmops = { |
@@ -390,33 +407,43 @@ static struct vm_operations_struct gntdev_vmops = { | |||
390 | 407 | ||
391 | /* ------------------------------------------------------------------ */ | 408 | /* ------------------------------------------------------------------ */ |
392 | 409 | ||
410 | static void unmap_if_in_range(struct grant_map *map, | ||
411 | unsigned long start, unsigned long end) | ||
412 | { | ||
413 | unsigned long mstart, mend; | ||
414 | int err; | ||
415 | |||
416 | if (!map->vma) | ||
417 | return; | ||
418 | if (map->vma->vm_start >= end) | ||
419 | return; | ||
420 | if (map->vma->vm_end <= start) | ||
421 | return; | ||
422 | mstart = max(start, map->vma->vm_start); | ||
423 | mend = min(end, map->vma->vm_end); | ||
424 | pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n", | ||
425 | map->index, map->count, | ||
426 | map->vma->vm_start, map->vma->vm_end, | ||
427 | start, end, mstart, mend); | ||
428 | err = unmap_grant_pages(map, | ||
429 | (mstart - map->vma->vm_start) >> PAGE_SHIFT, | ||
430 | (mend - mstart) >> PAGE_SHIFT); | ||
431 | WARN_ON(err); | ||
432 | } | ||
433 | |||
393 | static void mn_invl_range_start(struct mmu_notifier *mn, | 434 | static void mn_invl_range_start(struct mmu_notifier *mn, |
394 | struct mm_struct *mm, | 435 | struct mm_struct *mm, |
395 | unsigned long start, unsigned long end) | 436 | unsigned long start, unsigned long end) |
396 | { | 437 | { |
397 | struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn); | 438 | struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn); |
398 | struct grant_map *map; | 439 | struct grant_map *map; |
399 | unsigned long mstart, mend; | ||
400 | int err; | ||
401 | 440 | ||
402 | spin_lock(&priv->lock); | 441 | spin_lock(&priv->lock); |
403 | list_for_each_entry(map, &priv->maps, next) { | 442 | list_for_each_entry(map, &priv->maps, next) { |
404 | if (!map->vma) | 443 | unmap_if_in_range(map, start, end); |
405 | continue; | 444 | } |
406 | if (map->vma->vm_start >= end) | 445 | list_for_each_entry(map, &priv->freeable_maps, next) { |
407 | continue; | 446 | unmap_if_in_range(map, start, end); |
408 | if (map->vma->vm_end <= start) | ||
409 | continue; | ||
410 | mstart = max(start, map->vma->vm_start); | ||
411 | mend = min(end, map->vma->vm_end); | ||
412 | pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n", | ||
413 | map->index, map->count, | ||
414 | map->vma->vm_start, map->vma->vm_end, | ||
415 | start, end, mstart, mend); | ||
416 | err = unmap_grant_pages(map, | ||
417 | (mstart - map->vma->vm_start) >> PAGE_SHIFT, | ||
418 | (mend - mstart) >> PAGE_SHIFT); | ||
419 | WARN_ON(err); | ||
420 | } | 447 | } |
421 | spin_unlock(&priv->lock); | 448 | spin_unlock(&priv->lock); |
422 | } | 449 | } |
@@ -445,6 +472,15 @@ static void mn_release(struct mmu_notifier *mn, | |||
445 | err = unmap_grant_pages(map, /* offset */ 0, map->count); | 472 | err = unmap_grant_pages(map, /* offset */ 0, map->count); |
446 | WARN_ON(err); | 473 | WARN_ON(err); |
447 | } | 474 | } |
475 | list_for_each_entry(map, &priv->freeable_maps, next) { | ||
476 | if (!map->vma) | ||
477 | continue; | ||
478 | pr_debug("map %d+%d (%lx %lx)\n", | ||
479 | map->index, map->count, | ||
480 | map->vma->vm_start, map->vma->vm_end); | ||
481 | err = unmap_grant_pages(map, /* offset */ 0, map->count); | ||
482 | WARN_ON(err); | ||
483 | } | ||
448 | spin_unlock(&priv->lock); | 484 | spin_unlock(&priv->lock); |
449 | } | 485 | } |
450 | 486 | ||
@@ -466,6 +502,7 @@ static int gntdev_open(struct inode *inode, struct file *flip) | |||
466 | return -ENOMEM; | 502 | return -ENOMEM; |
467 | 503 | ||
468 | INIT_LIST_HEAD(&priv->maps); | 504 | INIT_LIST_HEAD(&priv->maps); |
505 | INIT_LIST_HEAD(&priv->freeable_maps); | ||
469 | spin_lock_init(&priv->lock); | 506 | spin_lock_init(&priv->lock); |
470 | 507 | ||
471 | if (use_ptemod) { | 508 | if (use_ptemod) { |
@@ -500,8 +537,9 @@ static int gntdev_release(struct inode *inode, struct file *flip) | |||
500 | while (!list_empty(&priv->maps)) { | 537 | while (!list_empty(&priv->maps)) { |
501 | map = list_entry(priv->maps.next, struct grant_map, next); | 538 | map = list_entry(priv->maps.next, struct grant_map, next); |
502 | list_del(&map->next); | 539 | list_del(&map->next); |
503 | gntdev_put_map(map); | 540 | gntdev_put_map(NULL /* already removed */, map); |
504 | } | 541 | } |
542 | WARN_ON(!list_empty(&priv->freeable_maps)); | ||
505 | 543 | ||
506 | if (use_ptemod) | 544 | if (use_ptemod) |
507 | mmu_notifier_unregister(&priv->mn, priv->mm); | 545 | mmu_notifier_unregister(&priv->mn, priv->mm); |
@@ -529,14 +567,14 @@ static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv, | |||
529 | 567 | ||
530 | if (unlikely(atomic_add_return(op.count, &pages_mapped) > limit)) { | 568 | if (unlikely(atomic_add_return(op.count, &pages_mapped) > limit)) { |
531 | pr_debug("can't map: over limit\n"); | 569 | pr_debug("can't map: over limit\n"); |
532 | gntdev_put_map(map); | 570 | gntdev_put_map(NULL, map); |
533 | return err; | 571 | return err; |
534 | } | 572 | } |
535 | 573 | ||
536 | if (copy_from_user(map->grants, &u->refs, | 574 | if (copy_from_user(map->grants, &u->refs, |
537 | sizeof(map->grants[0]) * op.count) != 0) { | 575 | sizeof(map->grants[0]) * op.count) != 0) { |
538 | gntdev_put_map(map); | 576 | gntdev_put_map(NULL, map); |
539 | return err; | 577 | return -EFAULT; |
540 | } | 578 | } |
541 | 579 | ||
542 | spin_lock(&priv->lock); | 580 | spin_lock(&priv->lock); |
@@ -565,11 +603,13 @@ static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv, | |||
565 | map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count); | 603 | map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count); |
566 | if (map) { | 604 | if (map) { |
567 | list_del(&map->next); | 605 | list_del(&map->next); |
606 | if (populate_freeable_maps) | ||
607 | list_add_tail(&map->next, &priv->freeable_maps); | ||
568 | err = 0; | 608 | err = 0; |
569 | } | 609 | } |
570 | spin_unlock(&priv->lock); | 610 | spin_unlock(&priv->lock); |
571 | if (map) | 611 | if (map) |
572 | gntdev_put_map(map); | 612 | gntdev_put_map(priv, map); |
573 | return err; | 613 | return err; |
574 | } | 614 | } |
575 | 615 | ||
@@ -579,25 +619,31 @@ static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv, | |||
579 | struct ioctl_gntdev_get_offset_for_vaddr op; | 619 | struct ioctl_gntdev_get_offset_for_vaddr op; |
580 | struct vm_area_struct *vma; | 620 | struct vm_area_struct *vma; |
581 | struct grant_map *map; | 621 | struct grant_map *map; |
622 | int rv = -EINVAL; | ||
582 | 623 | ||
583 | if (copy_from_user(&op, u, sizeof(op)) != 0) | 624 | if (copy_from_user(&op, u, sizeof(op)) != 0) |
584 | return -EFAULT; | 625 | return -EFAULT; |
585 | pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr); | 626 | pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr); |
586 | 627 | ||
628 | down_read(¤t->mm->mmap_sem); | ||
587 | vma = find_vma(current->mm, op.vaddr); | 629 | vma = find_vma(current->mm, op.vaddr); |
588 | if (!vma || vma->vm_ops != &gntdev_vmops) | 630 | if (!vma || vma->vm_ops != &gntdev_vmops) |
589 | return -EINVAL; | 631 | goto out_unlock; |
590 | 632 | ||
591 | map = vma->vm_private_data; | 633 | map = vma->vm_private_data; |
592 | if (!map) | 634 | if (!map) |
593 | return -EINVAL; | 635 | goto out_unlock; |
594 | 636 | ||
595 | op.offset = map->index << PAGE_SHIFT; | 637 | op.offset = map->index << PAGE_SHIFT; |
596 | op.count = map->count; | 638 | op.count = map->count; |
639 | rv = 0; | ||
597 | 640 | ||
598 | if (copy_to_user(u, &op, sizeof(op)) != 0) | 641 | out_unlock: |
642 | up_read(¤t->mm->mmap_sem); | ||
643 | |||
644 | if (rv == 0 && copy_to_user(u, &op, sizeof(op)) != 0) | ||
599 | return -EFAULT; | 645 | return -EFAULT; |
600 | return 0; | 646 | return rv; |
601 | } | 647 | } |
602 | 648 | ||
603 | static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u) | 649 | static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u) |
@@ -778,7 +824,7 @@ out_unlock_put: | |||
778 | out_put_map: | 824 | out_put_map: |
779 | if (use_ptemod) | 825 | if (use_ptemod) |
780 | map->vma = NULL; | 826 | map->vma = NULL; |
781 | gntdev_put_map(map); | 827 | gntdev_put_map(priv, map); |
782 | return err; | 828 | return err; |
783 | } | 829 | } |
784 | 830 | ||
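Editor's note: besides the freeable_maps bookkeeping, the gntdev hunks make get_offset_for_vaddr hold mmap_sem around find_vma(); the returned vma is only stable while the semaphore is held. A hedged sketch of that locking rule (3.x-era naming, where the lock is still called mmap_sem):

#include <linux/mm.h>
#include <linux/sched.h>

static unsigned long vma_start_of(unsigned long addr)
{
        struct vm_area_struct *vma;
        unsigned long start = 0;

        down_read(&current->mm->mmap_sem);
        vma = find_vma(current->mm, addr);
        if (vma && vma->vm_start <= addr)
                start = vma->vm_start;  /* use vma only under the lock */
        up_read(&current->mm->mmap_sem);

        return start;
}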
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index 7038de53652b..157c0ccda3ef 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c | |||
@@ -56,10 +56,6 @@ | |||
56 | /* External tools reserve first few grant table entries. */ | 56 | /* External tools reserve first few grant table entries. */ |
57 | #define NR_RESERVED_ENTRIES 8 | 57 | #define NR_RESERVED_ENTRIES 8 |
58 | #define GNTTAB_LIST_END 0xffffffff | 58 | #define GNTTAB_LIST_END 0xffffffff |
59 | #define GREFS_PER_GRANT_FRAME \ | ||
60 | (grant_table_version == 1 ? \ | ||
61 | (PAGE_SIZE / sizeof(struct grant_entry_v1)) : \ | ||
62 | (PAGE_SIZE / sizeof(union grant_entry_v2))) | ||
63 | 59 | ||
64 | static grant_ref_t **gnttab_list; | 60 | static grant_ref_t **gnttab_list; |
65 | static unsigned int nr_grant_frames; | 61 | static unsigned int nr_grant_frames; |
@@ -154,6 +150,7 @@ static struct gnttab_ops *gnttab_interface; | |||
154 | static grant_status_t *grstatus; | 150 | static grant_status_t *grstatus; |
155 | 151 | ||
156 | static int grant_table_version; | 152 | static int grant_table_version; |
153 | static int grefs_per_grant_frame; | ||
157 | 154 | ||
158 | static struct gnttab_free_callback *gnttab_free_callback_list; | 155 | static struct gnttab_free_callback *gnttab_free_callback_list; |
159 | 156 | ||
@@ -767,12 +764,14 @@ static int grow_gnttab_list(unsigned int more_frames) | |||
767 | unsigned int new_nr_grant_frames, extra_entries, i; | 764 | unsigned int new_nr_grant_frames, extra_entries, i; |
768 | unsigned int nr_glist_frames, new_nr_glist_frames; | 765 | unsigned int nr_glist_frames, new_nr_glist_frames; |
769 | 766 | ||
767 | BUG_ON(grefs_per_grant_frame == 0); | ||
768 | |||
770 | new_nr_grant_frames = nr_grant_frames + more_frames; | 769 | new_nr_grant_frames = nr_grant_frames + more_frames; |
771 | extra_entries = more_frames * GREFS_PER_GRANT_FRAME; | 770 | extra_entries = more_frames * grefs_per_grant_frame; |
772 | 771 | ||
773 | nr_glist_frames = (nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP; | 772 | nr_glist_frames = (nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP; |
774 | new_nr_glist_frames = | 773 | new_nr_glist_frames = |
775 | (new_nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP; | 774 | (new_nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP; |
776 | for (i = nr_glist_frames; i < new_nr_glist_frames; i++) { | 775 | for (i = nr_glist_frames; i < new_nr_glist_frames; i++) { |
777 | gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC); | 776 | gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC); |
778 | if (!gnttab_list[i]) | 777 | if (!gnttab_list[i]) |
@@ -780,12 +779,12 @@ static int grow_gnttab_list(unsigned int more_frames) | |||
780 | } | 779 | } |
781 | 780 | ||
782 | 781 | ||
783 | for (i = GREFS_PER_GRANT_FRAME * nr_grant_frames; | 782 | for (i = grefs_per_grant_frame * nr_grant_frames; |
784 | i < GREFS_PER_GRANT_FRAME * new_nr_grant_frames - 1; i++) | 783 | i < grefs_per_grant_frame * new_nr_grant_frames - 1; i++) |
785 | gnttab_entry(i) = i + 1; | 784 | gnttab_entry(i) = i + 1; |
786 | 785 | ||
787 | gnttab_entry(i) = gnttab_free_head; | 786 | gnttab_entry(i) = gnttab_free_head; |
788 | gnttab_free_head = GREFS_PER_GRANT_FRAME * nr_grant_frames; | 787 | gnttab_free_head = grefs_per_grant_frame * nr_grant_frames; |
789 | gnttab_free_count += extra_entries; | 788 | gnttab_free_count += extra_entries; |
790 | 789 | ||
791 | nr_grant_frames = new_nr_grant_frames; | 790 | nr_grant_frames = new_nr_grant_frames; |
@@ -957,7 +956,8 @@ EXPORT_SYMBOL_GPL(gnttab_unmap_refs); | |||
957 | 956 | ||
958 | static unsigned nr_status_frames(unsigned nr_grant_frames) | 957 | static unsigned nr_status_frames(unsigned nr_grant_frames) |
959 | { | 958 | { |
960 | return (nr_grant_frames * GREFS_PER_GRANT_FRAME + SPP - 1) / SPP; | 959 | BUG_ON(grefs_per_grant_frame == 0); |
960 | return (nr_grant_frames * grefs_per_grant_frame + SPP - 1) / SPP; | ||
961 | } | 961 | } |
962 | 962 | ||
963 | static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes) | 963 | static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes) |
@@ -1115,6 +1115,7 @@ static void gnttab_request_version(void) | |||
1115 | rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1); | 1115 | rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1); |
1116 | if (rc == 0 && gsv.version == 2) { | 1116 | if (rc == 0 && gsv.version == 2) { |
1117 | grant_table_version = 2; | 1117 | grant_table_version = 2; |
1118 | grefs_per_grant_frame = PAGE_SIZE / sizeof(union grant_entry_v2); | ||
1118 | gnttab_interface = &gnttab_v2_ops; | 1119 | gnttab_interface = &gnttab_v2_ops; |
1119 | } else if (grant_table_version == 2) { | 1120 | } else if (grant_table_version == 2) { |
1120 | /* | 1121 | /* |
@@ -1127,17 +1128,17 @@ static void gnttab_request_version(void) | |||
1127 | panic("we need grant tables version 2, but only version 1 is available"); | 1128 | panic("we need grant tables version 2, but only version 1 is available"); |
1128 | } else { | 1129 | } else { |
1129 | grant_table_version = 1; | 1130 | grant_table_version = 1; |
1131 | grefs_per_grant_frame = PAGE_SIZE / sizeof(struct grant_entry_v1); | ||
1130 | gnttab_interface = &gnttab_v1_ops; | 1132 | gnttab_interface = &gnttab_v1_ops; |
1131 | } | 1133 | } |
1132 | printk(KERN_INFO "Grant tables using version %d layout.\n", | 1134 | printk(KERN_INFO "Grant tables using version %d layout.\n", |
1133 | grant_table_version); | 1135 | grant_table_version); |
1134 | } | 1136 | } |
1135 | 1137 | ||
1136 | int gnttab_resume(void) | 1138 | static int gnttab_setup(void) |
1137 | { | 1139 | { |
1138 | unsigned int max_nr_gframes; | 1140 | unsigned int max_nr_gframes; |
1139 | 1141 | ||
1140 | gnttab_request_version(); | ||
1141 | max_nr_gframes = gnttab_max_grant_frames(); | 1142 | max_nr_gframes = gnttab_max_grant_frames(); |
1142 | if (max_nr_gframes < nr_grant_frames) | 1143 | if (max_nr_gframes < nr_grant_frames) |
1143 | return -ENOSYS; | 1144 | return -ENOSYS; |
@@ -1160,6 +1161,12 @@ int gnttab_resume(void) | |||
1160 | return 0; | 1161 | return 0; |
1161 | } | 1162 | } |
1162 | 1163 | ||
1164 | int gnttab_resume(void) | ||
1165 | { | ||
1166 | gnttab_request_version(); | ||
1167 | return gnttab_setup(); | ||
1168 | } | ||
1169 | |||
1163 | int gnttab_suspend(void) | 1170 | int gnttab_suspend(void) |
1164 | { | 1171 | { |
1165 | gnttab_interface->unmap_frames(); | 1172 | gnttab_interface->unmap_frames(); |
@@ -1171,9 +1178,10 @@ static int gnttab_expand(unsigned int req_entries) | |||
1171 | int rc; | 1178 | int rc; |
1172 | unsigned int cur, extra; | 1179 | unsigned int cur, extra; |
1173 | 1180 | ||
1181 | BUG_ON(grefs_per_grant_frame == 0); | ||
1174 | cur = nr_grant_frames; | 1182 | cur = nr_grant_frames; |
1175 | extra = ((req_entries + (GREFS_PER_GRANT_FRAME-1)) / | 1183 | extra = ((req_entries + (grefs_per_grant_frame-1)) / |
1176 | GREFS_PER_GRANT_FRAME); | 1184 | grefs_per_grant_frame); |
1177 | if (cur + extra > gnttab_max_grant_frames()) | 1185 | if (cur + extra > gnttab_max_grant_frames()) |
1178 | return -ENOSPC; | 1186 | return -ENOSPC; |
1179 | 1187 | ||
@@ -1191,21 +1199,23 @@ int gnttab_init(void) | |||
1191 | unsigned int nr_init_grefs; | 1199 | unsigned int nr_init_grefs; |
1192 | int ret; | 1200 | int ret; |
1193 | 1201 | ||
1202 | gnttab_request_version(); | ||
1194 | nr_grant_frames = 1; | 1203 | nr_grant_frames = 1; |
1195 | boot_max_nr_grant_frames = __max_nr_grant_frames(); | 1204 | boot_max_nr_grant_frames = __max_nr_grant_frames(); |
1196 | 1205 | ||
1197 | /* Determine the maximum number of frames required for the | 1206 | /* Determine the maximum number of frames required for the |
1198 | * grant reference free list on the current hypervisor. | 1207 | * grant reference free list on the current hypervisor. |
1199 | */ | 1208 | */ |
1209 | BUG_ON(grefs_per_grant_frame == 0); | ||
1200 | max_nr_glist_frames = (boot_max_nr_grant_frames * | 1210 | max_nr_glist_frames = (boot_max_nr_grant_frames * |
1201 | GREFS_PER_GRANT_FRAME / RPP); | 1211 | grefs_per_grant_frame / RPP); |
1202 | 1212 | ||
1203 | gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *), | 1213 | gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *), |
1204 | GFP_KERNEL); | 1214 | GFP_KERNEL); |
1205 | if (gnttab_list == NULL) | 1215 | if (gnttab_list == NULL) |
1206 | return -ENOMEM; | 1216 | return -ENOMEM; |
1207 | 1217 | ||
1208 | nr_glist_frames = (nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP; | 1218 | nr_glist_frames = (nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP; |
1209 | for (i = 0; i < nr_glist_frames; i++) { | 1219 | for (i = 0; i < nr_glist_frames; i++) { |
1210 | gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL); | 1220 | gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL); |
1211 | if (gnttab_list[i] == NULL) { | 1221 | if (gnttab_list[i] == NULL) { |
@@ -1214,12 +1224,12 @@ int gnttab_init(void) | |||
1214 | } | 1224 | } |
1215 | } | 1225 | } |
1216 | 1226 | ||
1217 | if (gnttab_resume() < 0) { | 1227 | if (gnttab_setup() < 0) { |
1218 | ret = -ENODEV; | 1228 | ret = -ENODEV; |
1219 | goto ini_nomem; | 1229 | goto ini_nomem; |
1220 | } | 1230 | } |
1221 | 1231 | ||
1222 | nr_init_grefs = nr_grant_frames * GREFS_PER_GRANT_FRAME; | 1232 | nr_init_grefs = nr_grant_frames * grefs_per_grant_frame; |
1223 | 1233 | ||
1224 | for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++) | 1234 | for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++) |
1225 | gnttab_entry(i) = i + 1; | 1235 | gnttab_entry(i) = i + 1; |
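Editor's note: GREFS_PER_GRANT_FRAME was being evaluated in gnttab_init() before the version negotiation had run, which is why it becomes the runtime variable grefs_per_grant_frame set in gnttab_request_version() and guarded by BUG_ON(). The arithmetic itself is just page size over entry size; the entry sizes below are illustrative assumptions, not values taken from the Xen headers:

#include <stdio.h>

int main(void)
{
        const unsigned long page_size = 4096;
        const unsigned long v1_entry = 8;   /* assumed sizeof(struct grant_entry_v1) */
        const unsigned long v2_entry = 16;  /* assumed sizeof(union grant_entry_v2)  */

        printf("v1: %lu grefs per frame, v2: %lu grefs per frame\n",
               page_size / v1_entry, page_size / v2_entry);
        return 0;
}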
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c index 0bbbccbb1f12..ca2b00e9d558 100644 --- a/drivers/xen/privcmd.c +++ b/drivers/xen/privcmd.c | |||
@@ -199,9 +199,6 @@ static long privcmd_ioctl_mmap(void __user *udata) | |||
199 | LIST_HEAD(pagelist); | 199 | LIST_HEAD(pagelist); |
200 | struct mmap_mfn_state state; | 200 | struct mmap_mfn_state state; |
201 | 201 | ||
202 | if (!xen_initial_domain()) | ||
203 | return -EPERM; | ||
204 | |||
205 | /* We only support privcmd_ioctl_mmap_batch for auto translated. */ | 202 | /* We only support privcmd_ioctl_mmap_batch for auto translated. */ |
206 | if (xen_feature(XENFEAT_auto_translated_physmap)) | 203 | if (xen_feature(XENFEAT_auto_translated_physmap)) |
207 | return -ENOSYS; | 204 | return -ENOSYS; |
@@ -261,11 +258,12 @@ struct mmap_batch_state { | |||
261 | * -ENOENT if at least 1 -ENOENT has happened. | 258 | * -ENOENT if at least 1 -ENOENT has happened. |
262 | */ | 259 | */ |
263 | int global_error; | 260 | int global_error; |
264 | /* An array for individual errors */ | 261 | int version; |
265 | int *err; | ||
266 | 262 | ||
267 | /* User-space mfn array to store errors in the second pass for V1. */ | 263 | /* User-space mfn array to store errors in the second pass for V1. */ |
268 | xen_pfn_t __user *user_mfn; | 264 | xen_pfn_t __user *user_mfn; |
265 | /* User-space int array to store errors in the second pass for V2. */ | ||
266 | int __user *user_err; | ||
269 | }; | 267 | }; |
270 | 268 | ||
271 | /* auto translated dom0 note: if domU being created is PV, then mfn is | 269 | /* auto translated dom0 note: if domU being created is PV, then mfn is |
@@ -288,7 +286,19 @@ static int mmap_batch_fn(void *data, void *state) | |||
288 | &cur_page); | 286 | &cur_page); |
289 | 287 | ||
290 | /* Store error code for second pass. */ | 288 | /* Store error code for second pass. */ |
291 | *(st->err++) = ret; | 289 | if (st->version == 1) { |
290 | if (ret < 0) { | ||
291 | /* | ||
292 | * V1 encodes the error codes in the 32bit top nibble of the | ||
293 | * mfn (with its known limitations vis-a-vis 64 bit callers). | ||
294 | */ | ||
295 | *mfnp |= (ret == -ENOENT) ? | ||
296 | PRIVCMD_MMAPBATCH_PAGED_ERROR : | ||
297 | PRIVCMD_MMAPBATCH_MFN_ERROR; | ||
298 | } | ||
299 | } else { /* st->version == 2 */ | ||
300 | *((int *) mfnp) = ret; | ||
301 | } | ||
292 | 302 | ||
293 | /* And see if it affects the global_error. */ | 303 | /* And see if it affects the global_error. */ |
294 | if (ret < 0) { | 304 | if (ret < 0) { |
@@ -305,20 +315,25 @@ static int mmap_batch_fn(void *data, void *state) | |||
305 | return 0; | 315 | return 0; |
306 | } | 316 | } |
307 | 317 | ||
308 | static int mmap_return_errors_v1(void *data, void *state) | 318 | static int mmap_return_errors(void *data, void *state) |
309 | { | 319 | { |
310 | xen_pfn_t *mfnp = data; | ||
311 | struct mmap_batch_state *st = state; | 320 | struct mmap_batch_state *st = state; |
312 | int err = *(st->err++); | ||
313 | 321 | ||
314 | /* | 322 | if (st->version == 1) { |
315 | * V1 encodes the error codes in the 32bit top nibble of the | 323 | xen_pfn_t mfnp = *((xen_pfn_t *) data); |
316 | * mfn (with its known limitations vis-a-vis 64 bit callers). | 324 | if (mfnp & PRIVCMD_MMAPBATCH_MFN_ERROR) |
317 | */ | 325 | return __put_user(mfnp, st->user_mfn++); |
318 | *mfnp |= (err == -ENOENT) ? | 326 | else |
319 | PRIVCMD_MMAPBATCH_PAGED_ERROR : | 327 | st->user_mfn++; |
320 | PRIVCMD_MMAPBATCH_MFN_ERROR; | 328 | } else { /* st->version == 2 */ |
321 | return __put_user(*mfnp, st->user_mfn++); | 329 | int err = *((int *) data); |
330 | if (err) | ||
331 | return __put_user(err, st->user_err++); | ||
332 | else | ||
333 | st->user_err++; | ||
334 | } | ||
335 | |||
336 | return 0; | ||
322 | } | 337 | } |
323 | 338 | ||
324 | /* Allocate pfns that are then mapped with gmfns from foreign domid. Update | 339 | /* Allocate pfns that are then mapped with gmfns from foreign domid. Update |
@@ -357,12 +372,8 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version) | |||
357 | struct vm_area_struct *vma; | 372 | struct vm_area_struct *vma; |
358 | unsigned long nr_pages; | 373 | unsigned long nr_pages; |
359 | LIST_HEAD(pagelist); | 374 | LIST_HEAD(pagelist); |
360 | int *err_array = NULL; | ||
361 | struct mmap_batch_state state; | 375 | struct mmap_batch_state state; |
362 | 376 | ||
363 | if (!xen_initial_domain()) | ||
364 | return -EPERM; | ||
365 | |||
366 | switch (version) { | 377 | switch (version) { |
367 | case 1: | 378 | case 1: |
368 | if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch))) | 379 | if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch))) |
@@ -396,10 +407,12 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version) | |||
396 | goto out; | 407 | goto out; |
397 | } | 408 | } |
398 | 409 | ||
399 | err_array = kcalloc(m.num, sizeof(int), GFP_KERNEL); | 410 | if (version == 2) { |
400 | if (err_array == NULL) { | 411 | /* Zero error array now to only copy back actual errors. */ |
401 | ret = -ENOMEM; | 412 | if (clear_user(m.err, sizeof(int) * m.num)) { |
402 | goto out; | 413 | ret = -EFAULT; |
414 | goto out; | ||
415 | } | ||
403 | } | 416 | } |
404 | 417 | ||
405 | down_write(&mm->mmap_sem); | 418 | down_write(&mm->mmap_sem); |
@@ -427,7 +440,7 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version) | |||
427 | state.va = m.addr; | 440 | state.va = m.addr; |
428 | state.index = 0; | 441 | state.index = 0; |
429 | state.global_error = 0; | 442 | state.global_error = 0; |
430 | state.err = err_array; | 443 | state.version = version; |
431 | 444 | ||
432 | /* mmap_batch_fn guarantees ret == 0 */ | 445 | /* mmap_batch_fn guarantees ret == 0 */ |
433 | BUG_ON(traverse_pages(m.num, sizeof(xen_pfn_t), | 446 | BUG_ON(traverse_pages(m.num, sizeof(xen_pfn_t), |
@@ -435,21 +448,14 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version) | |||
435 | 448 | ||
436 | up_write(&mm->mmap_sem); | 449 | up_write(&mm->mmap_sem); |
437 | 450 | ||
438 | if (version == 1) { | 451 | if (state.global_error) { |
439 | if (state.global_error) { | 452 | /* Write back errors in second pass. */ |
440 | /* Write back errors in second pass. */ | 453 | state.user_mfn = (xen_pfn_t *)m.arr; |
441 | state.user_mfn = (xen_pfn_t *)m.arr; | 454 | state.user_err = m.err; |
442 | state.err = err_array; | 455 | ret = traverse_pages(m.num, sizeof(xen_pfn_t), |
443 | ret = traverse_pages(m.num, sizeof(xen_pfn_t), | 456 | &pagelist, mmap_return_errors, &state); |
444 | &pagelist, mmap_return_errors_v1, &state); | 457 | } else |
445 | } else | 458 | ret = 0; |
446 | ret = 0; | ||
447 | |||
448 | } else if (version == 2) { | ||
449 | ret = __copy_to_user(m.err, err_array, m.num * sizeof(int)); | ||
450 | if (ret) | ||
451 | ret = -EFAULT; | ||
452 | } | ||
453 | 459 | ||
454 | /* If we have not had any EFAULT-like global errors then set the global | 460 | /* If we have not had any EFAULT-like global errors then set the global |
455 | * error to -ENOENT if necessary. */ | 461 | * error to -ENOENT if necessary. */ |
@@ -457,7 +463,6 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version) | |||
457 | ret = -ENOENT; | 463 | ret = -ENOENT; |
458 | 464 | ||
459 | out: | 465 | out: |
460 | kfree(err_array); | ||
461 | free_page_list(&pagelist); | 466 | free_page_list(&pagelist); |
462 | 467 | ||
463 | return ret; | 468 | return ret; |
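Editor's note: the privcmd rework drops the kcalloc'd err_array and writes per-frame status straight back to the caller: in-band in the mfn array for V1, and into the separate err array for V2. A userspace-flavoured sketch of how a V1 caller decodes that in-band status; the two masks are restated from the privcmd uapi header and should be treated as assumptions here:

#include <stdio.h>
#include <stdint.h>

#define PRIVCMD_MMAPBATCH_MFN_ERROR     0xf0000000U
#define PRIVCMD_MMAPBATCH_PAGED_ERROR   0x80000000U

static void decode_v1_status(uint32_t mfn)
{
        uint32_t status = mfn & PRIVCMD_MMAPBATCH_MFN_ERROR;

        if (status == PRIVCMD_MMAPBATCH_PAGED_ERROR)
                printf("frame is paged out, retry later\n");
        else if (status == PRIVCMD_MMAPBATCH_MFN_ERROR)
                printf("mapping this frame failed\n");
        else
                printf("mapped, mfn=%#x\n", mfn);
}

int main(void)
{
        decode_v1_status(0x12345);
        decode_v1_status(0x12345 | PRIVCMD_MMAPBATCH_PAGED_ERROR);
        decode_v1_status(0x12345 | PRIVCMD_MMAPBATCH_MFN_ERROR);
        return 0;
}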
diff --git a/drivers/xen/xen-pciback/pciback.h b/drivers/xen/xen-pciback/pciback.h index a7def010eba3..f72af87640e0 100644 --- a/drivers/xen/xen-pciback/pciback.h +++ b/drivers/xen/xen-pciback/pciback.h | |||
@@ -124,7 +124,7 @@ static inline int xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev, | |||
124 | static inline void xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev, | 124 | static inline void xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev, |
125 | struct pci_dev *dev) | 125 | struct pci_dev *dev) |
126 | { | 126 | { |
127 | if (xen_pcibk_backend && xen_pcibk_backend->free) | 127 | if (xen_pcibk_backend && xen_pcibk_backend->release) |
128 | return xen_pcibk_backend->release(pdev, dev); | 128 | return xen_pcibk_backend->release(pdev, dev); |
129 | } | 129 | } |
130 | 130 | ||
diff --git a/fs/Kconfig b/fs/Kconfig index cfe512fd1caf..780725a463b1 100644 --- a/fs/Kconfig +++ b/fs/Kconfig | |||
@@ -68,16 +68,6 @@ source "fs/quota/Kconfig" | |||
68 | source "fs/autofs4/Kconfig" | 68 | source "fs/autofs4/Kconfig" |
69 | source "fs/fuse/Kconfig" | 69 | source "fs/fuse/Kconfig" |
70 | 70 | ||
71 | config CUSE | ||
72 | tristate "Character device in Userspace support" | ||
73 | depends on FUSE_FS | ||
74 | help | ||
75 | This FUSE extension allows character devices to be | ||
76 | implemented in userspace. | ||
77 | |||
78 | If you want to develop or use userspace character device | ||
79 | based on CUSE, answer Y or M. | ||
80 | |||
81 | config GENERIC_ACL | 71 | config GENERIC_ACL |
82 | bool | 72 | bool |
83 | select FS_POSIX_ACL | 73 | select FS_POSIX_ACL |
diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c index e95b94945d5f..137af4255da6 100644 --- a/fs/f2fs/acl.c +++ b/fs/f2fs/acl.c | |||
@@ -191,15 +191,14 @@ struct posix_acl *f2fs_get_acl(struct inode *inode, int type) | |||
191 | retval = f2fs_getxattr(inode, name_index, "", value, retval); | 191 | retval = f2fs_getxattr(inode, name_index, "", value, retval); |
192 | } | 192 | } |
193 | 193 | ||
194 | if (retval < 0) { | 194 | if (retval > 0) |
195 | if (retval == -ENODATA) | ||
196 | acl = NULL; | ||
197 | else | ||
198 | acl = ERR_PTR(retval); | ||
199 | } else { | ||
200 | acl = f2fs_acl_from_disk(value, retval); | 195 | acl = f2fs_acl_from_disk(value, retval); |
201 | } | 196 | else if (retval == -ENODATA) |
197 | acl = NULL; | ||
198 | else | ||
199 | acl = ERR_PTR(retval); | ||
202 | kfree(value); | 200 | kfree(value); |
201 | |||
203 | if (!IS_ERR(acl)) | 202 | if (!IS_ERR(acl)) |
204 | set_cached_acl(inode, type, acl); | 203 | set_cached_acl(inode, type, acl); |
205 | 204 | ||
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index 6ef36c37e2be..ff3c8439af87 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c | |||
@@ -214,7 +214,6 @@ retry: | |||
214 | goto retry; | 214 | goto retry; |
215 | } | 215 | } |
216 | new->ino = ino; | 216 | new->ino = ino; |
217 | INIT_LIST_HEAD(&new->list); | ||
218 | 217 | ||
219 | /* add new_oentry into list which is sorted by inode number */ | 218 | /* add new_oentry into list which is sorted by inode number */ |
220 | if (orphan) { | 219 | if (orphan) { |
@@ -772,7 +771,7 @@ void init_orphan_info(struct f2fs_sb_info *sbi) | |||
772 | sbi->n_orphans = 0; | 771 | sbi->n_orphans = 0; |
773 | } | 772 | } |
774 | 773 | ||
775 | int create_checkpoint_caches(void) | 774 | int __init create_checkpoint_caches(void) |
776 | { | 775 | { |
777 | orphan_entry_slab = f2fs_kmem_cache_create("f2fs_orphan_entry", | 776 | orphan_entry_slab = f2fs_kmem_cache_create("f2fs_orphan_entry", |
778 | sizeof(struct orphan_inode_entry), NULL); | 777 | sizeof(struct orphan_inode_entry), NULL); |
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 3aa5ce7cab83..7bd22a201125 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c | |||
@@ -547,6 +547,15 @@ redirty_out: | |||
547 | 547 | ||
548 | #define MAX_DESIRED_PAGES_WP 4096 | 548 | #define MAX_DESIRED_PAGES_WP 4096 |
549 | 549 | ||
550 | static int __f2fs_writepage(struct page *page, struct writeback_control *wbc, | ||
551 | void *data) | ||
552 | { | ||
553 | struct address_space *mapping = data; | ||
554 | int ret = mapping->a_ops->writepage(page, wbc); | ||
555 | mapping_set_error(mapping, ret); | ||
556 | return ret; | ||
557 | } | ||
558 | |||
550 | static int f2fs_write_data_pages(struct address_space *mapping, | 559 | static int f2fs_write_data_pages(struct address_space *mapping, |
551 | struct writeback_control *wbc) | 560 | struct writeback_control *wbc) |
552 | { | 561 | { |
@@ -563,7 +572,7 @@ static int f2fs_write_data_pages(struct address_space *mapping, | |||
563 | 572 | ||
564 | if (!S_ISDIR(inode->i_mode)) | 573 | if (!S_ISDIR(inode->i_mode)) |
565 | mutex_lock(&sbi->writepages); | 574 | mutex_lock(&sbi->writepages); |
566 | ret = generic_writepages(mapping, wbc); | 575 | ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping); |
567 | if (!S_ISDIR(inode->i_mode)) | 576 | if (!S_ISDIR(inode->i_mode)) |
568 | mutex_unlock(&sbi->writepages); | 577 | mutex_unlock(&sbi->writepages); |
569 | f2fs_submit_bio(sbi, DATA, (wbc->sync_mode == WB_SYNC_ALL)); | 578 | f2fs_submit_bio(sbi, DATA, (wbc->sync_mode == WB_SYNC_ALL)); |
@@ -689,6 +698,11 @@ static int f2fs_set_data_page_dirty(struct page *page) | |||
689 | return 0; | 698 | return 0; |
690 | } | 699 | } |
691 | 700 | ||
701 | static sector_t f2fs_bmap(struct address_space *mapping, sector_t block) | ||
702 | { | ||
703 | return generic_block_bmap(mapping, block, get_data_block_ro); | ||
704 | } | ||
705 | |||
692 | const struct address_space_operations f2fs_dblock_aops = { | 706 | const struct address_space_operations f2fs_dblock_aops = { |
693 | .readpage = f2fs_read_data_page, | 707 | .readpage = f2fs_read_data_page, |
694 | .readpages = f2fs_read_data_pages, | 708 | .readpages = f2fs_read_data_pages, |
@@ -700,4 +714,5 @@ const struct address_space_operations f2fs_dblock_aops = { | |||
700 | .invalidatepage = f2fs_invalidate_data_page, | 714 | .invalidatepage = f2fs_invalidate_data_page, |
701 | .releasepage = f2fs_release_data_page, | 715 | .releasepage = f2fs_release_data_page, |
702 | .direct_IO = f2fs_direct_IO, | 716 | .direct_IO = f2fs_direct_IO, |
717 | .bmap = f2fs_bmap, | ||
703 | }; | 718 | }; |
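The data.c change above swaps generic_writepages() for write_cache_pages() with a private callback, so per-page write errors are latched on the mapping via mapping_set_error() and surfaced by a later fsync(). A rough sketch of the same pattern, with example_* names standing in for a real filesystem's helpers:

	#include <linux/fs.h>
	#include <linux/pagemap.h>
	#include <linux/writeback.h>

	/* matches the writepage_t signature expected by write_cache_pages() */
	static int example_writepage(struct page *page, struct writeback_control *wbc,
				     void *data)
	{
		struct address_space *mapping = data;
		int ret = mapping->a_ops->writepage(page, wbc);

		mapping_set_error(mapping, ret);	/* remember -EIO/-ENOSPC for fsync() */
		return ret;
	}

	static int example_writepages(struct address_space *mapping,
				      struct writeback_control *wbc)
	{
		/* walk the mapping's dirty pages and write each one via the callback */
		return write_cache_pages(mapping, wbc, example_writepage, mapping);
	}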
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c index 0e0380a588ad..c8c37307b326 100644 --- a/fs/f2fs/debug.c +++ b/fs/f2fs/debug.c | |||
@@ -26,6 +26,7 @@ | |||
26 | 26 | ||
27 | static LIST_HEAD(f2fs_stat_list); | 27 | static LIST_HEAD(f2fs_stat_list); |
28 | static struct dentry *debugfs_root; | 28 | static struct dentry *debugfs_root; |
29 | static DEFINE_MUTEX(f2fs_stat_mutex); | ||
29 | 30 | ||
30 | static void update_general_status(struct f2fs_sb_info *sbi) | 31 | static void update_general_status(struct f2fs_sb_info *sbi) |
31 | { | 32 | { |
@@ -180,18 +181,14 @@ static int stat_show(struct seq_file *s, void *v) | |||
180 | int i = 0; | 181 | int i = 0; |
181 | int j; | 182 | int j; |
182 | 183 | ||
184 | mutex_lock(&f2fs_stat_mutex); | ||
183 | list_for_each_entry_safe(si, next, &f2fs_stat_list, stat_list) { | 185 | list_for_each_entry_safe(si, next, &f2fs_stat_list, stat_list) { |
184 | 186 | ||
185 | mutex_lock(&si->stat_lock); | ||
186 | if (!si->sbi) { | ||
187 | mutex_unlock(&si->stat_lock); | ||
188 | continue; | ||
189 | } | ||
190 | update_general_status(si->sbi); | 187 | update_general_status(si->sbi); |
191 | 188 | ||
192 | seq_printf(s, "\n=====[ partition info. #%d ]=====\n", i++); | 189 | seq_printf(s, "\n=====[ partition info. #%d ]=====\n", i++); |
193 | seq_printf(s, "[SB: 1] [CP: 2] [NAT: %d] [SIT: %d] ", | 190 | seq_printf(s, "[SB: 1] [CP: 2] [SIT: %d] [NAT: %d] ", |
194 | si->nat_area_segs, si->sit_area_segs); | 191 | si->sit_area_segs, si->nat_area_segs); |
195 | seq_printf(s, "[SSA: %d] [MAIN: %d", | 192 | seq_printf(s, "[SSA: %d] [MAIN: %d", |
196 | si->ssa_area_segs, si->main_area_segs); | 193 | si->ssa_area_segs, si->main_area_segs); |
197 | seq_printf(s, "(OverProv:%d Resv:%d)]\n\n", | 194 | seq_printf(s, "(OverProv:%d Resv:%d)]\n\n", |
@@ -286,8 +283,8 @@ static int stat_show(struct seq_file *s, void *v) | |||
286 | seq_printf(s, "\nMemory: %u KB = static: %u + cached: %u\n", | 283 | seq_printf(s, "\nMemory: %u KB = static: %u + cached: %u\n", |
287 | (si->base_mem + si->cache_mem) >> 10, | 284 | (si->base_mem + si->cache_mem) >> 10, |
288 | si->base_mem >> 10, si->cache_mem >> 10); | 285 | si->base_mem >> 10, si->cache_mem >> 10); |
289 | mutex_unlock(&si->stat_lock); | ||
290 | } | 286 | } |
287 | mutex_unlock(&f2fs_stat_mutex); | ||
291 | return 0; | 288 | return 0; |
292 | } | 289 | } |
293 | 290 | ||
@@ -303,7 +300,7 @@ static const struct file_operations stat_fops = { | |||
303 | .release = single_release, | 300 | .release = single_release, |
304 | }; | 301 | }; |
305 | 302 | ||
306 | static int init_stats(struct f2fs_sb_info *sbi) | 303 | int f2fs_build_stats(struct f2fs_sb_info *sbi) |
307 | { | 304 | { |
308 | struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); | 305 | struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); |
309 | struct f2fs_stat_info *si; | 306 | struct f2fs_stat_info *si; |
@@ -313,9 +310,6 @@ static int init_stats(struct f2fs_sb_info *sbi) | |||
313 | return -ENOMEM; | 310 | return -ENOMEM; |
314 | 311 | ||
315 | si = sbi->stat_info; | 312 | si = sbi->stat_info; |
316 | mutex_init(&si->stat_lock); | ||
317 | list_add_tail(&si->stat_list, &f2fs_stat_list); | ||
318 | |||
319 | si->all_area_segs = le32_to_cpu(raw_super->segment_count); | 313 | si->all_area_segs = le32_to_cpu(raw_super->segment_count); |
320 | si->sit_area_segs = le32_to_cpu(raw_super->segment_count_sit); | 314 | si->sit_area_segs = le32_to_cpu(raw_super->segment_count_sit); |
321 | si->nat_area_segs = le32_to_cpu(raw_super->segment_count_nat); | 315 | si->nat_area_segs = le32_to_cpu(raw_super->segment_count_nat); |
@@ -325,21 +319,11 @@ static int init_stats(struct f2fs_sb_info *sbi) | |||
325 | si->main_area_zones = si->main_area_sections / | 319 | si->main_area_zones = si->main_area_sections / |
326 | le32_to_cpu(raw_super->secs_per_zone); | 320 | le32_to_cpu(raw_super->secs_per_zone); |
327 | si->sbi = sbi; | 321 | si->sbi = sbi; |
328 | return 0; | ||
329 | } | ||
330 | 322 | ||
331 | int f2fs_build_stats(struct f2fs_sb_info *sbi) | 323 | mutex_lock(&f2fs_stat_mutex); |
332 | { | 324 | list_add_tail(&si->stat_list, &f2fs_stat_list); |
333 | int retval; | 325 | mutex_unlock(&f2fs_stat_mutex); |
334 | |||
335 | retval = init_stats(sbi); | ||
336 | if (retval) | ||
337 | return retval; | ||
338 | |||
339 | if (!debugfs_root) | ||
340 | debugfs_root = debugfs_create_dir("f2fs", NULL); | ||
341 | 326 | ||
342 | debugfs_create_file("status", S_IRUGO, debugfs_root, NULL, &stat_fops); | ||
343 | return 0; | 327 | return 0; |
344 | } | 328 | } |
345 | 329 | ||
@@ -347,14 +331,22 @@ void f2fs_destroy_stats(struct f2fs_sb_info *sbi) | |||
347 | { | 331 | { |
348 | struct f2fs_stat_info *si = sbi->stat_info; | 332 | struct f2fs_stat_info *si = sbi->stat_info; |
349 | 333 | ||
334 | mutex_lock(&f2fs_stat_mutex); | ||
350 | list_del(&si->stat_list); | 335 | list_del(&si->stat_list); |
351 | mutex_lock(&si->stat_lock); | 336 | mutex_unlock(&f2fs_stat_mutex); |
352 | si->sbi = NULL; | 337 | |
353 | mutex_unlock(&si->stat_lock); | ||
354 | kfree(sbi->stat_info); | 338 | kfree(sbi->stat_info); |
355 | } | 339 | } |
356 | 340 | ||
357 | void destroy_root_stats(void) | 341 | void __init f2fs_create_root_stats(void) |
342 | { | ||
343 | debugfs_root = debugfs_create_dir("f2fs", NULL); | ||
344 | if (debugfs_root) | ||
345 | debugfs_create_file("status", S_IRUGO, debugfs_root, | ||
346 | NULL, &stat_fops); | ||
347 | } | ||
348 | |||
349 | void f2fs_destroy_root_stats(void) | ||
358 | { | 350 | { |
359 | debugfs_remove_recursive(debugfs_root); | 351 | debugfs_remove_recursive(debugfs_root); |
360 | debugfs_root = NULL; | 352 | debugfs_root = NULL; |
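The debug.c rework above drops the per-instance stat_lock in favour of one global f2fs_stat_mutex that covers both the list walk in stat_show() and the add/remove in build/destroy, so an entry can no longer vanish in the middle of a walk. Reduced to a hedged sketch with illustrative names:

	#include <linux/list.h>
	#include <linux/mutex.h>

	struct example_item {
		struct list_head list;
		/* ... per-instance statistics ... */
	};

	static LIST_HEAD(example_list);
	static DEFINE_MUTEX(example_lock);	/* guards list membership and walks */

	static void example_register(struct example_item *it)
	{
		mutex_lock(&example_lock);
		list_add_tail(&it->list, &example_list);
		mutex_unlock(&example_lock);
	}

	static void example_unregister(struct example_item *it)
	{
		mutex_lock(&example_lock);
		list_del(&it->list);
		mutex_unlock(&example_lock);
	}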
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c index 951ed52748f6..989980e16d0b 100644 --- a/fs/f2fs/dir.c +++ b/fs/f2fs/dir.c | |||
@@ -503,7 +503,7 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page, | |||
503 | } | 503 | } |
504 | 504 | ||
505 | if (inode) { | 505 | if (inode) { |
506 | inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME; | 506 | inode->i_ctime = CURRENT_TIME; |
507 | drop_nlink(inode); | 507 | drop_nlink(inode); |
508 | if (S_ISDIR(inode->i_mode)) { | 508 | if (S_ISDIR(inode->i_mode)) { |
509 | drop_nlink(inode); | 509 | drop_nlink(inode); |
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 13c6dfbb7183..c8e2d751ef9c 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h | |||
@@ -211,11 +211,11 @@ struct dnode_of_data { | |||
211 | static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode, | 211 | static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode, |
212 | struct page *ipage, struct page *npage, nid_t nid) | 212 | struct page *ipage, struct page *npage, nid_t nid) |
213 | { | 213 | { |
214 | memset(dn, 0, sizeof(*dn)); | ||
214 | dn->inode = inode; | 215 | dn->inode = inode; |
215 | dn->inode_page = ipage; | 216 | dn->inode_page = ipage; |
216 | dn->node_page = npage; | 217 | dn->node_page = npage; |
217 | dn->nid = nid; | 218 | dn->nid = nid; |
218 | dn->inode_page_locked = 0; | ||
219 | } | 219 | } |
220 | 220 | ||
221 | /* | 221 | /* |
@@ -877,6 +877,8 @@ bool f2fs_empty_dir(struct inode *); | |||
877 | * super.c | 877 | * super.c |
878 | */ | 878 | */ |
879 | int f2fs_sync_fs(struct super_block *, int); | 879 | int f2fs_sync_fs(struct super_block *, int); |
880 | extern __printf(3, 4) | ||
881 | void f2fs_msg(struct super_block *, const char *, const char *, ...); | ||
880 | 882 | ||
881 | /* | 883 | /* |
882 | * hash.c | 884 | * hash.c |
@@ -912,7 +914,7 @@ int restore_node_summary(struct f2fs_sb_info *, unsigned int, | |||
912 | void flush_nat_entries(struct f2fs_sb_info *); | 914 | void flush_nat_entries(struct f2fs_sb_info *); |
913 | int build_node_manager(struct f2fs_sb_info *); | 915 | int build_node_manager(struct f2fs_sb_info *); |
914 | void destroy_node_manager(struct f2fs_sb_info *); | 916 | void destroy_node_manager(struct f2fs_sb_info *); |
915 | int create_node_manager_caches(void); | 917 | int __init create_node_manager_caches(void); |
916 | void destroy_node_manager_caches(void); | 918 | void destroy_node_manager_caches(void); |
917 | 919 | ||
918 | /* | 920 | /* |
@@ -964,7 +966,7 @@ void sync_dirty_dir_inodes(struct f2fs_sb_info *); | |||
964 | void block_operations(struct f2fs_sb_info *); | 966 | void block_operations(struct f2fs_sb_info *); |
965 | void write_checkpoint(struct f2fs_sb_info *, bool, bool); | 967 | void write_checkpoint(struct f2fs_sb_info *, bool, bool); |
966 | void init_orphan_info(struct f2fs_sb_info *); | 968 | void init_orphan_info(struct f2fs_sb_info *); |
967 | int create_checkpoint_caches(void); | 969 | int __init create_checkpoint_caches(void); |
968 | void destroy_checkpoint_caches(void); | 970 | void destroy_checkpoint_caches(void); |
969 | 971 | ||
970 | /* | 972 | /* |
@@ -984,9 +986,9 @@ int do_write_data_page(struct page *); | |||
984 | int start_gc_thread(struct f2fs_sb_info *); | 986 | int start_gc_thread(struct f2fs_sb_info *); |
985 | void stop_gc_thread(struct f2fs_sb_info *); | 987 | void stop_gc_thread(struct f2fs_sb_info *); |
986 | block_t start_bidx_of_node(unsigned int); | 988 | block_t start_bidx_of_node(unsigned int); |
987 | int f2fs_gc(struct f2fs_sb_info *, int); | 989 | int f2fs_gc(struct f2fs_sb_info *); |
988 | void build_gc_manager(struct f2fs_sb_info *); | 990 | void build_gc_manager(struct f2fs_sb_info *); |
989 | int create_gc_caches(void); | 991 | int __init create_gc_caches(void); |
990 | void destroy_gc_caches(void); | 992 | void destroy_gc_caches(void); |
991 | 993 | ||
992 | /* | 994 | /* |
@@ -1058,7 +1060,8 @@ struct f2fs_stat_info { | |||
1058 | 1060 | ||
1059 | int f2fs_build_stats(struct f2fs_sb_info *); | 1061 | int f2fs_build_stats(struct f2fs_sb_info *); |
1060 | void f2fs_destroy_stats(struct f2fs_sb_info *); | 1062 | void f2fs_destroy_stats(struct f2fs_sb_info *); |
1061 | void destroy_root_stats(void); | 1063 | void __init f2fs_create_root_stats(void); |
1064 | void f2fs_destroy_root_stats(void); | ||
1062 | #else | 1065 | #else |
1063 | #define stat_inc_call_count(si) | 1066 | #define stat_inc_call_count(si) |
1064 | #define stat_inc_seg_count(si, type) | 1067 | #define stat_inc_seg_count(si, type) |
@@ -1068,7 +1071,8 @@ void destroy_root_stats(void); | |||
1068 | 1071 | ||
1069 | static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; } | 1072 | static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; } |
1070 | static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { } | 1073 | static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { } |
1071 | static inline void destroy_root_stats(void) { } | 1074 | static inline void __init f2fs_create_root_stats(void) { } |
1075 | static inline void f2fs_destroy_root_stats(void) { } | ||
1072 | #endif | 1076 | #endif |
1073 | 1077 | ||
1074 | extern const struct file_operations f2fs_dir_operations; | 1078 | extern const struct file_operations f2fs_dir_operations; |
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 7f9ea9271ebe..3191b52aafb0 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c | |||
@@ -96,8 +96,9 @@ out: | |||
96 | } | 96 | } |
97 | 97 | ||
98 | static const struct vm_operations_struct f2fs_file_vm_ops = { | 98 | static const struct vm_operations_struct f2fs_file_vm_ops = { |
99 | .fault = filemap_fault, | 99 | .fault = filemap_fault, |
100 | .page_mkwrite = f2fs_vm_page_mkwrite, | 100 | .page_mkwrite = f2fs_vm_page_mkwrite, |
101 | .remap_pages = generic_file_remap_pages, | ||
101 | }; | 102 | }; |
102 | 103 | ||
103 | static int need_to_sync_dir(struct f2fs_sb_info *sbi, struct inode *inode) | 104 | static int need_to_sync_dir(struct f2fs_sb_info *sbi, struct inode *inode) |
@@ -137,6 +138,9 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) | |||
137 | if (ret) | 138 | if (ret) |
138 | return ret; | 139 | return ret; |
139 | 140 | ||
141 | /* guarantee free sections for fsync */ | ||
142 | f2fs_balance_fs(sbi); | ||
143 | |||
140 | mutex_lock(&inode->i_mutex); | 144 | mutex_lock(&inode->i_mutex); |
141 | 145 | ||
142 | if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) | 146 | if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) |
@@ -407,6 +411,8 @@ int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end) | |||
407 | struct dnode_of_data dn; | 411 | struct dnode_of_data dn; |
408 | struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); | 412 | struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); |
409 | 413 | ||
414 | f2fs_balance_fs(sbi); | ||
415 | |||
410 | mutex_lock_op(sbi, DATA_TRUNC); | 416 | mutex_lock_op(sbi, DATA_TRUNC); |
411 | set_new_dnode(&dn, inode, NULL, NULL, 0); | 417 | set_new_dnode(&dn, inode, NULL, NULL, 0); |
412 | err = get_dnode_of_data(&dn, index, RDONLY_NODE); | 418 | err = get_dnode_of_data(&dn, index, RDONLY_NODE); |
@@ -534,7 +540,6 @@ static long f2fs_fallocate(struct file *file, int mode, | |||
534 | loff_t offset, loff_t len) | 540 | loff_t offset, loff_t len) |
535 | { | 541 | { |
536 | struct inode *inode = file->f_path.dentry->d_inode; | 542 | struct inode *inode = file->f_path.dentry->d_inode; |
537 | struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); | ||
538 | long ret; | 543 | long ret; |
539 | 544 | ||
540 | if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) | 545 | if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) |
@@ -545,7 +550,10 @@ static long f2fs_fallocate(struct file *file, int mode, | |||
545 | else | 550 | else |
546 | ret = expand_inode_data(inode, offset, len, mode); | 551 | ret = expand_inode_data(inode, offset, len, mode); |
547 | 552 | ||
548 | f2fs_balance_fs(sbi); | 553 | if (!ret) { |
554 | inode->i_mtime = inode->i_ctime = CURRENT_TIME; | ||
555 | mark_inode_dirty(inode); | ||
556 | } | ||
549 | return ret; | 557 | return ret; |
550 | } | 558 | } |
551 | 559 | ||
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index b0ec721e984a..c386910dacc5 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c | |||
@@ -78,7 +78,7 @@ static int gc_thread_func(void *data) | |||
78 | 78 | ||
79 | sbi->bg_gc++; | 79 | sbi->bg_gc++; |
80 | 80 | ||
81 | if (f2fs_gc(sbi, 1) == GC_NONE) | 81 | if (f2fs_gc(sbi) == GC_NONE) |
82 | wait_ms = GC_THREAD_NOGC_SLEEP_TIME; | 82 | wait_ms = GC_THREAD_NOGC_SLEEP_TIME; |
83 | else if (wait_ms == GC_THREAD_NOGC_SLEEP_TIME) | 83 | else if (wait_ms == GC_THREAD_NOGC_SLEEP_TIME) |
84 | wait_ms = GC_THREAD_MAX_SLEEP_TIME; | 84 | wait_ms = GC_THREAD_MAX_SLEEP_TIME; |
@@ -424,7 +424,11 @@ next_step: | |||
424 | } | 424 | } |
425 | 425 | ||
426 | /* | 426 | /* |
427 | * Calculate start block index that this node page contains | 427 | * Calculate start block index indicating the given node offset. |
428 | * Be careful: the caller must pass a node offset that refers only to direct | ||
429 | * node blocks. If a node offset that points to any other type of node block, | ||
430 | * such as an indirect or double indirect node block, is given, it is a bug | ||
431 | * in the caller. | ||
428 | */ | 432 | */ |
429 | block_t start_bidx_of_node(unsigned int node_ofs) | 433 | block_t start_bidx_of_node(unsigned int node_ofs) |
430 | { | 434 | { |
@@ -651,62 +655,44 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno, | |||
651 | return ret; | 655 | return ret; |
652 | } | 656 | } |
653 | 657 | ||
654 | int f2fs_gc(struct f2fs_sb_info *sbi, int nGC) | 658 | int f2fs_gc(struct f2fs_sb_info *sbi) |
655 | { | 659 | { |
656 | unsigned int segno; | ||
657 | int old_free_secs, cur_free_secs; | ||
658 | int gc_status, nfree; | ||
659 | struct list_head ilist; | 660 | struct list_head ilist; |
661 | unsigned int segno, i; | ||
660 | int gc_type = BG_GC; | 662 | int gc_type = BG_GC; |
663 | int gc_status = GC_NONE; | ||
661 | 664 | ||
662 | INIT_LIST_HEAD(&ilist); | 665 | INIT_LIST_HEAD(&ilist); |
663 | gc_more: | 666 | gc_more: |
664 | nfree = 0; | 667 | if (!(sbi->sb->s_flags & MS_ACTIVE)) |
665 | gc_status = GC_NONE; | 668 | goto stop; |
666 | 669 | ||
667 | if (has_not_enough_free_secs(sbi)) | 670 | if (has_not_enough_free_secs(sbi)) |
668 | old_free_secs = reserved_sections(sbi); | 671 | gc_type = FG_GC; |
669 | else | ||
670 | old_free_secs = free_sections(sbi); | ||
671 | |||
672 | while (sbi->sb->s_flags & MS_ACTIVE) { | ||
673 | int i; | ||
674 | if (has_not_enough_free_secs(sbi)) | ||
675 | gc_type = FG_GC; | ||
676 | 672 | ||
677 | cur_free_secs = free_sections(sbi) + nfree; | 673 | if (!__get_victim(sbi, &segno, gc_type, NO_CHECK_TYPE)) |
674 | goto stop; | ||
678 | 675 | ||
679 | /* We got free space successfully. */ | 676 | for (i = 0; i < sbi->segs_per_sec; i++) { |
680 | if (nGC < cur_free_secs - old_free_secs) | 677 | /* |
681 | break; | 678 | * do_garbage_collect will give us three gc_status: |
682 | 679 | * GC_ERROR, GC_DONE, and GC_BLOCKED. | |
683 | if (!__get_victim(sbi, &segno, gc_type, NO_CHECK_TYPE)) | 680 | * If GC is finished uncleanly, we have to return |
681 | * the victim to dirty segment list. | ||
682 | */ | ||
683 | gc_status = do_garbage_collect(sbi, segno + i, &ilist, gc_type); | ||
684 | if (gc_status != GC_DONE) | ||
684 | break; | 685 | break; |
685 | |||
686 | for (i = 0; i < sbi->segs_per_sec; i++) { | ||
687 | /* | ||
688 | * do_garbage_collect will give us three gc_status: | ||
689 | * GC_ERROR, GC_DONE, and GC_BLOCKED. | ||
690 | * If GC is finished uncleanly, we have to return | ||
691 | * the victim to dirty segment list. | ||
692 | */ | ||
693 | gc_status = do_garbage_collect(sbi, segno + i, | ||
694 | &ilist, gc_type); | ||
695 | if (gc_status != GC_DONE) | ||
696 | goto stop; | ||
697 | nfree++; | ||
698 | } | ||
699 | } | 686 | } |
700 | stop: | 687 | if (has_not_enough_free_secs(sbi)) { |
701 | if (has_not_enough_free_secs(sbi) || gc_status == GC_BLOCKED) { | ||
702 | write_checkpoint(sbi, (gc_status == GC_BLOCKED), false); | 688 | write_checkpoint(sbi, (gc_status == GC_BLOCKED), false); |
703 | if (nfree) | 689 | if (has_not_enough_free_secs(sbi)) |
704 | goto gc_more; | 690 | goto gc_more; |
705 | } | 691 | } |
692 | stop: | ||
706 | mutex_unlock(&sbi->gc_mutex); | 693 | mutex_unlock(&sbi->gc_mutex); |
707 | 694 | ||
708 | put_gc_inode(&ilist); | 695 | put_gc_inode(&ilist); |
709 | BUG_ON(!list_empty(&ilist)); | ||
710 | return gc_status; | 696 | return gc_status; |
711 | } | 697 | } |
712 | 698 | ||
@@ -715,7 +701,7 @@ void build_gc_manager(struct f2fs_sb_info *sbi) | |||
715 | DIRTY_I(sbi)->v_ops = &default_v_ops; | 701 | DIRTY_I(sbi)->v_ops = &default_v_ops; |
716 | } | 702 | } |
717 | 703 | ||
718 | int create_gc_caches(void) | 704 | int __init create_gc_caches(void) |
719 | { | 705 | { |
720 | winode_slab = f2fs_kmem_cache_create("f2fs_gc_inodes", | 706 | winode_slab = f2fs_kmem_cache_create("f2fs_gc_inodes", |
721 | sizeof(struct inode_entry), NULL); | 707 | sizeof(struct inode_entry), NULL); |
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c index bf20b4d03214..794241777322 100644 --- a/fs/f2fs/inode.c +++ b/fs/f2fs/inode.c | |||
@@ -217,6 +217,9 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc) | |||
217 | inode->i_ino == F2FS_META_INO(sbi)) | 217 | inode->i_ino == F2FS_META_INO(sbi)) |
218 | return 0; | 218 | return 0; |
219 | 219 | ||
220 | if (wbc) | ||
221 | f2fs_balance_fs(sbi); | ||
222 | |||
220 | node_page = get_node_page(sbi, inode->i_ino); | 223 | node_page = get_node_page(sbi, inode->i_ino); |
221 | if (IS_ERR(node_page)) | 224 | if (IS_ERR(node_page)) |
222 | return PTR_ERR(node_page); | 225 | return PTR_ERR(node_page); |
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index 5066bfd256c9..9bda63c9c166 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c | |||
@@ -1124,6 +1124,12 @@ static int f2fs_write_node_page(struct page *page, | |||
1124 | return 0; | 1124 | return 0; |
1125 | } | 1125 | } |
1126 | 1126 | ||
1127 | /* | ||
1128 | * It is very important to gather dirty pages and write at once, so that we can | ||
1129 | * submit a big bio without interfering with other data writes. | ||
1130 | * By default, 512 pages (2MB), a segment size, is quite reasonable. | ||
1131 | */ | ||
1132 | #define COLLECT_DIRTY_NODES 512 | ||
1127 | static int f2fs_write_node_pages(struct address_space *mapping, | 1133 | static int f2fs_write_node_pages(struct address_space *mapping, |
1128 | struct writeback_control *wbc) | 1134 | struct writeback_control *wbc) |
1129 | { | 1135 | { |
@@ -1131,17 +1137,16 @@ static int f2fs_write_node_pages(struct address_space *mapping, | |||
1131 | struct block_device *bdev = sbi->sb->s_bdev; | 1137 | struct block_device *bdev = sbi->sb->s_bdev; |
1132 | long nr_to_write = wbc->nr_to_write; | 1138 | long nr_to_write = wbc->nr_to_write; |
1133 | 1139 | ||
1134 | if (wbc->for_kupdate) | 1140 | /* First check balancing cached NAT entries */ |
1135 | return 0; | ||
1136 | |||
1137 | if (get_pages(sbi, F2FS_DIRTY_NODES) == 0) | ||
1138 | return 0; | ||
1139 | |||
1140 | if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK)) { | 1141 | if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK)) { |
1141 | write_checkpoint(sbi, false, false); | 1142 | write_checkpoint(sbi, false, false); |
1142 | return 0; | 1143 | return 0; |
1143 | } | 1144 | } |
1144 | 1145 | ||
1146 | /* collect a number of dirty node pages and write together */ | ||
1147 | if (get_pages(sbi, F2FS_DIRTY_NODES) < COLLECT_DIRTY_NODES) | ||
1148 | return 0; | ||
1149 | |||
1145 | /* if mounting is failed, skip writing node pages */ | 1150 | /* if mounting is failed, skip writing node pages */ |
1146 | wbc->nr_to_write = bio_get_nr_vecs(bdev); | 1151 | wbc->nr_to_write = bio_get_nr_vecs(bdev); |
1147 | sync_node_pages(sbi, 0, wbc); | 1152 | sync_node_pages(sbi, 0, wbc); |
@@ -1732,7 +1737,7 @@ void destroy_node_manager(struct f2fs_sb_info *sbi) | |||
1732 | kfree(nm_i); | 1737 | kfree(nm_i); |
1733 | } | 1738 | } |
1734 | 1739 | ||
1735 | int create_node_manager_caches(void) | 1740 | int __init create_node_manager_caches(void) |
1736 | { | 1741 | { |
1737 | nat_entry_slab = f2fs_kmem_cache_create("nat_entry", | 1742 | nat_entry_slab = f2fs_kmem_cache_create("nat_entry", |
1738 | sizeof(struct nat_entry), NULL); | 1743 | sizeof(struct nat_entry), NULL); |
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c index b571fee677d5..f42e4060b399 100644 --- a/fs/f2fs/recovery.c +++ b/fs/f2fs/recovery.c | |||
@@ -67,7 +67,7 @@ static int recover_dentry(struct page *ipage, struct inode *inode) | |||
67 | kunmap(page); | 67 | kunmap(page); |
68 | f2fs_put_page(page, 0); | 68 | f2fs_put_page(page, 0); |
69 | } else { | 69 | } else { |
70 | f2fs_add_link(&dent, inode); | 70 | err = f2fs_add_link(&dent, inode); |
71 | } | 71 | } |
72 | iput(dir); | 72 | iput(dir); |
73 | out: | 73 | out: |
@@ -151,7 +151,6 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head) | |||
151 | goto out; | 151 | goto out; |
152 | } | 152 | } |
153 | 153 | ||
154 | INIT_LIST_HEAD(&entry->list); | ||
155 | list_add_tail(&entry->list, head); | 154 | list_add_tail(&entry->list, head); |
156 | entry->blkaddr = blkaddr; | 155 | entry->blkaddr = blkaddr; |
157 | } | 156 | } |
@@ -174,10 +173,9 @@ out: | |||
174 | static void destroy_fsync_dnodes(struct f2fs_sb_info *sbi, | 173 | static void destroy_fsync_dnodes(struct f2fs_sb_info *sbi, |
175 | struct list_head *head) | 174 | struct list_head *head) |
176 | { | 175 | { |
177 | struct list_head *this; | 176 | struct fsync_inode_entry *entry, *tmp; |
178 | struct fsync_inode_entry *entry; | 177 | |
179 | list_for_each(this, head) { | 178 | list_for_each_entry_safe(entry, tmp, head, list) { |
180 | entry = list_entry(this, struct fsync_inode_entry, list); | ||
181 | iput(entry->inode); | 179 | iput(entry->inode); |
182 | list_del(&entry->list); | 180 | list_del(&entry->list); |
183 | kmem_cache_free(fsync_entry_slab, entry); | 181 | kmem_cache_free(fsync_entry_slab, entry); |
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index de6240922b0a..4b0099066582 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c | |||
@@ -31,7 +31,7 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi) | |||
31 | */ | 31 | */ |
32 | if (has_not_enough_free_secs(sbi)) { | 32 | if (has_not_enough_free_secs(sbi)) { |
33 | mutex_lock(&sbi->gc_mutex); | 33 | mutex_lock(&sbi->gc_mutex); |
34 | f2fs_gc(sbi, 1); | 34 | f2fs_gc(sbi); |
35 | } | 35 | } |
36 | } | 36 | } |
37 | 37 | ||
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 08a94c814bdc..37fad04c8669 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c | |||
@@ -53,6 +53,18 @@ static match_table_t f2fs_tokens = { | |||
53 | {Opt_err, NULL}, | 53 | {Opt_err, NULL}, |
54 | }; | 54 | }; |
55 | 55 | ||
56 | void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...) | ||
57 | { | ||
58 | struct va_format vaf; | ||
59 | va_list args; | ||
60 | |||
61 | va_start(args, fmt); | ||
62 | vaf.fmt = fmt; | ||
63 | vaf.va = &args; | ||
64 | printk("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf); | ||
65 | va_end(args); | ||
66 | } | ||
67 | |||
56 | static void init_once(void *foo) | 68 | static void init_once(void *foo) |
57 | { | 69 | { |
58 | struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo; | 70 | struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo; |
@@ -125,6 +137,8 @@ int f2fs_sync_fs(struct super_block *sb, int sync) | |||
125 | 137 | ||
126 | if (sync) | 138 | if (sync) |
127 | write_checkpoint(sbi, false, false); | 139 | write_checkpoint(sbi, false, false); |
140 | else | ||
141 | f2fs_balance_fs(sbi); | ||
128 | 142 | ||
129 | return 0; | 143 | return 0; |
130 | } | 144 | } |
@@ -247,7 +261,8 @@ static const struct export_operations f2fs_export_ops = { | |||
247 | .get_parent = f2fs_get_parent, | 261 | .get_parent = f2fs_get_parent, |
248 | }; | 262 | }; |
249 | 263 | ||
250 | static int parse_options(struct f2fs_sb_info *sbi, char *options) | 264 | static int parse_options(struct super_block *sb, struct f2fs_sb_info *sbi, |
265 | char *options) | ||
251 | { | 266 | { |
252 | substring_t args[MAX_OPT_ARGS]; | 267 | substring_t args[MAX_OPT_ARGS]; |
253 | char *p; | 268 | char *p; |
@@ -286,7 +301,8 @@ static int parse_options(struct f2fs_sb_info *sbi, char *options) | |||
286 | break; | 301 | break; |
287 | #else | 302 | #else |
288 | case Opt_nouser_xattr: | 303 | case Opt_nouser_xattr: |
289 | pr_info("nouser_xattr options not supported\n"); | 304 | f2fs_msg(sb, KERN_INFO, |
305 | "nouser_xattr options not supported"); | ||
290 | break; | 306 | break; |
291 | #endif | 307 | #endif |
292 | #ifdef CONFIG_F2FS_FS_POSIX_ACL | 308 | #ifdef CONFIG_F2FS_FS_POSIX_ACL |
@@ -295,7 +311,7 @@ static int parse_options(struct f2fs_sb_info *sbi, char *options) | |||
295 | break; | 311 | break; |
296 | #else | 312 | #else |
297 | case Opt_noacl: | 313 | case Opt_noacl: |
298 | pr_info("noacl options not supported\n"); | 314 | f2fs_msg(sb, KERN_INFO, "noacl options not supported"); |
299 | break; | 315 | break; |
300 | #endif | 316 | #endif |
301 | case Opt_active_logs: | 317 | case Opt_active_logs: |
@@ -309,8 +325,9 @@ static int parse_options(struct f2fs_sb_info *sbi, char *options) | |||
309 | set_opt(sbi, DISABLE_EXT_IDENTIFY); | 325 | set_opt(sbi, DISABLE_EXT_IDENTIFY); |
310 | break; | 326 | break; |
311 | default: | 327 | default: |
312 | pr_err("Unrecognized mount option \"%s\" or missing value\n", | 328 | f2fs_msg(sb, KERN_ERR, |
313 | p); | 329 | "Unrecognized mount option \"%s\" or missing value", |
330 | p); | ||
314 | return -EINVAL; | 331 | return -EINVAL; |
315 | } | 332 | } |
316 | } | 333 | } |
@@ -337,23 +354,36 @@ static loff_t max_file_size(unsigned bits) | |||
337 | return result; | 354 | return result; |
338 | } | 355 | } |
339 | 356 | ||
340 | static int sanity_check_raw_super(struct f2fs_super_block *raw_super) | 357 | static int sanity_check_raw_super(struct super_block *sb, |
358 | struct f2fs_super_block *raw_super) | ||
341 | { | 359 | { |
342 | unsigned int blocksize; | 360 | unsigned int blocksize; |
343 | 361 | ||
344 | if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) | 362 | if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) { |
363 | f2fs_msg(sb, KERN_INFO, | ||
364 | "Magic Mismatch, valid(0x%x) - read(0x%x)", | ||
365 | F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic)); | ||
345 | return 1; | 366 | return 1; |
367 | } | ||
346 | 368 | ||
347 | /* Currently, support only 4KB block size */ | 369 | /* Currently, support only 4KB block size */ |
348 | blocksize = 1 << le32_to_cpu(raw_super->log_blocksize); | 370 | blocksize = 1 << le32_to_cpu(raw_super->log_blocksize); |
349 | if (blocksize != PAGE_CACHE_SIZE) | 371 | if (blocksize != PAGE_CACHE_SIZE) { |
372 | f2fs_msg(sb, KERN_INFO, | ||
373 | "Invalid blocksize (%u), supports only 4KB\n", | ||
374 | blocksize); | ||
350 | return 1; | 375 | return 1; |
376 | } | ||
351 | if (le32_to_cpu(raw_super->log_sectorsize) != | 377 | if (le32_to_cpu(raw_super->log_sectorsize) != |
352 | F2FS_LOG_SECTOR_SIZE) | 378 | F2FS_LOG_SECTOR_SIZE) { |
379 | f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize"); | ||
353 | return 1; | 380 | return 1; |
381 | } | ||
354 | if (le32_to_cpu(raw_super->log_sectors_per_block) != | 382 | if (le32_to_cpu(raw_super->log_sectors_per_block) != |
355 | F2FS_LOG_SECTORS_PER_BLOCK) | 383 | F2FS_LOG_SECTORS_PER_BLOCK) { |
384 | f2fs_msg(sb, KERN_INFO, "Invalid log sectors per block"); | ||
356 | return 1; | 385 | return 1; |
386 | } | ||
357 | return 0; | 387 | return 0; |
358 | } | 388 | } |
359 | 389 | ||
@@ -413,14 +443,17 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) | |||
413 | if (!sbi) | 443 | if (!sbi) |
414 | return -ENOMEM; | 444 | return -ENOMEM; |
415 | 445 | ||
416 | /* set a temporary block size */ | 446 | /* set a block size */ |
417 | if (!sb_set_blocksize(sb, F2FS_BLKSIZE)) | 447 | if (!sb_set_blocksize(sb, F2FS_BLKSIZE)) { |
448 | f2fs_msg(sb, KERN_ERR, "unable to set blocksize"); | ||
418 | goto free_sbi; | 449 | goto free_sbi; |
450 | } | ||
419 | 451 | ||
420 | /* read f2fs raw super block */ | 452 | /* read f2fs raw super block */ |
421 | raw_super_buf = sb_bread(sb, 0); | 453 | raw_super_buf = sb_bread(sb, 0); |
422 | if (!raw_super_buf) { | 454 | if (!raw_super_buf) { |
423 | err = -EIO; | 455 | err = -EIO; |
456 | f2fs_msg(sb, KERN_ERR, "unable to read superblock"); | ||
424 | goto free_sbi; | 457 | goto free_sbi; |
425 | } | 458 | } |
426 | raw_super = (struct f2fs_super_block *) | 459 | raw_super = (struct f2fs_super_block *) |
@@ -438,12 +471,14 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) | |||
438 | set_opt(sbi, POSIX_ACL); | 471 | set_opt(sbi, POSIX_ACL); |
439 | #endif | 472 | #endif |
440 | /* parse mount options */ | 473 | /* parse mount options */ |
441 | if (parse_options(sbi, (char *)data)) | 474 | if (parse_options(sb, sbi, (char *)data)) |
442 | goto free_sb_buf; | 475 | goto free_sb_buf; |
443 | 476 | ||
444 | /* sanity checking of raw super */ | 477 | /* sanity checking of raw super */ |
445 | if (sanity_check_raw_super(raw_super)) | 478 | if (sanity_check_raw_super(sb, raw_super)) { |
479 | f2fs_msg(sb, KERN_ERR, "Can't find a valid F2FS filesystem"); | ||
446 | goto free_sb_buf; | 480 | goto free_sb_buf; |
481 | } | ||
447 | 482 | ||
448 | sb->s_maxbytes = max_file_size(le32_to_cpu(raw_super->log_blocksize)); | 483 | sb->s_maxbytes = max_file_size(le32_to_cpu(raw_super->log_blocksize)); |
449 | sb->s_max_links = F2FS_LINK_MAX; | 484 | sb->s_max_links = F2FS_LINK_MAX; |
@@ -477,18 +512,23 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) | |||
477 | /* get an inode for meta space */ | 512 | /* get an inode for meta space */ |
478 | sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi)); | 513 | sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi)); |
479 | if (IS_ERR(sbi->meta_inode)) { | 514 | if (IS_ERR(sbi->meta_inode)) { |
515 | f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode"); | ||
480 | err = PTR_ERR(sbi->meta_inode); | 516 | err = PTR_ERR(sbi->meta_inode); |
481 | goto free_sb_buf; | 517 | goto free_sb_buf; |
482 | } | 518 | } |
483 | 519 | ||
484 | err = get_valid_checkpoint(sbi); | 520 | err = get_valid_checkpoint(sbi); |
485 | if (err) | 521 | if (err) { |
522 | f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint"); | ||
486 | goto free_meta_inode; | 523 | goto free_meta_inode; |
524 | } | ||
487 | 525 | ||
488 | /* sanity checking of checkpoint */ | 526 | /* sanity checking of checkpoint */ |
489 | err = -EINVAL; | 527 | err = -EINVAL; |
490 | if (sanity_check_ckpt(raw_super, sbi->ckpt)) | 528 | if (sanity_check_ckpt(raw_super, sbi->ckpt)) { |
529 | f2fs_msg(sb, KERN_ERR, "Invalid F2FS checkpoint"); | ||
491 | goto free_cp; | 530 | goto free_cp; |
531 | } | ||
492 | 532 | ||
493 | sbi->total_valid_node_count = | 533 | sbi->total_valid_node_count = |
494 | le32_to_cpu(sbi->ckpt->valid_node_count); | 534 | le32_to_cpu(sbi->ckpt->valid_node_count); |
@@ -502,25 +542,28 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) | |||
502 | INIT_LIST_HEAD(&sbi->dir_inode_list); | 542 | INIT_LIST_HEAD(&sbi->dir_inode_list); |
503 | spin_lock_init(&sbi->dir_inode_lock); | 543 | spin_lock_init(&sbi->dir_inode_lock); |
504 | 544 | ||
505 | /* init super block */ | ||
506 | if (!sb_set_blocksize(sb, sbi->blocksize)) | ||
507 | goto free_cp; | ||
508 | |||
509 | init_orphan_info(sbi); | 545 | init_orphan_info(sbi); |
510 | 546 | ||
511 | /* setup f2fs internal modules */ | 547 | /* setup f2fs internal modules */ |
512 | err = build_segment_manager(sbi); | 548 | err = build_segment_manager(sbi); |
513 | if (err) | 549 | if (err) { |
550 | f2fs_msg(sb, KERN_ERR, | ||
551 | "Failed to initialize F2FS segment manager"); | ||
514 | goto free_sm; | 552 | goto free_sm; |
553 | } | ||
515 | err = build_node_manager(sbi); | 554 | err = build_node_manager(sbi); |
516 | if (err) | 555 | if (err) { |
556 | f2fs_msg(sb, KERN_ERR, | ||
557 | "Failed to initialize F2FS node manager"); | ||
517 | goto free_nm; | 558 | goto free_nm; |
559 | } | ||
518 | 560 | ||
519 | build_gc_manager(sbi); | 561 | build_gc_manager(sbi); |
520 | 562 | ||
521 | /* get an inode for node space */ | 563 | /* get an inode for node space */ |
522 | sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi)); | 564 | sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi)); |
523 | if (IS_ERR(sbi->node_inode)) { | 565 | if (IS_ERR(sbi->node_inode)) { |
566 | f2fs_msg(sb, KERN_ERR, "Failed to read node inode"); | ||
524 | err = PTR_ERR(sbi->node_inode); | 567 | err = PTR_ERR(sbi->node_inode); |
525 | goto free_nm; | 568 | goto free_nm; |
526 | } | 569 | } |
@@ -533,6 +576,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) | |||
533 | /* read root inode and dentry */ | 576 | /* read root inode and dentry */ |
534 | root = f2fs_iget(sb, F2FS_ROOT_INO(sbi)); | 577 | root = f2fs_iget(sb, F2FS_ROOT_INO(sbi)); |
535 | if (IS_ERR(root)) { | 578 | if (IS_ERR(root)) { |
579 | f2fs_msg(sb, KERN_ERR, "Failed to read root inode"); | ||
536 | err = PTR_ERR(root); | 580 | err = PTR_ERR(root); |
537 | goto free_node_inode; | 581 | goto free_node_inode; |
538 | } | 582 | } |
@@ -596,7 +640,7 @@ static struct file_system_type f2fs_fs_type = { | |||
596 | .fs_flags = FS_REQUIRES_DEV, | 640 | .fs_flags = FS_REQUIRES_DEV, |
597 | }; | 641 | }; |
598 | 642 | ||
599 | static int init_inodecache(void) | 643 | static int __init init_inodecache(void) |
600 | { | 644 | { |
601 | f2fs_inode_cachep = f2fs_kmem_cache_create("f2fs_inode_cache", | 645 | f2fs_inode_cachep = f2fs_kmem_cache_create("f2fs_inode_cache", |
602 | sizeof(struct f2fs_inode_info), NULL); | 646 | sizeof(struct f2fs_inode_info), NULL); |
@@ -631,14 +675,17 @@ static int __init init_f2fs_fs(void) | |||
631 | err = create_checkpoint_caches(); | 675 | err = create_checkpoint_caches(); |
632 | if (err) | 676 | if (err) |
633 | goto fail; | 677 | goto fail; |
634 | return register_filesystem(&f2fs_fs_type); | 678 | err = register_filesystem(&f2fs_fs_type); |
679 | if (err) | ||
680 | goto fail; | ||
681 | f2fs_create_root_stats(); | ||
635 | fail: | 682 | fail: |
636 | return err; | 683 | return err; |
637 | } | 684 | } |
638 | 685 | ||
639 | static void __exit exit_f2fs_fs(void) | 686 | static void __exit exit_f2fs_fs(void) |
640 | { | 687 | { |
641 | destroy_root_stats(); | 688 | f2fs_destroy_root_stats(); |
642 | unregister_filesystem(&f2fs_fs_type); | 689 | unregister_filesystem(&f2fs_fs_type); |
643 | destroy_checkpoint_caches(); | 690 | destroy_checkpoint_caches(); |
644 | destroy_gc_caches(); | 691 | destroy_gc_caches(); |
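f2fs_msg(), added above and declared with __printf(3, 4) in f2fs.h, pairs the %pV printk extension with a struct va_format so a prefixed, formatted message goes out in a single printk() call, while the attribute lets the compiler type-check every call site. A minimal sketch of the same idiom, with example_* names standing in for the f2fs specifics:

	#include <linux/fs.h>
	#include <linux/kernel.h>
	#include <linux/printk.h>

	/* __printf(3, 4): argument 3 is the format string, arguments start at 4 */
	static __printf(3, 4)
	void example_msg(struct super_block *sb, const char *level,
			 const char *fmt, ...)
	{
		struct va_format vaf;
		va_list args;

		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		/* %pV expands the (fmt, va_list) pair inside one printk() call */
		printk("%sexample-fs (%s): %pV\n", level, sb->s_id, &vaf);
		va_end(args);
	}

	/* a mismatched call such as example_msg(sb, KERN_ERR, "%u", "text")
	 * now produces a compile-time format warning */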
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c index 940136a3d3a6..8038c0496504 100644 --- a/fs/f2fs/xattr.c +++ b/fs/f2fs/xattr.c | |||
@@ -318,6 +318,8 @@ int f2fs_setxattr(struct inode *inode, int name_index, const char *name, | |||
318 | if (name_len > 255 || value_len > MAX_VALUE_LEN) | 318 | if (name_len > 255 || value_len > MAX_VALUE_LEN) |
319 | return -ERANGE; | 319 | return -ERANGE; |
320 | 320 | ||
321 | f2fs_balance_fs(sbi); | ||
322 | |||
321 | mutex_lock_op(sbi, NODE_NEW); | 323 | mutex_lock_op(sbi, NODE_NEW); |
322 | if (!fi->i_xattr_nid) { | 324 | if (!fi->i_xattr_nid) { |
323 | /* Allocate new attribute block */ | 325 | /* Allocate new attribute block */ |
diff --git a/fs/fuse/Kconfig b/fs/fuse/Kconfig index 0cf160a94eda..1b2f6c2c3aaf 100644 --- a/fs/fuse/Kconfig +++ b/fs/fuse/Kconfig | |||
@@ -4,12 +4,24 @@ config FUSE_FS | |||
4 | With FUSE it is possible to implement a fully functional filesystem | 4 | With FUSE it is possible to implement a fully functional filesystem |
5 | in a userspace program. | 5 | in a userspace program. |
6 | 6 | ||
7 | There's also companion library: libfuse. This library along with | 7 | There's also a companion library: libfuse2. This library is available |
8 | utilities is available from the FUSE homepage: | 8 | from the FUSE homepage: |
9 | <http://fuse.sourceforge.net/> | 9 | <http://fuse.sourceforge.net/> |
10 | although chances are your distribution already has that library | ||
11 | installed if you've installed the "fuse" package itself. | ||
10 | 12 | ||
11 | See <file:Documentation/filesystems/fuse.txt> for more information. | 13 | See <file:Documentation/filesystems/fuse.txt> for more information. |
12 | See <file:Documentation/Changes> for needed library/utility version. | 14 | See <file:Documentation/Changes> for needed library/utility version. |
13 | 15 | ||
14 | If you want to develop a userspace FS, or if you want to use | 16 | If you want to develop a userspace FS, or if you want to use |
15 | a filesystem based on FUSE, answer Y or M. | 17 | a filesystem based on FUSE, answer Y or M. |
18 | |||
19 | config CUSE | ||
20 | tristate "Character device in Userspace support" | ||
21 | depends on FUSE_FS | ||
22 | help | ||
23 | This FUSE extension allows character devices to be | ||
24 | implemented in userspace. | ||
25 | |||
26 | If you want to develop or use a userspace character device | ||
27 | based on CUSE, answer Y or M. | ||
diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c index ee8d55042298..e397b675b029 100644 --- a/fs/fuse/cuse.c +++ b/fs/fuse/cuse.c | |||
@@ -45,7 +45,6 @@ | |||
45 | #include <linux/miscdevice.h> | 45 | #include <linux/miscdevice.h> |
46 | #include <linux/mutex.h> | 46 | #include <linux/mutex.h> |
47 | #include <linux/slab.h> | 47 | #include <linux/slab.h> |
48 | #include <linux/spinlock.h> | ||
49 | #include <linux/stat.h> | 48 | #include <linux/stat.h> |
50 | #include <linux/module.h> | 49 | #include <linux/module.h> |
51 | 50 | ||
@@ -63,7 +62,7 @@ struct cuse_conn { | |||
63 | bool unrestricted_ioctl; | 62 | bool unrestricted_ioctl; |
64 | }; | 63 | }; |
65 | 64 | ||
66 | static DEFINE_SPINLOCK(cuse_lock); /* protects cuse_conntbl */ | 65 | static DEFINE_MUTEX(cuse_lock); /* protects registration */ |
67 | static struct list_head cuse_conntbl[CUSE_CONNTBL_LEN]; | 66 | static struct list_head cuse_conntbl[CUSE_CONNTBL_LEN]; |
68 | static struct class *cuse_class; | 67 | static struct class *cuse_class; |
69 | 68 | ||
@@ -114,14 +113,14 @@ static int cuse_open(struct inode *inode, struct file *file) | |||
114 | int rc; | 113 | int rc; |
115 | 114 | ||
116 | /* look up and get the connection */ | 115 | /* look up and get the connection */ |
117 | spin_lock(&cuse_lock); | 116 | mutex_lock(&cuse_lock); |
118 | list_for_each_entry(pos, cuse_conntbl_head(devt), list) | 117 | list_for_each_entry(pos, cuse_conntbl_head(devt), list) |
119 | if (pos->dev->devt == devt) { | 118 | if (pos->dev->devt == devt) { |
120 | fuse_conn_get(&pos->fc); | 119 | fuse_conn_get(&pos->fc); |
121 | cc = pos; | 120 | cc = pos; |
122 | break; | 121 | break; |
123 | } | 122 | } |
124 | spin_unlock(&cuse_lock); | 123 | mutex_unlock(&cuse_lock); |
125 | 124 | ||
126 | /* dead? */ | 125 | /* dead? */ |
127 | if (!cc) | 126 | if (!cc) |
@@ -267,7 +266,7 @@ static int cuse_parse_one(char **pp, char *end, char **keyp, char **valp) | |||
267 | static int cuse_parse_devinfo(char *p, size_t len, struct cuse_devinfo *devinfo) | 266 | static int cuse_parse_devinfo(char *p, size_t len, struct cuse_devinfo *devinfo) |
268 | { | 267 | { |
269 | char *end = p + len; | 268 | char *end = p + len; |
270 | char *key, *val; | 269 | char *uninitialized_var(key), *uninitialized_var(val); |
271 | int rc; | 270 | int rc; |
272 | 271 | ||
273 | while (true) { | 272 | while (true) { |
@@ -305,14 +304,14 @@ static void cuse_gendev_release(struct device *dev) | |||
305 | */ | 304 | */ |
306 | static void cuse_process_init_reply(struct fuse_conn *fc, struct fuse_req *req) | 305 | static void cuse_process_init_reply(struct fuse_conn *fc, struct fuse_req *req) |
307 | { | 306 | { |
308 | struct cuse_conn *cc = fc_to_cc(fc); | 307 | struct cuse_conn *cc = fc_to_cc(fc), *pos; |
309 | struct cuse_init_out *arg = req->out.args[0].value; | 308 | struct cuse_init_out *arg = req->out.args[0].value; |
310 | struct page *page = req->pages[0]; | 309 | struct page *page = req->pages[0]; |
311 | struct cuse_devinfo devinfo = { }; | 310 | struct cuse_devinfo devinfo = { }; |
312 | struct device *dev; | 311 | struct device *dev; |
313 | struct cdev *cdev; | 312 | struct cdev *cdev; |
314 | dev_t devt; | 313 | dev_t devt; |
315 | int rc; | 314 | int rc, i; |
316 | 315 | ||
317 | if (req->out.h.error || | 316 | if (req->out.h.error || |
318 | arg->major != FUSE_KERNEL_VERSION || arg->minor < 11) { | 317 | arg->major != FUSE_KERNEL_VERSION || arg->minor < 11) { |
@@ -356,15 +355,24 @@ static void cuse_process_init_reply(struct fuse_conn *fc, struct fuse_req *req) | |||
356 | dev_set_drvdata(dev, cc); | 355 | dev_set_drvdata(dev, cc); |
357 | dev_set_name(dev, "%s", devinfo.name); | 356 | dev_set_name(dev, "%s", devinfo.name); |
358 | 357 | ||
358 | mutex_lock(&cuse_lock); | ||
359 | |||
360 | /* make sure the device-name is unique */ | ||
361 | for (i = 0; i < CUSE_CONNTBL_LEN; ++i) { | ||
362 | list_for_each_entry(pos, &cuse_conntbl[i], list) | ||
363 | if (!strcmp(dev_name(pos->dev), dev_name(dev))) | ||
364 | goto err_unlock; | ||
365 | } | ||
366 | |||
359 | rc = device_add(dev); | 367 | rc = device_add(dev); |
360 | if (rc) | 368 | if (rc) |
361 | goto err_device; | 369 | goto err_unlock; |
362 | 370 | ||
363 | /* register cdev */ | 371 | /* register cdev */ |
364 | rc = -ENOMEM; | 372 | rc = -ENOMEM; |
365 | cdev = cdev_alloc(); | 373 | cdev = cdev_alloc(); |
366 | if (!cdev) | 374 | if (!cdev) |
367 | goto err_device; | 375 | goto err_unlock; |
368 | 376 | ||
369 | cdev->owner = THIS_MODULE; | 377 | cdev->owner = THIS_MODULE; |
370 | cdev->ops = &cuse_frontend_fops; | 378 | cdev->ops = &cuse_frontend_fops; |
@@ -377,9 +385,8 @@ static void cuse_process_init_reply(struct fuse_conn *fc, struct fuse_req *req) | |||
377 | cc->cdev = cdev; | 385 | cc->cdev = cdev; |
378 | 386 | ||
379 | /* make the device available */ | 387 | /* make the device available */ |
380 | spin_lock(&cuse_lock); | ||
381 | list_add(&cc->list, cuse_conntbl_head(devt)); | 388 | list_add(&cc->list, cuse_conntbl_head(devt)); |
382 | spin_unlock(&cuse_lock); | 389 | mutex_unlock(&cuse_lock); |
383 | 390 | ||
384 | /* announce device availability */ | 391 | /* announce device availability */ |
385 | dev_set_uevent_suppress(dev, 0); | 392 | dev_set_uevent_suppress(dev, 0); |
@@ -391,7 +398,8 @@ out: | |||
391 | 398 | ||
392 | err_cdev: | 399 | err_cdev: |
393 | cdev_del(cdev); | 400 | cdev_del(cdev); |
394 | err_device: | 401 | err_unlock: |
402 | mutex_unlock(&cuse_lock); | ||
395 | put_device(dev); | 403 | put_device(dev); |
396 | err_region: | 404 | err_region: |
397 | unregister_chrdev_region(devt, 1); | 405 | unregister_chrdev_region(devt, 1); |
@@ -520,9 +528,9 @@ static int cuse_channel_release(struct inode *inode, struct file *file) | |||
520 | int rc; | 528 | int rc; |
521 | 529 | ||
522 | /* remove from the conntbl, no more access from this point on */ | 530 | /* remove from the conntbl, no more access from this point on */ |
523 | spin_lock(&cuse_lock); | 531 | mutex_lock(&cuse_lock); |
524 | list_del_init(&cc->list); | 532 | list_del_init(&cc->list); |
525 | spin_unlock(&cuse_lock); | 533 | mutex_unlock(&cuse_lock); |
526 | 534 | ||
527 | /* remove device */ | 535 | /* remove device */ |
528 | if (cc->dev) | 536 | if (cc->dev) |
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index c16335315e5d..e83351aa5bad 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c | |||
@@ -692,8 +692,6 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep) | |||
692 | struct page *oldpage = *pagep; | 692 | struct page *oldpage = *pagep; |
693 | struct page *newpage; | 693 | struct page *newpage; |
694 | struct pipe_buffer *buf = cs->pipebufs; | 694 | struct pipe_buffer *buf = cs->pipebufs; |
695 | struct address_space *mapping; | ||
696 | pgoff_t index; | ||
697 | 695 | ||
698 | unlock_request(cs->fc, cs->req); | 696 | unlock_request(cs->fc, cs->req); |
699 | fuse_copy_finish(cs); | 697 | fuse_copy_finish(cs); |
@@ -724,9 +722,6 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep) | |||
724 | if (fuse_check_page(newpage) != 0) | 722 | if (fuse_check_page(newpage) != 0) |
725 | goto out_fallback_unlock; | 723 | goto out_fallback_unlock; |
726 | 724 | ||
727 | mapping = oldpage->mapping; | ||
728 | index = oldpage->index; | ||
729 | |||
730 | /* | 725 | /* |
731 | * This is a new and locked page, it shouldn't be mapped or | 726 | * This is a new and locked page, it shouldn't be mapped or |
732 | * have any special flags on it | 727 | * have any special flags on it |
diff --git a/fs/fuse/file.c b/fs/fuse/file.c index e21d4d8f87e3..f3ab824fa302 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c | |||
@@ -2177,8 +2177,8 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, | |||
2177 | return ret; | 2177 | return ret; |
2178 | } | 2178 | } |
2179 | 2179 | ||
2180 | long fuse_file_fallocate(struct file *file, int mode, loff_t offset, | 2180 | static long fuse_file_fallocate(struct file *file, int mode, loff_t offset, |
2181 | loff_t length) | 2181 | loff_t length) |
2182 | { | 2182 | { |
2183 | struct fuse_file *ff = file->private_data; | 2183 | struct fuse_file *ff = file->private_data; |
2184 | struct fuse_conn *fc = ff->fc; | 2184 | struct fuse_conn *fc = ff->fc; |
@@ -2213,7 +2213,6 @@ long fuse_file_fallocate(struct file *file, int mode, loff_t offset, | |||
2213 | 2213 | ||
2214 | return err; | 2214 | return err; |
2215 | } | 2215 | } |
2216 | EXPORT_SYMBOL_GPL(fuse_file_fallocate); | ||
2217 | 2216 | ||
2218 | static const struct file_operations fuse_file_operations = { | 2217 | static const struct file_operations fuse_file_operations = { |
2219 | .llseek = fuse_file_llseek, | 2218 | .llseek = fuse_file_llseek, |
diff --git a/include/asm-generic/dma-mapping-broken.h b/include/asm-generic/dma-mapping-broken.h index ccf7b4f34a3c..6c32af918c2f 100644 --- a/include/asm-generic/dma-mapping-broken.h +++ b/include/asm-generic/dma-mapping-broken.h | |||
@@ -16,6 +16,22 @@ extern void | |||
16 | dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, | 16 | dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, |
17 | dma_addr_t dma_handle); | 17 | dma_addr_t dma_handle); |
18 | 18 | ||
19 | static inline void *dma_alloc_attrs(struct device *dev, size_t size, | ||
20 | dma_addr_t *dma_handle, gfp_t flag, | ||
21 | struct dma_attrs *attrs) | ||
22 | { | ||
23 | /* attrs is not supported and ignored */ | ||
24 | return dma_alloc_coherent(dev, size, dma_handle, flag); | ||
25 | } | ||
26 | |||
27 | static inline void dma_free_attrs(struct device *dev, size_t size, | ||
28 | void *cpu_addr, dma_addr_t dma_handle, | ||
29 | struct dma_attrs *attrs) | ||
30 | { | ||
31 | /* attrs is not supported and ignored */ | ||
32 | dma_free_coherent(dev, size, cpu_addr, dma_handle); | ||
33 | } | ||
34 | |||
19 | #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) | 35 | #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) |
20 | #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) | 36 | #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) |
21 | 37 | ||
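The two inline wrappers above give architectures still using dma-mapping-broken.h the *_attrs entry points that generic code now calls, with the attributes simply ignored. A hedged usage sketch from a driver's point of view; dev is assumed to be a valid struct device, and NULL attrs requests default behaviour:

	#include <linux/dma-mapping.h>
	#include <linux/gfp.h>
	#include <linux/mm.h>

	static int example_alloc_buffer(struct device *dev)
	{
		dma_addr_t handle;
		void *buf;

		buf = dma_alloc_attrs(dev, PAGE_SIZE, &handle, GFP_KERNEL, NULL);
		if (!buf)
			return -ENOMEM;

		/* ... program the device with "handle", touch "buf" from the CPU ... */

		dma_free_attrs(dev, PAGE_SIZE, buf, handle, NULL);
		return 0;
	}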
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index 701beab27aab..5cf680a98f9b 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h | |||
@@ -461,10 +461,8 @@ static inline int is_zero_pfn(unsigned long pfn) | |||
461 | return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT); | 461 | return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT); |
462 | } | 462 | } |
463 | 463 | ||
464 | static inline unsigned long my_zero_pfn(unsigned long addr) | 464 | #define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr)) |
465 | { | 465 | |
466 | return page_to_pfn(ZERO_PAGE(addr)); | ||
467 | } | ||
468 | #else | 466 | #else |
469 | static inline int is_zero_pfn(unsigned long pfn) | 467 | static inline int is_zero_pfn(unsigned long pfn) |
470 | { | 468 | { |
diff --git a/include/asm-generic/syscalls.h b/include/asm-generic/syscalls.h index 58f466ff00d3..1db51b8524e9 100644 --- a/include/asm-generic/syscalls.h +++ b/include/asm-generic/syscalls.h | |||
@@ -21,10 +21,12 @@ asmlinkage long sys_mmap(unsigned long addr, unsigned long len, | |||
21 | unsigned long fd, off_t pgoff); | 21 | unsigned long fd, off_t pgoff); |
22 | #endif | 22 | #endif |
23 | 23 | ||
24 | #ifndef CONFIG_GENERIC_SIGALTSTACK | ||
24 | #ifndef sys_sigaltstack | 25 | #ifndef sys_sigaltstack |
25 | asmlinkage long sys_sigaltstack(const stack_t __user *, stack_t __user *, | 26 | asmlinkage long sys_sigaltstack(const stack_t __user *, stack_t __user *, |
26 | struct pt_regs *); | 27 | struct pt_regs *); |
27 | #endif | 28 | #endif |
29 | #endif | ||
28 | 30 | ||
29 | #ifndef sys_rt_sigreturn | 31 | #ifndef sys_rt_sigreturn |
30 | asmlinkage long sys_rt_sigreturn(struct pt_regs *regs); | 32 | asmlinkage long sys_rt_sigreturn(struct pt_regs *regs); |
diff --git a/include/linux/ata.h b/include/linux/ata.h index 408da9502177..8f7a3d68371a 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h | |||
@@ -297,10 +297,12 @@ enum { | |||
297 | ATA_LOG_SATA_NCQ = 0x10, | 297 | ATA_LOG_SATA_NCQ = 0x10, |
298 | ATA_LOG_SATA_ID_DEV_DATA = 0x30, | 298 | ATA_LOG_SATA_ID_DEV_DATA = 0x30, |
299 | ATA_LOG_SATA_SETTINGS = 0x08, | 299 | ATA_LOG_SATA_SETTINGS = 0x08, |
300 | ATA_LOG_DEVSLP_MDAT = 0x30, | 300 | ATA_LOG_DEVSLP_OFFSET = 0x30, |
301 | ATA_LOG_DEVSLP_SIZE = 0x08, | ||
302 | ATA_LOG_DEVSLP_MDAT = 0x00, | ||
301 | ATA_LOG_DEVSLP_MDAT_MASK = 0x1F, | 303 | ATA_LOG_DEVSLP_MDAT_MASK = 0x1F, |
302 | ATA_LOG_DEVSLP_DETO = 0x31, | 304 | ATA_LOG_DEVSLP_DETO = 0x01, |
303 | ATA_LOG_DEVSLP_VALID = 0x37, | 305 | ATA_LOG_DEVSLP_VALID = 0x07, |
304 | ATA_LOG_DEVSLP_VALID_MASK = 0x80, | 306 | ATA_LOG_DEVSLP_VALID_MASK = 0x80, |
305 | 307 | ||
306 | /* READ/WRITE LONG (obsolete) */ | 308 | /* READ/WRITE LONG (obsolete) */ |
diff --git a/include/linux/libata.h b/include/linux/libata.h index 83ba0ab2c915..649e5f86b5f0 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h | |||
@@ -652,8 +652,8 @@ struct ata_device { | |||
652 | u32 gscr[SATA_PMP_GSCR_DWORDS]; /* PMP GSCR block */ | 652 | u32 gscr[SATA_PMP_GSCR_DWORDS]; /* PMP GSCR block */ |
653 | }; | 653 | }; |
654 | 654 | ||
655 | /* Identify Device Data Log (30h), SATA Settings (page 08h) */ | 655 | /* DEVSLP Timing Variables from Identify Device Data Log */ |
656 | u8 sata_settings[ATA_SECT_SIZE]; | 656 | u8 devslp_timing[ATA_LOG_DEVSLP_SIZE]; |
657 | 657 | ||
658 | /* error history */ | 658 | /* error history */ |
659 | int spdn_cnt; | 659 | int spdn_cnt; |
diff --git a/include/linux/module.h b/include/linux/module.h index 7760c6d344a3..1375ee3f03aa 100644 --- a/include/linux/module.h +++ b/include/linux/module.h | |||
@@ -199,11 +199,11 @@ struct module_use { | |||
199 | struct module *source, *target; | 199 | struct module *source, *target; |
200 | }; | 200 | }; |
201 | 201 | ||
202 | enum module_state | 202 | enum module_state { |
203 | { | 203 | MODULE_STATE_LIVE, /* Normal state. */ |
204 | MODULE_STATE_LIVE, | 204 | MODULE_STATE_COMING, /* Full formed, running module_init. */ |
205 | MODULE_STATE_COMING, | 205 | MODULE_STATE_GOING, /* Going away. */ |
206 | MODULE_STATE_GOING, | 206 | MODULE_STATE_UNFORMED, /* Still setting it up. */ |
207 | }; | 207 | }; |
208 | 208 | ||
209 | /** | 209 | /** |
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index 1693775ecfe8..89573a33ab3c 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h | |||
@@ -45,7 +45,6 @@ extern long arch_ptrace(struct task_struct *child, long request, | |||
45 | extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len); | 45 | extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len); |
46 | extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len); | 46 | extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len); |
47 | extern void ptrace_disable(struct task_struct *); | 47 | extern void ptrace_disable(struct task_struct *); |
48 | extern int ptrace_check_attach(struct task_struct *task, bool ignore_state); | ||
49 | extern int ptrace_request(struct task_struct *child, long request, | 48 | extern int ptrace_request(struct task_struct *child, long request, |
50 | unsigned long addr, unsigned long data); | 49 | unsigned long addr, unsigned long data); |
51 | extern void ptrace_notify(int exit_code); | 50 | extern void ptrace_notify(int exit_code); |
diff --git a/include/linux/sched.h b/include/linux/sched.h index 6fc8f45de4e9..d2112477ff5e 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -2714,7 +2714,16 @@ static inline void thread_group_cputime_init(struct signal_struct *sig) | |||
2714 | extern void recalc_sigpending_and_wake(struct task_struct *t); | 2714 | extern void recalc_sigpending_and_wake(struct task_struct *t); |
2715 | extern void recalc_sigpending(void); | 2715 | extern void recalc_sigpending(void); |
2716 | 2716 | ||
2717 | extern void signal_wake_up(struct task_struct *t, int resume_stopped); | 2717 | extern void signal_wake_up_state(struct task_struct *t, unsigned int state); |
2718 | |||
2719 | static inline void signal_wake_up(struct task_struct *t, bool resume) | ||
2720 | { | ||
2721 | signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0); | ||
2722 | } | ||
2723 | static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume) | ||
2724 | { | ||
2725 | signal_wake_up_state(t, resume ? __TASK_TRACED : 0); | ||
2726 | } | ||
2718 | 2727 | ||
2719 | /* | 2728 | /* |
2720 | * Wrappers for p->thread_info->cpu access. No-op on UP. | 2729 | * Wrappers for p->thread_info->cpu access. No-op on UP. |
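The new wrappers above let each caller say which extra sleep state a signal wakeup may break: plain signal_wake_up() only adds TASK_WAKEKILL when resuming, while ptrace_signal_wake_up() targets __TASK_TRACED, the state the new ptrace freeze logic parks a tracee in. A self-contained sketch of that masking; the TASK_* values are copied here as assumptions about the era's sched.h layout, only so the example compiles on its own:

#include <stdio.h>

/* Illustrative copies of the task-state bits (assumed layout, not authoritative). */
#define TASK_INTERRUPTIBLE      1
#define TASK_UNINTERRUPTIBLE    2
#define __TASK_STOPPED          4
#define __TASK_TRACED           8
#define TASK_WAKEKILL           128

#define TASK_TRACED             (TASK_WAKEKILL | __TASK_TRACED)

/* try_to_wake_up() only wakes a sleeper whose state intersects the mask. */
static void report(const char *caller, unsigned int extra_state)
{
        unsigned int mask = extra_state | TASK_INTERRUPTIBLE;

        printf("%-30s mask=0x%03x  wakes TASK_TRACED: %s  wakes __TASK_TRACED: %s\n",
               caller, mask,
               (mask & TASK_TRACED) ? "yes" : "no",
               (mask & __TASK_TRACED) ? "yes" : "no");
}

int main(void)
{
        report("signal_wake_up(t, false)", 0);
        report("signal_wake_up(t, true)", TASK_WAKEKILL);
        report("ptrace_signal_wake_up(t, true)", __TASK_TRACED);
        return 0;
}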
diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h index 78f99d97475b..2c6c85f18ea0 100644 --- a/include/uapi/linux/serial_core.h +++ b/include/uapi/linux/serial_core.h | |||
@@ -50,7 +50,8 @@ | |||
50 | #define PORT_LPC3220 22 /* NXP LPC32xx SoC "Standard" UART */ | 50 | #define PORT_LPC3220 22 /* NXP LPC32xx SoC "Standard" UART */ |
51 | #define PORT_8250_CIR 23 /* CIR infrared port, has its own driver */ | 51 | #define PORT_8250_CIR 23 /* CIR infrared port, has its own driver */ |
52 | #define PORT_XR17V35X 24 /* Exar XR17V35x UARTs */ | 52 | #define PORT_XR17V35X 24 /* Exar XR17V35x UARTs */ |
53 | #define PORT_MAX_8250 24 /* max port ID */ | 53 | #define PORT_BRCM_TRUMANAGE 24 |
54 | #define PORT_MAX_8250 25 /* max port ID */ | ||
54 | 55 | ||
55 | /* | 56 | /* |
56 | * ARM specific type numbers. These are not currently guaranteed | 57 | * ARM specific type numbers. These are not currently guaranteed |
diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c index 5e4ded51788e..f9acf71b9810 100644 --- a/init/do_mounts_initrd.c +++ b/init/do_mounts_initrd.c | |||
@@ -36,6 +36,10 @@ __setup("noinitrd", no_initrd); | |||
36 | static int init_linuxrc(struct subprocess_info *info, struct cred *new) | 36 | static int init_linuxrc(struct subprocess_info *info, struct cred *new) |
37 | { | 37 | { |
38 | sys_unshare(CLONE_FS | CLONE_FILES); | 38 | sys_unshare(CLONE_FS | CLONE_FILES); |
39 | /* stdin/stdout/stderr for /linuxrc */ | ||
40 | sys_open("/dev/console", O_RDWR, 0); | ||
41 | sys_dup(0); | ||
42 | sys_dup(0); | ||
39 | /* move initrd over / and chdir/chroot in initrd root */ | 43 | /* move initrd over / and chdir/chroot in initrd root */ |
40 | sys_chdir("/root"); | 44 | sys_chdir("/root"); |
41 | sys_mount(".", "/", NULL, MS_MOVE, NULL); | 45 | sys_mount(".", "/", NULL, MS_MOVE, NULL); |
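The init_linuxrc() hunk relies on the classic "lowest free descriptor" behaviour of open(2): with 0, 1 and 2 closed, one open of the console plus two dup() calls re-creates stdin, stdout and stderr for /linuxrc. The same pattern in plain user space, using /dev/tty instead of /dev/console purely for illustration:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* Pretend all standard descriptors are closed, as for /linuxrc. */
        close(0);
        close(1);
        close(2);

        /* open() returns the lowest free fd, so this becomes fd 0 ... */
        if (open("/dev/tty", O_RDWR) < 0)
                return 1;
        /* ... and the two dups fill fds 1 and 2. */
        dup(0);
        dup(0);

        /* stdout works again even though we closed fd 1 above. */
        printf("stdin/stdout/stderr wired to the terminal\n");
        return 0;
}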
diff --git a/init/main.c b/init/main.c index 85d69dffe864..92d728a32d51 100644 --- a/init/main.c +++ b/init/main.c | |||
@@ -802,7 +802,7 @@ static int run_init_process(const char *init_filename) | |||
802 | (const char __user *const __user *)envp_init); | 802 | (const char __user *const __user *)envp_init); |
803 | } | 803 | } |
804 | 804 | ||
805 | static void __init kernel_init_freeable(void); | 805 | static noinline void __init kernel_init_freeable(void); |
806 | 806 | ||
807 | static int __ref kernel_init(void *unused) | 807 | static int __ref kernel_init(void *unused) |
808 | { | 808 | { |
@@ -845,7 +845,7 @@ static int __ref kernel_init(void *unused) | |||
845 | "See Linux Documentation/init.txt for guidance."); | 845 | "See Linux Documentation/init.txt for guidance."); |
846 | } | 846 | } |
847 | 847 | ||
848 | static void __init kernel_init_freeable(void) | 848 | static noinline void __init kernel_init_freeable(void) |
849 | { | 849 | { |
850 | /* | 850 | /* |
851 | * Wait until kthreadd is all set-up. | 851 | * Wait until kthreadd is all set-up. |
diff --git a/kernel/async.c b/kernel/async.c index a1d585c351d6..6f34904a0b53 100644 --- a/kernel/async.c +++ b/kernel/async.c | |||
@@ -86,18 +86,27 @@ static atomic_t entry_count; | |||
86 | */ | 86 | */ |
87 | static async_cookie_t __lowest_in_progress(struct async_domain *running) | 87 | static async_cookie_t __lowest_in_progress(struct async_domain *running) |
88 | { | 88 | { |
89 | async_cookie_t first_running = next_cookie; /* infinity value */ | ||
90 | async_cookie_t first_pending = next_cookie; /* ditto */ | ||
89 | struct async_entry *entry; | 91 | struct async_entry *entry; |
90 | 92 | ||
93 | /* | ||
94 | * Both running and pending lists are sorted but not disjoint. | ||
95 | * Take the first cookies from both and return the min. | ||
96 | */ | ||
91 | if (!list_empty(&running->domain)) { | 97 | if (!list_empty(&running->domain)) { |
92 | entry = list_first_entry(&running->domain, typeof(*entry), list); | 98 | entry = list_first_entry(&running->domain, typeof(*entry), list); |
93 | return entry->cookie; | 99 | first_running = entry->cookie; |
94 | } | 100 | } |
95 | 101 | ||
96 | list_for_each_entry(entry, &async_pending, list) | 102 | list_for_each_entry(entry, &async_pending, list) { |
97 | if (entry->running == running) | 103 | if (entry->running == running) { |
98 | return entry->cookie; | 104 | first_pending = entry->cookie; |
105 | break; | ||
106 | } | ||
107 | } | ||
99 | 108 | ||
100 | return next_cookie; /* "infinity" value */ | 109 | return min(first_running, first_pending); |
101 | } | 110 | } |
102 | 111 | ||
103 | static async_cookie_t lowest_in_progress(struct async_domain *running) | 112 | static async_cookie_t lowest_in_progress(struct async_domain *running) |
@@ -118,13 +127,17 @@ static void async_run_entry_fn(struct work_struct *work) | |||
118 | { | 127 | { |
119 | struct async_entry *entry = | 128 | struct async_entry *entry = |
120 | container_of(work, struct async_entry, work); | 129 | container_of(work, struct async_entry, work); |
130 | struct async_entry *pos; | ||
121 | unsigned long flags; | 131 | unsigned long flags; |
122 | ktime_t uninitialized_var(calltime), delta, rettime; | 132 | ktime_t uninitialized_var(calltime), delta, rettime; |
123 | struct async_domain *running = entry->running; | 133 | struct async_domain *running = entry->running; |
124 | 134 | ||
125 | /* 1) move self to the running queue */ | 135 | /* 1) move self to the running queue, make sure it stays sorted */ |
126 | spin_lock_irqsave(&async_lock, flags); | 136 | spin_lock_irqsave(&async_lock, flags); |
127 | list_move_tail(&entry->list, &running->domain); | 137 | list_for_each_entry_reverse(pos, &running->domain, list) |
138 | if (entry->cookie < pos->cookie) | ||
139 | break; | ||
140 | list_move_tail(&entry->list, &pos->list); | ||
128 | spin_unlock_irqrestore(&async_lock, flags); | 141 | spin_unlock_irqrestore(&async_lock, flags); |
129 | 142 | ||
130 | /* 2) run (and print duration) */ | 143 | /* 2) run (and print duration) */ |
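The reworked __lowest_in_progress() above no longer returns early from the running list: both lists are sorted but their cookie ranges may interleave, so it takes the first cookie of each and reports the minimum. A stand-alone sketch of that selection, with cookie values invented for the example:

#include <stdio.h>

typedef unsigned long long cookie_t;

#define NEXT_COOKIE 1000ULL     /* stands in for next_cookie, the "infinity" value */

static cookie_t first_or_infinity(const cookie_t *list, int n)
{
        return n ? list[0] : NEXT_COOKIE;
}

int main(void)
{
        /* Both lists are kept sorted by cookie, but they are not disjoint. */
        cookie_t running[] = { 7, 9, 12 };
        cookie_t pending[] = { 8, 10 };

        cookie_t first_running = first_or_infinity(running, 3);
        cookie_t first_pending = first_or_infinity(pending, 2);
        cookie_t lowest = first_running < first_pending ? first_running
                                                        : first_pending;

        printf("lowest cookie still in progress: %llu\n", lowest);
        return 0;
}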
diff --git a/kernel/compat.c b/kernel/compat.c index f6150e92dfc9..36700e9e2be9 100644 --- a/kernel/compat.c +++ b/kernel/compat.c | |||
@@ -535,9 +535,11 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru) | |||
535 | return 0; | 535 | return 0; |
536 | } | 536 | } |
537 | 537 | ||
538 | asmlinkage long | 538 | COMPAT_SYSCALL_DEFINE4(wait4, |
539 | compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options, | 539 | compat_pid_t, pid, |
540 | struct compat_rusage __user *ru) | 540 | compat_uint_t __user *, stat_addr, |
541 | int, options, | ||
542 | struct compat_rusage __user *, ru) | ||
541 | { | 543 | { |
542 | if (!ru) { | 544 | if (!ru) { |
543 | return sys_wait4(pid, stat_addr, options, NULL); | 545 | return sys_wait4(pid, stat_addr, options, NULL); |
@@ -564,9 +566,10 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options, | |||
564 | } | 566 | } |
565 | } | 567 | } |
566 | 568 | ||
567 | asmlinkage long compat_sys_waitid(int which, compat_pid_t pid, | 569 | COMPAT_SYSCALL_DEFINE5(waitid, |
568 | struct compat_siginfo __user *uinfo, int options, | 570 | int, which, compat_pid_t, pid, |
569 | struct compat_rusage __user *uru) | 571 | struct compat_siginfo __user *, uinfo, int, options, |
572 | struct compat_rusage __user *, uru) | ||
570 | { | 573 | { |
571 | siginfo_t info; | 574 | siginfo_t info; |
572 | struct rusage ru; | 575 | struct rusage ru; |
@@ -584,7 +587,11 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid, | |||
584 | return ret; | 587 | return ret; |
585 | 588 | ||
586 | if (uru) { | 589 | if (uru) { |
587 | ret = put_compat_rusage(&ru, uru); | 590 | /* sys_waitid() overwrites everything in ru */ |
591 | if (COMPAT_USE_64BIT_TIME) | ||
592 | ret = copy_to_user(uru, &ru, sizeof(ru)); | ||
593 | else | ||
594 | ret = put_compat_rusage(&ru, uru); | ||
588 | if (ret) | 595 | if (ret) |
589 | return ret; | 596 | return ret; |
590 | } | 597 | } |
@@ -994,7 +1001,7 @@ compat_sys_rt_sigtimedwait (compat_sigset_t __user *uthese, | |||
994 | sigset_from_compat(&s, &s32); | 1001 | sigset_from_compat(&s, &s32); |
995 | 1002 | ||
996 | if (uts) { | 1003 | if (uts) { |
997 | if (get_compat_timespec(&t, uts)) | 1004 | if (compat_get_timespec(&t, uts)) |
998 | return -EFAULT; | 1005 | return -EFAULT; |
999 | } | 1006 | } |
1000 | 1007 | ||
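The waitid change above distinguishes two compat cases: when the compat ABI already uses 64-bit time (COMPAT_USE_64BIT_TIME) the rusage layouts match and a raw copy is enough, otherwise every timeval has to be narrowed field by field. A user-space illustration of that narrowing step; the compat structures here are trimmed-down stand-ins, not the kernel's definitions:

#include <stdio.h>
#include <sys/time.h>
#include <sys/resource.h>       /* struct rusage, getrusage() */

/* Illustrative 32-bit layout a compat task would expect (assumption). */
struct compat_timeval { int tv_sec, tv_usec; };
struct compat_rusage_min { struct compat_timeval ru_utime, ru_stime; /* ... */ };

/* Stand-in for put_compat_rusage(): narrow each field explicitly. */
static void put_compat_rusage_min(const struct rusage *r,
                                  struct compat_rusage_min *cr)
{
        cr->ru_utime.tv_sec  = (int)r->ru_utime.tv_sec;
        cr->ru_utime.tv_usec = (int)r->ru_utime.tv_usec;
        cr->ru_stime.tv_sec  = (int)r->ru_stime.tv_sec;
        cr->ru_stime.tv_usec = (int)r->ru_stime.tv_usec;
}

int main(void)
{
        struct rusage ru;
        struct compat_rusage_min cru;

        getrusage(RUSAGE_SELF, &ru);

        /* 64-bit-time compat: layouts match, a plain copy would be enough. */
        /* Otherwise every 64-bit timeval is converted field by field: */
        put_compat_rusage_min(&ru, &cru);
        printf("user time: %d.%06d s\n", cru.ru_utime.tv_sec, cru.ru_utime.tv_usec);
        return 0;
}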
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c index 4d5f8d5612f3..8875254120b6 100644 --- a/kernel/debug/kdb/kdb_main.c +++ b/kernel/debug/kdb/kdb_main.c | |||
@@ -1970,6 +1970,8 @@ static int kdb_lsmod(int argc, const char **argv) | |||
1970 | 1970 | ||
1971 | kdb_printf("Module Size modstruct Used by\n"); | 1971 | kdb_printf("Module Size modstruct Used by\n"); |
1972 | list_for_each_entry(mod, kdb_modules, list) { | 1972 | list_for_each_entry(mod, kdb_modules, list) { |
1973 | if (mod->state == MODULE_STATE_UNFORMED) | ||
1974 | continue; | ||
1973 | 1975 | ||
1974 | kdb_printf("%-20s%8u 0x%p ", mod->name, | 1976 | kdb_printf("%-20s%8u 0x%p ", mod->name, |
1975 | mod->core_size, (void *)mod); | 1977 | mod->core_size, (void *)mod); |
diff --git a/kernel/fork.c b/kernel/fork.c index 65ca6d27f24e..c535f33bbb9c 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -1668,8 +1668,10 @@ SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp, | |||
1668 | int, tls_val) | 1668 | int, tls_val) |
1669 | #endif | 1669 | #endif |
1670 | { | 1670 | { |
1671 | return do_fork(clone_flags, newsp, 0, | 1671 | long ret = do_fork(clone_flags, newsp, 0, parent_tidptr, child_tidptr); |
1672 | parent_tidptr, child_tidptr); | 1672 | asmlinkage_protect(5, ret, clone_flags, newsp, |
1673 | parent_tidptr, child_tidptr, tls_val); | ||
1674 | return ret; | ||
1673 | } | 1675 | } |
1674 | #endif | 1676 | #endif |
1675 | 1677 | ||
diff --git a/kernel/module.c b/kernel/module.c index b10b048367e1..eab08274ec9b 100644 --- a/kernel/module.c +++ b/kernel/module.c | |||
@@ -188,6 +188,7 @@ struct load_info { | |||
188 | ongoing or failed initialization etc. */ | 188 | ongoing or failed initialization etc. */ |
189 | static inline int strong_try_module_get(struct module *mod) | 189 | static inline int strong_try_module_get(struct module *mod) |
190 | { | 190 | { |
191 | BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED); | ||
191 | if (mod && mod->state == MODULE_STATE_COMING) | 192 | if (mod && mod->state == MODULE_STATE_COMING) |
192 | return -EBUSY; | 193 | return -EBUSY; |
193 | if (try_module_get(mod)) | 194 | if (try_module_get(mod)) |
@@ -343,6 +344,9 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr, | |||
343 | #endif | 344 | #endif |
344 | }; | 345 | }; |
345 | 346 | ||
347 | if (mod->state == MODULE_STATE_UNFORMED) | ||
348 | continue; | ||
349 | |||
346 | if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data)) | 350 | if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data)) |
347 | return true; | 351 | return true; |
348 | } | 352 | } |
@@ -450,16 +454,24 @@ const struct kernel_symbol *find_symbol(const char *name, | |||
450 | EXPORT_SYMBOL_GPL(find_symbol); | 454 | EXPORT_SYMBOL_GPL(find_symbol); |
451 | 455 | ||
452 | /* Search for module by name: must hold module_mutex. */ | 456 | /* Search for module by name: must hold module_mutex. */ |
453 | struct module *find_module(const char *name) | 457 | static struct module *find_module_all(const char *name, |
458 | bool even_unformed) | ||
454 | { | 459 | { |
455 | struct module *mod; | 460 | struct module *mod; |
456 | 461 | ||
457 | list_for_each_entry(mod, &modules, list) { | 462 | list_for_each_entry(mod, &modules, list) { |
463 | if (!even_unformed && mod->state == MODULE_STATE_UNFORMED) | ||
464 | continue; | ||
458 | if (strcmp(mod->name, name) == 0) | 465 | if (strcmp(mod->name, name) == 0) |
459 | return mod; | 466 | return mod; |
460 | } | 467 | } |
461 | return NULL; | 468 | return NULL; |
462 | } | 469 | } |
470 | |||
471 | struct module *find_module(const char *name) | ||
472 | { | ||
473 | return find_module_all(name, false); | ||
474 | } | ||
463 | EXPORT_SYMBOL_GPL(find_module); | 475 | EXPORT_SYMBOL_GPL(find_module); |
464 | 476 | ||
465 | #ifdef CONFIG_SMP | 477 | #ifdef CONFIG_SMP |
@@ -525,6 +537,8 @@ bool is_module_percpu_address(unsigned long addr) | |||
525 | preempt_disable(); | 537 | preempt_disable(); |
526 | 538 | ||
527 | list_for_each_entry_rcu(mod, &modules, list) { | 539 | list_for_each_entry_rcu(mod, &modules, list) { |
540 | if (mod->state == MODULE_STATE_UNFORMED) | ||
541 | continue; | ||
528 | if (!mod->percpu_size) | 542 | if (!mod->percpu_size) |
529 | continue; | 543 | continue; |
530 | for_each_possible_cpu(cpu) { | 544 | for_each_possible_cpu(cpu) { |
@@ -1048,6 +1062,8 @@ static ssize_t show_initstate(struct module_attribute *mattr, | |||
1048 | case MODULE_STATE_GOING: | 1062 | case MODULE_STATE_GOING: |
1049 | state = "going"; | 1063 | state = "going"; |
1050 | break; | 1064 | break; |
1065 | default: | ||
1066 | BUG(); | ||
1051 | } | 1067 | } |
1052 | return sprintf(buffer, "%s\n", state); | 1068 | return sprintf(buffer, "%s\n", state); |
1053 | } | 1069 | } |
@@ -1786,6 +1802,8 @@ void set_all_modules_text_rw(void) | |||
1786 | 1802 | ||
1787 | mutex_lock(&module_mutex); | 1803 | mutex_lock(&module_mutex); |
1788 | list_for_each_entry_rcu(mod, &modules, list) { | 1804 | list_for_each_entry_rcu(mod, &modules, list) { |
1805 | if (mod->state == MODULE_STATE_UNFORMED) | ||
1806 | continue; | ||
1789 | if ((mod->module_core) && (mod->core_text_size)) { | 1807 | if ((mod->module_core) && (mod->core_text_size)) { |
1790 | set_page_attributes(mod->module_core, | 1808 | set_page_attributes(mod->module_core, |
1791 | mod->module_core + mod->core_text_size, | 1809 | mod->module_core + mod->core_text_size, |
@@ -1807,6 +1825,8 @@ void set_all_modules_text_ro(void) | |||
1807 | 1825 | ||
1808 | mutex_lock(&module_mutex); | 1826 | mutex_lock(&module_mutex); |
1809 | list_for_each_entry_rcu(mod, &modules, list) { | 1827 | list_for_each_entry_rcu(mod, &modules, list) { |
1828 | if (mod->state == MODULE_STATE_UNFORMED) | ||
1829 | continue; | ||
1810 | if ((mod->module_core) && (mod->core_text_size)) { | 1830 | if ((mod->module_core) && (mod->core_text_size)) { |
1811 | set_page_attributes(mod->module_core, | 1831 | set_page_attributes(mod->module_core, |
1812 | mod->module_core + mod->core_text_size, | 1832 | mod->module_core + mod->core_text_size, |
@@ -2527,6 +2547,13 @@ static int copy_module_from_fd(int fd, struct load_info *info) | |||
2527 | err = -EFBIG; | 2547 | err = -EFBIG; |
2528 | goto out; | 2548 | goto out; |
2529 | } | 2549 | } |
2550 | |||
2551 | /* Don't hand 0 to vmalloc, it whines. */ | ||
2552 | if (stat.size == 0) { | ||
2553 | err = -EINVAL; | ||
2554 | goto out; | ||
2555 | } | ||
2556 | |||
2530 | info->hdr = vmalloc(stat.size); | 2557 | info->hdr = vmalloc(stat.size); |
2531 | if (!info->hdr) { | 2558 | if (!info->hdr) { |
2532 | err = -ENOMEM; | 2559 | err = -ENOMEM; |
@@ -2990,8 +3017,9 @@ static bool finished_loading(const char *name) | |||
2990 | bool ret; | 3017 | bool ret; |
2991 | 3018 | ||
2992 | mutex_lock(&module_mutex); | 3019 | mutex_lock(&module_mutex); |
2993 | mod = find_module(name); | 3020 | mod = find_module_all(name, true); |
2994 | ret = !mod || mod->state != MODULE_STATE_COMING; | 3021 | ret = !mod || mod->state == MODULE_STATE_LIVE |
3022 | || mod->state == MODULE_STATE_GOING; | ||
2995 | mutex_unlock(&module_mutex); | 3023 | mutex_unlock(&module_mutex); |
2996 | 3024 | ||
2997 | return ret; | 3025 | return ret; |
@@ -3136,6 +3164,32 @@ static int load_module(struct load_info *info, const char __user *uargs, | |||
3136 | goto free_copy; | 3164 | goto free_copy; |
3137 | } | 3165 | } |
3138 | 3166 | ||
3167 | /* | ||
3168 | * We try to place it in the list now to make sure it's unique | ||
3169 | * before we dedicate too many resources. In particular, | ||
3170 | * temporary percpu memory exhaustion. | ||
3171 | */ | ||
3172 | mod->state = MODULE_STATE_UNFORMED; | ||
3173 | again: | ||
3174 | mutex_lock(&module_mutex); | ||
3175 | if ((old = find_module_all(mod->name, true)) != NULL) { | ||
3176 | if (old->state == MODULE_STATE_COMING | ||
3177 | || old->state == MODULE_STATE_UNFORMED) { | ||
3178 | /* Wait in case it fails to load. */ | ||
3179 | mutex_unlock(&module_mutex); | ||
3180 | err = wait_event_interruptible(module_wq, | ||
3181 | finished_loading(mod->name)); | ||
3182 | if (err) | ||
3183 | goto free_module; | ||
3184 | goto again; | ||
3185 | } | ||
3186 | err = -EEXIST; | ||
3187 | mutex_unlock(&module_mutex); | ||
3188 | goto free_module; | ||
3189 | } | ||
3190 | list_add_rcu(&mod->list, &modules); | ||
3191 | mutex_unlock(&module_mutex); | ||
3192 | |||
3139 | #ifdef CONFIG_MODULE_SIG | 3193 | #ifdef CONFIG_MODULE_SIG |
3140 | mod->sig_ok = info->sig_ok; | 3194 | mod->sig_ok = info->sig_ok; |
3141 | if (!mod->sig_ok) | 3195 | if (!mod->sig_ok) |
@@ -3145,7 +3199,7 @@ static int load_module(struct load_info *info, const char __user *uargs, | |||
3145 | /* Now module is in final location, initialize linked lists, etc. */ | 3199 | /* Now module is in final location, initialize linked lists, etc. */ |
3146 | err = module_unload_init(mod); | 3200 | err = module_unload_init(mod); |
3147 | if (err) | 3201 | if (err) |
3148 | goto free_module; | 3202 | goto unlink_mod; |
3149 | 3203 | ||
3150 | /* Now we've got everything in the final locations, we can | 3204 | /* Now we've got everything in the final locations, we can |
3151 | * find optional sections. */ | 3205 | * find optional sections. */ |
@@ -3180,54 +3234,33 @@ static int load_module(struct load_info *info, const char __user *uargs, | |||
3180 | goto free_arch_cleanup; | 3234 | goto free_arch_cleanup; |
3181 | } | 3235 | } |
3182 | 3236 | ||
3183 | /* Mark state as coming so strong_try_module_get() ignores us. */ | ||
3184 | mod->state = MODULE_STATE_COMING; | ||
3185 | |||
3186 | /* Now sew it into the lists so we can get lockdep and oops | ||
3187 | * info during argument parsing. No one should access us, since | ||
3188 | * strong_try_module_get() will fail. | ||
3189 | * lockdep/oops can run asynchronous, so use the RCU list insertion | ||
3190 | * function to insert in a way safe to concurrent readers. | ||
3191 | * The mutex protects against concurrent writers. | ||
3192 | */ | ||
3193 | again: | ||
3194 | mutex_lock(&module_mutex); | ||
3195 | if ((old = find_module(mod->name)) != NULL) { | ||
3196 | if (old->state == MODULE_STATE_COMING) { | ||
3197 | /* Wait in case it fails to load. */ | ||
3198 | mutex_unlock(&module_mutex); | ||
3199 | err = wait_event_interruptible(module_wq, | ||
3200 | finished_loading(mod->name)); | ||
3201 | if (err) | ||
3202 | goto free_arch_cleanup; | ||
3203 | goto again; | ||
3204 | } | ||
3205 | err = -EEXIST; | ||
3206 | goto unlock; | ||
3207 | } | ||
3208 | |||
3209 | /* This has to be done once we're sure module name is unique. */ | ||
3210 | dynamic_debug_setup(info->debug, info->num_debug); | 3237 | dynamic_debug_setup(info->debug, info->num_debug); |
3211 | 3238 | ||
3212 | /* Find duplicate symbols */ | 3239 | mutex_lock(&module_mutex); |
3240 | /* Find duplicate symbols (must be called under lock). */ | ||
3213 | err = verify_export_symbols(mod); | 3241 | err = verify_export_symbols(mod); |
3214 | if (err < 0) | 3242 | if (err < 0) |
3215 | goto ddebug; | 3243 | goto ddebug_cleanup; |
3216 | 3244 | ||
3245 | /* This relies on module_mutex for list integrity. */ | ||
3217 | module_bug_finalize(info->hdr, info->sechdrs, mod); | 3246 | module_bug_finalize(info->hdr, info->sechdrs, mod); |
3218 | list_add_rcu(&mod->list, &modules); | 3247 | |
3248 | /* Mark state as coming so strong_try_module_get() ignores us, | ||
3249 | * but kallsyms etc. can see us. */ | ||
3250 | mod->state = MODULE_STATE_COMING; | ||
3251 | |||
3219 | mutex_unlock(&module_mutex); | 3252 | mutex_unlock(&module_mutex); |
3220 | 3253 | ||
3221 | /* Module is ready to execute: parsing args may do that. */ | 3254 | /* Module is ready to execute: parsing args may do that. */ |
3222 | err = parse_args(mod->name, mod->args, mod->kp, mod->num_kp, | 3255 | err = parse_args(mod->name, mod->args, mod->kp, mod->num_kp, |
3223 | -32768, 32767, &ddebug_dyndbg_module_param_cb); | 3256 | -32768, 32767, &ddebug_dyndbg_module_param_cb); |
3224 | if (err < 0) | 3257 | if (err < 0) |
3225 | goto unlink; | 3258 | goto bug_cleanup; |
3226 | 3259 | ||
3227 | /* Link in to syfs. */ | 3260 | /* Link in to syfs. */ |
3228 | err = mod_sysfs_setup(mod, info, mod->kp, mod->num_kp); | 3261 | err = mod_sysfs_setup(mod, info, mod->kp, mod->num_kp); |
3229 | if (err < 0) | 3262 | if (err < 0) |
3230 | goto unlink; | 3263 | goto bug_cleanup; |
3231 | 3264 | ||
3232 | /* Get rid of temporary copy. */ | 3265 | /* Get rid of temporary copy. */ |
3233 | free_copy(info); | 3266 | free_copy(info); |
@@ -3237,16 +3270,13 @@ again: | |||
3237 | 3270 | ||
3238 | return do_init_module(mod); | 3271 | return do_init_module(mod); |
3239 | 3272 | ||
3240 | unlink: | 3273 | bug_cleanup: |
3274 | /* module_bug_cleanup needs module_mutex protection */ | ||
3241 | mutex_lock(&module_mutex); | 3275 | mutex_lock(&module_mutex); |
3242 | /* Unlink carefully: kallsyms could be walking list. */ | ||
3243 | list_del_rcu(&mod->list); | ||
3244 | module_bug_cleanup(mod); | 3276 | module_bug_cleanup(mod); |
3245 | wake_up_all(&module_wq); | 3277 | ddebug_cleanup: |
3246 | ddebug: | ||
3247 | dynamic_debug_remove(info->debug); | ||
3248 | unlock: | ||
3249 | mutex_unlock(&module_mutex); | 3278 | mutex_unlock(&module_mutex); |
3279 | dynamic_debug_remove(info->debug); | ||
3250 | synchronize_sched(); | 3280 | synchronize_sched(); |
3251 | kfree(mod->args); | 3281 | kfree(mod->args); |
3252 | free_arch_cleanup: | 3282 | free_arch_cleanup: |
@@ -3255,6 +3285,12 @@ again: | |||
3255 | free_modinfo(mod); | 3285 | free_modinfo(mod); |
3256 | free_unload: | 3286 | free_unload: |
3257 | module_unload_free(mod); | 3287 | module_unload_free(mod); |
3288 | unlink_mod: | ||
3289 | mutex_lock(&module_mutex); | ||
3290 | /* Unlink carefully: kallsyms could be walking list. */ | ||
3291 | list_del_rcu(&mod->list); | ||
3292 | wake_up_all(&module_wq); | ||
3293 | mutex_unlock(&module_mutex); | ||
3258 | free_module: | 3294 | free_module: |
3259 | module_deallocate(mod, info); | 3295 | module_deallocate(mod, info); |
3260 | free_copy: | 3296 | free_copy: |
@@ -3377,6 +3413,8 @@ const char *module_address_lookup(unsigned long addr, | |||
3377 | 3413 | ||
3378 | preempt_disable(); | 3414 | preempt_disable(); |
3379 | list_for_each_entry_rcu(mod, &modules, list) { | 3415 | list_for_each_entry_rcu(mod, &modules, list) { |
3416 | if (mod->state == MODULE_STATE_UNFORMED) | ||
3417 | continue; | ||
3380 | if (within_module_init(addr, mod) || | 3418 | if (within_module_init(addr, mod) || |
3381 | within_module_core(addr, mod)) { | 3419 | within_module_core(addr, mod)) { |
3382 | if (modname) | 3420 | if (modname) |
@@ -3400,6 +3438,8 @@ int lookup_module_symbol_name(unsigned long addr, char *symname) | |||
3400 | 3438 | ||
3401 | preempt_disable(); | 3439 | preempt_disable(); |
3402 | list_for_each_entry_rcu(mod, &modules, list) { | 3440 | list_for_each_entry_rcu(mod, &modules, list) { |
3441 | if (mod->state == MODULE_STATE_UNFORMED) | ||
3442 | continue; | ||
3403 | if (within_module_init(addr, mod) || | 3443 | if (within_module_init(addr, mod) || |
3404 | within_module_core(addr, mod)) { | 3444 | within_module_core(addr, mod)) { |
3405 | const char *sym; | 3445 | const char *sym; |
@@ -3424,6 +3464,8 @@ int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size, | |||
3424 | 3464 | ||
3425 | preempt_disable(); | 3465 | preempt_disable(); |
3426 | list_for_each_entry_rcu(mod, &modules, list) { | 3466 | list_for_each_entry_rcu(mod, &modules, list) { |
3467 | if (mod->state == MODULE_STATE_UNFORMED) | ||
3468 | continue; | ||
3427 | if (within_module_init(addr, mod) || | 3469 | if (within_module_init(addr, mod) || |
3428 | within_module_core(addr, mod)) { | 3470 | within_module_core(addr, mod)) { |
3429 | const char *sym; | 3471 | const char *sym; |
@@ -3451,6 +3493,8 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, | |||
3451 | 3493 | ||
3452 | preempt_disable(); | 3494 | preempt_disable(); |
3453 | list_for_each_entry_rcu(mod, &modules, list) { | 3495 | list_for_each_entry_rcu(mod, &modules, list) { |
3496 | if (mod->state == MODULE_STATE_UNFORMED) | ||
3497 | continue; | ||
3454 | if (symnum < mod->num_symtab) { | 3498 | if (symnum < mod->num_symtab) { |
3455 | *value = mod->symtab[symnum].st_value; | 3499 | *value = mod->symtab[symnum].st_value; |
3456 | *type = mod->symtab[symnum].st_info; | 3500 | *type = mod->symtab[symnum].st_info; |
@@ -3493,9 +3537,12 @@ unsigned long module_kallsyms_lookup_name(const char *name) | |||
3493 | ret = mod_find_symname(mod, colon+1); | 3537 | ret = mod_find_symname(mod, colon+1); |
3494 | *colon = ':'; | 3538 | *colon = ':'; |
3495 | } else { | 3539 | } else { |
3496 | list_for_each_entry_rcu(mod, &modules, list) | 3540 | list_for_each_entry_rcu(mod, &modules, list) { |
3541 | if (mod->state == MODULE_STATE_UNFORMED) | ||
3542 | continue; | ||
3497 | if ((ret = mod_find_symname(mod, name)) != 0) | 3543 | if ((ret = mod_find_symname(mod, name)) != 0) |
3498 | break; | 3544 | break; |
3545 | } | ||
3499 | } | 3546 | } |
3500 | preempt_enable(); | 3547 | preempt_enable(); |
3501 | return ret; | 3548 | return ret; |
@@ -3510,6 +3557,8 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, | |||
3510 | int ret; | 3557 | int ret; |
3511 | 3558 | ||
3512 | list_for_each_entry(mod, &modules, list) { | 3559 | list_for_each_entry(mod, &modules, list) { |
3560 | if (mod->state == MODULE_STATE_UNFORMED) | ||
3561 | continue; | ||
3513 | for (i = 0; i < mod->num_symtab; i++) { | 3562 | for (i = 0; i < mod->num_symtab; i++) { |
3514 | ret = fn(data, mod->strtab + mod->symtab[i].st_name, | 3563 | ret = fn(data, mod->strtab + mod->symtab[i].st_name, |
3515 | mod, mod->symtab[i].st_value); | 3564 | mod, mod->symtab[i].st_value); |
@@ -3525,6 +3574,7 @@ static char *module_flags(struct module *mod, char *buf) | |||
3525 | { | 3574 | { |
3526 | int bx = 0; | 3575 | int bx = 0; |
3527 | 3576 | ||
3577 | BUG_ON(mod->state == MODULE_STATE_UNFORMED); | ||
3528 | if (mod->taints || | 3578 | if (mod->taints || |
3529 | mod->state == MODULE_STATE_GOING || | 3579 | mod->state == MODULE_STATE_GOING || |
3530 | mod->state == MODULE_STATE_COMING) { | 3580 | mod->state == MODULE_STATE_COMING) { |
@@ -3566,6 +3616,10 @@ static int m_show(struct seq_file *m, void *p) | |||
3566 | struct module *mod = list_entry(p, struct module, list); | 3616 | struct module *mod = list_entry(p, struct module, list); |
3567 | char buf[8]; | 3617 | char buf[8]; |
3568 | 3618 | ||
3619 | /* We always ignore unformed modules. */ | ||
3620 | if (mod->state == MODULE_STATE_UNFORMED) | ||
3621 | return 0; | ||
3622 | |||
3569 | seq_printf(m, "%s %u", | 3623 | seq_printf(m, "%s %u", |
3570 | mod->name, mod->init_size + mod->core_size); | 3624 | mod->name, mod->init_size + mod->core_size); |
3571 | print_unload_info(m, mod); | 3625 | print_unload_info(m, mod); |
@@ -3626,6 +3680,8 @@ const struct exception_table_entry *search_module_extables(unsigned long addr) | |||
3626 | 3680 | ||
3627 | preempt_disable(); | 3681 | preempt_disable(); |
3628 | list_for_each_entry_rcu(mod, &modules, list) { | 3682 | list_for_each_entry_rcu(mod, &modules, list) { |
3683 | if (mod->state == MODULE_STATE_UNFORMED) | ||
3684 | continue; | ||
3629 | if (mod->num_exentries == 0) | 3685 | if (mod->num_exentries == 0) |
3630 | continue; | 3686 | continue; |
3631 | 3687 | ||
@@ -3674,10 +3730,13 @@ struct module *__module_address(unsigned long addr) | |||
3674 | if (addr < module_addr_min || addr > module_addr_max) | 3730 | if (addr < module_addr_min || addr > module_addr_max) |
3675 | return NULL; | 3731 | return NULL; |
3676 | 3732 | ||
3677 | list_for_each_entry_rcu(mod, &modules, list) | 3733 | list_for_each_entry_rcu(mod, &modules, list) { |
3734 | if (mod->state == MODULE_STATE_UNFORMED) | ||
3735 | continue; | ||
3678 | if (within_module_core(addr, mod) | 3736 | if (within_module_core(addr, mod) |
3679 | || within_module_init(addr, mod)) | 3737 | || within_module_init(addr, mod)) |
3680 | return mod; | 3738 | return mod; |
3739 | } | ||
3681 | return NULL; | 3740 | return NULL; |
3682 | } | 3741 | } |
3683 | EXPORT_SYMBOL_GPL(__module_address); | 3742 | EXPORT_SYMBOL_GPL(__module_address); |
@@ -3730,8 +3789,11 @@ void print_modules(void) | |||
3730 | printk(KERN_DEFAULT "Modules linked in:"); | 3789 | printk(KERN_DEFAULT "Modules linked in:"); |
3731 | /* Most callers should already have preempt disabled, but make sure */ | 3790 | /* Most callers should already have preempt disabled, but make sure */ |
3732 | preempt_disable(); | 3791 | preempt_disable(); |
3733 | list_for_each_entry_rcu(mod, &modules, list) | 3792 | list_for_each_entry_rcu(mod, &modules, list) { |
3793 | if (mod->state == MODULE_STATE_UNFORMED) | ||
3794 | continue; | ||
3734 | printk(" %s%s", mod->name, module_flags(mod, buf)); | 3795 | printk(" %s%s", mod->name, module_flags(mod, buf)); |
3796 | } | ||
3735 | preempt_enable(); | 3797 | preempt_enable(); |
3736 | if (last_unloaded_module[0]) | 3798 | if (last_unloaded_module[0]) |
3737 | printk(" [last unloaded: %s]", last_unloaded_module); | 3799 | printk(" [last unloaded: %s]", last_unloaded_module); |
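Most of the kernel/module.c hunks apply one rule: a module now sits on the global list in MODULE_STATE_UNFORMED while it is still being built, and every list walker that is not the loader itself must skip such entries. A self-contained sketch of the find_module()/find_module_all() split, using an array instead of the kernel's RCU list and with no locking shown:

#include <stdio.h>
#include <string.h>

enum module_state { STATE_LIVE, STATE_COMING, STATE_GOING, STATE_UNFORMED };

struct module {
        const char *name;
        enum module_state state;
};

/* Mirrors find_module_all(): only even_unformed callers see half-built modules. */
static const struct module *find_module_all(const struct module *mods, int n,
                                            const char *name, int even_unformed)
{
        for (int i = 0; i < n; i++) {
                if (!even_unformed && mods[i].state == STATE_UNFORMED)
                        continue;
                if (strcmp(mods[i].name, name) == 0)
                        return &mods[i];
        }
        return NULL;
}

int main(void)
{
        struct module mods[] = {
                { "loop",  STATE_LIVE },
                { "newfs", STATE_UNFORMED },    /* still being loaded */
        };

        /* Ordinary lookups (symbol resolution, /proc/modules, ...) skip it. */
        printf("find_module(\"newfs\")           -> %s\n",
               find_module_all(mods, 2, "newfs", 0) ? "found" : "not found");
        /* The loader itself asks for unformed entries to detect duplicates. */
        printf("find_module_all(\"newfs\", true) -> %s\n",
               find_module_all(mods, 2, "newfs", 1) ? "found" : "not found");
        return 0;
}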
diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 1599157336a6..6cbeaae4406d 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c | |||
@@ -117,11 +117,45 @@ void __ptrace_unlink(struct task_struct *child) | |||
117 | * TASK_KILLABLE sleeps. | 117 | * TASK_KILLABLE sleeps. |
118 | */ | 118 | */ |
119 | if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child)) | 119 | if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child)) |
120 | signal_wake_up(child, task_is_traced(child)); | 120 | ptrace_signal_wake_up(child, true); |
121 | 121 | ||
122 | spin_unlock(&child->sighand->siglock); | 122 | spin_unlock(&child->sighand->siglock); |
123 | } | 123 | } |
124 | 124 | ||
125 | /* Ensure that nothing can wake it up, even SIGKILL */ | ||
126 | static bool ptrace_freeze_traced(struct task_struct *task) | ||
127 | { | ||
128 | bool ret = false; | ||
129 | |||
130 | /* Lockless, nobody but us can set this flag */ | ||
131 | if (task->jobctl & JOBCTL_LISTENING) | ||
132 | return ret; | ||
133 | |||
134 | spin_lock_irq(&task->sighand->siglock); | ||
135 | if (task_is_traced(task) && !__fatal_signal_pending(task)) { | ||
136 | task->state = __TASK_TRACED; | ||
137 | ret = true; | ||
138 | } | ||
139 | spin_unlock_irq(&task->sighand->siglock); | ||
140 | |||
141 | return ret; | ||
142 | } | ||
143 | |||
144 | static void ptrace_unfreeze_traced(struct task_struct *task) | ||
145 | { | ||
146 | if (task->state != __TASK_TRACED) | ||
147 | return; | ||
148 | |||
149 | WARN_ON(!task->ptrace || task->parent != current); | ||
150 | |||
151 | spin_lock_irq(&task->sighand->siglock); | ||
152 | if (__fatal_signal_pending(task)) | ||
153 | wake_up_state(task, __TASK_TRACED); | ||
154 | else | ||
155 | task->state = TASK_TRACED; | ||
156 | spin_unlock_irq(&task->sighand->siglock); | ||
157 | } | ||
158 | |||
125 | /** | 159 | /** |
126 | * ptrace_check_attach - check whether ptracee is ready for ptrace operation | 160 | * ptrace_check_attach - check whether ptracee is ready for ptrace operation |
127 | * @child: ptracee to check for | 161 | * @child: ptracee to check for |
@@ -139,7 +173,7 @@ void __ptrace_unlink(struct task_struct *child) | |||
139 | * RETURNS: | 173 | * RETURNS: |
140 | * 0 on success, -ESRCH if %child is not ready. | 174 | * 0 on success, -ESRCH if %child is not ready. |
141 | */ | 175 | */ |
142 | int ptrace_check_attach(struct task_struct *child, bool ignore_state) | 176 | static int ptrace_check_attach(struct task_struct *child, bool ignore_state) |
143 | { | 177 | { |
144 | int ret = -ESRCH; | 178 | int ret = -ESRCH; |
145 | 179 | ||
@@ -151,24 +185,29 @@ int ptrace_check_attach(struct task_struct *child, bool ignore_state) | |||
151 | * be changed by us so it's not changing right after this. | 185 | * be changed by us so it's not changing right after this. |
152 | */ | 186 | */ |
153 | read_lock(&tasklist_lock); | 187 | read_lock(&tasklist_lock); |
154 | if ((child->ptrace & PT_PTRACED) && child->parent == current) { | 188 | if (child->ptrace && child->parent == current) { |
189 | WARN_ON(child->state == __TASK_TRACED); | ||
155 | /* | 190 | /* |
156 | * child->sighand can't be NULL, release_task() | 191 | * child->sighand can't be NULL, release_task() |
157 | * does ptrace_unlink() before __exit_signal(). | 192 | * does ptrace_unlink() before __exit_signal(). |
158 | */ | 193 | */ |
159 | spin_lock_irq(&child->sighand->siglock); | 194 | if (ignore_state || ptrace_freeze_traced(child)) |
160 | WARN_ON_ONCE(task_is_stopped(child)); | ||
161 | if (ignore_state || (task_is_traced(child) && | ||
162 | !(child->jobctl & JOBCTL_LISTENING))) | ||
163 | ret = 0; | 195 | ret = 0; |
164 | spin_unlock_irq(&child->sighand->siglock); | ||
165 | } | 196 | } |
166 | read_unlock(&tasklist_lock); | 197 | read_unlock(&tasklist_lock); |
167 | 198 | ||
168 | if (!ret && !ignore_state) | 199 | if (!ret && !ignore_state) { |
169 | ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH; | 200 | if (!wait_task_inactive(child, __TASK_TRACED)) { |
201 | /* | ||
202 | * This can only happen if may_ptrace_stop() fails and | ||
203 | * ptrace_stop() changes ->state back to TASK_RUNNING, | ||
204 | * so we should not worry about leaking __TASK_TRACED. | ||
205 | */ | ||
206 | WARN_ON(child->state == __TASK_TRACED); | ||
207 | ret = -ESRCH; | ||
208 | } | ||
209 | } | ||
170 | 210 | ||
171 | /* All systems go.. */ | ||
172 | return ret; | 211 | return ret; |
173 | } | 212 | } |
174 | 213 | ||
@@ -317,7 +356,7 @@ static int ptrace_attach(struct task_struct *task, long request, | |||
317 | */ | 356 | */ |
318 | if (task_is_stopped(task) && | 357 | if (task_is_stopped(task) && |
319 | task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING)) | 358 | task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING)) |
320 | signal_wake_up(task, 1); | 359 | signal_wake_up_state(task, __TASK_STOPPED); |
321 | 360 | ||
322 | spin_unlock(&task->sighand->siglock); | 361 | spin_unlock(&task->sighand->siglock); |
323 | 362 | ||
@@ -737,7 +776,7 @@ int ptrace_request(struct task_struct *child, long request, | |||
737 | * tracee into STOP. | 776 | * tracee into STOP. |
738 | */ | 777 | */ |
739 | if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP))) | 778 | if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP))) |
740 | signal_wake_up(child, child->jobctl & JOBCTL_LISTENING); | 779 | ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING); |
741 | 780 | ||
742 | unlock_task_sighand(child, &flags); | 781 | unlock_task_sighand(child, &flags); |
743 | ret = 0; | 782 | ret = 0; |
@@ -763,7 +802,7 @@ int ptrace_request(struct task_struct *child, long request, | |||
763 | * start of this trap and now. Trigger re-trap. | 802 | * start of this trap and now. Trigger re-trap. |
764 | */ | 803 | */ |
765 | if (child->jobctl & JOBCTL_TRAP_NOTIFY) | 804 | if (child->jobctl & JOBCTL_TRAP_NOTIFY) |
766 | signal_wake_up(child, true); | 805 | ptrace_signal_wake_up(child, true); |
767 | ret = 0; | 806 | ret = 0; |
768 | } | 807 | } |
769 | unlock_task_sighand(child, &flags); | 808 | unlock_task_sighand(child, &flags); |
@@ -900,6 +939,8 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr, | |||
900 | goto out_put_task_struct; | 939 | goto out_put_task_struct; |
901 | 940 | ||
902 | ret = arch_ptrace(child, request, addr, data); | 941 | ret = arch_ptrace(child, request, addr, data); |
942 | if (ret || request != PTRACE_DETACH) | ||
943 | ptrace_unfreeze_traced(child); | ||
903 | 944 | ||
904 | out_put_task_struct: | 945 | out_put_task_struct: |
905 | put_task_struct(child); | 946 | put_task_struct(child); |
@@ -1039,8 +1080,11 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid, | |||
1039 | 1080 | ||
1040 | ret = ptrace_check_attach(child, request == PTRACE_KILL || | 1081 | ret = ptrace_check_attach(child, request == PTRACE_KILL || |
1041 | request == PTRACE_INTERRUPT); | 1082 | request == PTRACE_INTERRUPT); |
1042 | if (!ret) | 1083 | if (!ret) { |
1043 | ret = compat_arch_ptrace(child, request, addr, data); | 1084 | ret = compat_arch_ptrace(child, request, addr, data); |
1085 | if (ret || request != PTRACE_DETACH) | ||
1086 | ptrace_unfreeze_traced(child); | ||
1087 | } | ||
1044 | 1088 | ||
1045 | out_put_task_struct: | 1089 | out_put_task_struct: |
1046 | put_task_struct(child); | 1090 | put_task_struct(child); |
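ptrace_check_attach() now freezes the tracee in __TASK_TRACED for the duration of a request and unfreezes it afterwards, unless the request was a successful PTRACE_DETACH. A much-simplified, single-threaded model of those two helpers; an enum stands in for the task-state bits and siglock is omitted:

#include <stdbool.h>
#include <stdio.h>

enum child_state { STATE_RUNNING, STATE_TRACED, STATE_TRACED_FROZEN };

struct child {
        enum child_state state;
        bool fatal_signal_pending;
};

/* Freeze only succeeds for a traced child with no SIGKILL queued. */
static bool freeze_traced(struct child *c)
{
        if (c->state != STATE_TRACED || c->fatal_signal_pending)
                return false;
        c->state = STATE_TRACED_FROZEN;         /* nothing may wake it now */
        return true;
}

static void unfreeze_traced(struct child *c)
{
        if (c->state != STATE_TRACED_FROZEN)
                return;
        if (c->fatal_signal_pending)
                c->state = STATE_RUNNING;       /* let it handle SIGKILL */
        else
                c->state = STATE_TRACED;
}

int main(void)
{
        struct child c = { STATE_TRACED, false };

        if (freeze_traced(&c)) {
                /* ... the tracer inspects registers/memory undisturbed ... */
                c.fatal_signal_pending = true;  /* SIGKILL arrives meanwhile */
                unfreeze_traced(&c);
        }
        printf("child state after the request: %d\n", c.state);
        return 0;
}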
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 257002c13bb0..26058d0bebba 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
@@ -1523,7 +1523,8 @@ out: | |||
1523 | */ | 1523 | */ |
1524 | int wake_up_process(struct task_struct *p) | 1524 | int wake_up_process(struct task_struct *p) |
1525 | { | 1525 | { |
1526 | return try_to_wake_up(p, TASK_ALL, 0); | 1526 | WARN_ON(task_is_stopped_or_traced(p)); |
1527 | return try_to_wake_up(p, TASK_NORMAL, 0); | ||
1527 | } | 1528 | } |
1528 | EXPORT_SYMBOL(wake_up_process); | 1529 | EXPORT_SYMBOL(wake_up_process); |
1529 | 1530 | ||
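The wake_up_process() change matters for the freeze above: TASK_NORMAL covers only interruptible and uninterruptible sleep, so a stray wake_up_process() can no longer kick a task parked in __TASK_TRACED or __TASK_STOPPED. A small sketch of the mask arithmetic, with the bit values copied as assumptions from that era's sched.h:

#include <stdio.h>

#define TASK_INTERRUPTIBLE   1
#define TASK_UNINTERRUPTIBLE 2
#define __TASK_STOPPED       4
#define __TASK_TRACED        8

#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL    (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)

/* try_to_wake_up() only wakes a task whose state intersects the mask. */
static int would_wake(unsigned int task_state, unsigned int wake_mask)
{
        return (task_state & wake_mask) != 0;
}

int main(void)
{
        unsigned int frozen_tracee = __TASK_TRACED;

        printf("frozen tracee, TASK_ALL mask:    %s\n",
               would_wake(frozen_tracee, TASK_ALL) ? "woken" : "left alone");
        printf("frozen tracee, TASK_NORMAL mask: %s\n",
               would_wake(frozen_tracee, TASK_NORMAL) ? "woken" : "left alone");
        return 0;
}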
diff --git a/kernel/signal.c b/kernel/signal.c index 372771e948c2..3d09cf6cde75 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
@@ -680,23 +680,17 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) | |||
680 | * No need to set need_resched since signal event passing | 680 | * No need to set need_resched since signal event passing |
681 | * goes through ->blocked | 681 | * goes through ->blocked |
682 | */ | 682 | */ |
683 | void signal_wake_up(struct task_struct *t, int resume) | 683 | void signal_wake_up_state(struct task_struct *t, unsigned int state) |
684 | { | 684 | { |
685 | unsigned int mask; | ||
686 | |||
687 | set_tsk_thread_flag(t, TIF_SIGPENDING); | 685 | set_tsk_thread_flag(t, TIF_SIGPENDING); |
688 | |||
689 | /* | 686 | /* |
690 | * For SIGKILL, we want to wake it up in the stopped/traced/killable | 687 | * TASK_WAKEKILL also means wake it up in the stopped/traced/killable |
691 | * case. We don't check t->state here because there is a race with it | 688 | * case. We don't check t->state here because there is a race with it |
692 | * executing another processor and just now entering stopped state. | 689 | * executing another processor and just now entering stopped state. |
693 | * By using wake_up_state, we ensure the process will wake up and | 690 | * By using wake_up_state, we ensure the process will wake up and |
694 | * handle its death signal. | 691 | * handle its death signal. |
695 | */ | 692 | */ |
696 | mask = TASK_INTERRUPTIBLE; | 693 | if (!wake_up_state(t, state | TASK_INTERRUPTIBLE)) |
697 | if (resume) | ||
698 | mask |= TASK_WAKEKILL; | ||
699 | if (!wake_up_state(t, mask)) | ||
700 | kick_process(t); | 694 | kick_process(t); |
701 | } | 695 | } |
702 | 696 | ||
@@ -844,7 +838,7 @@ static void ptrace_trap_notify(struct task_struct *t) | |||
844 | assert_spin_locked(&t->sighand->siglock); | 838 | assert_spin_locked(&t->sighand->siglock); |
845 | 839 | ||
846 | task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY); | 840 | task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY); |
847 | signal_wake_up(t, t->jobctl & JOBCTL_LISTENING); | 841 | ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING); |
848 | } | 842 | } |
849 | 843 | ||
850 | /* | 844 | /* |
@@ -1800,6 +1794,10 @@ static inline int may_ptrace_stop(void) | |||
1800 | * If SIGKILL was already sent before the caller unlocked | 1794 | * If SIGKILL was already sent before the caller unlocked |
1801 | * ->siglock we must see ->core_state != NULL. Otherwise it | 1795 | * ->siglock we must see ->core_state != NULL. Otherwise it |
1802 | * is safe to enter schedule(). | 1796 | * is safe to enter schedule(). |
1797 | * | ||
1798 | * This is almost outdated, a task with the pending SIGKILL can't | ||
1799 | * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported | ||
1800 | * after SIGKILL was already dequeued. | ||
1803 | */ | 1801 | */ |
1804 | if (unlikely(current->mm->core_state) && | 1802 | if (unlikely(current->mm->core_state) && |
1805 | unlikely(current->mm == current->parent->mm)) | 1803 | unlikely(current->mm == current->parent->mm)) |
@@ -1925,6 +1923,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info) | |||
1925 | if (gstop_done) | 1923 | if (gstop_done) |
1926 | do_notify_parent_cldstop(current, false, why); | 1924 | do_notify_parent_cldstop(current, false, why); |
1927 | 1925 | ||
1926 | /* tasklist protects us from ptrace_freeze_traced() */ | ||
1928 | __set_current_state(TASK_RUNNING); | 1927 | __set_current_state(TASK_RUNNING); |
1929 | if (clear_code) | 1928 | if (clear_code) |
1930 | current->exit_code = 0; | 1929 | current->exit_code = 0; |
@@ -3116,8 +3115,9 @@ int __save_altstack(stack_t __user *uss, unsigned long sp) | |||
3116 | 3115 | ||
3117 | #ifdef CONFIG_COMPAT | 3116 | #ifdef CONFIG_COMPAT |
3118 | #ifdef CONFIG_GENERIC_SIGALTSTACK | 3117 | #ifdef CONFIG_GENERIC_SIGALTSTACK |
3119 | asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr, | 3118 | COMPAT_SYSCALL_DEFINE2(sigaltstack, |
3120 | compat_stack_t __user *uoss_ptr) | 3119 | const compat_stack_t __user *, uss_ptr, |
3120 | compat_stack_t __user *, uoss_ptr) | ||
3121 | { | 3121 | { |
3122 | stack_t uss, uoss; | 3122 | stack_t uss, uoss; |
3123 | int ret; | 3123 | int ret; |
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 3ffe4c5ad3f3..41473b4ad7a4 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -3998,7 +3998,7 @@ static int ftrace_module_notify(struct notifier_block *self, | |||
3998 | 3998 | ||
3999 | struct notifier_block ftrace_module_nb = { | 3999 | struct notifier_block ftrace_module_nb = { |
4000 | .notifier_call = ftrace_module_notify, | 4000 | .notifier_call = ftrace_module_notify, |
4001 | .priority = 0, | 4001 | .priority = INT_MAX, /* Run before anything that can use kprobes */ |
4002 | }; | 4002 | }; |
4003 | 4003 | ||
4004 | extern unsigned long __start_mcount_loc[]; | 4004 | extern unsigned long __start_mcount_loc[]; |
diff --git a/lib/bug.c b/lib/bug.c --- a/lib/bug.c +++ b/lib/bug.c | |||
@@ -55,6 +55,7 @@ static inline unsigned long bug_addr(const struct bug_entry *bug) | |||
55 | } | 55 | } |
56 | 56 | ||
57 | #ifdef CONFIG_MODULES | 57 | #ifdef CONFIG_MODULES |
58 | /* Updates are protected by module mutex */ | ||
58 | static LIST_HEAD(module_bug_list); | 59 | static LIST_HEAD(module_bug_list); |
59 | 60 | ||
60 | static const struct bug_entry *module_find_bug(unsigned long bugaddr) | 61 | static const struct bug_entry *module_find_bug(unsigned long bugaddr) |
diff --git a/security/device_cgroup.c b/security/device_cgroup.c index 19ecc8de9e6b..d794abcc4b3b 100644 --- a/security/device_cgroup.c +++ b/security/device_cgroup.c | |||
@@ -215,7 +215,9 @@ static void devcgroup_css_free(struct cgroup *cgroup) | |||
215 | struct dev_cgroup *dev_cgroup; | 215 | struct dev_cgroup *dev_cgroup; |
216 | 216 | ||
217 | dev_cgroup = cgroup_to_devcgroup(cgroup); | 217 | dev_cgroup = cgroup_to_devcgroup(cgroup); |
218 | mutex_lock(&devcgroup_mutex); | ||
218 | dev_exception_clean(dev_cgroup); | 219 | dev_exception_clean(dev_cgroup); |
220 | mutex_unlock(&devcgroup_mutex); | ||
219 | kfree(dev_cgroup); | 221 | kfree(dev_cgroup); |
220 | } | 222 | } |
221 | 223 | ||
diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c index dfb26918699c..7dd538ef5b83 100644 --- a/security/integrity/evm/evm_crypto.c +++ b/security/integrity/evm/evm_crypto.c | |||
@@ -205,9 +205,9 @@ int evm_update_evmxattr(struct dentry *dentry, const char *xattr_name, | |||
205 | rc = __vfs_setxattr_noperm(dentry, XATTR_NAME_EVM, | 205 | rc = __vfs_setxattr_noperm(dentry, XATTR_NAME_EVM, |
206 | &xattr_data, | 206 | &xattr_data, |
207 | sizeof(xattr_data), 0); | 207 | sizeof(xattr_data), 0); |
208 | } | 208 | } else if (rc == -ENODATA && inode->i_op->removexattr) { |
209 | else if (rc == -ENODATA) | ||
210 | rc = inode->i_op->removexattr(dentry, XATTR_NAME_EVM); | 209 | rc = inode->i_op->removexattr(dentry, XATTR_NAME_EVM); |
210 | } | ||
211 | return rc; | 211 | return rc; |
212 | } | 212 | } |
213 | 213 | ||
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index b8fb0a5adb9b..822df971972c 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c | |||
@@ -3654,6 +3654,7 @@ static void hda_call_codec_resume(struct hda_codec *codec) | |||
3654 | hda_set_power_state(codec, AC_PWRST_D0); | 3654 | hda_set_power_state(codec, AC_PWRST_D0); |
3655 | restore_shutup_pins(codec); | 3655 | restore_shutup_pins(codec); |
3656 | hda_exec_init_verbs(codec); | 3656 | hda_exec_init_verbs(codec); |
3657 | snd_hda_jack_set_dirty_all(codec); | ||
3657 | if (codec->patch_ops.resume) | 3658 | if (codec->patch_ops.resume) |
3658 | codec->patch_ops.resume(codec); | 3659 | codec->patch_ops.resume(codec); |
3659 | else { | 3660 | else { |
@@ -3665,10 +3666,8 @@ static void hda_call_codec_resume(struct hda_codec *codec) | |||
3665 | 3666 | ||
3666 | if (codec->jackpoll_interval) | 3667 | if (codec->jackpoll_interval) |
3667 | hda_jackpoll_work(&codec->jackpoll_work.work); | 3668 | hda_jackpoll_work(&codec->jackpoll_work.work); |
3668 | else { | 3669 | else |
3669 | snd_hda_jack_set_dirty_all(codec); | ||
3670 | snd_hda_jack_report_sync(codec); | 3670 | snd_hda_jack_report_sync(codec); |
3671 | } | ||
3672 | 3671 | ||
3673 | codec->in_pm = 0; | 3672 | codec->in_pm = 0; |
3674 | snd_hda_power_down(codec); /* flag down before returning */ | 3673 | snd_hda_power_down(codec); /* flag down before returning */ |
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index dd798c3196ff..009b77a693cf 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c | |||
@@ -4636,6 +4636,12 @@ static const struct hda_codec_preset snd_hda_preset_conexant[] = { | |||
4636 | .patch = patch_conexant_auto }, | 4636 | .patch = patch_conexant_auto }, |
4637 | { .id = 0x14f15111, .name = "CX20753/4", | 4637 | { .id = 0x14f15111, .name = "CX20753/4", |
4638 | .patch = patch_conexant_auto }, | 4638 | .patch = patch_conexant_auto }, |
4639 | { .id = 0x14f15113, .name = "CX20755", | ||
4640 | .patch = patch_conexant_auto }, | ||
4641 | { .id = 0x14f15114, .name = "CX20756", | ||
4642 | .patch = patch_conexant_auto }, | ||
4643 | { .id = 0x14f15115, .name = "CX20757", | ||
4644 | .patch = patch_conexant_auto }, | ||
4639 | {} /* terminator */ | 4645 | {} /* terminator */ |
4640 | }; | 4646 | }; |
4641 | 4647 | ||
@@ -4659,6 +4665,9 @@ MODULE_ALIAS("snd-hda-codec-id:14f150b9"); | |||
4659 | MODULE_ALIAS("snd-hda-codec-id:14f1510f"); | 4665 | MODULE_ALIAS("snd-hda-codec-id:14f1510f"); |
4660 | MODULE_ALIAS("snd-hda-codec-id:14f15110"); | 4666 | MODULE_ALIAS("snd-hda-codec-id:14f15110"); |
4661 | MODULE_ALIAS("snd-hda-codec-id:14f15111"); | 4667 | MODULE_ALIAS("snd-hda-codec-id:14f15111"); |
4668 | MODULE_ALIAS("snd-hda-codec-id:14f15113"); | ||
4669 | MODULE_ALIAS("snd-hda-codec-id:14f15114"); | ||
4670 | MODULE_ALIAS("snd-hda-codec-id:14f15115"); | ||
4662 | 4671 | ||
4663 | MODULE_LICENSE("GPL"); | 4672 | MODULE_LICENSE("GPL"); |
4664 | MODULE_DESCRIPTION("Conexant HD-audio codec"); | 4673 | MODULE_DESCRIPTION("Conexant HD-audio codec"); |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index f5196277b6e9..cf3886171109 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -6251,6 +6251,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { | |||
6251 | SND_PCI_QUIRK(0x1025, 0x0349, "Acer AOD260", ALC269_FIXUP_INV_DMIC), | 6251 | SND_PCI_QUIRK(0x1025, 0x0349, "Acer AOD260", ALC269_FIXUP_INV_DMIC), |
6252 | SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_MIC2_MUTE_LED), | 6252 | SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_MIC2_MUTE_LED), |
6253 | SND_PCI_QUIRK(0x103c, 0x1972, "HP Pavilion 17", ALC269_FIXUP_MIC1_MUTE_LED), | 6253 | SND_PCI_QUIRK(0x103c, 0x1972, "HP Pavilion 17", ALC269_FIXUP_MIC1_MUTE_LED), |
6254 | SND_PCI_QUIRK(0x103c, 0x1977, "HP Pavilion 14", ALC269_FIXUP_MIC1_MUTE_LED), | ||
6254 | SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_DMIC), | 6255 | SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_DMIC), |
6255 | SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_DMIC), | 6256 | SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_DMIC), |
6256 | SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW), | 6257 | SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW), |
@@ -6265,6 +6266,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { | |||
6265 | SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ), | 6266 | SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ), |
6266 | SND_PCI_QUIRK_VENDOR(0x104d, "Sony VAIO", ALC269_FIXUP_SONY_VAIO), | 6267 | SND_PCI_QUIRK_VENDOR(0x104d, "Sony VAIO", ALC269_FIXUP_SONY_VAIO), |
6267 | SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z), | 6268 | SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z), |
6269 | SND_PCI_QUIRK(0x1025, 0x0740, "Acer AO725", ALC271_FIXUP_HP_GATE_MIC_JACK), | ||
6268 | SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK), | 6270 | SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK), |
6269 | SND_PCI_QUIRK_VENDOR(0x1025, "Acer Aspire", ALC271_FIXUP_DMIC), | 6271 | SND_PCI_QUIRK_VENDOR(0x1025, "Acer Aspire", ALC271_FIXUP_DMIC), |
6270 | SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK), | 6272 | SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK), |
diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST index 80db3f4bcf7a..39d41068484f 100644 --- a/tools/perf/MANIFEST +++ b/tools/perf/MANIFEST | |||
@@ -11,11 +11,21 @@ lib/rbtree.c | |||
11 | include/linux/swab.h | 11 | include/linux/swab.h |
12 | arch/*/include/asm/unistd*.h | 12 | arch/*/include/asm/unistd*.h |
13 | arch/*/include/asm/perf_regs.h | 13 | arch/*/include/asm/perf_regs.h |
14 | arch/*/include/uapi/asm/unistd*.h | ||
15 | arch/*/include/uapi/asm/perf_regs.h | ||
14 | arch/*/lib/memcpy*.S | 16 | arch/*/lib/memcpy*.S |
15 | arch/*/lib/memset*.S | 17 | arch/*/lib/memset*.S |
16 | include/linux/poison.h | 18 | include/linux/poison.h |
17 | include/linux/magic.h | 19 | include/linux/magic.h |
18 | include/linux/hw_breakpoint.h | 20 | include/linux/hw_breakpoint.h |
21 | include/linux/rbtree_augmented.h | ||
22 | include/uapi/linux/perf_event.h | ||
23 | include/uapi/linux/const.h | ||
24 | include/uapi/linux/swab.h | ||
25 | include/uapi/linux/hw_breakpoint.h | ||
19 | arch/x86/include/asm/svm.h | 26 | arch/x86/include/asm/svm.h |
20 | arch/x86/include/asm/vmx.h | 27 | arch/x86/include/asm/vmx.h |
21 | arch/x86/include/asm/kvm_host.h | 28 | arch/x86/include/asm/kvm_host.h |
29 | arch/x86/include/uapi/asm/svm.h | ||
30 | arch/x86/include/uapi/asm/vmx.h | ||
31 | arch/x86/include/uapi/asm/kvm.h | ||
diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 891bc77bdb2c..8ab05e543ef4 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile | |||
@@ -58,7 +58,7 @@ ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \ | |||
58 | -e s/arm.*/arm/ -e s/sa110/arm/ \ | 58 | -e s/arm.*/arm/ -e s/sa110/arm/ \ |
59 | -e s/s390x/s390/ -e s/parisc64/parisc/ \ | 59 | -e s/s390x/s390/ -e s/parisc64/parisc/ \ |
60 | -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \ | 60 | -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \ |
61 | -e s/sh[234].*/sh/ ) | 61 | -e s/sh[234].*/sh/ -e s/aarch64.*/arm64/ ) |
62 | NO_PERF_REGS := 1 | 62 | NO_PERF_REGS := 1 |
63 | 63 | ||
64 | CC = $(CROSS_COMPILE)gcc | 64 | CC = $(CROSS_COMPILE)gcc |