author:    Dave Airlie <airlied@redhat.com>  2010-10-05 21:10:48 -0400
committer: Dave Airlie <airlied@redhat.com>  2010-10-05 21:10:48 -0400
commit:    fb7ba2114bcd8bb51640c20bc68f89164b29b9ed (patch)
tree:      80b4a779130a477680a72109257fb8c19d66cf22
parent:    9a170caed6fce89da77852575a7eee7dbadee332 (diff)
parent:    abb295f3b3db602f91accf58b526b30b48673af1 (diff)
Merge remote branch 'korg/drm-fixes' into drm-vmware-next
necessary for some of the vmware fixes to be pushed in.
Conflicts:
drivers/gpu/drm/drm_gem.c
drivers/gpu/drm/i915/intel_fb.c
include/drm/drmP.h
81 files changed, 645 insertions, 375 deletions
diff --git a/MAINTAINERS b/MAINTAINERS index 534f4d0accb..5e1169df8c1 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -2677,6 +2677,8 @@ M: Guenter Roeck <guenter.roeck@ericsson.com> | |||
2677 | L: lm-sensors@lm-sensors.org | 2677 | L: lm-sensors@lm-sensors.org |
2678 | W: http://www.lm-sensors.org/ | 2678 | W: http://www.lm-sensors.org/ |
2679 | T: quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/ | 2679 | T: quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/ |
2680 | T: quilt kernel.org/pub/linux/kernel/people/groeck/linux-staging/ | ||
2681 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git | ||
2680 | S: Maintained | 2682 | S: Maintained |
2681 | F: Documentation/hwmon/ | 2683 | F: Documentation/hwmon/ |
2682 | F: drivers/hwmon/ | 2684 | F: drivers/hwmon/ |
diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S index ab1ee0ab082..d1273c1a136 100644 --- a/arch/alpha/kernel/entry.S +++ b/arch/alpha/kernel/entry.S | |||
@@ -73,8 +73,6 @@ | |||
73 | ldq $20, HAE_REG($19); \ | 73 | ldq $20, HAE_REG($19); \ |
74 | stq $21, HAE_CACHE($19); \ | 74 | stq $21, HAE_CACHE($19); \ |
75 | stq $21, 0($20); \ | 75 | stq $21, 0($20); \ |
76 | ldq $0, 0($sp); \ | ||
77 | ldq $1, 8($sp); \ | ||
78 | 99:; \ | 76 | 99:; \ |
79 | ldq $19, 72($sp); \ | 77 | ldq $19, 72($sp); \ |
80 | ldq $20, 80($sp); \ | 78 | ldq $20, 80($sp); \ |
@@ -316,7 +314,7 @@ ret_from_sys_call: | |||
316 | cmovne $26, 0, $19 /* $19 = 0 => non-restartable */ | 314 | cmovne $26, 0, $19 /* $19 = 0 => non-restartable */ |
317 | ldq $0, SP_OFF($sp) | 315 | ldq $0, SP_OFF($sp) |
318 | and $0, 8, $0 | 316 | and $0, 8, $0 |
319 | beq $0, restore_all | 317 | beq $0, ret_to_kernel |
320 | ret_to_user: | 318 | ret_to_user: |
321 | /* Make sure need_resched and sigpending don't change between | 319 | /* Make sure need_resched and sigpending don't change between |
322 | sampling and the rti. */ | 320 | sampling and the rti. */ |
@@ -329,6 +327,11 @@ restore_all: | |||
329 | RESTORE_ALL | 327 | RESTORE_ALL |
330 | call_pal PAL_rti | 328 | call_pal PAL_rti |
331 | 329 | ||
330 | ret_to_kernel: | ||
331 | lda $16, 7 | ||
332 | call_pal PAL_swpipl | ||
333 | br restore_all | ||
334 | |||
332 | .align 3 | 335 | .align 3 |
333 | $syscall_error: | 336 | $syscall_error: |
334 | /* | 337 | /* |
@@ -657,7 +660,7 @@ kernel_thread: | |||
657 | /* We don't actually care for a3 success widgetry in the kernel. | 660 | /* We don't actually care for a3 success widgetry in the kernel. |
658 | Not for positive errno values. */ | 661 | Not for positive errno values. */ |
659 | stq $0, 0($sp) /* $0 */ | 662 | stq $0, 0($sp) /* $0 */ |
660 | br restore_all | 663 | br ret_to_kernel |
661 | .end kernel_thread | 664 | .end kernel_thread |
662 | 665 | ||
663 | /* | 666 | /* |
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c index 842dba308ea..3ec35066f1d 100644 --- a/arch/alpha/kernel/process.c +++ b/arch/alpha/kernel/process.c | |||
@@ -356,7 +356,7 @@ dump_elf_thread(elf_greg_t *dest, struct pt_regs *pt, struct thread_info *ti) | |||
356 | dest[27] = pt->r27; | 356 | dest[27] = pt->r27; |
357 | dest[28] = pt->r28; | 357 | dest[28] = pt->r28; |
358 | dest[29] = pt->gp; | 358 | dest[29] = pt->gp; |
359 | dest[30] = rdusp(); | 359 | dest[30] = ti == current_thread_info() ? rdusp() : ti->pcb.usp; |
360 | dest[31] = pt->pc; | 360 | dest[31] = pt->pc; |
361 | 361 | ||
362 | /* Once upon a time this was the PS value. Which is stupid | 362 | /* Once upon a time this was the PS value. Which is stupid |
diff --git a/arch/m32r/include/asm/signal.h b/arch/m32r/include/asm/signal.h index 9c1acb2b1a9..b2eeb0de1c8 100644 --- a/arch/m32r/include/asm/signal.h +++ b/arch/m32r/include/asm/signal.h | |||
@@ -157,7 +157,6 @@ typedef struct sigaltstack { | |||
157 | #undef __HAVE_ARCH_SIG_BITOPS | 157 | #undef __HAVE_ARCH_SIG_BITOPS |
158 | 158 | ||
159 | struct pt_regs; | 159 | struct pt_regs; |
160 | extern int do_signal(struct pt_regs *regs, sigset_t *oldset); | ||
161 | 160 | ||
162 | #define ptrace_signal_deliver(regs, cookie) do { } while (0) | 161 | #define ptrace_signal_deliver(regs, cookie) do { } while (0) |
163 | 162 | ||
diff --git a/arch/m32r/include/asm/unistd.h b/arch/m32r/include/asm/unistd.h index 76125777483..c70545689da 100644 --- a/arch/m32r/include/asm/unistd.h +++ b/arch/m32r/include/asm/unistd.h | |||
@@ -351,6 +351,7 @@ | |||
351 | #define __ARCH_WANT_SYS_OLD_GETRLIMIT /*will be unused*/ | 351 | #define __ARCH_WANT_SYS_OLD_GETRLIMIT /*will be unused*/ |
352 | #define __ARCH_WANT_SYS_OLDUMOUNT | 352 | #define __ARCH_WANT_SYS_OLDUMOUNT |
353 | #define __ARCH_WANT_SYS_RT_SIGACTION | 353 | #define __ARCH_WANT_SYS_RT_SIGACTION |
354 | #define __ARCH_WANT_SYS_RT_SIGSUSPEND | ||
354 | 355 | ||
355 | #define __IGNORE_lchown | 356 | #define __IGNORE_lchown |
356 | #define __IGNORE_setuid | 357 | #define __IGNORE_setuid |
diff --git a/arch/m32r/kernel/entry.S b/arch/m32r/kernel/entry.S index 403869833b9..225412bc227 100644 --- a/arch/m32r/kernel/entry.S +++ b/arch/m32r/kernel/entry.S | |||
@@ -235,10 +235,9 @@ work_resched: | |||
235 | work_notifysig: ; deal with pending signals and | 235 | work_notifysig: ; deal with pending signals and |
236 | ; notify-resume requests | 236 | ; notify-resume requests |
237 | mv r0, sp ; arg1 : struct pt_regs *regs | 237 | mv r0, sp ; arg1 : struct pt_regs *regs |
238 | ldi r1, #0 ; arg2 : sigset_t *oldset | 238 | mv r1, r9 ; arg2 : __u32 thread_info_flags |
239 | mv r2, r9 ; arg3 : __u32 thread_info_flags | ||
240 | bl do_notify_resume | 239 | bl do_notify_resume |
241 | bra restore_all | 240 | bra resume_userspace |
242 | 241 | ||
243 | ; perform syscall exit tracing | 242 | ; perform syscall exit tracing |
244 | ALIGN | 243 | ALIGN |
diff --git a/arch/m32r/kernel/ptrace.c b/arch/m32r/kernel/ptrace.c index e555091eb97..0021ade4cba 100644 --- a/arch/m32r/kernel/ptrace.c +++ b/arch/m32r/kernel/ptrace.c | |||
@@ -592,16 +592,17 @@ void user_enable_single_step(struct task_struct *child) | |||
592 | 592 | ||
593 | if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0) | 593 | if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0) |
594 | != sizeof(insn)) | 594 | != sizeof(insn)) |
595 | break; | 595 | return -EIO; |
596 | 596 | ||
597 | compute_next_pc(insn, pc, &next_pc, child); | 597 | compute_next_pc(insn, pc, &next_pc, child); |
598 | if (next_pc & 0x80000000) | 598 | if (next_pc & 0x80000000) |
599 | break; | 599 | return -EIO; |
600 | 600 | ||
601 | if (embed_debug_trap(child, next_pc)) | 601 | if (embed_debug_trap(child, next_pc)) |
602 | break; | 602 | return -EIO; |
603 | 603 | ||
604 | invalidate_cache(); | 604 | invalidate_cache(); |
605 | return 0; | ||
605 | } | 606 | } |
606 | 607 | ||
607 | void user_disable_single_step(struct task_struct *child) | 608 | void user_disable_single_step(struct task_struct *child) |
diff --git a/arch/m32r/kernel/signal.c b/arch/m32r/kernel/signal.c index 144b0f124fc..7bbe38645ed 100644 --- a/arch/m32r/kernel/signal.c +++ b/arch/m32r/kernel/signal.c | |||
@@ -28,37 +28,6 @@ | |||
28 | 28 | ||
29 | #define DEBUG_SIG 0 | 29 | #define DEBUG_SIG 0 |
30 | 30 | ||
31 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) | ||
32 | |||
33 | int do_signal(struct pt_regs *, sigset_t *); | ||
34 | |||
35 | asmlinkage int | ||
36 | sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize, | ||
37 | unsigned long r2, unsigned long r3, unsigned long r4, | ||
38 | unsigned long r5, unsigned long r6, struct pt_regs *regs) | ||
39 | { | ||
40 | sigset_t newset; | ||
41 | |||
42 | /* XXX: Don't preclude handling different sized sigset_t's. */ | ||
43 | if (sigsetsize != sizeof(sigset_t)) | ||
44 | return -EINVAL; | ||
45 | |||
46 | if (copy_from_user(&newset, unewset, sizeof(newset))) | ||
47 | return -EFAULT; | ||
48 | sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP)); | ||
49 | |||
50 | spin_lock_irq(¤t->sighand->siglock); | ||
51 | current->saved_sigmask = current->blocked; | ||
52 | current->blocked = newset; | ||
53 | recalc_sigpending(); | ||
54 | spin_unlock_irq(¤t->sighand->siglock); | ||
55 | |||
56 | current->state = TASK_INTERRUPTIBLE; | ||
57 | schedule(); | ||
58 | set_thread_flag(TIF_RESTORE_SIGMASK); | ||
59 | return -ERESTARTNOHAND; | ||
60 | } | ||
61 | |||
62 | asmlinkage int | 31 | asmlinkage int |
63 | sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, | 32 | sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, |
64 | unsigned long r2, unsigned long r3, unsigned long r4, | 33 | unsigned long r2, unsigned long r3, unsigned long r4, |
@@ -218,7 +187,7 @@ get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size) | |||
218 | return (void __user *)((sp - frame_size) & -8ul); | 187 | return (void __user *)((sp - frame_size) & -8ul); |
219 | } | 188 | } |
220 | 189 | ||
221 | static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | 190 | static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, |
222 | sigset_t *set, struct pt_regs *regs) | 191 | sigset_t *set, struct pt_regs *regs) |
223 | { | 192 | { |
224 | struct rt_sigframe __user *frame; | 193 | struct rt_sigframe __user *frame; |
@@ -275,22 +244,34 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
275 | current->comm, current->pid, frame, regs->pc); | 244 | current->comm, current->pid, frame, regs->pc); |
276 | #endif | 245 | #endif |
277 | 246 | ||
278 | return; | 247 | return 0; |
279 | 248 | ||
280 | give_sigsegv: | 249 | give_sigsegv: |
281 | force_sigsegv(sig, current); | 250 | force_sigsegv(sig, current); |
251 | return -EFAULT; | ||
252 | } | ||
253 | |||
254 | static int prev_insn(struct pt_regs *regs) | ||
255 | { | ||
256 | u16 inst; | ||
257 | if (get_user(&inst, (u16 __user *)(regs->bpc - 2))) | ||
258 | return -EFAULT; | ||
259 | if ((inst & 0xfff0) == 0x10f0) /* trap ? */ | ||
260 | regs->bpc -= 2; | ||
261 | else | ||
262 | regs->bpc -= 4; | ||
263 | regs->syscall_nr = -1; | ||
264 | return 0; | ||
282 | } | 265 | } |
283 | 266 | ||
284 | /* | 267 | /* |
285 | * OK, we're invoking a handler | 268 | * OK, we're invoking a handler |
286 | */ | 269 | */ |
287 | 270 | ||
288 | static void | 271 | static int |
289 | handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, | 272 | handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, |
290 | sigset_t *oldset, struct pt_regs *regs) | 273 | sigset_t *oldset, struct pt_regs *regs) |
291 | { | 274 | { |
292 | unsigned short inst; | ||
293 | |||
294 | /* Are we from a system call? */ | 275 | /* Are we from a system call? */ |
295 | if (regs->syscall_nr >= 0) { | 276 | if (regs->syscall_nr >= 0) { |
296 | /* If so, check system call restarting.. */ | 277 | /* If so, check system call restarting.. */ |
@@ -308,16 +289,14 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, | |||
308 | /* fallthrough */ | 289 | /* fallthrough */ |
309 | case -ERESTARTNOINTR: | 290 | case -ERESTARTNOINTR: |
310 | regs->r0 = regs->orig_r0; | 291 | regs->r0 = regs->orig_r0; |
311 | inst = *(unsigned short *)(regs->bpc - 2); | 292 | if (prev_insn(regs) < 0) |
312 | if ((inst & 0xfff0) == 0x10f0) /* trap ? */ | 293 | return -EFAULT; |
313 | regs->bpc -= 2; | ||
314 | else | ||
315 | regs->bpc -= 4; | ||
316 | } | 294 | } |
317 | } | 295 | } |
318 | 296 | ||
319 | /* Set up the stack frame */ | 297 | /* Set up the stack frame */ |
320 | setup_rt_frame(sig, ka, info, oldset, regs); | 298 | if (setup_rt_frame(sig, ka, info, oldset, regs)) |
299 | return -EFAULT; | ||
321 | 300 | ||
322 | spin_lock_irq(¤t->sighand->siglock); | 301 | spin_lock_irq(¤t->sighand->siglock); |
323 | sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask); | 302 | sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask); |
@@ -325,6 +304,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, | |||
325 | sigaddset(¤t->blocked,sig); | 304 | sigaddset(¤t->blocked,sig); |
326 | recalc_sigpending(); | 305 | recalc_sigpending(); |
327 | spin_unlock_irq(¤t->sighand->siglock); | 306 | spin_unlock_irq(¤t->sighand->siglock); |
307 | return 0; | ||
328 | } | 308 | } |
329 | 309 | ||
330 | /* | 310 | /* |
@@ -332,12 +312,12 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, | |||
332 | * want to handle. Thus you cannot kill init even with a SIGKILL even by | 312 | * want to handle. Thus you cannot kill init even with a SIGKILL even by |
333 | * mistake. | 313 | * mistake. |
334 | */ | 314 | */ |
335 | int do_signal(struct pt_regs *regs, sigset_t *oldset) | 315 | static void do_signal(struct pt_regs *regs) |
336 | { | 316 | { |
337 | siginfo_t info; | 317 | siginfo_t info; |
338 | int signr; | 318 | int signr; |
339 | struct k_sigaction ka; | 319 | struct k_sigaction ka; |
340 | unsigned short inst; | 320 | sigset_t *oldset; |
341 | 321 | ||
342 | /* | 322 | /* |
343 | * We want the common case to go fast, which | 323 | * We want the common case to go fast, which |
@@ -346,12 +326,14 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset) | |||
346 | * if so. | 326 | * if so. |
347 | */ | 327 | */ |
348 | if (!user_mode(regs)) | 328 | if (!user_mode(regs)) |
349 | return 1; | 329 | return; |
350 | 330 | ||
351 | if (try_to_freeze()) | 331 | if (try_to_freeze()) |
352 | goto no_signal; | 332 | goto no_signal; |
353 | 333 | ||
354 | if (!oldset) | 334 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) |
335 | oldset = ¤t->saved_sigmask; | ||
336 | else | ||
355 | oldset = ¤t->blocked; | 337 | oldset = ¤t->blocked; |
356 | 338 | ||
357 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); | 339 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); |
@@ -363,8 +345,10 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset) | |||
363 | */ | 345 | */ |
364 | 346 | ||
365 | /* Whee! Actually deliver the signal. */ | 347 | /* Whee! Actually deliver the signal. */ |
366 | handle_signal(signr, &ka, &info, oldset, regs); | 348 | if (handle_signal(signr, &ka, &info, oldset, regs) == 0) |
367 | return 1; | 349 | clear_thread_flag(TIF_RESTORE_SIGMASK); |
350 | |||
351 | return; | ||
368 | } | 352 | } |
369 | 353 | ||
370 | no_signal: | 354 | no_signal: |
@@ -375,31 +359,24 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset) | |||
375 | regs->r0 == -ERESTARTSYS || | 359 | regs->r0 == -ERESTARTSYS || |
376 | regs->r0 == -ERESTARTNOINTR) { | 360 | regs->r0 == -ERESTARTNOINTR) { |
377 | regs->r0 = regs->orig_r0; | 361 | regs->r0 = regs->orig_r0; |
378 | inst = *(unsigned short *)(regs->bpc - 2); | 362 | prev_insn(regs); |
379 | if ((inst & 0xfff0) == 0x10f0) /* trap ? */ | 363 | } else if (regs->r0 == -ERESTART_RESTARTBLOCK){ |
380 | regs->bpc -= 2; | ||
381 | else | ||
382 | regs->bpc -= 4; | ||
383 | } | ||
384 | if (regs->r0 == -ERESTART_RESTARTBLOCK){ | ||
385 | regs->r0 = regs->orig_r0; | 364 | regs->r0 = regs->orig_r0; |
386 | regs->r7 = __NR_restart_syscall; | 365 | regs->r7 = __NR_restart_syscall; |
387 | inst = *(unsigned short *)(regs->bpc - 2); | 366 | prev_insn(regs); |
388 | if ((inst & 0xfff0) == 0x10f0) /* trap ? */ | ||
389 | regs->bpc -= 2; | ||
390 | else | ||
391 | regs->bpc -= 4; | ||
392 | } | 367 | } |
393 | } | 368 | } |
394 | return 0; | 369 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) { |
370 | clear_thread_flag(TIF_RESTORE_SIGMASK); | ||
371 | sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); | ||
372 | } | ||
395 | } | 373 | } |
396 | 374 | ||
397 | /* | 375 | /* |
398 | * notification of userspace execution resumption | 376 | * notification of userspace execution resumption |
399 | * - triggered by current->work.notify_resume | 377 | * - triggered by current->work.notify_resume |
400 | */ | 378 | */ |
401 | void do_notify_resume(struct pt_regs *regs, sigset_t *oldset, | 379 | void do_notify_resume(struct pt_regs *regs, __u32 thread_info_flags) |
402 | __u32 thread_info_flags) | ||
403 | { | 380 | { |
404 | /* Pending single-step? */ | 381 | /* Pending single-step? */ |
405 | if (thread_info_flags & _TIF_SINGLESTEP) | 382 | if (thread_info_flags & _TIF_SINGLESTEP) |
@@ -407,7 +384,7 @@ void do_notify_resume(struct pt_regs *regs, sigset_t *oldset, | |||
407 | 384 | ||
408 | /* deal with pending signal delivery */ | 385 | /* deal with pending signal delivery */ |
409 | if (thread_info_flags & _TIF_SIGPENDING) | 386 | if (thread_info_flags & _TIF_SIGPENDING) |
410 | do_signal(regs,oldset); | 387 | do_signal(regs); |
411 | 388 | ||
412 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { | 389 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { |
413 | clear_thread_flag(TIF_NOTIFY_RESUME); | 390 | clear_thread_flag(TIF_NOTIFY_RESUME); |
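Not part of the commit, but as a reading aid: the reworked m32r do_signal() above follows the standard TIF_RESTORE_SIGMASK protocol. A minimal skeleton of the new-side control flow, with the syscall-restart details (prev_insn()) elided, looks roughly like this:

```c
/*
 * Illustrative skeleton only -- not code from this commit.  It restates the
 * new-side control flow of the reworked m32r do_signal() so the interleaved
 * diff above is easier to follow.
 */
static void do_signal_skeleton(struct pt_regs *regs)
{
	siginfo_t info;
	struct k_sigaction ka;
	sigset_t *oldset;
	int signr;

	/* Pick the mask to restore later: the one saved by
	 * sys_rt_sigsuspend() if TIF_RESTORE_SIGMASK is set, else ->blocked. */
	if (test_thread_flag(TIF_RESTORE_SIGMASK))
		oldset = &current->saved_sigmask;
	else
		oldset = &current->blocked;

	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
	if (signr > 0) {
		/* Only forget the saved mask once the signal frame was
		 * actually set up; handle_signal() now reports failure. */
		if (handle_signal(signr, &ka, &info, oldset, regs) == 0)
			clear_thread_flag(TIF_RESTORE_SIGMASK);
		return;
	}

	/* No signal delivered: restore the saved mask ourselves. */
	if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
		clear_thread_flag(TIF_RESTORE_SIGMASK);
		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
	}
}
```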
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S index 84f296ca9e6..8f58bdff20d 100644 --- a/arch/tile/kernel/intvec_32.S +++ b/arch/tile/kernel/intvec_32.S | |||
@@ -1506,13 +1506,6 @@ handle_ill: | |||
1506 | } | 1506 | } |
1507 | STD_ENDPROC(handle_ill) | 1507 | STD_ENDPROC(handle_ill) |
1508 | 1508 | ||
1509 | .pushsection .rodata, "a" | ||
1510 | .align 8 | ||
1511 | bpt_code: | ||
1512 | bpt | ||
1513 | ENDPROC(bpt_code) | ||
1514 | .popsection | ||
1515 | |||
1516 | /* Various stub interrupt handlers and syscall handlers */ | 1509 | /* Various stub interrupt handlers and syscall handlers */ |
1517 | 1510 | ||
1518 | STD_ENTRY_LOCAL(_kernel_double_fault) | 1511 | STD_ENTRY_LOCAL(_kernel_double_fault) |
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index c6fbb7b430d..3f76523589a 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h | |||
@@ -168,6 +168,7 @@ | |||
168 | #define X86_FEATURE_XSAVEOPT (7*32+ 4) /* Optimized Xsave */ | 168 | #define X86_FEATURE_XSAVEOPT (7*32+ 4) /* Optimized Xsave */ |
169 | #define X86_FEATURE_PLN (7*32+ 5) /* Intel Power Limit Notification */ | 169 | #define X86_FEATURE_PLN (7*32+ 5) /* Intel Power Limit Notification */ |
170 | #define X86_FEATURE_PTS (7*32+ 6) /* Intel Package Thermal Status */ | 170 | #define X86_FEATURE_PTS (7*32+ 6) /* Intel Package Thermal Status */ |
171 | #define X86_FEATURE_DTS (7*32+ 7) /* Digital Thermal Sensor */ | ||
171 | 172 | ||
172 | /* Virtualization flags: Linux defined, word 8 */ | 173 | /* Virtualization flags: Linux defined, word 8 */ |
173 | #define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */ | 174 | #define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */ |
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c index 34b4dad6f0b..d4907951512 100644 --- a/arch/x86/kernel/cpu/scattered.c +++ b/arch/x86/kernel/cpu/scattered.c | |||
@@ -31,6 +31,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c) | |||
31 | const struct cpuid_bit *cb; | 31 | const struct cpuid_bit *cb; |
32 | 32 | ||
33 | static const struct cpuid_bit __cpuinitconst cpuid_bits[] = { | 33 | static const struct cpuid_bit __cpuinitconst cpuid_bits[] = { |
34 | { X86_FEATURE_DTS, CR_EAX, 0, 0x00000006, 0 }, | ||
34 | { X86_FEATURE_IDA, CR_EAX, 1, 0x00000006, 0 }, | 35 | { X86_FEATURE_IDA, CR_EAX, 1, 0x00000006, 0 }, |
35 | { X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006, 0 }, | 36 | { X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006, 0 }, |
36 | { X86_FEATURE_PLN, CR_EAX, 4, 0x00000006, 0 }, | 37 | { X86_FEATURE_PLN, CR_EAX, 4, 0x00000006, 0 }, |
diff --git a/block/blk-merge.c b/block/blk-merge.c index 3b0cd424967..eafc94f68d7 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c | |||
@@ -362,6 +362,18 @@ static int attempt_merge(struct request_queue *q, struct request *req, | |||
362 | return 0; | 362 | return 0; |
363 | 363 | ||
364 | /* | 364 | /* |
365 | * Don't merge file system requests and discard requests | ||
366 | */ | ||
367 | if ((req->cmd_flags & REQ_DISCARD) != (next->cmd_flags & REQ_DISCARD)) | ||
368 | return 0; | ||
369 | |||
370 | /* | ||
371 | * Don't merge discard requests and secure discard requests | ||
372 | */ | ||
373 | if ((req->cmd_flags & REQ_SECURE) != (next->cmd_flags & REQ_SECURE)) | ||
374 | return 0; | ||
375 | |||
376 | /* | ||
365 | * not contiguous | 377 | * not contiguous |
366 | */ | 378 | */ |
367 | if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next)) | 379 | if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next)) |
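Not from the patch itself: the two guards added to attempt_merge() can be read as a single predicate over the same cmd_flags bits the hunk tests. A sketch:

```c
/*
 * Illustrative only -- not code from the patch.  Requests may only be
 * coalesced when their REQ_DISCARD and REQ_SECURE bits agree, since discard
 * and secure-discard requests must not be mixed with ordinary file-system
 * I/O (or with each other) in one merged request.
 */
static bool blk_cmd_flags_allow_merge(struct request *req, struct request *next)
{
	if ((req->cmd_flags & REQ_DISCARD) != (next->cmd_flags & REQ_DISCARD))
		return false;
	if ((req->cmd_flags & REQ_SECURE) != (next->cmd_flags & REQ_SECURE))
		return false;
	return true;
}
```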
diff --git a/drivers/gpu/drm/drm_buffer.c b/drivers/gpu/drm/drm_buffer.c index 55d03ed0500..529a0dbe9fc 100644 --- a/drivers/gpu/drm/drm_buffer.c +++ b/drivers/gpu/drm/drm_buffer.c | |||
@@ -98,8 +98,8 @@ EXPORT_SYMBOL(drm_buffer_alloc); | |||
98 | * user_data: A pointer the data that is copied to the buffer. | 98 | * user_data: A pointer the data that is copied to the buffer. |
99 | * size: The Number of bytes to copy. | 99 | * size: The Number of bytes to copy. |
100 | */ | 100 | */ |
101 | extern int drm_buffer_copy_from_user(struct drm_buffer *buf, | 101 | int drm_buffer_copy_from_user(struct drm_buffer *buf, |
102 | void __user *user_data, int size) | 102 | void __user *user_data, int size) |
103 | { | 103 | { |
104 | int nr_pages = size / PAGE_SIZE + 1; | 104 | int nr_pages = size / PAGE_SIZE + 1; |
105 | int idx; | 105 | int idx; |
@@ -163,7 +163,7 @@ void *drm_buffer_read_object(struct drm_buffer *buf, | |||
163 | { | 163 | { |
164 | int idx = drm_buffer_index(buf); | 164 | int idx = drm_buffer_index(buf); |
165 | int page = drm_buffer_page(buf); | 165 | int page = drm_buffer_page(buf); |
166 | void *obj = 0; | 166 | void *obj = NULL; |
167 | 167 | ||
168 | if (idx + objsize <= PAGE_SIZE) { | 168 | if (idx + objsize <= PAGE_SIZE) { |
169 | obj = &buf->data[page][idx]; | 169 | obj = &buf->data[page][idx]; |
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 3ea0692ce59..ea1c4b019eb 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c | |||
@@ -142,7 +142,7 @@ int drm_gem_object_init(struct drm_device *dev, | |||
142 | return -ENOMEM; | 142 | return -ENOMEM; |
143 | 143 | ||
144 | kref_init(&obj->refcount); | 144 | kref_init(&obj->refcount); |
145 | kref_init(&obj->handlecount); | 145 | atomic_set(&obj->handle_count, 0); |
146 | obj->size = size; | 146 | obj->size = size; |
147 | 147 | ||
148 | return 0; | 148 | return 0; |
@@ -448,26 +448,6 @@ drm_gem_object_free(struct kref *kref) | |||
448 | } | 448 | } |
449 | EXPORT_SYMBOL(drm_gem_object_free); | 449 | EXPORT_SYMBOL(drm_gem_object_free); |
450 | 450 | ||
451 | /** | ||
452 | * Called after the last reference to the object has been lost. | ||
453 | * Must be called without holding struct_mutex | ||
454 | * | ||
455 | * Frees the object | ||
456 | */ | ||
457 | void | ||
458 | drm_gem_object_free_unlocked(struct kref *kref) | ||
459 | { | ||
460 | struct drm_gem_object *obj = (struct drm_gem_object *) kref; | ||
461 | struct drm_device *dev = obj->dev; | ||
462 | |||
463 | if (dev->driver->gem_free_object != NULL) { | ||
464 | mutex_lock(&dev->struct_mutex); | ||
465 | dev->driver->gem_free_object(obj); | ||
466 | mutex_unlock(&dev->struct_mutex); | ||
467 | } | ||
468 | } | ||
469 | EXPORT_SYMBOL(drm_gem_object_free_unlocked); | ||
470 | |||
471 | static void drm_gem_object_ref_bug(struct kref *list_kref) | 451 | static void drm_gem_object_ref_bug(struct kref *list_kref) |
472 | { | 452 | { |
473 | BUG(); | 453 | BUG(); |
@@ -480,12 +460,8 @@ static void drm_gem_object_ref_bug(struct kref *list_kref) | |||
480 | * called before drm_gem_object_free or we'll be touching | 460 | * called before drm_gem_object_free or we'll be touching |
481 | * freed memory | 461 | * freed memory |
482 | */ | 462 | */ |
483 | void | 463 | void drm_gem_object_handle_free(struct drm_gem_object *obj) |
484 | drm_gem_object_handle_free(struct kref *kref) | ||
485 | { | 464 | { |
486 | struct drm_gem_object *obj = container_of(kref, | ||
487 | struct drm_gem_object, | ||
488 | handlecount); | ||
489 | struct drm_device *dev = obj->dev; | 465 | struct drm_device *dev = obj->dev; |
490 | 466 | ||
491 | /* Remove any name for this object */ | 467 | /* Remove any name for this object */ |
@@ -512,6 +488,10 @@ void drm_gem_vm_open(struct vm_area_struct *vma) | |||
512 | struct drm_gem_object *obj = vma->vm_private_data; | 488 | struct drm_gem_object *obj = vma->vm_private_data; |
513 | 489 | ||
514 | drm_gem_object_reference(obj); | 490 | drm_gem_object_reference(obj); |
491 | |||
492 | mutex_lock(&obj->dev->struct_mutex); | ||
493 | drm_vm_open_locked(vma); | ||
494 | mutex_unlock(&obj->dev->struct_mutex); | ||
515 | } | 495 | } |
516 | EXPORT_SYMBOL(drm_gem_vm_open); | 496 | EXPORT_SYMBOL(drm_gem_vm_open); |
517 | 497 | ||
@@ -519,7 +499,10 @@ void drm_gem_vm_close(struct vm_area_struct *vma) | |||
519 | { | 499 | { |
520 | struct drm_gem_object *obj = vma->vm_private_data; | 500 | struct drm_gem_object *obj = vma->vm_private_data; |
521 | 501 | ||
522 | drm_gem_object_unreference_unlocked(obj); | 502 | mutex_lock(&obj->dev->struct_mutex); |
503 | drm_vm_close_locked(vma); | ||
504 | drm_gem_object_unreference(obj); | ||
505 | mutex_unlock(&obj->dev->struct_mutex); | ||
523 | } | 506 | } |
524 | EXPORT_SYMBOL(drm_gem_vm_close); | 507 | EXPORT_SYMBOL(drm_gem_vm_close); |
525 | 508 | ||
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c index 5aff08e236c..3cdbaf379bb 100644 --- a/drivers/gpu/drm/drm_info.c +++ b/drivers/gpu/drm/drm_info.c | |||
@@ -255,7 +255,7 @@ int drm_gem_one_name_info(int id, void *ptr, void *data) | |||
255 | 255 | ||
256 | seq_printf(m, "%6d %8zd %7d %8d\n", | 256 | seq_printf(m, "%6d %8zd %7d %8d\n", |
257 | obj->name, obj->size, | 257 | obj->name, obj->size, |
258 | atomic_read(&obj->handlecount.refcount), | 258 | atomic_read(&obj->handle_count), |
259 | atomic_read(&obj->refcount.refcount)); | 259 | atomic_read(&obj->refcount.refcount)); |
260 | return 0; | 260 | return 0; |
261 | } | 261 | } |
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c index ee879d6bb52..2c3fcbdfd8f 100644 --- a/drivers/gpu/drm/drm_vm.c +++ b/drivers/gpu/drm/drm_vm.c | |||
@@ -433,15 +433,7 @@ static void drm_vm_open(struct vm_area_struct *vma) | |||
433 | mutex_unlock(&dev->struct_mutex); | 433 | mutex_unlock(&dev->struct_mutex); |
434 | } | 434 | } |
435 | 435 | ||
436 | /** | 436 | void drm_vm_close_locked(struct vm_area_struct *vma) |
437 | * \c close method for all virtual memory types. | ||
438 | * | ||
439 | * \param vma virtual memory area. | ||
440 | * | ||
441 | * Search the \p vma private data entry in drm_device::vmalist, unlink it, and | ||
442 | * free it. | ||
443 | */ | ||
444 | static void drm_vm_close(struct vm_area_struct *vma) | ||
445 | { | 437 | { |
446 | struct drm_file *priv = vma->vm_file->private_data; | 438 | struct drm_file *priv = vma->vm_file->private_data; |
447 | struct drm_device *dev = priv->minor->dev; | 439 | struct drm_device *dev = priv->minor->dev; |
@@ -451,7 +443,6 @@ static void drm_vm_close(struct vm_area_struct *vma) | |||
451 | vma->vm_start, vma->vm_end - vma->vm_start); | 443 | vma->vm_start, vma->vm_end - vma->vm_start); |
452 | atomic_dec(&dev->vma_count); | 444 | atomic_dec(&dev->vma_count); |
453 | 445 | ||
454 | mutex_lock(&dev->struct_mutex); | ||
455 | list_for_each_entry_safe(pt, temp, &dev->vmalist, head) { | 446 | list_for_each_entry_safe(pt, temp, &dev->vmalist, head) { |
456 | if (pt->vma == vma) { | 447 | if (pt->vma == vma) { |
457 | list_del(&pt->head); | 448 | list_del(&pt->head); |
@@ -459,6 +450,23 @@ static void drm_vm_close(struct vm_area_struct *vma) | |||
459 | break; | 450 | break; |
460 | } | 451 | } |
461 | } | 452 | } |
453 | } | ||
454 | |||
455 | /** | ||
456 | * \c close method for all virtual memory types. | ||
457 | * | ||
458 | * \param vma virtual memory area. | ||
459 | * | ||
460 | * Search the \p vma private data entry in drm_device::vmalist, unlink it, and | ||
461 | * free it. | ||
462 | */ | ||
463 | static void drm_vm_close(struct vm_area_struct *vma) | ||
464 | { | ||
465 | struct drm_file *priv = vma->vm_file->private_data; | ||
466 | struct drm_device *dev = priv->minor->dev; | ||
467 | |||
468 | mutex_lock(&dev->struct_mutex); | ||
469 | drm_vm_close_locked(vma); | ||
462 | mutex_unlock(&dev->struct_mutex); | 470 | mutex_unlock(&dev->struct_mutex); |
463 | } | 471 | } |
464 | 472 | ||
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c index 61b4caf220f..fb07e73581e 100644 --- a/drivers/gpu/drm/i810/i810_dma.c +++ b/drivers/gpu/drm/i810/i810_dma.c | |||
@@ -116,7 +116,7 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma) | |||
116 | static const struct file_operations i810_buffer_fops = { | 116 | static const struct file_operations i810_buffer_fops = { |
117 | .open = drm_open, | 117 | .open = drm_open, |
118 | .release = drm_release, | 118 | .release = drm_release, |
119 | .unlocked_ioctl = drm_ioctl, | 119 | .unlocked_ioctl = i810_ioctl, |
120 | .mmap = i810_mmap_buffers, | 120 | .mmap = i810_mmap_buffers, |
121 | .fasync = drm_fasync, | 121 | .fasync = drm_fasync, |
122 | }; | 122 | }; |
diff --git a/drivers/gpu/drm/i830/i830_dma.c b/drivers/gpu/drm/i830/i830_dma.c index 671aa18415a..cc92c7e6236 100644 --- a/drivers/gpu/drm/i830/i830_dma.c +++ b/drivers/gpu/drm/i830/i830_dma.c | |||
@@ -118,7 +118,7 @@ static int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma) | |||
118 | static const struct file_operations i830_buffer_fops = { | 118 | static const struct file_operations i830_buffer_fops = { |
119 | .open = drm_open, | 119 | .open = drm_open, |
120 | .release = drm_release, | 120 | .release = drm_release, |
121 | .unlocked_ioctl = drm_ioctl, | 121 | .unlocked_ioctl = i830_ioctl, |
122 | .mmap = i830_mmap_buffers, | 122 | .mmap = i830_mmap_buffers, |
123 | .fasync = drm_fasync, | 123 | .fasync = drm_fasync, |
124 | }; | 124 | }; |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 29e97c07542..100a7537980 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -244,14 +244,12 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data, | |||
244 | return -ENOMEM; | 244 | return -ENOMEM; |
245 | 245 | ||
246 | ret = drm_gem_handle_create(file_priv, obj, &handle); | 246 | ret = drm_gem_handle_create(file_priv, obj, &handle); |
247 | /* drop reference from allocate - handle holds it now */ | ||
248 | drm_gem_object_unreference_unlocked(obj); | ||
247 | if (ret) { | 249 | if (ret) { |
248 | drm_gem_object_unreference_unlocked(obj); | ||
249 | return ret; | 250 | return ret; |
250 | } | 251 | } |
251 | 252 | ||
252 | /* Sink the floating reference from kref_init(handlecount) */ | ||
253 | drm_gem_object_handle_unreference_unlocked(obj); | ||
254 | |||
255 | args->handle = handle; | 253 | args->handle = handle; |
256 | return 0; | 254 | return 0; |
257 | } | 255 | } |
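Side note, not part of the diff: i915 here, and the nouveau and radeon hunks further down, all converge on the same reference discipline around drm_gem_handle_create(). A minimal sketch of that pattern, with a made-up wrapper name:

```c
/*
 * Illustrative sketch, not code from the patch.  The allocation reference is
 * dropped as soon as drm_gem_handle_create() returns: on success the handle
 * now holds its own reference, and on failure the object must be freed anyway.
 */
static int example_gem_create_handle(struct drm_file *file_priv,
				     struct drm_gem_object *obj,
				     u32 *handle)
{
	int ret;

	ret = drm_gem_handle_create(file_priv, obj, handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;	/* 0 on success; obj already unreferenced either way */
}
```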
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index 7dc50acd65d..b937ccfa7be 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c | |||
@@ -224,8 +224,10 @@ static void intel_fbdev_destroy(struct drm_device *dev, | |||
224 | drm_fb_helper_fini(&ifbdev->helper); | 224 | drm_fb_helper_fini(&ifbdev->helper); |
225 | 225 | ||
226 | drm_framebuffer_cleanup(&ifb->base); | 226 | drm_framebuffer_cleanup(&ifb->base); |
227 | if (ifb->obj) | 227 | if (ifb->obj) { |
228 | drm_gem_object_handle_unreference_unlocked(ifb->obj); | ||
228 | drm_gem_object_unreference_unlocked(ifb->obj); | 229 | drm_gem_object_unreference_unlocked(ifb->obj); |
230 | } | ||
229 | } | 231 | } |
230 | 232 | ||
231 | int intel_fbdev_init(struct drm_device *dev) | 233 | int intel_fbdev_init(struct drm_device *dev) |
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 87186a4bbf0..fc737037f75 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c | |||
@@ -558,8 +558,10 @@ nouveau_connector_get_modes(struct drm_connector *connector) | |||
558 | if (nv_encoder->dcb->type == OUTPUT_LVDS && | 558 | if (nv_encoder->dcb->type == OUTPUT_LVDS && |
559 | (nv_encoder->dcb->lvdsconf.use_straps_for_mode || | 559 | (nv_encoder->dcb->lvdsconf.use_straps_for_mode || |
560 | dev_priv->vbios.fp_no_ddc) && nouveau_bios_fp_mode(dev, NULL)) { | 560 | dev_priv->vbios.fp_no_ddc) && nouveau_bios_fp_mode(dev, NULL)) { |
561 | nv_connector->native_mode = drm_mode_create(dev); | 561 | struct drm_display_mode mode; |
562 | nouveau_bios_fp_mode(dev, nv_connector->native_mode); | 562 | |
563 | nouveau_bios_fp_mode(dev, &mode); | ||
564 | nv_connector->native_mode = drm_mode_duplicate(dev, &mode); | ||
563 | } | 565 | } |
564 | 566 | ||
565 | /* Find the native mode if this is a digital panel, if we didn't | 567 | /* Find the native mode if this is a digital panel, if we didn't |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index dbd30b2e43f..d2047713dc5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c | |||
@@ -352,6 +352,7 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev) | |||
352 | 352 | ||
353 | if (nouveau_fb->nvbo) { | 353 | if (nouveau_fb->nvbo) { |
354 | nouveau_bo_unmap(nouveau_fb->nvbo); | 354 | nouveau_bo_unmap(nouveau_fb->nvbo); |
355 | drm_gem_object_handle_unreference_unlocked(nouveau_fb->nvbo->gem); | ||
355 | drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem); | 356 | drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem); |
356 | nouveau_fb->nvbo = NULL; | 357 | nouveau_fb->nvbo = NULL; |
357 | } | 358 | } |
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index ead7b8fc53f..19620a6709f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c | |||
@@ -167,11 +167,9 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data, | |||
167 | goto out; | 167 | goto out; |
168 | 168 | ||
169 | ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle); | 169 | ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle); |
170 | /* drop reference from allocate - handle holds it now */ | ||
171 | drm_gem_object_unreference_unlocked(nvbo->gem); | ||
170 | out: | 172 | out: |
171 | drm_gem_object_handle_unreference_unlocked(nvbo->gem); | ||
172 | |||
173 | if (ret) | ||
174 | drm_gem_object_unreference_unlocked(nvbo->gem); | ||
175 | return ret; | 173 | return ret; |
176 | } | 174 | } |
177 | 175 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c index 3ec181ff50c..3c9964a8fba 100644 --- a/drivers/gpu/drm/nouveau/nouveau_notifier.c +++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c | |||
@@ -79,6 +79,7 @@ nouveau_notifier_takedown_channel(struct nouveau_channel *chan) | |||
79 | mutex_lock(&dev->struct_mutex); | 79 | mutex_lock(&dev->struct_mutex); |
80 | nouveau_bo_unpin(chan->notifier_bo); | 80 | nouveau_bo_unpin(chan->notifier_bo); |
81 | mutex_unlock(&dev->struct_mutex); | 81 | mutex_unlock(&dev->struct_mutex); |
82 | drm_gem_object_handle_unreference_unlocked(chan->notifier_bo->gem); | ||
82 | drm_gem_object_unreference_unlocked(chan->notifier_bo->gem); | 83 | drm_gem_object_unreference_unlocked(chan->notifier_bo->gem); |
83 | drm_mm_takedown(&chan->notifier_heap); | 84 | drm_mm_takedown(&chan->notifier_heap); |
84 | } | 85 | } |
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h index 1bc72c3190a..fe359a239df 100644 --- a/drivers/gpu/drm/radeon/atombios.h +++ b/drivers/gpu/drm/radeon/atombios.h | |||
@@ -4999,7 +4999,7 @@ typedef struct _SW_I2C_IO_DATA_PARAMETERS | |||
4999 | #define SW_I2C_CNTL_WRITE1BIT 6 | 4999 | #define SW_I2C_CNTL_WRITE1BIT 6 |
5000 | 5000 | ||
5001 | //==============================VESA definition Portion=============================== | 5001 | //==============================VESA definition Portion=============================== |
5002 | #define VESA_OEM_PRODUCT_REV '01.00' | 5002 | #define VESA_OEM_PRODUCT_REV "01.00" |
5003 | #define VESA_MODE_ATTRIBUTE_MODE_SUPPORT 0xBB //refer to VBE spec p.32, no TTY support | 5003 | #define VESA_MODE_ATTRIBUTE_MODE_SUPPORT 0xBB //refer to VBE spec p.32, no TTY support |
5004 | #define VESA_MODE_WIN_ATTRIBUTE 7 | 5004 | #define VESA_MODE_WIN_ATTRIBUTE 7 |
5005 | #define VESA_WIN_SIZE 64 | 5005 | #define VESA_WIN_SIZE 64 |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index afc18d87fdc..7a04959ba0e 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
@@ -2729,7 +2729,7 @@ int r600_ib_test(struct radeon_device *rdev) | |||
2729 | if (i < rdev->usec_timeout) { | 2729 | if (i < rdev->usec_timeout) { |
2730 | DRM_INFO("ib test succeeded in %u usecs\n", i); | 2730 | DRM_INFO("ib test succeeded in %u usecs\n", i); |
2731 | } else { | 2731 | } else { |
2732 | DRM_ERROR("radeon: ib test failed (sracth(0x%04X)=0x%08X)\n", | 2732 | DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n", |
2733 | scratch, tmp); | 2733 | scratch, tmp); |
2734 | r = -EINVAL; | 2734 | r = -EINVAL; |
2735 | } | 2735 | } |
@@ -3528,7 +3528,8 @@ void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo) | |||
3528 | /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read | 3528 | /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read |
3529 | * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL | 3529 | * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL |
3530 | */ | 3530 | */ |
3531 | if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) { | 3531 | if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) && |
3532 | rdev->vram_scratch.ptr) { | ||
3532 | void __iomem *ptr = (void *)rdev->vram_scratch.ptr; | 3533 | void __iomem *ptr = (void *)rdev->vram_scratch.ptr; |
3533 | u32 tmp; | 3534 | u32 tmp; |
3534 | 3535 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index ebae14c4b76..68932ba7b8a 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
@@ -317,6 +317,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, | |||
317 | *connector_type = DRM_MODE_CONNECTOR_DVID; | 317 | *connector_type = DRM_MODE_CONNECTOR_DVID; |
318 | } | 318 | } |
319 | 319 | ||
320 | /* MSI K9A2GM V2/V3 board has no HDMI or DVI */ | ||
321 | if ((dev->pdev->device == 0x796e) && | ||
322 | (dev->pdev->subsystem_vendor == 0x1462) && | ||
323 | (dev->pdev->subsystem_device == 0x7302)) { | ||
324 | if ((supported_device == ATOM_DEVICE_DFP2_SUPPORT) || | ||
325 | (supported_device == ATOM_DEVICE_DFP3_SUPPORT)) | ||
326 | return false; | ||
327 | } | ||
328 | |||
320 | /* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */ | 329 | /* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */ |
321 | if ((dev->pdev->device == 0x7941) && | 330 | if ((dev->pdev->device == 0x7941) && |
322 | (dev->pdev->subsystem_vendor == 0x147b) && | 331 | (dev->pdev->subsystem_vendor == 0x147b) && |
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 127a395f70f..b92d2f2fcbe 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
@@ -349,6 +349,8 @@ static void radeon_print_display_setup(struct drm_device *dev) | |||
349 | DRM_INFO(" DFP4: %s\n", encoder_names[radeon_encoder->encoder_id]); | 349 | DRM_INFO(" DFP4: %s\n", encoder_names[radeon_encoder->encoder_id]); |
350 | if (devices & ATOM_DEVICE_DFP5_SUPPORT) | 350 | if (devices & ATOM_DEVICE_DFP5_SUPPORT) |
351 | DRM_INFO(" DFP5: %s\n", encoder_names[radeon_encoder->encoder_id]); | 351 | DRM_INFO(" DFP5: %s\n", encoder_names[radeon_encoder->encoder_id]); |
352 | if (devices & ATOM_DEVICE_DFP6_SUPPORT) | ||
353 | DRM_INFO(" DFP6: %s\n", encoder_names[radeon_encoder->encoder_id]); | ||
352 | if (devices & ATOM_DEVICE_TV1_SUPPORT) | 354 | if (devices & ATOM_DEVICE_TV1_SUPPORT) |
353 | DRM_INFO(" TV1: %s\n", encoder_names[radeon_encoder->encoder_id]); | 355 | DRM_INFO(" TV1: %s\n", encoder_names[radeon_encoder->encoder_id]); |
354 | if (devices & ATOM_DEVICE_CV_SUPPORT) | 356 | if (devices & ATOM_DEVICE_CV_SUPPORT) |
@@ -841,8 +843,9 @@ static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb) | |||
841 | { | 843 | { |
842 | struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb); | 844 | struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb); |
843 | 845 | ||
844 | if (radeon_fb->obj) | 846 | if (radeon_fb->obj) { |
845 | drm_gem_object_unreference_unlocked(radeon_fb->obj); | 847 | drm_gem_object_unreference_unlocked(radeon_fb->obj); |
848 | } | ||
846 | drm_framebuffer_cleanup(fb); | 849 | drm_framebuffer_cleanup(fb); |
847 | kfree(radeon_fb); | 850 | kfree(radeon_fb); |
848 | } | 851 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index c74a8b20d94..9cdf6a35bc2 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c | |||
@@ -94,8 +94,10 @@ static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj) | |||
94 | ret = radeon_bo_reserve(rbo, false); | 94 | ret = radeon_bo_reserve(rbo, false); |
95 | if (likely(ret == 0)) { | 95 | if (likely(ret == 0)) { |
96 | radeon_bo_kunmap(rbo); | 96 | radeon_bo_kunmap(rbo); |
97 | radeon_bo_unpin(rbo); | ||
97 | radeon_bo_unreserve(rbo); | 98 | radeon_bo_unreserve(rbo); |
98 | } | 99 | } |
100 | drm_gem_object_handle_unreference(gobj); | ||
99 | drm_gem_object_unreference_unlocked(gobj); | 101 | drm_gem_object_unreference_unlocked(gobj); |
100 | } | 102 | } |
101 | 103 | ||
@@ -325,8 +327,6 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb | |||
325 | { | 327 | { |
326 | struct fb_info *info; | 328 | struct fb_info *info; |
327 | struct radeon_framebuffer *rfb = &rfbdev->rfb; | 329 | struct radeon_framebuffer *rfb = &rfbdev->rfb; |
328 | struct radeon_bo *rbo; | ||
329 | int r; | ||
330 | 330 | ||
331 | if (rfbdev->helper.fbdev) { | 331 | if (rfbdev->helper.fbdev) { |
332 | info = rfbdev->helper.fbdev; | 332 | info = rfbdev->helper.fbdev; |
@@ -338,14 +338,8 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb | |||
338 | } | 338 | } |
339 | 339 | ||
340 | if (rfb->obj) { | 340 | if (rfb->obj) { |
341 | rbo = rfb->obj->driver_private; | 341 | radeonfb_destroy_pinned_object(rfb->obj); |
342 | r = radeon_bo_reserve(rbo, false); | 342 | rfb->obj = NULL; |
343 | if (likely(r == 0)) { | ||
344 | radeon_bo_kunmap(rbo); | ||
345 | radeon_bo_unpin(rbo); | ||
346 | radeon_bo_unreserve(rbo); | ||
347 | } | ||
348 | drm_gem_object_unreference_unlocked(rfb->obj); | ||
349 | } | 343 | } |
350 | drm_fb_helper_fini(&rfbdev->helper); | 344 | drm_fb_helper_fini(&rfbdev->helper); |
351 | drm_framebuffer_cleanup(&rfb->base); | 345 | drm_framebuffer_cleanup(&rfb->base); |
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index c578f265b24..d1e595d9172 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c | |||
@@ -201,11 +201,11 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data, | |||
201 | return r; | 201 | return r; |
202 | } | 202 | } |
203 | r = drm_gem_handle_create(filp, gobj, &handle); | 203 | r = drm_gem_handle_create(filp, gobj, &handle); |
204 | /* drop reference from allocate - handle holds it now */ | ||
205 | drm_gem_object_unreference_unlocked(gobj); | ||
204 | if (r) { | 206 | if (r) { |
205 | drm_gem_object_unreference_unlocked(gobj); | ||
206 | return r; | 207 | return r; |
207 | } | 208 | } |
208 | drm_gem_object_handle_unreference_unlocked(gobj); | ||
209 | args->handle = handle; | 209 | args->handle = handle; |
210 | return 0; | 210 | return 0; |
211 | } | 211 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 5eee3c41d12..8fbbe1c6ebb 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c | |||
@@ -203,6 +203,10 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
203 | */ | 203 | */ |
204 | int radeon_driver_firstopen_kms(struct drm_device *dev) | 204 | int radeon_driver_firstopen_kms(struct drm_device *dev) |
205 | { | 205 | { |
206 | struct radeon_device *rdev = dev->dev_private; | ||
207 | |||
208 | if (rdev->powered_down) | ||
209 | return -EINVAL; | ||
206 | return 0; | 210 | return 0; |
207 | } | 211 | } |
208 | 212 | ||
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 7cffb3e0423..3451a82adba 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c | |||
@@ -351,6 +351,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, | |||
351 | INIT_LIST_HEAD(&fbo->lru); | 351 | INIT_LIST_HEAD(&fbo->lru); |
352 | INIT_LIST_HEAD(&fbo->swap); | 352 | INIT_LIST_HEAD(&fbo->swap); |
353 | fbo->vm_node = NULL; | 353 | fbo->vm_node = NULL; |
354 | atomic_set(&fbo->cpu_writers, 0); | ||
354 | 355 | ||
355 | fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj); | 356 | fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj); |
356 | kref_init(&fbo->list_kref); | 357 | kref_init(&fbo->list_kref); |
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index ca904799f01..b1e02fffd3c 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c | |||
@@ -69,7 +69,7 @@ struct ttm_page_pool { | |||
69 | spinlock_t lock; | 69 | spinlock_t lock; |
70 | bool fill_lock; | 70 | bool fill_lock; |
71 | struct list_head list; | 71 | struct list_head list; |
72 | int gfp_flags; | 72 | gfp_t gfp_flags; |
73 | unsigned npages; | 73 | unsigned npages; |
74 | char *name; | 74 | char *name; |
75 | unsigned long nfrees; | 75 | unsigned long nfrees; |
@@ -475,7 +475,7 @@ static void ttm_handle_caching_state_failure(struct list_head *pages, | |||
475 | * This function is reentrant if caller updates count depending on number of | 475 | * This function is reentrant if caller updates count depending on number of |
476 | * pages returned in pages array. | 476 | * pages returned in pages array. |
477 | */ | 477 | */ |
478 | static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags, | 478 | static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags, |
479 | int ttm_flags, enum ttm_caching_state cstate, unsigned count) | 479 | int ttm_flags, enum ttm_caching_state cstate, unsigned count) |
480 | { | 480 | { |
481 | struct page **caching_array; | 481 | struct page **caching_array; |
@@ -666,7 +666,7 @@ int ttm_get_pages(struct list_head *pages, int flags, | |||
666 | { | 666 | { |
667 | struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); | 667 | struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); |
668 | struct page *p = NULL; | 668 | struct page *p = NULL; |
669 | int gfp_flags = GFP_USER; | 669 | gfp_t gfp_flags = GFP_USER; |
670 | int r; | 670 | int r; |
671 | 671 | ||
672 | /* set zero flag for page allocation if required */ | 672 | /* set zero flag for page allocation if required */ |
@@ -818,7 +818,7 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages) | |||
818 | return 0; | 818 | return 0; |
819 | } | 819 | } |
820 | 820 | ||
821 | void ttm_page_alloc_fini() | 821 | void ttm_page_alloc_fini(void) |
822 | { | 822 | { |
823 | int i; | 823 | int i; |
824 | 824 | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index e645f44e430..5c845b6ec49 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | |||
@@ -148,13 +148,16 @@ static struct pci_device_id vmw_pci_id_list[] = { | |||
148 | {0, 0, 0} | 148 | {0, 0, 0} |
149 | }; | 149 | }; |
150 | 150 | ||
151 | static char *vmw_devname = "vmwgfx"; | 151 | static int enable_fbdev; |
152 | 152 | ||
153 | static int vmw_probe(struct pci_dev *, const struct pci_device_id *); | 153 | static int vmw_probe(struct pci_dev *, const struct pci_device_id *); |
154 | static void vmw_master_init(struct vmw_master *); | 154 | static void vmw_master_init(struct vmw_master *); |
155 | static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, | 155 | static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, |
156 | void *ptr); | 156 | void *ptr); |
157 | 157 | ||
158 | MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev"); | ||
159 | module_param_named(enable_fbdev, enable_fbdev, int, 0600); | ||
160 | |||
158 | static void vmw_print_capabilities(uint32_t capabilities) | 161 | static void vmw_print_capabilities(uint32_t capabilities) |
159 | { | 162 | { |
160 | DRM_INFO("Capabilities:\n"); | 163 | DRM_INFO("Capabilities:\n"); |
@@ -192,8 +195,6 @@ static int vmw_request_device(struct vmw_private *dev_priv) | |||
192 | { | 195 | { |
193 | int ret; | 196 | int ret; |
194 | 197 | ||
195 | vmw_kms_save_vga(dev_priv); | ||
196 | |||
197 | ret = vmw_fifo_init(dev_priv, &dev_priv->fifo); | 198 | ret = vmw_fifo_init(dev_priv, &dev_priv->fifo); |
198 | if (unlikely(ret != 0)) { | 199 | if (unlikely(ret != 0)) { |
199 | DRM_ERROR("Unable to initialize FIFO.\n"); | 200 | DRM_ERROR("Unable to initialize FIFO.\n"); |
@@ -206,9 +207,35 @@ static int vmw_request_device(struct vmw_private *dev_priv) | |||
206 | static void vmw_release_device(struct vmw_private *dev_priv) | 207 | static void vmw_release_device(struct vmw_private *dev_priv) |
207 | { | 208 | { |
208 | vmw_fifo_release(dev_priv, &dev_priv->fifo); | 209 | vmw_fifo_release(dev_priv, &dev_priv->fifo); |
209 | vmw_kms_restore_vga(dev_priv); | ||
210 | } | 210 | } |
211 | 211 | ||
212 | int vmw_3d_resource_inc(struct vmw_private *dev_priv) | ||
213 | { | ||
214 | int ret = 0; | ||
215 | |||
216 | mutex_lock(&dev_priv->release_mutex); | ||
217 | if (unlikely(dev_priv->num_3d_resources++ == 0)) { | ||
218 | ret = vmw_request_device(dev_priv); | ||
219 | if (unlikely(ret != 0)) | ||
220 | --dev_priv->num_3d_resources; | ||
221 | } | ||
222 | mutex_unlock(&dev_priv->release_mutex); | ||
223 | return ret; | ||
224 | } | ||
225 | |||
226 | |||
227 | void vmw_3d_resource_dec(struct vmw_private *dev_priv) | ||
228 | { | ||
229 | int32_t n3d; | ||
230 | |||
231 | mutex_lock(&dev_priv->release_mutex); | ||
232 | if (unlikely(--dev_priv->num_3d_resources == 0)) | ||
233 | vmw_release_device(dev_priv); | ||
234 | n3d = (int32_t) dev_priv->num_3d_resources; | ||
235 | mutex_unlock(&dev_priv->release_mutex); | ||
236 | |||
237 | BUG_ON(n3d < 0); | ||
238 | } | ||
212 | 239 | ||
213 | static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | 240 | static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) |
214 | { | 241 | { |
@@ -228,6 +255,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
228 | dev_priv->last_read_sequence = (uint32_t) -100; | 255 | dev_priv->last_read_sequence = (uint32_t) -100; |
229 | mutex_init(&dev_priv->hw_mutex); | 256 | mutex_init(&dev_priv->hw_mutex); |
230 | mutex_init(&dev_priv->cmdbuf_mutex); | 257 | mutex_init(&dev_priv->cmdbuf_mutex); |
258 | mutex_init(&dev_priv->release_mutex); | ||
231 | rwlock_init(&dev_priv->resource_lock); | 259 | rwlock_init(&dev_priv->resource_lock); |
232 | idr_init(&dev_priv->context_idr); | 260 | idr_init(&dev_priv->context_idr); |
233 | idr_init(&dev_priv->surface_idr); | 261 | idr_init(&dev_priv->surface_idr); |
@@ -244,6 +272,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
244 | dev_priv->vram_start = pci_resource_start(dev->pdev, 1); | 272 | dev_priv->vram_start = pci_resource_start(dev->pdev, 1); |
245 | dev_priv->mmio_start = pci_resource_start(dev->pdev, 2); | 273 | dev_priv->mmio_start = pci_resource_start(dev->pdev, 2); |
246 | 274 | ||
275 | dev_priv->enable_fb = enable_fbdev; | ||
276 | |||
247 | mutex_lock(&dev_priv->hw_mutex); | 277 | mutex_lock(&dev_priv->hw_mutex); |
248 | 278 | ||
249 | vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); | 279 | vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); |
@@ -343,17 +373,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
343 | 373 | ||
344 | dev->dev_private = dev_priv; | 374 | dev->dev_private = dev_priv; |
345 | 375 | ||
346 | if (!dev->devname) | ||
347 | dev->devname = vmw_devname; | ||
348 | |||
349 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) { | ||
350 | ret = drm_irq_install(dev); | ||
351 | if (unlikely(ret != 0)) { | ||
352 | DRM_ERROR("Failed installing irq: %d\n", ret); | ||
353 | goto out_no_irq; | ||
354 | } | ||
355 | } | ||
356 | |||
357 | ret = pci_request_regions(dev->pdev, "vmwgfx probe"); | 376 | ret = pci_request_regions(dev->pdev, "vmwgfx probe"); |
358 | dev_priv->stealth = (ret != 0); | 377 | dev_priv->stealth = (ret != 0); |
359 | if (dev_priv->stealth) { | 378 | if (dev_priv->stealth) { |
@@ -369,26 +388,52 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
369 | goto out_no_device; | 388 | goto out_no_device; |
370 | } | 389 | } |
371 | } | 390 | } |
372 | ret = vmw_request_device(dev_priv); | 391 | ret = vmw_kms_init(dev_priv); |
373 | if (unlikely(ret != 0)) | 392 | if (unlikely(ret != 0)) |
374 | goto out_no_device; | 393 | goto out_no_kms; |
375 | vmw_kms_init(dev_priv); | ||
376 | vmw_overlay_init(dev_priv); | 394 | vmw_overlay_init(dev_priv); |
377 | vmw_fb_init(dev_priv); | 395 | if (dev_priv->enable_fb) { |
396 | ret = vmw_3d_resource_inc(dev_priv); | ||
397 | if (unlikely(ret != 0)) | ||
398 | goto out_no_fifo; | ||
399 | vmw_kms_save_vga(dev_priv); | ||
400 | vmw_fb_init(dev_priv); | ||
401 | DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ? | ||
402 | "Detected device 3D availability.\n" : | ||
403 | "Detected no device 3D availability.\n"); | ||
404 | } else { | ||
405 | DRM_INFO("Delayed 3D detection since we're not " | ||
406 | "running the device in SVGA mode yet.\n"); | ||
407 | } | ||
408 | |||
409 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) { | ||
410 | ret = drm_irq_install(dev); | ||
411 | if (unlikely(ret != 0)) { | ||
412 | DRM_ERROR("Failed installing irq: %d\n", ret); | ||
413 | goto out_no_irq; | ||
414 | } | ||
415 | } | ||
378 | 416 | ||
379 | dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier; | 417 | dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier; |
380 | register_pm_notifier(&dev_priv->pm_nb); | 418 | register_pm_notifier(&dev_priv->pm_nb); |
381 | 419 | ||
382 | DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ? "Have 3D\n" : "No 3D\n"); | ||
383 | |||
384 | return 0; | 420 | return 0; |
385 | 421 | ||
386 | out_no_device: | ||
387 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) | ||
388 | drm_irq_uninstall(dev_priv->dev); | ||
389 | if (dev->devname == vmw_devname) | ||
390 | dev->devname = NULL; | ||
391 | out_no_irq: | 422 | out_no_irq: |
423 | if (dev_priv->enable_fb) { | ||
424 | vmw_fb_close(dev_priv); | ||
425 | vmw_kms_restore_vga(dev_priv); | ||
426 | vmw_3d_resource_dec(dev_priv); | ||
427 | } | ||
428 | out_no_fifo: | ||
429 | vmw_overlay_close(dev_priv); | ||
430 | vmw_kms_close(dev_priv); | ||
431 | out_no_kms: | ||
432 | if (dev_priv->stealth) | ||
433 | pci_release_region(dev->pdev, 2); | ||
434 | else | ||
435 | pci_release_regions(dev->pdev); | ||
436 | out_no_device: | ||
392 | ttm_object_device_release(&dev_priv->tdev); | 437 | ttm_object_device_release(&dev_priv->tdev); |
393 | out_err4: | 438 | out_err4: |
394 | iounmap(dev_priv->mmio_virt); | 439 | iounmap(dev_priv->mmio_virt); |
@@ -415,19 +460,20 @@ static int vmw_driver_unload(struct drm_device *dev) | |||
415 | 460 | ||
416 | unregister_pm_notifier(&dev_priv->pm_nb); | 461 | unregister_pm_notifier(&dev_priv->pm_nb); |
417 | 462 | ||
418 | vmw_fb_close(dev_priv); | 463 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) |
464 | drm_irq_uninstall(dev_priv->dev); | ||
465 | if (dev_priv->enable_fb) { | ||
466 | vmw_fb_close(dev_priv); | ||
467 | vmw_kms_restore_vga(dev_priv); | ||
468 | vmw_3d_resource_dec(dev_priv); | ||
469 | } | ||
419 | vmw_kms_close(dev_priv); | 470 | vmw_kms_close(dev_priv); |
420 | vmw_overlay_close(dev_priv); | 471 | vmw_overlay_close(dev_priv); |
421 | vmw_release_device(dev_priv); | ||
422 | if (dev_priv->stealth) | 472 | if (dev_priv->stealth) |
423 | pci_release_region(dev->pdev, 2); | 473 | pci_release_region(dev->pdev, 2); |
424 | else | 474 | else |
425 | pci_release_regions(dev->pdev); | 475 | pci_release_regions(dev->pdev); |
426 | 476 | ||
427 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) | ||
428 | drm_irq_uninstall(dev_priv->dev); | ||
429 | if (dev->devname == vmw_devname) | ||
430 | dev->devname = NULL; | ||
431 | ttm_object_device_release(&dev_priv->tdev); | 477 | ttm_object_device_release(&dev_priv->tdev); |
432 | iounmap(dev_priv->mmio_virt); | 478 | iounmap(dev_priv->mmio_virt); |
433 | drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start, | 479 | drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start, |
@@ -500,7 +546,7 @@ static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd, | |||
500 | struct drm_ioctl_desc *ioctl = | 546 | struct drm_ioctl_desc *ioctl = |
501 | &vmw_ioctls[nr - DRM_COMMAND_BASE]; | 547 | &vmw_ioctls[nr - DRM_COMMAND_BASE]; |
502 | 548 | ||
503 | if (unlikely(ioctl->cmd != cmd)) { | 549 | if (unlikely(ioctl->cmd_drv != cmd)) { |
504 | DRM_ERROR("Invalid command format, ioctl %d\n", | 550 | DRM_ERROR("Invalid command format, ioctl %d\n", |
505 | nr - DRM_COMMAND_BASE); | 551 | nr - DRM_COMMAND_BASE); |
506 | return -EINVAL; | 552 | return -EINVAL; |
@@ -589,6 +635,16 @@ static int vmw_master_set(struct drm_device *dev, | |||
589 | struct vmw_master *vmaster = vmw_master(file_priv->master); | 635 | struct vmw_master *vmaster = vmw_master(file_priv->master); |
590 | int ret = 0; | 636 | int ret = 0; |
591 | 637 | ||
638 | if (!dev_priv->enable_fb) { | ||
639 | ret = vmw_3d_resource_inc(dev_priv); | ||
640 | if (unlikely(ret != 0)) | ||
641 | return ret; | ||
642 | vmw_kms_save_vga(dev_priv); | ||
643 | mutex_lock(&dev_priv->hw_mutex); | ||
644 | vmw_write(dev_priv, SVGA_REG_TRACES, 0); | ||
645 | mutex_unlock(&dev_priv->hw_mutex); | ||
646 | } | ||
647 | |||
592 | if (active) { | 648 | if (active) { |
593 | BUG_ON(active != &dev_priv->fbdev_master); | 649 | BUG_ON(active != &dev_priv->fbdev_master); |
594 | ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile); | 650 | ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile); |
@@ -617,7 +673,13 @@ static int vmw_master_set(struct drm_device *dev, | |||
617 | return 0; | 673 | return 0; |
618 | 674 | ||
619 | out_no_active_lock: | 675 | out_no_active_lock: |
620 | vmw_release_device(dev_priv); | 676 | if (!dev_priv->enable_fb) { |
677 | mutex_lock(&dev_priv->hw_mutex); | ||
678 | vmw_write(dev_priv, SVGA_REG_TRACES, 1); | ||
679 | mutex_unlock(&dev_priv->hw_mutex); | ||
680 | vmw_kms_restore_vga(dev_priv); | ||
681 | vmw_3d_resource_dec(dev_priv); | ||
682 | } | ||
621 | return ret; | 683 | return ret; |
622 | } | 684 | } |
623 | 685 | ||
@@ -645,11 +707,23 @@ static void vmw_master_drop(struct drm_device *dev, | |||
645 | 707 | ||
646 | ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); | 708 | ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); |
647 | 709 | ||
710 | if (!dev_priv->enable_fb) { | ||
711 | ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM); | ||
712 | if (unlikely(ret != 0)) | ||
713 | DRM_ERROR("Unable to clean VRAM on master drop.\n"); | ||
714 | mutex_lock(&dev_priv->hw_mutex); | ||
715 | vmw_write(dev_priv, SVGA_REG_TRACES, 1); | ||
716 | mutex_unlock(&dev_priv->hw_mutex); | ||
717 | vmw_kms_restore_vga(dev_priv); | ||
718 | vmw_3d_resource_dec(dev_priv); | ||
719 | } | ||
720 | |||
648 | dev_priv->active_master = &dev_priv->fbdev_master; | 721 | dev_priv->active_master = &dev_priv->fbdev_master; |
649 | ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); | 722 | ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); |
650 | ttm_vt_unlock(&dev_priv->fbdev_master.lock); | 723 | ttm_vt_unlock(&dev_priv->fbdev_master.lock); |
651 | 724 | ||
652 | vmw_fb_on(dev_priv); | 725 | if (dev_priv->enable_fb) |
726 | vmw_fb_on(dev_priv); | ||
653 | } | 727 | } |
654 | 728 | ||
655 | 729 | ||
@@ -722,6 +796,7 @@ static struct drm_driver driver = { | |||
722 | .irq_postinstall = vmw_irq_postinstall, | 796 | .irq_postinstall = vmw_irq_postinstall, |
723 | .irq_uninstall = vmw_irq_uninstall, | 797 | .irq_uninstall = vmw_irq_uninstall, |
724 | .irq_handler = vmw_irq_handler, | 798 | .irq_handler = vmw_irq_handler, |
799 | .get_vblank_counter = vmw_get_vblank_counter, | ||
725 | .reclaim_buffers_locked = NULL, | 800 | .reclaim_buffers_locked = NULL, |
726 | .ioctls = vmw_ioctls, | 801 | .ioctls = vmw_ioctls, |
727 | .num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls), | 802 | .num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls), |
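The reworked vmw_driver_load() above brackets the optional fbdev path with the new vmw_3d_resource_inc()/vmw_3d_resource_dec() pair and unwinds failures through goto labels in reverse order of setup. As a rough illustration of that unwind shape only (the step_a/step_b/step_c helpers below are invented stand-ins, not vmwgfx functions), a minimal user-space sketch might look like:

#include <stdio.h>

/* Hypothetical setup/teardown steps standing in for kms/overlay/irq init. */
static int step_a(void)  { return 0; }
static void undo_a(void) { }
static int step_b(void)  { return 0; }
static void undo_b(void) { }
static int step_c(void)  { return -1; }   /* pretend the last step fails */

static int driver_load(void)
{
	int ret;

	ret = step_a();
	if (ret != 0)
		goto out_no_a;
	ret = step_b();
	if (ret != 0)
		goto out_no_b;
	ret = step_c();
	if (ret != 0)
		goto out_no_c;
	return 0;

	/* Unwind strictly in reverse order of acquisition. */
out_no_c:
	undo_b();
out_no_b:
	undo_a();
out_no_a:
	return ret;
}

int main(void)
{
	printf("driver_load() = %d\n", driver_load());
	return 0;
}

When the last step fails, control falls through the labels so everything acquired earlier is released exactly once, which is the shape the new out_no_irq/out_no_fifo/out_no_kms labels follow.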
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 429f917b60b..58de6393f61 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | |||
@@ -277,6 +277,7 @@ struct vmw_private { | |||
277 | 277 | ||
278 | bool stealth; | 278 | bool stealth; |
279 | bool is_opened; | 279 | bool is_opened; |
280 | bool enable_fb; | ||
280 | 281 | ||
281 | /** | 282 | /** |
282 | * Master management. | 283 | * Master management. |
@@ -285,6 +286,9 @@ struct vmw_private { | |||
285 | struct vmw_master *active_master; | 286 | struct vmw_master *active_master; |
286 | struct vmw_master fbdev_master; | 287 | struct vmw_master fbdev_master; |
287 | struct notifier_block pm_nb; | 288 | struct notifier_block pm_nb; |
289 | |||
290 | struct mutex release_mutex; | ||
291 | uint32_t num_3d_resources; | ||
288 | }; | 292 | }; |
289 | 293 | ||
290 | static inline struct vmw_private *vmw_priv(struct drm_device *dev) | 294 | static inline struct vmw_private *vmw_priv(struct drm_device *dev) |
@@ -319,6 +323,9 @@ static inline uint32_t vmw_read(struct vmw_private *dev_priv, | |||
319 | return val; | 323 | return val; |
320 | } | 324 | } |
321 | 325 | ||
326 | int vmw_3d_resource_inc(struct vmw_private *dev_priv); | ||
327 | void vmw_3d_resource_dec(struct vmw_private *dev_priv); | ||
328 | |||
322 | /** | 329 | /** |
323 | * GMR utilities - vmwgfx_gmr.c | 330 | * GMR utilities - vmwgfx_gmr.c |
324 | */ | 331 | */ |
@@ -511,6 +518,7 @@ void vmw_kms_write_svga(struct vmw_private *vmw_priv, | |||
511 | unsigned bbp, unsigned depth); | 518 | unsigned bbp, unsigned depth); |
512 | int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, | 519 | int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, |
513 | struct drm_file *file_priv); | 520 | struct drm_file *file_priv); |
521 | u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc); | ||
514 | 522 | ||
515 | /** | 523 | /** |
516 | * Overlay control - vmwgfx_overlay.c | 524 | * Overlay control - vmwgfx_overlay.c |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index 870967a97c1..409e172f4ab 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | |||
@@ -615,6 +615,11 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv, | |||
615 | if (unlikely(ret != 0)) | 615 | if (unlikely(ret != 0)) |
616 | goto err_unlock; | 616 | goto err_unlock; |
617 | 617 | ||
618 | if (bo->mem.mem_type == TTM_PL_VRAM && | ||
619 | bo->mem.mm_node->start < bo->num_pages) | ||
620 | (void) ttm_bo_validate(bo, &vmw_sys_placement, false, | ||
621 | false, false); | ||
622 | |||
618 | ret = ttm_bo_validate(bo, &ne_placement, false, false, false); | 623 | ret = ttm_bo_validate(bo, &ne_placement, false, false, false); |
619 | 624 | ||
620 | /* Could probably bug on */ | 625 | /* Could probably bug on */ |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c index e6a1eb7ea95..0fe31766e4c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | |||
@@ -106,6 +106,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) | |||
106 | mutex_lock(&dev_priv->hw_mutex); | 106 | mutex_lock(&dev_priv->hw_mutex); |
107 | dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE); | 107 | dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE); |
108 | dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE); | 108 | dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE); |
109 | dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES); | ||
109 | vmw_write(dev_priv, SVGA_REG_ENABLE, 1); | 110 | vmw_write(dev_priv, SVGA_REG_ENABLE, 1); |
110 | 111 | ||
111 | min = 4; | 112 | min = 4; |
@@ -175,6 +176,8 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) | |||
175 | dev_priv->config_done_state); | 176 | dev_priv->config_done_state); |
176 | vmw_write(dev_priv, SVGA_REG_ENABLE, | 177 | vmw_write(dev_priv, SVGA_REG_ENABLE, |
177 | dev_priv->enable_state); | 178 | dev_priv->enable_state); |
179 | vmw_write(dev_priv, SVGA_REG_TRACES, | ||
180 | dev_priv->traces_state); | ||
178 | 181 | ||
179 | mutex_unlock(&dev_priv->hw_mutex); | 182 | mutex_unlock(&dev_priv->hw_mutex); |
180 | vmw_fence_queue_takedown(&fifo->fence_queue); | 183 | vmw_fence_queue_takedown(&fifo->fence_queue); |
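vmw_fifo_init()/vmw_fifo_release() now also snapshot and restore SVGA_REG_TRACES next to the enable and config-done state, so whatever tracing mode the host had configured survives the driver taking and releasing the FIFO. A toy sketch of the save-on-enter/restore-on-exit idea (the regs[] array and field names here are made up for illustration):

#include <stdio.h>

/* Hypothetical "register file" standing in for the SVGA register space. */
static unsigned int regs[3] = { 1, 0, 1 };	/* ENABLE, CONFIG_DONE, TRACES */

struct saved_state {
	unsigned int enable, config_done, traces;
};

static void fifo_init(struct saved_state *s)
{
	s->enable      = regs[0];
	s->config_done = regs[1];
	s->traces      = regs[2];	/* the field newly saved by the patch */
	regs[0] = 1;			/* take over the device */
}

static void fifo_release(const struct saved_state *s)
{
	regs[1] = s->config_done;
	regs[0] = s->enable;
	regs[2] = s->traces;		/* the field newly restored by the patch */
}

int main(void)
{
	struct saved_state s;

	fifo_init(&s);
	fifo_release(&s);
	printf("restored: %u %u %u\n", regs[0], regs[1], regs[2]);
	return 0;
}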
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 64d7f47df86..e882ba099f0 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | |||
@@ -898,7 +898,19 @@ int vmw_kms_save_vga(struct vmw_private *vmw_priv) | |||
898 | save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH); | 898 | save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH); |
899 | save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT); | 899 | save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT); |
900 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); | 900 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); |
901 | if (i == 0 && vmw_priv->num_displays == 1 && | ||
902 | save->width == 0 && save->height == 0) { | ||
903 | |||
904 | /* | ||
905 | * It should be fairly safe to assume that these | ||
906 | * values are uninitialized. | ||
907 | */ | ||
908 | |||
909 | save->width = vmw_priv->vga_width - save->pos_x; | ||
910 | save->height = vmw_priv->vga_height - save->pos_y; | ||
911 | } | ||
901 | } | 912 | } |
913 | |||
902 | return 0; | 914 | return 0; |
903 | } | 915 | } |
904 | 916 | ||
@@ -984,3 +996,8 @@ out_unlock: | |||
984 | ttm_read_unlock(&vmaster->lock); | 996 | ttm_read_unlock(&vmaster->lock); |
985 | return ret; | 997 | return ret; |
986 | } | 998 | } |
999 | |||
1000 | u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc) | ||
1001 | { | ||
1002 | return 0; | ||
1003 | } | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index 7083b1a24df..11cb39e3acc 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | |||
@@ -27,6 +27,8 @@ | |||
27 | 27 | ||
28 | #include "vmwgfx_kms.h" | 28 | #include "vmwgfx_kms.h" |
29 | 29 | ||
30 | #define VMWGFX_LDU_NUM_DU 8 | ||
31 | |||
30 | #define vmw_crtc_to_ldu(x) \ | 32 | #define vmw_crtc_to_ldu(x) \ |
31 | container_of(x, struct vmw_legacy_display_unit, base.crtc) | 33 | container_of(x, struct vmw_legacy_display_unit, base.crtc) |
32 | #define vmw_encoder_to_ldu(x) \ | 34 | #define vmw_encoder_to_ldu(x) \ |
@@ -536,6 +538,10 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit) | |||
536 | 538 | ||
537 | int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv) | 539 | int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv) |
538 | { | 540 | { |
541 | struct drm_device *dev = dev_priv->dev; | ||
542 | int i; | ||
543 | int ret; | ||
544 | |||
539 | if (dev_priv->ldu_priv) { | 545 | if (dev_priv->ldu_priv) { |
540 | DRM_INFO("ldu system already on\n"); | 546 | DRM_INFO("ldu system already on\n"); |
541 | return -EINVAL; | 547 | return -EINVAL; |
@@ -553,23 +559,24 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv) | |||
553 | 559 | ||
554 | drm_mode_create_dirty_info_property(dev_priv->dev); | 560 | drm_mode_create_dirty_info_property(dev_priv->dev); |
555 | 561 | ||
556 | vmw_ldu_init(dev_priv, 0); | ||
557 | /* for old hardware without multimon only enable one display */ | ||
558 | if (dev_priv->capabilities & SVGA_CAP_MULTIMON) { | 562 | if (dev_priv->capabilities & SVGA_CAP_MULTIMON) { |
559 | vmw_ldu_init(dev_priv, 1); | 563 | for (i = 0; i < VMWGFX_LDU_NUM_DU; ++i) |
560 | vmw_ldu_init(dev_priv, 2); | 564 | vmw_ldu_init(dev_priv, i); |
561 | vmw_ldu_init(dev_priv, 3); | 565 | ret = drm_vblank_init(dev, VMWGFX_LDU_NUM_DU); |
562 | vmw_ldu_init(dev_priv, 4); | 566 | } else { |
563 | vmw_ldu_init(dev_priv, 5); | 567 | /* for old hardware without multimon only enable one display */ |
564 | vmw_ldu_init(dev_priv, 6); | 568 | vmw_ldu_init(dev_priv, 0); |
565 | vmw_ldu_init(dev_priv, 7); | 569 | ret = drm_vblank_init(dev, 1); |
566 | } | 570 | } |
567 | 571 | ||
568 | return 0; | 572 | return ret; |
569 | } | 573 | } |
570 | 574 | ||
571 | int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv) | 575 | int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv) |
572 | { | 576 | { |
577 | struct drm_device *dev = dev_priv->dev; | ||
578 | |||
579 | drm_vblank_cleanup(dev); | ||
573 | if (!dev_priv->ldu_priv) | 580 | if (!dev_priv->ldu_priv) |
574 | return -ENOSYS; | 581 | return -ENOSYS; |
575 | 582 | ||
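The legacy display unit init above replaces eight hand-unrolled vmw_ldu_init() calls with a bounded loop and sizes drm_vblank_init() to the number of units it actually created, propagating its return value. A compilable sketch of that shape, with unit_init()/vblank_init() as invented stand-ins for the real calls:

#include <stdio.h>

#define NUM_DISPLAY_UNITS 8	/* mirrors VMWGFX_LDU_NUM_DU */

/* Hypothetical per-unit init standing in for vmw_ldu_init(). */
static int unit_init(int unit)
{
	printf("init display unit %d\n", unit);
	return 0;
}

/* Hypothetical stand-in for drm_vblank_init(dev, num_crtcs). */
static int vblank_init(int num_crtcs)
{
	printf("vblank init for %d crtcs\n", num_crtcs);
	return 0;
}

static int init_display_system(int multimon)
{
	int i;

	if (multimon) {
		for (i = 0; i < NUM_DISPLAY_UNITS; ++i)
			unit_init(i);
		return vblank_init(NUM_DISPLAY_UNITS);
	}
	/* Old hardware without multimon: a single display unit. */
	unit_init(0);
	return vblank_init(1);
}

int main(void)
{
	return init_display_system(1);
}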
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 5f2d5df01e5..c8c40e9979d 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | |||
@@ -211,6 +211,7 @@ static void vmw_hw_context_destroy(struct vmw_resource *res) | |||
211 | cmd->body.cid = cpu_to_le32(res->id); | 211 | cmd->body.cid = cpu_to_le32(res->id); |
212 | 212 | ||
213 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | 213 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
214 | vmw_3d_resource_dec(dev_priv); | ||
214 | } | 215 | } |
215 | 216 | ||
216 | static int vmw_context_init(struct vmw_private *dev_priv, | 217 | static int vmw_context_init(struct vmw_private *dev_priv, |
@@ -247,6 +248,7 @@ static int vmw_context_init(struct vmw_private *dev_priv, | |||
247 | cmd->body.cid = cpu_to_le32(res->id); | 248 | cmd->body.cid = cpu_to_le32(res->id); |
248 | 249 | ||
249 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | 250 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
251 | (void) vmw_3d_resource_inc(dev_priv); | ||
250 | vmw_resource_activate(res, vmw_hw_context_destroy); | 252 | vmw_resource_activate(res, vmw_hw_context_destroy); |
251 | return 0; | 253 | return 0; |
252 | } | 254 | } |
@@ -406,6 +408,7 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res) | |||
406 | cmd->body.sid = cpu_to_le32(res->id); | 408 | cmd->body.sid = cpu_to_le32(res->id); |
407 | 409 | ||
408 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | 410 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
411 | vmw_3d_resource_dec(dev_priv); | ||
409 | } | 412 | } |
410 | 413 | ||
411 | void vmw_surface_res_free(struct vmw_resource *res) | 414 | void vmw_surface_res_free(struct vmw_resource *res) |
@@ -473,6 +476,7 @@ int vmw_surface_init(struct vmw_private *dev_priv, | |||
473 | } | 476 | } |
474 | 477 | ||
475 | vmw_fifo_commit(dev_priv, submit_size); | 478 | vmw_fifo_commit(dev_priv, submit_size); |
479 | (void) vmw_3d_resource_inc(dev_priv); | ||
476 | vmw_resource_activate(res, vmw_hw_surface_destroy); | 480 | vmw_resource_activate(res, vmw_hw_surface_destroy); |
477 | return 0; | 481 | return 0; |
478 | } | 482 | } |
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c index b87569e96b1..f366f968155 100644 --- a/drivers/gpu/vga/vgaarb.c +++ b/drivers/gpu/vga/vgaarb.c | |||
@@ -598,7 +598,7 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev, | |||
598 | pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count); | 598 | pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count); |
599 | } | 599 | } |
600 | 600 | ||
601 | void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace) | 601 | static void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace) |
602 | { | 602 | { |
603 | struct vga_device *vgadev; | 603 | struct vga_device *vgadev; |
604 | unsigned long flags; | 604 | unsigned long flags; |
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 4d4d09bdec0..97499d00615 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig | |||
@@ -409,7 +409,7 @@ config SENSORS_CORETEMP | |||
409 | 409 | ||
410 | config SENSORS_PKGTEMP | 410 | config SENSORS_PKGTEMP |
411 | tristate "Intel processor package temperature sensor" | 411 | tristate "Intel processor package temperature sensor" |
412 | depends on X86 && PCI && EXPERIMENTAL | 412 | depends on X86 && EXPERIMENTAL |
413 | help | 413 | help |
414 | If you say yes here you get support for the package level temperature | 414 | If you say yes here you get support for the package level temperature |
415 | sensor inside your CPU. Check documentation/driver for details. | 415 | sensor inside your CPU. Check documentation/driver for details. |
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index de8111114f4..baa842a80b4 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c | |||
@@ -423,9 +423,18 @@ static int __cpuinit coretemp_device_add(unsigned int cpu) | |||
423 | int err; | 423 | int err; |
424 | struct platform_device *pdev; | 424 | struct platform_device *pdev; |
425 | struct pdev_entry *pdev_entry; | 425 | struct pdev_entry *pdev_entry; |
426 | #ifdef CONFIG_SMP | ||
427 | struct cpuinfo_x86 *c = &cpu_data(cpu); | 426 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
428 | #endif | 427 | |
428 | /* | ||
429 | * CPUID.06H.EAX[0] indicates whether the CPU has thermal | ||
430 | * sensors. We check this bit only, all the early CPUs | ||
431 | * without thermal sensors will be filtered out. | ||
432 | */ | ||
433 | if (!cpu_has(c, X86_FEATURE_DTS)) { | ||
434 | printk(KERN_INFO DRVNAME ": CPU (model=0x%x)" | ||
435 | " has no thermal sensor.\n", c->x86_model); | ||
436 | return 0; | ||
437 | } | ||
429 | 438 | ||
430 | mutex_lock(&pdev_list_mutex); | 439 | mutex_lock(&pdev_list_mutex); |
431 | 440 | ||
@@ -482,14 +491,22 @@ exit: | |||
482 | 491 | ||
483 | static void coretemp_device_remove(unsigned int cpu) | 492 | static void coretemp_device_remove(unsigned int cpu) |
484 | { | 493 | { |
485 | struct pdev_entry *p, *n; | 494 | struct pdev_entry *p; |
495 | unsigned int i; | ||
496 | |||
486 | mutex_lock(&pdev_list_mutex); | 497 | mutex_lock(&pdev_list_mutex); |
487 | list_for_each_entry_safe(p, n, &pdev_list, list) { | 498 | list_for_each_entry(p, &pdev_list, list) { |
488 | if (p->cpu == cpu) { | 499 | if (p->cpu != cpu) |
489 | platform_device_unregister(p->pdev); | 500 | continue; |
490 | list_del(&p->list); | 501 | |
491 | kfree(p); | 502 | platform_device_unregister(p->pdev); |
492 | } | 503 | list_del(&p->list); |
504 | mutex_unlock(&pdev_list_mutex); | ||
505 | kfree(p); | ||
506 | for_each_cpu(i, cpu_sibling_mask(cpu)) | ||
507 | if (i != cpu && !coretemp_device_add(i)) | ||
508 | break; | ||
509 | return; | ||
493 | } | 510 | } |
494 | mutex_unlock(&pdev_list_mutex); | 511 | mutex_unlock(&pdev_list_mutex); |
495 | } | 512 | } |
@@ -527,30 +544,21 @@ static int __init coretemp_init(void) | |||
527 | if (err) | 544 | if (err) |
528 | goto exit; | 545 | goto exit; |
529 | 546 | ||
530 | for_each_online_cpu(i) { | 547 | for_each_online_cpu(i) |
531 | struct cpuinfo_x86 *c = &cpu_data(i); | 548 | coretemp_device_add(i); |
532 | /* | 549 | |
533 | * CPUID.06H.EAX[0] indicates whether the CPU has thermal | 550 | #ifndef CONFIG_HOTPLUG_CPU |
534 | * sensors. We check this bit only, all the early CPUs | ||
535 | * without thermal sensors will be filtered out. | ||
536 | */ | ||
537 | if (c->cpuid_level >= 6 && (cpuid_eax(0x06) & 0x01)) | ||
538 | coretemp_device_add(i); | ||
539 | else { | ||
540 | printk(KERN_INFO DRVNAME ": CPU (model=0x%x)" | ||
541 | " has no thermal sensor.\n", c->x86_model); | ||
542 | } | ||
543 | } | ||
544 | if (list_empty(&pdev_list)) { | 551 | if (list_empty(&pdev_list)) { |
545 | err = -ENODEV; | 552 | err = -ENODEV; |
546 | goto exit_driver_unreg; | 553 | goto exit_driver_unreg; |
547 | } | 554 | } |
555 | #endif | ||
548 | 556 | ||
549 | register_hotcpu_notifier(&coretemp_cpu_notifier); | 557 | register_hotcpu_notifier(&coretemp_cpu_notifier); |
550 | return 0; | 558 | return 0; |
551 | 559 | ||
552 | exit_driver_unreg: | ||
553 | #ifndef CONFIG_HOTPLUG_CPU | 560 | #ifndef CONFIG_HOTPLUG_CPU |
561 | exit_driver_unreg: | ||
554 | platform_driver_unregister(&coretemp_driver); | 562 | platform_driver_unregister(&coretemp_driver); |
555 | #endif | 563 | #endif |
556 | exit: | 564 | exit: |
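The coretemp rework moves the X86_FEATURE_DTS check into coretemp_device_add() and changes removal so the list mutex is dropped before kfree() and before probing a sibling CPU; because the function returns right after unlinking the matching entry, the plain (non-_safe) list walk is sufficient. A hedged user-space approximation of that unlink, unlock, free, return pattern, using pthreads and an invented entry list rather than the kernel's list API:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
	unsigned int cpu;
	struct entry *next;
};

static struct entry *head;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void device_remove(unsigned int cpu)
{
	struct entry **pp, *p;

	pthread_mutex_lock(&list_lock);
	for (pp = &head; (p = *pp) != NULL; pp = &p->next) {
		if (p->cpu != cpu)
			continue;

		*pp = p->next;				/* unlink under the lock      */
		pthread_mutex_unlock(&list_lock);	/* drop it before freeing ... */
		free(p);				/* ... and before any re-probe */
		return;					/* one entry per cpu: done     */
	}
	pthread_mutex_unlock(&list_lock);
}

static void device_add(unsigned int cpu)
{
	struct entry *e = malloc(sizeof(*e));

	if (!e)
		return;
	e->cpu = cpu;
	pthread_mutex_lock(&list_lock);
	e->next = head;
	head = e;
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	device_add(0);
	device_add(1);
	device_remove(0);
	printf("remaining head cpu: %u\n", head ? head->cpu : ~0u);
	return 0;
}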
diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c index 6138f036b15..fc591ae5310 100644 --- a/drivers/hwmon/lis3lv02d.c +++ b/drivers/hwmon/lis3lv02d.c | |||
@@ -277,7 +277,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *dummy) | |||
277 | wake_up_interruptible(&lis3_dev.misc_wait); | 277 | wake_up_interruptible(&lis3_dev.misc_wait); |
278 | kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN); | 278 | kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN); |
279 | out: | 279 | out: |
280 | if (lis3_dev.whoami == WAI_8B && lis3_dev.idev && | 280 | if (lis3_dev.pdata && lis3_dev.whoami == WAI_8B && lis3_dev.idev && |
281 | lis3_dev.idev->input->users) | 281 | lis3_dev.idev->input->users) |
282 | return IRQ_WAKE_THREAD; | 282 | return IRQ_WAKE_THREAD; |
283 | return IRQ_HANDLED; | 283 | return IRQ_HANDLED; |
@@ -718,7 +718,7 @@ int lis3lv02d_init_device(struct lis3lv02d *dev) | |||
718 | * io-apic is not configurable (and generates a warning) but I keep it | 718 | * io-apic is not configurable (and generates a warning) but I keep it |
719 | * in case of support for other hardware. | 719 | * in case of support for other hardware. |
720 | */ | 720 | */ |
721 | if (dev->whoami == WAI_8B) | 721 | if (dev->pdata && dev->whoami == WAI_8B) |
722 | thread_fn = lis302dl_interrupt_thread1_8b; | 722 | thread_fn = lis302dl_interrupt_thread1_8b; |
723 | else | 723 | else |
724 | thread_fn = NULL; | 724 | thread_fn = NULL; |
diff --git a/drivers/hwmon/pkgtemp.c b/drivers/hwmon/pkgtemp.c index 74157fcda6e..f11903936c8 100644 --- a/drivers/hwmon/pkgtemp.c +++ b/drivers/hwmon/pkgtemp.c | |||
@@ -33,7 +33,6 @@ | |||
33 | #include <linux/list.h> | 33 | #include <linux/list.h> |
34 | #include <linux/platform_device.h> | 34 | #include <linux/platform_device.h> |
35 | #include <linux/cpu.h> | 35 | #include <linux/cpu.h> |
36 | #include <linux/pci.h> | ||
37 | #include <asm/msr.h> | 36 | #include <asm/msr.h> |
38 | #include <asm/processor.h> | 37 | #include <asm/processor.h> |
39 | 38 | ||
@@ -224,7 +223,7 @@ static int __devinit pkgtemp_probe(struct platform_device *pdev) | |||
224 | 223 | ||
225 | err = sysfs_create_group(&pdev->dev.kobj, &pkgtemp_group); | 224 | err = sysfs_create_group(&pdev->dev.kobj, &pkgtemp_group); |
226 | if (err) | 225 | if (err) |
227 | goto exit_free; | 226 | goto exit_dev; |
228 | 227 | ||
229 | data->hwmon_dev = hwmon_device_register(&pdev->dev); | 228 | data->hwmon_dev = hwmon_device_register(&pdev->dev); |
230 | if (IS_ERR(data->hwmon_dev)) { | 229 | if (IS_ERR(data->hwmon_dev)) { |
@@ -238,6 +237,8 @@ static int __devinit pkgtemp_probe(struct platform_device *pdev) | |||
238 | 237 | ||
239 | exit_class: | 238 | exit_class: |
240 | sysfs_remove_group(&pdev->dev.kobj, &pkgtemp_group); | 239 | sysfs_remove_group(&pdev->dev.kobj, &pkgtemp_group); |
240 | exit_dev: | ||
241 | device_remove_file(&pdev->dev, &sensor_dev_attr_temp1_max.dev_attr); | ||
241 | exit_free: | 242 | exit_free: |
242 | kfree(data); | 243 | kfree(data); |
243 | exit: | 244 | exit: |
@@ -250,6 +251,7 @@ static int __devexit pkgtemp_remove(struct platform_device *pdev) | |||
250 | 251 | ||
251 | hwmon_device_unregister(data->hwmon_dev); | 252 | hwmon_device_unregister(data->hwmon_dev); |
252 | sysfs_remove_group(&pdev->dev.kobj, &pkgtemp_group); | 253 | sysfs_remove_group(&pdev->dev.kobj, &pkgtemp_group); |
254 | device_remove_file(&pdev->dev, &sensor_dev_attr_temp1_max.dev_attr); | ||
253 | platform_set_drvdata(pdev, NULL); | 255 | platform_set_drvdata(pdev, NULL); |
254 | kfree(data); | 256 | kfree(data); |
255 | return 0; | 257 | return 0; |
@@ -281,9 +283,10 @@ static int __cpuinit pkgtemp_device_add(unsigned int cpu) | |||
281 | int err; | 283 | int err; |
282 | struct platform_device *pdev; | 284 | struct platform_device *pdev; |
283 | struct pdev_entry *pdev_entry; | 285 | struct pdev_entry *pdev_entry; |
284 | #ifdef CONFIG_SMP | ||
285 | struct cpuinfo_x86 *c = &cpu_data(cpu); | 286 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
286 | #endif | 287 | |
288 | if (!cpu_has(c, X86_FEATURE_PTS)) | ||
289 | return 0; | ||
287 | 290 | ||
288 | mutex_lock(&pdev_list_mutex); | 291 | mutex_lock(&pdev_list_mutex); |
289 | 292 | ||
@@ -339,17 +342,18 @@ exit: | |||
339 | #ifdef CONFIG_HOTPLUG_CPU | 342 | #ifdef CONFIG_HOTPLUG_CPU |
340 | static void pkgtemp_device_remove(unsigned int cpu) | 343 | static void pkgtemp_device_remove(unsigned int cpu) |
341 | { | 344 | { |
342 | struct pdev_entry *p, *n; | 345 | struct pdev_entry *p; |
343 | unsigned int i; | 346 | unsigned int i; |
344 | int err; | 347 | int err; |
345 | 348 | ||
346 | mutex_lock(&pdev_list_mutex); | 349 | mutex_lock(&pdev_list_mutex); |
347 | list_for_each_entry_safe(p, n, &pdev_list, list) { | 350 | list_for_each_entry(p, &pdev_list, list) { |
348 | if (p->cpu != cpu) | 351 | if (p->cpu != cpu) |
349 | continue; | 352 | continue; |
350 | 353 | ||
351 | platform_device_unregister(p->pdev); | 354 | platform_device_unregister(p->pdev); |
352 | list_del(&p->list); | 355 | list_del(&p->list); |
356 | mutex_unlock(&pdev_list_mutex); | ||
353 | kfree(p); | 357 | kfree(p); |
354 | for_each_cpu(i, cpu_core_mask(cpu)) { | 358 | for_each_cpu(i, cpu_core_mask(cpu)) { |
355 | if (i != cpu) { | 359 | if (i != cpu) { |
@@ -358,7 +362,7 @@ static void pkgtemp_device_remove(unsigned int cpu) | |||
358 | break; | 362 | break; |
359 | } | 363 | } |
360 | } | 364 | } |
361 | break; | 365 | return; |
362 | } | 366 | } |
363 | mutex_unlock(&pdev_list_mutex); | 367 | mutex_unlock(&pdev_list_mutex); |
364 | } | 368 | } |
@@ -399,11 +403,6 @@ static int __init pkgtemp_init(void) | |||
399 | goto exit; | 403 | goto exit; |
400 | 404 | ||
401 | for_each_online_cpu(i) { | 405 | for_each_online_cpu(i) { |
402 | struct cpuinfo_x86 *c = &cpu_data(i); | ||
403 | |||
404 | if (!cpu_has(c, X86_FEATURE_PTS)) | ||
405 | continue; | ||
406 | |||
407 | err = pkgtemp_device_add(i); | 406 | err = pkgtemp_device_add(i); |
408 | if (err) | 407 | if (err) |
409 | goto exit_devices_unreg; | 408 | goto exit_devices_unreg; |
diff --git a/drivers/staging/ti-st/st.h b/drivers/staging/ti-st/st.h index 9952579425b..1b3060eb292 100644 --- a/drivers/staging/ti-st/st.h +++ b/drivers/staging/ti-st/st.h | |||
@@ -80,5 +80,4 @@ struct st_proto_s { | |||
80 | extern long st_register(struct st_proto_s *); | 80 | extern long st_register(struct st_proto_s *); |
81 | extern long st_unregister(enum proto_type); | 81 | extern long st_unregister(enum proto_type); |
82 | 82 | ||
83 | extern struct platform_device *st_get_plat_device(void); | ||
84 | #endif /* ST_H */ | 83 | #endif /* ST_H */ |
diff --git a/drivers/staging/ti-st/st_core.c b/drivers/staging/ti-st/st_core.c index 063c9b1db1a..b85d8bfdf60 100644 --- a/drivers/staging/ti-st/st_core.c +++ b/drivers/staging/ti-st/st_core.c | |||
@@ -38,7 +38,6 @@ | |||
38 | #include "st_ll.h" | 38 | #include "st_ll.h" |
39 | #include "st.h" | 39 | #include "st.h" |
40 | 40 | ||
41 | #define VERBOSE | ||
42 | /* strings to be used for rfkill entries and by | 41 | /* strings to be used for rfkill entries and by |
43 | * ST Core to be used for sysfs debug entry | 42 | * ST Core to be used for sysfs debug entry |
44 | */ | 43 | */ |
@@ -581,7 +580,7 @@ long st_register(struct st_proto_s *new_proto) | |||
581 | long err = 0; | 580 | long err = 0; |
582 | unsigned long flags = 0; | 581 | unsigned long flags = 0; |
583 | 582 | ||
584 | st_kim_ref(&st_gdata); | 583 | st_kim_ref(&st_gdata, 0); |
585 | pr_info("%s(%d) ", __func__, new_proto->type); | 584 | pr_info("%s(%d) ", __func__, new_proto->type); |
586 | if (st_gdata == NULL || new_proto == NULL || new_proto->recv == NULL | 585 | if (st_gdata == NULL || new_proto == NULL || new_proto->recv == NULL |
587 | || new_proto->reg_complete_cb == NULL) { | 586 | || new_proto->reg_complete_cb == NULL) { |
@@ -713,7 +712,7 @@ long st_unregister(enum proto_type type) | |||
713 | 712 | ||
714 | pr_debug("%s: %d ", __func__, type); | 713 | pr_debug("%s: %d ", __func__, type); |
715 | 714 | ||
716 | st_kim_ref(&st_gdata); | 715 | st_kim_ref(&st_gdata, 0); |
717 | if (type < ST_BT || type >= ST_MAX) { | 716 | if (type < ST_BT || type >= ST_MAX) { |
718 | pr_err(" protocol %d not supported", type); | 717 | pr_err(" protocol %d not supported", type); |
719 | return -EPROTONOSUPPORT; | 718 | return -EPROTONOSUPPORT; |
@@ -767,7 +766,7 @@ long st_write(struct sk_buff *skb) | |||
767 | #endif | 766 | #endif |
768 | long len; | 767 | long len; |
769 | 768 | ||
770 | st_kim_ref(&st_gdata); | 769 | st_kim_ref(&st_gdata, 0); |
771 | if (unlikely(skb == NULL || st_gdata == NULL | 770 | if (unlikely(skb == NULL || st_gdata == NULL |
772 | || st_gdata->tty == NULL)) { | 771 | || st_gdata->tty == NULL)) { |
773 | pr_err("data/tty unavailable to perform write"); | 772 | pr_err("data/tty unavailable to perform write"); |
@@ -818,7 +817,7 @@ static int st_tty_open(struct tty_struct *tty) | |||
818 | struct st_data_s *st_gdata; | 817 | struct st_data_s *st_gdata; |
819 | pr_info("%s ", __func__); | 818 | pr_info("%s ", __func__); |
820 | 819 | ||
821 | st_kim_ref(&st_gdata); | 820 | st_kim_ref(&st_gdata, 0); |
822 | st_gdata->tty = tty; | 821 | st_gdata->tty = tty; |
823 | tty->disc_data = st_gdata; | 822 | tty->disc_data = st_gdata; |
824 | 823 | ||
diff --git a/drivers/staging/ti-st/st_core.h b/drivers/staging/ti-st/st_core.h index e0c32d149f5..8601320a679 100644 --- a/drivers/staging/ti-st/st_core.h +++ b/drivers/staging/ti-st/st_core.h | |||
@@ -117,7 +117,7 @@ int st_core_init(struct st_data_s **); | |||
117 | void st_core_exit(struct st_data_s *); | 117 | void st_core_exit(struct st_data_s *); |
118 | 118 | ||
119 | /* ask for reference from KIM */ | 119 | /* ask for reference from KIM */ |
120 | void st_kim_ref(struct st_data_s **); | 120 | void st_kim_ref(struct st_data_s **, int); |
121 | 121 | ||
122 | #define GPS_STUB_TEST | 122 | #define GPS_STUB_TEST |
123 | #ifdef GPS_STUB_TEST | 123 | #ifdef GPS_STUB_TEST |
diff --git a/drivers/staging/ti-st/st_kim.c b/drivers/staging/ti-st/st_kim.c index b4a6c7fdc4e..9e99463f76e 100644 --- a/drivers/staging/ti-st/st_kim.c +++ b/drivers/staging/ti-st/st_kim.c | |||
@@ -72,11 +72,26 @@ const unsigned char *protocol_names[] = { | |||
72 | PROTO_ENTRY(ST_GPS, "GPS"), | 72 | PROTO_ENTRY(ST_GPS, "GPS"), |
73 | }; | 73 | }; |
74 | 74 | ||
75 | #define MAX_ST_DEVICES 3 /* Imagine 1 on each UART for now */ | ||
76 | struct platform_device *st_kim_devices[MAX_ST_DEVICES]; | ||
75 | 77 | ||
76 | /**********************************************************************/ | 78 | /**********************************************************************/ |
77 | /* internal functions */ | 79 | /* internal functions */ |
78 | 80 | ||
79 | /** | 81 | /** |
82 | * st_get_plat_device - | ||
83 | * function which returns the reference to the platform device | ||
84 | * requested by id. As of now only 1 such device exists (id=0) | ||
85 | * the context requesting for reference can get the id to be | ||
86 | * requested by a. The protocol driver which is registering or | ||
87 | * b. the tty device which is opened. | ||
88 | */ | ||
89 | static struct platform_device *st_get_plat_device(int id) | ||
90 | { | ||
91 | return st_kim_devices[id]; | ||
92 | } | ||
93 | |||
94 | /** | ||
80 | * validate_firmware_response - | 95 | * validate_firmware_response - |
81 | * function to return whether the firmware response was proper | 96 | * function to return whether the firmware response was proper |
82 | * in case of error don't complete so that waiting for proper | 97 | * in case of error don't complete so that waiting for proper |
@@ -353,7 +368,7 @@ void st_kim_chip_toggle(enum proto_type type, enum kim_gpio_state state) | |||
353 | struct kim_data_s *kim_gdata; | 368 | struct kim_data_s *kim_gdata; |
354 | pr_info(" %s ", __func__); | 369 | pr_info(" %s ", __func__); |
355 | 370 | ||
356 | kim_pdev = st_get_plat_device(); | 371 | kim_pdev = st_get_plat_device(0); |
357 | kim_gdata = dev_get_drvdata(&kim_pdev->dev); | 372 | kim_gdata = dev_get_drvdata(&kim_pdev->dev); |
358 | 373 | ||
359 | if (kim_gdata->gpios[type] == -1) { | 374 | if (kim_gdata->gpios[type] == -1) { |
@@ -574,12 +589,12 @@ static int kim_toggle_radio(void *data, bool blocked) | |||
574 | * This would enable multiple such platform devices to exist | 589 | * This would enable multiple such platform devices to exist |
575 | * on a given platform | 590 | * on a given platform |
576 | */ | 591 | */ |
577 | void st_kim_ref(struct st_data_s **core_data) | 592 | void st_kim_ref(struct st_data_s **core_data, int id) |
578 | { | 593 | { |
579 | struct platform_device *pdev; | 594 | struct platform_device *pdev; |
580 | struct kim_data_s *kim_gdata; | 595 | struct kim_data_s *kim_gdata; |
581 | /* get kim_gdata reference from platform device */ | 596 | /* get kim_gdata reference from platform device */ |
582 | pdev = st_get_plat_device(); | 597 | pdev = st_get_plat_device(id); |
583 | kim_gdata = dev_get_drvdata(&pdev->dev); | 598 | kim_gdata = dev_get_drvdata(&pdev->dev); |
584 | *core_data = kim_gdata->core_data; | 599 | *core_data = kim_gdata->core_data; |
585 | } | 600 | } |
@@ -623,6 +638,7 @@ static int kim_probe(struct platform_device *pdev) | |||
623 | long *gpios = pdev->dev.platform_data; | 638 | long *gpios = pdev->dev.platform_data; |
624 | struct kim_data_s *kim_gdata; | 639 | struct kim_data_s *kim_gdata; |
625 | 640 | ||
641 | st_kim_devices[pdev->id] = pdev; | ||
626 | kim_gdata = kzalloc(sizeof(struct kim_data_s), GFP_ATOMIC); | 642 | kim_gdata = kzalloc(sizeof(struct kim_data_s), GFP_ATOMIC); |
627 | if (!kim_gdata) { | 643 | if (!kim_gdata) { |
628 | pr_err("no mem to allocate"); | 644 | pr_err("no mem to allocate"); |
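st_get_plat_device() is now an id-based lookup into a small st_kim_devices[] array that kim_probe() populates, so st_kim_ref() and friends can pick a specific adapter instead of assuming a single global one. A minimal sketch of that registry-by-id pattern (the names and the bounds checking here are invented for illustration):

#include <stddef.h>
#include <stdio.h>

#define MAX_DEVICES 3	/* mirrors MAX_ST_DEVICES: imagine one per UART */

struct platform_dev {
	int id;
	const char *name;
};

static struct platform_dev *registry[MAX_DEVICES];

/* Probe-time registration: remember the device under its id. */
static void register_device(struct platform_dev *pdev)
{
	if (pdev->id >= 0 && pdev->id < MAX_DEVICES)
		registry[pdev->id] = pdev;
}

/* Lookup by id; returns NULL if nothing was probed for that slot. */
static struct platform_dev *get_device(int id)
{
	if (id < 0 || id >= MAX_DEVICES)
		return NULL;
	return registry[id];
}

int main(void)
{
	struct platform_dev uart0 = { .id = 0, .name = "kim-uart0" };

	register_device(&uart0);
	printf("id 0 -> %s\n", get_device(0) ? get_device(0)->name : "(none)");
	printf("id 1 -> %s\n", get_device(1) ? get_device(1)->name : "(none)");
	return 0;
}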
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig index 7e594449600..9eed5b52d9d 100644 --- a/drivers/usb/core/Kconfig +++ b/drivers/usb/core/Kconfig | |||
@@ -91,12 +91,12 @@ config USB_DYNAMIC_MINORS | |||
91 | If you are unsure about this, say N here. | 91 | If you are unsure about this, say N here. |
92 | 92 | ||
93 | config USB_SUSPEND | 93 | config USB_SUSPEND |
94 | bool "USB runtime power management (suspend/resume and wakeup)" | 94 | bool "USB runtime power management (autosuspend) and wakeup" |
95 | depends on USB && PM_RUNTIME | 95 | depends on USB && PM_RUNTIME |
96 | help | 96 | help |
97 | If you say Y here, you can use driver calls or the sysfs | 97 | If you say Y here, you can use driver calls or the sysfs |
98 | "power/level" file to suspend or resume individual USB | 98 | "power/control" file to enable or disable autosuspend for |
99 | peripherals and to enable or disable autosuspend (see | 99 | individual USB peripherals (see |
100 | Documentation/usb/power-management.txt for more details). | 100 | Documentation/usb/power-management.txt for more details). |
101 | 101 | ||
102 | Also, USB "remote wakeup" signaling is supported, whereby some | 102 | Also, USB "remote wakeup" signaling is supported, whereby some |
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c index f06f5dbc8cd..1e6ccef2cf0 100644 --- a/drivers/usb/core/file.c +++ b/drivers/usb/core/file.c | |||
@@ -159,9 +159,9 @@ void usb_major_cleanup(void) | |||
159 | int usb_register_dev(struct usb_interface *intf, | 159 | int usb_register_dev(struct usb_interface *intf, |
160 | struct usb_class_driver *class_driver) | 160 | struct usb_class_driver *class_driver) |
161 | { | 161 | { |
162 | int retval = -EINVAL; | 162 | int retval; |
163 | int minor_base = class_driver->minor_base; | 163 | int minor_base = class_driver->minor_base; |
164 | int minor = 0; | 164 | int minor; |
165 | char name[20]; | 165 | char name[20]; |
166 | char *temp; | 166 | char *temp; |
167 | 167 | ||
@@ -173,12 +173,17 @@ int usb_register_dev(struct usb_interface *intf, | |||
173 | */ | 173 | */ |
174 | minor_base = 0; | 174 | minor_base = 0; |
175 | #endif | 175 | #endif |
176 | intf->minor = -1; | ||
177 | |||
178 | dbg ("looking for a minor, starting at %d", minor_base); | ||
179 | 176 | ||
180 | if (class_driver->fops == NULL) | 177 | if (class_driver->fops == NULL) |
181 | goto exit; | 178 | return -EINVAL; |
179 | if (intf->minor >= 0) | ||
180 | return -EADDRINUSE; | ||
181 | |||
182 | retval = init_usb_class(); | ||
183 | if (retval) | ||
184 | return retval; | ||
185 | |||
186 | dev_dbg(&intf->dev, "looking for a minor, starting at %d", minor_base); | ||
182 | 187 | ||
183 | down_write(&minor_rwsem); | 188 | down_write(&minor_rwsem); |
184 | for (minor = minor_base; minor < MAX_USB_MINORS; ++minor) { | 189 | for (minor = minor_base; minor < MAX_USB_MINORS; ++minor) { |
@@ -186,20 +191,12 @@ int usb_register_dev(struct usb_interface *intf, | |||
186 | continue; | 191 | continue; |
187 | 192 | ||
188 | usb_minors[minor] = class_driver->fops; | 193 | usb_minors[minor] = class_driver->fops; |
189 | 194 | intf->minor = minor; | |
190 | retval = 0; | ||
191 | break; | 195 | break; |
192 | } | 196 | } |
193 | up_write(&minor_rwsem); | 197 | up_write(&minor_rwsem); |
194 | 198 | if (intf->minor < 0) | |
195 | if (retval) | 199 | return -EXFULL; |
196 | goto exit; | ||
197 | |||
198 | retval = init_usb_class(); | ||
199 | if (retval) | ||
200 | goto exit; | ||
201 | |||
202 | intf->minor = minor; | ||
203 | 200 | ||
204 | /* create a usb class device for this usb interface */ | 201 | /* create a usb class device for this usb interface */ |
205 | snprintf(name, sizeof(name), class_driver->name, minor - minor_base); | 202 | snprintf(name, sizeof(name), class_driver->name, minor - minor_base); |
@@ -213,11 +210,11 @@ int usb_register_dev(struct usb_interface *intf, | |||
213 | "%s", temp); | 210 | "%s", temp); |
214 | if (IS_ERR(intf->usb_dev)) { | 211 | if (IS_ERR(intf->usb_dev)) { |
215 | down_write(&minor_rwsem); | 212 | down_write(&minor_rwsem); |
216 | usb_minors[intf->minor] = NULL; | 213 | usb_minors[minor] = NULL; |
214 | intf->minor = -1; | ||
217 | up_write(&minor_rwsem); | 215 | up_write(&minor_rwsem); |
218 | retval = PTR_ERR(intf->usb_dev); | 216 | retval = PTR_ERR(intf->usb_dev); |
219 | } | 217 | } |
220 | exit: | ||
221 | return retval; | 218 | return retval; |
222 | } | 219 | } |
223 | EXPORT_SYMBOL_GPL(usb_register_dev); | 220 | EXPORT_SYMBOL_GPL(usb_register_dev); |
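usb_register_dev() now validates its arguments and initializes the USB class before taking minor_rwsem, then claims the first free slot in the minors table, records it in intf->minor, and reports -EXFULL when the table is full; a later device-creation failure releases the slot again. A compilable approximation of the claim-a-free-slot idea, with a plain mutex standing in for the rwsem and invented helper names:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

#define MAX_MINORS 16

static void *minors[MAX_MINORS];
static pthread_mutex_t minors_lock = PTHREAD_MUTEX_INITIALIZER;

/* Claim the first free minor at or above minor_base; -EXFULL (Linux errno)
 * if none is left, matching the error the patch returns. */
static int claim_minor(unsigned int minor_base, void *owner)
{
	unsigned int minor;
	int ret = -EXFULL;

	pthread_mutex_lock(&minors_lock);
	for (minor = minor_base; minor < MAX_MINORS; ++minor) {
		if (minors[minor])
			continue;
		minors[minor] = owner;
		ret = (int)minor;
		break;
	}
	pthread_mutex_unlock(&minors_lock);
	return ret;
}

/* Undo path used when later setup (device creation) fails. */
static void release_minor(int minor)
{
	pthread_mutex_lock(&minors_lock);
	minors[minor] = NULL;
	pthread_mutex_unlock(&minors_lock);
}

int main(void)
{
	int a = claim_minor(0, (void *)0x1);
	int b = claim_minor(0, (void *)0x2);

	printf("claimed minors %d and %d\n", a, b);
	release_minor(a);
	return 0;
}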
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index 844683e5038..9f0ce7de0e3 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c | |||
@@ -1802,6 +1802,7 @@ free_interfaces: | |||
1802 | intf->dev.groups = usb_interface_groups; | 1802 | intf->dev.groups = usb_interface_groups; |
1803 | intf->dev.dma_mask = dev->dev.dma_mask; | 1803 | intf->dev.dma_mask = dev->dev.dma_mask; |
1804 | INIT_WORK(&intf->reset_ws, __usb_queue_reset_device); | 1804 | INIT_WORK(&intf->reset_ws, __usb_queue_reset_device); |
1805 | intf->minor = -1; | ||
1805 | device_initialize(&intf->dev); | 1806 | device_initialize(&intf->dev); |
1806 | dev_set_name(&intf->dev, "%d-%s:%d.%d", | 1807 | dev_set_name(&intf->dev, "%d-%s:%d.%d", |
1807 | dev->bus->busnum, dev->devpath, | 1808 | dev->bus->busnum, dev->devpath, |
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c index 59dc3d351b6..5ab5bb89bae 100644 --- a/drivers/usb/musb/cppi_dma.c +++ b/drivers/usb/musb/cppi_dma.c | |||
@@ -322,6 +322,7 @@ cppi_channel_allocate(struct dma_controller *c, | |||
322 | index, transmit ? 'T' : 'R', cppi_ch); | 322 | index, transmit ? 'T' : 'R', cppi_ch); |
323 | cppi_ch->hw_ep = ep; | 323 | cppi_ch->hw_ep = ep; |
324 | cppi_ch->channel.status = MUSB_DMA_STATUS_FREE; | 324 | cppi_ch->channel.status = MUSB_DMA_STATUS_FREE; |
325 | cppi_ch->channel.max_len = 0x7fffffff; | ||
325 | 326 | ||
326 | DBG(4, "Allocate CPPI%d %cX\n", index, transmit ? 'T' : 'R'); | 327 | DBG(4, "Allocate CPPI%d %cX\n", index, transmit ? 'T' : 'R'); |
327 | return &cppi_ch->channel; | 328 | return &cppi_ch->channel; |
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index 6fca870e957..d065e23f123 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c | |||
@@ -300,6 +300,11 @@ static void txstate(struct musb *musb, struct musb_request *req) | |||
300 | #ifndef CONFIG_MUSB_PIO_ONLY | 300 | #ifndef CONFIG_MUSB_PIO_ONLY |
301 | if (is_dma_capable() && musb_ep->dma) { | 301 | if (is_dma_capable() && musb_ep->dma) { |
302 | struct dma_controller *c = musb->dma_controller; | 302 | struct dma_controller *c = musb->dma_controller; |
303 | size_t request_size; | ||
304 | |||
305 | /* setup DMA, then program endpoint CSR */ | ||
306 | request_size = min_t(size_t, request->length - request->actual, | ||
307 | musb_ep->dma->max_len); | ||
303 | 308 | ||
304 | use_dma = (request->dma != DMA_ADDR_INVALID); | 309 | use_dma = (request->dma != DMA_ADDR_INVALID); |
305 | 310 | ||
@@ -307,11 +312,6 @@ static void txstate(struct musb *musb, struct musb_request *req) | |||
307 | 312 | ||
308 | #ifdef CONFIG_USB_INVENTRA_DMA | 313 | #ifdef CONFIG_USB_INVENTRA_DMA |
309 | { | 314 | { |
310 | size_t request_size; | ||
311 | |||
312 | /* setup DMA, then program endpoint CSR */ | ||
313 | request_size = min_t(size_t, request->length, | ||
314 | musb_ep->dma->max_len); | ||
315 | if (request_size < musb_ep->packet_sz) | 315 | if (request_size < musb_ep->packet_sz) |
316 | musb_ep->dma->desired_mode = 0; | 316 | musb_ep->dma->desired_mode = 0; |
317 | else | 317 | else |
@@ -373,8 +373,8 @@ static void txstate(struct musb *musb, struct musb_request *req) | |||
373 | use_dma = use_dma && c->channel_program( | 373 | use_dma = use_dma && c->channel_program( |
374 | musb_ep->dma, musb_ep->packet_sz, | 374 | musb_ep->dma, musb_ep->packet_sz, |
375 | 0, | 375 | 0, |
376 | request->dma, | 376 | request->dma + request->actual, |
377 | request->length); | 377 | request_size); |
378 | if (!use_dma) { | 378 | if (!use_dma) { |
379 | c->channel_release(musb_ep->dma); | 379 | c->channel_release(musb_ep->dma); |
380 | musb_ep->dma = NULL; | 380 | musb_ep->dma = NULL; |
@@ -386,8 +386,8 @@ static void txstate(struct musb *musb, struct musb_request *req) | |||
386 | use_dma = use_dma && c->channel_program( | 386 | use_dma = use_dma && c->channel_program( |
387 | musb_ep->dma, musb_ep->packet_sz, | 387 | musb_ep->dma, musb_ep->packet_sz, |
388 | request->zero, | 388 | request->zero, |
389 | request->dma, | 389 | request->dma + request->actual, |
390 | request->length); | 390 | request_size); |
391 | #endif | 391 | #endif |
392 | } | 392 | } |
393 | #endif | 393 | #endif |
@@ -501,26 +501,14 @@ void musb_g_tx(struct musb *musb, u8 epnum) | |||
501 | request->zero = 0; | 501 | request->zero = 0; |
502 | } | 502 | } |
503 | 503 | ||
504 | /* ... or if not, then complete it. */ | 504 | if (request->actual == request->length) { |
505 | musb_g_giveback(musb_ep, request, 0); | 505 | musb_g_giveback(musb_ep, request, 0); |
506 | 506 | request = musb_ep->desc ? next_request(musb_ep) : NULL; | |
507 | /* | 507 | if (!request) { |
508 | * Kickstart next transfer if appropriate; | 508 | DBG(4, "%s idle now\n", |
509 | * the packet that just completed might not | 509 | musb_ep->end_point.name); |
510 | * be transmitted for hours or days. | 510 | return; |
511 | * REVISIT for double buffering... | 511 | } |
512 | * FIXME revisit for stalls too... | ||
513 | */ | ||
514 | musb_ep_select(mbase, epnum); | ||
515 | csr = musb_readw(epio, MUSB_TXCSR); | ||
516 | if (csr & MUSB_TXCSR_FIFONOTEMPTY) | ||
517 | return; | ||
518 | |||
519 | request = musb_ep->desc ? next_request(musb_ep) : NULL; | ||
520 | if (!request) { | ||
521 | DBG(4, "%s idle now\n", | ||
522 | musb_ep->end_point.name); | ||
523 | return; | ||
524 | } | 512 | } |
525 | } | 513 | } |
526 | 514 | ||
@@ -568,11 +556,19 @@ static void rxstate(struct musb *musb, struct musb_request *req) | |||
568 | { | 556 | { |
569 | const u8 epnum = req->epnum; | 557 | const u8 epnum = req->epnum; |
570 | struct usb_request *request = &req->request; | 558 | struct usb_request *request = &req->request; |
571 | struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out; | 559 | struct musb_ep *musb_ep; |
572 | void __iomem *epio = musb->endpoints[epnum].regs; | 560 | void __iomem *epio = musb->endpoints[epnum].regs; |
573 | unsigned fifo_count = 0; | 561 | unsigned fifo_count = 0; |
574 | u16 len = musb_ep->packet_sz; | 562 | u16 len; |
575 | u16 csr = musb_readw(epio, MUSB_RXCSR); | 563 | u16 csr = musb_readw(epio, MUSB_RXCSR); |
564 | struct musb_hw_ep *hw_ep = &musb->endpoints[epnum]; | ||
565 | |||
566 | if (hw_ep->is_shared_fifo) | ||
567 | musb_ep = &hw_ep->ep_in; | ||
568 | else | ||
569 | musb_ep = &hw_ep->ep_out; | ||
570 | |||
571 | len = musb_ep->packet_sz; | ||
576 | 572 | ||
577 | /* We shouldn't get here while DMA is active, but we do... */ | 573 | /* We shouldn't get here while DMA is active, but we do... */ |
578 | if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) { | 574 | if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) { |
@@ -647,8 +643,8 @@ static void rxstate(struct musb *musb, struct musb_request *req) | |||
647 | */ | 643 | */ |
648 | 644 | ||
649 | csr |= MUSB_RXCSR_DMAENAB; | 645 | csr |= MUSB_RXCSR_DMAENAB; |
650 | #ifdef USE_MODE1 | ||
651 | csr |= MUSB_RXCSR_AUTOCLEAR; | 646 | csr |= MUSB_RXCSR_AUTOCLEAR; |
647 | #ifdef USE_MODE1 | ||
652 | /* csr |= MUSB_RXCSR_DMAMODE; */ | 648 | /* csr |= MUSB_RXCSR_DMAMODE; */ |
653 | 649 | ||
654 | /* this special sequence (enabling and then | 650 | /* this special sequence (enabling and then |
@@ -663,10 +659,11 @@ static void rxstate(struct musb *musb, struct musb_request *req) | |||
663 | if (request->actual < request->length) { | 659 | if (request->actual < request->length) { |
664 | int transfer_size = 0; | 660 | int transfer_size = 0; |
665 | #ifdef USE_MODE1 | 661 | #ifdef USE_MODE1 |
666 | transfer_size = min(request->length, | 662 | transfer_size = min(request->length - request->actual, |
667 | channel->max_len); | 663 | channel->max_len); |
668 | #else | 664 | #else |
669 | transfer_size = len; | 665 | transfer_size = min(request->length - request->actual, |
666 | (unsigned)len); | ||
670 | #endif | 667 | #endif |
671 | if (transfer_size <= musb_ep->packet_sz) | 668 | if (transfer_size <= musb_ep->packet_sz) |
672 | musb_ep->dma->desired_mode = 0; | 669 | musb_ep->dma->desired_mode = 0; |
@@ -740,9 +737,15 @@ void musb_g_rx(struct musb *musb, u8 epnum) | |||
740 | u16 csr; | 737 | u16 csr; |
741 | struct usb_request *request; | 738 | struct usb_request *request; |
742 | void __iomem *mbase = musb->mregs; | 739 | void __iomem *mbase = musb->mregs; |
743 | struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out; | 740 | struct musb_ep *musb_ep; |
744 | void __iomem *epio = musb->endpoints[epnum].regs; | 741 | void __iomem *epio = musb->endpoints[epnum].regs; |
745 | struct dma_channel *dma; | 742 | struct dma_channel *dma; |
743 | struct musb_hw_ep *hw_ep = &musb->endpoints[epnum]; | ||
744 | |||
745 | if (hw_ep->is_shared_fifo) | ||
746 | musb_ep = &hw_ep->ep_in; | ||
747 | else | ||
748 | musb_ep = &hw_ep->ep_out; | ||
746 | 749 | ||
747 | musb_ep_select(mbase, epnum); | 750 | musb_ep_select(mbase, epnum); |
748 | 751 | ||
@@ -1081,7 +1084,7 @@ struct free_record { | |||
1081 | /* | 1084 | /* |
1082 | * Context: controller locked, IRQs blocked. | 1085 | * Context: controller locked, IRQs blocked. |
1083 | */ | 1086 | */ |
1084 | static void musb_ep_restart(struct musb *musb, struct musb_request *req) | 1087 | void musb_ep_restart(struct musb *musb, struct musb_request *req) |
1085 | { | 1088 | { |
1086 | DBG(3, "<== %s request %p len %u on hw_ep%d\n", | 1089 | DBG(3, "<== %s request %p len %u on hw_ep%d\n", |
1087 | req->tx ? "TX/IN" : "RX/OUT", | 1090 | req->tx ? "TX/IN" : "RX/OUT", |
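The txstate()/rxstate() changes size every DMA program as min(request->length - request->actual, channel max) and start it at request->dma + request->actual, so a partially completed request resumes where it stopped instead of restarting from offset zero. The arithmetic, shown as a small self-contained loop (DMA_MAX_LEN and the function names are invented for the sketch):

#include <stdio.h>

#define DMA_MAX_LEN 4096u	/* stand-in for channel->max_len */

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/* Advance one DMA-sized chunk of a request; returns bytes programmed. */
static unsigned int program_next_chunk(unsigned int length,
				       unsigned int *actual,
				       unsigned long dma_base)
{
	unsigned int request_size = min_u(length - *actual, DMA_MAX_LEN);
	unsigned long dma_addr = dma_base + *actual;	/* resume mid-request */

	printf("program %u bytes at 0x%lx\n", request_size, dma_addr);
	*actual += request_size;
	return request_size;
}

int main(void)
{
	unsigned int actual = 0, length = 10000;

	while (actual < length)
		program_next_chunk(length, &actual, 0x80000000ul);
	return 0;
}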
diff --git a/drivers/usb/musb/musb_gadget.h b/drivers/usb/musb/musb_gadget.h index c8b140325d8..572b1da7f2d 100644 --- a/drivers/usb/musb/musb_gadget.h +++ b/drivers/usb/musb/musb_gadget.h | |||
@@ -105,4 +105,6 @@ extern void musb_gadget_cleanup(struct musb *); | |||
105 | 105 | ||
106 | extern void musb_g_giveback(struct musb_ep *, struct usb_request *, int); | 106 | extern void musb_g_giveback(struct musb_ep *, struct usb_request *, int); |
107 | 107 | ||
108 | extern void musb_ep_restart(struct musb *, struct musb_request *); | ||
109 | |||
108 | #endif /* __MUSB_GADGET_H */ | 110 | #endif /* __MUSB_GADGET_H */ |
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c index 59bef8f3a35..6dd03f4c5f4 100644 --- a/drivers/usb/musb/musb_gadget_ep0.c +++ b/drivers/usb/musb/musb_gadget_ep0.c | |||
@@ -261,6 +261,7 @@ __acquires(musb->lock) | |||
261 | ctrlrequest->wIndex & 0x0f; | 261 | ctrlrequest->wIndex & 0x0f; |
262 | struct musb_ep *musb_ep; | 262 | struct musb_ep *musb_ep; |
263 | struct musb_hw_ep *ep; | 263 | struct musb_hw_ep *ep; |
264 | struct musb_request *request; | ||
264 | void __iomem *regs; | 265 | void __iomem *regs; |
265 | int is_in; | 266 | int is_in; |
266 | u16 csr; | 267 | u16 csr; |
@@ -302,6 +303,14 @@ __acquires(musb->lock) | |||
302 | musb_writew(regs, MUSB_RXCSR, csr); | 303 | musb_writew(regs, MUSB_RXCSR, csr); |
303 | } | 304 | } |
304 | 305 | ||
306 | /* Maybe start the first request in the queue */ | ||
307 | request = to_musb_request( | ||
308 | next_request(musb_ep)); | ||
309 | if (!musb_ep->busy && request) { | ||
310 | DBG(3, "restarting the request\n"); | ||
311 | musb_ep_restart(musb, request); | ||
312 | } | ||
313 | |||
305 | /* select ep0 again */ | 314 | /* select ep0 again */ |
306 | musb_ep_select(mbase, 0); | 315 | musb_ep_select(mbase, 0); |
307 | } break; | 316 | } break; |
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c index 877d20b1dff..9e65c47cc98 100644 --- a/drivers/usb/musb/musb_host.c +++ b/drivers/usb/musb/musb_host.c | |||
@@ -660,6 +660,12 @@ static bool musb_tx_dma_program(struct dma_controller *dma, | |||
660 | 660 | ||
661 | qh->segsize = length; | 661 | qh->segsize = length; |
662 | 662 | ||
663 | /* | ||
664 | * Ensure the data reaches to main memory before starting | ||
665 | * DMA transfer | ||
666 | */ | ||
667 | wmb(); | ||
668 | |||
663 | if (!dma->channel_program(channel, pkt_size, mode, | 669 | if (!dma->channel_program(channel, pkt_size, mode, |
664 | urb->transfer_dma + offset, length)) { | 670 | urb->transfer_dma + offset, length)) { |
665 | dma->channel_release(channel); | 671 | dma->channel_release(channel); |
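The wmb() added to musb_tx_dma_program() orders the CPU's buffer writes ahead of the write that starts the DMA engine. The sketch below expresses the same publish-then-signal ordering with a C11 release fence between shared-memory agents; it is only an analogy, since real device DMA additionally relies on the kernel's wmb()/MMIO accessor semantics, which user-space atomics do not model:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static uint8_t dma_buffer[64];
static atomic_uint dma_start;	/* stands in for the channel "go" register */

static void kick_dma(void)
{
	dma_buffer[0] = 0xab;				/* payload writes      */
	atomic_thread_fence(memory_order_release);	/* roughly wmb() here  */
	atomic_store_explicit(&dma_start, 1,		/* then start the DMA  */
			      memory_order_relaxed);
}

int main(void)
{
	kick_dma();
	printf("dma_start=%u buf[0]=0x%x\n",
	       atomic_load(&dma_start), dma_buffer[0]);
	return 0;
}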
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c index a76e0aa5cd3..391915093fe 100644 --- a/fs/ocfs2/acl.c +++ b/fs/ocfs2/acl.c | |||
@@ -209,7 +209,10 @@ static int ocfs2_acl_set_mode(struct inode *inode, struct buffer_head *di_bh, | |||
209 | } | 209 | } |
210 | 210 | ||
211 | inode->i_mode = new_mode; | 211 | inode->i_mode = new_mode; |
212 | inode->i_ctime = CURRENT_TIME; | ||
212 | di->i_mode = cpu_to_le16(inode->i_mode); | 213 | di->i_mode = cpu_to_le16(inode->i_mode); |
214 | di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec); | ||
215 | di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec); | ||
213 | 216 | ||
214 | ocfs2_journal_dirty(handle, di_bh); | 217 | ocfs2_journal_dirty(handle, di_bh); |
215 | 218 | ||
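ocfs2_acl_set_mode() now bumps the in-core i_ctime along with i_mode and mirrors both into the on-disk inode as little-endian values, matching how the rest of the dinode is stored. A small user-space example of stamping a fixed-endian timestamp field, using glibc's htole64()/htole32() (the struct below is invented for illustration, not the real ocfs2_dinode layout):

#define _DEFAULT_SOURCE
#include <endian.h>	/* htole64/htole32: glibc/Linux specific */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Toy on-disk inode: fixed-endian fields, like the dinode's ctime pair. */
struct disk_inode {
	uint64_t ctime_sec_le;
	uint32_t ctime_nsec_le;
};

static void stamp_ctime(struct disk_inode *di)
{
	struct timespec now;

	clock_gettime(CLOCK_REALTIME, &now);
	di->ctime_sec_le  = htole64((uint64_t)now.tv_sec);
	di->ctime_nsec_le = htole32((uint32_t)now.tv_nsec);
}

int main(void)
{
	struct disk_inode di;

	stamp_ctime(&di);
	printf("sec=%llu nsec=%u\n",
	       (unsigned long long)le64toh(di.ctime_sec_le),
	       le32toh(di.ctime_nsec_le));
	return 0;
}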
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c index 1361997cf20..cbe2f057cc2 100644 --- a/fs/ocfs2/cluster/tcp.c +++ b/fs/ocfs2/cluster/tcp.c | |||
@@ -977,7 +977,7 @@ static int o2net_tx_can_proceed(struct o2net_node *nn, | |||
977 | int o2net_send_message_vec(u32 msg_type, u32 key, struct kvec *caller_vec, | 977 | int o2net_send_message_vec(u32 msg_type, u32 key, struct kvec *caller_vec, |
978 | size_t caller_veclen, u8 target_node, int *status) | 978 | size_t caller_veclen, u8 target_node, int *status) |
979 | { | 979 | { |
980 | int ret; | 980 | int ret = 0; |
981 | struct o2net_msg *msg = NULL; | 981 | struct o2net_msg *msg = NULL; |
982 | size_t veclen, caller_bytes = 0; | 982 | size_t veclen, caller_bytes = 0; |
983 | struct kvec *vec = NULL; | 983 | struct kvec *vec = NULL; |
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c index f04ebcfffc4..c49f6de0e7a 100644 --- a/fs/ocfs2/dir.c +++ b/fs/ocfs2/dir.c | |||
@@ -3931,6 +3931,15 @@ static int ocfs2_dx_dir_rebalance(struct ocfs2_super *osb, struct inode *dir, | |||
3931 | goto out_commit; | 3931 | goto out_commit; |
3932 | } | 3932 | } |
3933 | 3933 | ||
3934 | cpos = split_hash; | ||
3935 | ret = ocfs2_dx_dir_new_cluster(dir, &et, cpos, handle, | ||
3936 | data_ac, meta_ac, new_dx_leaves, | ||
3937 | num_dx_leaves); | ||
3938 | if (ret) { | ||
3939 | mlog_errno(ret); | ||
3940 | goto out_commit; | ||
3941 | } | ||
3942 | |||
3934 | for (i = 0; i < num_dx_leaves; i++) { | 3943 | for (i = 0; i < num_dx_leaves; i++) { |
3935 | ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir), | 3944 | ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir), |
3936 | orig_dx_leaves[i], | 3945 | orig_dx_leaves[i], |
@@ -3939,15 +3948,14 @@ static int ocfs2_dx_dir_rebalance(struct ocfs2_super *osb, struct inode *dir, | |||
3939 | mlog_errno(ret); | 3948 | mlog_errno(ret); |
3940 | goto out_commit; | 3949 | goto out_commit; |
3941 | } | 3950 | } |
3942 | } | ||
3943 | 3951 | ||
3944 | cpos = split_hash; | 3952 | ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir), |
3945 | ret = ocfs2_dx_dir_new_cluster(dir, &et, cpos, handle, | 3953 | new_dx_leaves[i], |
3946 | data_ac, meta_ac, new_dx_leaves, | 3954 | OCFS2_JOURNAL_ACCESS_WRITE); |
3947 | num_dx_leaves); | 3955 | if (ret) { |
3948 | if (ret) { | 3956 | mlog_errno(ret); |
3949 | mlog_errno(ret); | 3957 | goto out_commit; |
3950 | goto out_commit; | 3958 | } |
3951 | } | 3959 | } |
3952 | 3960 | ||
3953 | ocfs2_dx_dir_transfer_leaf(dir, split_hash, handle, tmp_dx_leaf, | 3961 | ocfs2_dx_dir_transfer_leaf(dir, split_hash, handle, tmp_dx_leaf, |
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h index 4b6ae2c13b4..765298908f1 100644 --- a/fs/ocfs2/dlm/dlmcommon.h +++ b/fs/ocfs2/dlm/dlmcommon.h | |||
@@ -1030,6 +1030,7 @@ int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, | |||
1030 | struct dlm_lock_resource *res); | 1030 | struct dlm_lock_resource *res); |
1031 | void dlm_clean_master_list(struct dlm_ctxt *dlm, | 1031 | void dlm_clean_master_list(struct dlm_ctxt *dlm, |
1032 | u8 dead_node); | 1032 | u8 dead_node); |
1033 | void dlm_force_free_mles(struct dlm_ctxt *dlm); | ||
1033 | int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock); | 1034 | int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock); |
1034 | int __dlm_lockres_has_locks(struct dlm_lock_resource *res); | 1035 | int __dlm_lockres_has_locks(struct dlm_lock_resource *res); |
1035 | int __dlm_lockres_unused(struct dlm_lock_resource *res); | 1036 | int __dlm_lockres_unused(struct dlm_lock_resource *res); |
diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c index 5efdd37dfe4..901ca52bf86 100644 --- a/fs/ocfs2/dlm/dlmdebug.c +++ b/fs/ocfs2/dlm/dlmdebug.c | |||
@@ -636,8 +636,14 @@ static void *lockres_seq_start(struct seq_file *m, loff_t *pos) | |||
636 | spin_lock(&dlm->track_lock); | 636 | spin_lock(&dlm->track_lock); |
637 | if (oldres) | 637 | if (oldres) |
638 | track_list = &oldres->tracking; | 638 | track_list = &oldres->tracking; |
639 | else | 639 | else { |
640 | track_list = &dlm->tracking_list; | 640 | track_list = &dlm->tracking_list; |
641 | if (list_empty(track_list)) { | ||
642 | dl = NULL; | ||
643 | spin_unlock(&dlm->track_lock); | ||
644 | goto bail; | ||
645 | } | ||
646 | } | ||
641 | 647 | ||
642 | list_for_each_entry(res, track_list, tracking) { | 648 | list_for_each_entry(res, track_list, tracking) { |
643 | if (&res->tracking == &dlm->tracking_list) | 649 | if (&res->tracking == &dlm->tracking_list) |
@@ -660,6 +666,7 @@ static void *lockres_seq_start(struct seq_file *m, loff_t *pos) | |||
660 | } else | 666 | } else |
661 | dl = NULL; | 667 | dl = NULL; |
662 | 668 | ||
669 | bail: | ||
663 | /* passed to seq_show */ | 670 | /* passed to seq_show */ |
664 | return dl; | 671 | return dl; |
665 | } | 672 | } |
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c index 153abb5abef..11a5c87fd7f 100644 --- a/fs/ocfs2/dlm/dlmdomain.c +++ b/fs/ocfs2/dlm/dlmdomain.c | |||
@@ -693,6 +693,7 @@ void dlm_unregister_domain(struct dlm_ctxt *dlm) | |||
693 | 693 | ||
694 | dlm_mark_domain_leaving(dlm); | 694 | dlm_mark_domain_leaving(dlm); |
695 | dlm_leave_domain(dlm); | 695 | dlm_leave_domain(dlm); |
696 | dlm_force_free_mles(dlm); | ||
696 | dlm_complete_dlm_shutdown(dlm); | 697 | dlm_complete_dlm_shutdown(dlm); |
697 | } | 698 | } |
698 | dlm_put(dlm); | 699 | dlm_put(dlm); |
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index ffb4c68dafa..f564b0e5f80 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c | |||
@@ -3433,3 +3433,43 @@ void dlm_lockres_release_ast(struct dlm_ctxt *dlm, | |||
3433 | wake_up(&res->wq); | 3433 | wake_up(&res->wq); |
3434 | wake_up(&dlm->migration_wq); | 3434 | wake_up(&dlm->migration_wq); |
3435 | } | 3435 | } |
3436 | |||
3437 | void dlm_force_free_mles(struct dlm_ctxt *dlm) | ||
3438 | { | ||
3439 | int i; | ||
3440 | struct hlist_head *bucket; | ||
3441 | struct dlm_master_list_entry *mle; | ||
3442 | struct hlist_node *tmp, *list; | ||
3443 | |||
3444 | /* | ||
3445 | * We notified all other nodes that we are exiting the domain and | ||
3446 | * marked the dlm state to DLM_CTXT_LEAVING. If any mles are still | ||
3447 | * around we force free them and wake any processes that are waiting | ||
3448 | * on the mles | ||
3449 | */ | ||
3450 | spin_lock(&dlm->spinlock); | ||
3451 | spin_lock(&dlm->master_lock); | ||
3452 | |||
3453 | BUG_ON(dlm->dlm_state != DLM_CTXT_LEAVING); | ||
3454 | BUG_ON((find_next_bit(dlm->domain_map, O2NM_MAX_NODES, 0) < O2NM_MAX_NODES)); | ||
3455 | |||
3456 | for (i = 0; i < DLM_HASH_BUCKETS; i++) { | ||
3457 | bucket = dlm_master_hash(dlm, i); | ||
3458 | hlist_for_each_safe(list, tmp, bucket) { | ||
3459 | mle = hlist_entry(list, struct dlm_master_list_entry, | ||
3460 | master_hash_node); | ||
3461 | if (mle->type != DLM_MLE_BLOCK) { | ||
3462 | mlog(ML_ERROR, "bad mle: %p\n", mle); | ||
3463 | dlm_print_one_mle(mle); | ||
3464 | } | ||
3465 | atomic_set(&mle->woken, 1); | ||
3466 | wake_up(&mle->wq); | ||
3467 | |||
3468 | __dlm_unlink_mle(dlm, mle); | ||
3469 | __dlm_mle_detach_hb_events(dlm, mle); | ||
3470 | __dlm_put_mle(mle); | ||
3471 | } | ||
3472 | } | ||
3473 | spin_unlock(&dlm->master_lock); | ||
3474 | spin_unlock(&dlm->spinlock); | ||
3475 | } | ||
diff --git a/fs/ocfs2/dlmglue.h b/fs/ocfs2/dlmglue.h index d1ce48e1b3d..1d596d8c4a4 100644 --- a/fs/ocfs2/dlmglue.h +++ b/fs/ocfs2/dlmglue.h | |||
@@ -84,6 +84,7 @@ enum { | |||
84 | OI_LS_PARENT, | 84 | OI_LS_PARENT, |
85 | OI_LS_RENAME1, | 85 | OI_LS_RENAME1, |
86 | OI_LS_RENAME2, | 86 | OI_LS_RENAME2, |
87 | OI_LS_REFLINK_TARGET, | ||
87 | }; | 88 | }; |
88 | 89 | ||
89 | int ocfs2_dlm_init(struct ocfs2_super *osb); | 90 | int ocfs2_dlm_init(struct ocfs2_super *osb); |
diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h index 33f1c9a8258..fa31d05e41b 100644 --- a/fs/ocfs2/ocfs2_fs.h +++ b/fs/ocfs2/ocfs2_fs.h | |||
@@ -235,18 +235,31 @@ | |||
235 | #define OCFS2_HAS_REFCOUNT_FL (0x0010) | 235 | #define OCFS2_HAS_REFCOUNT_FL (0x0010) |
236 | 236 | ||
237 | /* Inode attributes, keep in sync with EXT2 */ | 237 | /* Inode attributes, keep in sync with EXT2 */ |
238 | #define OCFS2_SECRM_FL (0x00000001) /* Secure deletion */ | 238 | #define OCFS2_SECRM_FL FS_SECRM_FL /* Secure deletion */ |
239 | #define OCFS2_UNRM_FL (0x00000002) /* Undelete */ | 239 | #define OCFS2_UNRM_FL FS_UNRM_FL /* Undelete */ |
240 | #define OCFS2_COMPR_FL (0x00000004) /* Compress file */ | 240 | #define OCFS2_COMPR_FL FS_COMPR_FL /* Compress file */ |
241 | #define OCFS2_SYNC_FL (0x00000008) /* Synchronous updates */ | 241 | #define OCFS2_SYNC_FL FS_SYNC_FL /* Synchronous updates */ |
242 | #define OCFS2_IMMUTABLE_FL (0x00000010) /* Immutable file */ | 242 | #define OCFS2_IMMUTABLE_FL FS_IMMUTABLE_FL /* Immutable file */ |
243 | #define OCFS2_APPEND_FL (0x00000020) /* writes to file may only append */ | 243 | #define OCFS2_APPEND_FL FS_APPEND_FL /* writes to file may only append */ |
244 | #define OCFS2_NODUMP_FL (0x00000040) /* do not dump file */ | 244 | #define OCFS2_NODUMP_FL FS_NODUMP_FL /* do not dump file */ |
245 | #define OCFS2_NOATIME_FL (0x00000080) /* do not update atime */ | 245 | #define OCFS2_NOATIME_FL FS_NOATIME_FL /* do not update atime */ |
246 | #define OCFS2_DIRSYNC_FL (0x00010000) /* dirsync behaviour (directories only) */ | 246 | /* Reserved for compression usage... */ |
247 | 247 | #define OCFS2_DIRTY_FL FS_DIRTY_FL | |
248 | #define OCFS2_FL_VISIBLE (0x000100FF) /* User visible flags */ | 248 | #define OCFS2_COMPRBLK_FL FS_COMPRBLK_FL /* One or more compressed clusters */ |
249 | #define OCFS2_FL_MODIFIABLE (0x000100FF) /* User modifiable flags */ | 249 | #define OCFS2_NOCOMP_FL FS_NOCOMP_FL /* Don't compress */ |
250 | #define OCFS2_ECOMPR_FL FS_ECOMPR_FL /* Compression error */ | ||
251 | /* End compression flags --- maybe not all used */ | ||
252 | #define OCFS2_BTREE_FL FS_BTREE_FL /* btree format dir */ | ||
253 | #define OCFS2_INDEX_FL FS_INDEX_FL /* hash-indexed directory */ | ||
254 | #define OCFS2_IMAGIC_FL FS_IMAGIC_FL /* AFS directory */ | ||
255 | #define OCFS2_JOURNAL_DATA_FL FS_JOURNAL_DATA_FL /* Reserved for ext3 */ | ||
256 | #define OCFS2_NOTAIL_FL FS_NOTAIL_FL /* file tail should not be merged */ | ||
257 | #define OCFS2_DIRSYNC_FL FS_DIRSYNC_FL /* dirsync behaviour (directories only) */ | ||
258 | #define OCFS2_TOPDIR_FL FS_TOPDIR_FL /* Top of directory hierarchies*/ | ||
259 | #define OCFS2_RESERVED_FL FS_RESERVED_FL /* reserved for ext2 lib */ | ||
260 | |||
261 | #define OCFS2_FL_VISIBLE FS_FL_USER_VISIBLE /* User visible flags */ | ||
262 | #define OCFS2_FL_MODIFIABLE FS_FL_USER_MODIFIABLE /* User modifiable flags */ | ||
250 | 263 | ||
251 | /* | 264 | /* |
252 | * Extent record flags (e_node.leaf.flags) | 265 | * Extent record flags (e_node.leaf.flags) |
diff --git a/fs/ocfs2/ocfs2_ioctl.h b/fs/ocfs2/ocfs2_ioctl.h index 2d3420af1a8..5d241505690 100644 --- a/fs/ocfs2/ocfs2_ioctl.h +++ b/fs/ocfs2/ocfs2_ioctl.h | |||
@@ -23,10 +23,10 @@ | |||
23 | /* | 23 | /* |
24 | * ioctl commands | 24 | * ioctl commands |
25 | */ | 25 | */ |
26 | #define OCFS2_IOC_GETFLAGS _IOR('f', 1, long) | 26 | #define OCFS2_IOC_GETFLAGS FS_IOC_GETFLAGS |
27 | #define OCFS2_IOC_SETFLAGS _IOW('f', 2, long) | 27 | #define OCFS2_IOC_SETFLAGS FS_IOC_SETFLAGS |
28 | #define OCFS2_IOC32_GETFLAGS _IOR('f', 1, int) | 28 | #define OCFS2_IOC32_GETFLAGS FS_IOC32_GETFLAGS |
29 | #define OCFS2_IOC32_SETFLAGS _IOW('f', 2, int) | 29 | #define OCFS2_IOC32_SETFLAGS FS_IOC32_SETFLAGS |
30 | 30 | ||
31 | /* | 31 | /* |
32 | * Space reservation / allocation / free ioctls and argument structure | 32 | * Space reservation / allocation / free ioctls and argument structure |
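
The ioctl numbers above now reuse the generic FS_IOC_* definitions, so standard tools that issue FS_IOC_GETFLAGS/FS_IOC_SETFLAGS work against ocfs2 files without a filesystem-specific request code. A minimal userspace sketch of querying flags this way — the mount path is a placeholder and error handling is kept to perror():

    /* Query inode attribute flags via the generic FS_IOC_GETFLAGS ioctl. */
    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>

    int main(void)
    {
        int fd = open("/mnt/ocfs2/testfile", O_RDONLY);   /* placeholder path */
        if (fd < 0) {
            perror("open");
            return 1;
        }

        int flags = 0;
        if (ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0) {     /* same number OCFS2_IOC_GETFLAGS maps to now */
            perror("FS_IOC_GETFLAGS");
            close(fd);
            return 1;
        }

        printf("flags: %#x  append-only: %s\n",
               flags, (flags & FS_APPEND_FL) ? "yes" : "no");
        close(fd);
        return 0;
    }
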
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c index 0afeda83120..efdd7560740 100644 --- a/fs/ocfs2/refcounttree.c +++ b/fs/ocfs2/refcounttree.c | |||
@@ -4201,8 +4201,9 @@ static int __ocfs2_reflink(struct dentry *old_dentry, | |||
4201 | goto out; | 4201 | goto out; |
4202 | } | 4202 | } |
4203 | 4203 | ||
4204 | mutex_lock(&new_inode->i_mutex); | 4204 | mutex_lock_nested(&new_inode->i_mutex, I_MUTEX_CHILD); |
4205 | ret = ocfs2_inode_lock(new_inode, &new_bh, 1); | 4205 | ret = ocfs2_inode_lock_nested(new_inode, &new_bh, 1, |
4206 | OI_LS_REFLINK_TARGET); | ||
4206 | if (ret) { | 4207 | if (ret) { |
4207 | mlog_errno(ret); | 4208 | mlog_errno(ret); |
4208 | goto out_unlock; | 4209 | goto out_unlock; |
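
The reflink hunk takes the target inode's i_mutex and cluster lock with explicit nesting annotations (I_MUTEX_CHILD, OI_LS_REFLINK_TARGET) so lockdep can tell the two inode locks held at once apart. A rough kernel-style sketch of the underlying idiom — not the ocfs2 code itself, and only meaningful inside the kernel with lockdep enabled:

    #include <linux/fs.h>
    #include <linux/mutex.h>

    /* Lock two related inodes in a fixed order and tell lockdep which role
     * each plays, so the second i_mutex acquisition is not reported as a
     * recursive deadlock.  Sketch only. */
    static void lock_pair(struct inode *parent, struct inode *child)
    {
        mutex_lock_nested(&parent->i_mutex, I_MUTEX_PARENT);
        mutex_lock_nested(&child->i_mutex, I_MUTEX_CHILD);
    }

    static void unlock_pair(struct inode *parent, struct inode *child)
    {
        mutex_unlock(&child->i_mutex);
        mutex_unlock(&parent->i_mutex);
    }

Without CONFIG_DEBUG_LOCK_ALLOC, mutex_lock_nested() falls back to a plain mutex_lock(), so the annotation costs nothing in production builds.
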
diff --git a/fs/ocfs2/reservations.c b/fs/ocfs2/reservations.c index d8b6e4259b8..3e78db361bc 100644 --- a/fs/ocfs2/reservations.c +++ b/fs/ocfs2/reservations.c | |||
@@ -732,25 +732,23 @@ int ocfs2_resmap_resv_bits(struct ocfs2_reservation_map *resmap, | |||
732 | struct ocfs2_alloc_reservation *resv, | 732 | struct ocfs2_alloc_reservation *resv, |
733 | int *cstart, int *clen) | 733 | int *cstart, int *clen) |
734 | { | 734 | { |
735 | unsigned int wanted = *clen; | ||
736 | |||
737 | if (resv == NULL || ocfs2_resmap_disabled(resmap)) | 735 | if (resv == NULL || ocfs2_resmap_disabled(resmap)) |
738 | return -ENOSPC; | 736 | return -ENOSPC; |
739 | 737 | ||
740 | spin_lock(&resv_lock); | 738 | spin_lock(&resv_lock); |
741 | 739 | ||
742 | /* | ||
743 | * We don't want to over-allocate for temporary | ||
744 | * windows. Otherwise, we run the risk of fragmenting the | ||
745 | * allocation space. | ||
746 | */ | ||
747 | wanted = ocfs2_resv_window_bits(resmap, resv); | ||
748 | if ((resv->r_flags & OCFS2_RESV_FLAG_TMP) || wanted < *clen) | ||
749 | wanted = *clen; | ||
750 | |||
751 | if (ocfs2_resv_empty(resv)) { | 740 | if (ocfs2_resv_empty(resv)) { |
752 | mlog(0, "empty reservation, find new window\n"); | 741 | /* |
742 | * We don't want to over-allocate for temporary | ||
743 | * windows. Otherwise, we run the risk of fragmenting the | ||
744 | * allocation space. | ||
745 | */ | ||
746 | unsigned int wanted = ocfs2_resv_window_bits(resmap, resv); | ||
753 | 747 | ||
748 | if ((resv->r_flags & OCFS2_RESV_FLAG_TMP) || wanted < *clen) | ||
749 | wanted = *clen; | ||
750 | |||
751 | mlog(0, "empty reservation, find new window\n"); | ||
754 | /* | 752 | /* |
755 | * Try to get a window here. If it works, we must fall | 753 | * Try to get a window here. If it works, we must fall |
756 | * through and test the bitmap . This avoids some | 754 | * through and test the bitmap . This avoids some |
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c index 8a286f54dca..849c2f0e0a0 100644 --- a/fs/ocfs2/suballoc.c +++ b/fs/ocfs2/suballoc.c | |||
@@ -357,7 +357,7 @@ out: | |||
357 | static void ocfs2_bg_discontig_add_extent(struct ocfs2_super *osb, | 357 | static void ocfs2_bg_discontig_add_extent(struct ocfs2_super *osb, |
358 | struct ocfs2_group_desc *bg, | 358 | struct ocfs2_group_desc *bg, |
359 | struct ocfs2_chain_list *cl, | 359 | struct ocfs2_chain_list *cl, |
360 | u64 p_blkno, u32 clusters) | 360 | u64 p_blkno, unsigned int clusters) |
361 | { | 361 | { |
362 | struct ocfs2_extent_list *el = &bg->bg_list; | 362 | struct ocfs2_extent_list *el = &bg->bg_list; |
363 | struct ocfs2_extent_rec *rec; | 363 | struct ocfs2_extent_rec *rec; |
@@ -369,7 +369,7 @@ static void ocfs2_bg_discontig_add_extent(struct ocfs2_super *osb, | |||
369 | rec->e_blkno = cpu_to_le64(p_blkno); | 369 | rec->e_blkno = cpu_to_le64(p_blkno); |
370 | rec->e_cpos = cpu_to_le32(le16_to_cpu(bg->bg_bits) / | 370 | rec->e_cpos = cpu_to_le32(le16_to_cpu(bg->bg_bits) / |
371 | le16_to_cpu(cl->cl_bpc)); | 371 | le16_to_cpu(cl->cl_bpc)); |
372 | rec->e_leaf_clusters = cpu_to_le32(clusters); | 372 | rec->e_leaf_clusters = cpu_to_le16(clusters); |
373 | le16_add_cpu(&bg->bg_bits, clusters * le16_to_cpu(cl->cl_bpc)); | 373 | le16_add_cpu(&bg->bg_bits, clusters * le16_to_cpu(cl->cl_bpc)); |
374 | le16_add_cpu(&bg->bg_free_bits_count, | 374 | le16_add_cpu(&bg->bg_free_bits_count, |
375 | clusters * le16_to_cpu(cl->cl_bpc)); | 375 | clusters * le16_to_cpu(cl->cl_bpc)); |
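
The suballoc fix stores the 16-bit e_leaf_clusters field with cpu_to_le16() instead of cpu_to_le32(); the wider conversion gets truncated on assignment, which happens to work on little-endian machines but drops the real value on big-endian ones. A small userspace illustration of that width mismatch, assuming glibc's <endian.h> helpers:

    /* Why the conversion width must match the field width.  On a
     * little-endian host both lines print 0x0007 and the bug is invisible;
     * on a big-endian host the correct line prints 0x0700 (the little-endian
     * byte order the on-disk format wants) while the mismatched one prints
     * 0x0000, because truncation keeps the wrong half of the 32-bit result. */
    #include <stdio.h>
    #include <stdint.h>
    #include <endian.h>

    int main(void)
    {
        uint16_t clusters = 7;
        uint16_t ok  = htole16(clusters);             /* correct: 16-bit conversion */
        uint16_t bad = (uint16_t)htole32(clusters);   /* wrong width, then truncated */

        printf("htole16:            0x%04x\n", ok);
        printf("htole32, truncated: 0x%04x\n", bad);
        return 0;
    }
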
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index d03469f6180..06fa5e77c40 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c | |||
@@ -1286,13 +1286,11 @@ int ocfs2_xattr_get_nolock(struct inode *inode, | |||
1286 | xis.inode_bh = xbs.inode_bh = di_bh; | 1286 | xis.inode_bh = xbs.inode_bh = di_bh; |
1287 | di = (struct ocfs2_dinode *)di_bh->b_data; | 1287 | di = (struct ocfs2_dinode *)di_bh->b_data; |
1288 | 1288 | ||
1289 | down_read(&oi->ip_xattr_sem); | ||
1290 | ret = ocfs2_xattr_ibody_get(inode, name_index, name, buffer, | 1289 | ret = ocfs2_xattr_ibody_get(inode, name_index, name, buffer, |
1291 | buffer_size, &xis); | 1290 | buffer_size, &xis); |
1292 | if (ret == -ENODATA && di->i_xattr_loc) | 1291 | if (ret == -ENODATA && di->i_xattr_loc) |
1293 | ret = ocfs2_xattr_block_get(inode, name_index, name, buffer, | 1292 | ret = ocfs2_xattr_block_get(inode, name_index, name, buffer, |
1294 | buffer_size, &xbs); | 1293 | buffer_size, &xbs); |
1295 | up_read(&oi->ip_xattr_sem); | ||
1296 | 1294 | ||
1297 | return ret; | 1295 | return ret; |
1298 | } | 1296 | } |
@@ -1316,8 +1314,10 @@ static int ocfs2_xattr_get(struct inode *inode, | |||
1316 | mlog_errno(ret); | 1314 | mlog_errno(ret); |
1317 | return ret; | 1315 | return ret; |
1318 | } | 1316 | } |
1317 | down_read(&OCFS2_I(inode)->ip_xattr_sem); | ||
1319 | ret = ocfs2_xattr_get_nolock(inode, di_bh, name_index, | 1318 | ret = ocfs2_xattr_get_nolock(inode, di_bh, name_index, |
1320 | name, buffer, buffer_size); | 1319 | name, buffer, buffer_size); |
1320 | up_read(&OCFS2_I(inode)->ip_xattr_sem); | ||
1321 | 1321 | ||
1322 | ocfs2_inode_unlock(inode, 0); | 1322 | ocfs2_inode_unlock(inode, 0); |
1323 | 1323 | ||
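
The xattr change moves ip_xattr_sem out of ocfs2_xattr_get_nolock() and up into ocfs2_xattr_get(), the usual split between a *_nolock helper and a locking wrapper. A generic userspace sketch of that split using a pthread rwlock — the names are illustrative, not taken from the ocfs2 code:

    /* "_nolock helper + locking wrapper" pattern: the helper assumes the
     * caller already holds the lock, so callers that need to do more work
     * under the same lock can call it directly without re-acquiring. */
    #include <pthread.h>

    static pthread_rwlock_t cache_lock = PTHREAD_RWLOCK_INITIALIZER;
    static int cached_value;

    /* Caller must hold cache_lock (read or write). */
    static int cache_get_nolock(void)
    {
        return cached_value;
    }

    /* Convenience wrapper for callers that hold no lock yet. */
    static int cache_get(void)
    {
        pthread_rwlock_rdlock(&cache_lock);
        int v = cache_get_nolock();
        pthread_rwlock_unlock(&cache_lock);
        return v;
    }
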
diff --git a/include/drm/drmP.h b/include/drm/drmP.h index bb5c41893c0..274eaaa15c3 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h | |||
@@ -612,7 +612,7 @@ struct drm_gem_object { | |||
612 | struct kref refcount; | 612 | struct kref refcount; |
613 | 613 | ||
614 | /** Handle count of this object. Each handle also holds a reference */ | 614 | /** Handle count of this object. Each handle also holds a reference */ |
615 | struct kref handlecount; | 615 | atomic_t handle_count; /* number of handles on this object */ |
616 | 616 | ||
617 | /** Related drm device */ | 617 | /** Related drm device */ |
618 | struct drm_device *dev; | 618 | struct drm_device *dev; |
@@ -1151,6 +1151,7 @@ extern int drm_release(struct inode *inode, struct file *filp); | |||
1151 | extern int drm_mmap(struct file *filp, struct vm_area_struct *vma); | 1151 | extern int drm_mmap(struct file *filp, struct vm_area_struct *vma); |
1152 | extern int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma); | 1152 | extern int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma); |
1153 | extern void drm_vm_open_locked(struct vm_area_struct *vma); | 1153 | extern void drm_vm_open_locked(struct vm_area_struct *vma); |
1154 | extern void drm_vm_close_locked(struct vm_area_struct *vma); | ||
1154 | extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait); | 1155 | extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait); |
1155 | 1156 | ||
1156 | /* Memory management support (drm_memory.h) */ | 1157 | /* Memory management support (drm_memory.h) */ |
@@ -1411,12 +1412,11 @@ int drm_gem_init(struct drm_device *dev); | |||
1411 | void drm_gem_destroy(struct drm_device *dev); | 1412 | void drm_gem_destroy(struct drm_device *dev); |
1412 | void drm_gem_object_release(struct drm_gem_object *obj); | 1413 | void drm_gem_object_release(struct drm_gem_object *obj); |
1413 | void drm_gem_object_free(struct kref *kref); | 1414 | void drm_gem_object_free(struct kref *kref); |
1414 | void drm_gem_object_free_unlocked(struct kref *kref); | ||
1415 | struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev, | 1415 | struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev, |
1416 | size_t size); | 1416 | size_t size); |
1417 | int drm_gem_object_init(struct drm_device *dev, | 1417 | int drm_gem_object_init(struct drm_device *dev, |
1418 | struct drm_gem_object *obj, size_t size); | 1418 | struct drm_gem_object *obj, size_t size); |
1419 | void drm_gem_object_handle_free(struct kref *kref); | 1419 | void drm_gem_object_handle_free(struct drm_gem_object *obj); |
1420 | void drm_gem_vm_open(struct vm_area_struct *vma); | 1420 | void drm_gem_vm_open(struct vm_area_struct *vma); |
1421 | void drm_gem_vm_close(struct vm_area_struct *vma); | 1421 | void drm_gem_vm_close(struct vm_area_struct *vma); |
1422 | int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); | 1422 | int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); |
@@ -1439,8 +1439,12 @@ drm_gem_object_unreference(struct drm_gem_object *obj) | |||
1439 | static inline void | 1439 | static inline void |
1440 | drm_gem_object_unreference_unlocked(struct drm_gem_object *obj) | 1440 | drm_gem_object_unreference_unlocked(struct drm_gem_object *obj) |
1441 | { | 1441 | { |
1442 | if (obj != NULL) | 1442 | if (obj != NULL) { |
1443 | kref_put(&obj->refcount, drm_gem_object_free_unlocked); | 1443 | struct drm_device *dev = obj->dev; |
1444 | mutex_lock(&dev->struct_mutex); | ||
1445 | kref_put(&obj->refcount, drm_gem_object_free); | ||
1446 | mutex_unlock(&dev->struct_mutex); | ||
1447 | } | ||
1444 | } | 1448 | } |
1445 | 1449 | ||
1446 | int drm_gem_handle_create(struct drm_file *file_priv, | 1450 | int drm_gem_handle_create(struct drm_file *file_priv, |
@@ -1451,7 +1455,7 @@ static inline void | |||
1451 | drm_gem_object_handle_reference(struct drm_gem_object *obj) | 1455 | drm_gem_object_handle_reference(struct drm_gem_object *obj) |
1452 | { | 1456 | { |
1453 | drm_gem_object_reference(obj); | 1457 | drm_gem_object_reference(obj); |
1454 | kref_get(&obj->handlecount); | 1458 | atomic_inc(&obj->handle_count); |
1455 | } | 1459 | } |
1456 | 1460 | ||
1457 | static inline void | 1461 | static inline void |
@@ -1460,12 +1464,15 @@ drm_gem_object_handle_unreference(struct drm_gem_object *obj) | |||
1460 | if (obj == NULL) | 1464 | if (obj == NULL) |
1461 | return; | 1465 | return; |
1462 | 1466 | ||
1467 | if (atomic_read(&obj->handle_count) == 0) | ||
1468 | return; | ||
1463 | /* | 1469 | /* |
1464 | * Must bump handle count first as this may be the last | 1470 | * Must bump handle count first as this may be the last |
1465 | * ref, in which case the object would disappear before we | 1471 | * ref, in which case the object would disappear before we |
1466 | * checked for a name | 1472 | * checked for a name |
1467 | */ | 1473 | */ |
1468 | kref_put(&obj->handlecount, drm_gem_object_handle_free); | 1474 | if (atomic_dec_and_test(&obj->handle_count)) |
1475 | drm_gem_object_handle_free(obj); | ||
1469 | drm_gem_object_unreference(obj); | 1476 | drm_gem_object_unreference(obj); |
1470 | } | 1477 | } |
1471 | 1478 | ||
@@ -1475,12 +1482,17 @@ drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj) | |||
1475 | if (obj == NULL) | 1482 | if (obj == NULL) |
1476 | return; | 1483 | return; |
1477 | 1484 | ||
1485 | if (atomic_read(&obj->handle_count) == 0) | ||
1486 | return; | ||
1487 | |||
1478 | /* | 1488 | /* |
1479 | * Must bump handle count first as this may be the last | 1489 | * Must bump handle count first as this may be the last |
1480 | * ref, in which case the object would disappear before we | 1490 | * ref, in which case the object would disappear before we |
1481 | * checked for a name | 1491 | * checked for a name |
1482 | */ | 1492 | */ |
1483 | kref_put(&obj->handlecount, drm_gem_object_handle_free); | 1493 | |
1494 | if (atomic_dec_and_test(&obj->handle_count)) | ||
1495 | drm_gem_object_handle_free(obj); | ||
1484 | drm_gem_object_unreference_unlocked(obj); | 1496 | drm_gem_object_unreference_unlocked(obj); |
1485 | } | 1497 | } |
1486 | 1498 | ||
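
The GEM hunks replace the handlecount kref with a bare atomic counter and free the handle-side state only when the count drops from one to zero, after guarding against a count that is already zero. A compact userspace analogue of that pattern using C11 atomics — the struct and function names are made up for the sketch:

    /* "Last handle triggers cleanup" counter, mirroring the
     * atomic_dec_and_test() style the hunk switches to. */
    #include <stdatomic.h>
    #include <stdio.h>

    struct object {
        atomic_int handle_count;
    };

    /* Called exactly once, when the last handle goes away. */
    static void object_handle_free(struct object *obj)
    {
        printf("dropping handle-side state for %p\n", (void *)obj);
    }

    static void handle_reference(struct object *obj)
    {
        atomic_fetch_add(&obj->handle_count, 1);
    }

    static void handle_unreference(struct object *obj)
    {
        if (atomic_load(&obj->handle_count) == 0)
            return;                               /* nothing outstanding */
        /* fetch_sub returns the old value, so 1 means this was the last handle */
        if (atomic_fetch_sub(&obj->handle_count, 1) == 1)
            object_handle_free(obj);
    }

    int main(void)
    {
        struct object obj;

        atomic_init(&obj.handle_count, 0);
        handle_reference(&obj);
        handle_unreference(&obj);                 /* prints once */
        return 0;
    }

The unlocked unreference variant in the header additionally takes dev->struct_mutex around the final kref_put() on the object itself; the sketch covers only the handle-count side.
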
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index 3a9940ef728..883c1d43989 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h | |||
@@ -85,7 +85,6 @@ | |||
85 | {0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ | 85 | {0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ |
86 | {0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ | 86 | {0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ |
87 | {0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ | 87 | {0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ |
88 | {0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ | ||
89 | {0x1002, 0x5548, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ | 88 | {0x1002, 0x5548, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ |
90 | {0x1002, 0x5549, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ | 89 | {0x1002, 0x5549, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ |
91 | {0x1002, 0x554A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ | 90 | {0x1002, 0x554A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ |
@@ -103,6 +102,7 @@ | |||
103 | {0x1002, 0x564F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | 102 | {0x1002, 0x564F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
104 | {0x1002, 0x5652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | 103 | {0x1002, 0x5652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
105 | {0x1002, 0x5653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | 104 | {0x1002, 0x5653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
105 | {0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ | ||
106 | {0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP}, \ | 106 | {0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP}, \ |
107 | {0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \ | 107 | {0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \ |
108 | {0x1002, 0x5954, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ | 108 | {0x1002, 0x5954, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ |
diff --git a/mm/fremap.c b/mm/fremap.c index 46f5dacf90a..ec520c7b28d 100644 --- a/mm/fremap.c +++ b/mm/fremap.c | |||
@@ -125,7 +125,6 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, | |||
125 | { | 125 | { |
126 | struct mm_struct *mm = current->mm; | 126 | struct mm_struct *mm = current->mm; |
127 | struct address_space *mapping; | 127 | struct address_space *mapping; |
128 | unsigned long end = start + size; | ||
129 | struct vm_area_struct *vma; | 128 | struct vm_area_struct *vma; |
130 | int err = -EINVAL; | 129 | int err = -EINVAL; |
131 | int has_write_lock = 0; | 130 | int has_write_lock = 0; |
@@ -142,6 +141,10 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, | |||
142 | if (start + size <= start) | 141 | if (start + size <= start) |
143 | return err; | 142 | return err; |
144 | 143 | ||
144 | /* Does pgoff wrap? */ | ||
145 | if (pgoff + (size >> PAGE_SHIFT) < pgoff) | ||
146 | return err; | ||
147 | |||
145 | /* Can we represent this offset inside this architecture's pte's? */ | 148 | /* Can we represent this offset inside this architecture's pte's? */ |
146 | #if PTE_FILE_MAX_BITS < BITS_PER_LONG | 149 | #if PTE_FILE_MAX_BITS < BITS_PER_LONG |
147 | if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS)) | 150 | if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS)) |
@@ -168,7 +171,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, | |||
168 | if (!(vma->vm_flags & VM_CAN_NONLINEAR)) | 171 | if (!(vma->vm_flags & VM_CAN_NONLINEAR)) |
169 | goto out; | 172 | goto out; |
170 | 173 | ||
171 | if (end <= start || start < vma->vm_start || end > vma->vm_end) | 174 | if (start < vma->vm_start || start + size > vma->vm_end) |
172 | goto out; | 175 | goto out; |
173 | 176 | ||
174 | /* Must set VM_NONLINEAR before any pages are populated. */ | 177 | /* Must set VM_NONLINEAR before any pages are populated. */ |
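
The remap_file_pages() fix rejects requests where pgoff plus the page count wraps around: with unsigned arithmetic, overflow shows up as the sum comparing smaller than either operand. A tiny standalone illustration of the same test (the values are arbitrary):

    /* Unsigned wraparound check: a + b overflows iff (a + b) < a. */
    #include <stdio.h>
    #include <stdbool.h>

    static bool pgoff_wraps(unsigned long pgoff, unsigned long npages)
    {
        return pgoff + npages < pgoff;
    }

    int main(void)
    {
        printf("%d\n", pgoff_wraps(100, 50));        /* 0: fits */
        printf("%d\n", pgoff_wraps(~0UL - 10, 50));  /* 1: wraps */
        return 0;
    }
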
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c index b697fd2a6f8..10bbbaf6ebc 100644 --- a/sound/pci/hda/patch_analog.c +++ b/sound/pci/hda/patch_analog.c | |||
@@ -3641,6 +3641,7 @@ static struct snd_pci_quirk ad1984_cfg_tbl[] = { | |||
3641 | /* Lenovo Thinkpad T61/X61 */ | 3641 | /* Lenovo Thinkpad T61/X61 */ |
3642 | SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo Thinkpad", AD1984_THINKPAD), | 3642 | SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo Thinkpad", AD1984_THINKPAD), |
3643 | SND_PCI_QUIRK(0x1028, 0x0214, "Dell T3400", AD1984_DELL_DESKTOP), | 3643 | SND_PCI_QUIRK(0x1028, 0x0214, "Dell T3400", AD1984_DELL_DESKTOP), |
3644 | SND_PCI_QUIRK(0x1028, 0x0233, "Dell Latitude E6400", AD1984_DELL_DESKTOP), | ||
3644 | {} | 3645 | {} |
3645 | }; | 3646 | }; |
3646 | 3647 | ||
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index a1312a6c8af..a432e6efd19 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -1594,12 +1594,22 @@ static void alc_auto_parse_digital(struct hda_codec *codec) | |||
1594 | } | 1594 | } |
1595 | 1595 | ||
1596 | if (spec->autocfg.dig_in_pin) { | 1596 | if (spec->autocfg.dig_in_pin) { |
1597 | hda_nid_t dig_nid; | 1597 | dig_nid = codec->start_nid; |
1598 | err = snd_hda_get_connections(codec, | 1598 | for (i = 0; i < codec->num_nodes; i++, dig_nid++) { |
1599 | spec->autocfg.dig_in_pin, | 1599 | unsigned int wcaps = get_wcaps(codec, dig_nid); |
1600 | &dig_nid, 1); | 1600 | if (get_wcaps_type(wcaps) != AC_WID_AUD_IN) |
1601 | if (err > 0) | 1601 | continue; |
1602 | spec->dig_in_nid = dig_nid; | 1602 | if (!(wcaps & AC_WCAP_DIGITAL)) |
1603 | continue; | ||
1604 | if (!(wcaps & AC_WCAP_CONN_LIST)) | ||
1605 | continue; | ||
1606 | err = get_connection_index(codec, dig_nid, | ||
1607 | spec->autocfg.dig_in_pin); | ||
1608 | if (err >= 0) { | ||
1609 | spec->dig_in_nid = dig_nid; | ||
1610 | break; | ||
1611 | } | ||
1612 | } | ||
1603 | } | 1613 | } |
1604 | } | 1614 | } |
1605 | 1615 | ||
diff --git a/sound/pci/oxygen/oxygen.c b/sound/pci/oxygen/oxygen.c index 289cb4dacfc..6c0a11adb2a 100644 --- a/sound/pci/oxygen/oxygen.c +++ b/sound/pci/oxygen/oxygen.c | |||
@@ -543,6 +543,10 @@ static int __devinit get_oxygen_model(struct oxygen *chip, | |||
543 | chip->model.suspend = claro_suspend; | 543 | chip->model.suspend = claro_suspend; |
544 | chip->model.resume = claro_resume; | 544 | chip->model.resume = claro_resume; |
545 | chip->model.set_adc_params = set_ak5385_params; | 545 | chip->model.set_adc_params = set_ak5385_params; |
546 | chip->model.device_config = PLAYBACK_0_TO_I2S | | ||
547 | PLAYBACK_1_TO_SPDIF | | ||
548 | CAPTURE_0_FROM_I2S_2 | | ||
549 | CAPTURE_1_FROM_SPDIF; | ||
546 | break; | 550 | break; |
547 | } | 551 | } |
548 | if (id->driver_data == MODEL_MERIDIAN || | 552 | if (id->driver_data == MODEL_MERIDIAN || |
diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c index b92adef8e81..d6fa7bfd9aa 100644 --- a/sound/pci/rme9652/hdsp.c +++ b/sound/pci/rme9652/hdsp.c | |||
@@ -4609,6 +4609,7 @@ static int snd_hdsp_hwdep_ioctl(struct snd_hwdep *hw, struct file *file, unsigne | |||
4609 | if (err < 0) | 4609 | if (err < 0) |
4610 | return err; | 4610 | return err; |
4611 | 4611 | ||
4612 | memset(&info, 0, sizeof(info)); | ||
4612 | spin_lock_irqsave(&hdsp->lock, flags); | 4613 | spin_lock_irqsave(&hdsp->lock, flags); |
4613 | info.pref_sync_ref = (unsigned char)hdsp_pref_sync_ref(hdsp); | 4614 | info.pref_sync_ref = (unsigned char)hdsp_pref_sync_ref(hdsp); |
4614 | info.wordclock_sync_check = (unsigned char)hdsp_wc_sync_check(hdsp); | 4615 | info.wordclock_sync_check = (unsigned char)hdsp_wc_sync_check(hdsp); |
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c index 547b713d720..0c98ef9156d 100644 --- a/sound/pci/rme9652/hdspm.c +++ b/sound/pci/rme9652/hdspm.c | |||
@@ -4127,6 +4127,7 @@ static int snd_hdspm_hwdep_ioctl(struct snd_hwdep * hw, struct file *file, | |||
4127 | 4127 | ||
4128 | case SNDRV_HDSPM_IOCTL_GET_CONFIG_INFO: | 4128 | case SNDRV_HDSPM_IOCTL_GET_CONFIG_INFO: |
4129 | 4129 | ||
4130 | memset(&info, 0, sizeof(info)); | ||
4130 | spin_lock_irq(&hdspm->lock); | 4131 | spin_lock_irq(&hdspm->lock); |
4131 | info.pref_sync_ref = hdspm_pref_sync_ref(hdspm); | 4132 | info.pref_sync_ref = hdspm_pref_sync_ref(hdspm); |
4132 | info.wordclock_sync_check = hdspm_wc_sync_check(hdspm); | 4133 | info.wordclock_sync_check = hdspm_wc_sync_check(hdspm); |
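
Both RME hwdep handlers above now memset() the config_info structure before filling it, so struct padding and any members the handler skips are no longer copied out with stale kernel stack contents. A plain C sketch of the habit — the struct is hypothetical and memcpy() stands in for copy_to_user():

    #include <string.h>

    struct config_info {
        unsigned char pref_sync_ref;
        unsigned char wordclock_sync_check;
        int reserved[4];                  /* members this handler never touches */
    };

    static void get_config(struct config_info *user_out)
    {
        struct config_info info;

        memset(&info, 0, sizeof(info));   /* the fix: stale stack bytes cannot escape */
        info.pref_sync_ref = 1;           /* only some members are filled in explicitly */
        memcpy(user_out, &info, sizeof(info));
    }
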
diff --git a/sound/soc/sh/migor.c b/sound/soc/sh/migor.c index b823a5c9b9b..87e2b7fcbf1 100644 --- a/sound/soc/sh/migor.c +++ b/sound/soc/sh/migor.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/firmware.h> | 12 | #include <linux/firmware.h> |
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | 14 | ||
15 | #include <asm/clkdev.h> | ||
15 | #include <asm/clock.h> | 16 | #include <asm/clock.h> |
16 | 17 | ||
17 | #include <cpu/sh7722.h> | 18 | #include <cpu/sh7722.h> |
@@ -40,12 +41,12 @@ static struct clk_ops siumckb_clk_ops = { | |||
40 | }; | 41 | }; |
41 | 42 | ||
42 | static struct clk siumckb_clk = { | 43 | static struct clk siumckb_clk = { |
43 | .name = "siumckb_clk", | ||
44 | .id = -1, | ||
45 | .ops = &siumckb_clk_ops, | 44 | .ops = &siumckb_clk_ops, |
46 | .rate = 0, /* initialised at run-time */ | 45 | .rate = 0, /* initialised at run-time */ |
47 | }; | 46 | }; |
48 | 47 | ||
48 | static struct clk_lookup *siumckb_lookup; | ||
49 | |||
49 | static int migor_hw_params(struct snd_pcm_substream *substream, | 50 | static int migor_hw_params(struct snd_pcm_substream *substream, |
50 | struct snd_pcm_hw_params *params) | 51 | struct snd_pcm_hw_params *params) |
51 | { | 52 | { |
@@ -180,6 +181,13 @@ static int __init migor_init(void) | |||
180 | if (ret < 0) | 181 | if (ret < 0) |
181 | return ret; | 182 | return ret; |
182 | 183 | ||
184 | siumckb_lookup = clkdev_alloc(&siumckb_clk, "siumckb_clk", NULL); | ||
185 | if (!siumckb_lookup) { | ||
186 | ret = -ENOMEM; | ||
187 | goto eclkdevalloc; | ||
188 | } | ||
189 | clkdev_add(siumckb_lookup); | ||
190 | |||
183 | /* Port number used on this machine: port B */ | 191 | /* Port number used on this machine: port B */ |
184 | migor_snd_device = platform_device_alloc("soc-audio", 1); | 192 | migor_snd_device = platform_device_alloc("soc-audio", 1); |
185 | if (!migor_snd_device) { | 193 | if (!migor_snd_device) { |
@@ -200,12 +208,15 @@ static int __init migor_init(void) | |||
200 | epdevadd: | 208 | epdevadd: |
201 | platform_device_put(migor_snd_device); | 209 | platform_device_put(migor_snd_device); |
202 | epdevalloc: | 210 | epdevalloc: |
211 | clkdev_drop(siumckb_lookup); | ||
212 | eclkdevalloc: | ||
203 | clk_unregister(&siumckb_clk); | 213 | clk_unregister(&siumckb_clk); |
204 | return ret; | 214 | return ret; |
205 | } | 215 | } |
206 | 216 | ||
207 | static void __exit migor_exit(void) | 217 | static void __exit migor_exit(void) |
208 | { | 218 | { |
219 | clkdev_drop(siumckb_lookup); | ||
209 | clk_unregister(&siumckb_clk); | 220 | clk_unregister(&siumckb_clk); |
210 | platform_device_unregister(migor_snd_device); | 221 | platform_device_unregister(migor_snd_device); |
211 | } | 222 | } |
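
The migor init path now registers a clkdev lookup and, if any later step fails, drops it again through the chain of goto labels, newest resource first. A generic sketch of that reverse-order unwind style — malloc/free stand in for the clkdev and platform-device calls:

    #include <stdlib.h>

    /* Each failure jumps to the label that undoes everything acquired so
     * far, in reverse order of acquisition.  On success both resources stay
     * live, to be released by the matching exit path. */
    static int init_everything(void **clk_lookup, void **snd_device)
    {
        int ret;

        *clk_lookup = malloc(16);         /* stands in for clkdev_alloc() */
        if (!*clk_lookup)
            return -1;

        *snd_device = malloc(16);         /* stands in for platform_device_alloc() */
        if (!*snd_device) {
            ret = -1;
            goto err_free_lookup;
        }

        return 0;

    err_free_lookup:
        free(*clk_lookup);                /* undo the earlier step */
        *clk_lookup = NULL;
        return ret;
    }
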
diff --git a/sound/soc/soc-cache.c b/sound/soc/soc-cache.c index adbc68ce905..f6b0d2829ea 100644 --- a/sound/soc/soc-cache.c +++ b/sound/soc/soc-cache.c | |||
@@ -203,8 +203,9 @@ static int snd_soc_8_16_write(struct snd_soc_codec *codec, unsigned int reg, | |||
203 | data[1] = (value >> 8) & 0xff; | 203 | data[1] = (value >> 8) & 0xff; |
204 | data[2] = value & 0xff; | 204 | data[2] = value & 0xff; |
205 | 205 | ||
206 | if (!snd_soc_codec_volatile_register(codec, reg)) | 206 | if (!snd_soc_codec_volatile_register(codec, reg) |
207 | reg_cache[reg] = value; | 207 | && reg < codec->reg_cache_size) |
208 | reg_cache[reg] = value; | ||
208 | 209 | ||
209 | if (codec->cache_only) { | 210 | if (codec->cache_only) { |
210 | codec->cache_sync = 1; | 211 | codec->cache_sync = 1; |
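
The soc-cache fix only mirrors the written value into reg_cache[] when the register is non-volatile and lies inside reg_cache_size, instead of indexing past the end of the cache array. A small userspace sketch of the guarded update, with illustrative names and a placeholder volatility test:

    /* Only mirror a register value into the cache when it is non-volatile
     * and actually falls inside the cached range. */
    #include <stdbool.h>
    #include <stdint.h>

    #define REG_CACHE_SIZE 64

    static uint16_t reg_cache[REG_CACHE_SIZE];

    static bool reg_is_volatile(unsigned int reg)
    {
        (void)reg;                        /* placeholder policy: nothing volatile */
        return false;
    }

    static void cache_write(unsigned int reg, uint16_t value)
    {
        if (!reg_is_volatile(reg) && reg < REG_CACHE_SIZE)
            reg_cache[reg] = value;
        /* the value would still be sent to the hardware either way */
    }
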