Diffstat (limited to 'arch')
-rw-r--r--  arch/i386/xen/enlighten.c                  |  4
-rw-r--r--  arch/powerpc/kernel/time.c                 |  8
-rw-r--r--  arch/powerpc/kernel/vdso.c                 | 12
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c  |  4
-rw-r--r--  arch/um/include/kern_util.h                |  2
-rw-r--r--  arch/um/kernel/irq.c                       |  7
-rw-r--r--  arch/um/os-Linux/file.c                    |  3
-rw-r--r--  arch/um/os-Linux/signal.c                  |  4
-rw-r--r--  arch/x86_64/mm/fault.c                     |  7
9 files changed, 37 insertions, 14 deletions
diff --git a/arch/i386/xen/enlighten.c b/arch/i386/xen/enlighten.c
index f0c37511d8da..f01bfcd4bdee 100644
--- a/arch/i386/xen/enlighten.c
+++ b/arch/i386/xen/enlighten.c
@@ -623,8 +623,8 @@ static unsigned long xen_read_cr2_direct(void)
 
 static void xen_write_cr4(unsigned long cr4)
 {
-	/* never allow TSC to be disabled */
-	native_write_cr4(cr4 & ~X86_CR4_TSD);
+	/* Just ignore cr4 changes; Xen doesn't allow us to do
+	   anything anyway. */
 }
 
 static unsigned long xen_read_cr3(void)
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 727a6699f2f4..c627cf86d1e3 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -239,7 +239,7 @@ static void snapshot_tb_and_purr(void *data)
 	struct cpu_purr_data *p = &__get_cpu_var(cpu_purr_data);
 
 	local_irq_save(flags);
-	p->tb = mftb();
+	p->tb = get_tb_or_rtc();
 	p->purr = mfspr(SPRN_PURR);
 	wmb();
 	p->initialized = 1;
@@ -317,7 +317,7 @@ static void snapshot_purr(void)
  */
 void snapshot_timebase(void)
 {
-	__get_cpu_var(last_jiffy) = get_tb();
+	__get_cpu_var(last_jiffy) = get_tb_or_rtc();
 	snapshot_purr();
 }
 
@@ -684,6 +684,8 @@ void timer_interrupt(struct pt_regs * regs)
 
 		write_seqlock(&xtime_lock);
 		tb_next_jiffy = tb_last_jiffy + tb_ticks_per_jiffy;
+		if (__USE_RTC() && tb_next_jiffy >= 1000000000)
+			tb_next_jiffy -= 1000000000;
 		if (per_cpu(last_jiffy, cpu) >= tb_next_jiffy) {
 			tb_last_jiffy = tb_next_jiffy;
 			do_timer(1);
@@ -977,7 +979,7 @@ void __init time_init(void)
 	tb_to_ns_scale = scale;
 	tb_to_ns_shift = shift;
 	/* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
-	boot_tb = get_tb();
+	boot_tb = get_tb_or_rtc();
 
 	tm = get_boot_time();
 
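
Note on the time.c hunks: get_tb() is replaced by get_tb_or_rtc() so that 601-class CPUs, which have an RTC register pair instead of a timebase, keep working; the new check in timer_interrupt() handles the RTC's wrap at 10^9, since its low word counts nanoseconds within the current second. A minimal sketch of what such a helper can look like, assuming the __USE_RTC()/get_rtcl()/get_tb() accessors from include/asm-powerpc/time.h; the helper's real definition is outside this diff:

	static inline u64 get_tb_or_rtc(void)
	{
		/* Sketch: on 601-style CPUs there is no timebase; RTCL counts
		 * nanoseconds and wraps at 1,000,000,000, which is why the
		 * timer_interrupt() hunk above subtracts 10^9 on wrap. */
		return __USE_RTC() ? get_rtcl() : get_tb();
	}
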
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index cef01e4e8989..213fa31ac537 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -98,6 +98,18 @@ static struct vdso_patch_def vdso_patches[] = {
 		CPU_FTR_USE_TB, 0,
 		"__kernel_gettimeofday", NULL
 	},
+	{
+		CPU_FTR_USE_TB, 0,
+		"__kernel_clock_gettime", NULL
+	},
+	{
+		CPU_FTR_USE_TB, 0,
+		"__kernel_clock_getres", NULL
+	},
+	{
+		CPU_FTR_USE_TB, 0,
+		"__kernel_get_tbfreq", NULL
+	},
 };
 
 /*
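
The new vdso_patches[] entries extend the existing __kernel_gettimeofday rule to the other timekeeping vDSO calls: on CPUs where CPU_FTR_USE_TB is not set, those symbols are patched out (NULL replacement name) so userspace falls back to the real syscalls. Roughly the shape of each entry, as a sketch; the real struct is defined earlier in vdso.c and its exact field names are assumed here:

	struct vdso_patch_def {
		unsigned long	ftr_mask, ftr_value;	/* patch when (cpu_features & ftr_mask) == ftr_value */
		const char	*gen_name;		/* vDSO symbol to patch */
		const char	*fix_name;		/* replacement symbol, or NULL to disable */
	};
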
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index c784edd40ea7..5bebe7fbe056 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -579,7 +579,7 @@ static struct spu *find_victim(struct spu_context *ctx)
 		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
 			struct spu_context *tmp = spu->ctx;
 
-			if (tmp->prio > ctx->prio &&
+			if (tmp && tmp->prio > ctx->prio &&
 			    (!victim || tmp->prio > victim->prio))
 				victim = spu->ctx;
 		}
@@ -611,9 +611,9 @@ static struct spu *find_victim(struct spu_context *ctx)
 
 			mutex_lock(&cbe_spu_info[node].list_mutex);
 			cbe_spu_info[node].nr_active--;
+			spu_unbind_context(spu, victim);
 			mutex_unlock(&cbe_spu_info[node].list_mutex);
 
-			spu_unbind_context(spu, victim);
 			victim->stats.invol_ctx_switch++;
 			spu->stats.invol_ctx_switch++;
 			mutex_unlock(&victim->state_mutex);
diff --git a/arch/um/include/kern_util.h b/arch/um/include/kern_util.h
index 8d7f7c1cb9c6..6c2be26f1d7d 100644
--- a/arch/um/include/kern_util.h
+++ b/arch/um/include/kern_util.h
@@ -117,7 +117,7 @@ extern void sigio_handler(int sig, union uml_pt_regs *regs);
 
 extern void copy_sc(union uml_pt_regs *regs, void *from);
 
-unsigned long to_irq_stack(int sig, unsigned long *mask_out);
+extern unsigned long to_irq_stack(unsigned long *mask_out);
 unsigned long from_irq_stack(int nested);
 
 #endif
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
index 9870febdbead..cf0dd9cf8c43 100644
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -518,13 +518,13 @@ int init_aio_irq(int irq, char *name, irq_handler_t handler)
 
 static unsigned long pending_mask;
 
-unsigned long to_irq_stack(int sig, unsigned long *mask_out)
+unsigned long to_irq_stack(unsigned long *mask_out)
 {
 	struct thread_info *ti;
 	unsigned long mask, old;
 	int nested;
 
-	mask = xchg(&pending_mask, 1 << sig);
+	mask = xchg(&pending_mask, *mask_out);
 	if(mask != 0){
 		/* If any interrupts come in at this point, we want to
 		 * make sure that their bits aren't lost by our
@@ -534,7 +534,7 @@ unsigned long to_irq_stack(int sig, unsigned long *mask_out)
 		 * and pending_mask contains a bit for each interrupt
 		 * that came in.
 		 */
-		old = 1 << sig;
+		old = *mask_out;
 		do {
 			old |= mask;
 			mask = xchg(&pending_mask, old);
@@ -550,6 +550,7 @@ unsigned long to_irq_stack(int sig, unsigned long *mask_out)
 
 		task = cpu_tasks[ti->cpu].task;
 		tti = task_thread_info(task);
+
 		*ti = *tti;
 		ti->real_thread = tti;
 		task->stack = ti;
diff --git a/arch/um/os-Linux/file.c b/arch/um/os-Linux/file.c
index 6f92f732d253..c3ecc2a84e0c 100644
--- a/arch/um/os-Linux/file.c
+++ b/arch/um/os-Linux/file.c
@@ -320,7 +320,8 @@ int os_file_size(char *file, unsigned long long *size_out)
 	}
 
 	if(S_ISBLK(buf.ust_mode)){
-		int fd, blocks;
+		int fd;
+		long blocks;
 
 		fd = os_open_file(file, of_read(OPENFLAGS()), 0);
 		if(fd < 0){
diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c
index 18e5c8b67eb8..b98f7ea2d2f6 100644
--- a/arch/um/os-Linux/signal.c
+++ b/arch/um/os-Linux/signal.c
@@ -119,7 +119,7 @@ void (*handlers[_NSIG])(int sig, struct sigcontext *sc);
 
 void handle_signal(int sig, struct sigcontext *sc)
 {
-	unsigned long pending = 0;
+	unsigned long pending = 1UL << sig;
 
 	do {
 		int nested, bail;
@@ -134,7 +134,7 @@ void handle_signal(int sig, struct sigcontext *sc)
 		 * have to return, and the upper handler will deal
 		 * with this interrupt.
 		 */
-		bail = to_irq_stack(sig, &pending);
+		bail = to_irq_stack(&pending);
 		if(bail)
 			return;
 
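
Taken together, the kern_util.h, irq.c and signal.c hunks change the to_irq_stack() interface: instead of receiving the signal number and computing 1 << sig itself, the function now takes a pointer to a mask that the caller has already seeded. A caller-side sketch restating what the signal.c hunk above does:

	/* As in handle_signal(): seed the mask with this signal's bit and
	 * hand its address to to_irq_stack(), which xchg()s it into
	 * pending_mask and returns nonzero when a lower handler already
	 * owns the IRQ stack and will run us. */
	unsigned long pending = 1UL << sig;

	if (to_irq_stack(&pending))
		return;

	/* ... dispatch every signal bit left in "pending", then call
	 * from_irq_stack() to switch back ... */
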
diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c
index 327c9f2fa626..54816adb8e93 100644
--- a/arch/x86_64/mm/fault.c
+++ b/arch/x86_64/mm/fault.c
@@ -374,6 +374,13 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 	if (unlikely(in_atomic() || !mm))
 		goto bad_area_nosemaphore;
 
+	/*
+	 * User-mode registers count as a user access even for any
+	 * potential system fault or CPU buglet.
+	 */
+	if (user_mode_vm(regs))
+		error_code |= PF_USER;
+
 again:
 	/* When running in the kernel we expect faults to occur only to
 	 * addresses in user space. All other faults represent errors in the
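
The fault.c hunk forces the PF_USER bit into error_code whenever the saved registers say the fault happened in user mode, so the rest of do_page_fault() treats it as a user access regardless of what the hardware reported. For orientation, the PF_* bits mirror the hardware page-fault error code and are defined near the top of fault.c, roughly as sketched here:

	/* Page-fault error code bits (sketch; values follow the error code
	 * the CPU pushes on a #PF exception): */
	#define PF_PROT		(1 << 0)	/* 0: not-present page, 1: protection fault */
	#define PF_WRITE	(1 << 1)	/* write access */
	#define PF_USER		(1 << 2)	/* fault while in user mode */
	#define PF_RSVD		(1 << 3)	/* reserved bit set in a page-table entry */
	#define PF_INSTR	(1 << 4)	/* instruction fetch */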