author    Linus Torvalds <torvalds@woody.linux-foundation.org> 2007-11-09 18:27:54 -0500
committer Linus Torvalds <torvalds@woody.linux-foundation.org> 2007-11-09 18:27:54 -0500
commit    a70a93229943c177f0062490b4f8e44be4cef685 (patch)
tree      24cc6f087307f18cda2f55ad91c7649dd5388b86 /arch
parent    a80b824f0b63fa3a8c269903828beb0837d738e7 (diff)
parent    e6fe6649b4ec11aa3075e394b4d8743eebe1f64c (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched
* git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched:
  sched: proper prototype for kernel/sched.c:migration_init()
  sched: avoid large irq-latencies in smp-balancing
  sched: fix copy_namespace() <-> sched_fork() dependency in do_fork
  sched: clean up the wakeup preempt check, #2
  sched: clean up the wakeup preempt check
  sched: wakeup preemption fix
  sched: remove PREEMPT_RESTRICT
  sched: turn off PREEMPT_RESTRICT
  KVM: fix !SMP build error
  x86: make nmi_cpu_busy() always defined
  x86: make ipi_handler() always defined
  sched: cleanup, use NSEC_PER_MSEC and NSEC_PER_SEC
  sched: reintroduce SMP tunings again
  sched: restore deterministic CPU accounting on powerpc
  sched: fix delay accounting regression
  sched: reintroduce the sched_min_granularity tunable
  sched: documentation: place_entity() comments
  sched: fix vslice
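Most of the arch churn below comes from "sched: restore deterministic CPU accounting on powerpc": the per-arch tick hooks (powerpc's account_process_vtime(), s390's account_tick_vtime()) are folded into a single common account_process_tick() called from the generic timer path. As a rough sketch of the shape the generic side takes after this series (that side is not part of this diff, so the exact body is an assumption reconstructed from the per-arch code removed below):

	/* kernel/timer.c (sketch, not part of this diff): the generic tick
	 * handler now owns the work each arch used to duplicate, and defers
	 * only the time accounting to the per-arch account_process_tick(). */
	void update_process_times(int user_tick)
	{
		struct task_struct *p = current;
		int cpu = smp_processor_id();

		account_process_tick(p, user_tick);	/* arch hook (powerpc, s390, ...) */
		run_local_timers();
		if (rcu_pending(cpu))
			rcu_check_callbacks(cpu, user_tick);
		scheduler_tick();
		run_posix_cpu_timers(p);
	}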
Diffstat (limited to 'arch')
-rw-r--r-- arch/powerpc/kernel/process.c   |  2 +-
-rw-r--r-- arch/powerpc/kernel/time.c      | 25 +------------------------
-rw-r--r-- arch/s390/kernel/time.c         |  4 ----
-rw-r--r-- arch/s390/kernel/vtime.c        |  8 +-------
-rw-r--r-- arch/x86/kernel/cpu/mtrr/main.c |  6 ++----
-rw-r--r-- arch/x86/kernel/nmi_32.c        |  4 ++--
6 files changed, 7 insertions(+), 42 deletions(-)
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index b9d88374f14f..41e13f4cc6e3 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -350,7 +350,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
 	local_irq_save(flags);
 
 	account_system_vtime(current);
-	account_process_vtime(current);
+	account_process_tick(current, 0);
 	calculate_steal_time();
 
 	last = _switch(old_thread, new_thread);
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 99ebcd3884d2..4beb6329dfb7 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -259,7 +259,7 @@ void account_system_vtime(struct task_struct *tsk)
  * user and system time records.
  * Must be called with interrupts disabled.
  */
-void account_process_vtime(struct task_struct *tsk)
+void account_process_tick(struct task_struct *tsk, int user_tick)
 {
 	cputime_t utime, utimescaled;
 
@@ -274,18 +274,6 @@ void account_process_vtime(struct task_struct *tsk)
 	account_user_time_scaled(tsk, utimescaled);
 }
 
-static void account_process_time(struct pt_regs *regs)
-{
-	int cpu = smp_processor_id();
-
-	account_process_vtime(current);
-	run_local_timers();
-	if (rcu_pending(cpu))
-		rcu_check_callbacks(cpu, user_mode(regs));
-	scheduler_tick();
-	run_posix_cpu_timers(current);
-}
-
 /*
  * Stuff for accounting stolen time.
  */
@@ -375,7 +363,6 @@ static void snapshot_purr(void)
 
 #else /* ! CONFIG_VIRT_CPU_ACCOUNTING */
 #define calc_cputime_factors()
-#define account_process_time(regs)	update_process_times(user_mode(regs))
 #define calculate_steal_time()		do { } while (0)
 #endif
 
@@ -599,16 +586,6 @@ void timer_interrupt(struct pt_regs * regs)
 	get_lppaca()->int_dword.fields.decr_int = 0;
 #endif
 
-	/*
-	 * We cannot disable the decrementer, so in the period
-	 * between this cpu's being marked offline in cpu_online_map
-	 * and calling stop-self, it is taking timer interrupts.
-	 * Avoid calling into the scheduler rebalancing code if this
-	 * is the case.
-	 */
-	if (!cpu_is_offline(cpu))
-		account_process_time(regs);
-
 	if (evt->event_handler)
 		evt->event_handler(evt);
 
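With account_process_time() gone, the decrementer handler leaves tick bookkeeping to the clock event handler, which reaches the generic update_process_times() shown earlier. Architectures without CONFIG_VIRT_CPU_ACCOUNTING fall back to a generic account_process_tick() that charges a whole jiffy per tick. A sketch of that fallback, as an assumption based on the accounting helpers visible in the hunks above rather than code from this diff:

	/* Generic fallback (sketch): charge one jiffy of user or system
	 * time to the interrupted task, depending on the tick's origin. */
	void account_process_tick(struct task_struct *p, int user_tick)
	{
		if (user_tick) {
			account_user_time(p, jiffies_to_cputime(1));
			account_user_time_scaled(p, jiffies_to_cputime(1));
		} else {
			account_system_time(p, HARDIRQ_OFFSET, jiffies_to_cputime(1));
			account_system_time_scaled(p, jiffies_to_cputime(1));
		}
	}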
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index a963fe81359e..22b800ce2126 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -145,12 +145,8 @@ void account_ticks(u64 time)
 	do_timer(ticks);
 #endif
 
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
-	account_tick_vtime(current);
-#else
 	while (ticks--)
 		update_process_times(user_mode(get_irq_regs()));
-#endif
 
 	s390_do_profile();
 }
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 84ff78de6bac..c5f05b3fb2c3 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -32,7 +32,7 @@ static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);
  * Update process times based on virtual cpu times stored by entry.S
  * to the lowcore fields user_timer, system_timer & steal_clock.
  */
-void account_tick_vtime(struct task_struct *tsk)
+void account_process_tick(struct task_struct *tsk, int user_tick)
 {
 	cputime_t cputime;
 	__u64 timer, clock;
@@ -64,12 +64,6 @@ void account_tick_vtime(struct task_struct *tsk)
 		S390_lowcore.steal_clock -= cputime << 12;
 		account_steal_time(tsk, cputime);
 	}
-
-	run_local_timers();
-	if (rcu_pending(smp_processor_id()))
-		rcu_check_callbacks(smp_processor_id(), rcu_user_flag);
-	scheduler_tick();
-	run_posix_cpu_timers(tsk);
 }
 
 /*
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 9abbdf7562c5..3b20613325dc 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -139,13 +139,12 @@ struct set_mtrr_data {
 	mtrr_type	smp_type;
 };
 
-#ifdef CONFIG_SMP
-
 static void ipi_handler(void *info)
 /*  [SUMMARY] Synchronisation handler. Executed by "other" CPUs.
     [RETURNS] Nothing.
 */
 {
+#ifdef CONFIG_SMP
 	struct set_mtrr_data *data = info;
 	unsigned long flags;
 
@@ -168,9 +167,8 @@ static void ipi_handler(void *info)
 
 	atomic_dec(&data->count);
 	local_irq_restore(flags);
-}
-
 #endif
+}
 
 static inline int types_compatible(mtrr_type type1, mtrr_type type2) {
 	return type1 == MTRR_TYPE_UNCACHABLE ||
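This hunk and the nmi_32.c diff below ("x86: make ipi_handler() always defined" and "x86: make nmi_cpu_busy() always defined" in the commit list) apply the same build fix: move the #ifdef CONFIG_SMP inside the function body so the symbol is defined in UP builds too, and the compiler emits an empty stub instead of leaving an unresolved reference from code that names the function in both configurations. In miniature, with hypothetical names:

	/* Keep the definition visible in every config; gate only the body.
	 * On !CONFIG_SMP the function compiles to an empty stub, so any
	 * reference to it from common code still resolves. */
	static void handler(void *info)
	{
	#ifdef CONFIG_SMP
		do_smp_work(info);	/* hypothetical: the real per-CPU work */
	#endif
	}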
diff --git a/arch/x86/kernel/nmi_32.c b/arch/x86/kernel/nmi_32.c
index f803ed0ed1c4..600fd404e440 100644
--- a/arch/x86/kernel/nmi_32.c
+++ b/arch/x86/kernel/nmi_32.c
@@ -51,13 +51,13 @@ static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu);
 
 static int endflag __initdata = 0;
 
-#ifdef CONFIG_SMP
 /* The performance counters used by NMI_LOCAL_APIC don't trigger when
  * the CPU is idle. To make sure the NMI watchdog really ticks on all
  * CPUs during the test make them busy.
  */
 static __init void nmi_cpu_busy(void *data)
 {
+#ifdef CONFIG_SMP
 	local_irq_enable_in_hardirq();
 	/* Intentionally don't use cpu_relax here. This is
 	   to make sure that the performance counter really ticks,
@@ -67,8 +67,8 @@ static __init void nmi_cpu_busy(void *data)
 	   care if they get somewhat less cycles. */
 	while (endflag == 0)
 		mb();
-}
 #endif
+}
 
 static int __init check_nmi_watchdog(void)
 {