author    Linus Torvalds <torvalds@linux-foundation.org>    2012-10-01 13:43:39 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2012-10-01 13:43:39 -0400
commit    0b981cb94bc63a2d0e5eccccdca75fe57643ffce (patch)
tree      966ad6e6807fd1041d9962c9904e032a5ab07a65 /arch/ia64
parent    4cba3335826cbb36a218c3f5a1387e2c7c7ca9aa (diff)
parent    fdf9c356502ae02238efcdf90cefd7b473a63fd4 (diff)
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler changes from Ingo Molnar:
 "Continued quest to clean up and enhance the cputime code by Frederic
  Weisbecker, in preparation for future tickless kernel features.

  Other than that, smallish changes."

Fix up trivial conflicts due to additions next to each other in arch/{x86/}Kconfig

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (24 commits)
  cputime: Make finegrained irqtime accounting generally available
  cputime: Gather time/stats accounting config options into a single menu
  ia64: Reuse system and user vtime accounting functions on task switch
  ia64: Consolidate user vtime accounting
  vtime: Consolidate system/idle context detection
  cputime: Use a proper subsystem naming for vtime related APIs
  sched: cpu_power: enable ARCH_POWER
  sched/nohz: Clean up select_nohz_load_balancer()
  sched: Fix load avg vs. cpu-hotplug
  sched: Remove __ARCH_WANT_INTERRUPTS_ON_CTXSW
  sched: Fix nohz_idle_balance()
  sched: Remove useless code in yield_to()
  sched: Add time unit suffix to sched sysctl knobs
  sched/debug: Limit sd->*_idx range on sysctl
  sched: Remove AFFINE_WAKEUPS feature flag
  s390: Remove leftover account_tick_vtime() header
  cputime: Consolidate vtime handling on context switch
  sched: Move cputime code to its own file
  cputime: Generalize CONFIG_VIRT_CPU_ACCOUNTING
  tile: Remove SD_PREFER_LOCAL leftover
  ...
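
Context for the ia64 hunks below: the series replaces the arch-private IA64_ACCOUNT_ON_SWITCH() hook with a vtime_task_switch() function that the generic scheduler invokes on every context switch when CONFIG_VIRT_CPU_ACCOUNTING is enabled. A minimal sketch of that calling convention follows; the generic-side wrapper is only an illustration (this merge touches arch/ia64 only), and the helper name finish_switch_vtime() is hypothetical.

    /*
     * Sketch only: how the generic side is expected to drive the arch
     * hook after this series.  finish_switch_vtime() is a hypothetical
     * stand-in for the real call site in the scheduler core.
     */
    #ifdef CONFIG_VIRT_CPU_ACCOUNTING
    extern void vtime_task_switch(struct task_struct *prev);
    #else
    static inline void vtime_task_switch(struct task_struct *prev) { }
    #endif

    static inline void finish_switch_vtime(struct task_struct *prev)
    {
    	/* charge the outgoing task; interrupts are still disabled here */
    	vtime_task_switch(prev);
    }
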
Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/Kconfig                   12
-rw-r--r--  arch/ia64/include/asm/switch_to.h    8
-rw-r--r--  arch/ia64/kernel/time.c             66
3 files changed, 34 insertions(+), 52 deletions(-)
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 310cf5781fa..3c720ef6c32 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -25,6 +25,7 @@ config IA64
 	select HAVE_GENERIC_HARDIRQS
 	select HAVE_MEMBLOCK
 	select HAVE_MEMBLOCK_NODE_MAP
+	select HAVE_VIRT_CPU_ACCOUNTING
 	select ARCH_DISCARD_MEMBLOCK
 	select GENERIC_IRQ_PROBE
 	select GENERIC_PENDING_IRQ if SMP
@@ -340,17 +341,6 @@ config FORCE_MAX_ZONEORDER
 	default "17" if HUGETLB_PAGE
 	default "11"
 
-config VIRT_CPU_ACCOUNTING
-	bool "Deterministic task and CPU time accounting"
-	default n
-	help
-	  Select this option to enable more accurate task and CPU time
-	  accounting.  This is done by reading a CPU counter on each
-	  kernel entry and exit and on transitions within the kernel
-	  between system, softirq and hardirq state, so there is a
-	  small performance impact.
-	  If in doubt, say N here.
-
 config SMP
 	bool "Symmetric multi-processing support"
 	select USE_GENERIC_SMP_HELPERS
diff --git a/arch/ia64/include/asm/switch_to.h b/arch/ia64/include/asm/switch_to.h
index cb2412fcd17..d38c7ea5eea 100644
--- a/arch/ia64/include/asm/switch_to.h
+++ b/arch/ia64/include/asm/switch_to.h
@@ -30,13 +30,6 @@ extern struct task_struct *ia64_switch_to (void *next_task);
 extern void ia64_save_extra (struct task_struct *task);
 extern void ia64_load_extra (struct task_struct *task);
 
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
-extern void ia64_account_on_switch (struct task_struct *prev, struct task_struct *next);
-# define IA64_ACCOUNT_ON_SWITCH(p,n) ia64_account_on_switch(p,n)
-#else
-# define IA64_ACCOUNT_ON_SWITCH(p,n)
-#endif
-
 #ifdef CONFIG_PERFMON
   DECLARE_PER_CPU(unsigned long, pfm_syst_info);
 # define PERFMON_IS_SYSWIDE() (__get_cpu_var(pfm_syst_info) & 0x1)
@@ -49,7 +42,6 @@ extern void ia64_account_on_switch (struct task_struct *prev, struct task_struct
 	    || PERFMON_IS_SYSWIDE())
 
 #define __switch_to(prev,next,last) do {					\
-	IA64_ACCOUNT_ON_SWITCH(prev, next);					\
 	if (IA64_HAS_EXTRA_STATE(prev))						\
 		ia64_save_extra(prev);						\
 	if (IA64_HAS_EXTRA_STATE(next))						\
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index ecc904b33c5..80ff9acc5ed 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -83,32 +83,36 @@ static struct clocksource *itc_clocksource;
 
 extern cputime_t cycle_to_cputime(u64 cyc);
 
+static void vtime_account_user(struct task_struct *tsk)
+{
+	cputime_t delta_utime;
+	struct thread_info *ti = task_thread_info(tsk);
+
+	if (ti->ac_utime) {
+		delta_utime = cycle_to_cputime(ti->ac_utime);
+		account_user_time(tsk, delta_utime, delta_utime);
+		ti->ac_utime = 0;
+	}
+}
+
 /*
  * Called from the context switch with interrupts disabled, to charge all
  * accumulated times to the current process, and to prepare accounting on
  * the next process.
  */
-void ia64_account_on_switch(struct task_struct *prev, struct task_struct *next)
+void vtime_task_switch(struct task_struct *prev)
 {
 	struct thread_info *pi = task_thread_info(prev);
-	struct thread_info *ni = task_thread_info(next);
-	cputime_t delta_stime, delta_utime;
-	__u64 now;
+	struct thread_info *ni = task_thread_info(current);
 
-	now = ia64_get_itc();
-
-	delta_stime = cycle_to_cputime(pi->ac_stime + (now - pi->ac_stamp));
 	if (idle_task(smp_processor_id()) != prev)
-		account_system_time(prev, 0, delta_stime, delta_stime);
+		vtime_account_system(prev);
 	else
-		account_idle_time(delta_stime);
+		vtime_account_idle(prev);
 
-	if (pi->ac_utime) {
-		delta_utime = cycle_to_cputime(pi->ac_utime);
-		account_user_time(prev, delta_utime, delta_utime);
-	}
+	vtime_account_user(prev);
 
-	pi->ac_stamp = ni->ac_stamp = now;
+	pi->ac_stamp = ni->ac_stamp;
 	ni->ac_stime = ni->ac_utime = 0;
 }
 
@@ -116,29 +120,32 @@ void ia64_account_on_switch(struct task_struct *prev, struct task_struct *next)
  * Account time for a transition between system, hard irq or soft irq state.
  * Note that this function is called with interrupts enabled.
  */
-void account_system_vtime(struct task_struct *tsk)
+static cputime_t vtime_delta(struct task_struct *tsk)
 {
 	struct thread_info *ti = task_thread_info(tsk);
-	unsigned long flags;
 	cputime_t delta_stime;
 	__u64 now;
 
-	local_irq_save(flags);
-
 	now = ia64_get_itc();
 
 	delta_stime = cycle_to_cputime(ti->ac_stime + (now - ti->ac_stamp));
-	if (irq_count() || idle_task(smp_processor_id()) != tsk)
-		account_system_time(tsk, 0, delta_stime, delta_stime);
-	else
-		account_idle_time(delta_stime);
 	ti->ac_stime = 0;
-
 	ti->ac_stamp = now;
 
-	local_irq_restore(flags);
+	return delta_stime;
+}
+
+void vtime_account_system(struct task_struct *tsk)
+{
+	cputime_t delta = vtime_delta(tsk);
+
+	account_system_time(tsk, 0, delta, delta);
+}
+
+void vtime_account_idle(struct task_struct *tsk)
+{
+	account_idle_time(vtime_delta(tsk));
 }
-EXPORT_SYMBOL_GPL(account_system_vtime);
 
 /*
  * Called from the timer interrupt handler to charge accumulated user time
@@ -146,14 +153,7 @@ EXPORT_SYMBOL_GPL(account_system_vtime);
  */
 void account_process_tick(struct task_struct *p, int user_tick)
 {
-	struct thread_info *ti = task_thread_info(p);
-	cputime_t delta_utime;
-
-	if (ti->ac_utime) {
-		delta_utime = cycle_to_cputime(ti->ac_utime);
-		account_user_time(p, delta_utime, delta_utime);
-		ti->ac_utime = 0;
-	}
+	vtime_account_user(p);
 }
 
 #endif /* CONFIG_VIRT_CPU_ACCOUNTING */
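
A note on where the new ia64 helpers are consumed: the account_system_vtime() export removed above is superseded by a generic dispatcher in kernel/sched/cputime.c that makes the system-vs-idle decision in one place and then calls the arch helpers defined in this diff. Roughly, as a sketch rather than the exact upstream code:

    /*
     * Approximate shape of the generic vtime_account() dispatcher called
     * from irq entry/exit; details may differ from the real
     * kernel/sched/cputime.c implementation.
     */
    void vtime_account(struct task_struct *tsk)
    {
    	unsigned long flags;

    	local_irq_save(flags);

    	if (in_interrupt() || !is_idle_task(tsk))
    		vtime_account_system(tsk);	/* arch helper, defined above */
    	else
    		vtime_account_idle(tsk);	/* arch helper, defined above */

    	local_irq_restore(flags);
    }

This is why the ia64 helpers no longer do their own irq masking or context detection: the per-arch vtime_account_system()/vtime_account_idle() only convert and charge the accumulated cycles, and the shared caller handles the rest.
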