author		Roman Zippel <zippel@linux-m68k.org>	2008-05-01 07:34:28 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-05-01 11:03:58 -0400
commit		6f6d6a1a6a1336431a6cba60ace9e97c3a496a19 (patch)
tree		f32e82fc3a50b6877afa3220bdb6f7ea0582e07f /kernel
parent		71abb3af62dfa52930755f3b6497eafbe1d6ec85 (diff)
rename div64_64 to div64_u64
Rename div64_64 to div64_u64 to make it consistent with the other divide
functions, so it clearly includes the type of the divide.  Move its
definition to math64.h as currently no architecture overrides the generic
implementation.  They can still override it of course, but the duplicated
declarations are avoided.

Signed-off-by: Roman Zippel <zippel@linux-m68k.org>
Cc: Avi Kivity <avi@qumranet.com>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: David Howells <dhowells@redhat.com>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Patrick McHardy <kaber@trash.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
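
[Editor's note] For context, the renamed helper divides one unsigned 64-bit value by another; callers are unchanged apart from the name. The snippet below is a minimal userspace sketch, not the kernel source: it assumes the generic 64-bit case, where div64_u64() reduces to a plain C division, and borrows the inv_weight pattern from the sched.c hunks below purely as an illustration (the weight value and main() wrapper are made up for the example).

/*
 * Userspace sketch only: a stand-in for the kernel's div64_u64(),
 * assuming the trivial 64-bit definition (dividend / divisor).
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

static inline u64 div64_u64(u64 dividend, u64 divisor)
{
	return dividend / divisor;
}

int main(void)
{
	u64 weight = 1024;	/* example value, not from the patch */
	u64 inv_weight = div64_u64(1ULL << 32, weight);	/* same pattern as sched.c */

	printf("inv_weight = %llu\n", (unsigned long long)inv_weight);
	return 0;
}
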
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c		6
-rw-r--r--	kernel/sched_debug.c	4
2 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index e2f7f5acc807..34bcc5bc120e 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8025,7 +8025,7 @@ static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
 
 	se->my_q = cfs_rq;
 	se->load.weight = tg->shares;
-	se->load.inv_weight = div64_64(1ULL<<32, se->load.weight);
+	se->load.inv_weight = div64_u64(1ULL<<32, se->load.weight);
 	se->parent = parent;
 }
 #endif
@@ -8692,7 +8692,7 @@ static void __set_se_shares(struct sched_entity *se, unsigned long shares)
 	dequeue_entity(cfs_rq, se, 0);
 
 	se->load.weight = shares;
-	se->load.inv_weight = div64_64((1ULL<<32), shares);
+	se->load.inv_weight = div64_u64((1ULL<<32), shares);
 
 	if (on_rq)
 		enqueue_entity(cfs_rq, se, 0);
@@ -8787,7 +8787,7 @@ static unsigned long to_ratio(u64 period, u64 runtime)
 	if (runtime == RUNTIME_INF)
 		return 1ULL << 16;
 
-	return div64_64(runtime << 16, period);
+	return div64_u64(runtime << 16, period);
 }
 
 #ifdef CONFIG_CGROUP_SCHED
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 8a9498e7c831..6b4a12558e88 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -357,8 +357,8 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 
 	avg_per_cpu = p->se.sum_exec_runtime;
 	if (p->se.nr_migrations) {
-		avg_per_cpu = div64_64(avg_per_cpu,
-				       p->se.nr_migrations);
+		avg_per_cpu = div64_u64(avg_per_cpu,
+					p->se.nr_migrations);
 	} else {
 		avg_per_cpu = -1LL;
 	}