author	Tejun Heo <tj@kernel.org>	2009-06-24 02:13:48 -0400
committer	Tejun Heo <tj@kernel.org>	2009-06-24 02:13:48 -0400
commit	245b2e70eabd797932adb263a65da0bab3711753 (patch)
tree	30f0b790dadd2b70bf06e534abcf66a76e97b05a /mm
parent	b9bf3121af348d9255f1c917830fe8c2df52efcb (diff)
percpu: clean up percpu variable definitions
Percpu variable definition is about to be updated such that all percpu
symbols including the static ones must be unique.  Update percpu
variable definitions accordingly.

* as,cfq: rename ioc_count uniquely

* cpufreq: rename cpu_dbs_info uniquely

* xen: move nesting_count out of xen_evtchn_do_upcall() and rename it

* mm: move ratelimits out of balance_dirty_pages_ratelimited_nr() and
  rename it

* ipv4,6: rename cookie_scratch uniquely

* x86 perf_counter: rename prev_left to pmc_prev_left, irq_entry to
  pmc_irq_entry and nmi_entry to pmc_nmi_entry

* perf_counter: rename disable_count to perf_disable_count

* ftrace: rename test_event_disable to ftrace_test_event_disable

* kmemleak: rename test_pointer to kmemleak_test_pointer

* mce: rename next_interval to mce_next_interval

[ Impact: percpu usage cleanups, no duplicate static percpu var names ]

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: Dave Jones <davej@redhat.com>
Cc: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: linux-mm <linux-mm@kvack.org>
Cc: David S. Miller <davem@davemloft.net>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Steven Rostedt <srostedt@redhat.com>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Andi Kleen <andi@firstfloor.org>
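The rename itself is mechanical. As a minimal sketch of the before/after
shape (variable names here are illustrative, not taken from the tree): a
static percpu variable's symbol lives in a shared percpu linker section,
so once all percpu symbols must be unique, two files both defining a
static percpu "count" would collide; prefixing with the subsystem name
avoids that.

	#include <linux/percpu.h>

	/* Before: generic name, OK only while static percpu symbols may repeat. */
	static DEFINE_PER_CPU(unsigned long, count);

	/* After: subsystem-prefixed so the symbol stays unique tree-wide. */
	static DEFINE_PER_CPU(unsigned long, foo_count);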
Diffstat (limited to 'mm')
-rw-r--r--	mm/kmemleak-test.c	6
-rw-r--r--	mm/page-writeback.c	5
2 files changed, 6 insertions(+), 5 deletions(-)
diff --git a/mm/kmemleak-test.c b/mm/kmemleak-test.c
index d5292fc6f523..177a5169bbde 100644
--- a/mm/kmemleak-test.c
+++ b/mm/kmemleak-test.c
@@ -36,7 +36,7 @@ struct test_node {
 };
 
 static LIST_HEAD(test_list);
-static DEFINE_PER_CPU(void *, test_pointer);
+static DEFINE_PER_CPU(void *, kmemleak_test_pointer);
 
 /*
  * Some very simple testing. This function needs to be extended for
@@ -86,9 +86,9 @@ static int __init kmemleak_test_init(void)
 	}
 
 	for_each_possible_cpu(i) {
-		per_cpu(test_pointer, i) = kmalloc(129, GFP_KERNEL);
+		per_cpu(kmemleak_test_pointer, i) = kmalloc(129, GFP_KERNEL);
 		pr_info("kmemleak: kmalloc(129) = %p\n",
-			per_cpu(test_pointer, i));
+			per_cpu(kmemleak_test_pointer, i));
 	}
 
 	return 0;
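For reference, per_cpu(var, cpu) names a specific CPU's instance of the
variable, which is why every use site has to follow the rename above. A
short sketch of that access pattern (example_ptr is a hypothetical
variable, not part of this patch):

	#include <linux/cpumask.h>
	#include <linux/percpu.h>
	#include <linux/slab.h>

	static DEFINE_PER_CPU(void *, example_ptr);	/* hypothetical */

	static int __init example_init(void)
	{
		int i;

		/* Give each possible CPU's slot its own allocation. */
		for_each_possible_cpu(i)
			per_cpu(example_ptr, i) = kmalloc(64, GFP_KERNEL);
		return 0;
	}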
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 7b0dcea4935b..2c075dcf03d4 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -607,6 +607,8 @@ void set_page_dirty_balance(struct page *page, int page_mkwrite)
 	}
 }
 
+static DEFINE_PER_CPU(unsigned long, bdp_ratelimits) = 0;
+
 /**
  * balance_dirty_pages_ratelimited_nr - balance dirty memory state
  * @mapping: address_space which was dirtied
@@ -624,7 +626,6 @@ void set_page_dirty_balance(struct page *page, int page_mkwrite)
 void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
 					unsigned long nr_pages_dirtied)
 {
-	static DEFINE_PER_CPU(unsigned long, ratelimits) = 0;
 	unsigned long ratelimit;
 	unsigned long *p;
 
@@ -637,7 +638,7 @@ void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
 	 * tasks in balance_dirty_pages(). Period.
 	 */
 	preempt_disable();
-	p = &__get_cpu_var(ratelimits);
+	p = &__get_cpu_var(bdp_ratelimits);
 	*p += nr_pages_dirtied;
 	if (unlikely(*p >= ratelimit)) {
 		*p = 0;
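Shown standalone, the pattern applied here is: the percpu definition
moves to file scope (function-local static percpu definitions are what
the coming uniqueness requirement rules out), and updates run under
preempt_disable() so the task cannot migrate to another CPU mid-update.
A minimal sketch with a hypothetical counter name:

	#include <linux/percpu.h>
	#include <linux/preempt.h>

	static DEFINE_PER_CPU(unsigned long, example_events);	/* hypothetical */

	static void note_events(unsigned long n)
	{
		unsigned long *p;

		preempt_disable();	/* pin this task to the current CPU */
		p = &__get_cpu_var(example_events);
		*p += n;
		preempt_enable();
	}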