author    Linus Torvalds <torvalds@linux-foundation.org>  2009-09-15 12:39:44 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2009-09-15 12:39:44 -0400
commit    ada3fa15057205b7d3f727bba5cd26b5912e350f (patch)
tree      60962fc9e4021b92f484d1a58e72cd3906d4f3db /mm/page-writeback.c
parent    2f82af08fcc7dc01a7e98a49a5995a77e32a2925 (diff)
parent    5579fd7e6aed8860ea0c8e3f11897493153b10ad (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu: (46 commits)
  powerpc64: convert to dynamic percpu allocator
  sparc64: use embedding percpu first chunk allocator
  percpu: kill lpage first chunk allocator
  x86,percpu: use embedding for 64bit NUMA and page for 32bit NUMA
  percpu: update embedding first chunk allocator to handle sparse units
  percpu: use group information to allocate vmap areas sparsely
  vmalloc: implement pcpu_get_vm_areas()
  vmalloc: separate out insert_vmalloc_vm()
  percpu: add chunk->base_addr
  percpu: add pcpu_unit_offsets[]
  percpu: introduce pcpu_alloc_info and pcpu_group_info
  percpu: move pcpu_lpage_build_unit_map() and pcpul_lpage_dump_cfg() upward
  percpu: add @align to pcpu_fc_alloc_fn_t
  percpu: make @dyn_size mandatory for pcpu_setup_first_chunk()
  percpu: drop @static_size from first chunk allocators
  percpu: generalize first chunk allocator selection
  percpu: build first chunk allocators selectively
  percpu: rename 4k first chunk allocator to page
  percpu: improve boot messages
  percpu: fix pcpu_reclaim() locking
  ...

Fix trivial conflict as per Tejun Heo in kernel/sched.c
Diffstat (limited to 'mm/page-writeback.c')
-rw-r--r--  mm/page-writeback.c  5
1 file changed, 3 insertions, 2 deletions
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 25e7770309b8..dd73d29c15a8 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -604,6 +604,8 @@ void set_page_dirty_balance(struct page *page, int page_mkwrite)
 	}
 }
 
+static DEFINE_PER_CPU(unsigned long, bdp_ratelimits) = 0;
+
 /**
  * balance_dirty_pages_ratelimited_nr - balance dirty memory state
  * @mapping: address_space which was dirtied
@@ -621,7 +623,6 @@ void set_page_dirty_balance(struct page *page, int page_mkwrite)
 void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
 					unsigned long nr_pages_dirtied)
 {
-	static DEFINE_PER_CPU(unsigned long, ratelimits) = 0;
 	unsigned long ratelimit;
 	unsigned long *p;
 
@@ -634,7 +635,7 @@ void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
 	 * tasks in balance_dirty_pages(). Period.
 	 */
 	preempt_disable();
-	p = &__get_cpu_var(ratelimits);
+	p = &__get_cpu_var(bdp_ratelimits);
 	*p += nr_pages_dirtied;
 	if (unlikely(*p >= ratelimit)) {
 		*p = 0;
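
The change itself is small: the per-CPU counter moves from function scope to file scope and gains a bdp_ prefix, in line with the percpu series merged above, which wants percpu symbols (static ones included) to be unique across the kernel. The pattern the function implements, accumulating dirtied pages in a cheap per-CPU counter and calling into the expensive balancing path only when a threshold is crossed, can be sketched outside the kernel. Below is a minimal userspace analogue in C11, with thread-local storage standing in for DEFINE_PER_CPU; the RATELIMIT constant and the dirtied_pages() helper are invented for illustration (the kernel derives its ratelimit from writeback tunables, not a fixed constant):

#include <stdio.h>

/* Userspace stand-in for DEFINE_PER_CPU(unsigned long, bdp_ratelimits):
 * one private counter per thread instead of per CPU. */
static _Thread_local unsigned long bdp_ratelimits;

/* Invented threshold for illustration only. */
#define RATELIMIT 32UL

/* Stub for the expensive balancing work. */
static void balance_dirty_pages(void)
{
	printf("threshold hit: balancing dirty pages\n");
}

/* Analogue of balance_dirty_pages_ratelimited_nr(): accumulate
 * cheaply, and do real work only once per RATELIMIT pages. */
static void dirtied_pages(unsigned long nr_pages_dirtied)
{
	bdp_ratelimits += nr_pages_dirtied;
	if (bdp_ratelimits >= RATELIMIT) {
		bdp_ratelimits = 0;
		balance_dirty_pages();
	}
}

int main(void)
{
	for (int i = 0; i < 100; i++)
		dirtied_pages(1);
	return 0;
}

Built with any C11 compiler (e.g. gcc -std=c11), this prints the balancing message once per 32 simulated page dirties. Thread-local data needs no locking here; the kernel, by contrast, wraps the counter update in preempt_disable()/preempt_enable() because a preempted task could migrate to another CPU between reading and updating its CPU's counter.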