aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorIngo Molnar <mingo@elte.hu>2009-11-15 03:50:38 -0500
committerIngo Molnar <mingo@elte.hu>2009-11-15 03:50:41 -0500
commit39dc78b6510323848e3356452f7dab9499736978 (patch)
treecf8a8fede74e41b203fd00e3ccd21ead2e851442 /mm
parent4c49b12853fbb5eff4849b7b6a1e895776f027a1 (diff)
parent156171c71a0dc4bce12b4408bb1591f8fe32dc1a (diff)
Merge commit 'v2.6.32-rc7' into perf/core
Merge reason: pick up perf fixlets.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'mm')
-rw-r--r--mm/backing-dev.c3
-rw-r--r--mm/highmem.c17
-rw-r--r--mm/ksm.c1
-rw-r--r--mm/migrate.c2
-rw-r--r--mm/page_alloc.c4
5 files changed, 18 insertions(+), 9 deletions(-)
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 1065b715ef64..11aee09dd2a6 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -628,6 +628,8 @@ static void bdi_prune_sb(struct backing_dev_info *bdi)
628void bdi_unregister(struct backing_dev_info *bdi) 628void bdi_unregister(struct backing_dev_info *bdi)
629{ 629{
630 if (bdi->dev) { 630 if (bdi->dev) {
631 bdi_prune_sb(bdi);
632
631 if (!bdi_cap_flush_forker(bdi)) 633 if (!bdi_cap_flush_forker(bdi))
632 bdi_wb_shutdown(bdi); 634 bdi_wb_shutdown(bdi);
633 bdi_debug_unregister(bdi); 635 bdi_debug_unregister(bdi);
@@ -697,7 +699,6 @@ void bdi_destroy(struct backing_dev_info *bdi)
697 spin_unlock(&inode_lock); 699 spin_unlock(&inode_lock);
698 } 700 }
699 701
700 bdi_prune_sb(bdi);
701 bdi_unregister(bdi); 702 bdi_unregister(bdi);
702 703
703 for (i = 0; i < NR_BDI_STAT_ITEMS; i++) 704 for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
diff --git a/mm/highmem.c b/mm/highmem.c
index 25878cc49daa..9c1e627f282e 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -426,16 +426,21 @@ void __init page_address_init(void)
426 426
427void debug_kmap_atomic(enum km_type type) 427void debug_kmap_atomic(enum km_type type)
428{ 428{
429 static unsigned warn_count = 10; 429 static int warn_count = 10;
430 430
431 if (unlikely(warn_count == 0)) 431 if (unlikely(warn_count < 0))
432 return; 432 return;
433 433
434 if (unlikely(in_interrupt())) { 434 if (unlikely(in_interrupt())) {
435 if (in_irq()) { 435 if (in_nmi()) {
436 if (type != KM_NMI && type != KM_NMI_PTE) {
437 WARN_ON(1);
438 warn_count--;
439 }
440 } else if (in_irq()) {
436 if (type != KM_IRQ0 && type != KM_IRQ1 && 441 if (type != KM_IRQ0 && type != KM_IRQ1 &&
437 type != KM_BIO_SRC_IRQ && type != KM_BIO_DST_IRQ && 442 type != KM_BIO_SRC_IRQ && type != KM_BIO_DST_IRQ &&
438 type != KM_BOUNCE_READ) { 443 type != KM_BOUNCE_READ && type != KM_IRQ_PTE) {
439 WARN_ON(1); 444 WARN_ON(1);
440 warn_count--; 445 warn_count--;
441 } 446 }
@@ -452,7 +457,9 @@ void debug_kmap_atomic(enum km_type type)
452 } 457 }
453 458
454 if (type == KM_IRQ0 || type == KM_IRQ1 || type == KM_BOUNCE_READ || 459 if (type == KM_IRQ0 || type == KM_IRQ1 || type == KM_BOUNCE_READ ||
455 type == KM_BIO_SRC_IRQ || type == KM_BIO_DST_IRQ) { 460 type == KM_BIO_SRC_IRQ || type == KM_BIO_DST_IRQ ||
461 type == KM_IRQ_PTE || type == KM_NMI ||
462 type == KM_NMI_PTE ) {
456 if (!irqs_disabled()) { 463 if (!irqs_disabled()) {
457 WARN_ON(1); 464 WARN_ON(1);
458 warn_count--; 465 warn_count--;
diff --git a/mm/ksm.c b/mm/ksm.c
index bef1af4f77e3..5575f8628fef 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1012,6 +1012,7 @@ static struct rmap_item *unstable_tree_search_insert(struct page *page,
1012 struct rmap_item *tree_rmap_item; 1012 struct rmap_item *tree_rmap_item;
1013 int ret; 1013 int ret;
1014 1014
1015 cond_resched();
1015 tree_rmap_item = rb_entry(*new, struct rmap_item, node); 1016 tree_rmap_item = rb_entry(*new, struct rmap_item, node);
1016 page2[0] = get_mergeable_page(tree_rmap_item); 1017 page2[0] = get_mergeable_page(tree_rmap_item);
1017 if (!page2[0]) 1018 if (!page2[0])
diff --git a/mm/migrate.c b/mm/migrate.c
index 1a4bf4813780..7dbcb22316d2 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -602,7 +602,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
602 struct page *newpage = get_new_page(page, private, &result); 602 struct page *newpage = get_new_page(page, private, &result);
603 int rcu_locked = 0; 603 int rcu_locked = 0;
604 int charge = 0; 604 int charge = 0;
605 struct mem_cgroup *mem; 605 struct mem_cgroup *mem = NULL;
606 606
607 if (!newpage) 607 if (!newpage)
608 return -ENOMEM; 608 return -ENOMEM;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index cdcedf661616..2bc2ac63f41e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1769,7 +1769,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
1769 * See also cpuset_zone_allowed() comment in kernel/cpuset.c. 1769 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1770 */ 1770 */
1771 alloc_flags &= ~ALLOC_CPUSET; 1771 alloc_flags &= ~ALLOC_CPUSET;
1772 } else if (unlikely(rt_task(p))) 1772 } else if (unlikely(rt_task(p)) && !in_interrupt())
1773 alloc_flags |= ALLOC_HARDER; 1773 alloc_flags |= ALLOC_HARDER;
1774 1774
1775 if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) { 1775 if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
@@ -1817,9 +1817,9 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
1817 if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE) 1817 if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
1818 goto nopage; 1818 goto nopage;
1819 1819
1820restart:
1820 wake_all_kswapd(order, zonelist, high_zoneidx); 1821 wake_all_kswapd(order, zonelist, high_zoneidx);
1821 1822
1822restart:
1823 /* 1823 /*
1824 * OK, we're below the kswapd watermark and have kicked background 1824 * OK, we're below the kswapd watermark and have kicked background
1825 * reclaim. Now things get more complex, so set up alloc_flags according 1825 * reclaim. Now things get more complex, so set up alloc_flags according