path: root/mm
author		Paul Mackerras <paulus@samba.org>	2008-04-14 07:11:02 -0400
committer	Paul Mackerras <paulus@samba.org>	2008-04-14 07:11:02 -0400
commit		ac7c5353b189e10cf5dd27399f64f7b013abffc6 (patch)
tree		8222d92b774c256d6ec4399c716d76b3f05ddc4b /mm
parent		a8f75ea70c58546205fb7673be41455b9da5d9a7 (diff)
parent		120dd64cacd4fb796bca0acba3665553f1d9ecaa (diff)
Merge branch 'linux-2.6'
Diffstat (limited to 'mm')
-rw-r--r--	mm/hugetlb.c		17
-rw-r--r--	mm/memcontrol.c		26
-rw-r--r--	mm/slab.c		4
-rw-r--r--	mm/slub.c		5
-rw-r--r--	mm/sparse-vmemmap.c	8
-rw-r--r--	mm/tiny-shmem.c		2
6 files changed, 48 insertions, 14 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 74c1b6b0b37b..51c9e2c01640 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -401,12 +401,20 @@ static void return_unused_surplus_pages(unsigned long unused_resv_pages)
 	struct page *page;
 	unsigned long nr_pages;
 
+	/*
+	 * We want to release as many surplus pages as possible, spread
+	 * evenly across all nodes. Iterate across all nodes until we
+	 * can no longer free unreserved surplus pages. This occurs when
+	 * the nodes with surplus pages have no free pages.
+	 */
+	unsigned long remaining_iterations = num_online_nodes();
+
 	/* Uncommit the reservation */
 	resv_huge_pages -= unused_resv_pages;
 
 	nr_pages = min(unused_resv_pages, surplus_huge_pages);
 
-	while (nr_pages) {
+	while (remaining_iterations-- && nr_pages) {
 		nid = next_node(nid, node_online_map);
 		if (nid == MAX_NUMNODES)
 			nid = first_node(node_online_map);
@@ -424,6 +432,7 @@ static void return_unused_surplus_pages(unsigned long unused_resv_pages)
 			surplus_huge_pages--;
 			surplus_huge_pages_node[nid]--;
 			nr_pages--;
+			remaining_iterations = num_online_nodes();
 		}
 	}
 }
@@ -671,9 +680,11 @@ int hugetlb_report_node_meminfo(int nid, char *buf)
 {
 	return sprintf(buf,
 		"Node %d HugePages_Total: %5u\n"
-		"Node %d HugePages_Free:  %5u\n",
+		"Node %d HugePages_Free:  %5u\n"
+		"Node %d HugePages_Surp:  %5u\n",
 		nid, nr_huge_pages_node[nid],
-		nid, free_huge_pages_node[nid]);
+		nid, free_huge_pages_node[nid],
+		nid, surplus_huge_pages_node[nid]);
 }
 
 /* Return the number pages of memory we physically have, in PAGE_SIZE units. */
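The hugetlb.c hunks above bound the surplus-page release loop: as the new comment explains, surplus pages may remain only on nodes that have no free pages, and the old `while (nr_pages)` condition would then never make progress. remaining_iterations is rearmed on every successful free, so the walk terminates after one full pass over the online nodes in which nothing could be freed. Below is a minimal userspace sketch of that bounded round-robin pattern; the node count and per-node arrays are invented for illustration and only model the kernel's per-node counters.

#include <stdio.h>

#define NR_NODES 4

/* Toy stand-ins for surplus_huge_pages_node[] and the per-node free
 * lists; the numbers are made up for the example. */
static unsigned long surplus[NR_NODES] = { 3, 0, 2, 0 };
static unsigned long free_pages[NR_NODES] = { 1, 5, 0, 0 };

/* Release up to nr_pages surplus pages, round-robin across nodes.
 * The remaining counter guarantees termination: it is rearmed on every
 * successful free, so the loop ends after one full pass in which no
 * node could give anything back. */
static void return_surplus(unsigned long nr_pages)
{
	static int nid = -1;
	unsigned long remaining = NR_NODES;

	while (remaining-- && nr_pages) {
		nid = (nid + 1) % NR_NODES;	/* next_node() stand-in */

		if (!surplus[nid] || !free_pages[nid])
			continue;

		surplus[nid]--;
		free_pages[nid]--;
		nr_pages--;
		remaining = NR_NODES;		/* progress: rearm the guard */
	}
}

int main(void)
{
	return_surplus(10);
	for (int nid = 0; nid < NR_NODES; nid++)
		printf("node %d: surplus %lu, free %lu\n",
		       nid, surplus[nid], free_pages[nid]);
	return 0;
}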
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 9b648bd63451..2e0bfc93484b 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -533,6 +533,9 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
 	unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
 	struct mem_cgroup_per_zone *mz;
 
+	if (mem_cgroup_subsys.disabled)
+		return 0;
+
 	/*
 	 * Should page_cgroup's go to their own slab?
 	 * One could optimize the performance of the charging routine
@@ -665,6 +668,9 @@ void mem_cgroup_uncharge_page(struct page *page)
 	struct mem_cgroup_per_zone *mz;
 	unsigned long flags;
 
+	if (mem_cgroup_subsys.disabled)
+		return;
+
 	/*
 	 * Check if our page_cgroup is valid
 	 */
@@ -705,6 +711,9 @@ int mem_cgroup_prepare_migration(struct page *page)
 {
 	struct page_cgroup *pc;
 
+	if (mem_cgroup_subsys.disabled)
+		return 0;
+
 	lock_page_cgroup(page);
 	pc = page_get_page_cgroup(page);
 	if (pc)
@@ -803,6 +812,9 @@ static int mem_cgroup_force_empty(struct mem_cgroup *mem)
 	int ret = -EBUSY;
 	int node, zid;
 
+	if (mem_cgroup_subsys.disabled)
+		return 0;
+
 	css_get(&mem->css);
 	/*
 	 * page reclaim code (kswapd etc..) will move pages between
@@ -966,7 +978,7 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
 {
 	struct mem_cgroup_per_node *pn;
 	struct mem_cgroup_per_zone *mz;
-	int zone;
+	int zone, tmp = node;
 	/*
 	 * This routine is called against possible nodes.
 	 * But it's BUG to call kmalloc() against offline node.
@@ -975,10 +987,9 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
 	 * never be onlined. It's better to use memory hotplug callback
 	 * function.
 	 */
-	if (node_state(node, N_HIGH_MEMORY))
-		pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, node);
-	else
-		pn = kmalloc(sizeof(*pn), GFP_KERNEL);
+	if (!node_state(node, N_NORMAL_MEMORY))
+		tmp = -1;
+	pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
 	if (!pn)
 		return 1;
 
@@ -1053,6 +1064,8 @@ static void mem_cgroup_destroy(struct cgroup_subsys *ss,
 static int mem_cgroup_populate(struct cgroup_subsys *ss,
 				struct cgroup *cont)
 {
+	if (mem_cgroup_subsys.disabled)
+		return 0;
 	return cgroup_add_files(cont, ss, mem_cgroup_files,
 				ARRAY_SIZE(mem_cgroup_files));
 }
@@ -1065,6 +1078,9 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
 	struct mm_struct *mm;
 	struct mem_cgroup *mem, *old_mem;
 
+	if (mem_cgroup_subsys.disabled)
+		return;
+
 	mm = get_task_mm(p);
 	if (mm == NULL)
 		return;
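Two ideas recur in the memcontrol.c hunks: every entry point now returns immediately when mem_cgroup_subsys.disabled is set, and alloc_mem_cgroup_per_zone_info() keeps a single kmalloc_node() call but weakens the node hint to -1 when the requested node has no normal memory, instead of branching to plain kmalloc(). The sketch below is a userspace model of the node-fallback idea only; alloc_on_node(), the node table and struct per_zone_info are hypothetical stand-ins, not kernel APIs.

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

#define MAX_NODES 4

/* Hypothetical table of which "nodes" can satisfy a normal allocation. */
static bool node_has_normal_memory[MAX_NODES] = { true, false, true, false };

/* Stand-in for kmalloc_node(): node == -1 means "no preference". */
static void *alloc_on_node(size_t size, int node)
{
	(void)node;	/* a real NUMA allocator would honour the hint */
	return malloc(size);
}

struct per_zone_info {
	long counters[4];
};

/* Mirrors the hunk above: keep one allocation call and drop the node
 * preference when that node cannot serve it, rather than switching to a
 * different allocator. */
static struct per_zone_info *alloc_per_zone_info(int node)
{
	int tmp = node;

	if (!node_has_normal_memory[node])
		tmp = -1;	/* fall back to "any node" */
	return alloc_on_node(sizeof(struct per_zone_info), tmp);
}

int main(void)
{
	for (int node = 0; node < MAX_NODES; node++) {
		struct per_zone_info *pn = alloc_per_zone_info(node);

		printf("node %d: %s\n", node, pn ? "allocated" : "failed");
		free(pn);
	}
	return 0;
}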
diff --git a/mm/slab.c b/mm/slab.c
index bb4070e1079f..04b308c3bc54 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1481,7 +1481,7 @@ void __init kmem_cache_init(void)
 	list_add(&cache_cache.next, &cache_chain);
 	cache_cache.colour_off = cache_line_size();
 	cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
-	cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE];
+	cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
 
 	/*
 	 * struct kmem_cache size depends on nr_node_ids, which
@@ -1602,7 +1602,7 @@ void __init kmem_cache_init(void)
 		int nid;
 
 		for_each_online_node(nid) {
-			init_list(&cache_cache, &initkmem_list3[CACHE_CACHE], nid);
+			init_list(&cache_cache, &initkmem_list3[CACHE_CACHE + nid], nid);
 
 			init_list(malloc_sizes[INDEX_AC].cs_cachep,
 				  &initkmem_list3[SIZE_AC + nid], nid);
diff --git a/mm/slub.c b/mm/slub.c
index ca71d5b81e4a..acc975fcc8cc 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1470,6 +1470,9 @@ static void *__slab_alloc(struct kmem_cache *s,
 	void **object;
 	struct page *new;
 
+	/* We handle __GFP_ZERO in the caller */
+	gfpflags &= ~__GFP_ZERO;
+
 	if (!c->page)
 		goto new_slab;
 
@@ -2685,6 +2688,7 @@ void kfree(const void *x)
 }
 EXPORT_SYMBOL(kfree);
 
+#if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SLABINFO)
 static unsigned long count_partial(struct kmem_cache_node *n)
 {
 	unsigned long flags;
@@ -2697,6 +2701,7 @@ static unsigned long count_partial(struct kmem_cache_node *n)
 	spin_unlock_irqrestore(&n->list_lock, flags);
 	return x;
 }
+#endif
 
 /*
  * kmem_cache_shrink removes empty slabs from the partial lists and sorts
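The slub.c hunks do two things: __slab_alloc() masks __GFP_ZERO out of the slow-path flags because, as the added comment says, the caller handles zeroing, and count_partial() is wrapped in a preprocessor condition so it is only built when CONFIG_SLUB_DEBUG or CONFIG_SLABINFO is set, presumably because only those configurations call it. A compilable sketch of that guard pattern, with ENABLE_STATS standing in for the kernel config options:

#include <stdio.h>

/* Set to 0 to emulate a build with no caller of count_partial(); the
 * #if keeps the helper out entirely, so there is no
 * "defined but not used" warning. ENABLE_STATS is illustrative only. */
#define ENABLE_STATS 1

#if ENABLE_STATS
static unsigned long count_partial(const unsigned long *per_node, int nr_nodes)
{
	unsigned long total = 0;

	for (int i = 0; i < nr_nodes; i++)
		total += per_node[i];
	return total;
}
#endif

int main(void)
{
#if ENABLE_STATS
	unsigned long partial_per_node[] = { 3, 0, 7 };

	printf("partial slabs: %lu\n", count_partial(partial_per_node, 3));
#endif
	return 0;
}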
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index cd75b21dd4c3..99c4f36eb8a3 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -76,7 +76,7 @@ pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
 		pte_t entry;
 		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
 		if (!p)
-			return 0;
+			return NULL;
 		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
 		set_pte_at(&init_mm, addr, pte, entry);
 	}
@@ -89,7 +89,7 @@ pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
 	if (pmd_none(*pmd)) {
 		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
 		if (!p)
-			return 0;
+			return NULL;
 		pmd_populate_kernel(&init_mm, pmd, p);
 	}
 	return pmd;
@@ -101,7 +101,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
 	if (pud_none(*pud)) {
 		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
 		if (!p)
-			return 0;
+			return NULL;
 		pud_populate(&init_mm, pud, p);
 	}
 	return pud;
@@ -113,7 +113,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
 	if (pgd_none(*pgd)) {
 		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
 		if (!p)
-			return 0;
+			return NULL;
 		pgd_populate(&init_mm, pgd, p);
 	}
 	return pgd;
diff --git a/mm/tiny-shmem.c b/mm/tiny-shmem.c
index f0f55875dd6a..ae532f501943 100644
--- a/mm/tiny-shmem.c
+++ b/mm/tiny-shmem.c
@@ -88,6 +88,8 @@ struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
 
 close_file:
 	put_filp(file);
+	return ERR_PTR(error);
+
 put_dentry:
 	dput(dentry);
 put_memory:
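The tiny-shmem.c hunk makes the close_file error path return ERR_PTR(error) right after put_filp(), rather than falling through into the put_dentry and put_memory labels below it. ERR_PTR() encodes a small negative errno value inside a pointer, so a function returning struct file * can report failure without an extra out-parameter; callers test the result with IS_ERR() and decode it with PTR_ERR(). A minimal userspace model of that convention follows; err_ptr(), is_err(), ptr_err() and toy_setup() are stand-ins written for this sketch, not the kernel's definitions.

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

#define MAX_ERRNO 4095

/* Userspace stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR():
 * errno values in [-MAX_ERRNO, -1] map to the very top of the address
 * range, which no successful allocation will ever occupy. */
static void *err_ptr(long error) { return (void *)error; }
static long ptr_err(const void *ptr) { return (long)ptr; }
static int is_err(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Toy setup function in the style of shmem_file_setup(): it returns
 * either a valid object or an encoded errno, never NULL. */
static char *toy_setup(int fail)
{
	char *buf;

	if (fail)
		return err_ptr(-ENOSPC);
	buf = malloc(32);
	if (!buf)
		return err_ptr(-ENOMEM);
	return buf;
}

int main(void)
{
	char *obj = toy_setup(1);

	if (is_err(obj))
		printf("setup failed: errno %ld\n", -ptr_err(obj));
	else
		free(obj);
	return 0;
}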