author    Benjamin Herrenschmidt <benh@kernel.crashing.org>	2008-07-15 01:44:51 -0400
committer Benjamin Herrenschmidt <benh@kernel.crashing.org>	2008-07-15 01:44:51 -0400
commit    43d2548bb2ef7e6d753f91468a746784041e522d (patch)
tree      77d13fcd48fd998393abb825ec36e2b732684a73 /mm
parent    585583d95c5660973bc0cf64add517b040acd8a4 (diff)
parent    85082fd7cbe3173198aac0eb5e85ab1edcc6352c (diff)
Merge commit '85082fd7cbe3173198aac0eb5e85ab1edcc6352c' into test-build
Manual fixup of: arch/powerpc/Kconfig
Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig           |  4
-rw-r--r--  mm/mprotect.c        | 10
-rw-r--r--  mm/page-writeback.c  | 10
-rw-r--r--  mm/page_alloc.c      | 94
-rw-r--r--  mm/slub.c            |  5
5 files changed, 91 insertions(+), 32 deletions(-)
diff --git a/mm/Kconfig b/mm/Kconfig
index 3aa819d628c1..c4de85285bb4 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -129,7 +129,7 @@ config MEMORY_HOTPLUG
 	bool "Allow for memory hot-add"
 	depends on SPARSEMEM || X86_64_ACPI_NUMA
 	depends on HOTPLUG && !HIBERNATION && ARCH_ENABLE_MEMORY_HOTPLUG
-	depends on (IA64 || X86 || PPC64 || SUPERH)
+	depends on (IA64 || X86 || PPC64 || SUPERH || S390)
 
 comment "Memory hotplug is currently incompatible with Software Suspend"
 	depends on SPARSEMEM && HOTPLUG && HIBERNATION
@@ -199,7 +199,7 @@ config BOUNCE
 config NR_QUICK
 	int
 	depends on QUICKLIST
-	default "2" if SUPERH
+	default "2" if SUPERH || AVR32
 	default "1"
 
 config VIRT_TO_BUS
diff --git a/mm/mprotect.c b/mm/mprotect.c
index ecfaa5844b5f..360d9cc8b38c 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -47,19 +47,17 @@ static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
 		if (pte_present(oldpte)) {
 			pte_t ptent;
 
-			/* Avoid an SMP race with hardware updated dirty/clean
-			 * bits by wiping the pte and then setting the new pte
-			 * into place.
-			 */
-			ptent = ptep_get_and_clear(mm, addr, pte);
+			ptent = ptep_modify_prot_start(mm, addr, pte);
 			ptent = pte_modify(ptent, newprot);
+
 			/*
 			 * Avoid taking write faults for pages we know to be
 			 * dirty.
 			 */
 			if (dirty_accountable && pte_dirty(ptent))
 				ptent = pte_mkwrite(ptent);
-			set_pte_at(mm, addr, pte, ptent);
+
+			ptep_modify_prot_commit(mm, addr, pte, ptent);
 #ifdef CONFIG_MIGRATION
 		} else if (!pte_file(oldpte)) {
 			swp_entry_t entry = pte_to_swp_entry(oldpte);
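
The start/commit pair above is meant to let a paravirtualized MMU treat the read-modify-write of a pte as one transaction. On architectures that do not override the hooks, the generic fallbacks are assumed to reduce to exactly the sequence being removed here; a minimal sketch of that assumption (not taken from this diff):

static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	/* Wipe the pte so hardware cannot update dirty/accessed bits
	 * behind our back while the new protections are computed. */
	return ptep_get_and_clear(mm, addr, ptep);
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep,
					   pte_t pte)
{
	/* Install the pte with its new protections in one step. */
	set_pte_at(mm, addr, ptep, pte);
}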
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 789b6adbef37..b38f700825fc 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -126,8 +126,6 @@ static void background_writeout(unsigned long _min_pages);
 static struct prop_descriptor vm_completions;
 static struct prop_descriptor vm_dirties;
 
-static unsigned long determine_dirtyable_memory(void);
-
 /*
  * couple the period to the dirty_ratio:
  *
@@ -347,7 +345,13 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
 #endif
 }
 
-static unsigned long determine_dirtyable_memory(void)
+/**
+ * determine_dirtyable_memory - amount of memory that may be used
+ *
+ * Returns the numebr of pages that can currently be freed and used
+ * by the kernel for direct mappings.
+ */
+unsigned long determine_dirtyable_memory(void)
 {
 	unsigned long x;
 
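
Dropping the static qualifier (and the forward declaration removed above) exposes determine_dirtyable_memory() to other parts of mm, so callers can size thresholds against the pool of dirtyable pages rather than total RAM. A hypothetical caller, purely for illustration (the helper name and ratio are not part of this diff):

/* Illustrative only: cap dirty pages at `ratio` percent of what the
 * kernel currently considers dirtyable. */
static unsigned long example_dirty_limit(unsigned int ratio)
{
	unsigned long dirtyable = determine_dirtyable_memory();

	return dirtyable * ratio / 100;
}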
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f32fae3121f0..f024b9b3a2a6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2929,6 +2929,18 @@ void __init free_bootmem_with_active_regions(int nid,
 	}
 }
 
+void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
+{
+	int i;
+	int ret;
+
+	for_each_active_range_index_in_nid(i, nid) {
+		ret = work_fn(early_node_map[i].start_pfn,
+			      early_node_map[i].end_pfn, data);
+		if (ret)
+			break;
+	}
+}
 /**
  * sparse_memory_present_with_active_regions - Call memory_present for each active range
  * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
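
work_with_active_regions() walks every registered early_node_map[] range on a node and hands each (start_pfn, end_pfn) pair to the supplied callback, stopping as soon as the callback returns non-zero. A hypothetical callback matching that contract (illustrative, not part of this diff):

/* Sum the pages covered by a node's active ranges. */
static int __init count_active_pages(unsigned long start_pfn,
				     unsigned long end_pfn, void *data)
{
	unsigned long *total = data;

	*total += end_pfn - start_pfn;
	return 0;	/* zero: keep walking the remaining ranges */
}

/* Usage from arch setup code would then look roughly like:
 *	unsigned long pages = 0;
 *	work_with_active_regions(nid, count_active_pages, &pages);
 */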
@@ -3461,6 +3473,11 @@ void __paginginit free_area_init_node(int nid, struct pglist_data *pgdat,
 	calculate_node_totalpages(pgdat, zones_size, zholes_size);
 
 	alloc_node_mem_map(pgdat);
+#ifdef CONFIG_FLAT_NODE_MEM_MAP
+	printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
+		nid, (unsigned long)pgdat,
+		(unsigned long)pgdat->node_mem_map);
+#endif
 
 	free_area_init_core(pgdat, zones_size, zholes_size);
 }
@@ -3503,7 +3520,7 @@ void __init add_active_range(unsigned int nid, unsigned long start_pfn,
 {
 	int i;
 
-	printk(KERN_DEBUG "Entering add_active_range(%d, %lu, %lu) "
+	printk(KERN_DEBUG "Entering add_active_range(%d, %#lx, %#lx) "
 			"%d entries of %d used\n",
 			nid, start_pfn, end_pfn,
 			nr_nodemap_entries, MAX_ACTIVE_REGIONS);
@@ -3547,27 +3564,68 @@ void __init add_active_range(unsigned int nid, unsigned long start_pfn,
 }
 
 /**
- * shrink_active_range - Shrink an existing registered range of PFNs
+ * remove_active_range - Shrink an existing registered range of PFNs
  * @nid: The node id the range is on that should be shrunk
- * @old_end_pfn: The old end PFN of the range
- * @new_end_pfn: The new PFN of the range
+ * @start_pfn: The new PFN of the range
+ * @end_pfn: The new PFN of the range
  *
  * i386 with NUMA use alloc_remap() to store a node_mem_map on a local node.
- * The map is kept at the end physical page range that has already been
- * registered with add_active_range(). This function allows an arch to shrink
- * an existing registered range.
+ * The map is kept near the end physical page range that has already been
+ * registered. This function allows an arch to shrink an existing registered
+ * range.
  */
-void __init shrink_active_range(unsigned int nid, unsigned long old_end_pfn,
-						unsigned long new_end_pfn)
+void __init remove_active_range(unsigned int nid, unsigned long start_pfn,
+				unsigned long end_pfn)
 {
-	int i;
+	int i, j;
+	int removed = 0;
+
+	printk(KERN_DEBUG "remove_active_range (%d, %lu, %lu)\n",
+		nid, start_pfn, end_pfn);
 
 	/* Find the old active region end and shrink */
-	for_each_active_range_index_in_nid(i, nid)
-		if (early_node_map[i].end_pfn == old_end_pfn) {
-			early_node_map[i].end_pfn = new_end_pfn;
-			break;
+	for_each_active_range_index_in_nid(i, nid) {
+		if (early_node_map[i].start_pfn >= start_pfn &&
+		    early_node_map[i].end_pfn <= end_pfn) {
+			/* clear it */
+			early_node_map[i].start_pfn = 0;
+			early_node_map[i].end_pfn = 0;
+			removed = 1;
+			continue;
+		}
+		if (early_node_map[i].start_pfn < start_pfn &&
+		    early_node_map[i].end_pfn > start_pfn) {
+			unsigned long temp_end_pfn = early_node_map[i].end_pfn;
+			early_node_map[i].end_pfn = start_pfn;
+			if (temp_end_pfn > end_pfn)
+				add_active_range(nid, end_pfn, temp_end_pfn);
+			continue;
 		}
+		if (early_node_map[i].start_pfn >= start_pfn &&
+		    early_node_map[i].end_pfn > end_pfn &&
+		    early_node_map[i].start_pfn < end_pfn) {
+			early_node_map[i].start_pfn = end_pfn;
+			continue;
+		}
+	}
+
+	if (!removed)
+		return;
+
+	/* remove the blank ones */
+	for (i = nr_nodemap_entries - 1; i > 0; i--) {
+		if (early_node_map[i].nid != nid)
+			continue;
+		if (early_node_map[i].end_pfn)
+			continue;
+		/* we found it, get rid of it */
+		for (j = i; j < nr_nodemap_entries - 1; j++)
+			memcpy(&early_node_map[j], &early_node_map[j+1],
+				sizeof(early_node_map[j]));
+		j = nr_nodemap_entries - 1;
+		memset(&early_node_map[j], 0, sizeof(early_node_map[j]));
+		nr_nodemap_entries--;
+	}
 }
 
 /**
@@ -3611,7 +3669,7 @@ static void __init sort_node_map(void)
 }
 
 /* Find the lowest pfn for a node */
-unsigned long __init find_min_pfn_for_node(unsigned long nid)
+unsigned long __init find_min_pfn_for_node(int nid)
 {
 	int i;
 	unsigned long min_pfn = ULONG_MAX;
@@ -3622,7 +3680,7 @@ unsigned long __init find_min_pfn_for_node(unsigned long nid)
 
 	if (min_pfn == ULONG_MAX) {
 		printk(KERN_WARNING
-			"Could not find start_pfn for node %lu\n", nid);
+			"Could not find start_pfn for node %d\n", nid);
 		return 0;
 	}
 
@@ -3878,7 +3936,7 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
 	for (i = 0; i < MAX_NR_ZONES; i++) {
 		if (i == ZONE_MOVABLE)
 			continue;
-		printk(" %-8s %8lu -> %8lu\n",
+		printk(" %-8s %0#10lx -> %0#10lx\n",
 				zone_names[i],
 				arch_zone_lowest_possible_pfn[i],
 				arch_zone_highest_possible_pfn[i]);
@@ -3894,7 +3952,7 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
 	/* Print out the early_node_map[] */
 	printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
 	for (i = 0; i < nr_nodemap_entries; i++)
-		printk(" %3d: %8lu -> %8lu\n", early_node_map[i].nid,
+		printk(" %3d: %0#10lx -> %0#10lx\n", early_node_map[i].nid,
 						early_node_map[i].start_pfn,
 						early_node_map[i].end_pfn);
 
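
The printk changes in this file trade decimal PFNs for zero-padded hex, matching the hex style used for addresses elsewhere in boot output. A standalone sketch of the output difference, reusing the same format strings in ordinary userspace C (the values are arbitrary):

#include <stdio.h>

int main(void)
{
	unsigned long start = 4096, end = 65536;

	printf(" %8lu -> %8lu\n", start, end);       /* "     4096 ->    65536" */
	printf(" %0#10lx -> %0#10lx\n", start, end); /* " 0x00001000 -> 0x00010000" */
	return 0;
}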
diff --git a/mm/slub.c b/mm/slub.c
index 315c392253c7..5f6e2c4a2ba7 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -431,9 +431,8 @@ static void print_track(const char *s, struct track *t)
 	if (!t->addr)
 		return;
 
-	printk(KERN_ERR "INFO: %s in ", s);
-	__print_symbol("%s", (unsigned long)t->addr);
-	printk(" age=%lu cpu=%u pid=%d\n", jiffies - t->when, t->cpu, t->pid);
+	printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
+		s, t->addr, jiffies - t->when, t->cpu, t->pid);
 }
 
 static void print_tracking(struct kmem_cache *s, void *object)
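
Folding the three printk calls into one matters on SMP, where separate calls can interleave with other CPUs' messages; %pS additionally lets printk resolve the pointer through kallsyms, replacing the explicit __print_symbol() step. A minimal module-style sketch of the %pS specifier (standalone demo, unrelated to slub):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>

static int __init ps_demo_init(void)
{
	/* %pS prints symbol+offset/size for the address, e.g.
	 * "INFO: demo in ps_demo_init+0x0/0x20" (exact offset varies). */
	printk(KERN_INFO "INFO: demo in %pS\n", ps_demo_init);
	return 0;
}

static void __exit ps_demo_exit(void)
{
}

module_init(ps_demo_init);
module_exit(ps_demo_exit);
MODULE_LICENSE("GPL");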