author    Ingo Molnar <mingo@elte.hu>  2008-07-15 18:29:07 -0400
committer Ingo Molnar <mingo@elte.hu>  2008-07-15 18:29:07 -0400
commit    82638844d9a8581bbf33201cc209a14876eca167 (patch)
tree      961d7f9360194421a71aa644a9d0c176a960ce49 /mm
parent    9982fbface82893e77d211fbabfbd229da6bdde6 (diff)
parent    63cf13b77ab785e87c867defa8545e6d4a989774 (diff)
Merge branch 'linus' into cpus4096
Conflicts:
        arch/x86/xen/smp.c
        kernel/sched_rt.c
        net/iucv/iucv.c

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig            4
-rw-r--r--  mm/filemap.c          3
-rw-r--r--  mm/mprotect.c        10
-rw-r--r--  mm/page-writeback.c  13
-rw-r--r--  mm/page_alloc.c      96
-rw-r--r--  mm/slab.c            18
-rw-r--r--  mm/slub.c            14
7 files changed, 108 insertions(+), 50 deletions(-)
diff --git a/mm/Kconfig b/mm/Kconfig
index 3aa819d628c1..c4de85285bb4 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -129,7 +129,7 @@ config MEMORY_HOTPLUG
         bool "Allow for memory hot-add"
         depends on SPARSEMEM || X86_64_ACPI_NUMA
         depends on HOTPLUG && !HIBERNATION && ARCH_ENABLE_MEMORY_HOTPLUG
-        depends on (IA64 || X86 || PPC64 || SUPERH)
+        depends on (IA64 || X86 || PPC64 || SUPERH || S390)
 
 comment "Memory hotplug is currently incompatible with Software Suspend"
         depends on SPARSEMEM && HOTPLUG && HIBERNATION
@@ -199,7 +199,7 @@ config BOUNCE
 config NR_QUICK
         int
         depends on QUICKLIST
-        default "2" if SUPERH
+        default "2" if SUPERH || AVR32
         default "1"
 
 config VIRT_TO_BUS
diff --git a/mm/filemap.c b/mm/filemap.c
index 1e6a7d34874f..65d9d9e2b755 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -236,11 +236,12 @@ int filemap_fdatawrite(struct address_space *mapping)
 }
 EXPORT_SYMBOL(filemap_fdatawrite);
 
-static int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
+int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
                                 loff_t end)
 {
         return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
 }
+EXPORT_SYMBOL(filemap_fdatawrite_range);
 
 /**
  * filemap_flush - mostly a non-blocking flush
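
Note: the hunk above drops the static qualifier from filemap_fdatawrite_range() and exports it, so filesystems and modules can force out a byte range of a mapping. A minimal usage sketch (the inode, pos and len are hypothetical):

        /* Flush dirty pagecache for bytes [pos, pos + len) of a file;
         * the end argument is an inclusive byte offset. */
        int err = filemap_fdatawrite_range(inode->i_mapping, pos,
                                           pos + len - 1);
        if (err)
                return err;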
diff --git a/mm/mprotect.c b/mm/mprotect.c
index a5bf31c27375..acfe7c8d72fc 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -47,19 +47,17 @@ static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
                 if (pte_present(oldpte)) {
                         pte_t ptent;
 
-                        /* Avoid an SMP race with hardware updated dirty/clean
-                         * bits by wiping the pte and then setting the new pte
-                         * into place.
-                         */
-                        ptent = ptep_get_and_clear(mm, addr, pte);
+                        ptent = ptep_modify_prot_start(mm, addr, pte);
                         ptent = pte_modify(ptent, newprot);
+
                         /*
                          * Avoid taking write faults for pages we know to be
                          * dirty.
                          */
                         if (dirty_accountable && pte_dirty(ptent))
                                 ptent = pte_mkwrite(ptent);
-                        set_pte_at(mm, addr, pte, ptent);
+
+                        ptep_modify_prot_commit(mm, addr, pte, ptent);
 #ifdef CONFIG_MIGRATION
                 } else if (!pte_file(oldpte)) {
                         swp_entry_t entry = pte_to_swp_entry(oldpte);
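
The ptep_modify_prot_start()/ptep_modify_prot_commit() pair brackets a read-modify-write of a live pte so that a paravirtualized MMU (e.g. Xen) can batch or special-case the update. On native hardware the pair is expected to collapse to the old sequence; a sketch of that generic fallback, assuming no paravirt hooks:

        /* Start behaves like ptep_get_and_clear(): wiping the pte closes the
         * SMP race with hardware dirty/accessed bit updates mentioned in the
         * deleted comment. Commit is a plain set_pte_at(). */
        static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
                                                   unsigned long addr,
                                                   pte_t *ptep)
        {
                return ptep_get_and_clear(mm, addr, ptep);
        }

        static inline void ptep_modify_prot_commit(struct mm_struct *mm,
                                                   unsigned long addr,
                                                   pte_t *ptep, pte_t pte)
        {
                set_pte_at(mm, addr, ptep, pte);
        }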
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 789b6adbef37..94c6d8988ab3 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -126,8 +126,6 @@ static void background_writeout(unsigned long _min_pages);
 static struct prop_descriptor vm_completions;
 static struct prop_descriptor vm_dirties;
 
-static unsigned long determine_dirtyable_memory(void);
-
 /*
  * couple the period to the dirty_ratio:
  *
@@ -347,7 +345,13 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
 #endif
 }
 
-static unsigned long determine_dirtyable_memory(void)
+/**
+ * determine_dirtyable_memory - amount of memory that may be used
+ *
+ * Returns the number of pages that can currently be freed and used
+ * by the kernel for direct mappings.
+ */
+unsigned long determine_dirtyable_memory(void)
 {
         unsigned long x;
 
@@ -956,6 +960,9 @@ retry:
         }
         if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
                 mapping->writeback_index = index;
+
+        if (wbc->range_cont)
+                wbc->range_start = index << PAGE_CACHE_SHIFT;
         return ret;
 }
 EXPORT_SYMBOL(write_cache_pages);
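
With wbc->range_cont set, write_cache_pages() now stores where its scan stopped back into wbc->range_start, so a follow-up call can continue from that point instead of rescanning the whole range. A hedged sketch of the calling pattern (the mapping and my_writepage callback are placeholders):

        struct writeback_control wbc = {
                .sync_mode   = WB_SYNC_ALL,
                .range_start = 0,
                .range_end   = LLONG_MAX,
                .range_cont  = 1,
                .nr_to_write = 64,
        };

        write_cache_pages(mapping, &wbc, my_writepage, mapping);
        /* ...prepare more data, then resume from wbc.range_start... */
        wbc.nr_to_write = 64;
        write_cache_pages(mapping, &wbc, my_writepage, mapping);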
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f32fae3121f0..79ac4afc908c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -918,7 +918,7 @@ void drain_local_pages(void *arg)
  */
 void drain_all_pages(void)
 {
-        on_each_cpu(drain_local_pages, NULL, 0, 1);
+        on_each_cpu(drain_local_pages, NULL, 1);
 }
 
 #ifdef CONFIG_HIBERNATION
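
This hunk (and matching ones in mm/slab.c and mm/slub.c below) tracks an on_each_cpu() API change from the merged branch: the unused 'retry' argument was dropped, leaving

        /* func runs on every online CPU with info as its argument;
         * wait=1 blocks until all CPUs have completed the call. */
        int on_each_cpu(void (*func)(void *info), void *info, int wait);

so existing callers simply lose the third argument.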
@@ -2929,6 +2929,18 @@ void __init free_bootmem_with_active_regions(int nid,
         }
 }
 
+void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
+{
+        int i;
+        int ret;
+
+        for_each_active_range_index_in_nid(i, nid) {
+                ret = work_fn(early_node_map[i].start_pfn,
+                              early_node_map[i].end_pfn, data);
+                if (ret)
+                        break;
+        }
+}
 /**
  * sparse_memory_present_with_active_regions - Call memory_present for each active range
  * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
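
work_with_active_regions() hands every registered PFN range on a node to a callback and stops at the first nonzero return. A minimal sketch of a caller; the callback and its page budget are hypothetical:

        /* Hypothetical work_fn: tally pages until a budget is exceeded. */
        static int __init count_range(unsigned long start_pfn,
                                      unsigned long end_pfn, void *data)
        {
                unsigned long *pages = data;

                *pages += end_pfn - start_pfn;
                return *pages > MY_BUDGET_PAGES;  /* nonzero ends the walk */
        }

        unsigned long pages = 0;
        work_with_active_regions(nid, count_range, &pages);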
@@ -3461,6 +3473,11 @@ void __paginginit free_area_init_node(int nid, struct pglist_data *pgdat,
         calculate_node_totalpages(pgdat, zones_size, zholes_size);
 
         alloc_node_mem_map(pgdat);
+#ifdef CONFIG_FLAT_NODE_MEM_MAP
+        printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
+                nid, (unsigned long)pgdat,
+                (unsigned long)pgdat->node_mem_map);
+#endif
 
         free_area_init_core(pgdat, zones_size, zholes_size);
 }
@@ -3503,7 +3520,7 @@ void __init add_active_range(unsigned int nid, unsigned long start_pfn,
 {
         int i;
 
-        printk(KERN_DEBUG "Entering add_active_range(%d, %lu, %lu) "
+        printk(KERN_DEBUG "Entering add_active_range(%d, %#lx, %#lx) "
                         "%d entries of %d used\n",
                         nid, start_pfn, end_pfn,
                         nr_nodemap_entries, MAX_ACTIVE_REGIONS);
@@ -3547,27 +3564,68 @@ void __init add_active_range(unsigned int nid, unsigned long start_pfn,
 }
 
 /**
- * shrink_active_range - Shrink an existing registered range of PFNs
+ * remove_active_range - Shrink an existing registered range of PFNs
  * @nid: The node id the range is on that should be shrunk
- * @old_end_pfn: The old end PFN of the range
- * @new_end_pfn: The new PFN of the range
+ * @start_pfn: The new PFN of the range
+ * @end_pfn: The new PFN of the range
  *
  * i386 with NUMA use alloc_remap() to store a node_mem_map on a local node.
- * The map is kept at the end physical page range that has already been
- * registered with add_active_range(). This function allows an arch to shrink
- * an existing registered range.
+ * The map is kept near the end physical page range that has already been
+ * registered. This function allows an arch to shrink an existing registered
+ * range.
  */
-void __init shrink_active_range(unsigned int nid, unsigned long old_end_pfn,
-                                                unsigned long new_end_pfn)
+void __init remove_active_range(unsigned int nid, unsigned long start_pfn,
+                                unsigned long end_pfn)
 {
-        int i;
+        int i, j;
+        int removed = 0;
+
+        printk(KERN_DEBUG "remove_active_range (%d, %lu, %lu)\n",
+                          nid, start_pfn, end_pfn);
 
         /* Find the old active region end and shrink */
-        for_each_active_range_index_in_nid(i, nid)
-                if (early_node_map[i].end_pfn == old_end_pfn) {
-                        early_node_map[i].end_pfn = new_end_pfn;
-                        break;
+        for_each_active_range_index_in_nid(i, nid) {
+                if (early_node_map[i].start_pfn >= start_pfn &&
+                    early_node_map[i].end_pfn <= end_pfn) {
+                        /* clear it */
+                        early_node_map[i].start_pfn = 0;
+                        early_node_map[i].end_pfn = 0;
+                        removed = 1;
+                        continue;
+                }
+                if (early_node_map[i].start_pfn < start_pfn &&
+                    early_node_map[i].end_pfn > start_pfn) {
+                        unsigned long temp_end_pfn = early_node_map[i].end_pfn;
+                        early_node_map[i].end_pfn = start_pfn;
+                        if (temp_end_pfn > end_pfn)
+                                add_active_range(nid, end_pfn, temp_end_pfn);
+                        continue;
                 }
+                if (early_node_map[i].start_pfn >= start_pfn &&
+                    early_node_map[i].end_pfn > end_pfn &&
+                    early_node_map[i].start_pfn < end_pfn) {
+                        early_node_map[i].start_pfn = end_pfn;
+                        continue;
+                }
+        }
+
+        if (!removed)
+                return;
+
+        /* remove the blank ones */
+        for (i = nr_nodemap_entries - 1; i > 0; i--) {
+                if (early_node_map[i].nid != nid)
+                        continue;
+                if (early_node_map[i].end_pfn)
+                        continue;
+                /* we found it, get rid of it */
+                for (j = i; j < nr_nodemap_entries - 1; j++)
+                        memcpy(&early_node_map[j], &early_node_map[j+1],
+                                sizeof(early_node_map[j]));
+                j = nr_nodemap_entries - 1;
+                memset(&early_node_map[j], 0, sizeof(early_node_map[j]));
+                nr_nodemap_entries--;
+        }
 }
 
 /**
@@ -3611,7 +3669,7 @@ static void __init sort_node_map(void)
 }
 
 /* Find the lowest pfn for a node */
-unsigned long __init find_min_pfn_for_node(unsigned long nid)
+unsigned long __init find_min_pfn_for_node(int nid)
 {
         int i;
         unsigned long min_pfn = ULONG_MAX;
@@ -3622,7 +3680,7 @@ unsigned long __init find_min_pfn_for_node(unsigned long nid)
 
         if (min_pfn == ULONG_MAX) {
                 printk(KERN_WARNING
-                        "Could not find start_pfn for node %lu\n", nid);
+                        "Could not find start_pfn for node %d\n", nid);
                 return 0;
         }
 
@@ -3878,7 +3936,7 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
         for (i = 0; i < MAX_NR_ZONES; i++) {
                 if (i == ZONE_MOVABLE)
                         continue;
-                printk(" %-8s %8lu -> %8lu\n",
+                printk(" %-8s %0#10lx -> %0#10lx\n",
                                 zone_names[i],
                                 arch_zone_lowest_possible_pfn[i],
                                 arch_zone_highest_possible_pfn[i]);
@@ -3894,7 +3952,7 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
         /* Print out the early_node_map[] */
         printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
         for (i = 0; i < nr_nodemap_entries; i++)
-                printk(" %3d: %8lu -> %8lu\n", early_node_map[i].nid,
+                printk(" %3d: %0#10lx -> %0#10lx\n", early_node_map[i].nid,
                         early_node_map[i].start_pfn,
                         early_node_map[i].end_pfn);
 
diff --git a/mm/slab.c b/mm/slab.c
index 046607f05f3e..052e7d64537e 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1901,15 +1901,7 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp)
 #endif
 
 #if DEBUG
-/**
- * slab_destroy_objs - destroy a slab and its objects
- * @cachep: cache pointer being destroyed
- * @slabp: slab pointer being destroyed
- *
- * Call the registered destructor for each object in a slab that is being
- * destroyed.
- */
-static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
+static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp)
 {
         int i;
         for (i = 0; i < cachep->num; i++) {
@@ -1938,7 +1930,7 @@ static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
         }
 }
 #else
-static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
+static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp)
 {
 }
 #endif
@@ -1956,7 +1948,7 @@ static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
 {
         void *addr = slabp->s_mem - slabp->colouroff;
 
-        slab_destroy_objs(cachep, slabp);
+        slab_destroy_debugcheck(cachep, slabp);
         if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
                 struct slab_rcu *slab_rcu;
 
@@ -2454,7 +2446,7 @@ static void drain_cpu_caches(struct kmem_cache *cachep)
         struct kmem_list3 *l3;
         int node;
 
-        on_each_cpu(do_drain, cachep, 1, 1);
+        on_each_cpu(do_drain, cachep, 1);
         check_irq_on();
         for_each_online_node(node) {
                 l3 = cachep->nodelists[node];
@@ -3939,7 +3931,7 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
         }
         new->cachep = cachep;
 
-        on_each_cpu(do_ccupdate_local, (void *)new, 1, 1);
+        on_each_cpu(do_ccupdate_local, (void *)new, 1);
 
         check_irq_on();
         cachep->batchcount = batchcount;
diff --git a/mm/slub.c b/mm/slub.c
index 1a427c0ae83b..35ab38a94b46 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -411,7 +411,7 @@ static void set_track(struct kmem_cache *s, void *object,
         if (addr) {
                 p->addr = addr;
                 p->cpu = smp_processor_id();
-                p->pid = current ? current->pid : -1;
+                p->pid = current->pid;
                 p->when = jiffies;
         } else
                 memset(p, 0, sizeof(struct track));
@@ -431,9 +431,8 @@ static void print_track(const char *s, struct track *t)
         if (!t->addr)
                 return;
 
-        printk(KERN_ERR "INFO: %s in ", s);
-        __print_symbol("%s", (unsigned long)t->addr);
-        printk(" age=%lu cpu=%u pid=%d\n", jiffies - t->when, t->cpu, t->pid);
+        printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
+                s, t->addr, jiffies - t->when, t->cpu, t->pid);
 }
 
 static void print_tracking(struct kmem_cache *s, void *object)
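
The new print_track() leans on printk's %pS extension, which formats a code address as symbol+offset and so folds the old three-call __print_symbol() sequence into one line. For instance:

        /* %pS resolves a text address, e.g. "kfree+0x12/0x90" */
        printk(KERN_INFO "called from %pS\n", __builtin_return_address(0));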
@@ -1497,7 +1496,7 @@ static void flush_cpu_slab(void *d)
 static void flush_all(struct kmem_cache *s)
 {
 #ifdef CONFIG_SMP
-        on_each_cpu(flush_cpu_slab, s, 1, 1);
+        on_each_cpu(flush_cpu_slab, s, 1);
 #else
         unsigned long flags;
 
@@ -1628,9 +1627,11 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
         void **object;
         struct kmem_cache_cpu *c;
         unsigned long flags;
+        unsigned int objsize;
 
         local_irq_save(flags);
         c = get_cpu_slab(s, smp_processor_id());
+        objsize = c->objsize;
         if (unlikely(!c->freelist || !node_match(c, node)))
 
                 object = __slab_alloc(s, gfpflags, node, addr, c);
@@ -1643,7 +1644,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
         local_irq_restore(flags);
 
         if (unlikely((gfpflags & __GFP_ZERO) && object))
-                memset(object, 0, c->objsize);
+                memset(object, 0, objsize);
 
         return object;
 }
@@ -2765,6 +2766,7 @@ void kfree(const void *x)
 
         page = virt_to_head_page(x);
         if (unlikely(!PageSlab(page))) {
+                BUG_ON(!PageCompound(page));
                 put_page(page);
                 return;
         }