author     Russell King <rmk@dyn-67.arm.linux.org.uk>    2009-05-29 05:04:24 -0400
committer  Russell King <rmk+kernel@arm.linux.org.uk>    2009-05-29 05:04:24 -0400
commit     42f1d2e06a25628ae4ceaadccc4fa67c7787e2b4 (patch)
tree       8aac83e399de7ad13748a59853a0feea9ed2c5cb /mm
parent     9af5324a070e0b2dcb6e22c89e17424eba245fcc (diff)
parent     77bbca138c64cb80259732db6f70e1668123f2a7 (diff)
Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/khilman/linux-davinci into devel
Diffstat (limited to 'mm')
-rw-r--r--   mm/mmzone.c           15
-rw-r--r--   mm/page-writeback.c    6
-rw-r--r--   mm/rmap.c              2
-rw-r--r--   mm/slob.c              5
-rw-r--r--   mm/slub.c              6
-rw-r--r--   mm/vmstat.c           19
6 files changed, 32 insertions, 21 deletions
diff --git a/mm/mmzone.c b/mm/mmzone.c
index 16ce8b955dcf..f5b7d1760213 100644
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -6,6 +6,7 @@
 
 
 #include <linux/stddef.h>
+#include <linux/mm.h>
 #include <linux/mmzone.h>
 #include <linux/module.h>
 
@@ -72,3 +73,17 @@ struct zoneref *next_zones_zonelist(struct zoneref *z,
 	*zone = zonelist_zone(z);
 	return z;
 }
+
+#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
+int memmap_valid_within(unsigned long pfn,
+			struct page *page, struct zone *zone)
+{
+	if (page_to_pfn(page) != pfn)
+		return 0;
+
+	if (page_zone(page) != zone)
+		return 0;
+
+	return 1;
+}
+#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
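
The memmap_valid_within() helper added above gives PFN walkers a cheap way to confirm that a struct page obtained from pfn_to_page() really matches the pfn and the zone being scanned, even on architectures that free the memmap backing holes. A minimal sketch of a caller follows; walk_zone_pfns() is a hypothetical illustration, not code from this merge, and assumes the usual pfn_valid()/pfn_to_page() helpers plus the zone_start_pfn/spanned_pages fields of struct zone.

#include <linux/mm.h>
#include <linux/mmzone.h>

static void walk_zone_pfns(struct zone *zone)
{
	unsigned long pfn;
	unsigned long end = zone->zone_start_pfn + zone->spanned_pages;

	for (pfn = zone->zone_start_pfn; pfn < end; pfn++) {
		struct page *page;

		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);
		/* a freed memmap hole can leave stale zone linkage */
		if (!memmap_valid_within(pfn, page, zone))
			continue;

		/* the page can be inspected safely from here on */
	}
}
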
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 30351f0063ac..bb553c3e955d 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -94,12 +94,12 @@ unsigned long vm_dirty_bytes;
 /*
  * The interval between `kupdate'-style writebacks
  */
-unsigned int dirty_writeback_interval = 5 * 100; /* sentiseconds */
+unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */
 
 /*
  * The longest time for which data is allowed to remain dirty
  */
-unsigned int dirty_expire_interval = 30 * 100; /* sentiseconds */
+unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */
 
 /*
  * Flag that makes the machine dump writes/reads and block dirtyings.
@@ -770,7 +770,7 @@ static void wb_kupdate(unsigned long arg)
 
 	sync_supers();
 
-	oldest_jif = jiffies - msecs_to_jiffies(dirty_expire_interval);
+	oldest_jif = jiffies - msecs_to_jiffies(dirty_expire_interval * 10);
 	start_jif = jiffies;
 	next_jif = start_jif + msecs_to_jiffies(dirty_writeback_interval * 10);
 	nr_to_write = global_page_state(NR_FILE_DIRTY) +
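
Both intervals above are stored in centiseconds, while msecs_to_jiffies() takes milliseconds, so the expiry cutoff in wb_kupdate() has to scale by 10 just like the writeback interval on the following line; with the default dirty_expire_interval of 30 * 100 the old code expired data after roughly 3 seconds instead of the intended 30. A small sketch of the corrected conversion (illustration only, the helper name is made up):

#include <linux/jiffies.h>

/* centiseconds -> milliseconds -> jiffies, as wb_kupdate() now does */
static unsigned long expire_cutoff(unsigned int expire_centisecs)
{
	return jiffies - msecs_to_jiffies(expire_centisecs * 10);
}
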
diff --git a/mm/rmap.c b/mm/rmap.c
index 16521664010d..23122af32611 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -14,7 +14,7 @@
  * Original design by Rik van Riel <riel@conectiva.com.br> 2001
  * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
  * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
- * Contributions by Hugh Dickins <hugh@veritas.com> 2003, 2004
+ * Contributions by Hugh Dickins 2003, 2004
  */
 
 /*
diff --git a/mm/slob.c b/mm/slob.c
index a2d4ab32198d..f92e66d558bd 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -60,6 +60,7 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
+#include <linux/swap.h> /* struct reclaim_state */
 #include <linux/cache.h>
 #include <linux/init.h>
 #include <linux/module.h>
@@ -255,6 +256,8 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
 
 static void slob_free_pages(void *b, int order)
 {
+	if (current->reclaim_state)
+		current->reclaim_state->reclaimed_slab += 1 << order;
 	free_pages((unsigned long)b, order);
 }
 
@@ -407,7 +410,7 @@ static void slob_free(void *block, int size)
 		spin_unlock_irqrestore(&slob_lock, flags);
 		clear_slob_page(sp);
 		free_slob_page(sp);
-		free_page((unsigned long)b);
+		slob_free_pages(b, 0);
 		return;
 	}
 
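
With these slob.c changes, whole pages released by SLOB are credited to current->reclaim_state->reclaimed_slab whenever a reclaim_state is attached to the task, and slob_free() funnels its whole-page case through slob_free_pages() so the accounting is not bypassed. A hedged sketch of the consumer side follows; count_slab_reclaim() is hypothetical and the shrinker invocation is elided, it only illustrates how struct reclaim_state from <linux/swap.h> is meant to be attached and read back.

#include <linux/swap.h>
#include <linux/sched.h>

static unsigned long count_slab_reclaim(void)
{
	struct reclaim_state reclaim_state = { .reclaimed_slab = 0 };

	current->reclaim_state = &reclaim_state;
	/* ... run shrinkers here; slab pages freed on this task's
	 * behalf bump reclaimed_slab via slob_free_pages() et al ... */
	current->reclaim_state = NULL;

	return reclaim_state.reclaimed_slab;
}
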
diff --git a/mm/slub.c b/mm/slub.c
index 7ab54ecbd3f3..65ffda5934b0 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -9,6 +9,7 @@
  */
 
 #include <linux/mm.h>
+#include <linux/swap.h> /* struct reclaim_state */
 #include <linux/module.h>
 #include <linux/bit_spinlock.h>
 #include <linux/interrupt.h>
@@ -1170,6 +1171,8 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 
 	__ClearPageSlab(page);
 	reset_page_mapcount(page);
+	if (current->reclaim_state)
+		current->reclaim_state->reclaimed_slab += pages;
 	__free_pages(page, order);
 }
 
@@ -1909,7 +1912,7 @@ static inline int calculate_order(int size)
 	 * Doh this slab cannot be placed using slub_max_order.
 	 */
 	order = slab_order(size, 1, MAX_ORDER, 1);
-	if (order <= MAX_ORDER)
+	if (order < MAX_ORDER)
 		return order;
 	return -ENOSYS;
 }
@@ -2522,6 +2525,7 @@ __setup("slub_min_order=", setup_slub_min_order);
 static int __init setup_slub_max_order(char *str)
 {
 	get_option(&str, &slub_max_order);
+	slub_max_order = min(slub_max_order, MAX_ORDER - 1);
 
 	return 1;
 }
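
The two order-related slub.c hunks guard the same boundary: MAX_ORDER is an exclusive limit, so the largest order the page allocator can satisfy is MAX_ORDER - 1 (with the common MAX_ORDER of 11 and 4 KB pages that is order 10, i.e. 4 MB). The old `order <= MAX_ORDER` test and an unclamped slub_max_order= boot parameter could both let through an order the buddy allocator can never provide. A tiny sketch of the invariant (hypothetical helper, values assumed):

#include <linux/mmzone.h>	/* MAX_ORDER */
#include <linux/types.h>

/* valid buddy-allocator orders are 0 .. MAX_ORDER - 1 */
static bool order_is_allocatable(int order)
{
	return order >= 0 && order < MAX_ORDER;
}
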
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 66f6130976cb..74d66dba0cbe 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -509,22 +509,11 @@ static void pagetypeinfo_showblockcount_print(struct seq_file *m,
 			continue;
 
 		page = pfn_to_page(pfn);
-#ifdef CONFIG_ARCH_FLATMEM_HAS_HOLES
-		/*
-		 * Ordinarily, memory holes in flatmem still have a valid
-		 * memmap for the PFN range. However, an architecture for
-		 * embedded systems (e.g. ARM) can free up the memmap backing
-		 * holes to save memory on the assumption the memmap is
-		 * never used. The page_zone linkages are then broken even
-		 * though pfn_valid() returns true. Skip the page if the
-		 * linkages are broken. Even if this test passed, the impact
-		 * is that the counters for the movable type are off but
-		 * fragmentation monitoring is likely meaningless on small
-		 * systems.
-		 */
-		if (page_zone(page) != zone)
-			continue;
-#endif
+
+		/* Watch for unexpected holes punched in the memmap */
+		if (!memmap_valid_within(pfn, page, zone))
+			continue;
+
 		mtype = get_pageblock_migratetype(page);
 
 		if (mtype < MIGRATE_TYPES)
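
The vmstat.c hunk drops the open-coded check that only existed under CONFIG_ARCH_FLATMEM_HAS_HOLES and calls the new helper unconditionally. For kernels without CONFIG_ARCH_HAS_HOLES_MEMORYMODEL the call presumably collapses to a trivial stub in the mmzone header (outside the 'mm' diffstat shown here); a sketch of what that counterpart would look like:

#ifndef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
static inline int memmap_valid_within(unsigned long pfn,
				struct page *page, struct zone *zone)
{
	return 1;	/* the memmap is always fully populated */
}
#endif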