author		Ingo Molnar <mingo@elte.hu>	2009-04-29 08:46:59 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-04-29 08:47:05 -0400
commit		e7fd5d4b3d240f42c30a9e3d20a4689c4d3a795a (patch)
tree		4ba588631dd8189a818a91c9e3976526071178b6 /mm
parent		1130b0296184bc21806225fd06d533515a99d2db (diff)
parent		56a50adda49b2020156616c4eb15353e0f9ad7de (diff)
Merge branch 'linus' into perfcounters/core
Merge reason: This branch was on -rc1, refresh it to almost-rc4 to pick up
the latest upstream fixes.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'mm')
-rw-r--r--	mm/Kconfig	|  2
-rw-r--r--	mm/filemap.c	|  5
-rw-r--r--	mm/memcontrol.c	|  2
-rw-r--r--	mm/mmap.c	|  2
-rw-r--r--	mm/shmem.c	| 27
-rw-r--r--	mm/util.c	| 16
-rw-r--r--	mm/vmscan.c	| 17
7 files changed, 55 insertions, 16 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
index b53427ad30a3..57971d2ab848 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -213,6 +213,8 @@ config UNEVICTABLE_LRU
 	  will use one page flag and increase the code size a little,
 	  say Y unless you know what you are doing.
 
+	  See Documentation/vm/unevictable-lru.txt for more information.
+
 config HAVE_MLOCK
 	bool
 	default y if MMU=y
diff --git a/mm/filemap.c b/mm/filemap.c
index 2e2d38ebda4b..379ff0bcbf6e 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -441,6 +441,7 @@ int filemap_write_and_wait_range(struct address_space *mapping,
 	}
 	return err;
 }
+EXPORT_SYMBOL(filemap_write_and_wait_range);
 
 /**
  * add_to_page_cache_locked - add a locked page to the pagecache
@@ -567,8 +568,8 @@ EXPORT_SYMBOL(wait_on_page_bit);
 
 /**
  * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
- * @page - Page defining the wait queue of interest
- * @waiter - Waiter to add to the queue
+ * @page: Page defining the wait queue of interest
+ * @waiter: Waiter to add to the queue
  *
  * Add an arbitrary @waiter to the wait queue for the nominated @page.
  */
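[Note: the kernel-doc fixed above describes add_page_wait_queue(), which lets a caller hook its own waiter onto a page's wait queue. A minimal sketch of the intended usage pattern, assuming the wait-queue API of this kernel generation; the page_monitor/page_monitor_wake names are hypothetical and not part of this diff:

#include <linux/pagemap.h>
#include <linux/wait.h>

struct page_monitor {
	wait_queue_t wait;		/* waiter hooked onto the page's queue */
	struct page *page;		/* page being watched */
};

/* Called by wake_up_page() when a bit on the watched page is cleared. */
static int page_monitor_wake(wait_queue_t *wait, unsigned mode,
			     int sync, void *key)
{
	struct page_monitor *mon = container_of(wait, struct page_monitor, wait);

	/* ... notify whoever is interested in mon->page ... */
	list_del_init(&wait->task_list);	/* one-shot: drop off the queue */
	return 1;
}

static void watch_page(struct page_monitor *mon, struct page *page)
{
	mon->page = page;
	init_waitqueue_func_entry(&mon->wait, page_monitor_wake);
	add_page_wait_queue(page, &mon->wait);
}

The custom wake function is what distinguishes this from wait_on_page_bit(): the caller gets a callback instead of sleeping.]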
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 2fc6d6c48238..e44fb0fbb80e 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -932,7 +932,7 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
 	if (unlikely(!mem))
 		return 0;
 
-	VM_BUG_ON(mem_cgroup_is_obsolete(mem));
+	VM_BUG_ON(!mem || mem_cgroup_is_obsolete(mem));
 
 	while (1) {
 		int ret;
diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1579,7 +1579,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
 	 * Overcommit..  This must be the final test, as it will
 	 * update security statistics.
 	 */
-	if (security_vm_enough_memory(grow))
+	if (security_vm_enough_memory_mm(mm, grow))
 		return -ENOMEM;
 
 	/* Ok, everything looks good - let it rip */
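[Note: the one-line change switches from the helper that implicitly charges current->mm to the variant taking an explicit mm, so stack-growth accounting is charged to the mm owning the VMA even when the growth is driven from another task's context. For reference, a sketch of the two prototypes as assumed from include/linux/security.h of this era, not part of this diff:

/* Charges current->mm: */
extern int security_vm_enough_memory(long pages);
/* Charges the mm passed in explicitly: */
extern int security_vm_enough_memory_mm(struct mm_struct *mm, long pages);]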
diff --git a/mm/shmem.c b/mm/shmem.c
index d94d2e9146bc..f9cb20ebb990 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -24,6 +24,7 @@
 #include <linux/init.h>
 #include <linux/vfs.h>
 #include <linux/mount.h>
+#include <linux/pagemap.h>
 #include <linux/file.h>
 #include <linux/mm.h>
 #include <linux/module.h>
@@ -43,7 +44,6 @@ static struct vfsmount *shm_mnt;
 #include <linux/exportfs.h>
 #include <linux/generic_acl.h>
 #include <linux/mman.h>
-#include <linux/pagemap.h>
 #include <linux/string.h>
 #include <linux/slab.h>
 #include <linux/backing-dev.h>
@@ -65,13 +65,28 @@ static struct vfsmount *shm_mnt;
 #include <asm/div64.h>
 #include <asm/pgtable.h>
 
+/*
+ * The maximum size of a shmem/tmpfs file is limited by the maximum size of
+ * its triple-indirect swap vector - see illustration at shmem_swp_entry().
+ *
+ * With 4kB page size, maximum file size is just over 2TB on a 32-bit kernel,
+ * but one eighth of that on a 64-bit kernel.  With 8kB page size, maximum
+ * file size is just over 4TB on a 64-bit kernel, but 16TB on a 32-bit kernel,
+ * MAX_LFS_FILESIZE being then more restrictive than swap vector layout.
+ *
+ * We use / and * instead of shifts in the definitions below, so that the swap
+ * vector can be tested with small even values (e.g. 20) for ENTRIES_PER_PAGE.
+ */
 #define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
-#define ENTRIES_PER_PAGEPAGE (ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
-#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
+#define ENTRIES_PER_PAGEPAGE ((unsigned long long)ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
 
-#define SHMEM_MAX_INDEX (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
-#define SHMEM_MAX_BYTES ((unsigned long long)SHMEM_MAX_INDEX << PAGE_CACHE_SHIFT)
+#define SHMSWP_MAX_INDEX (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
+#define SHMSWP_MAX_BYTES (SHMSWP_MAX_INDEX << PAGE_CACHE_SHIFT)
 
+#define SHMEM_MAX_BYTES  min_t(unsigned long long, SHMSWP_MAX_BYTES, MAX_LFS_FILESIZE)
+#define SHMEM_MAX_INDEX  ((unsigned long)((SHMEM_MAX_BYTES+1) >> PAGE_CACHE_SHIFT))
+
+#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
 #define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)
 
 /* info->flags needs VM_flags to handle pagein/truncate races efficiently */
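[Note: a quick userspace check of the arithmetic in the new comment. This sketch assumes 4kB pages and SHMEM_NR_DIRECT == 16, the usual value in include/linux/shmem_fs.h of this era:

#include <stdio.h>

int main(void)
{
	unsigned long long page = 4096;		/* PAGE_CACHE_SIZE */
	unsigned long long nr_direct = 16;	/* SHMEM_NR_DIRECT (assumed) */
	unsigned long long ptrs[] = { 4, 8 };	/* sizeof(unsigned long), 32/64-bit */

	for (int i = 0; i < 2; i++) {
		unsigned long long epp = page / ptrs[i];	/* ENTRIES_PER_PAGE */
		/* SHMSWP_MAX_INDEX from the diff above: */
		unsigned long long idx = nr_direct + (epp * epp / 2) * (epp + 1);
		printf("%llu-bit: max index %llu pages = %.2f TB\n",
		       ptrs[i] * 8, idx, (double)(idx * page) / 1e12);
	}
	return 0;
}

This reproduces the quoted limits: about 2.20TB with 32-bit pointers ("just over 2TB") and about 0.28TB with 64-bit pointers (one eighth, since ENTRIES_PER_PAGE halves and the index scales roughly with its cube).]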
@@ -2581,7 +2596,7 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
 #define shmem_get_inode(sb, mode, dev, flags)	ramfs_get_inode(sb, mode, dev)
 #define shmem_acct_size(flags, size)		0
 #define shmem_unacct_size(flags, size)		do {} while (0)
-#define SHMEM_MAX_BYTES				LLONG_MAX
+#define SHMEM_MAX_BYTES				MAX_LFS_FILESIZE
 
 #endif /* CONFIG_SHMEM */
 
diff --git a/mm/util.c b/mm/util.c
--- a/mm/util.c
+++ b/mm/util.c
@@ -223,6 +223,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 }
 #endif
 
+/**
+ * get_user_pages_fast() - pin user pages in memory
+ * @start:	starting user address
+ * @nr_pages:	number of pages from start to pin
+ * @write:	whether pages will be written to
+ * @pages:	array that receives pointers to the pages pinned.
+ *		Should be at least nr_pages long.
+ *
+ * Attempt to pin user pages in memory without taking mm->mmap_sem.
+ * If not successful, it will fall back to taking the lock and
+ * calling get_user_pages().
+ *
+ * Returns number of pages pinned. This may be fewer than the number
+ * requested. If nr_pages is 0 or negative, returns 0. If no pages
+ * were pinned, returns -errno.
+ */
 int __attribute__((weak)) get_user_pages_fast(unsigned long start,
 				int nr_pages, int write, struct page **pages)
 {
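[Note: a sketch of the call pattern the new kernel-doc describes; the pin_user_buffer caller is hypothetical, and error handling is abbreviated:

static int pin_user_buffer(unsigned long uaddr, int nr_pages,
			   struct page **pages)
{
	int pinned, i;

	/* Try the fast, mmap_sem-free path; falls back internally. */
	pinned = get_user_pages_fast(uaddr, nr_pages, 1 /* write */, pages);
	if (pinned < 0)
		return pinned;		/* no pages pinned: -errno */

	/* ... use pages[0..pinned-1], e.g. as a DMA scatter list ... */

	for (i = 0; i < pinned; i++)
		put_page(pages[i]);	/* drop the references gup took */
	return pinned;
}

Note the partial-success case in the doc: the caller must cope with pinned < nr_pages.]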
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 39fdfb14eeaa..eac9577941f9 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -63,6 +63,9 @@ struct scan_control {
 	/* Can mapped pages be reclaimed? */
 	int may_unmap;
 
+	/* Can pages be swapped as part of reclaim? */
+	int may_swap;
+
 	/* This context's SWAP_CLUSTER_MAX. If freeing memory for
 	 * suspend, we effectively ignore SWAP_CLUSTER_MAX.
 	 * In this context, it doesn't matter that we scan the
@@ -1380,7 +1383,7 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
 	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
 
 	/* If we have no swap space, do not bother scanning anon pages. */
-	if (nr_swap_pages <= 0) {
+	if (!sc->may_swap || (nr_swap_pages <= 0)) {
 		percent[0] = 0;
 		percent[1] = 100;
 		return;
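[Note: what the new gate buys callers, sketched as an illustration only; the third parameter of get_scan_ratio() is assumed here to be the unsigned long percent[2] anon/file scan split visible in the hunk body:

/* With swapping disallowed for this reclaim pass, the anon/file
 * scan split collapses to file-only, even when swap space exists. */
struct scan_control sc = { .may_swap = 0 /* , ... */ };
unsigned long percent[2];

get_scan_ratio(zone, &sc, percent);
/* percent[0] == 0   -> scan no anon pages        */
/* percent[1] == 100 -> put all effort into file pages */]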
@@ -1697,6 +1700,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 		.may_writepage = !laptop_mode,
 		.swap_cluster_max = SWAP_CLUSTER_MAX,
 		.may_unmap = 1,
+		.may_swap = 1,
 		.swappiness = vm_swappiness,
 		.order = order,
 		.mem_cgroup = NULL,
@@ -1717,6 +1721,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 	struct scan_control sc = {
 		.may_writepage = !laptop_mode,
 		.may_unmap = 1,
+		.may_swap = !noswap,
 		.swap_cluster_max = SWAP_CLUSTER_MAX,
 		.swappiness = swappiness,
 		.order = 0,
@@ -1726,9 +1731,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 	};
 	struct zonelist *zonelist;
 
-	if (noswap)
-		sc.may_unmap = 0;
-
 	sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
 			(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
 	zonelist = NODE_DATA(numa_node_id())->node_zonelists;
@@ -1767,6 +1769,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
 	struct scan_control sc = {
 		.gfp_mask = GFP_KERNEL,
 		.may_unmap = 1,
+		.may_swap = 1,
 		.swap_cluster_max = SWAP_CLUSTER_MAX,
 		.swappiness = vm_swappiness,
 		.order = order,
@@ -2088,13 +2091,13 @@ static void shrink_all_zones(unsigned long nr_pages, int prio,
 				nr_reclaimed += shrink_list(l, nr_to_scan, zone,
 								sc, prio);
 				if (nr_reclaimed >= nr_pages) {
-					sc->nr_reclaimed = nr_reclaimed;
+					sc->nr_reclaimed += nr_reclaimed;
 					return;
 				}
 			}
 		}
 	}
-	sc->nr_reclaimed = nr_reclaimed;
+	sc->nr_reclaimed += nr_reclaimed;
 }
 
 /*
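[Note: the "=" to "+=" switch matters because shrink_all_zones() is called once per priority level from its caller, which expects a running total; with plain assignment each pass overwrote the work of earlier passes. The explicit .nr_reclaimed = 0 initializer in the next hunk gives the accumulator a defined starting point. A toy userspace model of the fix, hypothetical names:

#include <stdio.h>

struct scan_control { unsigned long nr_reclaimed; };

static void shrink_pass(struct scan_control *sc, unsigned long freed)
{
	sc->nr_reclaimed += freed;	/* "+=": keep earlier passes' work */
}

int main(void)
{
	struct scan_control sc = { .nr_reclaimed = 0 };	/* explicit start */
	unsigned long freed_per_prio[] = { 3, 5, 8 };

	for (int prio = 0; prio < 3; prio++)
		shrink_pass(&sc, freed_per_prio[prio]);
	printf("%lu\n", sc.nr_reclaimed);	/* 16 in total, not just 8 */
	return 0;
}]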
@@ -2115,6 +2118,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 		.may_unmap = 0,
 		.may_writepage = 1,
 		.isolate_pages = isolate_pages_global,
+		.nr_reclaimed = 0,
 	};
 
 	current->reclaim_state = &reclaim_state;
@@ -2297,6 +2301,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	struct scan_control sc = {
 		.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
 		.may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
+		.may_swap = 1,
 		.swap_cluster_max = max_t(unsigned long, nr_pages,
 					SWAP_CLUSTER_MAX),
 		.gfp_mask = gfp_mask,