Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c          |  1
-rw-r--r--  mm/nommu.c            |  1
-rw-r--r--  mm/shmem.c            | 15
-rw-r--r--  mm/slub.c             |  2
-rw-r--r--  mm/sparse-vmemmap.c   |  1
-rw-r--r--  mm/sparse.c           | 11
6 files changed, 19 insertions, 12 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 5209e47b7f..7c86436300 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -28,6 +28,7 @@
 #include <linux/backing-dev.h>
 #include <linux/pagevec.h>
 #include <linux/blkdev.h>
+#include <linux/backing-dev.h>
 #include <linux/security.h>
 #include <linux/syscalls.h>
 #include <linux/cpuset.h>
diff --git a/mm/nommu.c b/mm/nommu.c
index 8f09333f78..35622c5909 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -12,6 +12,7 @@
  * Copyright (c) 2002 Greg Ungerer <gerg@snapgear.com>
  */
 
+#include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/mman.h>
 #include <linux/swap.h>
diff --git a/mm/shmem.c b/mm/shmem.c
index 404e53bb21..253d205914 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -915,6 +915,21 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
 	struct inode *inode;
 
 	BUG_ON(!PageLocked(page));
+	/*
+	 * shmem_backing_dev_info's capabilities prevent regular writeback or
+	 * sync from ever calling shmem_writepage; but a stacking filesystem
+	 * may use the ->writepage of its underlying filesystem, in which case
+	 * we want to do nothing when that underlying filesystem is tmpfs
+	 * (writing out to swap is useful as a response to memory pressure, but
+	 * of no use to stabilize the data) - just redirty the page, unlock it
+	 * and claim success in this case. AOP_WRITEPAGE_ACTIVATE, and the
+	 * page_mapped check below, must be avoided unless we're in reclaim.
+	 */
+	if (!wbc->for_reclaim) {
+		set_page_dirty(page);
+		unlock_page(page);
+		return 0;
+	}
 	BUG_ON(page_mapped(page));
 
 	mapping = page->mapping;
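
The comment added above spells out the reasoning: only reclaim should reach tmpfs's swap-out path, while a stacking filesystem that forwards ->writepage to an underlying tmpfs file should simply have the page redirtied and get a success return. A minimal sketch of such a caller follows; the stackfs_* names and the stackfs_lower_page() helper are hypothetical, used only to illustrate how shmem_writepage() can be entered outside reclaim, and upper-page bookkeeping (unlock, error handling) is omitted.

/*
 * Illustration only, not part of this patch: a stacking filesystem whose
 * ->writepage hands the work to the mapping of its lower file.
 */
static int stackfs_writepage(struct page *page, struct writeback_control *wbc)
{
	/* Hypothetical helper, assumed to return the matching lower page,
	 * locked and marked dirty. */
	struct page *lower_page = stackfs_lower_page(page);

	/*
	 * If the lower file lives on tmpfs this reaches shmem_writepage().
	 * With the hunk above, a call outside reclaim (wbc->for_reclaim
	 * clear, e.g. plain sync) just redirties lower_page and returns 0
	 * instead of tripping BUG_ON(page_mapped(page)).
	 */
	return lower_page->mapping->a_ops->writepage(lower_page, wbc);
}
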
diff --git a/mm/slub.c b/mm/slub.c
index aac1dd3c65..bcdb2c8941 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2734,7 +2734,7 @@ static void slab_mem_offline_callback(void *arg)
 	 * and offline_pages() function shoudn't call this
 	 * callback. So, we must fail.
 	 */
-	BUG_ON(atomic_read(&n->nr_slabs));
+	BUG_ON(atomic_long_read(&n->nr_slabs));
 
 	s->node[offline_node] = NULL;
 	kmem_cache_free(kmalloc_caches, n);
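
The one-line slub.c change swaps atomic_read() for atomic_long_read(), which only makes sense if the per-node slab counter is an atomic_long_t. Assuming that declaration (it is not shown in this diff), the matching pair looks like the sketch below; the struct layout and names here are illustrative assumptions.

/*
 * Sketch only: the field shown here is inferred from the hunk above,
 * not taken from this diff.
 */
struct kmem_cache_node_sketch {
	atomic_long_t nr_slabs;		/* updated via atomic_long_inc()/dec() */
};

static inline long sketch_node_nr_slabs(struct kmem_cache_node_sketch *n)
{
	/* atomic_long_read() pairs with atomic_long_t; atomic_read() does not. */
	return atomic_long_read(&n->nr_slabs);
}
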
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index d3b718b0c2..22620f6a97 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -24,6 +24,7 @@
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <linux/vmalloc.h>
+#include <linux/sched.h>
 #include <asm/dma.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
diff --git a/mm/sparse.c b/mm/sparse.c
index 08fb14f5ee..e06f514fe0 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -220,12 +220,6 @@ static int __meminit sparse_init_one_section(struct mem_section *ms,
 	return 1;
 }
 
-__attribute__((weak)) __init
-void *alloc_bootmem_high_node(pg_data_t *pgdat, unsigned long size)
-{
-	return NULL;
-}
-
 static unsigned long usemap_size(void)
 {
 	unsigned long size_bytes;
@@ -267,11 +261,6 @@ struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
 	if (map)
 		return map;
 
-	map = alloc_bootmem_high_node(NODE_DATA(nid),
-			sizeof(struct page) * PAGES_PER_SECTION);
-	if (map)
-		return map;
-
 	map = alloc_bootmem_node(NODE_DATA(nid),
 			sizeof(struct page) * PAGES_PER_SECTION);
 	return map;