Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c          90
-rw-r--r--  mm/mmap.c              1
-rw-r--r--  mm/nommu.c             8
-rw-r--r--  mm/page-writeback.c    7
-rw-r--r--  mm/page_alloc.c       28
-rw-r--r--  mm/rmap.c              1
-rw-r--r--  mm/slab.c             52
-rw-r--r--  mm/sparse.c           12
-rw-r--r--  mm/vmalloc.c           7
-rw-r--r--  mm/vmscan.c            8
10 files changed, 115 insertions, 99 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 5631d6b2a62d..9cbf4fea4a59 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1110,6 +1110,45 @@ success:
         return size;
 }

+/*
+ * Performs necessary checks before doing a write
+ * @iov: io vector request
+ * @nr_segs: number of segments in the iovec
+ * @count: number of bytes to write
+ * @access_flags: type of access: %VERIFY_READ or %VERIFY_WRITE
+ *
+ * Adjust number of segments and amount of bytes to write (nr_segs should be
+ * properly initialized first). Returns appropriate error code that caller
+ * should return or zero in case that write should be allowed.
+ */
+int generic_segment_checks(const struct iovec *iov,
+                        unsigned long *nr_segs, size_t *count, int access_flags)
+{
+        unsigned long seg;
+        size_t cnt = 0;
+        for (seg = 0; seg < *nr_segs; seg++) {
+                const struct iovec *iv = &iov[seg];
+
+                /*
+                 * If any segment has a negative length, or the cumulative
+                 * length ever wraps negative then return -EINVAL.
+                 */
+                cnt += iv->iov_len;
+                if (unlikely((ssize_t)(cnt|iv->iov_len) < 0))
+                        return -EINVAL;
+                if (access_ok(access_flags, iv->iov_base, iv->iov_len))
+                        continue;
+                if (seg == 0)
+                        return -EFAULT;
+                *nr_segs = seg;
+                cnt -= iv->iov_len;     /* This segment is no good */
+                break;
+        }
+        *count = cnt;
+        return 0;
+}
+EXPORT_SYMBOL(generic_segment_checks);
+
 /**
  * generic_file_aio_read - generic filesystem read routine
  * @iocb: kernel I/O control block
@@ -1131,24 +1170,9 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
         loff_t *ppos = &iocb->ki_pos;

         count = 0;
-        for (seg = 0; seg < nr_segs; seg++) {
-                const struct iovec *iv = &iov[seg];
-
-                /*
-                 * If any segment has a negative length, or the cumulative
-                 * length ever wraps negative then return -EINVAL.
-                 */
-                count += iv->iov_len;
-                if (unlikely((ssize_t)(count|iv->iov_len) < 0))
-                        return -EINVAL;
-                if (access_ok(VERIFY_WRITE, iv->iov_base, iv->iov_len))
-                        continue;
-                if (seg == 0)
-                        return -EFAULT;
-                nr_segs = seg;
-                count -= iv->iov_len;   /* This segment is no good */
-                break;
-        }
+        retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
+        if (retval)
+                return retval;

         /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
         if (filp->f_flags & O_DIRECT) {
@@ -2218,30 +2242,14 @@ __generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
         size_t ocount;          /* original count */
         size_t count;           /* after file limit checks */
         struct inode    *inode = mapping->host;
-        unsigned long   seg;
         loff_t          pos;
         ssize_t         written;
         ssize_t         err;

         ocount = 0;
-        for (seg = 0; seg < nr_segs; seg++) {
-                const struct iovec *iv = &iov[seg];
-
-                /*
-                 * If any segment has a negative length, or the cumulative
-                 * length ever wraps negative then return -EINVAL.
-                 */
-                ocount += iv->iov_len;
-                if (unlikely((ssize_t)(ocount|iv->iov_len) < 0))
-                        return -EINVAL;
-                if (access_ok(VERIFY_READ, iv->iov_base, iv->iov_len))
-                        continue;
-                if (seg == 0)
-                        return -EFAULT;
-                nr_segs = seg;
-                ocount -= iv->iov_len;  /* This segment is no good */
-                break;
-        }
+        err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
+        if (err)
+                return err;

         count = ocount;
         pos = *ppos;
@@ -2301,10 +2309,10 @@ __generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
          * semantics.
          */
         endbyte = pos + written_buffered - written - 1;
-        err = do_sync_file_range(file, pos, endbyte,
-                                 SYNC_FILE_RANGE_WAIT_BEFORE|
-                                 SYNC_FILE_RANGE_WRITE|
-                                 SYNC_FILE_RANGE_WAIT_AFTER);
+        err = do_sync_mapping_range(file->f_mapping, pos, endbyte,
+                                    SYNC_FILE_RANGE_WAIT_BEFORE|
+                                    SYNC_FILE_RANGE_WRITE|
+                                    SYNC_FILE_RANGE_WAIT_AFTER);
         if (err == 0) {
                 written = written_buffered;
                 invalidate_mapping_pages(mapping,
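
Note: generic_segment_checks() centralizes the iovec validation that was previously open-coded in both the read and write paths above. A minimal sketch of a caller (hypothetical code, not part of this commit; the real callers are generic_file_aio_read() and __generic_file_aio_write_nolock()):

/* Hypothetical caller: validate user iovecs before a write consumes them. */
static ssize_t example_aio_write(struct kiocb *iocb, const struct iovec *iov,
                                 unsigned long nr_segs, loff_t pos)
{
        size_t count = 0;
        int err;

        /* VERIFY_READ: a write reads the user buffers. The helper may
         * shrink nr_segs and count if a trailing segment is inaccessible. */
        err = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
        if (err)
                return err;

        /* ... write 'count' bytes from the first 'nr_segs' segments ... */
        return count;
}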
diff --git a/mm/mmap.c b/mm/mmap.c
index 52646d61ff69..cc1f543eb1b8 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1366,7 +1366,6 @@ unsigned long
 get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
                 unsigned long pgoff, unsigned long flags)
 {
-        unsigned long ret;
         unsigned long (*get_area)(struct file *, unsigned long,
                                   unsigned long, unsigned long, unsigned long);

diff --git a/mm/nommu.c b/mm/nommu.c
index 1f60194d9b9b..2b16b00a5b11 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -262,6 +262,14 @@ void vunmap(void *addr)
 }

 /*
+ * Implement a stub for vmalloc_sync_all() if the architecture chose not to
+ * have one.
+ */
+void __attribute__((weak)) vmalloc_sync_all(void)
+{
+}
+
+/*
  * sys_brk() for the most part doesn't need the global kernel
  * lock, except when an application is doing something nasty
  * like trying to un-brk an area that has already been mapped
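
Note: the weak attribute makes this stub a link-time default; an architecture that needs real synchronization simply provides a strong definition of the same symbol and the linker prefers it, with no Kconfig plumbing. An illustrative override (sketch only, not from this commit):

/* Strong definition in arch code; overrides the weak stub at link time. */
void vmalloc_sync_all(void)
{
        /* Arch-specific: propagate vmalloc page-table changes into every
         * page table that caches the kernel mappings. */
}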
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 029dfad5a235..63cd88840eb2 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -683,12 +683,7 @@ retry:
                         }

                         ret = (*writepage)(page, wbc);
-                        if (ret) {
-                                if (ret == -ENOSPC)
-                                        set_bit(AS_ENOSPC, &mapping->flags);
-                                else
-                                        set_bit(AS_EIO, &mapping->flags);
-                        }
+                        mapping_set_error(mapping, ret);

                         if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE))
                                 unlock_page(page);
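
Note: the open-coded bookkeeping removed here (and in mm/vmscan.c below) is folded into the mapping_set_error() helper. Judging from the code it replaces, its behavior is equivalent to this sketch (reconstructed, not part of this diff):

static inline void mapping_set_error(struct address_space *mapping, int error)
{
        if (unlikely(error)) {
                if (error == -ENOSPC)
                        set_bit(AS_ENOSPC, &mapping->flags);
                else
                        set_bit(AS_EIO, &mapping->flags);
        }
}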
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 59164313167f..6fd0b7455b0b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -103,7 +103,7 @@ int min_free_kbytes = 1024;

 unsigned long __meminitdata nr_kernel_pages;
 unsigned long __meminitdata nr_all_pages;
-static unsigned long __initdata dma_reserve;
+static unsigned long __meminitdata dma_reserve;

 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
   /*
@@ -126,10 +126,10 @@ static unsigned long __initdata dma_reserve;
   #endif
 #endif

-  struct node_active_region __initdata early_node_map[MAX_ACTIVE_REGIONS];
-  int __initdata nr_nodemap_entries;
-  unsigned long __initdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
-  unsigned long __initdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
+  struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
+  int __meminitdata nr_nodemap_entries;
+  unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
+  unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
 #ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
   unsigned long __initdata node_boundary_start_pfn[MAX_NUMNODES];
   unsigned long __initdata node_boundary_end_pfn[MAX_NUMNODES];
@@ -2179,7 +2179,7 @@ void __init setup_per_cpu_pageset(void)

 #endif

-static __meminit
+static __meminit noinline
 int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
 {
         int i;
@@ -2267,7 +2267,7 @@ __meminit int init_currently_empty_zone(struct zone *zone,
  * Basic iterator support. Return the first range of PFNs for a node
  * Note: nid == MAX_NUMNODES returns first region regardless of node
  */
-static int __init first_active_region_index_in_nid(int nid)
+static int __meminit first_active_region_index_in_nid(int nid)
 {
         int i;

@@ -2282,7 +2282,7 @@ static int __init first_active_region_index_in_nid(int nid)
  * Basic iterator support. Return the next active range of PFNs for a node
  * Note: nid == MAX_NUMNODES returns next region regardles of node
  */
-static int __init next_active_region_index_in_nid(int index, int nid)
+static int __meminit next_active_region_index_in_nid(int index, int nid)
 {
         for (index = index + 1; index < nr_nodemap_entries; index++)
                 if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
@@ -2435,7 +2435,7 @@ static void __init account_node_boundary(unsigned int nid,
  * with no available memory, a warning is printed and the start and end
  * PFNs will be 0.
  */
-void __init get_pfn_range_for_nid(unsigned int nid,
+void __meminit get_pfn_range_for_nid(unsigned int nid,
                         unsigned long *start_pfn, unsigned long *end_pfn)
 {
         int i;
@@ -2460,7 +2460,7 @@ void __init get_pfn_range_for_nid(unsigned int nid,
  * Return the number of pages a zone spans in a node, including holes
  * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
  */
-unsigned long __init zone_spanned_pages_in_node(int nid,
+unsigned long __meminit zone_spanned_pages_in_node(int nid,
                                         unsigned long zone_type,
                                         unsigned long *ignored)
 {
@@ -2488,7 +2488,7 @@ unsigned long __init zone_spanned_pages_in_node(int nid,
  * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
  * then all holes in the requested range will be accounted for.
  */
-unsigned long __init __absent_pages_in_range(int nid,
+unsigned long __meminit __absent_pages_in_range(int nid,
                                 unsigned long range_start_pfn,
                                 unsigned long range_end_pfn)
 {
@@ -2548,7 +2548,7 @@ unsigned long __init absent_pages_in_range(unsigned long start_pfn,
 }

 /* Return the number of page frames in holes in a zone on a node */
-unsigned long __init zone_absent_pages_in_node(int nid,
+unsigned long __meminit zone_absent_pages_in_node(int nid,
                                                 unsigned long zone_type,
                                                 unsigned long *ignored)
 {
@@ -2584,7 +2584,7 @@ static inline unsigned long zone_absent_pages_in_node(int nid,

 #endif

-static void __init calculate_node_totalpages(struct pglist_data *pgdat,
+static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
                 unsigned long *zones_size, unsigned long *zholes_size)
 {
         unsigned long realtotalpages, totalpages = 0;
@@ -2692,7 +2692,7 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat,
         }
 }

-static void __init alloc_node_mem_map(struct pglist_data *pgdat)
+static void __meminit alloc_node_mem_map(struct pglist_data *pgdat)
 {
         /* Skip empty nodes */
         if (!pgdat->node_spanned_pages)
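
Note: the recurring change in this file moves boot-time symbols from __init/__initdata, which the kernel discards after boot, to __meminit/__meminitdata, which are retained when memory hotplug is configured, since hot-adding memory re-runs parts of zone initialization. An illustrative sketch of the two annotations (not from this diff):

/* Discarded after boot; must never be called again afterwards. */
static int __init boot_only_setup(void)
{
        return 0;
}

/* Retained when CONFIG_MEMORY_HOTPLUG is set, so it may run at hot-add time. */
static int __meminit hotplug_safe_setup(void)
{
        return 0;
}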
diff --git a/mm/rmap.c b/mm/rmap.c
index 75a32be64a21..304f51985c78 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -505,6 +505,7 @@ int page_mkclean(struct page *page)

         return ret;
 }
+EXPORT_SYMBOL_GPL(page_mkclean);

 /**
  * page_set_anon_rmap - setup new anonymous rmap
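
Note: EXPORT_SYMBOL_GPL makes page_mkclean() callable from GPL-licensed modules. A hypothetical module-side use (illustrative only, not from this commit):

#include <linux/rmap.h>

/* Hypothetical: write-protect every mapping of the page so the next write
 * faults; page_mkclean() returns nonzero if any pte was cleaned. */
static int example_track_writes(struct page *page)
{
        return page_mkclean(page);
}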
diff --git a/mm/slab.c b/mm/slab.c
index 5920a412b377..acda7e2d66e4 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -148,10 +148,11 @@
  * Usually, the kmalloc caches are cache_line_size() aligned, except when
  * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
  * Some archs want to perform DMA into kmalloc caches and need a guaranteed
- * alignment larger than BYTES_PER_WORD. ARCH_KMALLOC_MINALIGN allows that.
- * Note that this flag disables some debug features.
+ * alignment larger than the alignment of a 64-bit integer.
+ * ARCH_KMALLOC_MINALIGN allows that.
+ * Note that increasing this value may disable some debug features.
  */
-#define ARCH_KMALLOC_MINALIGN 0
+#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
 #endif

 #ifndef ARCH_SLAB_MINALIGN
@@ -536,19 +537,22 @@ static int obj_size(struct kmem_cache *cachep)
         return cachep->obj_size;
 }

-static unsigned long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
+static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
 {
         BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
-        return (unsigned long*) (objp+obj_offset(cachep)-BYTES_PER_WORD);
+        return (unsigned long long*) (objp + obj_offset(cachep) -
+                                      sizeof(unsigned long long));
 }

-static unsigned long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
+static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
 {
         BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
         if (cachep->flags & SLAB_STORE_USER)
-                return (unsigned long *)(objp + cachep->buffer_size -
-                                         2 * BYTES_PER_WORD);
-        return (unsigned long *)(objp + cachep->buffer_size - BYTES_PER_WORD);
+                return (unsigned long long *)(objp + cachep->buffer_size -
+                                              sizeof(unsigned long long) -
+                                              BYTES_PER_WORD);
+        return (unsigned long long *) (objp + cachep->buffer_size -
+                                       sizeof(unsigned long long));
 }

 static void **dbg_userword(struct kmem_cache *cachep, void *objp)
@@ -561,8 +565,8 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)

 #define obj_offset(x)                   0
 #define obj_size(cachep)                (cachep->buffer_size)
-#define dbg_redzone1(cachep, objp)      ({BUG(); (unsigned long *)NULL;})
-#define dbg_redzone2(cachep, objp)      ({BUG(); (unsigned long *)NULL;})
+#define dbg_redzone1(cachep, objp)      ({BUG(); (unsigned long long *)NULL;})
+#define dbg_redzone2(cachep, objp)      ({BUG(); (unsigned long long *)NULL;})
 #define dbg_userword(cachep, objp)      ({BUG(); (void **)NULL;})

 #endif
@@ -1776,7 +1780,7 @@ static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
         char *realobj;

         if (cachep->flags & SLAB_RED_ZONE) {
-                printk(KERN_ERR "Redzone: 0x%lx/0x%lx.\n",
+                printk(KERN_ERR "Redzone: 0x%llx/0x%llx.\n",
                         *dbg_redzone1(cachep, objp),
                         *dbg_redzone2(cachep, objp));
         }
@@ -2239,7 +2243,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
          * is greater than BYTES_PER_WORD.
          */
         if (flags & SLAB_RED_ZONE || flags & SLAB_STORE_USER)
-                ralign = BYTES_PER_WORD;
+                ralign = __alignof__(unsigned long long);

         /* 2) arch mandated alignment */
         if (ralign < ARCH_SLAB_MINALIGN) {
@@ -2250,7 +2254,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
                 ralign = align;
         }
         /* disable debug if necessary */
-        if (ralign > BYTES_PER_WORD)
+        if (ralign > __alignof__(unsigned long long))
                 flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
         /*
          * 4) Store it.
@@ -2271,8 +2275,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
          */
         if (flags & SLAB_RED_ZONE) {
                 /* add space for red zone words */
-                cachep->obj_offset += BYTES_PER_WORD;
-                size += 2 * BYTES_PER_WORD;
+                cachep->obj_offset += sizeof(unsigned long long);
+                size += 2 * sizeof(unsigned long long);
         }
         if (flags & SLAB_STORE_USER) {
                 /* user store requires one word storage behind the end of
@@ -2833,7 +2837,7 @@ static void kfree_debugcheck(const void *objp)

 static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
 {
-        unsigned long redzone1, redzone2;
+        unsigned long long redzone1, redzone2;

         redzone1 = *dbg_redzone1(cache, obj);
         redzone2 = *dbg_redzone2(cache, obj);
@@ -2849,7 +2853,7 @@ static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
         else
                 slab_error(cache, "memory outside object was overwritten");

-        printk(KERN_ERR "%p: redzone 1:0x%lx, redzone 2:0x%lx.\n",
+        printk(KERN_ERR "%p: redzone 1:0x%llx, redzone 2:0x%llx.\n",
                         obj, redzone1, redzone2);
 }

@@ -3065,7 +3069,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
                         slab_error(cachep, "double free, or memory outside"
                                                 " object was overwritten");
                         printk(KERN_ERR
-                                "%p: redzone 1:0x%lx, redzone 2:0x%lx\n",
+                                "%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
                                 objp, *dbg_redzone1(cachep, objp),
                                 *dbg_redzone2(cachep, objp));
                 }
@@ -4428,16 +4432,12 @@ static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s)
 static void show_symbol(struct seq_file *m, unsigned long address)
 {
 #ifdef CONFIG_KALLSYMS
-        char *modname;
-        const char *name;
         unsigned long offset, size;
-        char namebuf[KSYM_NAME_LEN+1];
-
-        name = kallsyms_lookup(address, &size, &offset, &modname, namebuf);
+        char modname[MODULE_NAME_LEN + 1], name[KSYM_NAME_LEN + 1];

-        if (name) {
+        if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
                 seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
-                if (modname)
+                if (modname[0])
                         seq_printf(m, " [%s]", modname);
                 return;
         }
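
Note: taken together, the slab.c hunks widen the debug red zones from one machine word to 64 bits. Based on the offset arithmetic in the hunks above, the resulting object layout is (illustrative sketch):

/*
 * |<-- u64 -->|<------- object ------->|<-- u64 -->|<-- word -->|
 *   redzone1    payload at obj_offset    redzone2    user word
 *                                                    (SLAB_STORE_USER only)
 *
 * redzone1 = objp + obj_offset - sizeof(unsigned long long)
 * redzone2 = objp + buffer_size - sizeof(unsigned long long)
 *            (minus a further BYTES_PER_WORD when the user word follows)
 */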
diff --git a/mm/sparse.c b/mm/sparse.c
index 893e5621c247..6f3fff907bc2 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -44,7 +44,7 @@ EXPORT_SYMBOL(page_to_nid);
 #endif

 #ifdef CONFIG_SPARSEMEM_EXTREME
-static struct mem_section *sparse_index_alloc(int nid)
+static struct mem_section noinline *sparse_index_alloc(int nid)
 {
         struct mem_section *section = NULL;
         unsigned long array_size = SECTIONS_PER_ROOT *
@@ -61,7 +61,7 @@ static struct mem_section *sparse_index_alloc(int nid)
         return section;
 }

-static int sparse_index_init(unsigned long section_nr, int nid)
+static int __meminit sparse_index_init(unsigned long section_nr, int nid)
 {
         static DEFINE_SPINLOCK(index_init_lock);
         unsigned long root = SECTION_NR_TO_ROOT(section_nr);
@@ -138,7 +138,7 @@ static inline int sparse_early_nid(struct mem_section *section)
 }

 /* Record a memory area against a node. */
-void memory_present(int nid, unsigned long start, unsigned long end)
+void __init memory_present(int nid, unsigned long start, unsigned long end)
 {
         unsigned long pfn;

@@ -197,7 +197,7 @@ struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
         return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
 }

-static int sparse_init_one_section(struct mem_section *ms,
+static int __meminit sparse_init_one_section(struct mem_section *ms,
                 unsigned long pnum, struct page *mem_map)
 {
         if (!valid_section(ms))
@@ -209,7 +209,7 @@ static int sparse_init_one_section(struct mem_section *ms,
         return 1;
 }

-static struct page *sparse_early_mem_map_alloc(unsigned long pnum)
+static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
 {
         struct page *map;
         struct mem_section *ms = __nr_to_section(pnum);
@@ -288,6 +288,7 @@ void __init sparse_init(void)
         }
 }

+#ifdef CONFIG_MEMORY_HOTPLUG
 /*
  * returns the number of sections whose mem_maps were properly
  * set. If this is <=0, then that means that the passed-in
@@ -327,3 +328,4 @@ out:
         __kfree_section_memmap(memmap, nr_pages);
         return ret;
 }
+#endif
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index cb5aabda7046..faa2a521dea3 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -755,3 +755,10 @@ out_einval_locked:
 }
 EXPORT_SYMBOL(remap_vmalloc_range);

+/*
+ * Implement a stub for vmalloc_sync_all() if the architecture chose not to
+ * have one.
+ */
+void __attribute__((weak)) vmalloc_sync_all(void)
+{
+}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 56651a10c366..1c8e75a1cfcd 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -284,12 +284,8 @@ static void handle_write_error(struct address_space *mapping,
                                 struct page *page, int error)
 {
         lock_page(page);
-        if (page_mapping(page) == mapping) {
-                if (error == -ENOSPC)
-                        set_bit(AS_ENOSPC, &mapping->flags);
-                else
-                        set_bit(AS_EIO, &mapping->flags);
-        }
+        if (page_mapping(page) == mapping)
+                mapping_set_error(mapping, error);
         unlock_page(page);
 }
