Diffstat (limited to 'mm')
 mm/backing-dev.c | 12 ++----------
 mm/memory.c      |  2 --
 mm/mmap.c        |  4 ++--
 mm/nommu.c       |  4 ++--
 mm/page_alloc.c  |  8 ++++++--
 mm/slob.c        |  5 +++--
 mm/slub.c        |  5 +++--
 mm/swap.c        |  4 ++--
 8 files changed, 20 insertions(+), 24 deletions(-)
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 7c4f9e097095..f2e574dbc300 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -172,30 +172,22 @@ postcore_initcall(bdi_class_init);
 int bdi_register(struct backing_dev_info *bdi, struct device *parent,
 		const char *fmt, ...)
 {
-	char *name;
 	va_list args;
 	int ret = 0;
 	struct device *dev;
 
 	va_start(args, fmt);
-	name = kvasprintf(GFP_KERNEL, fmt, args);
+	dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);
 	va_end(args);
-
-	if (!name)
-		return -ENOMEM;
-
-	dev = device_create(bdi_class, parent, MKDEV(0, 0), name);
 	if (IS_ERR(dev)) {
 		ret = PTR_ERR(dev);
 		goto exit;
 	}
 
 	bdi->dev = dev;
-	dev_set_drvdata(bdi->dev, bdi);
-	bdi_debug_register(bdi, name);
+	bdi_debug_register(bdi, dev_name(dev));
 
 exit:
-	kfree(name);
 	return ret;
 }
 EXPORT_SYMBOL(bdi_register);
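
Note on this change: device_create_vargs() formats the device name from (fmt, args) and stores the drvdata pointer itself, so the open-coded kvasprintf()/device_create()/dev_set_drvdata()/kfree() sequence and its -ENOMEM error path collapse into one call, with dev_name(dev) recovering the formatted name for bdi_debug_register(). A minimal sketch of the equivalence, assuming the driver-core API of this kernel generation:

	/* before: name buffer managed by hand, with a failure path */
	name = kvasprintf(GFP_KERNEL, fmt, args);	/* may return NULL */
	dev = device_create(bdi_class, parent, MKDEV(0, 0), name);
	dev_set_drvdata(dev, bdi);
	kfree(name);

	/* after: one call formats the name and attaches bdi as drvdata */
	dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);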
diff --git a/mm/memory.c b/mm/memory.c
index fb5608a120ed..19e0ae9beecb 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2295,8 +2295,6 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	vmf.flags = flags;
 	vmf.page = NULL;
 
-	BUG_ON(vma->vm_flags & VM_PFNMAP);
-
 	ret = vma->vm_ops->fault(vma, &vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
 		return ret;
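
Note on this deletion: BUG_ON() oopses the kernel when its condition holds, so previously any ->fault() call on a VM_PFNMAP vma was fatal; dropping the assertion lets such mappings reach their fault handler (presumably groundwork for drivers that fault pfn-based mappings). For reference, the assertion macro expands to roughly:

	#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0)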
diff --git a/mm/mmap.c b/mm/mmap.c
index fac66337da2a..669499e7c2f5 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -80,7 +80,7 @@ EXPORT_SYMBOL(vm_get_page_prot);
 int sysctl_overcommit_memory = OVERCOMMIT_GUESS;  /* heuristic overcommit */
 int sysctl_overcommit_ratio = 50;	/* default is 50% */
 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
-atomic_t vm_committed_space = ATOMIC_INIT(0);
+atomic_long_t vm_committed_space = ATOMIC_LONG_INIT(0);
 
 /*
  * Check that a process has enough memory to allocate a new virtual
@@ -177,7 +177,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
 	 * cast `allowed' as a signed long because vm_committed_space
 	 * sometimes has a negative value
 	 */
-	if (atomic_read(&vm_committed_space) < (long)allowed)
+	if (atomic_long_read(&vm_committed_space) < (long)allowed)
 		return 0;
 error:
 	vm_unacct_memory(pages);
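
Why the type change: atomic_t is 32 bits wide on every architecture, while atomic_long_t follows BITS_PER_LONG. Committed address space is counted in pages, so on a 64-bit machine a signed 32-bit counter wraps at 2^31 pages, and the comment above already depends on reading the value back as a signed long. In outline, with 4 KiB pages as an assumed example:

	/* Counter width, before and after:
	 *   atomic_t      - always 32-bit; wraps at 2^31 pages
	 *                   (8 TiB of committed space at 4 KiB/page)
	 *   atomic_long_t - native word size; 64-bit on 64-bit kernels
	 * All readers and writers switch to the atomic_long_*() accessors,
	 * keeping the signed-long comparison above well defined. */
	atomic_long_t vm_committed_space = ATOMIC_LONG_INIT(0);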
diff --git a/mm/nommu.c b/mm/nommu.c
index ef8c62cec697..dca93fcb8b7a 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -39,7 +39,7 @@ struct page *mem_map;
 unsigned long max_mapnr;
 unsigned long num_physpages;
 unsigned long askedalloc, realalloc;
-atomic_t vm_committed_space = ATOMIC_INIT(0);
+atomic_long_t vm_committed_space = ATOMIC_LONG_INIT(0);
 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
 int sysctl_overcommit_ratio = 50; /* default is 50% */
 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
@@ -1410,7 +1410,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
 	 * cast `allowed' as a signed long because vm_committed_space
 	 * sometimes has a negative value
 	 */
-	if (atomic_read(&vm_committed_space) < (long)allowed)
+	if (atomic_long_read(&vm_committed_space) < (long)allowed)
 		return 0;
 error:
 	vm_unacct_memory(pages);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 63835579323a..8e83f02cd2d3 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1396,6 +1396,9 @@ get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
 
 	(void)first_zones_zonelist(zonelist, high_zoneidx, nodemask,
 							&preferred_zone);
+	if (!preferred_zone)
+		return NULL;
+
 	classzone_idx = zone_idx(preferred_zone);
 
 zonelist_scan:
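
Why the new guard: first_zones_zonelist() leaves preferred_zone NULL when the zonelist/nodemask intersection contains no zone at or below high_zoneidx, and the very next statement dereferences it via zone_idx(). The added check turns that oops into a clean allocation failure:

	if (!preferred_zone)	/* empty intersection: nothing to allocate from */
		return NULL;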
@@ -2804,7 +2807,7 @@ int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
 	alloc_size = zone->wait_table_hash_nr_entries
 					* sizeof(wait_queue_head_t);
 
-	if (system_state == SYSTEM_BOOTING) {
+	if (!slab_is_available()) {
 		zone->wait_table = (wait_queue_head_t *)
 			alloc_bootmem_node(pgdat, alloc_size);
 	} else {
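
Why the predicate swap: system_state == SYSTEM_BOOTING only approximates "the slab allocator is not up yet"; slab_is_available() tests the actual precondition, and stays correct for zones initialised after early boot (memory hotplug being the obvious case). The bootmem side of the decision, in outline; the else branch, continuing past the quoted context, uses the regular allocator:

	/* bootmem is the only allocator guaranteed before the slab is up */
	if (!slab_is_available())
		zone->wait_table = (wait_queue_head_t *)
			alloc_bootmem_node(pgdat, alloc_size);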
@@ -3378,7 +3381,8 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 	 * is used by this zone for memmap. This affects the watermark
 	 * and per-cpu initialisations
 	 */
-	memmap_pages = (size * sizeof(struct page)) >> PAGE_SHIFT;
+	memmap_pages =
+		PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
 	if (realsize >= memmap_pages) {
 		realsize -= memmap_pages;
 		printk(KERN_DEBUG
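
Why PAGE_ALIGN: shifting first floors the result, so a memmap that ends partway into a page was counted one page short; rounding the byte total up to a page boundary before the shift gives the ceiling instead. A worked example with assumed numbers (4 KiB pages, 56-byte struct page, a 100000-page span):

	/* PAGE_ALIGN rounds up to the next page boundary */
	#define PAGE_ALIGN(addr)	(((addr) + PAGE_SIZE - 1) & PAGE_MASK)

	/* old:            (100000 * 56) >> 12 = 5600000 >> 12 = 1367 (floor) */
	/* new: PAGE_ALIGN(100000 * 56) >> 12 = 5603328 >> 12 = 1368 (ceil)  */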
diff --git a/mm/slob.c b/mm/slob.c
index 6038cbadf796..a3ad6671adf1 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -469,8 +469,9 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 			return ZERO_SIZE_PTR;
 
 		m = slob_alloc(size + align, gfp, align, node);
-		if (m)
-			*m = size;
+		if (!m)
+			return NULL;
+		*m = size;
 		return (void *)m + align;
 	} else {
 		void *ret;
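
The bug being fixed: slob_alloc() returns NULL on failure, but the old code skipped only the size store and still fell through to "return (void *)m + align;", handing the caller a small non-NULL garbage pointer (e.g. (void *)0 + 8 under 8-byte alignment) instead of NULL. Annotated, the fixed path reads:

	m = slob_alloc(size + align, gfp, align, node);
	if (!m)
		return NULL;		/* was: (void *)NULL + align, a bogus pointer */
	*m = size;			/* size header stored just before the payload */
	return (void *)m + align;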
diff --git a/mm/slub.c b/mm/slub.c
index a505a828ef41..0987d1cd943c 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2726,9 +2726,10 @@ size_t ksize(const void *object)
 
 	page = virt_to_head_page(object);
 
-	if (unlikely(!PageSlab(page)))
+	if (unlikely(!PageSlab(page))) {
+		WARN_ON(!PageCompound(page));
 		return PAGE_SIZE << compound_order(page);
-
+	}
 	s = page->slab;
 
 #ifdef CONFIG_SLUB_DEBUG
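
Why the WARN_ON: kmalloc() objects too big for the slab come straight from the page allocator as compound pages, so for a !PageSlab page ksize() reports PAGE_SIZE << compound_order(page). If the page is not compound either, the pointer cannot have come from kmalloc() and that answer is meaningless; the warning makes such misuse visible instead of silently returning a bogus size. The intended large-object case, with a hypothetical size:

	void *p = kmalloc(16 * 1024, GFP_KERNEL);	/* beyond the slab: compound page */
	size_t n = ksize(p);				/* >= 16384 via compound_order() */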
diff --git a/mm/swap.c b/mm/swap.c
index 91e194445a5e..45c9f25a8a3b 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -503,7 +503,7 @@ void vm_acct_memory(long pages)
 	local = &__get_cpu_var(committed_space);
 	*local += pages;
 	if (*local > ACCT_THRESHOLD || *local < -ACCT_THRESHOLD) {
-		atomic_add(*local, &vm_committed_space);
+		atomic_long_add(*local, &vm_committed_space);
 		*local = 0;
 	}
 	preempt_enable();
@@ -520,7 +520,7 @@ static int cpu_swap_callback(struct notifier_block *nfb,
 
 	committed = &per_cpu(committed_space, (long)hcpu);
 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
-		atomic_add(*committed, &vm_committed_space);
+		atomic_long_add(*committed, &vm_committed_space);
 		*committed = 0;
 		drain_cpu_pagevecs((long)hcpu);
 	}
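
The pattern both hunks preserve: each CPU batches commit charges in a per-CPU counter and folds them into the shared vm_committed_space only once the local value drifts past ACCT_THRESHOLD, so the hot path seldom touches the contended global cache line; the hotplug callback flushes a dead CPU's leftover batch. In miniature, assuming the preempt_disable() pairing sits just above the quoted context:

	void vm_acct_memory(long pages)
	{
		long *local;

		preempt_disable();
		local = &__get_cpu_var(committed_space);
		*local += pages;		/* cheap, CPU-local update */
		if (*local > ACCT_THRESHOLD || *local < -ACCT_THRESHOLD) {
			/* rare fold into the global counter */
			atomic_long_add(*local, &vm_committed_space);
			*local = 0;
		}
		preempt_enable();
	}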