-rw-r--r--  mm/dmapool.c         10
-rw-r--r--  mm/huge_memory.c      3
-rw-r--r--  mm/kasan/report.c     6
-rw-r--r--  mm/kmemcheck.c        3
-rw-r--r--  mm/kmemleak.c        18
-rw-r--r--  mm/memblock.c         3
-rw-r--r--  mm/memory_hotplug.c   3
-rw-r--r--  mm/mempolicy.c        4
-rw-r--r--  mm/mmap.c             8
-rw-r--r--  mm/oom_kill.c         3
-rw-r--r--  mm/page_alloc.c      37
-rw-r--r--  mm/page_owner.c       5
-rw-r--r--  mm/percpu.c           4
-rw-r--r--  mm/slab.c            28
-rw-r--r--  mm/slab_common.c     10
-rw-r--r--  mm/slub.c            19
-rw-r--r--  mm/sparse-vmemmap.c   8
-rw-r--r--  mm/sparse.c           8
-rw-r--r--  mm/swapfile.c         3
-rw-r--r--  mm/vmalloc.c          4
20 files changed, 78 insertions(+), 109 deletions(-)
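Every hunk below applies the same mechanical transformation, sketched here abridged from the mm/dmapool.c change: kernel coding style prefers a single, unsplit format string for user-visible messages, even when the line exceeds 80 columns, so the full message can be found with grep. Adjacent C string literals are concatenated at compile time, so the split and coalesced forms produce identical output; only the argument list is left to wrap.

	/* Before: the literal is split across source lines, so
	 * grep -r "dma %Lx already free" cannot match it. */
	dev_err(pool->dev, "dma_pool_free %s, dma %Lx "
		"already free\n", pool->name,
		(unsigned long long)dma);

	/* After: one greppable literal; only the arguments wrap. */
	dev_err(pool->dev, "dma_pool_free %s, dma %Lx already free\n",
		pool->name, (unsigned long long)dma);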
diff --git a/mm/dmapool.c b/mm/dmapool.c
index 57312b5d6e12..2821500e8123 100644
--- a/mm/dmapool.c
+++ b/mm/dmapool.c
@@ -452,13 +452,11 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
 			}
 			spin_unlock_irqrestore(&pool->lock, flags);
 			if (pool->dev)
-				dev_err(pool->dev, "dma_pool_free %s, dma %Lx "
-					"already free\n", pool->name,
-					(unsigned long long)dma);
+				dev_err(pool->dev, "dma_pool_free %s, dma %Lx already free\n",
+					pool->name, (unsigned long long)dma);
 			else
-				printk(KERN_ERR "dma_pool_free %s, dma %Lx "
-					"already free\n", pool->name,
-					(unsigned long long)dma);
+				printk(KERN_ERR "dma_pool_free %s, dma %Lx already free\n",
+					pool->name, (unsigned long long)dma);
 			return;
 		}
 	}
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index bb944c771c82..e1a177c20791 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -168,8 +168,7 @@ static void set_recommended_min_free_kbytes(void)
 
 	if (recommended_min > min_free_kbytes) {
 		if (user_min_free_kbytes >= 0)
-			pr_info("raising min_free_kbytes from %d to %lu "
-				"to help transparent hugepage allocations\n",
+			pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
 				min_free_kbytes, recommended_min);
 
 		min_free_kbytes = recommended_min;
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 12f222d0224b..745aa8f36028 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -214,8 +214,7 @@ static void kasan_report_error(struct kasan_access_info *info)
 	 */
 	kasan_disable_current();
 	spin_lock_irqsave(&report_lock, flags);
-	pr_err("================================="
-		"=================================\n");
+	pr_err("==================================================================\n");
 	if (info->access_addr <
 			kasan_shadow_to_mem((void *)KASAN_SHADOW_START)) {
 		if ((unsigned long)info->access_addr < PAGE_SIZE)
@@ -236,8 +235,7 @@ static void kasan_report_error(struct kasan_access_info *info)
 		print_address_description(info);
 		print_shadow_for_address(info->first_bad_addr);
 	}
-	pr_err("================================="
-		"=================================\n");
+	pr_err("==================================================================\n");
 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
 	spin_unlock_irqrestore(&report_lock, flags);
 	kasan_enable_current();
diff --git a/mm/kmemcheck.c b/mm/kmemcheck.c
index 6f4f424037c0..e5f83333066e 100644
--- a/mm/kmemcheck.c
+++ b/mm/kmemcheck.c
@@ -20,8 +20,7 @@ void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
 	shadow = alloc_pages_node(node, flags | __GFP_NOTRACK, order);
 	if (!shadow) {
 		if (printk_ratelimit())
-			printk(KERN_ERR "kmemcheck: failed to allocate "
-				"shadow bitmap\n");
+			printk(KERN_ERR "kmemcheck: failed to allocate shadow bitmap\n");
 		return;
 	}
 
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index a81cd76ea282..e6429926e957 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -596,8 +596,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
 		else if (parent->pointer + parent->size <= ptr)
 			link = &parent->rb_node.rb_right;
 		else {
-			kmemleak_stop("Cannot insert 0x%lx into the object "
-				      "search tree (overlaps existing)\n",
+			kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
 				      ptr);
 			/*
 			 * No need for parent->lock here since "parent" cannot
@@ -670,8 +669,8 @@ static void delete_object_part(unsigned long ptr, size_t size)
 	object = find_and_remove_object(ptr, 1);
 	if (!object) {
 #ifdef DEBUG
-		kmemleak_warn("Partially freeing unknown object at 0x%08lx "
-			      "(size %zu)\n", ptr, size);
+		kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
+			      ptr, size);
 #endif
 		return;
 	}
@@ -717,8 +716,8 @@ static void paint_ptr(unsigned long ptr, int color)
 
 	object = find_and_get_object(ptr, 0);
 	if (!object) {
-		kmemleak_warn("Trying to color unknown object "
-			      "at 0x%08lx as %s\n", ptr,
+		kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
+			      ptr,
 			      (color == KMEMLEAK_GREY) ? "Grey" :
 			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
 		return;
@@ -1463,8 +1462,8 @@ static void kmemleak_scan(void)
 	if (new_leaks) {
 		kmemleak_found_leaks = true;
 
-		pr_info("%d new suspected memory leaks (see "
-			"/sys/kernel/debug/kmemleak)\n", new_leaks);
+		pr_info("%d new suspected memory leaks (see /sys/kernel/debug/kmemleak)\n",
+			new_leaks);
 	}
 
 }
@@ -1795,8 +1794,7 @@ static void kmemleak_do_cleanup(struct work_struct *work)
 	if (!kmemleak_found_leaks)
 		__kmemleak_do_cleanup();
 	else
-		pr_info("Kmemleak disabled without freeing internal data. "
-			"Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\"\n");
+		pr_info("Kmemleak disabled without freeing internal data. Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\".\n");
 }
 
 static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);
diff --git a/mm/memblock.c b/mm/memblock.c
index fc7824fa1b42..b570dddb4cb9 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -238,8 +238,7 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
 		 * so we use WARN_ONCE() here to see the stack trace if
 		 * fail happens.
 		 */
-		WARN_ONCE(1, "memblock: bottom-up allocation failed, "
-			     "memory hotunplug may be affected\n");
+		WARN_ONCE(1, "memblock: bottom-up allocation failed, memory hotunplug may be affected\n");
 	}
 
 	return __memblock_find_range_top_down(start, end, size, align, nid,
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index f5758b678608..aa34431c3f31 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1970,8 +1970,7 @@ static int check_memblock_offlined_cb(struct memory_block *mem, void *arg)
 
 		beginpa = PFN_PHYS(section_nr_to_pfn(mem->start_section_nr));
 		endpa = PFN_PHYS(section_nr_to_pfn(mem->end_section_nr + 1))-1;
-		pr_warn("removing memory fails, because memory "
-			"[%pa-%pa] is onlined\n",
+		pr_warn("removing memory fails, because memory [%pa-%pa] is onlined\n",
 			&beginpa, &endpa);
 	}
 
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 8cbc74387df3..b25de27b83d0 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2559,9 +2559,7 @@ static void __init check_numabalancing_enable(void)
 		set_numabalancing_state(numabalancing_override == 1);
 
 	if (num_online_nodes() > 1 && !numabalancing_override) {
-		pr_info("%s automatic NUMA balancing. "
-			"Configure with numa_balancing= or the "
-			"kernel.numa_balancing sysctl",
+		pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
 			numabalancing_default ? "Enabling" : "Disabling");
 		set_numabalancing_state(numabalancing_default);
 	}
diff --git a/mm/mmap.c b/mm/mmap.c
index 14641926c97f..e06345aafa03 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2517,9 +2517,8 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
 	unsigned long ret = -EINVAL;
 	struct file *file;
 
-	pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. "
-			"See Documentation/vm/remap_file_pages.txt.\n",
-			current->comm, current->pid);
+	pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/vm/remap_file_pages.txt.\n",
+		     current->comm, current->pid);
 
 	if (prot)
 		return ret;
@@ -2885,8 +2884,7 @@ bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages)
 	if (is_data_mapping(flags) &&
 	    mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) {
 		if (ignore_rlimit_data)
-			pr_warn_once("%s (%d): VmData %lu exceed data ulimit "
-				     "%lu. Will be forbidden soon.\n",
+			pr_warn_once("%s (%d): VmData %lu exceed data ulimit %lu. Will be forbidden soon.\n",
 				     current->comm, current->pid,
 				     (mm->data_vm + npages) << PAGE_SHIFT,
 				     rlimit(RLIMIT_DATA));
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 63ced708eafd..fde3d374c0af 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -383,8 +383,7 @@ static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
 static void dump_header(struct oom_control *oc, struct task_struct *p,
 			struct mem_cgroup *memcg)
 {
-	pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), order=%d, "
-		"oom_score_adj=%hd\n",
+	pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), order=%d, oom_score_adj=%hd\n",
 		current->comm, oc->gfp_mask, &oc->gfp_mask, oc->order,
 		current->signal->oom_score_adj);
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 30f01c6f6b88..42cf199652a5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4074,8 +4074,7 @@ static int __parse_numa_zonelist_order(char *s)
 		user_zonelist_order = ZONELIST_ORDER_ZONE;
 	} else {
 		printk(KERN_WARNING
-			"Ignoring invalid numa_zonelist_order value: "
-			"%s\n", s);
+		       "Ignoring invalid numa_zonelist_order value: %s\n", s);
 		return -EINVAL;
 	}
 	return 0;
@@ -4539,12 +4538,11 @@ void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone)
 	else
 		page_group_by_mobility_disabled = 0;
 
-	pr_info("Built %i zonelists in %s order, mobility grouping %s. "
-		"Total pages: %ld\n",
-			nr_online_nodes,
-			zonelist_order_name[current_zonelist_order],
-			page_group_by_mobility_disabled ? "off" : "on",
-			vm_total_pages);
+	pr_info("Built %i zonelists in %s order, mobility grouping %s. Total pages: %ld\n",
+		nr_online_nodes,
+		zonelist_order_name[current_zonelist_order],
+		page_group_by_mobility_disabled ? "off" : "on",
+		vm_total_pages);
 #ifdef CONFIG_NUMA
 	pr_info("Policy zone: %s\n", zone_names[policy_zone]);
 #endif
@@ -6142,22 +6140,21 @@ void __init mem_init_print_info(const char *str)
 
 #undef adj_init_size
 
-	pr_info("Memory: %luK/%luK available "
-	       "(%luK kernel code, %luK rwdata, %luK rodata, "
-	       "%luK init, %luK bss, %luK reserved, %luK cma-reserved"
+	pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
 #ifdef CONFIG_HIGHMEM
-	       ", %luK highmem"
+		", %luK highmem"
 #endif
-	       "%s%s)\n",
-	       nr_free_pages() << (PAGE_SHIFT-10), physpages << (PAGE_SHIFT-10),
-	       codesize >> 10, datasize >> 10, rosize >> 10,
-	       (init_data_size + init_code_size) >> 10, bss_size >> 10,
-	       (physpages - totalram_pages - totalcma_pages) << (PAGE_SHIFT-10),
-	       totalcma_pages << (PAGE_SHIFT-10),
+		"%s%s)\n",
+		nr_free_pages() << (PAGE_SHIFT - 10),
+		physpages << (PAGE_SHIFT - 10),
+		codesize >> 10, datasize >> 10, rosize >> 10,
+		(init_data_size + init_code_size) >> 10, bss_size >> 10,
+		(physpages - totalram_pages - totalcma_pages) << (PAGE_SHIFT - 10),
+		totalcma_pages << (PAGE_SHIFT - 10),
 #ifdef CONFIG_HIGHMEM
-	       totalhigh_pages << (PAGE_SHIFT-10),
+		totalhigh_pages << (PAGE_SHIFT - 10),
 #endif
-	       str ? ", " : "", str ? str : "");
+		str ? ", " : "", str ? str : "");
 }
 
 /**
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 44ad1f00c4e1..ac3d8d129974 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -198,9 +198,8 @@ void __dump_page_owner(struct page *page)
 		return;
 	}
 
-	pr_alert("page allocated via order %u, migratetype %s, "
-		"gfp_mask %#x(%pGg)\n", page_ext->order,
-		migratetype_names[mt], gfp_mask, &gfp_mask);
+	pr_alert("page allocated via order %u, migratetype %s, gfp_mask %#x(%pGg)\n",
+		 page_ext->order, migratetype_names[mt], gfp_mask, &gfp_mask);
 	print_stack_trace(&trace, 0);
 
 	if (page_ext->last_migrate_reason != -1)
diff --git a/mm/percpu.c b/mm/percpu.c
index 847814b15233..1571547e7b01 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -888,8 +888,8 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
 	size = ALIGN(size, 2);
 
 	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
-		WARN(true, "illegal size (%zu) or align (%zu) for "
-		     "percpu allocation\n", size, align);
+		WARN(true, "illegal size (%zu) or align (%zu) for percpu allocation\n",
+		     size, align);
 		return NULL;
 	}
 
diff --git a/mm/slab.c b/mm/slab.c
index e1f6c27c3db5..e558f8593a22 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1566,11 +1566,9 @@ static void dump_line(char *data, int offset, int limit)
 	if (bad_count == 1) {
 		error ^= POISON_FREE;
 		if (!(error & (error - 1))) {
-			printk(KERN_ERR "Single bit error detected. Probably "
-					"bad RAM.\n");
+			printk(KERN_ERR "Single bit error detected. Probably bad RAM.\n");
 #ifdef CONFIG_X86
-			printk(KERN_ERR "Run memtest86+ or a similar memory "
-					"test tool.\n");
+			printk(KERN_ERR "Run memtest86+ or a similar memory test tool.\n");
 #else
 			printk(KERN_ERR "Run a memory test tool.\n");
 #endif
@@ -1693,11 +1691,9 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep,
 		}
 		if (cachep->flags & SLAB_RED_ZONE) {
 			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
-				slab_error(cachep, "start of a freed object "
-					   "was overwritten");
+				slab_error(cachep, "start of a freed object was overwritten");
 			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
-				slab_error(cachep, "end of a freed object "
-					   "was overwritten");
+				slab_error(cachep, "end of a freed object was overwritten");
 		}
 	}
 }
@@ -2398,11 +2394,9 @@ static void cache_init_objs_debug(struct kmem_cache *cachep, struct page *page)
 
 		if (cachep->flags & SLAB_RED_ZONE) {
 			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
-				slab_error(cachep, "constructor overwrote the"
-					   " end of an object");
+				slab_error(cachep, "constructor overwrote the end of an object");
 			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
-				slab_error(cachep, "constructor overwrote the"
-					   " start of an object");
+				slab_error(cachep, "constructor overwrote the start of an object");
 		}
 		/* need to poison the objs? */
 		if (cachep->flags & SLAB_POISON) {
@@ -2469,8 +2463,8 @@ static void slab_put_obj(struct kmem_cache *cachep,
 	/* Verify double free bug */
 	for (i = page->active; i < cachep->num; i++) {
 		if (get_free_obj(page, i) == objnr) {
-			printk(KERN_ERR "slab: double free detected in cache "
-					"'%s', objp %p\n", cachep->name, objp);
+			printk(KERN_ERR "slab: double free detected in cache '%s', objp %p\n",
+			       cachep->name, objp);
 			BUG();
 		}
 	}
@@ -2901,8 +2895,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 	if (cachep->flags & SLAB_RED_ZONE) {
 		if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
 				*dbg_redzone2(cachep, objp) != RED_INACTIVE) {
-			slab_error(cachep, "double free, or memory outside"
-						" object was overwritten");
+			slab_error(cachep, "double free, or memory outside object was overwritten");
 			printk(KERN_ERR
 				"%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
 				objp, *dbg_redzone1(cachep, objp),
@@ -4028,8 +4021,7 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
 		unsigned long node_frees = cachep->node_frees;
 		unsigned long overflows = cachep->node_overflow;
 
-		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu "
-			   "%4lu %4lu %4lu %4lu %4lu",
+		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu %4lu %4lu %4lu %4lu %4lu",
 			   allocs, high, grown,
 			   reaped, errors, max_freeable, node_allocs,
 			   node_frees, overflows);
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 8addc3c4df37..e885e11a316f 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -726,8 +726,8 @@ void kmem_cache_destroy(struct kmem_cache *s)
 	err = shutdown_cache(s, &release, &need_rcu_barrier);
 
 	if (err) {
-		pr_err("kmem_cache_destroy %s: "
-		       "Slab cache still has objects\n", s->name);
+		pr_err("kmem_cache_destroy %s: Slab cache still has objects\n",
+		       s->name);
 		dump_stack();
 	}
 out_unlock:
@@ -1047,13 +1047,11 @@ static void print_slabinfo_header(struct seq_file *m)
 #else
 	seq_puts(m, "slabinfo - version: 2.1\n");
 #endif
-	seq_puts(m, "# name <active_objs> <num_objs> <objsize> "
-		 "<objperslab> <pagesperslab>");
+	seq_puts(m, "# name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
 	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
 	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
 #ifdef CONFIG_DEBUG_SLAB
-	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
-		 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
+	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> <error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
 	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
 #endif
 	seq_putc(m, '\n');
diff --git a/mm/slub.c b/mm/slub.c
index 64ed5f3a3046..7277413ebc8b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -950,14 +950,14 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
 		max_objects = MAX_OBJS_PER_PAGE;
 
 	if (page->objects != max_objects) {
-		slab_err(s, page, "Wrong number of objects. Found %d but "
-			"should be %d", page->objects, max_objects);
+		slab_err(s, page, "Wrong number of objects. Found %d but should be %d",
+			 page->objects, max_objects);
 		page->objects = max_objects;
 		slab_fix(s, "Number of objects adjusted.");
 	}
 	if (page->inuse != page->objects - nr) {
-		slab_err(s, page, "Wrong object count. Counter is %d but "
-			"counted were %d", page->inuse, page->objects - nr);
+		slab_err(s, page, "Wrong object count. Counter is %d but counted were %d",
+			 page->inuse, page->objects - nr);
 		page->inuse = page->objects - nr;
 		slab_fix(s, "Object count adjusted.");
 	}
@@ -1117,8 +1117,8 @@ static inline int free_consistency_checks(struct kmem_cache *s,
 
 	if (unlikely(s != page->slab_cache)) {
 		if (!PageSlab(page)) {
-			slab_err(s, page, "Attempt to free object(0x%p) "
-				 "outside of slab", object);
+			slab_err(s, page, "Attempt to free object(0x%p) outside of slab",
+				 object);
 		} else if (!page->slab_cache) {
 			pr_err("SLUB <none>: no slab for object 0x%p.\n",
 			       object);
@@ -3439,10 +3439,9 @@ static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
 	free_kmem_cache_nodes(s);
 error:
 	if (flags & SLAB_PANIC)
-		panic("Cannot create slab %s size=%lu realsize=%u "
-			"order=%u offset=%u flags=%lx\n",
-			s->name, (unsigned long)s->size, s->size,
-			oo_order(s->oo), s->offset, flags);
+		panic("Cannot create slab %s size=%lu realsize=%u order=%u offset=%u flags=%lx\n",
+		      s->name, (unsigned long)s->size, s->size,
+		      oo_order(s->oo), s->offset, flags);
 	return -EINVAL;
 }
 
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index b60802b3e5ea..d3511f9ad0f9 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -166,8 +166,8 @@ void __meminit vmemmap_verify(pte_t *pte, int node,
 	int actual_node = early_pfn_to_nid(pfn);
 
 	if (node_distance(actual_node, node) > LOCAL_DISTANCE)
-		printk(KERN_WARNING "[%lx-%lx] potential offnode "
-			"page_structs\n", start, end - 1);
+		printk(KERN_WARNING "[%lx-%lx] potential offnode page_structs\n",
+		       start, end - 1);
 }
 
 pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
@@ -292,8 +292,8 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
 		if (map_map[pnum])
 			continue;
 		ms = __nr_to_section(pnum);
-		printk(KERN_ERR "%s: sparsemem memory map backing failed "
-			"some memory will not be available.\n", __func__);
+		printk(KERN_ERR "%s: sparsemem memory map backing failed some memory will not be available.\n",
+		       __func__);
 		ms->section_mem_map = 0;
 	}
 
diff --git a/mm/sparse.c b/mm/sparse.c
index 3717ceed4177..7cdb27d9f01f 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -428,8 +428,8 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
 		if (map_map[pnum])
 			continue;
 		ms = __nr_to_section(pnum);
-		printk(KERN_ERR "%s: sparsemem memory map backing failed "
-			"some memory will not be available.\n", __func__);
+		printk(KERN_ERR "%s: sparsemem memory map backing failed some memory will not be available.\n",
+		       __func__);
 		ms->section_mem_map = 0;
 	}
 }
@@ -456,8 +456,8 @@ static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
 	if (map)
 		return map;
 
-	printk(KERN_ERR "%s: sparsemem memory map backing failed "
-		"some memory will not be available.\n", __func__);
+	printk(KERN_ERR "%s: sparsemem memory map backing failed some memory will not be available.\n",
+	       __func__);
 	ms->section_mem_map = 0;
 	return NULL;
 }
diff --git a/mm/swapfile.c b/mm/swapfile.c
index d2c37365e2d6..b86cf26a586b 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2526,8 +2526,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
 		(swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
 	enable_swap_info(p, prio, swap_map, cluster_info, frontswap_map);
 
-	pr_info("Adding %uk swap on %s. "
-		"Priority:%d extents:%d across:%lluk %s%s%s%s%s\n",
+	pr_info("Adding %uk swap on %s. Priority:%d extents:%d across:%lluk %s%s%s%s%s\n",
 		p->pages<<(PAGE_SHIFT-10), name->name, p->prio,
 		nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10),
 		(p->flags & SWP_SOLIDSTATE) ? "SS" : "",
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index d4b2e34adae0..e86c24ee9445 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -469,8 +469,8 @@ overflow:
 		goto retry;
 	}
 	if (printk_ratelimit())
-		pr_warn("vmap allocation for size %lu failed: "
-			"use vmalloc=<size> to increase size.\n", size);
+		pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n",
+			size);
 	kfree(va);
 	return ERR_PTR(-EBUSY);
 }