diff options
author | Joe Perches <joe@perches.com> | 2016-03-17 17:19:50 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-03-17 18:09:34 -0400 |
commit | 1170532bb49f9468aedabdc1d5a560e2521a2bcc (patch) | |
tree | 0197245ba37726d4ba7325e0de8129d45f8f49d9 | |
parent | 756a025f00091918d9d09ca3229defb160b409c0 (diff) |
mm: convert printk(KERN_<LEVEL> to pr_<level>
Most of the mm subsystem uses pr_<level> so make it consistent.
Miscellanea:
- Realign arguments
- Add missing newline to format
- kmemleak-test.c has a "kmemleak: " prefix added to the
"Kmemleak testing" logging message via pr_fmt
Signed-off-by: Joe Perches <joe@perches.com>
Acked-by: Tejun Heo <tj@kernel.org> [percpu]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r-- | mm/backing-dev.c | 4 | ||||
-rw-r--r-- | mm/bootmem.c | 7 | ||||
-rw-r--r-- | mm/dmapool.c | 12 | ||||
-rw-r--r-- | mm/internal.h | 2 | ||||
-rw-r--r-- | mm/kmemcheck.c | 2 | ||||
-rw-r--r-- | mm/kmemleak-test.c | 2 | ||||
-rw-r--r-- | mm/memory-failure.c | 52 | ||||
-rw-r--r-- | mm/memory.c | 17 | ||||
-rw-r--r-- | mm/mm_init.c | 7 | ||||
-rw-r--r-- | mm/nobootmem.c | 4 | ||||
-rw-r--r-- | mm/page_alloc.c | 24 | ||||
-rw-r--r-- | mm/page_io.c | 22 | ||||
-rw-r--r-- | mm/percpu-km.c | 6 | ||||
-rw-r--r-- | mm/percpu.c | 12 | ||||
-rw-r--r-- | mm/shmem.c | 14 | ||||
-rw-r--r-- | mm/slab.c | 51 | ||||
-rw-r--r-- | mm/slab_common.c | 2 | ||||
-rw-r--r-- | mm/sparse-vmemmap.c | 6 | ||||
-rw-r--r-- | mm/sparse.c | 17 | ||||
-rw-r--r-- | mm/swap_cgroup.c | 5 |
20 files changed, 118 insertions(+), 150 deletions(-)
diff --git a/mm/backing-dev.c b/mm/backing-dev.c index c554d173a65f..bfbd7096b6ed 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c | |||
@@ -1026,8 +1026,8 @@ int pdflush_proc_obsolete(struct ctl_table *table, int write, | |||
1026 | 1026 | ||
1027 | if (copy_to_user(buffer, kbuf, sizeof(kbuf))) | 1027 | if (copy_to_user(buffer, kbuf, sizeof(kbuf))) |
1028 | return -EFAULT; | 1028 | return -EFAULT; |
1029 | printk_once(KERN_WARNING "%s exported in /proc is scheduled for removal\n", | 1029 | pr_warn_once("%s exported in /proc is scheduled for removal\n", |
1030 | table->procname); | 1030 | table->procname); |
1031 | 1031 | ||
1032 | *lenp = 2; | 1032 | *lenp = 2; |
1033 | *ppos += *lenp; | 1033 | *ppos += *lenp; |
diff --git a/mm/bootmem.c b/mm/bootmem.c index 91e32bc8517f..0aa7dda52402 100644 --- a/mm/bootmem.c +++ b/mm/bootmem.c | |||
@@ -50,8 +50,7 @@ early_param("bootmem_debug", bootmem_debug_setup); | |||
50 | 50 | ||
51 | #define bdebug(fmt, args...) ({ \ | 51 | #define bdebug(fmt, args...) ({ \ |
52 | if (unlikely(bootmem_debug)) \ | 52 | if (unlikely(bootmem_debug)) \ |
53 | printk(KERN_INFO \ | 53 | pr_info("bootmem::%s " fmt, \ |
54 | "bootmem::%s " fmt, \ | ||
55 | __func__, ## args); \ | 54 | __func__, ## args); \ |
56 | }) | 55 | }) |
57 | 56 | ||
@@ -680,7 +679,7 @@ static void * __init ___alloc_bootmem(unsigned long size, unsigned long align, | |||
680 | /* | 679 | /* |
681 | * Whoops, we cannot satisfy the allocation request. | 680 | * Whoops, we cannot satisfy the allocation request. |
682 | */ | 681 | */ |
683 | printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size); | 682 | pr_alert("bootmem alloc of %lu bytes failed!\n", size); |
684 | panic("Out of memory"); | 683 | panic("Out of memory"); |
685 | return NULL; | 684 | return NULL; |
686 | } | 685 | } |
@@ -755,7 +754,7 @@ void * __init ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size, | |||
755 | if (ptr) | 754 | if (ptr) |
756 | return ptr; | 755 | return ptr; |
757 | 756 | ||
758 | printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size); | 757 | pr_alert("bootmem alloc of %lu bytes failed!\n", size); |
759 | panic("Out of memory"); | 758 | panic("Out of memory"); |
760 | return NULL; | 759 | return NULL; |
761 | } | 760 | } |
diff --git a/mm/dmapool.c b/mm/dmapool.c index 2821500e8123..abcbfe86c25a 100644 --- a/mm/dmapool.c +++ b/mm/dmapool.c | |||
@@ -294,8 +294,7 @@ void dma_pool_destroy(struct dma_pool *pool) | |||
294 | "dma_pool_destroy %s, %p busy\n", | 294 | "dma_pool_destroy %s, %p busy\n", |
295 | pool->name, page->vaddr); | 295 | pool->name, page->vaddr); |
296 | else | 296 | else |
297 | printk(KERN_ERR | 297 | pr_err("dma_pool_destroy %s, %p busy\n", |
298 | "dma_pool_destroy %s, %p busy\n", | ||
299 | pool->name, page->vaddr); | 298 | pool->name, page->vaddr); |
300 | /* leak the still-in-use consistent memory */ | 299 | /* leak the still-in-use consistent memory */ |
301 | list_del(&page->page_list); | 300 | list_del(&page->page_list); |
@@ -424,7 +423,7 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma) | |||
424 | "dma_pool_free %s, %p/%lx (bad dma)\n", | 423 | "dma_pool_free %s, %p/%lx (bad dma)\n", |
425 | pool->name, vaddr, (unsigned long)dma); | 424 | pool->name, vaddr, (unsigned long)dma); |
426 | else | 425 | else |
427 | printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n", | 426 | pr_err("dma_pool_free %s, %p/%lx (bad dma)\n", |
428 | pool->name, vaddr, (unsigned long)dma); | 427 | pool->name, vaddr, (unsigned long)dma); |
429 | return; | 428 | return; |
430 | } | 429 | } |
@@ -438,8 +437,7 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma) | |||
438 | "dma_pool_free %s, %p (bad vaddr)/%Lx\n", | 437 | "dma_pool_free %s, %p (bad vaddr)/%Lx\n", |
439 | pool->name, vaddr, (unsigned long long)dma); | 438 | pool->name, vaddr, (unsigned long long)dma); |
440 | else | 439 | else |
441 | printk(KERN_ERR | 440 | pr_err("dma_pool_free %s, %p (bad vaddr)/%Lx\n", |
442 | "dma_pool_free %s, %p (bad vaddr)/%Lx\n", | ||
443 | pool->name, vaddr, (unsigned long long)dma); | 441 | pool->name, vaddr, (unsigned long long)dma); |
444 | return; | 442 | return; |
445 | } | 443 | } |
@@ -455,8 +453,8 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma) | |||
455 | dev_err(pool->dev, "dma_pool_free %s, dma %Lx already free\n", | 453 | dev_err(pool->dev, "dma_pool_free %s, dma %Lx already free\n", |
456 | pool->name, (unsigned long long)dma); | 454 | pool->name, (unsigned long long)dma); |
457 | else | 455 | else |
458 | printk(KERN_ERR "dma_pool_free %s, dma %Lx already free\n", | 456 | pr_err("dma_pool_free %s, dma %Lx already free\n", |
459 | pool->name, (unsigned long long)dma); | 457 | pool->name, (unsigned long long)dma); |
460 | return; | 458 | return; |
461 | } | 459 | } |
462 | } | 460 | } |
diff --git a/mm/internal.h b/mm/internal.h index 57d7b0e839f0..7449392c6faa 100644 --- a/mm/internal.h +++ b/mm/internal.h | |||
@@ -386,7 +386,7 @@ extern int mminit_loglevel; | |||
386 | do { \ | 386 | do { \ |
387 | if (level < mminit_loglevel) { \ | 387 | if (level < mminit_loglevel) { \ |
388 | if (level <= MMINIT_WARNING) \ | 388 | if (level <= MMINIT_WARNING) \ |
389 | printk(KERN_WARNING "mminit::" prefix " " fmt, ##arg); \ | 389 | pr_warn("mminit::" prefix " " fmt, ##arg); \ |
390 | else \ | 390 | else \ |
391 | printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \ | 391 | printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \ |
392 | } \ | 392 | } \ |
diff --git a/mm/kmemcheck.c b/mm/kmemcheck.c index e5f83333066e..5bf191756a4a 100644 --- a/mm/kmemcheck.c +++ b/mm/kmemcheck.c | |||
@@ -20,7 +20,7 @@ void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node) | |||
20 | shadow = alloc_pages_node(node, flags | __GFP_NOTRACK, order); | 20 | shadow = alloc_pages_node(node, flags | __GFP_NOTRACK, order); |
21 | if (!shadow) { | 21 | if (!shadow) { |
22 | if (printk_ratelimit()) | 22 | if (printk_ratelimit()) |
23 | printk(KERN_ERR "kmemcheck: failed to allocate shadow bitmap\n"); | 23 | pr_err("kmemcheck: failed to allocate shadow bitmap\n"); |
24 | return; | 24 | return; |
25 | } | 25 | } |
26 | 26 | ||
diff --git a/mm/kmemleak-test.c b/mm/kmemleak-test.c index dcdcadb69533..dd3c23a801b1 100644 --- a/mm/kmemleak-test.c +++ b/mm/kmemleak-test.c | |||
@@ -49,7 +49,7 @@ static int __init kmemleak_test_init(void) | |||
49 | struct test_node *elem; | 49 | struct test_node *elem; |
50 | int i; | 50 | int i; |
51 | 51 | ||
52 | printk(KERN_INFO "Kmemleak testing\n"); | 52 | pr_info("Kmemleak testing\n"); |
53 | 53 | ||
54 | /* make some orphan objects */ | 54 | /* make some orphan objects */ |
55 | pr_info("kmalloc(32) = %p\n", kmalloc(32, GFP_KERNEL)); | 55 | pr_info("kmalloc(32) = %p\n", kmalloc(32, GFP_KERNEL)); |
diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 67c30eb993f0..5a544c6c0717 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c | |||
@@ -184,9 +184,8 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno, | |||
184 | struct siginfo si; | 184 | struct siginfo si; |
185 | int ret; | 185 | int ret; |
186 | 186 | ||
187 | printk(KERN_ERR | 187 | pr_err("MCE %#lx: Killing %s:%d due to hardware memory corruption\n", |
188 | "MCE %#lx: Killing %s:%d due to hardware memory corruption\n", | 188 | pfn, t->comm, t->pid); |
189 | pfn, t->comm, t->pid); | ||
190 | si.si_signo = SIGBUS; | 189 | si.si_signo = SIGBUS; |
191 | si.si_errno = 0; | 190 | si.si_errno = 0; |
192 | si.si_addr = (void *)addr; | 191 | si.si_addr = (void *)addr; |
@@ -209,8 +208,8 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno, | |||
209 | ret = send_sig_info(SIGBUS, &si, t); /* synchronous? */ | 208 | ret = send_sig_info(SIGBUS, &si, t); /* synchronous? */ |
210 | } | 209 | } |
211 | if (ret < 0) | 210 | if (ret < 0) |
212 | printk(KERN_INFO "MCE: Error sending signal to %s:%d: %d\n", | 211 | pr_info("MCE: Error sending signal to %s:%d: %d\n", |
213 | t->comm, t->pid, ret); | 212 | t->comm, t->pid, ret); |
214 | return ret; | 213 | return ret; |
215 | } | 214 | } |
216 | 215 | ||
@@ -290,8 +289,7 @@ static void add_to_kill(struct task_struct *tsk, struct page *p, | |||
290 | } else { | 289 | } else { |
291 | tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC); | 290 | tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC); |
292 | if (!tk) { | 291 | if (!tk) { |
293 | printk(KERN_ERR | 292 | pr_err("MCE: Out of memory while machine check handling\n"); |
294 | "MCE: Out of memory while machine check handling\n"); | ||
295 | return; | 293 | return; |
296 | } | 294 | } |
297 | } | 295 | } |
@@ -336,9 +334,8 @@ static void kill_procs(struct list_head *to_kill, int forcekill, int trapno, | |||
336 | * signal and then access the memory. Just kill it. | 334 | * signal and then access the memory. Just kill it. |
337 | */ | 335 | */ |
338 | if (fail || tk->addr_valid == 0) { | 336 | if (fail || tk->addr_valid == 0) { |
339 | printk(KERN_ERR | 337 | pr_err("MCE %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n", |
340 | "MCE %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n", | 338 | pfn, tk->tsk->comm, tk->tsk->pid); |
341 | pfn, tk->tsk->comm, tk->tsk->pid); | ||
342 | force_sig(SIGKILL, tk->tsk); | 339 | force_sig(SIGKILL, tk->tsk); |
343 | } | 340 | } |
344 | 341 | ||
@@ -350,9 +347,8 @@ static void kill_procs(struct list_head *to_kill, int forcekill, int trapno, | |||
350 | */ | 347 | */ |
351 | else if (kill_proc(tk->tsk, tk->addr, trapno, | 348 | else if (kill_proc(tk->tsk, tk->addr, trapno, |
352 | pfn, page, flags) < 0) | 349 | pfn, page, flags) < 0) |
353 | printk(KERN_ERR | 350 | pr_err("MCE %#lx: Cannot send advisory machine check signal to %s:%d\n", |
354 | "MCE %#lx: Cannot send advisory machine check signal to %s:%d\n", | 351 | pfn, tk->tsk->comm, tk->tsk->pid); |
355 | pfn, tk->tsk->comm, tk->tsk->pid); | ||
356 | } | 352 | } |
357 | put_task_struct(tk->tsk); | 353 | put_task_struct(tk->tsk); |
358 | kfree(tk); | 354 | kfree(tk); |
@@ -563,7 +559,7 @@ static int me_kernel(struct page *p, unsigned long pfn) | |||
563 | */ | 559 | */ |
564 | static int me_unknown(struct page *p, unsigned long pfn) | 560 | static int me_unknown(struct page *p, unsigned long pfn) |
565 | { | 561 | { |
566 | printk(KERN_ERR "MCE %#lx: Unknown page state\n", pfn); | 562 | pr_err("MCE %#lx: Unknown page state\n", pfn); |
567 | return MF_FAILED; | 563 | return MF_FAILED; |
568 | } | 564 | } |
569 | 565 | ||
@@ -608,8 +604,8 @@ static int me_pagecache_clean(struct page *p, unsigned long pfn) | |||
608 | if (mapping->a_ops->error_remove_page) { | 604 | if (mapping->a_ops->error_remove_page) { |
609 | err = mapping->a_ops->error_remove_page(mapping, p); | 605 | err = mapping->a_ops->error_remove_page(mapping, p); |
610 | if (err != 0) { | 606 | if (err != 0) { |
611 | printk(KERN_INFO "MCE %#lx: Failed to punch page: %d\n", | 607 | pr_info("MCE %#lx: Failed to punch page: %d\n", |
612 | pfn, err); | 608 | pfn, err); |
613 | } else if (page_has_private(p) && | 609 | } else if (page_has_private(p) && |
614 | !try_to_release_page(p, GFP_NOIO)) { | 610 | !try_to_release_page(p, GFP_NOIO)) { |
615 | pr_info("MCE %#lx: failed to release buffers\n", pfn); | 611 | pr_info("MCE %#lx: failed to release buffers\n", pfn); |
@@ -624,8 +620,7 @@ static int me_pagecache_clean(struct page *p, unsigned long pfn) | |||
624 | if (invalidate_inode_page(p)) | 620 | if (invalidate_inode_page(p)) |
625 | ret = MF_RECOVERED; | 621 | ret = MF_RECOVERED; |
626 | else | 622 | else |
627 | printk(KERN_INFO "MCE %#lx: Failed to invalidate\n", | 623 | pr_info("MCE %#lx: Failed to invalidate\n", pfn); |
628 | pfn); | ||
629 | } | 624 | } |
630 | return ret; | 625 | return ret; |
631 | } | 626 | } |
@@ -854,8 +849,7 @@ static int page_action(struct page_state *ps, struct page *p, | |||
854 | if (ps->action == me_swapcache_dirty && result == MF_DELAYED) | 849 | if (ps->action == me_swapcache_dirty && result == MF_DELAYED) |
855 | count--; | 850 | count--; |
856 | if (count != 0) { | 851 | if (count != 0) { |
857 | printk(KERN_ERR | 852 | pr_err("MCE %#lx: %s still referenced by %d users\n", |
858 | "MCE %#lx: %s still referenced by %d users\n", | ||
859 | pfn, action_page_types[ps->type], count); | 853 | pfn, action_page_types[ps->type], count); |
860 | result = MF_FAILED; | 854 | result = MF_FAILED; |
861 | } | 855 | } |
@@ -934,8 +928,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn, | |||
934 | } | 928 | } |
935 | 929 | ||
936 | if (PageSwapCache(p)) { | 930 | if (PageSwapCache(p)) { |
937 | printk(KERN_ERR | 931 | pr_err("MCE %#lx: keeping poisoned page in swap cache\n", pfn); |
938 | "MCE %#lx: keeping poisoned page in swap cache\n", pfn); | ||
939 | ttu |= TTU_IGNORE_HWPOISON; | 932 | ttu |= TTU_IGNORE_HWPOISON; |
940 | } | 933 | } |
941 | 934 | ||
@@ -953,8 +946,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn, | |||
953 | } else { | 946 | } else { |
954 | kill = 0; | 947 | kill = 0; |
955 | ttu |= TTU_IGNORE_HWPOISON; | 948 | ttu |= TTU_IGNORE_HWPOISON; |
956 | printk(KERN_INFO | 949 | pr_info("MCE %#lx: corrupted page was clean: dropped without side effects\n", |
957 | "MCE %#lx: corrupted page was clean: dropped without side effects\n", | ||
958 | pfn); | 950 | pfn); |
959 | } | 951 | } |
960 | } | 952 | } |
@@ -972,8 +964,8 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn, | |||
972 | 964 | ||
973 | ret = try_to_unmap(hpage, ttu); | 965 | ret = try_to_unmap(hpage, ttu); |
974 | if (ret != SWAP_SUCCESS) | 966 | if (ret != SWAP_SUCCESS) |
975 | printk(KERN_ERR "MCE %#lx: failed to unmap page (mapcount=%d)\n", | 967 | pr_err("MCE %#lx: failed to unmap page (mapcount=%d)\n", |
976 | pfn, page_mapcount(hpage)); | 968 | pfn, page_mapcount(hpage)); |
977 | 969 | ||
978 | /* | 970 | /* |
979 | * Now that the dirty bit has been propagated to the | 971 | * Now that the dirty bit has been propagated to the |
@@ -1040,16 +1032,14 @@ int memory_failure(unsigned long pfn, int trapno, int flags) | |||
1040 | panic("Memory failure from trap %d on page %lx", trapno, pfn); | 1032 | panic("Memory failure from trap %d on page %lx", trapno, pfn); |
1041 | 1033 | ||
1042 | if (!pfn_valid(pfn)) { | 1034 | if (!pfn_valid(pfn)) { |
1043 | printk(KERN_ERR | 1035 | pr_err("MCE %#lx: memory outside kernel control\n", pfn); |
1044 | "MCE %#lx: memory outside kernel control\n", | ||
1045 | pfn); | ||
1046 | return -ENXIO; | 1036 | return -ENXIO; |
1047 | } | 1037 | } |
1048 | 1038 | ||
1049 | p = pfn_to_page(pfn); | 1039 | p = pfn_to_page(pfn); |
1050 | orig_head = hpage = compound_head(p); | 1040 | orig_head = hpage = compound_head(p); |
1051 | if (TestSetPageHWPoison(p)) { | 1041 | if (TestSetPageHWPoison(p)) { |
1052 | printk(KERN_ERR "MCE %#lx: already hardware poisoned\n", pfn); | 1042 | pr_err("MCE %#lx: already hardware poisoned\n", pfn); |
1053 | return 0; | 1043 | return 0; |
1054 | } | 1044 | } |
1055 | 1045 | ||
@@ -1180,7 +1170,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags) | |||
1180 | * unpoison always clear PG_hwpoison inside page lock | 1170 | * unpoison always clear PG_hwpoison inside page lock |
1181 | */ | 1171 | */ |
1182 | if (!PageHWPoison(p)) { | 1172 | if (!PageHWPoison(p)) { |
1183 | printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn); | 1173 | pr_err("MCE %#lx: just unpoisoned\n", pfn); |
1184 | num_poisoned_pages_sub(nr_pages); | 1174 | num_poisoned_pages_sub(nr_pages); |
1185 | unlock_page(hpage); | 1175 | unlock_page(hpage); |
1186 | put_hwpoison_page(hpage); | 1176 | put_hwpoison_page(hpage); |
diff --git a/mm/memory.c b/mm/memory.c index 1974fc02c4d0..ac6bc15c19be 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -660,9 +660,8 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr, | |||
660 | return; | 660 | return; |
661 | } | 661 | } |
662 | if (nr_unshown) { | 662 | if (nr_unshown) { |
663 | printk(KERN_ALERT | 663 | pr_alert("BUG: Bad page map: %lu messages suppressed\n", |
664 | "BUG: Bad page map: %lu messages suppressed\n", | 664 | nr_unshown); |
665 | nr_unshown); | ||
666 | nr_unshown = 0; | 665 | nr_unshown = 0; |
667 | } | 666 | } |
668 | nr_shown = 0; | 667 | nr_shown = 0; |
@@ -673,15 +672,13 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr, | |||
673 | mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL; | 672 | mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL; |
674 | index = linear_page_index(vma, addr); | 673 | index = linear_page_index(vma, addr); |
675 | 674 | ||
676 | printk(KERN_ALERT | 675 | pr_alert("BUG: Bad page map in process %s pte:%08llx pmd:%08llx\n", |
677 | "BUG: Bad page map in process %s pte:%08llx pmd:%08llx\n", | 676 | current->comm, |
678 | current->comm, | 677 | (long long)pte_val(pte), (long long)pmd_val(*pmd)); |
679 | (long long)pte_val(pte), (long long)pmd_val(*pmd)); | ||
680 | if (page) | 678 | if (page) |
681 | dump_page(page, "bad pte"); | 679 | dump_page(page, "bad pte"); |
682 | printk(KERN_ALERT | 680 | pr_alert("addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n", |
683 | "addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n", | 681 | (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index); |
684 | (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index); | ||
685 | /* | 682 | /* |
686 | * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y | 683 | * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y |
687 | */ | 684 | */ |
diff --git a/mm/mm_init.c b/mm/mm_init.c index fdadf918de76..5b72266b4b03 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c | |||
@@ -55,13 +55,12 @@ void __init mminit_verify_zonelist(void) | |||
55 | /* Iterate the zonelist */ | 55 | /* Iterate the zonelist */ |
56 | for_each_zone_zonelist(zone, z, zonelist, zoneid) { | 56 | for_each_zone_zonelist(zone, z, zonelist, zoneid) { |
57 | #ifdef CONFIG_NUMA | 57 | #ifdef CONFIG_NUMA |
58 | printk(KERN_CONT "%d:%s ", | 58 | pr_cont("%d:%s ", zone->node, zone->name); |
59 | zone->node, zone->name); | ||
60 | #else | 59 | #else |
61 | printk(KERN_CONT "0:%s ", zone->name); | 60 | pr_cont("0:%s ", zone->name); |
62 | #endif /* CONFIG_NUMA */ | 61 | #endif /* CONFIG_NUMA */ |
63 | } | 62 | } |
64 | printk(KERN_CONT "\n"); | 63 | pr_cont("\n"); |
65 | } | 64 | } |
66 | } | 65 | } |
67 | } | 66 | } |
diff --git a/mm/nobootmem.c b/mm/nobootmem.c index 99feb2b07fc5..bd05a70f44b9 100644 --- a/mm/nobootmem.c +++ b/mm/nobootmem.c | |||
@@ -288,7 +288,7 @@ static void * __init ___alloc_bootmem(unsigned long size, unsigned long align, | |||
288 | /* | 288 | /* |
289 | * Whoops, we cannot satisfy the allocation request. | 289 | * Whoops, we cannot satisfy the allocation request. |
290 | */ | 290 | */ |
291 | printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size); | 291 | pr_alert("bootmem alloc of %lu bytes failed!\n", size); |
292 | panic("Out of memory"); | 292 | panic("Out of memory"); |
293 | return NULL; | 293 | return NULL; |
294 | } | 294 | } |
@@ -360,7 +360,7 @@ static void * __init ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size, | |||
360 | if (ptr) | 360 | if (ptr) |
361 | return ptr; | 361 | return ptr; |
362 | 362 | ||
363 | printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size); | 363 | pr_alert("bootmem alloc of %lu bytes failed!\n", size); |
364 | panic("Out of memory"); | 364 | panic("Out of memory"); |
365 | return NULL; | 365 | return NULL; |
366 | } | 366 | } |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 42cf199652a5..2a9eaec770b0 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -544,11 +544,11 @@ static int __init debug_guardpage_minorder_setup(char *buf) | |||
544 | unsigned long res; | 544 | unsigned long res; |
545 | 545 | ||
546 | if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) { | 546 | if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) { |
547 | printk(KERN_ERR "Bad debug_guardpage_minorder value\n"); | 547 | pr_err("Bad debug_guardpage_minorder value\n"); |
548 | return 0; | 548 | return 0; |
549 | } | 549 | } |
550 | _debug_guardpage_minorder = res; | 550 | _debug_guardpage_minorder = res; |
551 | printk(KERN_INFO "Setting debug_guardpage_minorder to %lu\n", res); | 551 | pr_info("Setting debug_guardpage_minorder to %lu\n", res); |
552 | return 0; | 552 | return 0; |
553 | } | 553 | } |
554 | __setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup); | 554 | __setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup); |
@@ -4073,8 +4073,7 @@ static int __parse_numa_zonelist_order(char *s) | |||
4073 | } else if (*s == 'z' || *s == 'Z') { | 4073 | } else if (*s == 'z' || *s == 'Z') { |
4074 | user_zonelist_order = ZONELIST_ORDER_ZONE; | 4074 | user_zonelist_order = ZONELIST_ORDER_ZONE; |
4075 | } else { | 4075 | } else { |
4076 | printk(KERN_WARNING | 4076 | pr_warn("Ignoring invalid numa_zonelist_order value: %s\n", s); |
4077 | "Ignoring invalid numa_zonelist_order value: %s\n", s); | ||
4078 | return -EINVAL; | 4077 | return -EINVAL; |
4079 | } | 4078 | } |
4080 | return 0; | 4079 | return 0; |
@@ -5458,8 +5457,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat) | |||
5458 | " %s zone: %lu pages used for memmap\n", | 5457 | " %s zone: %lu pages used for memmap\n", |
5459 | zone_names[j], memmap_pages); | 5458 | zone_names[j], memmap_pages); |
5460 | } else | 5459 | } else |
5461 | printk(KERN_WARNING | 5460 | pr_warn(" %s zone: %lu pages exceeds freesize %lu\n", |
5462 | " %s zone: %lu pages exceeds freesize %lu\n", | ||
5463 | zone_names[j], memmap_pages, freesize); | 5461 | zone_names[j], memmap_pages, freesize); |
5464 | } | 5462 | } |
5465 | 5463 | ||
@@ -5667,8 +5665,7 @@ static unsigned long __init find_min_pfn_for_node(int nid) | |||
5667 | min_pfn = min(min_pfn, start_pfn); | 5665 | min_pfn = min(min_pfn, start_pfn); |
5668 | 5666 | ||
5669 | if (min_pfn == ULONG_MAX) { | 5667 | if (min_pfn == ULONG_MAX) { |
5670 | printk(KERN_WARNING | 5668 | pr_warn("Could not find start_pfn for node %d\n", nid); |
5671 | "Could not find start_pfn for node %d\n", nid); | ||
5672 | return 0; | 5669 | return 0; |
5673 | } | 5670 | } |
5674 | 5671 | ||
@@ -6686,11 +6683,8 @@ void *__init alloc_large_system_hash(const char *tablename, | |||
6686 | if (!table) | 6683 | if (!table) |
6687 | panic("Failed to allocate %s hash table\n", tablename); | 6684 | panic("Failed to allocate %s hash table\n", tablename); |
6688 | 6685 | ||
6689 | printk(KERN_INFO "%s hash table entries: %ld (order: %d, %lu bytes)\n", | 6686 | pr_info("%s hash table entries: %ld (order: %d, %lu bytes)\n", |
6690 | tablename, | 6687 | tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size); |
6691 | (1UL << log2qty), | ||
6692 | ilog2(size) - PAGE_SHIFT, | ||
6693 | size); | ||
6694 | 6688 | ||
6695 | if (_hash_shift) | 6689 | if (_hash_shift) |
6696 | *_hash_shift = log2qty; | 6690 | *_hash_shift = log2qty; |
@@ -7191,8 +7185,8 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn) | |||
7191 | BUG_ON(!PageBuddy(page)); | 7185 | BUG_ON(!PageBuddy(page)); |
7192 | order = page_order(page); | 7186 | order = page_order(page); |
7193 | #ifdef CONFIG_DEBUG_VM | 7187 | #ifdef CONFIG_DEBUG_VM |
7194 | printk(KERN_INFO "remove from free list %lx %d %lx\n", | 7188 | pr_info("remove from free list %lx %d %lx\n", |
7195 | pfn, 1 << order, end_pfn); | 7189 | pfn, 1 << order, end_pfn); |
7196 | #endif | 7190 | #endif |
7197 | list_del(&page->lru); | 7191 | list_del(&page->lru); |
7198 | rmv_page_order(page); | 7192 | rmv_page_order(page); |
diff --git a/mm/page_io.c b/mm/page_io.c index b995a5ba5e8f..ff74e512f029 100644 --- a/mm/page_io.c +++ b/mm/page_io.c | |||
@@ -56,10 +56,10 @@ void end_swap_bio_write(struct bio *bio) | |||
56 | * Also clear PG_reclaim to avoid rotate_reclaimable_page() | 56 | * Also clear PG_reclaim to avoid rotate_reclaimable_page() |
57 | */ | 57 | */ |
58 | set_page_dirty(page); | 58 | set_page_dirty(page); |
59 | printk(KERN_ALERT "Write-error on swap-device (%u:%u:%Lu)\n", | 59 | pr_alert("Write-error on swap-device (%u:%u:%llu)\n", |
60 | imajor(bio->bi_bdev->bd_inode), | 60 | imajor(bio->bi_bdev->bd_inode), |
61 | iminor(bio->bi_bdev->bd_inode), | 61 | iminor(bio->bi_bdev->bd_inode), |
62 | (unsigned long long)bio->bi_iter.bi_sector); | 62 | (unsigned long long)bio->bi_iter.bi_sector); |
63 | ClearPageReclaim(page); | 63 | ClearPageReclaim(page); |
64 | } | 64 | } |
65 | end_page_writeback(page); | 65 | end_page_writeback(page); |
@@ -73,10 +73,10 @@ static void end_swap_bio_read(struct bio *bio) | |||
73 | if (bio->bi_error) { | 73 | if (bio->bi_error) { |
74 | SetPageError(page); | 74 | SetPageError(page); |
75 | ClearPageUptodate(page); | 75 | ClearPageUptodate(page); |
76 | printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n", | 76 | pr_alert("Read-error on swap-device (%u:%u:%llu)\n", |
77 | imajor(bio->bi_bdev->bd_inode), | 77 | imajor(bio->bi_bdev->bd_inode), |
78 | iminor(bio->bi_bdev->bd_inode), | 78 | iminor(bio->bi_bdev->bd_inode), |
79 | (unsigned long long)bio->bi_iter.bi_sector); | 79 | (unsigned long long)bio->bi_iter.bi_sector); |
80 | goto out; | 80 | goto out; |
81 | } | 81 | } |
82 | 82 | ||
@@ -216,7 +216,7 @@ reprobe: | |||
216 | out: | 216 | out: |
217 | return ret; | 217 | return ret; |
218 | bad_bmap: | 218 | bad_bmap: |
219 | printk(KERN_ERR "swapon: swapfile has holes\n"); | 219 | pr_err("swapon: swapfile has holes\n"); |
220 | ret = -EINVAL; | 220 | ret = -EINVAL; |
221 | goto out; | 221 | goto out; |
222 | } | 222 | } |
@@ -290,8 +290,8 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc, | |||
290 | */ | 290 | */ |
291 | set_page_dirty(page); | 291 | set_page_dirty(page); |
292 | ClearPageReclaim(page); | 292 | ClearPageReclaim(page); |
293 | pr_err_ratelimited("Write error on dio swapfile (%Lu)\n", | 293 | pr_err_ratelimited("Write error on dio swapfile (%llu)\n", |
294 | page_file_offset(page)); | 294 | page_file_offset(page)); |
295 | } | 295 | } |
296 | end_page_writeback(page); | 296 | end_page_writeback(page); |
297 | return ret; | 297 | return ret; |
diff --git a/mm/percpu-km.c b/mm/percpu-km.c index 10e3d0b8a86d..0db94b748986 100644 --- a/mm/percpu-km.c +++ b/mm/percpu-km.c | |||
@@ -95,7 +95,7 @@ static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai) | |||
95 | 95 | ||
96 | /* all units must be in a single group */ | 96 | /* all units must be in a single group */ |
97 | if (ai->nr_groups != 1) { | 97 | if (ai->nr_groups != 1) { |
98 | printk(KERN_CRIT "percpu: can't handle more than one groups\n"); | 98 | pr_crit("percpu: can't handle more than one groups\n"); |
99 | return -EINVAL; | 99 | return -EINVAL; |
100 | } | 100 | } |
101 | 101 | ||
@@ -103,8 +103,8 @@ static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai) | |||
103 | alloc_pages = roundup_pow_of_two(nr_pages); | 103 | alloc_pages = roundup_pow_of_two(nr_pages); |
104 | 104 | ||
105 | if (alloc_pages > nr_pages) | 105 | if (alloc_pages > nr_pages) |
106 | printk(KERN_WARNING "percpu: wasting %zu pages per chunk\n", | 106 | pr_warn("percpu: wasting %zu pages per chunk\n", |
107 | alloc_pages - nr_pages); | 107 | alloc_pages - nr_pages); |
108 | 108 | ||
109 | return 0; | 109 | return 0; |
110 | } | 110 | } |
diff --git a/mm/percpu.c b/mm/percpu.c index 1571547e7b01..c987fd4d539d 100644 --- a/mm/percpu.c +++ b/mm/percpu.c | |||
@@ -1449,20 +1449,20 @@ static void pcpu_dump_alloc_info(const char *lvl, | |||
1449 | for (alloc_end += gi->nr_units / upa; | 1449 | for (alloc_end += gi->nr_units / upa; |
1450 | alloc < alloc_end; alloc++) { | 1450 | alloc < alloc_end; alloc++) { |
1451 | if (!(alloc % apl)) { | 1451 | if (!(alloc % apl)) { |
1452 | printk(KERN_CONT "\n"); | 1452 | pr_cont("\n"); |
1453 | printk("%spcpu-alloc: ", lvl); | 1453 | printk("%spcpu-alloc: ", lvl); |
1454 | } | 1454 | } |
1455 | printk(KERN_CONT "[%0*d] ", group_width, group); | 1455 | pr_cont("[%0*d] ", group_width, group); |
1456 | 1456 | ||
1457 | for (unit_end += upa; unit < unit_end; unit++) | 1457 | for (unit_end += upa; unit < unit_end; unit++) |
1458 | if (gi->cpu_map[unit] != NR_CPUS) | 1458 | if (gi->cpu_map[unit] != NR_CPUS) |
1459 | printk(KERN_CONT "%0*d ", cpu_width, | 1459 | pr_cont("%0*d ", |
1460 | gi->cpu_map[unit]); | 1460 | cpu_width, gi->cpu_map[unit]); |
1461 | else | 1461 | else |
1462 | printk(KERN_CONT "%s ", empty_str); | 1462 | pr_cont("%s ", empty_str); |
1463 | } | 1463 | } |
1464 | } | 1464 | } |
1465 | printk(KERN_CONT "\n"); | 1465 | pr_cont("\n"); |
1466 | } | 1466 | } |
1467 | 1467 | ||
1468 | /** | 1468 | /** |
diff --git a/mm/shmem.c b/mm/shmem.c index 1acfdbc4bd9e..c484f6888d5e 100644 --- a/mm/shmem.c +++ b/mm/shmem.c | |||
@@ -2823,9 +2823,8 @@ static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo, | |||
2823 | if ((value = strchr(this_char,'=')) != NULL) { | 2823 | if ((value = strchr(this_char,'=')) != NULL) { |
2824 | *value++ = 0; | 2824 | *value++ = 0; |
2825 | } else { | 2825 | } else { |
2826 | printk(KERN_ERR | 2826 | pr_err("tmpfs: No value for mount option '%s'\n", |
2827 | "tmpfs: No value for mount option '%s'\n", | 2827 | this_char); |
2828 | this_char); | ||
2829 | goto error; | 2828 | goto error; |
2830 | } | 2829 | } |
2831 | 2830 | ||
@@ -2880,8 +2879,7 @@ static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo, | |||
2880 | if (mpol_parse_str(value, &mpol)) | 2879 | if (mpol_parse_str(value, &mpol)) |
2881 | goto bad_val; | 2880 | goto bad_val; |
2882 | } else { | 2881 | } else { |
2883 | printk(KERN_ERR "tmpfs: Bad mount option %s\n", | 2882 | pr_err("tmpfs: Bad mount option %s\n", this_char); |
2884 | this_char); | ||
2885 | goto error; | 2883 | goto error; |
2886 | } | 2884 | } |
2887 | } | 2885 | } |
@@ -2889,7 +2887,7 @@ static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo, | |||
2889 | return 0; | 2887 | return 0; |
2890 | 2888 | ||
2891 | bad_val: | 2889 | bad_val: |
2892 | printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n", | 2890 | pr_err("tmpfs: Bad value '%s' for mount option '%s'\n", |
2893 | value, this_char); | 2891 | value, this_char); |
2894 | error: | 2892 | error: |
2895 | mpol_put(mpol); | 2893 | mpol_put(mpol); |
@@ -3286,14 +3284,14 @@ int __init shmem_init(void) | |||
3286 | 3284 | ||
3287 | error = register_filesystem(&shmem_fs_type); | 3285 | error = register_filesystem(&shmem_fs_type); |
3288 | if (error) { | 3286 | if (error) { |
3289 | printk(KERN_ERR "Could not register tmpfs\n"); | 3287 | pr_err("Could not register tmpfs\n"); |
3290 | goto out2; | 3288 | goto out2; |
3291 | } | 3289 | } |
3292 | 3290 | ||
3293 | shm_mnt = kern_mount(&shmem_fs_type); | 3291 | shm_mnt = kern_mount(&shmem_fs_type); |
3294 | if (IS_ERR(shm_mnt)) { | 3292 | if (IS_ERR(shm_mnt)) { |
3295 | error = PTR_ERR(shm_mnt); | 3293 | error = PTR_ERR(shm_mnt); |
3296 | printk(KERN_ERR "Could not kern_mount tmpfs\n"); | 3294 | pr_err("Could not kern_mount tmpfs\n"); |
3297 | goto out1; | 3295 | goto out1; |
3298 | } | 3296 | } |
3299 | return 0; | 3297 | return 0; |
diff --git a/mm/slab.c b/mm/slab.c --- a/mm/slab.c +++ b/mm/slab.c | |||
@@ -474,7 +474,7 @@ static unsigned int cache_estimate(unsigned long gfporder, size_t buffer_size, | |||
474 | static void __slab_error(const char *function, struct kmem_cache *cachep, | 474 | static void __slab_error(const char *function, struct kmem_cache *cachep, |
475 | char *msg) | 475 | char *msg) |
476 | { | 476 | { |
477 | printk(KERN_ERR "slab error in %s(): cache `%s': %s\n", | 477 | pr_err("slab error in %s(): cache `%s': %s\n", |
478 | function, cachep->name, msg); | 478 | function, cachep->name, msg); |
479 | dump_stack(); | 479 | dump_stack(); |
480 | add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); | 480 | add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); |
@@ -1553,7 +1553,7 @@ static void dump_line(char *data, int offset, int limit) | |||
1553 | unsigned char error = 0; | 1553 | unsigned char error = 0; |
1554 | int bad_count = 0; | 1554 | int bad_count = 0; |
1555 | 1555 | ||
1556 | printk(KERN_ERR "%03x: ", offset); | 1556 | pr_err("%03x: ", offset); |
1557 | for (i = 0; i < limit; i++) { | 1557 | for (i = 0; i < limit; i++) { |
1558 | if (data[offset + i] != POISON_FREE) { | 1558 | if (data[offset + i] != POISON_FREE) { |
1559 | error = data[offset + i]; | 1559 | error = data[offset + i]; |
@@ -1566,11 +1566,11 @@ static void dump_line(char *data, int offset, int limit) | |||
1566 | if (bad_count == 1) { | 1566 | if (bad_count == 1) { |
1567 | error ^= POISON_FREE; | 1567 | error ^= POISON_FREE; |
1568 | if (!(error & (error - 1))) { | 1568 | if (!(error & (error - 1))) { |
1569 | printk(KERN_ERR "Single bit error detected. Probably bad RAM.\n"); | 1569 | pr_err("Single bit error detected. Probably bad RAM.\n"); |
1570 | #ifdef CONFIG_X86 | 1570 | #ifdef CONFIG_X86 |
1571 | printk(KERN_ERR "Run memtest86+ or a similar memory test tool.\n"); | 1571 | pr_err("Run memtest86+ or a similar memory test tool.\n"); |
1572 | #else | 1572 | #else |
1573 | printk(KERN_ERR "Run a memory test tool.\n"); | 1573 | pr_err("Run a memory test tool.\n"); |
1574 | #endif | 1574 | #endif |
1575 | } | 1575 | } |
1576 | } | 1576 | } |
@@ -1585,13 +1585,13 @@ static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines) | |||
1585 | char *realobj; | 1585 | char *realobj; |
1586 | 1586 | ||
1587 | if (cachep->flags & SLAB_RED_ZONE) { | 1587 | if (cachep->flags & SLAB_RED_ZONE) { |
1588 | printk(KERN_ERR "Redzone: 0x%llx/0x%llx.\n", | 1588 | pr_err("Redzone: 0x%llx/0x%llx\n", |
1589 | *dbg_redzone1(cachep, objp), | 1589 | *dbg_redzone1(cachep, objp), |
1590 | *dbg_redzone2(cachep, objp)); | 1590 | *dbg_redzone2(cachep, objp)); |
1591 | } | 1591 | } |
1592 | 1592 | ||
1593 | if (cachep->flags & SLAB_STORE_USER) { | 1593 | if (cachep->flags & SLAB_STORE_USER) { |
1594 | printk(KERN_ERR "Last user: [<%p>](%pSR)\n", | 1594 | pr_err("Last user: [<%p>](%pSR)\n", |
1595 | *dbg_userword(cachep, objp), | 1595 | *dbg_userword(cachep, objp), |
1596 | *dbg_userword(cachep, objp)); | 1596 | *dbg_userword(cachep, objp)); |
1597 | } | 1597 | } |
@@ -1627,9 +1627,9 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp) | |||
1627 | /* Mismatch ! */ | 1627 | /* Mismatch ! */ |
1628 | /* Print header */ | 1628 | /* Print header */ |
1629 | if (lines == 0) { | 1629 | if (lines == 0) { |
1630 | printk(KERN_ERR | 1630 | pr_err("Slab corruption (%s): %s start=%p, len=%d\n", |
1631 | "Slab corruption (%s): %s start=%p, len=%d\n", | 1631 | print_tainted(), cachep->name, |
1632 | print_tainted(), cachep->name, realobj, size); | 1632 | realobj, size); |
1633 | print_objinfo(cachep, objp, 0); | 1633 | print_objinfo(cachep, objp, 0); |
1634 | } | 1634 | } |
1635 | /* Hexdump the affected line */ | 1635 | /* Hexdump the affected line */ |
@@ -1656,15 +1656,13 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp) | |||
1656 | if (objnr) { | 1656 | if (objnr) { |
1657 | objp = index_to_obj(cachep, page, objnr - 1); | 1657 | objp = index_to_obj(cachep, page, objnr - 1); |
1658 | realobj = (char *)objp + obj_offset(cachep); | 1658 | realobj = (char *)objp + obj_offset(cachep); |
1659 | printk(KERN_ERR "Prev obj: start=%p, len=%d\n", | 1659 | pr_err("Prev obj: start=%p, len=%d\n", realobj, size); |
1660 | realobj, size); | ||
1661 | print_objinfo(cachep, objp, 2); | 1660 | print_objinfo(cachep, objp, 2); |
1662 | } | 1661 | } |
1663 | if (objnr + 1 < cachep->num) { | 1662 | if (objnr + 1 < cachep->num) { |
1664 | objp = index_to_obj(cachep, page, objnr + 1); | 1663 | objp = index_to_obj(cachep, page, objnr + 1); |
1665 | realobj = (char *)objp + obj_offset(cachep); | 1664 | realobj = (char *)objp + obj_offset(cachep); |
1666 | printk(KERN_ERR "Next obj: start=%p, len=%d\n", | 1665 | pr_err("Next obj: start=%p, len=%d\n", realobj, size); |
1667 | realobj, size); | ||
1668 | print_objinfo(cachep, objp, 2); | 1666 | print_objinfo(cachep, objp, 2); |
1669 | } | 1667 | } |
1670 | } | 1668 | } |
@@ -2463,7 +2461,7 @@ static void slab_put_obj(struct kmem_cache *cachep, | |||
2463 | /* Verify double free bug */ | 2461 | /* Verify double free bug */ |
2464 | for (i = page->active; i < cachep->num; i++) { | 2462 | for (i = page->active; i < cachep->num; i++) { |
2465 | if (get_free_obj(page, i) == objnr) { | 2463 | if (get_free_obj(page, i) == objnr) { |
2466 | printk(KERN_ERR "slab: double free detected in cache '%s', objp %p\n", | 2464 | pr_err("slab: double free detected in cache '%s', objp %p\n", |
2467 | cachep->name, objp); | 2465 | cachep->name, objp); |
2468 | BUG(); | 2466 | BUG(); |
2469 | } | 2467 | } |
@@ -2583,7 +2581,7 @@ failed: | |||
2583 | static void kfree_debugcheck(const void *objp) | 2581 | static void kfree_debugcheck(const void *objp) |
2584 | { | 2582 | { |
2585 | if (!virt_addr_valid(objp)) { | 2583 | if (!virt_addr_valid(objp)) { |
2586 | printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n", | 2584 | pr_err("kfree_debugcheck: out of range ptr %lxh\n", |
2587 | (unsigned long)objp); | 2585 | (unsigned long)objp); |
2588 | BUG(); | 2586 | BUG(); |
2589 | } | 2587 | } |
@@ -2607,8 +2605,8 @@ static inline void verify_redzone_free(struct kmem_cache *cache, void *obj) | |||
2607 | else | 2605 | else |
2608 | slab_error(cache, "memory outside object was overwritten"); | 2606 | slab_error(cache, "memory outside object was overwritten"); |
2609 | 2607 | ||
2610 | printk(KERN_ERR "%p: redzone 1:0x%llx, redzone 2:0x%llx.\n", | 2608 | pr_err("%p: redzone 1:0x%llx, redzone 2:0x%llx\n", |
2611 | obj, redzone1, redzone2); | 2609 | obj, redzone1, redzone2); |
2612 | } | 2610 | } |
2613 | 2611 | ||
2614 | static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, | 2612 | static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, |
@@ -2896,10 +2894,9 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, | |||
2896 | if (*dbg_redzone1(cachep, objp) != RED_INACTIVE || | 2894 | if (*dbg_redzone1(cachep, objp) != RED_INACTIVE || |
2897 | *dbg_redzone2(cachep, objp) != RED_INACTIVE) { | 2895 | *dbg_redzone2(cachep, objp) != RED_INACTIVE) { |
2898 | slab_error(cachep, "double free, or memory outside object was overwritten"); | 2896 | slab_error(cachep, "double free, or memory outside object was overwritten"); |
2899 | printk(KERN_ERR | 2897 | pr_err("%p: redzone 1:0x%llx, redzone 2:0x%llx\n", |
2900 | "%p: redzone 1:0x%llx, redzone 2:0x%llx\n", | 2898 | objp, *dbg_redzone1(cachep, objp), |
2901 | objp, *dbg_redzone1(cachep, objp), | 2899 | *dbg_redzone2(cachep, objp)); |
2902 | *dbg_redzone2(cachep, objp)); | ||
2903 | } | 2900 | } |
2904 | *dbg_redzone1(cachep, objp) = RED_ACTIVE; | 2901 | *dbg_redzone1(cachep, objp) = RED_ACTIVE; |
2905 | *dbg_redzone2(cachep, objp) = RED_ACTIVE; | 2902 | *dbg_redzone2(cachep, objp) = RED_ACTIVE; |
@@ -2910,7 +2907,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, | |||
2910 | cachep->ctor(objp); | 2907 | cachep->ctor(objp); |
2911 | if (ARCH_SLAB_MINALIGN && | 2908 | if (ARCH_SLAB_MINALIGN && |
2912 | ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) { | 2909 | ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) { |
2913 | printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n", | 2910 | pr_err("0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n", |
2914 | objp, (int)ARCH_SLAB_MINALIGN); | 2911 | objp, (int)ARCH_SLAB_MINALIGN); |
2915 | } | 2912 | } |
2916 | return objp; | 2913 | return objp; |
@@ -3837,7 +3834,7 @@ static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp) | |||
3837 | skip_setup: | 3834 | skip_setup: |
3838 | err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp); | 3835 | err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp); |
3839 | if (err) | 3836 | if (err) |
3840 | printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n", | 3837 | pr_err("enable_cpucache failed for %s, error %d\n", |
3841 | cachep->name, -err); | 3838 | cachep->name, -err); |
3842 | return err; | 3839 | return err; |
3843 | } | 3840 | } |
@@ -3993,7 +3990,7 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo) | |||
3993 | 3990 | ||
3994 | name = cachep->name; | 3991 | name = cachep->name; |
3995 | if (error) | 3992 | if (error) |
3996 | printk(KERN_ERR "slab: cache %s error: %s\n", name, error); | 3993 | pr_err("slab: cache %s error: %s\n", name, error); |
3997 | 3994 | ||
3998 | sinfo->active_objs = active_objs; | 3995 | sinfo->active_objs = active_objs; |
3999 | sinfo->num_objs = num_objs; | 3996 | sinfo->num_objs = num_objs; |
diff --git a/mm/slab_common.c b/mm/slab_common.c index e885e11a316f..b2e379639a5b 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c | |||
@@ -442,7 +442,7 @@ out_unlock: | |||
442 | panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n", | 442 | panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n", |
443 | name, err); | 443 | name, err); |
444 | else { | 444 | else { |
445 | printk(KERN_WARNING "kmem_cache_create(%s) failed with error %d", | 445 | pr_warn("kmem_cache_create(%s) failed with error %d\n", |
446 | name, err); | 446 | name, err); |
447 | dump_stack(); | 447 | dump_stack(); |
448 | } | 448 | } |
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c index d3511f9ad0f9..68885dcbaf40 100644 --- a/mm/sparse-vmemmap.c +++ b/mm/sparse-vmemmap.c | |||
@@ -166,8 +166,8 @@ void __meminit vmemmap_verify(pte_t *pte, int node, | |||
166 | int actual_node = early_pfn_to_nid(pfn); | 166 | int actual_node = early_pfn_to_nid(pfn); |
167 | 167 | ||
168 | if (node_distance(actual_node, node) > LOCAL_DISTANCE) | 168 | if (node_distance(actual_node, node) > LOCAL_DISTANCE) |
169 | printk(KERN_WARNING "[%lx-%lx] potential offnode page_structs\n", | 169 | pr_warn("[%lx-%lx] potential offnode page_structs\n", |
170 | start, end - 1); | 170 | start, end - 1); |
171 | } | 171 | } |
172 | 172 | ||
173 | pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node) | 173 | pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node) |
@@ -292,7 +292,7 @@ void __init sparse_mem_maps_populate_node(struct page **map_map, | |||
292 | if (map_map[pnum]) | 292 | if (map_map[pnum]) |
293 | continue; | 293 | continue; |
294 | ms = __nr_to_section(pnum); | 294 | ms = __nr_to_section(pnum); |
295 | printk(KERN_ERR "%s: sparsemem memory map backing failed some memory will not be available.\n", | 295 | pr_err("%s: sparsemem memory map backing failed some memory will not be available\n", |
296 | __func__); | 296 | __func__); |
297 | ms->section_mem_map = 0; | 297 | ms->section_mem_map = 0; |
298 | } | 298 | } |
diff --git a/mm/sparse.c b/mm/sparse.c index 7cdb27d9f01f..5d0cf4540364 100644 --- a/mm/sparse.c +++ b/mm/sparse.c | |||
@@ -313,9 +313,8 @@ static void __init check_usemap_section_nr(int nid, unsigned long *usemap) | |||
313 | 313 | ||
314 | usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr)); | 314 | usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr)); |
315 | if (usemap_nid != nid) { | 315 | if (usemap_nid != nid) { |
316 | printk(KERN_INFO | 316 | pr_info("node %d must be removed before remove section %ld\n", |
317 | "node %d must be removed before remove section %ld\n", | 317 | nid, usemap_snr); |
318 | nid, usemap_snr); | ||
319 | return; | 318 | return; |
320 | } | 319 | } |
321 | /* | 320 | /* |
@@ -324,10 +323,8 @@ static void __init check_usemap_section_nr(int nid, unsigned long *usemap) | |||
324 | * gather other removable sections for dynamic partitioning. | 323 | * gather other removable sections for dynamic partitioning. |
325 | * Just notify un-removable section's number here. | 324 | * Just notify un-removable section's number here. |
326 | */ | 325 | */ |
327 | printk(KERN_INFO "Section %ld and %ld (node %d)", usemap_snr, | 326 | pr_info("Section %ld and %ld (node %d) have a circular dependency on usemap and pgdat allocations\n", |
328 | pgdat_snr, nid); | 327 | usemap_snr, pgdat_snr, nid); |
329 | printk(KERN_CONT | ||
330 | " have a circular dependency on usemap and pgdat allocations\n"); | ||
331 | } | 328 | } |
332 | #else | 329 | #else |
333 | static unsigned long * __init | 330 | static unsigned long * __init |
@@ -355,7 +352,7 @@ static void __init sparse_early_usemaps_alloc_node(void *data, | |||
355 | usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid), | 352 | usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid), |
356 | size * usemap_count); | 353 | size * usemap_count); |
357 | if (!usemap) { | 354 | if (!usemap) { |
358 | printk(KERN_WARNING "%s: allocation failed\n", __func__); | 355 | pr_warn("%s: allocation failed\n", __func__); |
359 | return; | 356 | return; |
360 | } | 357 | } |
361 | 358 | ||
@@ -428,7 +425,7 @@ void __init sparse_mem_maps_populate_node(struct page **map_map, | |||
428 | if (map_map[pnum]) | 425 | if (map_map[pnum]) |
429 | continue; | 426 | continue; |
430 | ms = __nr_to_section(pnum); | 427 | ms = __nr_to_section(pnum); |
431 | printk(KERN_ERR "%s: sparsemem memory map backing failed some memory will not be available.\n", | 428 | pr_err("%s: sparsemem memory map backing failed some memory will not be available\n", |
432 | __func__); | 429 | __func__); |
433 | ms->section_mem_map = 0; | 430 | ms->section_mem_map = 0; |
434 | } | 431 | } |
@@ -456,7 +453,7 @@ static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum) | |||
456 | if (map) | 453 | if (map) |
457 | return map; | 454 | return map; |
458 | 455 | ||
459 | printk(KERN_ERR "%s: sparsemem memory map backing failed some memory will not be available.\n", | 456 | pr_err("%s: sparsemem memory map backing failed some memory will not be available\n", |
460 | __func__); | 457 | __func__); |
461 | ms->section_mem_map = 0; | 458 | ms->section_mem_map = 0; |
462 | return NULL; | 459 | return NULL; |
diff --git a/mm/swap_cgroup.c b/mm/swap_cgroup.c index b5f7f24b8dd1..310ac0b8f974 100644 --- a/mm/swap_cgroup.c +++ b/mm/swap_cgroup.c | |||
@@ -174,9 +174,8 @@ int swap_cgroup_swapon(int type, unsigned long max_pages) | |||
174 | 174 | ||
175 | return 0; | 175 | return 0; |
176 | nomem: | 176 | nomem: |
177 | printk(KERN_INFO "couldn't allocate enough memory for swap_cgroup.\n"); | 177 | pr_info("couldn't allocate enough memory for swap_cgroup\n"); |
178 | printk(KERN_INFO | 178 | pr_info("swap_cgroup can be disabled by swapaccount=0 boot option\n"); |
179 | "swap_cgroup can be disabled by swapaccount=0 boot option\n"); | ||
180 | return -ENOMEM; | 179 | return -ENOMEM; |
181 | } | 180 | } |
182 | 181 | ||