about summary refs log tree commit diff stats
path: root/mm
diff options
context:
space:
mode:
authorJoe Perches <joe@perches.com>2016-03-17 17:19:44 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2016-03-17 18:09:34 -0400
commit598d80914e84fa79580850530f5d4a50a99bf4f5 (patch)
treebc53d78f5558f56c3a0899b14d2d419e2316d0b4 /mm
parentb11a7b94100cba5ec926a181894c2897a22651b9 (diff)
mm: convert pr_warning to pr_warn
There are a mixture of pr_warning and pr_warn uses in mm. Use pr_warn consistently.

Miscellanea:
- Coalesce formats
- Realign arguments

Signed-off-by: Joe Perches <joe@perches.com>
Acked-by: Tejun Heo <tj@kernel.org> [percpu]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/hugetlb.c5
-rw-r--r--mm/kmemleak.c14
-rw-r--r--mm/percpu.c15
3 files changed, 16 insertions, 18 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index aefba5a9cc47..06058eaa173b 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2665,7 +2665,7 @@ void __init hugetlb_add_hstate(unsigned int order)
2665 unsigned long i; 2665 unsigned long i;
2666 2666
2667 if (size_to_hstate(PAGE_SIZE << order)) { 2667 if (size_to_hstate(PAGE_SIZE << order)) {
2668 pr_warning("hugepagesz= specified twice, ignoring\n"); 2668 pr_warn("hugepagesz= specified twice, ignoring\n");
2669 return; 2669 return;
2670 } 2670 }
2671 BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE); 2671 BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
@@ -2701,8 +2701,7 @@ static int __init hugetlb_nrpages_setup(char *s)
2701 mhp = &parsed_hstate->max_huge_pages; 2701 mhp = &parsed_hstate->max_huge_pages;
2702 2702
2703 if (mhp == last_mhp) { 2703 if (mhp == last_mhp) {
2704 pr_warning("hugepages= specified twice without " 2704 pr_warn("hugepages= specified twice without interleaving hugepagesz=, ignoring\n");
2705 "interleaving hugepagesz=, ignoring\n");
2706 return 1; 2705 return 1;
2707 } 2706 }
2708 2707
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 25c0ad36fe38..a81cd76ea282 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -276,7 +276,7 @@ static void kmemleak_disable(void);
276 * Print a warning and dump the stack trace. 276 * Print a warning and dump the stack trace.
277 */ 277 */
278#define kmemleak_warn(x...) do { \ 278#define kmemleak_warn(x...) do { \
279 pr_warning(x); \ 279 pr_warn(x); \
280 dump_stack(); \ 280 dump_stack(); \
281 kmemleak_warning = 1; \ 281 kmemleak_warning = 1; \
282} while (0) 282} while (0)
@@ -543,7 +543,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
543 543
544 object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp)); 544 object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
545 if (!object) { 545 if (!object) {
546 pr_warning("Cannot allocate a kmemleak_object structure\n"); 546 pr_warn("Cannot allocate a kmemleak_object structure\n");
547 kmemleak_disable(); 547 kmemleak_disable();
548 return NULL; 548 return NULL;
549 } 549 }
@@ -764,7 +764,7 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
764 764
765 area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp)); 765 area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
766 if (!area) { 766 if (!area) {
767 pr_warning("Cannot allocate a scan area\n"); 767 pr_warn("Cannot allocate a scan area\n");
768 goto out; 768 goto out;
769 } 769 }
770 770
@@ -1515,7 +1515,7 @@ static void start_scan_thread(void)
1515 return; 1515 return;
1516 scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak"); 1516 scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
1517 if (IS_ERR(scan_thread)) { 1517 if (IS_ERR(scan_thread)) {
1518 pr_warning("Failed to create the scan thread\n"); 1518 pr_warn("Failed to create the scan thread\n");
1519 scan_thread = NULL; 1519 scan_thread = NULL;
1520 } 1520 }
1521} 1521}
@@ -1874,8 +1874,8 @@ void __init kmemleak_init(void)
1874 scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE); 1874 scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
1875 1875
1876 if (crt_early_log > ARRAY_SIZE(early_log)) 1876 if (crt_early_log > ARRAY_SIZE(early_log))
1877 pr_warning("Early log buffer exceeded (%d), please increase " 1877 pr_warn("Early log buffer exceeded (%d), please increase DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n",
1878 "DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n", crt_early_log); 1878 crt_early_log);
1879 1879
1880 /* the kernel is still in UP mode, so disabling the IRQs is enough */ 1880 /* the kernel is still in UP mode, so disabling the IRQs is enough */
1881 local_irq_save(flags); 1881 local_irq_save(flags);
@@ -1960,7 +1960,7 @@ static int __init kmemleak_late_init(void)
1960 dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL, 1960 dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
1961 &kmemleak_fops); 1961 &kmemleak_fops);
1962 if (!dentry) 1962 if (!dentry)
1963 pr_warning("Failed to create the debugfs kmemleak file\n"); 1963 pr_warn("Failed to create the debugfs kmemleak file\n");
1964 mutex_lock(&scan_mutex); 1964 mutex_lock(&scan_mutex);
1965 start_scan_thread(); 1965 start_scan_thread();
1966 mutex_unlock(&scan_mutex); 1966 mutex_unlock(&scan_mutex);
diff --git a/mm/percpu.c b/mm/percpu.c
index 998607adf6eb..847814b15233 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1033,8 +1033,8 @@ fail_unlock:
1033 spin_unlock_irqrestore(&pcpu_lock, flags); 1033 spin_unlock_irqrestore(&pcpu_lock, flags);
1034fail: 1034fail:
1035 if (!is_atomic && warn_limit) { 1035 if (!is_atomic && warn_limit) {
1036 pr_warning("PERCPU: allocation failed, size=%zu align=%zu atomic=%d, %s\n", 1036 pr_warn("PERCPU: allocation failed, size=%zu align=%zu atomic=%d, %s\n",
1037 size, align, is_atomic, err); 1037 size, align, is_atomic, err);
1038 dump_stack(); 1038 dump_stack();
1039 if (!--warn_limit) 1039 if (!--warn_limit)
1040 pr_info("PERCPU: limit reached, disable warning\n"); 1040 pr_info("PERCPU: limit reached, disable warning\n");
@@ -1723,7 +1723,7 @@ static int __init percpu_alloc_setup(char *str)
1723 pcpu_chosen_fc = PCPU_FC_PAGE; 1723 pcpu_chosen_fc = PCPU_FC_PAGE;
1724#endif 1724#endif
1725 else 1725 else
1726 pr_warning("PERCPU: unknown allocator %s specified\n", str); 1726 pr_warn("PERCPU: unknown allocator %s specified\n", str);
1727 1727
1728 return 0; 1728 return 0;
1729} 1729}
@@ -2016,9 +2016,8 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
2016 2016
2017 /* warn if maximum distance is further than 75% of vmalloc space */ 2017 /* warn if maximum distance is further than 75% of vmalloc space */
2018 if (max_distance > VMALLOC_TOTAL * 3 / 4) { 2018 if (max_distance > VMALLOC_TOTAL * 3 / 4) {
2019 pr_warning("PERCPU: max_distance=0x%zx too large for vmalloc " 2019 pr_warn("PERCPU: max_distance=0x%zx too large for vmalloc space 0x%lx\n",
2020 "space 0x%lx\n", max_distance, 2020 max_distance, VMALLOC_TOTAL);
2021 VMALLOC_TOTAL);
2022#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK 2021#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
2023 /* and fail if we have fallback */ 2022 /* and fail if we have fallback */
2024 rc = -EINVAL; 2023 rc = -EINVAL;
@@ -2100,8 +2099,8 @@ int __init pcpu_page_first_chunk(size_t reserved_size,
2100 2099
2101 ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE); 2100 ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
2102 if (!ptr) { 2101 if (!ptr) {
2103 pr_warning("PERCPU: failed to allocate %s page " 2102 pr_warn("PERCPU: failed to allocate %s page for cpu%u\n",
2104 "for cpu%u\n", psize_str, cpu); 2103 psize_str, cpu);
2105 goto enomem; 2104 goto enomem;
2106 } 2105 }
2107 /* kmemleak tracks the percpu allocations separately */ 2106 /* kmemleak tracks the percpu allocations separately */