aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2014-10-13 21:54:50 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-10-13 21:54:50 -0400
commitdfe2c6dcc8ca2cdc662d7c0473e9811b72ef3370 (patch)
tree9ed639a08c16322cdf136d576f42df5b97cd1549 /kernel
parenta45d572841a24db02a62cf05e1157c35fdd3705b (diff)
parent64e455079e1bd7787cc47be30b7f601ce682a5f6 (diff)
Merge branch 'akpm' (patches from Andrew Morton)
Merge second patch-bomb from Andrew Morton: - a few hotfixes - drivers/dma updates - MAINTAINERS updates - Quite a lot of lib/ updates - checkpatch updates - binfmt updates - autofs4 - drivers/rtc/ - various small tweaks to less used filesystems - ipc/ updates - kernel/watchdog.c changes * emailed patches from Andrew Morton <akpm@linux-foundation.org>: (135 commits) mm: softdirty: enable write notifications on VMAs after VM_SOFTDIRTY cleared kernel/param: consolidate __{start,stop}___param[] in <linux/moduleparam.h> ia64: remove duplicate declarations of __per_cpu_start[] and __per_cpu_end[] frv: remove unused declarations of __start___ex_table and __stop___ex_table kvm: ensure hard lockup detection is disabled by default kernel/watchdog.c: control hard lockup detection default staging: rtl8192u: use %*pEn to escape buffer staging: rtl8192e: use %*pEn to escape buffer staging: wlan-ng: use %*pEhp to print SN lib80211: remove unused print_ssid() wireless: hostap: proc: print properly escaped SSID wireless: ipw2x00: print SSID via %*pE wireless: libertas: print esaped string via %*pE lib/vsprintf: add %*pE[achnops] format specifier lib / string_helpers: introduce string_escape_mem() lib / string_helpers: refactoring the test suite lib / string_helpers: move documentation to c-file include/linux: remove strict_strto* definitions arch/x86/mm/numa.c: fix boot failure when all nodes are hotpluggable fs: check bh blocknr earlier when searching lru ...
Diffstat (limited to 'kernel')
-rw-r--r--kernel/debug/kdb/kdb_bp.c6
-rw-r--r--kernel/kallsyms.c11
-rw-r--r--kernel/kexec.c32
-rw-r--r--kernel/params.c7
-rw-r--r--kernel/printk/printk.c14
-rw-r--r--kernel/resource.c36
-rw-r--r--kernel/watchdog.c50
7 files changed, 108 insertions, 48 deletions
diff --git a/kernel/debug/kdb/kdb_bp.c b/kernel/debug/kdb/kdb_bp.c
index 70a504601dc3..b20d544f20c2 100644
--- a/kernel/debug/kdb/kdb_bp.c
+++ b/kernel/debug/kdb/kdb_bp.c
@@ -52,11 +52,11 @@ static int kdb_parsebp(int argc, const char **argv, int *nextargp, kdb_bp_t *bp)
52 52
53 bp->bph_length = 1; 53 bp->bph_length = 1;
54 if ((argc + 1) != nextarg) { 54 if ((argc + 1) != nextarg) {
55 if (strnicmp(argv[nextarg], "datar", sizeof("datar")) == 0) 55 if (strncasecmp(argv[nextarg], "datar", sizeof("datar")) == 0)
56 bp->bp_type = BP_ACCESS_WATCHPOINT; 56 bp->bp_type = BP_ACCESS_WATCHPOINT;
57 else if (strnicmp(argv[nextarg], "dataw", sizeof("dataw")) == 0) 57 else if (strncasecmp(argv[nextarg], "dataw", sizeof("dataw")) == 0)
58 bp->bp_type = BP_WRITE_WATCHPOINT; 58 bp->bp_type = BP_WRITE_WATCHPOINT;
59 else if (strnicmp(argv[nextarg], "inst", sizeof("inst")) == 0) 59 else if (strncasecmp(argv[nextarg], "inst", sizeof("inst")) == 0)
60 bp->bp_type = BP_HARDWARE_BREAKPOINT; 60 bp->bp_type = BP_HARDWARE_BREAKPOINT;
61 else 61 else
62 return KDB_ARGCOUNT; 62 return KDB_ARGCOUNT;
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index ae5167087845..5c5987f10819 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -565,19 +565,12 @@ static int kallsyms_open(struct inode *inode, struct file *file)
565 * using get_symbol_offset for every symbol. 565 * using get_symbol_offset for every symbol.
566 */ 566 */
567 struct kallsym_iter *iter; 567 struct kallsym_iter *iter;
568 int ret; 568 iter = __seq_open_private(file, &kallsyms_op, sizeof(*iter));
569
570 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
571 if (!iter) 569 if (!iter)
572 return -ENOMEM; 570 return -ENOMEM;
573 reset_iter(iter, 0); 571 reset_iter(iter, 0);
574 572
575 ret = seq_open(file, &kallsyms_op); 573 return 0;
576 if (ret == 0)
577 ((struct seq_file *)file->private_data)->private = iter;
578 else
579 kfree(iter);
580 return ret;
581} 574}
582 575
583#ifdef CONFIG_KGDB_KDB 576#ifdef CONFIG_KGDB_KDB
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 2bee072268d9..2abf9f6e9a61 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1759,7 +1759,6 @@ static __initdata char *suffix_tbl[] = {
1759 */ 1759 */
1760static int __init parse_crashkernel_suffix(char *cmdline, 1760static int __init parse_crashkernel_suffix(char *cmdline,
1761 unsigned long long *crash_size, 1761 unsigned long long *crash_size,
1762 unsigned long long *crash_base,
1763 const char *suffix) 1762 const char *suffix)
1764{ 1763{
1765 char *cur = cmdline; 1764 char *cur = cmdline;
@@ -1848,7 +1847,7 @@ static int __init __parse_crashkernel(char *cmdline,
1848 1847
1849 if (suffix) 1848 if (suffix)
1850 return parse_crashkernel_suffix(ck_cmdline, crash_size, 1849 return parse_crashkernel_suffix(ck_cmdline, crash_size,
1851 crash_base, suffix); 1850 suffix);
1852 /* 1851 /*
1853 * if the commandline contains a ':', then that's the extended 1852 * if the commandline contains a ':', then that's the extended
1854 * syntax -- if not, it must be the classic syntax 1853 * syntax -- if not, it must be the classic syntax
@@ -2016,22 +2015,6 @@ static int __init crash_save_vmcoreinfo_init(void)
2016subsys_initcall(crash_save_vmcoreinfo_init); 2015subsys_initcall(crash_save_vmcoreinfo_init);
2017 2016
2018#ifdef CONFIG_KEXEC_FILE 2017#ifdef CONFIG_KEXEC_FILE
2019static int __kexec_add_segment(struct kimage *image, char *buf,
2020 unsigned long bufsz, unsigned long mem,
2021 unsigned long memsz)
2022{
2023 struct kexec_segment *ksegment;
2024
2025 ksegment = &image->segment[image->nr_segments];
2026 ksegment->kbuf = buf;
2027 ksegment->bufsz = bufsz;
2028 ksegment->mem = mem;
2029 ksegment->memsz = memsz;
2030 image->nr_segments++;
2031
2032 return 0;
2033}
2034
2035static int locate_mem_hole_top_down(unsigned long start, unsigned long end, 2018static int locate_mem_hole_top_down(unsigned long start, unsigned long end,
2036 struct kexec_buf *kbuf) 2019 struct kexec_buf *kbuf)
2037{ 2020{
@@ -2064,8 +2047,7 @@ static int locate_mem_hole_top_down(unsigned long start, unsigned long end,
2064 } while (1); 2047 } while (1);
2065 2048
2066 /* If we are here, we found a suitable memory range */ 2049 /* If we are here, we found a suitable memory range */
2067 __kexec_add_segment(image, kbuf->buffer, kbuf->bufsz, temp_start, 2050 kbuf->mem = temp_start;
2068 kbuf->memsz);
2069 2051
2070 /* Success, stop navigating through remaining System RAM ranges */ 2052 /* Success, stop navigating through remaining System RAM ranges */
2071 return 1; 2053 return 1;
@@ -2099,8 +2081,7 @@ static int locate_mem_hole_bottom_up(unsigned long start, unsigned long end,
2099 } while (1); 2081 } while (1);
2100 2082
2101 /* If we are here, we found a suitable memory range */ 2083 /* If we are here, we found a suitable memory range */
2102 __kexec_add_segment(image, kbuf->buffer, kbuf->bufsz, temp_start, 2084 kbuf->mem = temp_start;
2103 kbuf->memsz);
2104 2085
2105 /* Success, stop navigating through remaining System RAM ranges */ 2086 /* Success, stop navigating through remaining System RAM ranges */
2106 return 1; 2087 return 1;
@@ -2187,7 +2168,12 @@ int kexec_add_buffer(struct kimage *image, char *buffer, unsigned long bufsz,
2187 } 2168 }
2188 2169
2189 /* Found a suitable memory range */ 2170 /* Found a suitable memory range */
2190 ksegment = &image->segment[image->nr_segments - 1]; 2171 ksegment = &image->segment[image->nr_segments];
2172 ksegment->kbuf = kbuf->buffer;
2173 ksegment->bufsz = kbuf->bufsz;
2174 ksegment->mem = kbuf->mem;
2175 ksegment->memsz = kbuf->memsz;
2176 image->nr_segments++;
2191 *load_addr = ksegment->mem; 2177 *load_addr = ksegment->mem;
2192 return 0; 2178 return 0;
2193} 2179}
diff --git a/kernel/params.c b/kernel/params.c
index 041b5899d5e2..db97b791390f 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -19,6 +19,7 @@
19#include <linux/string.h> 19#include <linux/string.h>
20#include <linux/errno.h> 20#include <linux/errno.h>
21#include <linux/module.h> 21#include <linux/module.h>
22#include <linux/moduleparam.h>
22#include <linux/device.h> 23#include <linux/device.h>
23#include <linux/err.h> 24#include <linux/err.h>
24#include <linux/slab.h> 25#include <linux/slab.h>
@@ -513,8 +514,6 @@ EXPORT_SYMBOL(param_ops_string);
513#define to_module_attr(n) container_of(n, struct module_attribute, attr) 514#define to_module_attr(n) container_of(n, struct module_attribute, attr)
514#define to_module_kobject(n) container_of(n, struct module_kobject, kobj) 515#define to_module_kobject(n) container_of(n, struct module_kobject, kobj)
515 516
516extern struct kernel_param __start___param[], __stop___param[];
517
518struct param_attribute 517struct param_attribute
519{ 518{
520 struct module_attribute mattr; 519 struct module_attribute mattr;
@@ -774,7 +773,7 @@ static struct module_kobject * __init locate_module_kobject(const char *name)
774} 773}
775 774
776static void __init kernel_add_sysfs_param(const char *name, 775static void __init kernel_add_sysfs_param(const char *name,
777 struct kernel_param *kparam, 776 const struct kernel_param *kparam,
778 unsigned int name_skip) 777 unsigned int name_skip)
779{ 778{
780 struct module_kobject *mk; 779 struct module_kobject *mk;
@@ -809,7 +808,7 @@ static void __init kernel_add_sysfs_param(const char *name,
809 */ 808 */
810static void __init param_sysfs_builtin(void) 809static void __init param_sysfs_builtin(void)
811{ 810{
812 struct kernel_param *kp; 811 const struct kernel_param *kp;
813 unsigned int name_len; 812 unsigned int name_len;
814 char modname[MODULE_NAME_LEN]; 813 char modname[MODULE_NAME_LEN];
815 814
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 7a6e69441f75..e3962d63e368 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -267,7 +267,6 @@ static u32 clear_idx;
267#define LOG_ALIGN __alignof__(struct printk_log) 267#define LOG_ALIGN __alignof__(struct printk_log)
268#endif 268#endif
269#define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT) 269#define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
270#define __LOG_CPU_MAX_BUF_LEN (1 << CONFIG_LOG_CPU_MAX_BUF_SHIFT)
271static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN); 270static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN);
272static char *log_buf = __log_buf; 271static char *log_buf = __log_buf;
273static u32 log_buf_len = __LOG_BUF_LEN; 272static u32 log_buf_len = __LOG_BUF_LEN;
@@ -852,6 +851,9 @@ static int __init log_buf_len_setup(char *str)
852} 851}
853early_param("log_buf_len", log_buf_len_setup); 852early_param("log_buf_len", log_buf_len_setup);
854 853
854#ifdef CONFIG_SMP
855#define __LOG_CPU_MAX_BUF_LEN (1 << CONFIG_LOG_CPU_MAX_BUF_SHIFT)
856
855static void __init log_buf_add_cpu(void) 857static void __init log_buf_add_cpu(void)
856{ 858{
857 unsigned int cpu_extra; 859 unsigned int cpu_extra;
@@ -878,6 +880,9 @@ static void __init log_buf_add_cpu(void)
878 880
879 log_buf_len_update(cpu_extra + __LOG_BUF_LEN); 881 log_buf_len_update(cpu_extra + __LOG_BUF_LEN);
880} 882}
883#else /* !CONFIG_SMP */
884static inline void log_buf_add_cpu(void) {}
885#endif /* CONFIG_SMP */
881 886
882void __init setup_log_buf(int early) 887void __init setup_log_buf(int early)
883{ 888{
@@ -1674,12 +1679,7 @@ asmlinkage int vprintk_emit(int facility, int level,
1674 * The printf needs to come first; we need the syslog 1679 * The printf needs to come first; we need the syslog
1675 * prefix which might be passed-in as a parameter. 1680 * prefix which might be passed-in as a parameter.
1676 */ 1681 */
1677 if (in_sched) 1682 text_len = vscnprintf(text, sizeof(textbuf), fmt, args);
1678 text_len = scnprintf(text, sizeof(textbuf),
1679 KERN_WARNING "[sched_delayed] ");
1680
1681 text_len += vscnprintf(text + text_len,
1682 sizeof(textbuf) - text_len, fmt, args);
1683 1683
1684 /* mark and strip a trailing newline */ 1684 /* mark and strip a trailing newline */
1685 if (text_len && text[text_len-1] == '\n') { 1685 if (text_len && text[text_len-1] == '\n') {
diff --git a/kernel/resource.c b/kernel/resource.c
index 46322019ab7d..0bcebffc4e77 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -491,6 +491,42 @@ int __weak page_is_ram(unsigned long pfn)
491} 491}
492EXPORT_SYMBOL_GPL(page_is_ram); 492EXPORT_SYMBOL_GPL(page_is_ram);
493 493
494/*
495 * Search for a resource entry that fully contains the specified region.
496 * If found, return 1 if it is RAM, 0 if not.
497 * If not found, or region is not fully contained, return -1
498 *
499 * Used by the ioremap functions to ensure the user is not remapping RAM and is
500 * a vast speed up over walking through the resource table page by page.
501 */
502int region_is_ram(resource_size_t start, unsigned long size)
503{
504 struct resource *p;
505 resource_size_t end = start + size - 1;
506 int flags = IORESOURCE_MEM | IORESOURCE_BUSY;
507 const char *name = "System RAM";
508 int ret = -1;
509
510 read_lock(&resource_lock);
511 for (p = iomem_resource.child; p ; p = p->sibling) {
512 if (end < p->start)
513 continue;
514
515 if (p->start <= start && end <= p->end) {
516 /* resource fully contains region */
517 if ((p->flags != flags) || strcmp(p->name, name))
518 ret = 0;
519 else
520 ret = 1;
521 break;
522 }
523 if (p->end < start)
524 break; /* not found */
525 }
526 read_unlock(&resource_lock);
527 return ret;
528}
529
494void __weak arch_remove_reservations(struct resource *avail) 530void __weak arch_remove_reservations(struct resource *avail)
495{ 531{
496} 532}
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index ff7fd80bef99..49e9537f3673 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -59,6 +59,25 @@ static unsigned long soft_lockup_nmi_warn;
59static int hardlockup_panic = 59static int hardlockup_panic =
60 CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE; 60 CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
61 61
62static bool hardlockup_detector_enabled = true;
63/*
64 * We may not want to enable hard lockup detection by default in all cases,
65 * for example when running the kernel as a guest on a hypervisor. In these
66 * cases this function can be called to disable hard lockup detection. This
67 * function should only be executed once by the boot processor before the
68 * kernel command line parameters are parsed, because otherwise it is not
69 * possible to override this in hardlockup_panic_setup().
70 */
71void watchdog_enable_hardlockup_detector(bool val)
72{
73 hardlockup_detector_enabled = val;
74}
75
76bool watchdog_hardlockup_detector_is_enabled(void)
77{
78 return hardlockup_detector_enabled;
79}
80
62static int __init hardlockup_panic_setup(char *str) 81static int __init hardlockup_panic_setup(char *str)
63{ 82{
64 if (!strncmp(str, "panic", 5)) 83 if (!strncmp(str, "panic", 5))
@@ -67,6 +86,14 @@ static int __init hardlockup_panic_setup(char *str)
67 hardlockup_panic = 0; 86 hardlockup_panic = 0;
68 else if (!strncmp(str, "0", 1)) 87 else if (!strncmp(str, "0", 1))
69 watchdog_user_enabled = 0; 88 watchdog_user_enabled = 0;
89 else if (!strncmp(str, "1", 1) || !strncmp(str, "2", 1)) {
90 /*
91 * Setting 'nmi_watchdog=1' or 'nmi_watchdog=2' (legacy option)
92 * has the same effect.
93 */
94 watchdog_user_enabled = 1;
95 watchdog_enable_hardlockup_detector(true);
96 }
70 return 1; 97 return 1;
71} 98}
72__setup("nmi_watchdog=", hardlockup_panic_setup); 99__setup("nmi_watchdog=", hardlockup_panic_setup);
@@ -465,6 +492,15 @@ static int watchdog_nmi_enable(unsigned int cpu)
465 struct perf_event_attr *wd_attr; 492 struct perf_event_attr *wd_attr;
466 struct perf_event *event = per_cpu(watchdog_ev, cpu); 493 struct perf_event *event = per_cpu(watchdog_ev, cpu);
467 494
495 /*
496 * Some kernels need to default hard lockup detection to
497 * 'disabled', for example a guest on a hypervisor.
498 */
499 if (!watchdog_hardlockup_detector_is_enabled()) {
500 event = ERR_PTR(-ENOENT);
501 goto handle_err;
502 }
503
468 /* is it already setup and enabled? */ 504 /* is it already setup and enabled? */
469 if (event && event->state > PERF_EVENT_STATE_OFF) 505 if (event && event->state > PERF_EVENT_STATE_OFF)
470 goto out; 506 goto out;
@@ -479,6 +515,7 @@ static int watchdog_nmi_enable(unsigned int cpu)
479 /* Try to register using hardware perf events */ 515 /* Try to register using hardware perf events */
480 event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL); 516 event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL);
481 517
518handle_err:
482 /* save cpu0 error for future comparision */ 519 /* save cpu0 error for future comparision */
483 if (cpu == 0 && IS_ERR(event)) 520 if (cpu == 0 && IS_ERR(event))
484 cpu0_err = PTR_ERR(event); 521 cpu0_err = PTR_ERR(event);
@@ -624,11 +661,13 @@ int proc_dowatchdog(struct ctl_table *table, int write,
624 void __user *buffer, size_t *lenp, loff_t *ppos) 661 void __user *buffer, size_t *lenp, loff_t *ppos)
625{ 662{
626 int err, old_thresh, old_enabled; 663 int err, old_thresh, old_enabled;
664 bool old_hardlockup;
627 static DEFINE_MUTEX(watchdog_proc_mutex); 665 static DEFINE_MUTEX(watchdog_proc_mutex);
628 666
629 mutex_lock(&watchdog_proc_mutex); 667 mutex_lock(&watchdog_proc_mutex);
630 old_thresh = ACCESS_ONCE(watchdog_thresh); 668 old_thresh = ACCESS_ONCE(watchdog_thresh);
631 old_enabled = ACCESS_ONCE(watchdog_user_enabled); 669 old_enabled = ACCESS_ONCE(watchdog_user_enabled);
670 old_hardlockup = watchdog_hardlockup_detector_is_enabled();
632 671
633 err = proc_dointvec_minmax(table, write, buffer, lenp, ppos); 672 err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
634 if (err || !write) 673 if (err || !write)
@@ -640,15 +679,22 @@ int proc_dowatchdog(struct ctl_table *table, int write,
640 * disabled. The 'watchdog_running' variable check in 679 * disabled. The 'watchdog_running' variable check in
641 * watchdog_*_all_cpus() function takes care of this. 680 * watchdog_*_all_cpus() function takes care of this.
642 */ 681 */
643 if (watchdog_user_enabled && watchdog_thresh) 682 if (watchdog_user_enabled && watchdog_thresh) {
683 /*
684 * Prevent a change in watchdog_thresh accidentally overriding
685 * the enablement of the hardlockup detector.
686 */
687 if (watchdog_user_enabled != old_enabled)
688 watchdog_enable_hardlockup_detector(true);
644 err = watchdog_enable_all_cpus(old_thresh != watchdog_thresh); 689 err = watchdog_enable_all_cpus(old_thresh != watchdog_thresh);
645 else 690 } else
646 watchdog_disable_all_cpus(); 691 watchdog_disable_all_cpus();
647 692
648 /* Restore old values on failure */ 693 /* Restore old values on failure */
649 if (err) { 694 if (err) {
650 watchdog_thresh = old_thresh; 695 watchdog_thresh = old_thresh;
651 watchdog_user_enabled = old_enabled; 696 watchdog_user_enabled = old_enabled;
697 watchdog_enable_hardlockup_detector(old_hardlockup);
652 } 698 }
653out: 699out:
654 mutex_unlock(&watchdog_proc_mutex); 700 mutex_unlock(&watchdog_proc_mutex);