Diffstat (limited to 'kernel/printk/printk.c')
-rw-r--r-- | kernel/printk/printk.c | 2924
1 file changed, 2924 insertions, 0 deletions
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
new file mode 100644
index 000000000000..69b0890ed7e5
--- /dev/null
+++ b/kernel/printk/printk.c
@@ -0,0 +1,2924 @@
1 | /* | ||
2 | * linux/kernel/printk.c | ||
3 | * | ||
4 | * Copyright (C) 1991, 1992 Linus Torvalds | ||
5 | * | ||
6 | * Modified to make sys_syslog() more flexible: added commands to | ||
7 | * return the last 4k of kernel messages, regardless of whether | ||
8 | * they've been read or not. Added option to suppress kernel printk's | ||
9 | * to the console. Added hook for sending the console messages | ||
10 | * elsewhere, in preparation for a serial line console (someday). | ||
11 | * Ted Ts'o, 2/11/93. | ||
12 | * Modified for sysctl support, 1/8/97, Chris Horn. | ||
13 | * Fixed SMP synchronization, 08/08/99, Manfred Spraul | ||
14 | * manfred@colorfullife.com | ||
15 | * Rewrote bits to get rid of console_lock | ||
16 | * 01Mar01 Andrew Morton | ||
17 | */ | ||
18 | |||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/mm.h> | ||
21 | #include <linux/tty.h> | ||
22 | #include <linux/tty_driver.h> | ||
23 | #include <linux/console.h> | ||
24 | #include <linux/init.h> | ||
25 | #include <linux/jiffies.h> | ||
26 | #include <linux/nmi.h> | ||
27 | #include <linux/module.h> | ||
28 | #include <linux/moduleparam.h> | ||
29 | #include <linux/interrupt.h> /* For in_interrupt() */ | ||
30 | #include <linux/delay.h> | ||
31 | #include <linux/smp.h> | ||
32 | #include <linux/security.h> | ||
33 | #include <linux/bootmem.h> | ||
34 | #include <linux/memblock.h> | ||
35 | #include <linux/aio.h> | ||
36 | #include <linux/syscalls.h> | ||
37 | #include <linux/kexec.h> | ||
38 | #include <linux/kdb.h> | ||
39 | #include <linux/ratelimit.h> | ||
40 | #include <linux/kmsg_dump.h> | ||
41 | #include <linux/syslog.h> | ||
42 | #include <linux/cpu.h> | ||
43 | #include <linux/notifier.h> | ||
44 | #include <linux/rculist.h> | ||
45 | #include <linux/poll.h> | ||
46 | #include <linux/irq_work.h> | ||
47 | #include <linux/utsname.h> | ||
48 | |||
49 | #include <asm/uaccess.h> | ||
50 | |||
51 | #define CREATE_TRACE_POINTS | ||
52 | #include <trace/events/printk.h> | ||
53 | |||
54 | /* printk's without a loglevel use this.. */ | ||
55 | #define DEFAULT_MESSAGE_LOGLEVEL CONFIG_DEFAULT_MESSAGE_LOGLEVEL | ||
56 | |||
57 | /* We show everything that is MORE important than this.. */ | ||
58 | #define MINIMUM_CONSOLE_LOGLEVEL 1 /* Minimum loglevel we let people use */ | ||
59 | #define DEFAULT_CONSOLE_LOGLEVEL 7 /* anything MORE serious than KERN_DEBUG */ | ||
60 | |||
61 | int console_printk[4] = { | ||
62 | DEFAULT_CONSOLE_LOGLEVEL, /* console_loglevel */ | ||
63 | DEFAULT_MESSAGE_LOGLEVEL, /* default_message_loglevel */ | ||
64 | MINIMUM_CONSOLE_LOGLEVEL, /* minimum_console_loglevel */ | ||
65 | DEFAULT_CONSOLE_LOGLEVEL, /* default_console_loglevel */ | ||
66 | }; | ||
67 | |||
68 | /* | ||
69 | * Low-level drivers may need this to know if they can schedule in | ||
70 | * their unblank() callback or not. So let's export it. | ||
71 | */ | ||
72 | int oops_in_progress; | ||
73 | EXPORT_SYMBOL(oops_in_progress); | ||
74 | |||
75 | /* | ||
76 | * console_sem protects the console_drivers list, and also | ||
77 | * provides serialisation for access to the entire console | ||
78 | * driver system. | ||
79 | */ | ||
80 | static DEFINE_SEMAPHORE(console_sem); | ||
81 | struct console *console_drivers; | ||
82 | EXPORT_SYMBOL_GPL(console_drivers); | ||
83 | |||
84 | #ifdef CONFIG_LOCKDEP | ||
85 | static struct lockdep_map console_lock_dep_map = { | ||
86 | .name = "console_lock" | ||
87 | }; | ||
88 | #endif | ||
89 | |||
90 | /* | ||
91 | * This is used for debugging the mess that is the VT code by | ||
92 | * keeping track of whether we have the console semaphore held. It's | ||
93 | * definitely not the perfect debug tool (we don't know if _WE_ | ||
94 | * hold it and are racing), but it helps track those weird code | ||
95 | * paths in the console code where we end up in places I want | ||
96 | * locked without the console semaphore held. | ||
97 | */ | ||
98 | static int console_locked, console_suspended; | ||
99 | |||
100 | /* | ||
101 | * If exclusive_console is non-NULL then only this console is to be printed to. | ||
102 | */ | ||
103 | static struct console *exclusive_console; | ||
104 | |||
105 | /* | ||
106 | * Array of consoles built from command line options (console=) | ||
107 | */ | ||
108 | struct console_cmdline | ||
109 | { | ||
110 | char name[8]; /* Name of the driver */ | ||
111 | int index; /* Minor dev. to use */ | ||
112 | char *options; /* Options for the driver */ | ||
113 | #ifdef CONFIG_A11Y_BRAILLE_CONSOLE | ||
114 | char *brl_options; /* Options for braille driver */ | ||
115 | #endif | ||
116 | }; | ||
117 | |||
118 | #define MAX_CMDLINECONSOLES 8 | ||
119 | |||
120 | static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES]; | ||
121 | static int selected_console = -1; | ||
122 | static int preferred_console = -1; | ||
123 | int console_set_on_cmdline; | ||
124 | EXPORT_SYMBOL(console_set_on_cmdline); | ||
125 | |||
126 | /* Flag: console code may call schedule() */ | ||
127 | static int console_may_schedule; | ||
128 | |||
129 | /* | ||
130 | * The printk log buffer consists of a chain of concatenated variable | ||
131 | * length records. Every record starts with a record header, containing | ||
132 | * the overall length of the record. | ||
133 | * | ||
134 | * The heads to the first and last entry in the buffer, as well as the | ||
135 | * sequence numbers of both entries, are maintained when messages | ||
136 | * are stored. | ||
137 | * | ||
138 | * If the heads indicate available messages, the length in the header | ||
139 | * tells the start of the next message. A length == 0 for the next message | ||
140 | * indicates a wrap-around to the beginning of the buffer. | ||
141 | * | ||
142 | * Every record carries the monotonic timestamp in nanoseconds, as well as | ||
143 | * the standard userspace syslog level and syslog facility. The usual | ||
144 | * kernel messages use LOG_KERN; userspace-injected messages always carry | ||
145 | * a matching syslog facility, by default LOG_USER. The origin of every | ||
146 | * message can be reliably determined that way. | ||
147 | * | ||
148 | * The human readable log message directly follows the message header. The | ||
149 | * length of the message text is stored in the header, the stored message | ||
150 | * is not terminated. | ||
151 | * | ||
152 | * Optionally, a message can carry a dictionary of properties (key/value pairs), | ||
153 | * to provide userspace with a machine-readable message context. | ||
154 | * | ||
155 | * Examples for well-defined, commonly used property names are: | ||
156 | * DEVICE=b12:8 device identifier | ||
157 | * b12:8 block dev_t | ||
158 | * c127:3 char dev_t | ||
159 | * n8 netdev ifindex | ||
160 | * +sound:card0 subsystem:devname | ||
161 | * SUBSYSTEM=pci driver-core subsystem name | ||
162 | * | ||
163 | * Valid characters in property names are [a-zA-Z0-9.-_]. The plain text value | ||
164 | * follows directly after a '=' character. Every property is terminated by | ||
165 | * a '\0' character. The last property is not terminated. | ||
166 | * | ||
167 | * Example of a message structure: | ||
168 | * 0000 ff 8f 00 00 00 00 00 00 monotonic time in nsec | ||
169 | * 0008 34 00 record is 52 bytes long | ||
170 | * 000a 0b 00 text is 11 bytes long | ||
171 | * 000c 17 00 dictionary is 23 bytes long | ||
172 | * 000e 03 00 LOG_KERN (facility) LOG_ERR (level) | ||
173 | * 0010 69 74 27 73 20 61 20 6c "it's a l" | ||
174 | * 69 6e 65 "ine" | ||
175 | * 001b 44 45 56 49 43 "DEVIC" | ||
176 | * 45 3d 62 38 3a 32 00 44 "E=b8:2\0D" | ||
177 | * 52 49 56 45 52 3d 62 75 "RIVER=bu" | ||
178 | * 67 "g" | ||
179 | * 0032 00 00 00 padding to next message header | ||
180 | * | ||
181 | * The 'struct log' buffer header must never be directly exported to | ||
182 | * userspace, it is a kernel-private implementation detail that might | ||
183 | * need to be changed in the future, when the requirements change. | ||
184 | * | ||
185 | * /dev/kmsg exports the structured data in the following line format: | ||
186 | * "level,sequnum,timestamp;<message text>\n" | ||
187 | * | ||
188 | * The optional key/value pairs are attached as continuation lines starting | ||
189 | * with a space character and terminated by a newline. All possible | ||
190 | * non-printable characters are escaped in the "\xff" notation. | ||
191 | * | ||
192 | * Users of the export format should ignore possible additional values | ||
193 | * separated by ',', and find the message after the ';' character. | ||
194 | */ | ||
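/*
 * An illustrative userspace sketch of the rules above (the helper and its
 * signature are made up for this example; it is not part of this file):
 *
 *	int parse_kmsg_line(const char *line)
 *	{
 *		unsigned int prefix;
 *		unsigned long long seq, ts_usec;
 *		const char *text = strchr(line, ';');
 *
 *		if (!text || sscanf(line, "%u,%llu,%llu", &prefix, &seq, &ts_usec) != 3)
 *			return -1;
 *		printf("facility=%u level=%u seq=%llu ts=%llu text=%s",
 *		       prefix >> 3, prefix & 7, seq, ts_usec, text + 1);
 *		return 0;
 *	}
 *
 * Any extra ','-separated values before the ';' are simply ignored by the
 * sscanf() above, as the format description requires.
 */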
195 | |||
196 | enum log_flags { | ||
197 | LOG_NOCONS = 1, /* already flushed, do not print to console */ | ||
198 | LOG_NEWLINE = 2, /* text ended with a newline */ | ||
199 | LOG_PREFIX = 4, /* text started with a prefix */ | ||
200 | LOG_CONT = 8, /* text is a fragment of a continuation line */ | ||
201 | }; | ||
202 | |||
203 | struct log { | ||
204 | u64 ts_nsec; /* timestamp in nanoseconds */ | ||
205 | u16 len; /* length of entire record */ | ||
206 | u16 text_len; /* length of text buffer */ | ||
207 | u16 dict_len; /* length of dictionary buffer */ | ||
208 | u8 facility; /* syslog facility */ | ||
209 | u8 flags:5; /* internal record flags */ | ||
210 | u8 level:3; /* syslog level */ | ||
211 | }; | ||
212 | |||
213 | /* | ||
214 | * The logbuf_lock protects kmsg buffer, indices, counters. It is also | ||
215 | * used in interesting ways to provide interlocking in console_unlock(); | ||
216 | */ | ||
217 | static DEFINE_RAW_SPINLOCK(logbuf_lock); | ||
218 | |||
219 | #ifdef CONFIG_PRINTK | ||
220 | DECLARE_WAIT_QUEUE_HEAD(log_wait); | ||
221 | /* the next printk record to read by syslog(READ) or /proc/kmsg */ | ||
222 | static u64 syslog_seq; | ||
223 | static u32 syslog_idx; | ||
224 | static enum log_flags syslog_prev; | ||
225 | static size_t syslog_partial; | ||
226 | |||
227 | /* index and sequence number of the first record stored in the buffer */ | ||
228 | static u64 log_first_seq; | ||
229 | static u32 log_first_idx; | ||
230 | |||
231 | /* index and sequence number of the next record to store in the buffer */ | ||
232 | static u64 log_next_seq; | ||
233 | static u32 log_next_idx; | ||
234 | |||
235 | /* the next printk record to write to the console */ | ||
236 | static u64 console_seq; | ||
237 | static u32 console_idx; | ||
238 | static enum log_flags console_prev; | ||
239 | |||
240 | /* the next printk record to read after the last 'clear' command */ | ||
241 | static u64 clear_seq; | ||
242 | static u32 clear_idx; | ||
243 | |||
244 | #define PREFIX_MAX 32 | ||
245 | #define LOG_LINE_MAX (1024 - PREFIX_MAX) | ||
246 | |||
247 | /* record buffer */ | ||
248 | #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) | ||
249 | #define LOG_ALIGN 4 | ||
250 | #else | ||
251 | #define LOG_ALIGN __alignof__(struct log) | ||
252 | #endif | ||
253 | #define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT) | ||
254 | static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN); | ||
255 | static char *log_buf = __log_buf; | ||
256 | static u32 log_buf_len = __LOG_BUF_LEN; | ||
257 | |||
258 | /* cpu currently holding logbuf_lock */ | ||
259 | static volatile unsigned int logbuf_cpu = UINT_MAX; | ||
260 | |||
261 | /* human readable text of the record */ | ||
262 | static char *log_text(const struct log *msg) | ||
263 | { | ||
264 | return (char *)msg + sizeof(struct log); | ||
265 | } | ||
266 | |||
267 | /* optional key/value pair dictionary attached to the record */ | ||
268 | static char *log_dict(const struct log *msg) | ||
269 | { | ||
270 | return (char *)msg + sizeof(struct log) + msg->text_len; | ||
271 | } | ||
272 | |||
273 | /* get record by index; idx must point to valid msg */ | ||
274 | static struct log *log_from_idx(u32 idx) | ||
275 | { | ||
276 | struct log *msg = (struct log *)(log_buf + idx); | ||
277 | |||
278 | /* | ||
279 | * A length == 0 record is the end of buffer marker. Wrap around and | ||
280 | * read the message at the start of the buffer. | ||
281 | */ | ||
282 | if (!msg->len) | ||
283 | return (struct log *)log_buf; | ||
284 | return msg; | ||
285 | } | ||
286 | |||
287 | /* get next record; idx must point to valid msg */ | ||
288 | static u32 log_next(u32 idx) | ||
289 | { | ||
290 | struct log *msg = (struct log *)(log_buf + idx); | ||
291 | |||
292 | /* length == 0 indicates the end of the buffer; wrap */ | ||
293 | /* | ||
294 | * A length == 0 record is the end of buffer marker. Wrap around and | ||
295 | * read the message at the start of the buffer as *this* one, and | ||
296 | * return the one after that. | ||
297 | */ | ||
298 | if (!msg->len) { | ||
299 | msg = (struct log *)log_buf; | ||
300 | return msg->len; | ||
301 | } | ||
302 | return idx + msg->len; | ||
303 | } | ||
304 | |||
305 | /* insert record into the buffer, discard old ones, update heads */ | ||
306 | static void log_store(int facility, int level, | ||
307 | enum log_flags flags, u64 ts_nsec, | ||
308 | const char *dict, u16 dict_len, | ||
309 | const char *text, u16 text_len) | ||
310 | { | ||
311 | struct log *msg; | ||
312 | u32 size, pad_len; | ||
313 | |||
314 | /* number of '\0' padding bytes to next message */ | ||
315 | size = sizeof(struct log) + text_len + dict_len; | ||
316 | pad_len = (-size) & (LOG_ALIGN - 1); | ||
317 | size += pad_len; | ||
318 | |||
319 | while (log_first_seq < log_next_seq) { | ||
320 | u32 free; | ||
321 | |||
322 | if (log_next_idx > log_first_idx) | ||
323 | free = max(log_buf_len - log_next_idx, log_first_idx); | ||
324 | else | ||
325 | free = log_first_idx - log_next_idx; | ||
326 | |||
327 | if (free > size + sizeof(struct log)) | ||
328 | break; | ||
329 | |||
330 | /* drop old messages until we have enough continuous space */ | ||
331 | log_first_idx = log_next(log_first_idx); | ||
332 | log_first_seq++; | ||
333 | } | ||
334 | |||
335 | if (log_next_idx + size + sizeof(struct log) >= log_buf_len) { | ||
336 | /* | ||
337 | * This message + an additional empty header does not fit | ||
338 | * at the end of the buffer. Add an empty header with len == 0 | ||
339 | * to signify a wrap around. | ||
340 | */ | ||
341 | memset(log_buf + log_next_idx, 0, sizeof(struct log)); | ||
342 | log_next_idx = 0; | ||
343 | } | ||
344 | |||
345 | /* fill message */ | ||
346 | msg = (struct log *)(log_buf + log_next_idx); | ||
347 | memcpy(log_text(msg), text, text_len); | ||
348 | msg->text_len = text_len; | ||
349 | memcpy(log_dict(msg), dict, dict_len); | ||
350 | msg->dict_len = dict_len; | ||
351 | msg->facility = facility; | ||
352 | msg->level = level & 7; | ||
353 | msg->flags = flags & 0x1f; | ||
354 | if (ts_nsec > 0) | ||
355 | msg->ts_nsec = ts_nsec; | ||
356 | else | ||
357 | msg->ts_nsec = local_clock(); | ||
358 | memset(log_dict(msg) + dict_len, 0, pad_len); | ||
359 | msg->len = sizeof(struct log) + text_len + dict_len + pad_len; | ||
360 | |||
361 | /* insert message */ | ||
362 | log_next_idx += msg->len; | ||
363 | log_next_seq++; | ||
364 | } | ||
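/*
 * A worked example of the sizing above, matching the sample record in the
 * header comment (assuming sizeof(struct log) == 16 and LOG_ALIGN == 4):
 * text_len = 11 ("it's a line") and dict_len = 23 give
 * size = 16 + 11 + 23 = 50, pad_len = (-50) & 3 = 2, and a final
 * msg->len of 52 -- the "record is 52 bytes long" value shown there.
 */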
365 | |||
366 | #ifdef CONFIG_SECURITY_DMESG_RESTRICT | ||
367 | int dmesg_restrict = 1; | ||
368 | #else | ||
369 | int dmesg_restrict; | ||
370 | #endif | ||
371 | |||
372 | static int syslog_action_restricted(int type) | ||
373 | { | ||
374 | if (dmesg_restrict) | ||
375 | return 1; | ||
376 | /* | ||
377 | * Unless restricted, we allow "read all" and "get buffer size" | ||
378 | * for everybody. | ||
379 | */ | ||
380 | return type != SYSLOG_ACTION_READ_ALL && | ||
381 | type != SYSLOG_ACTION_SIZE_BUFFER; | ||
382 | } | ||
383 | |||
384 | static int check_syslog_permissions(int type, bool from_file) | ||
385 | { | ||
386 | /* | ||
387 | * If this is from /proc/kmsg and we've already opened it, then we've | ||
388 | * already done the capabilities checks at open time. | ||
389 | */ | ||
390 | if (from_file && type != SYSLOG_ACTION_OPEN) | ||
391 | return 0; | ||
392 | |||
393 | if (syslog_action_restricted(type)) { | ||
394 | if (capable(CAP_SYSLOG)) | ||
395 | return 0; | ||
396 | /* | ||
397 | * For historical reasons, accept CAP_SYS_ADMIN too, with | ||
398 | * a warning. | ||
399 | */ | ||
400 | if (capable(CAP_SYS_ADMIN)) { | ||
401 | pr_warn_once("%s (%d): Attempt to access syslog with " | ||
402 | "CAP_SYS_ADMIN but no CAP_SYSLOG " | ||
403 | "(deprecated).\n", | ||
404 | current->comm, task_pid_nr(current)); | ||
405 | return 0; | ||
406 | } | ||
407 | return -EPERM; | ||
408 | } | ||
409 | return security_syslog(type); | ||
410 | } | ||
411 | |||
412 | |||
413 | /* /dev/kmsg - userspace message inject/listen interface */ | ||
414 | struct devkmsg_user { | ||
415 | u64 seq; | ||
416 | u32 idx; | ||
417 | enum log_flags prev; | ||
418 | struct mutex lock; | ||
419 | char buf[8192]; | ||
420 | }; | ||
421 | |||
422 | static ssize_t devkmsg_writev(struct kiocb *iocb, const struct iovec *iv, | ||
423 | unsigned long count, loff_t pos) | ||
424 | { | ||
425 | char *buf, *line; | ||
426 | int i; | ||
427 | int level = default_message_loglevel; | ||
428 | int facility = 1; /* LOG_USER */ | ||
429 | size_t len = iov_length(iv, count); | ||
430 | ssize_t ret = len; | ||
431 | |||
432 | if (len > LOG_LINE_MAX) | ||
433 | return -EINVAL; | ||
434 | buf = kmalloc(len+1, GFP_KERNEL); | ||
435 | if (buf == NULL) | ||
436 | return -ENOMEM; | ||
437 | |||
438 | line = buf; | ||
439 | for (i = 0; i < count; i++) { | ||
440 | if (copy_from_user(line, iv[i].iov_base, iv[i].iov_len)) { | ||
441 | ret = -EFAULT; | ||
442 | goto out; | ||
443 | } | ||
444 | line += iv[i].iov_len; | ||
445 | } | ||
446 | |||
447 | /* | ||
448 | * Extract and skip the syslog prefix <[0-9]*>. Coming from userspace | ||
449 | * the decimal value represents a 32-bit number; the lower 3 bits are the log | ||
450 | * level, the rest is the log facility. | ||
451 | * | ||
452 | * If no prefix or no userspace facility is specified, we | ||
453 | * enforce LOG_USER, to be able to reliably distinguish | ||
454 | * kernel-generated messages from userspace-injected ones. | ||
455 | */ | ||
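/*
 * For example, a write of "<14>hello" carries the value 14:
 * level = 14 & 7 = 6 (LOG_INFO), facility = 14 >> 3 = 1 (LOG_USER),
 * and only "hello" is stored as the message text.
 */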
456 | line = buf; | ||
457 | if (line[0] == '<') { | ||
458 | char *endp = NULL; | ||
459 | |||
460 | i = simple_strtoul(line+1, &endp, 10); | ||
461 | if (endp && endp[0] == '>') { | ||
462 | level = i & 7; | ||
463 | if (i >> 3) | ||
464 | facility = i >> 3; | ||
465 | endp++; | ||
466 | len -= endp - line; | ||
467 | line = endp; | ||
468 | } | ||
469 | } | ||
470 | line[len] = '\0'; | ||
471 | |||
472 | printk_emit(facility, level, NULL, 0, "%s", line); | ||
473 | out: | ||
474 | kfree(buf); | ||
475 | return ret; | ||
476 | } | ||
477 | |||
478 | static ssize_t devkmsg_read(struct file *file, char __user *buf, | ||
479 | size_t count, loff_t *ppos) | ||
480 | { | ||
481 | struct devkmsg_user *user = file->private_data; | ||
482 | struct log *msg; | ||
483 | u64 ts_usec; | ||
484 | size_t i; | ||
485 | char cont = '-'; | ||
486 | size_t len; | ||
487 | ssize_t ret; | ||
488 | |||
489 | if (!user) | ||
490 | return -EBADF; | ||
491 | |||
492 | ret = mutex_lock_interruptible(&user->lock); | ||
493 | if (ret) | ||
494 | return ret; | ||
495 | raw_spin_lock_irq(&logbuf_lock); | ||
496 | while (user->seq == log_next_seq) { | ||
497 | if (file->f_flags & O_NONBLOCK) { | ||
498 | ret = -EAGAIN; | ||
499 | raw_spin_unlock_irq(&logbuf_lock); | ||
500 | goto out; | ||
501 | } | ||
502 | |||
503 | raw_spin_unlock_irq(&logbuf_lock); | ||
504 | ret = wait_event_interruptible(log_wait, | ||
505 | user->seq != log_next_seq); | ||
506 | if (ret) | ||
507 | goto out; | ||
508 | raw_spin_lock_irq(&logbuf_lock); | ||
509 | } | ||
510 | |||
511 | if (user->seq < log_first_seq) { | ||
512 | /* our last seen message is gone, return error and reset */ | ||
513 | user->idx = log_first_idx; | ||
514 | user->seq = log_first_seq; | ||
515 | ret = -EPIPE; | ||
516 | raw_spin_unlock_irq(&logbuf_lock); | ||
517 | goto out; | ||
518 | } | ||
519 | |||
520 | msg = log_from_idx(user->idx); | ||
521 | ts_usec = msg->ts_nsec; | ||
522 | do_div(ts_usec, 1000); | ||
523 | |||
524 | /* | ||
525 | * If we couldn't merge continuation line fragments during the print, | ||
526 | * export the stored flags to allow an optional external merge of the | ||
527 | * records. Merging the records isn't always correct, like | ||
528 | * when we hit a race during printing. In most cases though, it produces | ||
529 | * more readable output. A 'c' in the record flags marks the first | ||
530 | * fragment of a line, '+' the following. | ||
531 | */ | ||
532 | if (msg->flags & LOG_CONT && !(user->prev & LOG_CONT)) | ||
533 | cont = 'c'; | ||
534 | else if ((msg->flags & LOG_CONT) || | ||
535 | ((user->prev & LOG_CONT) && !(msg->flags & LOG_PREFIX))) | ||
536 | cont = '+'; | ||
537 | |||
538 | len = sprintf(user->buf, "%u,%llu,%llu,%c;", | ||
539 | (msg->facility << 3) | msg->level, | ||
540 | user->seq, ts_usec, cont); | ||
541 | user->prev = msg->flags; | ||
542 | |||
543 | /* escape non-printable characters */ | ||
544 | for (i = 0; i < msg->text_len; i++) { | ||
545 | unsigned char c = log_text(msg)[i]; | ||
546 | |||
547 | if (c < ' ' || c >= 127 || c == '\\') | ||
548 | len += sprintf(user->buf + len, "\\x%02x", c); | ||
549 | else | ||
550 | user->buf[len++] = c; | ||
551 | } | ||
552 | user->buf[len++] = '\n'; | ||
553 | |||
554 | if (msg->dict_len) { | ||
555 | bool line = true; | ||
556 | |||
557 | for (i = 0; i < msg->dict_len; i++) { | ||
558 | unsigned char c = log_dict(msg)[i]; | ||
559 | |||
560 | if (line) { | ||
561 | user->buf[len++] = ' '; | ||
562 | line = false; | ||
563 | } | ||
564 | |||
565 | if (c == '\0') { | ||
566 | user->buf[len++] = '\n'; | ||
567 | line = true; | ||
568 | continue; | ||
569 | } | ||
570 | |||
571 | if (c < ' ' || c >= 127 || c == '\\') { | ||
572 | len += sprintf(user->buf + len, "\\x%02x", c); | ||
573 | continue; | ||
574 | } | ||
575 | |||
576 | user->buf[len++] = c; | ||
577 | } | ||
578 | user->buf[len++] = '\n'; | ||
579 | } | ||
580 | |||
581 | user->idx = log_next(user->idx); | ||
582 | user->seq++; | ||
583 | raw_spin_unlock_irq(&logbuf_lock); | ||
584 | |||
585 | if (len > count) { | ||
586 | ret = -EINVAL; | ||
587 | goto out; | ||
588 | } | ||
589 | |||
590 | if (copy_to_user(buf, user->buf, len)) { | ||
591 | ret = -EFAULT; | ||
592 | goto out; | ||
593 | } | ||
594 | ret = len; | ||
595 | out: | ||
596 | mutex_unlock(&user->lock); | ||
597 | return ret; | ||
598 | } | ||
599 | |||
600 | static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence) | ||
601 | { | ||
602 | struct devkmsg_user *user = file->private_data; | ||
603 | loff_t ret = 0; | ||
604 | |||
605 | if (!user) | ||
606 | return -EBADF; | ||
607 | if (offset) | ||
608 | return -ESPIPE; | ||
609 | |||
610 | raw_spin_lock_irq(&logbuf_lock); | ||
611 | switch (whence) { | ||
612 | case SEEK_SET: | ||
613 | /* the first record */ | ||
614 | user->idx = log_first_idx; | ||
615 | user->seq = log_first_seq; | ||
616 | break; | ||
617 | case SEEK_DATA: | ||
618 | /* | ||
619 | * The first record after the last SYSLOG_ACTION_CLEAR, | ||
620 | * like issued by 'dmesg -c'. Reading /dev/kmsg itself | ||
621 | * changes no global state, and does not clear anything. | ||
622 | */ | ||
623 | user->idx = clear_idx; | ||
624 | user->seq = clear_seq; | ||
625 | break; | ||
626 | case SEEK_END: | ||
627 | /* after the last record */ | ||
628 | user->idx = log_next_idx; | ||
629 | user->seq = log_next_seq; | ||
630 | break; | ||
631 | default: | ||
632 | ret = -EINVAL; | ||
633 | } | ||
634 | raw_spin_unlock_irq(&logbuf_lock); | ||
635 | return ret; | ||
636 | } | ||
637 | |||
638 | static unsigned int devkmsg_poll(struct file *file, poll_table *wait) | ||
639 | { | ||
640 | struct devkmsg_user *user = file->private_data; | ||
641 | int ret = 0; | ||
642 | |||
643 | if (!user) | ||
644 | return POLLERR|POLLNVAL; | ||
645 | |||
646 | poll_wait(file, &log_wait, wait); | ||
647 | |||
648 | raw_spin_lock_irq(&logbuf_lock); | ||
649 | if (user->seq < log_next_seq) { | ||
650 | /* return error when data has vanished underneath us */ | ||
651 | if (user->seq < log_first_seq) | ||
652 | ret = POLLIN|POLLRDNORM|POLLERR|POLLPRI; | ||
653 | else | ||
654 | ret = POLLIN|POLLRDNORM; | ||
655 | } | ||
656 | raw_spin_unlock_irq(&logbuf_lock); | ||
657 | |||
658 | return ret; | ||
659 | } | ||
660 | |||
661 | static int devkmsg_open(struct inode *inode, struct file *file) | ||
662 | { | ||
663 | struct devkmsg_user *user; | ||
664 | int err; | ||
665 | |||
666 | /* write-only does not need any file context */ | ||
667 | if ((file->f_flags & O_ACCMODE) == O_WRONLY) | ||
668 | return 0; | ||
669 | |||
670 | err = check_syslog_permissions(SYSLOG_ACTION_READ_ALL, | ||
671 | SYSLOG_FROM_READER); | ||
672 | if (err) | ||
673 | return err; | ||
674 | |||
675 | user = kmalloc(sizeof(struct devkmsg_user), GFP_KERNEL); | ||
676 | if (!user) | ||
677 | return -ENOMEM; | ||
678 | |||
679 | mutex_init(&user->lock); | ||
680 | |||
681 | raw_spin_lock_irq(&logbuf_lock); | ||
682 | user->idx = log_first_idx; | ||
683 | user->seq = log_first_seq; | ||
684 | raw_spin_unlock_irq(&logbuf_lock); | ||
685 | |||
686 | file->private_data = user; | ||
687 | return 0; | ||
688 | } | ||
689 | |||
690 | static int devkmsg_release(struct inode *inode, struct file *file) | ||
691 | { | ||
692 | struct devkmsg_user *user = file->private_data; | ||
693 | |||
694 | if (!user) | ||
695 | return 0; | ||
696 | |||
697 | mutex_destroy(&user->lock); | ||
698 | kfree(user); | ||
699 | return 0; | ||
700 | } | ||
701 | |||
702 | const struct file_operations kmsg_fops = { | ||
703 | .open = devkmsg_open, | ||
704 | .read = devkmsg_read, | ||
705 | .aio_write = devkmsg_writev, | ||
706 | .llseek = devkmsg_llseek, | ||
707 | .poll = devkmsg_poll, | ||
708 | .release = devkmsg_release, | ||
709 | }; | ||
710 | |||
711 | #ifdef CONFIG_KEXEC | ||
712 | /* | ||
713 | * This appends the listed symbols to /proc/vmcoreinfo | ||
714 | * | ||
715 | * /proc/vmcoreinfo is used by various utilities, like crash and makedumpfile to | ||
716 | * obtain access to symbols that are otherwise very difficult to locate. These | ||
717 | * symbols are specifically used so that utilities can access and extract the | ||
718 | * dmesg log from a vmcore file after a crash. | ||
719 | */ | ||
720 | void log_buf_kexec_setup(void) | ||
721 | { | ||
722 | VMCOREINFO_SYMBOL(log_buf); | ||
723 | VMCOREINFO_SYMBOL(log_buf_len); | ||
724 | VMCOREINFO_SYMBOL(log_first_idx); | ||
725 | VMCOREINFO_SYMBOL(log_next_idx); | ||
726 | /* | ||
727 | * Export struct log size and field offsets. User space tools can | ||
728 | * parse it and detect any changes to structure down the line. | ||
729 | */ | ||
730 | VMCOREINFO_STRUCT_SIZE(log); | ||
731 | VMCOREINFO_OFFSET(log, ts_nsec); | ||
732 | VMCOREINFO_OFFSET(log, len); | ||
733 | VMCOREINFO_OFFSET(log, text_len); | ||
734 | VMCOREINFO_OFFSET(log, dict_len); | ||
735 | } | ||
736 | #endif | ||
737 | |||
738 | /* requested log_buf_len from kernel cmdline */ | ||
739 | static unsigned long __initdata new_log_buf_len; | ||
740 | |||
741 | /* save requested log_buf_len since it's too early to process it */ | ||
742 | static int __init log_buf_len_setup(char *str) | ||
743 | { | ||
744 | unsigned size = memparse(str, &str); | ||
745 | |||
746 | if (size) | ||
747 | size = roundup_pow_of_two(size); | ||
748 | if (size > log_buf_len) | ||
749 | new_log_buf_len = size; | ||
750 | |||
751 | return 0; | ||
752 | } | ||
753 | early_param("log_buf_len", log_buf_len_setup); | ||
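/*
 * For example, booting with "log_buf_len=1M" makes memparse() return
 * 1048576, which roundup_pow_of_two() leaves unchanged; a value such as
 * 100000 would be rounded up to 131072. If the result exceeds the
 * build-time log_buf_len, the buffer is reallocated in setup_log_buf().
 */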
754 | |||
755 | void __init setup_log_buf(int early) | ||
756 | { | ||
757 | unsigned long flags; | ||
758 | char *new_log_buf; | ||
759 | int free; | ||
760 | |||
761 | if (!new_log_buf_len) | ||
762 | return; | ||
763 | |||
764 | if (early) { | ||
765 | unsigned long mem; | ||
766 | |||
767 | mem = memblock_alloc(new_log_buf_len, PAGE_SIZE); | ||
768 | if (!mem) | ||
769 | return; | ||
770 | new_log_buf = __va(mem); | ||
771 | } else { | ||
772 | new_log_buf = alloc_bootmem_nopanic(new_log_buf_len); | ||
773 | } | ||
774 | |||
775 | if (unlikely(!new_log_buf)) { | ||
776 | pr_err("log_buf_len: %lu bytes not available\n", | ||
777 | new_log_buf_len); | ||
778 | return; | ||
779 | } | ||
780 | |||
781 | raw_spin_lock_irqsave(&logbuf_lock, flags); | ||
782 | log_buf_len = new_log_buf_len; | ||
783 | log_buf = new_log_buf; | ||
784 | new_log_buf_len = 0; | ||
785 | free = __LOG_BUF_LEN - log_next_idx; | ||
786 | memcpy(log_buf, __log_buf, __LOG_BUF_LEN); | ||
787 | raw_spin_unlock_irqrestore(&logbuf_lock, flags); | ||
788 | |||
789 | pr_info("log_buf_len: %d\n", log_buf_len); | ||
790 | pr_info("early log buf free: %d(%d%%)\n", | ||
791 | free, (free * 100) / __LOG_BUF_LEN); | ||
792 | } | ||
793 | |||
794 | static bool __read_mostly ignore_loglevel; | ||
795 | |||
796 | static int __init ignore_loglevel_setup(char *str) | ||
797 | { | ||
798 | ignore_loglevel = 1; | ||
799 | printk(KERN_INFO "debug: ignoring loglevel setting.\n"); | ||
800 | |||
801 | return 0; | ||
802 | } | ||
803 | |||
804 | early_param("ignore_loglevel", ignore_loglevel_setup); | ||
805 | module_param(ignore_loglevel, bool, S_IRUGO | S_IWUSR); | ||
806 | MODULE_PARM_DESC(ignore_loglevel, "ignore loglevel setting, to " | ||
807 | "print all kernel messages to the console."); | ||
808 | |||
809 | #ifdef CONFIG_BOOT_PRINTK_DELAY | ||
810 | |||
811 | static int boot_delay; /* msecs delay after each printk during bootup */ | ||
812 | static unsigned long long loops_per_msec; /* based on boot_delay */ | ||
813 | |||
814 | static int __init boot_delay_setup(char *str) | ||
815 | { | ||
816 | unsigned long lpj; | ||
817 | |||
818 | lpj = preset_lpj ? preset_lpj : 1000000; /* some guess */ | ||
819 | loops_per_msec = (unsigned long long)lpj / 1000 * HZ; | ||
820 | |||
821 | get_option(&str, &boot_delay); | ||
822 | if (boot_delay > 10 * 1000) | ||
823 | boot_delay = 0; | ||
824 | |||
825 | pr_debug("boot_delay: %u, preset_lpj: %ld, lpj: %lu, " | ||
826 | "HZ: %d, loops_per_msec: %llu\n", | ||
827 | boot_delay, preset_lpj, lpj, HZ, loops_per_msec); | ||
828 | return 1; | ||
829 | } | ||
830 | __setup("boot_delay=", boot_delay_setup); | ||
831 | |||
832 | static void boot_delay_msec(int level) | ||
833 | { | ||
834 | unsigned long long k; | ||
835 | unsigned long timeout; | ||
836 | |||
837 | if ((boot_delay == 0 || system_state != SYSTEM_BOOTING) | ||
838 | || (level >= console_loglevel && !ignore_loglevel)) { | ||
839 | return; | ||
840 | } | ||
841 | |||
842 | k = (unsigned long long)loops_per_msec * boot_delay; | ||
843 | |||
844 | timeout = jiffies + msecs_to_jiffies(boot_delay); | ||
845 | while (k) { | ||
846 | k--; | ||
847 | cpu_relax(); | ||
848 | /* | ||
849 | * use (volatile) jiffies to prevent | ||
850 | * compiler reduction; loop termination via jiffies | ||
851 | * is secondary and may or may not happen. | ||
852 | */ | ||
853 | if (time_after(jiffies, timeout)) | ||
854 | break; | ||
855 | touch_nmi_watchdog(); | ||
856 | } | ||
857 | } | ||
858 | #else | ||
859 | static inline void boot_delay_msec(int level) | ||
860 | { | ||
861 | } | ||
862 | #endif | ||
863 | |||
864 | #if defined(CONFIG_PRINTK_TIME) | ||
865 | static bool printk_time = 1; | ||
866 | #else | ||
867 | static bool printk_time; | ||
868 | #endif | ||
869 | module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR); | ||
870 | |||
871 | static size_t print_time(u64 ts, char *buf) | ||
872 | { | ||
873 | unsigned long rem_nsec; | ||
874 | |||
875 | if (!printk_time) | ||
876 | return 0; | ||
877 | |||
878 | rem_nsec = do_div(ts, 1000000000); | ||
879 | |||
880 | if (!buf) | ||
881 | return snprintf(NULL, 0, "[%5lu.000000] ", (unsigned long)ts); | ||
882 | |||
883 | return sprintf(buf, "[%5lu.%06lu] ", | ||
884 | (unsigned long)ts, rem_nsec / 1000); | ||
885 | } | ||
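/*
 * For example, a ts of 5140900123 ns is split by do_div() into ts = 5 and
 * rem_nsec = 140900123, and is printed as "[    5.140900] "; the buf == NULL
 * call only reports the resulting length.
 */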
886 | |||
887 | static size_t print_prefix(const struct log *msg, bool syslog, char *buf) | ||
888 | { | ||
889 | size_t len = 0; | ||
890 | unsigned int prefix = (msg->facility << 3) | msg->level; | ||
891 | |||
892 | if (syslog) { | ||
893 | if (buf) { | ||
894 | len += sprintf(buf, "<%u>", prefix); | ||
895 | } else { | ||
896 | len += 3; | ||
897 | if (prefix > 999) | ||
898 | len += 3; | ||
899 | else if (prefix > 99) | ||
900 | len += 2; | ||
901 | else if (prefix > 9) | ||
902 | len++; | ||
903 | } | ||
904 | } | ||
905 | |||
906 | len += print_time(msg->ts_nsec, buf ? buf + len : NULL); | ||
907 | return len; | ||
908 | } | ||
909 | |||
910 | static size_t msg_print_text(const struct log *msg, enum log_flags prev, | ||
911 | bool syslog, char *buf, size_t size) | ||
912 | { | ||
913 | const char *text = log_text(msg); | ||
914 | size_t text_size = msg->text_len; | ||
915 | bool prefix = true; | ||
916 | bool newline = true; | ||
917 | size_t len = 0; | ||
918 | |||
919 | if ((prev & LOG_CONT) && !(msg->flags & LOG_PREFIX)) | ||
920 | prefix = false; | ||
921 | |||
922 | if (msg->flags & LOG_CONT) { | ||
923 | if ((prev & LOG_CONT) && !(prev & LOG_NEWLINE)) | ||
924 | prefix = false; | ||
925 | |||
926 | if (!(msg->flags & LOG_NEWLINE)) | ||
927 | newline = false; | ||
928 | } | ||
929 | |||
930 | do { | ||
931 | const char *next = memchr(text, '\n', text_size); | ||
932 | size_t text_len; | ||
933 | |||
934 | if (next) { | ||
935 | text_len = next - text; | ||
936 | next++; | ||
937 | text_size -= next - text; | ||
938 | } else { | ||
939 | text_len = text_size; | ||
940 | } | ||
941 | |||
942 | if (buf) { | ||
943 | if (print_prefix(msg, syslog, NULL) + | ||
944 | text_len + 1 >= size - len) | ||
945 | break; | ||
946 | |||
947 | if (prefix) | ||
948 | len += print_prefix(msg, syslog, buf + len); | ||
949 | memcpy(buf + len, text, text_len); | ||
950 | len += text_len; | ||
951 | if (next || newline) | ||
952 | buf[len++] = '\n'; | ||
953 | } else { | ||
954 | /* SYSLOG_ACTION_* buffer size only calculation */ | ||
955 | if (prefix) | ||
956 | len += print_prefix(msg, syslog, NULL); | ||
957 | len += text_len; | ||
958 | if (next || newline) | ||
959 | len++; | ||
960 | } | ||
961 | |||
962 | prefix = true; | ||
963 | text = next; | ||
964 | } while (text); | ||
965 | |||
966 | return len; | ||
967 | } | ||
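/*
 * Example of the loop above: a record holding "line one\nline two" with
 * LOG_NEWLINE set is emitted as two lines, each preceded by its own prefix
 * (and timestamp, if enabled); the buf == NULL pass computes exactly that
 * length without copying anything.
 */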
968 | |||
969 | static int syslog_print(char __user *buf, int size) | ||
970 | { | ||
971 | char *text; | ||
972 | struct log *msg; | ||
973 | int len = 0; | ||
974 | |||
975 | text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL); | ||
976 | if (!text) | ||
977 | return -ENOMEM; | ||
978 | |||
979 | while (size > 0) { | ||
980 | size_t n; | ||
981 | size_t skip; | ||
982 | |||
983 | raw_spin_lock_irq(&logbuf_lock); | ||
984 | if (syslog_seq < log_first_seq) { | ||
985 | /* messages are gone, move to first one */ | ||
986 | syslog_seq = log_first_seq; | ||
987 | syslog_idx = log_first_idx; | ||
988 | syslog_prev = 0; | ||
989 | syslog_partial = 0; | ||
990 | } | ||
991 | if (syslog_seq == log_next_seq) { | ||
992 | raw_spin_unlock_irq(&logbuf_lock); | ||
993 | break; | ||
994 | } | ||
995 | |||
996 | skip = syslog_partial; | ||
997 | msg = log_from_idx(syslog_idx); | ||
998 | n = msg_print_text(msg, syslog_prev, true, text, | ||
999 | LOG_LINE_MAX + PREFIX_MAX); | ||
1000 | if (n - syslog_partial <= size) { | ||
1001 | /* message fits into buffer, move forward */ | ||
1002 | syslog_idx = log_next(syslog_idx); | ||
1003 | syslog_seq++; | ||
1004 | syslog_prev = msg->flags; | ||
1005 | n -= syslog_partial; | ||
1006 | syslog_partial = 0; | ||
1007 | } else if (!len) { | ||
1008 | /* partial read(), remember position */ | ||
1009 | n = size; | ||
1010 | syslog_partial += n; | ||
1011 | } else | ||
1012 | n = 0; | ||
1013 | raw_spin_unlock_irq(&logbuf_lock); | ||
1014 | |||
1015 | if (!n) | ||
1016 | break; | ||
1017 | |||
1018 | if (copy_to_user(buf, text + skip, n)) { | ||
1019 | if (!len) | ||
1020 | len = -EFAULT; | ||
1021 | break; | ||
1022 | } | ||
1023 | |||
1024 | len += n; | ||
1025 | size -= n; | ||
1026 | buf += n; | ||
1027 | } | ||
1028 | |||
1029 | kfree(text); | ||
1030 | return len; | ||
1031 | } | ||
1032 | |||
1033 | static int syslog_print_all(char __user *buf, int size, bool clear) | ||
1034 | { | ||
1035 | char *text; | ||
1036 | int len = 0; | ||
1037 | |||
1038 | text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL); | ||
1039 | if (!text) | ||
1040 | return -ENOMEM; | ||
1041 | |||
1042 | raw_spin_lock_irq(&logbuf_lock); | ||
1043 | if (buf) { | ||
1044 | u64 next_seq; | ||
1045 | u64 seq; | ||
1046 | u32 idx; | ||
1047 | enum log_flags prev; | ||
1048 | |||
1049 | if (clear_seq < log_first_seq) { | ||
1050 | /* messages are gone, move to first available one */ | ||
1051 | clear_seq = log_first_seq; | ||
1052 | clear_idx = log_first_idx; | ||
1053 | } | ||
1054 | |||
1055 | /* | ||
1056 | * Find first record that fits, including all following records, | ||
1057 | * into the user-provided buffer for this dump. | ||
1058 | */ | ||
1059 | seq = clear_seq; | ||
1060 | idx = clear_idx; | ||
1061 | prev = 0; | ||
1062 | while (seq < log_next_seq) { | ||
1063 | struct log *msg = log_from_idx(idx); | ||
1064 | |||
1065 | len += msg_print_text(msg, prev, true, NULL, 0); | ||
1066 | prev = msg->flags; | ||
1067 | idx = log_next(idx); | ||
1068 | seq++; | ||
1069 | } | ||
1070 | |||
1071 | /* move first record forward until length fits into the buffer */ | ||
1072 | seq = clear_seq; | ||
1073 | idx = clear_idx; | ||
1074 | prev = 0; | ||
1075 | while (len > size && seq < log_next_seq) { | ||
1076 | struct log *msg = log_from_idx(idx); | ||
1077 | |||
1078 | len -= msg_print_text(msg, prev, true, NULL, 0); | ||
1079 | prev = msg->flags; | ||
1080 | idx = log_next(idx); | ||
1081 | seq++; | ||
1082 | } | ||
1083 | |||
1084 | /* last message fitting into this dump */ | ||
1085 | next_seq = log_next_seq; | ||
1086 | |||
1087 | len = 0; | ||
1088 | prev = 0; | ||
1089 | while (len >= 0 && seq < next_seq) { | ||
1090 | struct log *msg = log_from_idx(idx); | ||
1091 | int textlen; | ||
1092 | |||
1093 | textlen = msg_print_text(msg, prev, true, text, | ||
1094 | LOG_LINE_MAX + PREFIX_MAX); | ||
1095 | if (textlen < 0) { | ||
1096 | len = textlen; | ||
1097 | break; | ||
1098 | } | ||
1099 | idx = log_next(idx); | ||
1100 | seq++; | ||
1101 | prev = msg->flags; | ||
1102 | |||
1103 | raw_spin_unlock_irq(&logbuf_lock); | ||
1104 | if (copy_to_user(buf + len, text, textlen)) | ||
1105 | len = -EFAULT; | ||
1106 | else | ||
1107 | len += textlen; | ||
1108 | raw_spin_lock_irq(&logbuf_lock); | ||
1109 | |||
1110 | if (seq < log_first_seq) { | ||
1111 | /* messages are gone, move to next one */ | ||
1112 | seq = log_first_seq; | ||
1113 | idx = log_first_idx; | ||
1114 | prev = 0; | ||
1115 | } | ||
1116 | } | ||
1117 | } | ||
1118 | |||
1119 | if (clear) { | ||
1120 | clear_seq = log_next_seq; | ||
1121 | clear_idx = log_next_idx; | ||
1122 | } | ||
1123 | raw_spin_unlock_irq(&logbuf_lock); | ||
1124 | |||
1125 | kfree(text); | ||
1126 | return len; | ||
1127 | } | ||
1128 | |||
1129 | int do_syslog(int type, char __user *buf, int len, bool from_file) | ||
1130 | { | ||
1131 | bool clear = false; | ||
1132 | static int saved_console_loglevel = -1; | ||
1133 | int error; | ||
1134 | |||
1135 | error = check_syslog_permissions(type, from_file); | ||
1136 | if (error) | ||
1137 | goto out; | ||
1138 | |||
1139 | error = security_syslog(type); | ||
1140 | if (error) | ||
1141 | return error; | ||
1142 | |||
1143 | switch (type) { | ||
1144 | case SYSLOG_ACTION_CLOSE: /* Close log */ | ||
1145 | break; | ||
1146 | case SYSLOG_ACTION_OPEN: /* Open log */ | ||
1147 | break; | ||
1148 | case SYSLOG_ACTION_READ: /* Read from log */ | ||
1149 | error = -EINVAL; | ||
1150 | if (!buf || len < 0) | ||
1151 | goto out; | ||
1152 | error = 0; | ||
1153 | if (!len) | ||
1154 | goto out; | ||
1155 | if (!access_ok(VERIFY_WRITE, buf, len)) { | ||
1156 | error = -EFAULT; | ||
1157 | goto out; | ||
1158 | } | ||
1159 | error = wait_event_interruptible(log_wait, | ||
1160 | syslog_seq != log_next_seq); | ||
1161 | if (error) | ||
1162 | goto out; | ||
1163 | error = syslog_print(buf, len); | ||
1164 | break; | ||
1165 | /* Read/clear last kernel messages */ | ||
1166 | case SYSLOG_ACTION_READ_CLEAR: | ||
1167 | clear = true; | ||
1168 | /* FALL THRU */ | ||
1169 | /* Read last kernel messages */ | ||
1170 | case SYSLOG_ACTION_READ_ALL: | ||
1171 | error = -EINVAL; | ||
1172 | if (!buf || len < 0) | ||
1173 | goto out; | ||
1174 | error = 0; | ||
1175 | if (!len) | ||
1176 | goto out; | ||
1177 | if (!access_ok(VERIFY_WRITE, buf, len)) { | ||
1178 | error = -EFAULT; | ||
1179 | goto out; | ||
1180 | } | ||
1181 | error = syslog_print_all(buf, len, clear); | ||
1182 | break; | ||
1183 | /* Clear ring buffer */ | ||
1184 | case SYSLOG_ACTION_CLEAR: | ||
1185 | syslog_print_all(NULL, 0, true); | ||
1186 | break; | ||
1187 | /* Disable logging to console */ | ||
1188 | case SYSLOG_ACTION_CONSOLE_OFF: | ||
1189 | if (saved_console_loglevel == -1) | ||
1190 | saved_console_loglevel = console_loglevel; | ||
1191 | console_loglevel = minimum_console_loglevel; | ||
1192 | break; | ||
1193 | /* Enable logging to console */ | ||
1194 | case SYSLOG_ACTION_CONSOLE_ON: | ||
1195 | if (saved_console_loglevel != -1) { | ||
1196 | console_loglevel = saved_console_loglevel; | ||
1197 | saved_console_loglevel = -1; | ||
1198 | } | ||
1199 | break; | ||
1200 | /* Set level of messages printed to console */ | ||
1201 | case SYSLOG_ACTION_CONSOLE_LEVEL: | ||
1202 | error = -EINVAL; | ||
1203 | if (len < 1 || len > 8) | ||
1204 | goto out; | ||
1205 | if (len < minimum_console_loglevel) | ||
1206 | len = minimum_console_loglevel; | ||
1207 | console_loglevel = len; | ||
1208 | /* Implicitly re-enable logging to console */ | ||
1209 | saved_console_loglevel = -1; | ||
1210 | error = 0; | ||
1211 | break; | ||
1212 | /* Number of chars in the log buffer */ | ||
1213 | case SYSLOG_ACTION_SIZE_UNREAD: | ||
1214 | raw_spin_lock_irq(&logbuf_lock); | ||
1215 | if (syslog_seq < log_first_seq) { | ||
1216 | /* messages are gone, move to first one */ | ||
1217 | syslog_seq = log_first_seq; | ||
1218 | syslog_idx = log_first_idx; | ||
1219 | syslog_prev = 0; | ||
1220 | syslog_partial = 0; | ||
1221 | } | ||
1222 | if (from_file) { | ||
1223 | /* | ||
1224 | * Short-cut for poll() of /proc/kmsg, which simply checks | ||
1225 | * for pending data, not the size; return the count of | ||
1226 | * records, not the length. | ||
1227 | */ | ||
1228 | error = log_next_idx - syslog_idx; | ||
1229 | } else { | ||
1230 | u64 seq = syslog_seq; | ||
1231 | u32 idx = syslog_idx; | ||
1232 | enum log_flags prev = syslog_prev; | ||
1233 | |||
1234 | error = 0; | ||
1235 | while (seq < log_next_seq) { | ||
1236 | struct log *msg = log_from_idx(idx); | ||
1237 | |||
1238 | error += msg_print_text(msg, prev, true, NULL, 0); | ||
1239 | idx = log_next(idx); | ||
1240 | seq++; | ||
1241 | prev = msg->flags; | ||
1242 | } | ||
1243 | error -= syslog_partial; | ||
1244 | } | ||
1245 | raw_spin_unlock_irq(&logbuf_lock); | ||
1246 | break; | ||
1247 | /* Size of the log buffer */ | ||
1248 | case SYSLOG_ACTION_SIZE_BUFFER: | ||
1249 | error = log_buf_len; | ||
1250 | break; | ||
1251 | default: | ||
1252 | error = -EINVAL; | ||
1253 | break; | ||
1254 | } | ||
1255 | out: | ||
1256 | return error; | ||
1257 | } | ||
1258 | |||
1259 | SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len) | ||
1260 | { | ||
1261 | return do_syslog(type, buf, len, SYSLOG_FROM_READER); | ||
1262 | } | ||
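/*
 * An illustrative userspace sketch (not part of this file): the same
 * actions are reachable through the glibc klogctl(3) wrapper, e.g. reading
 * the whole buffer the way dmesg does:
 *
 *	int n = klogctl(10, NULL, 0);		SYSLOG_ACTION_SIZE_BUFFER
 *	char *buf = n > 0 ? malloc(n) : NULL;
 *
 *	if (buf) {
 *		n = klogctl(3, buf, n);		SYSLOG_ACTION_READ_ALL
 *		if (n > 0)
 *			fwrite(buf, 1, n, stdout);
 *		free(buf);
 *	}
 */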
1263 | |||
1264 | /* | ||
1265 | * Call the console drivers, asking them to write out the | ||
1266 | * given text and length. | ||
1267 | * The console_lock must be held. | ||
1268 | */ | ||
1269 | static void call_console_drivers(int level, const char *text, size_t len) | ||
1270 | { | ||
1271 | struct console *con; | ||
1272 | |||
1273 | trace_console(text, len); | ||
1274 | |||
1275 | if (level >= console_loglevel && !ignore_loglevel) | ||
1276 | return; | ||
1277 | if (!console_drivers) | ||
1278 | return; | ||
1279 | |||
1280 | for_each_console(con) { | ||
1281 | if (exclusive_console && con != exclusive_console) | ||
1282 | continue; | ||
1283 | if (!(con->flags & CON_ENABLED)) | ||
1284 | continue; | ||
1285 | if (!con->write) | ||
1286 | continue; | ||
1287 | if (!cpu_online(smp_processor_id()) && | ||
1288 | !(con->flags & CON_ANYTIME)) | ||
1289 | continue; | ||
1290 | con->write(con, text, len); | ||
1291 | } | ||
1292 | } | ||
1293 | |||
1294 | /* | ||
1295 | * Zap console related locks when oopsing. Only zap at most once | ||
1296 | * every 10 seconds, to leave time for slow consoles to print a | ||
1297 | * full oops. | ||
1298 | */ | ||
1299 | static void zap_locks(void) | ||
1300 | { | ||
1301 | static unsigned long oops_timestamp; | ||
1302 | |||
1303 | if (time_after_eq(jiffies, oops_timestamp) && | ||
1304 | !time_after(jiffies, oops_timestamp + 30 * HZ)) | ||
1305 | return; | ||
1306 | |||
1307 | oops_timestamp = jiffies; | ||
1308 | |||
1309 | debug_locks_off(); | ||
1310 | /* If a crash is occurring, make sure we can't deadlock */ | ||
1311 | raw_spin_lock_init(&logbuf_lock); | ||
1312 | /* And make sure that we print immediately */ | ||
1313 | sema_init(&console_sem, 1); | ||
1314 | } | ||
1315 | |||
1316 | /* Check if we have any console registered that can be called early in boot. */ | ||
1317 | static int have_callable_console(void) | ||
1318 | { | ||
1319 | struct console *con; | ||
1320 | |||
1321 | for_each_console(con) | ||
1322 | if (con->flags & CON_ANYTIME) | ||
1323 | return 1; | ||
1324 | |||
1325 | return 0; | ||
1326 | } | ||
1327 | |||
1328 | /* | ||
1329 | * Can we actually use the console at this time on this cpu? | ||
1330 | * | ||
1331 | * Console drivers may assume that per-cpu resources have | ||
1332 | * been allocated. So unless they're explicitly marked as | ||
1333 | * being able to cope (CON_ANYTIME) don't call them until | ||
1334 | * this CPU is officially up. | ||
1335 | */ | ||
1336 | static inline int can_use_console(unsigned int cpu) | ||
1337 | { | ||
1338 | return cpu_online(cpu) || have_callable_console(); | ||
1339 | } | ||
1340 | |||
1341 | /* | ||
1342 | * Try to get console ownership to actually show the kernel | ||
1343 | * messages from a 'printk'. Return true (and with the | ||
1344 | * console_lock held, and 'console_locked' set) if it | ||
1345 | * is successful, false otherwise. | ||
1346 | * | ||
1347 | * This gets called with the 'logbuf_lock' spinlock held and | ||
1348 | * interrupts disabled. It should return with 'logbuf_lock' | ||
1349 | * released but interrupts still disabled. | ||
1350 | */ | ||
1351 | static int console_trylock_for_printk(unsigned int cpu) | ||
1352 | __releases(&logbuf_lock) | ||
1353 | { | ||
1354 | int retval = 0, wake = 0; | ||
1355 | |||
1356 | if (console_trylock()) { | ||
1357 | retval = 1; | ||
1358 | |||
1359 | /* | ||
1360 | * If we can't use the console, we need to release | ||
1361 | * the console semaphore by hand to avoid flushing | ||
1362 | * the buffer. We need to hold the console semaphore | ||
1363 | * in order to do this test safely. | ||
1364 | */ | ||
1365 | if (!can_use_console(cpu)) { | ||
1366 | console_locked = 0; | ||
1367 | wake = 1; | ||
1368 | retval = 0; | ||
1369 | } | ||
1370 | } | ||
1371 | logbuf_cpu = UINT_MAX; | ||
1372 | raw_spin_unlock(&logbuf_lock); | ||
1373 | if (wake) | ||
1374 | up(&console_sem); | ||
1375 | return retval; | ||
1376 | } | ||
1377 | |||
1378 | int printk_delay_msec __read_mostly; | ||
1379 | |||
1380 | static inline void printk_delay(void) | ||
1381 | { | ||
1382 | if (unlikely(printk_delay_msec)) { | ||
1383 | int m = printk_delay_msec; | ||
1384 | |||
1385 | while (m--) { | ||
1386 | mdelay(1); | ||
1387 | touch_nmi_watchdog(); | ||
1388 | } | ||
1389 | } | ||
1390 | } | ||
1391 | |||
1392 | /* | ||
1393 | * Continuation lines are buffered, and not committed to the record buffer | ||
1394 | * until the line is complete, or a race forces it. The line fragments | ||
1395 | * though, are printed immediately to the consoles to ensure everything has | ||
1396 | * reached the console in case of a kernel crash. | ||
1397 | */ | ||
1398 | static struct cont { | ||
1399 | char buf[LOG_LINE_MAX]; | ||
1400 | size_t len; /* length == 0 means unused buffer */ | ||
1401 | size_t cons; /* bytes written to console */ | ||
1402 | struct task_struct *owner; /* task of first print */ | ||
1403 | u64 ts_nsec; /* time of first print */ | ||
1404 | u8 level; /* log level of first message */ | ||
1405 | u8 facility; /* log facility of first message */ | ||
1406 | enum log_flags flags; /* prefix, newline flags */ | ||
1407 | bool flushed:1; /* buffer sealed and committed */ | ||
1408 | } cont; | ||
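/*
 * A rough example of the flow above: printk(KERN_INFO "foo"); followed by
 * printk(KERN_CONT " bar\n"); from the same task buffers "foo" and " bar"
 * here, and only the completed line "foo bar" is committed to the record
 * buffer; the fragments are flushed early if another task interleaves or
 * the buffer runs full.
 */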
1409 | |||
1410 | static void cont_flush(enum log_flags flags) | ||
1411 | { | ||
1412 | if (cont.flushed) | ||
1413 | return; | ||
1414 | if (cont.len == 0) | ||
1415 | return; | ||
1416 | |||
1417 | if (cont.cons) { | ||
1418 | /* | ||
1419 | * If a fragment of this line was directly flushed to the | ||
1420 | * console, wait for the console to pick up the rest of the | ||
1421 | * line. LOG_NOCONS suppresses a duplicated output. | ||
1422 | */ | ||
1423 | log_store(cont.facility, cont.level, flags | LOG_NOCONS, | ||
1424 | cont.ts_nsec, NULL, 0, cont.buf, cont.len); | ||
1425 | cont.flags = flags; | ||
1426 | cont.flushed = true; | ||
1427 | } else { | ||
1428 | /* | ||
1429 | * If no fragment of this line ever reached the console, | ||
1430 | * just submit it to the store and free the buffer. | ||
1431 | */ | ||
1432 | log_store(cont.facility, cont.level, flags, 0, | ||
1433 | NULL, 0, cont.buf, cont.len); | ||
1434 | cont.len = 0; | ||
1435 | } | ||
1436 | } | ||
1437 | |||
1438 | static bool cont_add(int facility, int level, const char *text, size_t len) | ||
1439 | { | ||
1440 | if (cont.len && cont.flushed) | ||
1441 | return false; | ||
1442 | |||
1443 | if (cont.len + len > sizeof(cont.buf)) { | ||
1444 | /* the line gets too long, split it up into separate records */ | ||
1445 | cont_flush(LOG_CONT); | ||
1446 | return false; | ||
1447 | } | ||
1448 | |||
1449 | if (!cont.len) { | ||
1450 | cont.facility = facility; | ||
1451 | cont.level = level; | ||
1452 | cont.owner = current; | ||
1453 | cont.ts_nsec = local_clock(); | ||
1454 | cont.flags = 0; | ||
1455 | cont.cons = 0; | ||
1456 | cont.flushed = false; | ||
1457 | } | ||
1458 | |||
1459 | memcpy(cont.buf + cont.len, text, len); | ||
1460 | cont.len += len; | ||
1461 | |||
1462 | if (cont.len > (sizeof(cont.buf) * 80) / 100) | ||
1463 | cont_flush(LOG_CONT); | ||
1464 | |||
1465 | return true; | ||
1466 | } | ||
1467 | |||
1468 | static size_t cont_print_text(char *text, size_t size) | ||
1469 | { | ||
1470 | size_t textlen = 0; | ||
1471 | size_t len; | ||
1472 | |||
1473 | if (cont.cons == 0 && (console_prev & LOG_NEWLINE)) { | ||
1474 | textlen += print_time(cont.ts_nsec, text); | ||
1475 | size -= textlen; | ||
1476 | } | ||
1477 | |||
1478 | len = cont.len - cont.cons; | ||
1479 | if (len > 0) { | ||
1480 | if (len+1 > size) | ||
1481 | len = size-1; | ||
1482 | memcpy(text + textlen, cont.buf + cont.cons, len); | ||
1483 | textlen += len; | ||
1484 | cont.cons = cont.len; | ||
1485 | } | ||
1486 | |||
1487 | if (cont.flushed) { | ||
1488 | if (cont.flags & LOG_NEWLINE) | ||
1489 | text[textlen++] = '\n'; | ||
1490 | /* got everything, release buffer */ | ||
1491 | cont.len = 0; | ||
1492 | } | ||
1493 | return textlen; | ||
1494 | } | ||
1495 | |||
1496 | asmlinkage int vprintk_emit(int facility, int level, | ||
1497 | const char *dict, size_t dictlen, | ||
1498 | const char *fmt, va_list args) | ||
1499 | { | ||
1500 | static int recursion_bug; | ||
1501 | static char textbuf[LOG_LINE_MAX]; | ||
1502 | char *text = textbuf; | ||
1503 | size_t text_len; | ||
1504 | enum log_flags lflags = 0; | ||
1505 | unsigned long flags; | ||
1506 | int this_cpu; | ||
1507 | int printed_len = 0; | ||
1508 | |||
1509 | boot_delay_msec(level); | ||
1510 | printk_delay(); | ||
1511 | |||
1512 | /* This stops the holder of console_sem just where we want him */ | ||
1513 | local_irq_save(flags); | ||
1514 | this_cpu = smp_processor_id(); | ||
1515 | |||
1516 | /* | ||
1517 | * Ouch, printk recursed into itself! | ||
1518 | */ | ||
1519 | if (unlikely(logbuf_cpu == this_cpu)) { | ||
1520 | /* | ||
1521 | * If a crash is occurring during printk() on this CPU, | ||
1522 | * then try to get the crash message out but make sure | ||
1523 | * we can't deadlock. Otherwise just return to avoid the | ||
1524 | * recursion - but flag it so that the recursion can be | ||
1525 | * printed at the next appropriate moment: | ||
1526 | */ | ||
1527 | if (!oops_in_progress && !lockdep_recursing(current)) { | ||
1528 | recursion_bug = 1; | ||
1529 | goto out_restore_irqs; | ||
1530 | } | ||
1531 | zap_locks(); | ||
1532 | } | ||
1533 | |||
1534 | lockdep_off(); | ||
1535 | raw_spin_lock(&logbuf_lock); | ||
1536 | logbuf_cpu = this_cpu; | ||
1537 | |||
1538 | if (recursion_bug) { | ||
1539 | static const char recursion_msg[] = | ||
1540 | "BUG: recent printk recursion!"; | ||
1541 | |||
1542 | recursion_bug = 0; | ||
1543 | printed_len += strlen(recursion_msg); | ||
1544 | /* emit KERN_CRIT message */ | ||
1545 | log_store(0, 2, LOG_PREFIX|LOG_NEWLINE, 0, | ||
1546 | NULL, 0, recursion_msg, printed_len); | ||
1547 | } | ||
1548 | |||
1549 | /* | ||
1550 | * The printf needs to come first; we need the syslog | ||
1551 | * prefix which might be passed-in as a parameter. | ||
1552 | */ | ||
1553 | text_len = vscnprintf(text, sizeof(textbuf), fmt, args); | ||
1554 | |||
1555 | /* mark and strip a trailing newline */ | ||
1556 | if (text_len && text[text_len-1] == '\n') { | ||
1557 | text_len--; | ||
1558 | lflags |= LOG_NEWLINE; | ||
1559 | } | ||
1560 | |||
1561 | /* strip kernel syslog prefix and extract log level or control flags */ | ||
1562 | if (facility == 0) { | ||
1563 | int kern_level = printk_get_level(text); | ||
1564 | |||
1565 | if (kern_level) { | ||
1566 | const char *end_of_header = printk_skip_level(text); | ||
1567 | switch (kern_level) { | ||
1568 | case '0' ... '7': | ||
1569 | if (level == -1) | ||
1570 | level = kern_level - '0'; /* fallthrough */ | ||
1571 | case 'd': /* KERN_DEFAULT */ | ||
1572 | lflags |= LOG_PREFIX; /* fallthrough */ | ||
1573 | case 'c': /* KERN_CONT */ | ||
1574 | break; | ||
1575 | } | ||
1576 | text_len -= end_of_header - text; | ||
1577 | text = (char *)end_of_header; | ||
1578 | } | ||
1579 | } | ||
1580 | |||
1581 | if (level == -1) | ||
1582 | level = default_message_loglevel; | ||
1583 | |||
1584 | if (dict) | ||
1585 | lflags |= LOG_PREFIX|LOG_NEWLINE; | ||
1586 | |||
1587 | if (!(lflags & LOG_NEWLINE)) { | ||
1588 | /* | ||
1589 | * Flush the conflicting buffer. An earlier newline was missing, | ||
1590 | * or another task also prints continuation lines. | ||
1591 | */ | ||
1592 | if (cont.len && (lflags & LOG_PREFIX || cont.owner != current)) | ||
1593 | cont_flush(LOG_NEWLINE); | ||
1594 | |||
1595 | /* buffer line if possible, otherwise store it right away */ | ||
1596 | if (!cont_add(facility, level, text, text_len)) | ||
1597 | log_store(facility, level, lflags | LOG_CONT, 0, | ||
1598 | dict, dictlen, text, text_len); | ||
1599 | } else { | ||
1600 | bool stored = false; | ||
1601 | |||
1602 | /* | ||
1603 | * If an earlier newline was missing and it was the same task, | ||
1604 | * either merge it with the current buffer and flush, or if | ||
1605 | * there was a race with interrupts (prefix == true) then just | ||
1606 | * flush it out and store this line separately. | ||
1607 | */ | ||
1608 | if (cont.len && cont.owner == current) { | ||
1609 | if (!(lflags & LOG_PREFIX)) | ||
1610 | stored = cont_add(facility, level, text, text_len); | ||
1611 | cont_flush(LOG_NEWLINE); | ||
1612 | } | ||
1613 | |||
1614 | if (!stored) | ||
1615 | log_store(facility, level, lflags, 0, | ||
1616 | dict, dictlen, text, text_len); | ||
1617 | } | ||
1618 | printed_len += text_len; | ||
1619 | |||
1620 | /* | ||
1621 | * Try to acquire and then immediately release the console semaphore. | ||
1622 | * The release will print out buffers and wake up /dev/kmsg and syslog() | ||
1623 | * users. | ||
1624 | * | ||
1625 | * The console_trylock_for_printk() function will release 'logbuf_lock' | ||
1626 | * regardless of whether it actually gets the console semaphore or not. | ||
1627 | */ | ||
1628 | if (console_trylock_for_printk(this_cpu)) | ||
1629 | console_unlock(); | ||
1630 | |||
1631 | lockdep_on(); | ||
1632 | out_restore_irqs: | ||
1633 | local_irq_restore(flags); | ||
1634 | |||
1635 | return printed_len; | ||
1636 | } | ||
1637 | EXPORT_SYMBOL(vprintk_emit); | ||
1638 | |||
1639 | asmlinkage int vprintk(const char *fmt, va_list args) | ||
1640 | { | ||
1641 | return vprintk_emit(0, -1, NULL, 0, fmt, args); | ||
1642 | } | ||
1643 | EXPORT_SYMBOL(vprintk); | ||
1644 | |||
1645 | asmlinkage int printk_emit(int facility, int level, | ||
1646 | const char *dict, size_t dictlen, | ||
1647 | const char *fmt, ...) | ||
1648 | { | ||
1649 | va_list args; | ||
1650 | int r; | ||
1651 | |||
1652 | va_start(args, fmt); | ||
1653 | r = vprintk_emit(facility, level, dict, dictlen, fmt, args); | ||
1654 | va_end(args); | ||
1655 | |||
1656 | return r; | ||
1657 | } | ||
1658 | EXPORT_SYMBOL(printk_emit); | ||
1659 | |||
1660 | /** | ||
1661 | * printk - print a kernel message | ||
1662 | * @fmt: format string | ||
1663 | * | ||
1664 | * This is printk(). It can be called from any context. We want it to work. | ||
1665 | * | ||
1666 | * We try to grab the console_lock. If we succeed, it's easy - we log the | ||
1667 | * output and call the console drivers. If we fail to get the semaphore, we | ||
1668 | * place the output into the log buffer and return. The current holder of | ||
1669 | * the console_sem will notice the new output in console_unlock(); and will | ||
1670 | * send it to the consoles before releasing the lock. | ||
1671 | * | ||
1672 | * One effect of this deferred printing is that code which calls printk() and | ||
1673 | * then changes console_loglevel may break. This is because console_loglevel | ||
1674 | * is inspected when the actual printing occurs. | ||
1675 | * | ||
1676 | * See also: | ||
1677 | * printf(3) | ||
1678 | * | ||
1679 | * See the vsnprintf() documentation for format string extensions over C99. | ||
1680 | */ | ||
1681 | asmlinkage int printk(const char *fmt, ...) | ||
1682 | { | ||
1683 | va_list args; | ||
1684 | int r; | ||
1685 | |||
1686 | #ifdef CONFIG_KGDB_KDB | ||
1687 | if (unlikely(kdb_trap_printk)) { | ||
1688 | va_start(args, fmt); | ||
1689 | r = vkdb_printf(fmt, args); | ||
1690 | va_end(args); | ||
1691 | return r; | ||
1692 | } | ||
1693 | #endif | ||
1694 | va_start(args, fmt); | ||
1695 | r = vprintk_emit(0, -1, NULL, 0, fmt, args); | ||
1696 | va_end(args); | ||
1697 | |||
1698 | return r; | ||
1699 | } | ||
1700 | EXPORT_SYMBOL(printk); | ||
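/*
 * A minimal usage sketch of the loglevel and continuation handling above.
 * Illustrative only: the example_* function and message text are
 * hypothetical; the KERN_* prefixes are the standard ones from
 * <linux/kern_levels.h>.
 */
static void example_printk_usage(int err)
{
	/* explicit loglevel: the "<3>" header is stripped by vprintk_emit() */
	printk(KERN_ERR "example: operation failed, error %d\n", err);

	/* no loglevel given: default_message_loglevel is used */
	printk("example: falling back to defaults\n");

	/*
	 * continuation: fragments are merged into the per-task cont buffer
	 * until a newline (or another task's output) flushes them
	 */
	printk(KERN_INFO "example: probing");
	printk(KERN_CONT " ... done\n");
}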
1701 | |||
1702 | #else /* CONFIG_PRINTK */ | ||
1703 | |||
1704 | #define LOG_LINE_MAX 0 | ||
1705 | #define PREFIX_MAX 0 | ||
1707 | static u64 syslog_seq; | ||
1708 | static u32 syslog_idx; | ||
1709 | static u64 console_seq; | ||
1710 | static u32 console_idx; | ||
1711 | static enum log_flags syslog_prev; | ||
1712 | static u64 log_first_seq; | ||
1713 | static u32 log_first_idx; | ||
1714 | static u64 log_next_seq; | ||
1715 | static enum log_flags console_prev; | ||
1716 | static struct cont { | ||
1717 | size_t len; | ||
1718 | size_t cons; | ||
1719 | u8 level; | ||
1720 | bool flushed:1; | ||
1721 | } cont; | ||
1722 | static struct log *log_from_idx(u32 idx) { return NULL; } | ||
1723 | static u32 log_next(u32 idx) { return 0; } | ||
1724 | static void call_console_drivers(int level, const char *text, size_t len) {} | ||
1725 | static size_t msg_print_text(const struct log *msg, enum log_flags prev, | ||
1726 | bool syslog, char *buf, size_t size) { return 0; } | ||
1727 | static size_t cont_print_text(char *text, size_t size) { return 0; } | ||
1728 | |||
1729 | #endif /* CONFIG_PRINTK */ | ||
1730 | |||
1731 | #ifdef CONFIG_EARLY_PRINTK | ||
1732 | struct console *early_console; | ||
1733 | |||
1734 | void early_vprintk(const char *fmt, va_list ap) | ||
1735 | { | ||
1736 | if (early_console) { | ||
1737 | char buf[512]; | ||
1738 | int n = vscnprintf(buf, sizeof(buf), fmt, ap); | ||
1739 | |||
1740 | early_console->write(early_console, buf, n); | ||
1741 | } | ||
1742 | } | ||
1743 | |||
1744 | asmlinkage void early_printk(const char *fmt, ...) | ||
1745 | { | ||
1746 | va_list ap; | ||
1747 | |||
1748 | va_start(ap, fmt); | ||
1749 | early_vprintk(fmt, ap); | ||
1750 | va_end(ap); | ||
1751 | } | ||
1752 | #endif | ||
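/*
 * A minimal sketch of how arch setup code typically wires up the early
 * console used by early_printk() above. Everything named example_* is
 * hypothetical; real architectures supply their own raw-UART writer.
 */
static void example_early_write(struct console *con, const char *s,
				unsigned int n)
{
	/* push the bytes straight to the hardware: no locking, no buffering */
}

static struct console example_early_console = {
	.name	= "exearly",
	.write	= example_early_write,
	.flags	= CON_PRINTBUFFER | CON_BOOT,
	.index	= -1,
};

static void __init example_setup_early_printk(void)
{
	/* from here on, early_printk()/early_vprintk() output goes somewhere */
	early_console = &example_early_console;
	register_console(&example_early_console);
}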
1753 | |||
1754 | static int __add_preferred_console(char *name, int idx, char *options, | ||
1755 | char *brl_options) | ||
1756 | { | ||
1757 | struct console_cmdline *c; | ||
1758 | int i; | ||
1759 | |||
1760 | /* | ||
1761 | * See if this tty is not yet registered, and | ||
1762 | * if we have a slot free. | ||
1763 | */ | ||
1764 | for (i = 0; i < MAX_CMDLINECONSOLES && console_cmdline[i].name[0]; i++) | ||
1765 | if (strcmp(console_cmdline[i].name, name) == 0 && | ||
1766 | console_cmdline[i].index == idx) { | ||
1767 | if (!brl_options) | ||
1768 | selected_console = i; | ||
1769 | return 0; | ||
1770 | } | ||
1771 | if (i == MAX_CMDLINECONSOLES) | ||
1772 | return -E2BIG; | ||
1773 | if (!brl_options) | ||
1774 | selected_console = i; | ||
1775 | c = &console_cmdline[i]; | ||
1776 | strlcpy(c->name, name, sizeof(c->name)); | ||
1777 | c->options = options; | ||
1778 | #ifdef CONFIG_A11Y_BRAILLE_CONSOLE | ||
1779 | c->brl_options = brl_options; | ||
1780 | #endif | ||
1781 | c->index = idx; | ||
1782 | return 0; | ||
1783 | } | ||
1784 | /* | ||
1785 | * Set up a list of consoles. Called from init/main.c | ||
1786 | */ | ||
1787 | static int __init console_setup(char *str) | ||
1788 | { | ||
1789 | char buf[sizeof(console_cmdline[0].name) + 4]; /* 4 for index */ | ||
1790 | char *s, *options, *brl_options = NULL; | ||
1791 | int idx; | ||
1792 | |||
1793 | #ifdef CONFIG_A11Y_BRAILLE_CONSOLE | ||
1794 | if (!memcmp(str, "brl,", 4)) { | ||
1795 | brl_options = ""; | ||
1796 | str += 4; | ||
1797 | } else if (!memcmp(str, "brl=", 4)) { | ||
1798 | brl_options = str + 4; | ||
1799 | str = strchr(brl_options, ','); | ||
1800 | if (!str) { | ||
1801 | printk(KERN_ERR "need port name after brl=\n"); | ||
1802 | return 1; | ||
1803 | } | ||
1804 | *(str++) = 0; | ||
1805 | } | ||
1806 | #endif | ||
1807 | |||
1808 | /* | ||
1809 | * Decode str into name, index, options. | ||
1810 | */ | ||
1811 | if (str[0] >= '0' && str[0] <= '9') { | ||
1812 | strcpy(buf, "ttyS"); | ||
1813 | strncpy(buf + 4, str, sizeof(buf) - 5); | ||
1814 | } else { | ||
1815 | strncpy(buf, str, sizeof(buf) - 1); | ||
1816 | } | ||
1817 | buf[sizeof(buf) - 1] = 0; | ||
1818 | if ((options = strchr(str, ',')) != NULL) | ||
1819 | *(options++) = 0; | ||
1820 | #ifdef __sparc__ | ||
1821 | if (!strcmp(str, "ttya")) | ||
1822 | strcpy(buf, "ttyS0"); | ||
1823 | if (!strcmp(str, "ttyb")) | ||
1824 | strcpy(buf, "ttyS1"); | ||
1825 | #endif | ||
1826 | for (s = buf; *s; s++) | ||
1827 | if ((*s >= '0' && *s <= '9') || *s == ',') | ||
1828 | break; | ||
1829 | idx = simple_strtoul(s, NULL, 10); | ||
1830 | *s = 0; | ||
1831 | |||
1832 | __add_preferred_console(buf, idx, options, brl_options); | ||
1833 | console_set_on_cmdline = 1; | ||
1834 | return 1; | ||
1835 | } | ||
1836 | __setup("console=", console_setup); | ||
1837 | |||
1838 | /** | ||
1839 | * add_preferred_console - add a device to the list of preferred consoles. | ||
1840 | * @name: device name | ||
1841 | * @idx: device index | ||
1842 | * @options: options for this console | ||
1843 | * | ||
1844 | * The last preferred console added will be used for kernel messages | ||
1845 | * and stdin/out/err for init. Normally this is used by console_setup | ||
1846 | * above to handle user-supplied console arguments; however it can also | ||
1847 | * be used by arch-specific code either to override the user or more | ||
1848 | * commonly to provide a default console (i.e. from PROM variables) when | ||
1849 | * the user has not supplied one. | ||
1850 | */ | ||
1851 | int add_preferred_console(char *name, int idx, char *options) | ||
1852 | { | ||
1853 | return __add_preferred_console(name, idx, options, NULL); | ||
1854 | } | ||
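/*
 * A minimal sketch of the arch-code use described above: supply a default
 * console from firmware data when the user passed no console= argument.
 * The device name and options here are hypothetical.
 */
static int __init example_pick_default_console(void)
{
	if (!console_set_on_cmdline)
		add_preferred_console("ttyS", 0, "115200n8");
	return 0;
}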
1855 | |||
1856 | int update_console_cmdline(char *name, int idx, char *name_new, int idx_new, char *options) | ||
1857 | { | ||
1858 | struct console_cmdline *c; | ||
1859 | int i; | ||
1860 | |||
1861 | for (i = 0; i < MAX_CMDLINECONSOLES && console_cmdline[i].name[0]; i++) | ||
1862 | if (strcmp(console_cmdline[i].name, name) == 0 && | ||
1863 | console_cmdline[i].index == idx) { | ||
1864 | c = &console_cmdline[i]; | ||
1865 | strlcpy(c->name, name_new, sizeof(c->name)); | ||
1866 | c->name[sizeof(c->name) - 1] = 0; | ||
1867 | c->options = options; | ||
1868 | c->index = idx_new; | ||
1869 | return i; | ||
1870 | } | ||
1871 | /* not found */ | ||
1872 | return -1; | ||
1873 | } | ||
1874 | |||
1875 | bool console_suspend_enabled = 1; | ||
1876 | EXPORT_SYMBOL(console_suspend_enabled); | ||
1877 | |||
1878 | static int __init console_suspend_disable(char *str) | ||
1879 | { | ||
1880 | console_suspend_enabled = 0; | ||
1881 | return 1; | ||
1882 | } | ||
1883 | __setup("no_console_suspend", console_suspend_disable); | ||
1884 | module_param_named(console_suspend, console_suspend_enabled, | ||
1885 | bool, S_IRUGO | S_IWUSR); | ||
1886 | MODULE_PARM_DESC(console_suspend, "suspend console during suspend" | ||
1887 | " and hibernate operations"); | ||
1888 | |||
1889 | /** | ||
1890 | * suspend_console - suspend the console subsystem | ||
1891 | * | ||
1892 | * This disables printk() while we go into suspend states | ||
1893 | */ | ||
1894 | void suspend_console(void) | ||
1895 | { | ||
1896 | if (!console_suspend_enabled) | ||
1897 | return; | ||
1898 | printk("Suspending console(s) (use no_console_suspend to debug)\n"); | ||
1899 | console_lock(); | ||
1900 | console_suspended = 1; | ||
1901 | up(&console_sem); | ||
1902 | } | ||
1903 | |||
1904 | void resume_console(void) | ||
1905 | { | ||
1906 | if (!console_suspend_enabled) | ||
1907 | return; | ||
1908 | down(&console_sem); | ||
1909 | console_suspended = 0; | ||
1910 | console_unlock(); | ||
1911 | } | ||
1912 | |||
1913 | /** | ||
1914 | * console_cpu_notify - print deferred console messages after CPU hotplug | ||
1915 | * @self: notifier struct | ||
1916 | * @action: CPU hotplug event | ||
1917 | * @hcpu: unused | ||
1918 | * | ||
1919 | * If printk() is called from a CPU that is not online yet, the messages | ||
1920 | * will be spooled but will not show up on the console. This function is | ||
1921 | * called when a new CPU comes online (or fails to come up), and ensures | ||
1922 | * that any such output gets printed. | ||
1923 | */ | ||
1924 | static int console_cpu_notify(struct notifier_block *self, | ||
1925 | unsigned long action, void *hcpu) | ||
1926 | { | ||
1927 | switch (action) { | ||
1928 | case CPU_ONLINE: | ||
1929 | case CPU_DEAD: | ||
1930 | case CPU_DOWN_FAILED: | ||
1931 | case CPU_UP_CANCELED: | ||
1932 | console_lock(); | ||
1933 | console_unlock(); | ||
1934 | } | ||
1935 | return NOTIFY_OK; | ||
1936 | } | ||
1937 | |||
1938 | /** | ||
1939 | * console_lock - lock the console system for exclusive use. | ||
1940 | * | ||
1941 | * Acquires a lock which guarantees that the caller has | ||
1942 | * exclusive access to the console system and the console_drivers list. | ||
1943 | * | ||
1944 | * Can sleep, returns nothing. | ||
1945 | */ | ||
1946 | void console_lock(void) | ||
1947 | { | ||
1948 | might_sleep(); | ||
1949 | |||
1950 | down(&console_sem); | ||
1951 | if (console_suspended) | ||
1952 | return; | ||
1953 | console_locked = 1; | ||
1954 | console_may_schedule = 1; | ||
1955 | mutex_acquire(&console_lock_dep_map, 0, 0, _RET_IP_); | ||
1956 | } | ||
1957 | EXPORT_SYMBOL(console_lock); | ||
1958 | |||
1959 | /** | ||
1960 | * console_trylock - try to lock the console system for exclusive use. | ||
1961 | * | ||
1962 | * Tries to acquire a lock which guarantees that the caller has | ||
1963 | * exclusive access to the console system and the console_drivers list. | ||
1964 | * | ||
1965 | * returns 1 on success, and 0 on failure to acquire the lock. | ||
1966 | */ | ||
1967 | int console_trylock(void) | ||
1968 | { | ||
1969 | if (down_trylock(&console_sem)) | ||
1970 | return 0; | ||
1971 | if (console_suspended) { | ||
1972 | up(&console_sem); | ||
1973 | return 0; | ||
1974 | } | ||
1975 | console_locked = 1; | ||
1976 | console_may_schedule = 0; | ||
1977 | mutex_acquire(&console_lock_dep_map, 0, 1, _RET_IP_); | ||
1978 | return 1; | ||
1979 | } | ||
1980 | EXPORT_SYMBOL(console_trylock); | ||
1981 | |||
1982 | int is_console_locked(void) | ||
1983 | { | ||
1984 | return console_locked; | ||
1985 | } | ||
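/*
 * A minimal sketch of the usual locking pattern for code that walks or
 * modifies console state; console_lock() may sleep, so atomic contexts
 * must use console_trylock() instead. The loop body is hypothetical.
 */
static void example_with_console_lock(void)
{
	struct console *con;

	console_lock();			/* may sleep */
	for_each_console(con) {
		/* inspect or adjust con->flags here */
	}
	console_unlock();		/* also flushes any buffered output */
}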
1986 | |||
1987 | static void console_cont_flush(char *text, size_t size) | ||
1988 | { | ||
1989 | unsigned long flags; | ||
1990 | size_t len; | ||
1991 | |||
1992 | raw_spin_lock_irqsave(&logbuf_lock, flags); | ||
1993 | |||
1994 | if (!cont.len) | ||
1995 | goto out; | ||
1996 | |||
1997 | /* | ||
1998 | * We still queue earlier records, likely because the console was | ||
1999 | * busy. The earlier ones need to be printed before this one; we | ||
2000 | * did not flush any fragment so far, so just let it queue up. | ||
2001 | */ | ||
2002 | if (console_seq < log_next_seq && !cont.cons) | ||
2003 | goto out; | ||
2004 | |||
2005 | len = cont_print_text(text, size); | ||
2006 | raw_spin_unlock(&logbuf_lock); | ||
2007 | stop_critical_timings(); | ||
2008 | call_console_drivers(cont.level, text, len); | ||
2009 | start_critical_timings(); | ||
2010 | local_irq_restore(flags); | ||
2011 | return; | ||
2012 | out: | ||
2013 | raw_spin_unlock_irqrestore(&logbuf_lock, flags); | ||
2014 | } | ||
2015 | |||
2016 | /** | ||
2017 | * console_unlock - unlock the console system | ||
2018 | * | ||
2019 | * Releases the console_lock which the caller holds on the console system | ||
2020 | * and the console driver list. | ||
2021 | * | ||
2022 | * While the console_lock was held, console output may have been buffered | ||
2023 | * by printk(). If this is the case, console_unlock() emits | ||
2024 | * the output prior to releasing the lock. | ||
2025 | * | ||
2026 | * If there is output waiting, we wake /dev/kmsg and syslog() users. | ||
2027 | * | ||
2028 | * console_unlock() may be called from any context. | ||
2029 | */ | ||
2030 | void console_unlock(void) | ||
2031 | { | ||
2032 | static char text[LOG_LINE_MAX + PREFIX_MAX]; | ||
2033 | static u64 seen_seq; | ||
2034 | unsigned long flags; | ||
2035 | bool wake_klogd = false; | ||
2036 | bool retry; | ||
2037 | |||
2038 | if (console_suspended) { | ||
2039 | up(&console_sem); | ||
2040 | return; | ||
2041 | } | ||
2042 | |||
2043 | console_may_schedule = 0; | ||
2044 | |||
2045 | /* flush buffered message fragment immediately to console */ | ||
2046 | console_cont_flush(text, sizeof(text)); | ||
2047 | again: | ||
2048 | for (;;) { | ||
2049 | struct log *msg; | ||
2050 | size_t len; | ||
2051 | int level; | ||
2052 | |||
2053 | raw_spin_lock_irqsave(&logbuf_lock, flags); | ||
2054 | if (seen_seq != log_next_seq) { | ||
2055 | wake_klogd = true; | ||
2056 | seen_seq = log_next_seq; | ||
2057 | } | ||
2058 | |||
2059 | if (console_seq < log_first_seq) { | ||
2060 | /* messages are gone, move to first one */ | ||
2061 | console_seq = log_first_seq; | ||
2062 | console_idx = log_first_idx; | ||
2063 | console_prev = 0; | ||
2064 | } | ||
2065 | skip: | ||
2066 | if (console_seq == log_next_seq) | ||
2067 | break; | ||
2068 | |||
2069 | msg = log_from_idx(console_idx); | ||
2070 | if (msg->flags & LOG_NOCONS) { | ||
2071 | /* | ||
2072 | * Skip record we have buffered and already printed | ||
2073 | * directly to the console when we received it. | ||
2074 | */ | ||
2075 | console_idx = log_next(console_idx); | ||
2076 | console_seq++; | ||
2077 | /* | ||
2078 | * We will get here again when we register a new | ||
2079 | * CON_PRINTBUFFER console. Clear the flag so we | ||
2080 | * will properly dump everything later. | ||
2081 | */ | ||
2082 | msg->flags &= ~LOG_NOCONS; | ||
2083 | console_prev = msg->flags; | ||
2084 | goto skip; | ||
2085 | } | ||
2086 | |||
2087 | level = msg->level; | ||
2088 | len = msg_print_text(msg, console_prev, false, | ||
2089 | text, sizeof(text)); | ||
2090 | console_idx = log_next(console_idx); | ||
2091 | console_seq++; | ||
2092 | console_prev = msg->flags; | ||
2093 | raw_spin_unlock(&logbuf_lock); | ||
2094 | |||
2095 | stop_critical_timings(); /* don't trace print latency */ | ||
2096 | call_console_drivers(level, text, len); | ||
2097 | start_critical_timings(); | ||
2098 | local_irq_restore(flags); | ||
2099 | } | ||
2100 | console_locked = 0; | ||
2101 | mutex_release(&console_lock_dep_map, 1, _RET_IP_); | ||
2102 | |||
2103 | /* Release the exclusive_console once it is used */ | ||
2104 | if (unlikely(exclusive_console)) | ||
2105 | exclusive_console = NULL; | ||
2106 | |||
2107 | raw_spin_unlock(&logbuf_lock); | ||
2108 | |||
2109 | up(&console_sem); | ||
2110 | |||
2111 | /* | ||
2112 | * Someone could have filled up the buffer again, so re-check if there's | ||
2113 | * something to flush. In case we cannot trylock the console_sem again, | ||
2114 | * there's a new owner and the console_unlock() from them will do the | ||
2115 | * flush, no worries. | ||
2116 | */ | ||
2117 | raw_spin_lock(&logbuf_lock); | ||
2118 | retry = console_seq != log_next_seq; | ||
2119 | raw_spin_unlock_irqrestore(&logbuf_lock, flags); | ||
2120 | |||
2121 | if (retry && console_trylock()) | ||
2122 | goto again; | ||
2123 | |||
2124 | if (wake_klogd) | ||
2125 | wake_up_klogd(); | ||
2126 | } | ||
2127 | EXPORT_SYMBOL(console_unlock); | ||
2128 | |||
2129 | /** | ||
2130 | * console_conditional_schedule - yield the CPU if required | ||
2131 | * | ||
2132 | * If the console code is currently allowed to sleep, and | ||
2133 | * if this CPU should yield the CPU to another task, do | ||
2134 | * so here. | ||
2135 | * | ||
2136 | * Must be called within console_lock(). | ||
2137 | */ | ||
2138 | void __sched console_conditional_schedule(void) | ||
2139 | { | ||
2140 | if (console_may_schedule) | ||
2141 | cond_resched(); | ||
2142 | } | ||
2143 | EXPORT_SYMBOL(console_conditional_schedule); | ||
2144 | |||
2145 | void console_unblank(void) | ||
2146 | { | ||
2147 | struct console *c; | ||
2148 | |||
2149 | /* | ||
2150 | * console_unblank can no longer be called in interrupt context unless | ||
2151 | * oops_in_progress is set to 1. | ||
2152 | */ | ||
2153 | if (oops_in_progress) { | ||
2154 | if (down_trylock(&console_sem) != 0) | ||
2155 | return; | ||
2156 | } else | ||
2157 | console_lock(); | ||
2158 | |||
2159 | console_locked = 1; | ||
2160 | console_may_schedule = 0; | ||
2161 | for_each_console(c) | ||
2162 | if ((c->flags & CON_ENABLED) && c->unblank) | ||
2163 | c->unblank(); | ||
2164 | console_unlock(); | ||
2165 | } | ||
2166 | |||
2167 | /* | ||
2168 | * Return the console tty driver structure and its associated index | ||
2169 | */ | ||
2170 | struct tty_driver *console_device(int *index) | ||
2171 | { | ||
2172 | struct console *c; | ||
2173 | struct tty_driver *driver = NULL; | ||
2174 | |||
2175 | console_lock(); | ||
2176 | for_each_console(c) { | ||
2177 | if (!c->device) | ||
2178 | continue; | ||
2179 | driver = c->device(c, index); | ||
2180 | if (driver) | ||
2181 | break; | ||
2182 | } | ||
2183 | console_unlock(); | ||
2184 | return driver; | ||
2185 | } | ||
2186 | |||
2187 | /* | ||
2188 | * Prevent further output on the passed console device so that (for example) | ||
2189 | * serial drivers can disable console output before suspending a port, and can | ||
2190 | * re-enable output afterwards. | ||
2191 | */ | ||
2192 | void console_stop(struct console *console) | ||
2193 | { | ||
2194 | console_lock(); | ||
2195 | console->flags &= ~CON_ENABLED; | ||
2196 | console_unlock(); | ||
2197 | } | ||
2198 | EXPORT_SYMBOL(console_stop); | ||
2199 | |||
2200 | void console_start(struct console *console) | ||
2201 | { | ||
2202 | console_lock(); | ||
2203 | console->flags |= CON_ENABLED; | ||
2204 | console_unlock(); | ||
2205 | } | ||
2206 | EXPORT_SYMBOL(console_start); | ||
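/*
 * A minimal sketch of the serial-suspend use case described above
 * console_stop(). struct example_port and its members are hypothetical
 * stand-ins for a driver's own port structure.
 */
struct example_port {
	struct console *cons;	/* console attached to this port, if any */
};

static void example_suspend_port(struct example_port *port)
{
	if (port->cons)
		console_stop(port->cons);	/* silence console output */
	/* ... save registers, power the hardware down ... */
}

static void example_resume_port(struct example_port *port)
{
	/* ... power the hardware up, restore registers ... */
	if (port->cons)
		console_start(port->cons);	/* re-enable console output */
}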
2207 | |||
2208 | static int __read_mostly keep_bootcon; | ||
2209 | |||
2210 | static int __init keep_bootcon_setup(char *str) | ||
2211 | { | ||
2212 | keep_bootcon = 1; | ||
2213 | printk(KERN_INFO "debug: skip boot console de-registration.\n"); | ||
2214 | |||
2215 | return 0; | ||
2216 | } | ||
2217 | |||
2218 | early_param("keep_bootcon", keep_bootcon_setup); | ||
2219 | |||
2220 | /* | ||
2221 | * The console driver calls this routine during kernel initialization | ||
2222 | * to register the console printing procedure with printk() and to | ||
2223 | * print any messages that were printed by the kernel before the | ||
2224 | * console driver was initialized. | ||
2225 | * | ||
2226 | * This can happen pretty early during the boot process (because of | ||
2227 | * early_printk) - sometimes before setup_arch() completes - be careful | ||
2228 | * of what kernel features are used - they may not be initialised yet. | ||
2229 | * | ||
2230 | * There are two types of consoles - bootconsoles (early_printk) and | ||
2231 | * "real" consoles (everything which is not a bootconsole) which are | ||
2232 | * handled differently. | ||
2233 | * - Any number of bootconsoles can be registered at any time. | ||
2234 | * - As soon as a "real" console is registered, all bootconsoles | ||
2235 | * will be unregistered automatically. | ||
2236 | * - Once a "real" console is registered, any attempt to register a | ||
2237 | * bootconsole will be rejected. | ||
2238 | */ | ||
2239 | void register_console(struct console *newcon) | ||
2240 | { | ||
2241 | int i; | ||
2242 | unsigned long flags; | ||
2243 | struct console *bcon = NULL; | ||
2244 | |||
2245 | /* | ||
2246 | * before we register a new CON_BOOT console, make sure we don't | ||
2247 | * already have a valid console | ||
2248 | */ | ||
2249 | if (console_drivers && newcon->flags & CON_BOOT) { | ||
2250 | /* find the last or real console */ | ||
2251 | for_each_console(bcon) { | ||
2252 | if (!(bcon->flags & CON_BOOT)) { | ||
2253 | printk(KERN_INFO "Too late to register bootconsole %s%d\n", | ||
2254 | newcon->name, newcon->index); | ||
2255 | return; | ||
2256 | } | ||
2257 | } | ||
2258 | } | ||
2259 | |||
2260 | if (console_drivers && console_drivers->flags & CON_BOOT) | ||
2261 | bcon = console_drivers; | ||
2262 | |||
2263 | if (preferred_console < 0 || bcon || !console_drivers) | ||
2264 | preferred_console = selected_console; | ||
2265 | |||
2266 | if (newcon->early_setup) | ||
2267 | newcon->early_setup(); | ||
2268 | |||
2269 | /* | ||
2270 | * See if we want to use this console driver. If we | ||
2271 | * didn't select a console we take the first one | ||
2272 | * that registers here. | ||
2273 | */ | ||
2274 | if (preferred_console < 0) { | ||
2275 | if (newcon->index < 0) | ||
2276 | newcon->index = 0; | ||
2277 | if (newcon->setup == NULL || | ||
2278 | newcon->setup(newcon, NULL) == 0) { | ||
2279 | newcon->flags |= CON_ENABLED; | ||
2280 | if (newcon->device) { | ||
2281 | newcon->flags |= CON_CONSDEV; | ||
2282 | preferred_console = 0; | ||
2283 | } | ||
2284 | } | ||
2285 | } | ||
2286 | |||
2287 | /* | ||
2288 | * See if this console matches one we selected on | ||
2289 | * the command line. | ||
2290 | */ | ||
2291 | for (i = 0; i < MAX_CMDLINECONSOLES && console_cmdline[i].name[0]; | ||
2292 | i++) { | ||
2293 | if (strcmp(console_cmdline[i].name, newcon->name) != 0) | ||
2294 | continue; | ||
2295 | if (newcon->index >= 0 && | ||
2296 | newcon->index != console_cmdline[i].index) | ||
2297 | continue; | ||
2298 | if (newcon->index < 0) | ||
2299 | newcon->index = console_cmdline[i].index; | ||
2300 | #ifdef CONFIG_A11Y_BRAILLE_CONSOLE | ||
2301 | if (console_cmdline[i].brl_options) { | ||
2302 | newcon->flags |= CON_BRL; | ||
2303 | braille_register_console(newcon, | ||
2304 | console_cmdline[i].index, | ||
2305 | console_cmdline[i].options, | ||
2306 | console_cmdline[i].brl_options); | ||
2307 | return; | ||
2308 | } | ||
2309 | #endif | ||
2310 | if (newcon->setup && | ||
2311 | newcon->setup(newcon, console_cmdline[i].options) != 0) | ||
2312 | break; | ||
2313 | newcon->flags |= CON_ENABLED; | ||
2314 | newcon->index = console_cmdline[i].index; | ||
2315 | if (i == selected_console) { | ||
2316 | newcon->flags |= CON_CONSDEV; | ||
2317 | preferred_console = selected_console; | ||
2318 | } | ||
2319 | break; | ||
2320 | } | ||
2321 | |||
2322 | if (!(newcon->flags & CON_ENABLED)) | ||
2323 | return; | ||
2324 | |||
2325 | /* | ||
2326 | * If we have a bootconsole, and are switching to a real console, | ||
2327 | * don't print everything out again, since when the boot console, and | ||
2328 | * the real console are the same physical device, it's annoying to | ||
2329 | * see the beginning boot messages twice | ||
2330 | */ | ||
2331 | if (bcon && ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV)) | ||
2332 | newcon->flags &= ~CON_PRINTBUFFER; | ||
2333 | |||
2334 | /* | ||
2335 | * Put this console in the list - keep the | ||
2336 | * preferred driver at the head of the list. | ||
2337 | */ | ||
2338 | console_lock(); | ||
2339 | if ((newcon->flags & CON_CONSDEV) || console_drivers == NULL) { | ||
2340 | newcon->next = console_drivers; | ||
2341 | console_drivers = newcon; | ||
2342 | if (newcon->next) | ||
2343 | newcon->next->flags &= ~CON_CONSDEV; | ||
2344 | } else { | ||
2345 | newcon->next = console_drivers->next; | ||
2346 | console_drivers->next = newcon; | ||
2347 | } | ||
2348 | if (newcon->flags & CON_PRINTBUFFER) { | ||
2349 | /* | ||
2350 | * console_unlock() will print out the buffered messages | ||
2351 | * for us. | ||
2352 | */ | ||
2353 | raw_spin_lock_irqsave(&logbuf_lock, flags); | ||
2354 | console_seq = syslog_seq; | ||
2355 | console_idx = syslog_idx; | ||
2356 | console_prev = syslog_prev; | ||
2357 | raw_spin_unlock_irqrestore(&logbuf_lock, flags); | ||
2358 | /* | ||
2359 | * We're about to replay the log buffer. Only do this to the | ||
2360 | * just-registered console to avoid excessive message spam to | ||
2361 | * the already-registered consoles. | ||
2362 | */ | ||
2363 | exclusive_console = newcon; | ||
2364 | } | ||
2365 | console_unlock(); | ||
2366 | console_sysfs_notify(); | ||
2367 | |||
2368 | /* | ||
2369 | * By unregistering the bootconsoles after we enable the real console | ||
2370 | * we get the "console xxx enabled" message on all the consoles - | ||
2371 | * boot consoles, real consoles, etc - this is to ensure that end | ||
2372 | * users know there might be something in the kernel's log buffer that | ||
2373 | * went to the bootconsole (that they do not see on the real console) | ||
2374 | */ | ||
2375 | if (bcon && | ||
2376 | ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV) && | ||
2377 | !keep_bootcon) { | ||
2378 | /* we need to iterate through twice, to make sure we print | ||
2379 | * everything out, before we unregister the console(s) | ||
2380 | */ | ||
2381 | printk(KERN_INFO "console [%s%d] enabled, bootconsole disabled\n", | ||
2382 | newcon->name, newcon->index); | ||
2383 | for_each_console(bcon) | ||
2384 | if (bcon->flags & CON_BOOT) | ||
2385 | unregister_console(bcon); | ||
2386 | } else { | ||
2387 | printk(KERN_INFO "%sconsole [%s%d] enabled\n", | ||
2388 | (newcon->flags & CON_BOOT) ? "boot" : "" , | ||
2389 | newcon->name, newcon->index); | ||
2390 | } | ||
2391 | } | ||
2392 | EXPORT_SYMBOL(register_console); | ||
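/*
 * A minimal sketch of what a driver hands to register_console(). The
 * example_* names and the empty write implementation are hypothetical;
 * see struct console in <linux/console.h> for the full callback set.
 */
static void example_console_write(struct console *con, const char *s,
				  unsigned int count)
{
	/* send 'count' bytes from 's' to the device */
}

static struct console example_console = {
	.name	= "excon",
	.write	= example_console_write,
	.flags	= CON_PRINTBUFFER,	/* replay the log buffer when registered */
	.index	= -1,			/* match any console=excon<N> argument */
};

static int __init example_console_init(void)
{
	register_console(&example_console);
	return 0;
}
console_initcall(example_console_init);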
2393 | |||
2394 | int unregister_console(struct console *console) | ||
2395 | { | ||
2396 | struct console *a, *b; | ||
2397 | int res = 1; | ||
2398 | |||
2399 | #ifdef CONFIG_A11Y_BRAILLE_CONSOLE | ||
2400 | if (console->flags & CON_BRL) | ||
2401 | return braille_unregister_console(console); | ||
2402 | #endif | ||
2403 | |||
2404 | console_lock(); | ||
2405 | if (console_drivers == console) { | ||
2406 | console_drivers=console->next; | ||
2407 | res = 0; | ||
2408 | } else if (console_drivers) { | ||
2409 | for (a=console_drivers->next, b=console_drivers ; | ||
2410 | a; b=a, a=b->next) { | ||
2411 | if (a == console) { | ||
2412 | b->next = a->next; | ||
2413 | res = 0; | ||
2414 | break; | ||
2415 | } | ||
2416 | } | ||
2417 | } | ||
2418 | |||
2419 | /* | ||
2420 | * If this isn't the last console and it has CON_CONSDEV set, we | ||
2421 | * need to set it on the next preferred console. | ||
2422 | */ | ||
2423 | if (console_drivers != NULL && console->flags & CON_CONSDEV) | ||
2424 | console_drivers->flags |= CON_CONSDEV; | ||
2425 | |||
2426 | console_unlock(); | ||
2427 | console_sysfs_notify(); | ||
2428 | return res; | ||
2429 | } | ||
2430 | EXPORT_SYMBOL(unregister_console); | ||
2431 | |||
2432 | static int __init printk_late_init(void) | ||
2433 | { | ||
2434 | struct console *con; | ||
2435 | |||
2436 | for_each_console(con) { | ||
2437 | if (!keep_bootcon && con->flags & CON_BOOT) { | ||
2438 | printk(KERN_INFO "turn off boot console %s%d\n", | ||
2439 | con->name, con->index); | ||
2440 | unregister_console(con); | ||
2441 | } | ||
2442 | } | ||
2443 | hotcpu_notifier(console_cpu_notify, 0); | ||
2444 | return 0; | ||
2445 | } | ||
2446 | late_initcall(printk_late_init); | ||
2447 | |||
2448 | #if defined CONFIG_PRINTK | ||
2449 | /* | ||
2450 | * Delayed printk version, for scheduler-internal messages: | ||
2451 | */ | ||
2452 | #define PRINTK_BUF_SIZE 512 | ||
2453 | |||
2454 | #define PRINTK_PENDING_WAKEUP 0x01 | ||
2455 | #define PRINTK_PENDING_SCHED 0x02 | ||
2456 | |||
2457 | static DEFINE_PER_CPU(int, printk_pending); | ||
2458 | static DEFINE_PER_CPU(char [PRINTK_BUF_SIZE], printk_sched_buf); | ||
2459 | |||
2460 | static void wake_up_klogd_work_func(struct irq_work *irq_work) | ||
2461 | { | ||
2462 | int pending = __this_cpu_xchg(printk_pending, 0); | ||
2463 | |||
2464 | if (pending & PRINTK_PENDING_SCHED) { | ||
2465 | char *buf = __get_cpu_var(printk_sched_buf); | ||
2466 | printk(KERN_WARNING "[sched_delayed] %s", buf); | ||
2467 | } | ||
2468 | |||
2469 | if (pending & PRINTK_PENDING_WAKEUP) | ||
2470 | wake_up_interruptible(&log_wait); | ||
2471 | } | ||
2472 | |||
2473 | static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = { | ||
2474 | .func = wake_up_klogd_work_func, | ||
2475 | .flags = IRQ_WORK_LAZY, | ||
2476 | }; | ||
2477 | |||
2478 | void wake_up_klogd(void) | ||
2479 | { | ||
2480 | preempt_disable(); | ||
2481 | if (waitqueue_active(&log_wait)) { | ||
2482 | this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP); | ||
2483 | irq_work_queue(&__get_cpu_var(wake_up_klogd_work)); | ||
2484 | } | ||
2485 | preempt_enable(); | ||
2486 | } | ||
2487 | |||
2488 | int printk_sched(const char *fmt, ...) | ||
2489 | { | ||
2490 | unsigned long flags; | ||
2491 | va_list args; | ||
2492 | char *buf; | ||
2493 | int r; | ||
2494 | |||
2495 | local_irq_save(flags); | ||
2496 | buf = __get_cpu_var(printk_sched_buf); | ||
2497 | |||
2498 | va_start(args, fmt); | ||
2499 | r = vsnprintf(buf, PRINTK_BUF_SIZE, fmt, args); | ||
2500 | va_end(args); | ||
2501 | |||
2502 | __this_cpu_or(printk_pending, PRINTK_PENDING_SCHED); | ||
2503 | irq_work_queue(&__get_cpu_var(wake_up_klogd_work)); | ||
2504 | local_irq_restore(flags); | ||
2505 | |||
2506 | return r; | ||
2507 | } | ||
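/*
 * A minimal sketch of a printk_sched() call site. It exists for
 * scheduler-internal messages where a direct printk() could recurse into
 * the scheduler: the text is stashed in the per-CPU buffer and emitted
 * later from irq work with a "[sched_delayed]" prefix (see
 * wake_up_klogd_work_func() above). The message below is hypothetical.
 */
static void example_report_from_scheduler_context(int cpu)
{
	printk_sched("example: CPU%d runqueue imbalance detected\n", cpu);
}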
2508 | |||
2509 | /* | ||
2510 | * printk rate limiting, lifted from the networking subsystem. | ||
2511 | * | ||
2512 | * This enforces a rate limit: not more than 10 kernel messages | ||
2513 | * every 5s to make a denial-of-service attack impossible. | ||
2514 | */ | ||
2515 | DEFINE_RATELIMIT_STATE(printk_ratelimit_state, 5 * HZ, 10); | ||
2516 | |||
2517 | int __printk_ratelimit(const char *func) | ||
2518 | { | ||
2519 | return ___ratelimit(&printk_ratelimit_state, func); | ||
2520 | } | ||
2521 | EXPORT_SYMBOL(__printk_ratelimit); | ||
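/*
 * A minimal sketch of the global rate limit in use, via the
 * printk_ratelimit() wrapper that ends up in __printk_ratelimit() above.
 * The message is hypothetical.
 */
static void example_noisy_path(void)
{
	if (printk_ratelimit())
		printk(KERN_WARNING "example: dropping packets\n");
}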
2522 | |||
2523 | /** | ||
2524 | * printk_timed_ratelimit - caller-controlled printk ratelimiting | ||
2525 | * @caller_jiffies: pointer to caller's state | ||
2526 | * @interval_msecs: minimum interval between prints | ||
2527 | * | ||
2528 | * printk_timed_ratelimit() returns true if more than @interval_msecs | ||
2529 | * milliseconds have elapsed since the last time printk_timed_ratelimit() | ||
2530 | * returned true. | ||
2531 | */ | ||
2532 | bool printk_timed_ratelimit(unsigned long *caller_jiffies, | ||
2533 | unsigned int interval_msecs) | ||
2534 | { | ||
2535 | if (*caller_jiffies == 0 | ||
2536 | || !time_in_range(jiffies, *caller_jiffies, | ||
2537 | *caller_jiffies | ||
2538 | + msecs_to_jiffies(interval_msecs))) { | ||
2539 | *caller_jiffies = jiffies; | ||
2540 | return true; | ||
2541 | } | ||
2542 | return false; | ||
2543 | } | ||
2544 | EXPORT_SYMBOL(printk_timed_ratelimit); | ||
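/*
 * A minimal sketch of caller-controlled rate limiting with
 * printk_timed_ratelimit(); each call site keeps its own timestamp.
 * The one-minute interval and the message are hypothetical.
 */
static void example_timed_warning(void)
{
	static unsigned long last_warned;

	if (printk_timed_ratelimit(&last_warned, 60 * 1000))
		printk(KERN_WARNING "example: still waiting for hardware\n");
}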
2545 | |||
2546 | static DEFINE_SPINLOCK(dump_list_lock); | ||
2547 | static LIST_HEAD(dump_list); | ||
2548 | |||
2549 | /** | ||
2550 | * kmsg_dump_register - register a kernel log dumper. | ||
2551 | * @dumper: pointer to the kmsg_dumper structure | ||
2552 | * | ||
2553 | * Adds a kernel log dumper to the system. The dump callback in the | ||
2554 | * structure will be called when the kernel oopses or panics and must be | ||
2555 | * set. Returns zero on success and %-EINVAL or %-EBUSY otherwise. | ||
2556 | */ | ||
2557 | int kmsg_dump_register(struct kmsg_dumper *dumper) | ||
2558 | { | ||
2559 | unsigned long flags; | ||
2560 | int err = -EBUSY; | ||
2561 | |||
2562 | /* The dump callback needs to be set */ | ||
2563 | if (!dumper->dump) | ||
2564 | return -EINVAL; | ||
2565 | |||
2566 | spin_lock_irqsave(&dump_list_lock, flags); | ||
2567 | /* Don't allow registering multiple times */ | ||
2568 | if (!dumper->registered) { | ||
2569 | dumper->registered = 1; | ||
2570 | list_add_tail_rcu(&dumper->list, &dump_list); | ||
2571 | err = 0; | ||
2572 | } | ||
2573 | spin_unlock_irqrestore(&dump_list_lock, flags); | ||
2574 | |||
2575 | return err; | ||
2576 | } | ||
2577 | EXPORT_SYMBOL_GPL(kmsg_dump_register); | ||
2578 | |||
2579 | /** | ||
2580 | * kmsg_dump_unregister - unregister a kmsg dumper. | ||
2581 | * @dumper: pointer to the kmsg_dumper structure | ||
2582 | * | ||
2583 | * Removes a dump device from the system. Returns zero on success and | ||
2584 | * %-EINVAL otherwise. | ||
2585 | */ | ||
2586 | int kmsg_dump_unregister(struct kmsg_dumper *dumper) | ||
2587 | { | ||
2588 | unsigned long flags; | ||
2589 | int err = -EINVAL; | ||
2590 | |||
2591 | spin_lock_irqsave(&dump_list_lock, flags); | ||
2592 | if (dumper->registered) { | ||
2593 | dumper->registered = 0; | ||
2594 | list_del_rcu(&dumper->list); | ||
2595 | err = 0; | ||
2596 | } | ||
2597 | spin_unlock_irqrestore(&dump_list_lock, flags); | ||
2598 | synchronize_rcu(); | ||
2599 | |||
2600 | return err; | ||
2601 | } | ||
2602 | EXPORT_SYMBOL_GPL(kmsg_dump_unregister); | ||
2603 | |||
2604 | static bool always_kmsg_dump; | ||
2605 | module_param_named(always_kmsg_dump, always_kmsg_dump, bool, S_IRUGO | S_IWUSR); | ||
2606 | |||
2607 | /** | ||
2608 | * kmsg_dump - dump kernel log to kernel message dumpers. | ||
2609 | * @reason: the reason (oops, panic etc) for dumping | ||
2610 | * | ||
2611 | * Call each of the registered dumper's dump() callback, which can | ||
2612 | * retrieve the kmsg records with kmsg_dump_get_line() or | ||
2613 | * kmsg_dump_get_buffer(). | ||
2614 | */ | ||
2615 | void kmsg_dump(enum kmsg_dump_reason reason) | ||
2616 | { | ||
2617 | struct kmsg_dumper *dumper; | ||
2618 | unsigned long flags; | ||
2619 | |||
2620 | if ((reason > KMSG_DUMP_OOPS) && !always_kmsg_dump) | ||
2621 | return; | ||
2622 | |||
2623 | rcu_read_lock(); | ||
2624 | list_for_each_entry_rcu(dumper, &dump_list, list) { | ||
2625 | if (dumper->max_reason && reason > dumper->max_reason) | ||
2626 | continue; | ||
2627 | |||
2628 | /* initialize iterator with data about the stored records */ | ||
2629 | dumper->active = true; | ||
2630 | |||
2631 | raw_spin_lock_irqsave(&logbuf_lock, flags); | ||
2632 | dumper->cur_seq = clear_seq; | ||
2633 | dumper->cur_idx = clear_idx; | ||
2634 | dumper->next_seq = log_next_seq; | ||
2635 | dumper->next_idx = log_next_idx; | ||
2636 | raw_spin_unlock_irqrestore(&logbuf_lock, flags); | ||
2637 | |||
2638 | /* invoke dumper which will iterate over records */ | ||
2639 | dumper->dump(dumper, reason); | ||
2640 | |||
2641 | /* reset iterator */ | ||
2642 | dumper->active = false; | ||
2643 | } | ||
2644 | rcu_read_unlock(); | ||
2645 | } | ||
2646 | |||
2647 | /** | ||
2648 | * kmsg_dump_get_line_nolock - retrieve one kmsg log line (unlocked version) | ||
2649 | * @dumper: registered kmsg dumper | ||
2650 | * @syslog: include the "<4>" prefixes | ||
2651 | * @line: buffer to copy the line to | ||
2652 | * @size: maximum size of the buffer | ||
2653 | * @len: length of line placed into buffer | ||
2654 | * | ||
2655 | * Start at the beginning of the kmsg buffer, with the oldest kmsg | ||
2656 | * record, and copy one record into the provided buffer. | ||
2657 | * | ||
2658 | * Consecutive calls will return the next available record moving | ||
2659 | * towards the end of the buffer with the youngest messages. | ||
2660 | * | ||
2661 | * A return value of FALSE indicates that there are no more records to | ||
2662 | * read. | ||
2663 | * | ||
2664 | * The function is similar to kmsg_dump_get_line(), but grabs no locks. | ||
2665 | */ | ||
2666 | bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog, | ||
2667 | char *line, size_t size, size_t *len) | ||
2668 | { | ||
2669 | struct log *msg; | ||
2670 | size_t l = 0; | ||
2671 | bool ret = false; | ||
2672 | |||
2673 | if (!dumper->active) | ||
2674 | goto out; | ||
2675 | |||
2676 | if (dumper->cur_seq < log_first_seq) { | ||
2677 | /* messages are gone, move to first available one */ | ||
2678 | dumper->cur_seq = log_first_seq; | ||
2679 | dumper->cur_idx = log_first_idx; | ||
2680 | } | ||
2681 | |||
2682 | /* last entry */ | ||
2683 | if (dumper->cur_seq >= log_next_seq) | ||
2684 | goto out; | ||
2685 | |||
2686 | msg = log_from_idx(dumper->cur_idx); | ||
2687 | l = msg_print_text(msg, 0, syslog, line, size); | ||
2688 | |||
2689 | dumper->cur_idx = log_next(dumper->cur_idx); | ||
2690 | dumper->cur_seq++; | ||
2691 | ret = true; | ||
2692 | out: | ||
2693 | if (len) | ||
2694 | *len = l; | ||
2695 | return ret; | ||
2696 | } | ||
2697 | |||
2698 | /** | ||
2699 | * kmsg_dump_get_line - retrieve one kmsg log line | ||
2700 | * @dumper: registered kmsg dumper | ||
2701 | * @syslog: include the "<4>" prefixes | ||
2702 | * @line: buffer to copy the line to | ||
2703 | * @size: maximum size of the buffer | ||
2704 | * @len: length of line placed into buffer | ||
2705 | * | ||
2706 | * Start at the beginning of the kmsg buffer, with the oldest kmsg | ||
2707 | * record, and copy one record into the provided buffer. | ||
2708 | * | ||
2709 | * Consecutive calls will return the next available record moving | ||
2710 | * towards the end of the buffer with the youngest messages. | ||
2711 | * | ||
2712 | * A return value of FALSE indicates that there are no more records to | ||
2713 | * read. | ||
2714 | */ | ||
2715 | bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog, | ||
2716 | char *line, size_t size, size_t *len) | ||
2717 | { | ||
2718 | unsigned long flags; | ||
2719 | bool ret; | ||
2720 | |||
2721 | raw_spin_lock_irqsave(&logbuf_lock, flags); | ||
2722 | ret = kmsg_dump_get_line_nolock(dumper, syslog, line, size, len); | ||
2723 | raw_spin_unlock_irqrestore(&logbuf_lock, flags); | ||
2724 | |||
2725 | return ret; | ||
2726 | } | ||
2727 | EXPORT_SYMBOL_GPL(kmsg_dump_get_line); | ||
2728 | |||
2729 | /** | ||
2730 | * kmsg_dump_get_buffer - copy kmsg log lines | ||
2731 | * @dumper: registered kmsg dumper | ||
2732 | * @syslog: include the "<4>" prefixes | ||
2733 | * @buf: buffer to copy the lines to | ||
2734 | * @size: maximum size of the buffer | ||
2735 | * @len: length of data placed into buffer | ||
2736 | * | ||
2737 | * Start at the end of the kmsg buffer and fill the provided buffer | ||
2738 | * with as many of the *youngest* kmsg records as fit into it. | ||
2739 | * If the buffer is large enough, all available kmsg records will be | ||
2740 | * copied with a single call. | ||
2741 | * | ||
2742 | * Consecutive calls will fill the buffer with the next block of | ||
2743 | * available older records, not including the earlier retrieved ones. | ||
2744 | * | ||
2745 | * A return value of FALSE indicates that there are no more records to | ||
2746 | * read. | ||
2747 | */ | ||
2748 | bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog, | ||
2749 | char *buf, size_t size, size_t *len) | ||
2750 | { | ||
2751 | unsigned long flags; | ||
2752 | u64 seq; | ||
2753 | u32 idx; | ||
2754 | u64 next_seq; | ||
2755 | u32 next_idx; | ||
2756 | enum log_flags prev; | ||
2757 | size_t l = 0; | ||
2758 | bool ret = false; | ||
2759 | |||
2760 | if (!dumper->active) | ||
2761 | goto out; | ||
2762 | |||
2763 | raw_spin_lock_irqsave(&logbuf_lock, flags); | ||
2764 | if (dumper->cur_seq < log_first_seq) { | ||
2765 | /* messages are gone, move to first available one */ | ||
2766 | dumper->cur_seq = log_first_seq; | ||
2767 | dumper->cur_idx = log_first_idx; | ||
2768 | } | ||
2769 | |||
2770 | /* last entry */ | ||
2771 | if (dumper->cur_seq >= dumper->next_seq) { | ||
2772 | raw_spin_unlock_irqrestore(&logbuf_lock, flags); | ||
2773 | goto out; | ||
2774 | } | ||
2775 | |||
2776 | /* calculate length of entire buffer */ | ||
2777 | seq = dumper->cur_seq; | ||
2778 | idx = dumper->cur_idx; | ||
2779 | prev = 0; | ||
2780 | while (seq < dumper->next_seq) { | ||
2781 | struct log *msg = log_from_idx(idx); | ||
2782 | |||
2783 | l += msg_print_text(msg, prev, true, NULL, 0); | ||
2784 | idx = log_next(idx); | ||
2785 | seq++; | ||
2786 | prev = msg->flags; | ||
2787 | } | ||
2788 | |||
2789 | /* move first record forward until length fits into the buffer */ | ||
2790 | seq = dumper->cur_seq; | ||
2791 | idx = dumper->cur_idx; | ||
2792 | prev = 0; | ||
2793 | while (l > size && seq < dumper->next_seq) { | ||
2794 | struct log *msg = log_from_idx(idx); | ||
2795 | |||
2796 | l -= msg_print_text(msg, prev, true, NULL, 0); | ||
2797 | idx = log_next(idx); | ||
2798 | seq++; | ||
2799 | prev = msg->flags; | ||
2800 | } | ||
2801 | |||
2802 | /* last message in next iteration */ | ||
2803 | next_seq = seq; | ||
2804 | next_idx = idx; | ||
2805 | |||
2806 | l = 0; | ||
2807 | prev = 0; | ||
2808 | while (seq < dumper->next_seq) { | ||
2809 | struct log *msg = log_from_idx(idx); | ||
2810 | |||
2811 | l += msg_print_text(msg, prev, syslog, buf + l, size - l); | ||
2812 | idx = log_next(idx); | ||
2813 | seq++; | ||
2814 | prev = msg->flags; | ||
2815 | } | ||
2816 | |||
2817 | dumper->next_seq = next_seq; | ||
2818 | dumper->next_idx = next_idx; | ||
2819 | ret = true; | ||
2820 | raw_spin_unlock_irqrestore(&logbuf_lock, flags); | ||
2821 | out: | ||
2822 | if (len) | ||
2823 | *len = l; | ||
2824 | return ret; | ||
2825 | } | ||
2826 | EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer); | ||
2827 | |||
2828 | /** | ||
2829 | * kmsg_dump_rewind_nolock - reset the iterator (unlocked version) | ||
2830 | * @dumper: registered kmsg dumper | ||
2831 | * | ||
2832 | * Reset the dumper's iterator so that kmsg_dump_get_line() and | ||
2833 | * kmsg_dump_get_buffer() can be called again and used multiple | ||
2834 | * times within the same dumper.dump() callback. | ||
2835 | * | ||
2836 | * The function is similar to kmsg_dump_rewind(), but grabs no locks. | ||
2837 | */ | ||
2838 | void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper) | ||
2839 | { | ||
2840 | dumper->cur_seq = clear_seq; | ||
2841 | dumper->cur_idx = clear_idx; | ||
2842 | dumper->next_seq = log_next_seq; | ||
2843 | dumper->next_idx = log_next_idx; | ||
2844 | } | ||
2845 | |||
2846 | /** | ||
2847 | * kmsg_dump_rewind - reset the iterator | ||
2848 | * @dumper: registered kmsg dumper | ||
2849 | * | ||
2850 | * Reset the dumper's iterator so that kmsg_dump_get_line() and | ||
2851 | * kmsg_dump_get_buffer() can be called again and used multiple | ||
2852 | * times within the same dumper.dump() callback. | ||
2853 | */ | ||
2854 | void kmsg_dump_rewind(struct kmsg_dumper *dumper) | ||
2855 | { | ||
2856 | unsigned long flags; | ||
2857 | |||
2858 | raw_spin_lock_irqsave(&logbuf_lock, flags); | ||
2859 | kmsg_dump_rewind_nolock(dumper); | ||
2860 | raw_spin_unlock_irqrestore(&logbuf_lock, flags); | ||
2861 | } | ||
2862 | EXPORT_SYMBOL_GPL(kmsg_dump_rewind); | ||
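/*
 * A minimal sketch of a kmsg dumper built on the API above, roughly the
 * pattern used by pstore-like drivers: register a dumper, and in its
 * dump() callback pull the youngest records with kmsg_dump_get_buffer().
 * The example_* names, buffer size and storage step are hypothetical.
 */
static char example_dump_buf[4096];

static void example_do_dump(struct kmsg_dumper *dumper,
			    enum kmsg_dump_reason reason)
{
	size_t len;

	if (kmsg_dump_get_buffer(dumper, true, example_dump_buf,
				 sizeof(example_dump_buf), &len)) {
		/* write 'len' bytes of example_dump_buf to persistent storage */
	}
}

static struct kmsg_dumper example_dumper = {
	.dump		= example_do_dump,
	.max_reason	= KMSG_DUMP_OOPS,	/* dump on oops and panic only */
};

static int __init example_dumper_init(void)
{
	return kmsg_dump_register(&example_dumper);
}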
2863 | |||
2864 | static char dump_stack_arch_desc_str[128]; | ||
2865 | |||
2866 | /** | ||
2867 | * dump_stack_set_arch_desc - set arch-specific str to show with task dumps | ||
2868 | * @fmt: printf-style format string | ||
2869 | * @...: arguments for the format string | ||
2870 | * | ||
2871 | * The configured string will be printed right after utsname during task | ||
2872 | * dumps. Usually used to add arch-specific system identifiers. If an | ||
2873 | * arch wants to make use of such an ID string, it should initialize this | ||
2874 | * as soon as possible during boot. | ||
2875 | */ | ||
2876 | void __init dump_stack_set_arch_desc(const char *fmt, ...) | ||
2877 | { | ||
2878 | va_list args; | ||
2879 | |||
2880 | va_start(args, fmt); | ||
2881 | vsnprintf(dump_stack_arch_desc_str, sizeof(dump_stack_arch_desc_str), | ||
2882 | fmt, args); | ||
2883 | va_end(args); | ||
2884 | } | ||
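/*
 * A minimal sketch of the intended caller: arch boot code seeds the string
 * above early, e.g. from DMI/board data. The identifiers here are
 * hypothetical.
 */
static void __init example_set_hardware_desc(const char *vendor,
					     const char *product)
{
	dump_stack_set_arch_desc("%s %s", vendor, product);
}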
2885 | |||
2886 | /** | ||
2887 | * dump_stack_print_info - print generic debug info for dump_stack() | ||
2888 | * @log_lvl: log level | ||
2889 | * | ||
2890 | * Arch-specific dump_stack() implementations can use this function to | ||
2891 | * print out the same debug information as the generic dump_stack(). | ||
2892 | */ | ||
2893 | void dump_stack_print_info(const char *log_lvl) | ||
2894 | { | ||
2895 | printk("%sCPU: %d PID: %d Comm: %.20s %s %s %.*s\n", | ||
2896 | log_lvl, raw_smp_processor_id(), current->pid, current->comm, | ||
2897 | print_tainted(), init_utsname()->release, | ||
2898 | (int)strcspn(init_utsname()->version, " "), | ||
2899 | init_utsname()->version); | ||
2900 | |||
2901 | if (dump_stack_arch_desc_str[0] != '\0') | ||
2902 | printk("%sHardware name: %s\n", | ||
2903 | log_lvl, dump_stack_arch_desc_str); | ||
2904 | |||
2905 | print_worker_info(log_lvl, current); | ||
2906 | } | ||
2907 | |||
2908 | /** | ||
2909 | * show_regs_print_info - print generic debug info for show_regs() | ||
2910 | * @log_lvl: log level | ||
2911 | * | ||
2912 | * show_regs() implementations can use this function to print out generic | ||
2913 | * debug information. | ||
2914 | */ | ||
2915 | void show_regs_print_info(const char *log_lvl) | ||
2916 | { | ||
2917 | dump_stack_print_info(log_lvl); | ||
2918 | |||
2919 | printk("%stask: %p ti: %p task.ti: %p\n", | ||
2920 | log_lvl, current, current_thread_info(), | ||
2921 | task_thread_info(current)); | ||
2922 | } | ||
2923 | |||
2924 | #endif | ||