-rw-r--r--  arch/x86/kernel/apic/hw_nmi.c          91
-rw-r--r--  include/linux/percpu.h                  4
-rw-r--r--  include/linux/printk.h                  2
-rw-r--r--  include/linux/seq_buf.h               136
-rw-r--r--  include/linux/trace_seq.h              30
-rw-r--r--  kernel/printk/printk.c                 41
-rw-r--r--  kernel/trace/trace.c                   65
-rw-r--r--  kernel/trace/trace_events.c             9
-rw-r--r--  kernel/trace/trace_functions_graph.c   11
-rw-r--r--  kernel/trace/trace_seq.c              177
-rw-r--r--  lib/Makefile                            2
-rw-r--r--  lib/seq_buf.c                         359
12 files changed, 783 insertions(+), 144 deletions(-)
diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c
index 6a1e71bde323..6873ab925d00 100644
--- a/arch/x86/kernel/apic/hw_nmi.c
+++ b/arch/x86/kernel/apic/hw_nmi.c
@@ -18,6 +18,7 @@
18#include <linux/nmi.h> 18#include <linux/nmi.h>
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/delay.h> 20#include <linux/delay.h>
21#include <linux/seq_buf.h>
21 22
22#ifdef CONFIG_HARDLOCKUP_DETECTOR 23#ifdef CONFIG_HARDLOCKUP_DETECTOR
23u64 hw_nmi_get_sample_period(int watchdog_thresh) 24u64 hw_nmi_get_sample_period(int watchdog_thresh)
@@ -29,14 +30,35 @@ u64 hw_nmi_get_sample_period(int watchdog_thresh)
29#ifdef arch_trigger_all_cpu_backtrace 30#ifdef arch_trigger_all_cpu_backtrace
30/* For reliability, we're prepared to waste bits here. */ 31/* For reliability, we're prepared to waste bits here. */
31static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly; 32static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;
33static cpumask_t printtrace_mask;
34
35#define NMI_BUF_SIZE 4096
36
37struct nmi_seq_buf {
38 unsigned char buffer[NMI_BUF_SIZE];
39 struct seq_buf seq;
40};
41
42/* Safe printing in NMI context */
43static DEFINE_PER_CPU(struct nmi_seq_buf, nmi_print_seq);
32 44
33/* "in progress" flag of arch_trigger_all_cpu_backtrace */ 45/* "in progress" flag of arch_trigger_all_cpu_backtrace */
34static unsigned long backtrace_flag; 46static unsigned long backtrace_flag;
35 47
48static void print_seq_line(struct nmi_seq_buf *s, int start, int end)
49{
50 const char *buf = s->buffer + start;
51
52 printk("%.*s", (end - start) + 1, buf);
53}
54
36void arch_trigger_all_cpu_backtrace(bool include_self) 55void arch_trigger_all_cpu_backtrace(bool include_self)
37{ 56{
57 struct nmi_seq_buf *s;
58 int len;
59 int cpu;
38 int i; 60 int i;
39 int cpu = get_cpu(); 61 int this_cpu = get_cpu();
40 62
41 if (test_and_set_bit(0, &backtrace_flag)) { 63 if (test_and_set_bit(0, &backtrace_flag)) {
42 /* 64 /*
@@ -49,7 +71,17 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
49 71
50 cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask); 72 cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask);
51 if (!include_self) 73 if (!include_self)
52 cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask)); 74 cpumask_clear_cpu(this_cpu, to_cpumask(backtrace_mask));
75
76 cpumask_copy(&printtrace_mask, to_cpumask(backtrace_mask));
77 /*
78 * Set up per_cpu seq_buf buffers that the NMIs running on the other
79 * CPUs will write to.
80 */
81 for_each_cpu(cpu, to_cpumask(backtrace_mask)) {
82 s = &per_cpu(nmi_print_seq, cpu);
83 seq_buf_init(&s->seq, s->buffer, NMI_BUF_SIZE);
84 }
53 85
54 if (!cpumask_empty(to_cpumask(backtrace_mask))) { 86 if (!cpumask_empty(to_cpumask(backtrace_mask))) {
55 pr_info("sending NMI to %s CPUs:\n", 87 pr_info("sending NMI to %s CPUs:\n",
@@ -65,11 +97,58 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
65 touch_softlockup_watchdog(); 97 touch_softlockup_watchdog();
66 } 98 }
67 99
100 /*
101 * Now that all the NMIs have triggered, we can dump out their
102 * back traces safely to the console.
103 */
104 for_each_cpu(cpu, &printtrace_mask) {
105 int last_i = 0;
106
107 s = &per_cpu(nmi_print_seq, cpu);
108 len = seq_buf_used(&s->seq);
109 if (!len)
110 continue;
111
112 /* Print line by line. */
113 for (i = 0; i < len; i++) {
114 if (s->buffer[i] == '\n') {
115 print_seq_line(s, last_i, i);
116 last_i = i + 1;
117 }
118 }
119 /* Check if there was a partial line. */
120 if (last_i < len) {
121 print_seq_line(s, last_i, len - 1);
122 pr_cont("\n");
123 }
124 }
125
68 clear_bit(0, &backtrace_flag); 126 clear_bit(0, &backtrace_flag);
69 smp_mb__after_atomic(); 127 smp_mb__after_atomic();
70 put_cpu(); 128 put_cpu();
71} 129}
72 130
131/*
132 * It is not safe to call printk() directly from NMI handlers.
 133 * It may be fine when the NMI has detected a lockup and we have no
 134 * choice, but sending an NMI to all other CPUs for a back trace can be
 135 * done casually with sysrq-l. We don't want that to lock up, which
 136 * can happen if the NMI interrupts a printk in progress.
137 *
 138 * Instead, we redirect the vprintk() to this nmi_vprintk() that writes
 139 * the content into a per-CPU seq_buf buffer. Then when the NMIs are
 140 * all done, we can safely dump the contents of the seq_buf to a printk()
 141 * from a non-NMI context.
142 */
143static int nmi_vprintk(const char *fmt, va_list args)
144{
145 struct nmi_seq_buf *s = this_cpu_ptr(&nmi_print_seq);
146 unsigned int len = seq_buf_used(&s->seq);
147
148 seq_buf_vprintf(&s->seq, fmt, args);
149 return seq_buf_used(&s->seq) - len;
150}
151
73static int 152static int
74arch_trigger_all_cpu_backtrace_handler(unsigned int cmd, struct pt_regs *regs) 153arch_trigger_all_cpu_backtrace_handler(unsigned int cmd, struct pt_regs *regs)
75{ 154{
@@ -78,12 +157,14 @@ arch_trigger_all_cpu_backtrace_handler(unsigned int cmd, struct pt_regs *regs)
78 cpu = smp_processor_id(); 157 cpu = smp_processor_id();
79 158
80 if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) { 159 if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
81 static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED; 160 printk_func_t printk_func_save = this_cpu_read(printk_func);
82 161
83 arch_spin_lock(&lock); 162 /* Replace printk to write into the NMI seq */
163 this_cpu_write(printk_func, nmi_vprintk);
84 printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu); 164 printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
85 show_regs(regs); 165 show_regs(regs);
86 arch_spin_unlock(&lock); 166 this_cpu_write(printk_func, printk_func_save);
167
87 cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask)); 168 cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
88 return NMI_HANDLED; 169 return NMI_HANDLED;
89 } 170 }
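
Editor's note: the mechanism above is compact, so a standalone sketch may help. The following is an illustrative userspace analogue, not the kernel code itself; the names, the fixed-size buffer, and the single global in place of a per-CPU variable are all simplifications. Output produced through a redirected vprintf-style pointer lands in a buffer, which is later flushed line by line, just as arch_trigger_all_cpu_backtrace() does with print_seq_line():

    #include <stdarg.h>
    #include <stdio.h>

    #define BUF_SIZE 256

    struct sketch_buf {
            char buffer[BUF_SIZE];
            size_t len;
    };

    static struct sketch_buf nmi_buf; /* stands in for the per-CPU nmi_print_seq */

    /* Redirected printer: appends to the buffer instead of the console. */
    static int buffered_vprintf(const char *fmt, va_list args)
    {
            size_t left = BUF_SIZE - nmi_buf.len;
            int ret = vsnprintf(nmi_buf.buffer + nmi_buf.len, left, fmt, args);

            if (ret > 0 && (size_t)ret < left)
                    nmi_buf.len += ret;
            return ret;
    }

    static int (*printk_func)(const char *, va_list) = vprintf;

    static int my_printk(const char *fmt, ...)
    {
            va_list args;
            int r;

            va_start(args, fmt);
            r = printk_func(fmt, args);
            va_end(args);
            return r;
    }

    int main(void)
    {
            size_t i, last = 0;

            printk_func = buffered_vprintf;   /* divert, as the NMI handler does */
            my_printk("NMI backtrace for cpu %d\n", 0);
            my_printk("no trailing newline");
            printk_func = vprintf;            /* restore */

            /* Flush line by line, as the loop over printtrace_mask does. */
            for (i = 0; i < nmi_buf.len; i++) {
                    if (nmi_buf.buffer[i] == '\n') {
                            printf("%.*s", (int)(i - last) + 1, nmi_buf.buffer + last);
                            last = i + 1;
                    }
            }
            if (last < nmi_buf.len)           /* partial last line */
                    printf("%.*s\n", (int)(nmi_buf.len - last), nmi_buf.buffer + last);
            return 0;
    }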
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index a3aa63e47637..caebf2a758dc 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -5,6 +5,7 @@
5#include <linux/preempt.h> 5#include <linux/preempt.h>
6#include <linux/smp.h> 6#include <linux/smp.h>
7#include <linux/cpumask.h> 7#include <linux/cpumask.h>
8#include <linux/printk.h>
8#include <linux/pfn.h> 9#include <linux/pfn.h>
9#include <linux/init.h> 10#include <linux/init.h>
10 11
@@ -134,4 +135,7 @@ extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
134 (typeof(type) __percpu *)__alloc_percpu(sizeof(type), \ 135 (typeof(type) __percpu *)__alloc_percpu(sizeof(type), \
135 __alignof__(type)) 136 __alignof__(type))
136 137
138/* To avoid include hell, as printk.h cannot declare this, we declare it here */
139DECLARE_PER_CPU(printk_func_t, printk_func);
140
137#endif /* __LINUX_PERCPU_H */ 141#endif /* __LINUX_PERCPU_H */
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 3dd489f2dedc..c8f170324e64 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -123,6 +123,8 @@ static inline __printf(1, 2) __cold
123void early_printk(const char *s, ...) { } 123void early_printk(const char *s, ...) { }
124#endif 124#endif
125 125
126typedef int(*printk_func_t)(const char *fmt, va_list args);
127
126#ifdef CONFIG_PRINTK 128#ifdef CONFIG_PRINTK
127asmlinkage __printf(5, 0) 129asmlinkage __printf(5, 0)
128int vprintk_emit(int facility, int level, 130int vprintk_emit(int facility, int level,
diff --git a/include/linux/seq_buf.h b/include/linux/seq_buf.h
new file mode 100644
index 000000000000..9aafe0e24c68
--- /dev/null
+++ b/include/linux/seq_buf.h
@@ -0,0 +1,136 @@
1#ifndef _LINUX_SEQ_BUF_H
2#define _LINUX_SEQ_BUF_H
3
4#include <linux/fs.h>
5
6/*
 7 * A seq_buf is a descriptor for a buffer that a function can pass to
 8 * several other functions, each appending string data to it.
9 */
10
11/**
12 * seq_buf - seq buffer structure
13 * @buffer: pointer to the buffer
14 * @size: size of the buffer
15 * @len: the amount of data inside the buffer
16 * @readpos: The next position to read in the buffer.
17 */
18struct seq_buf {
19 char *buffer;
20 size_t size;
21 size_t len;
22 loff_t readpos;
23};
24
25static inline void seq_buf_clear(struct seq_buf *s)
26{
27 s->len = 0;
28 s->readpos = 0;
29}
30
31static inline void
32seq_buf_init(struct seq_buf *s, unsigned char *buf, unsigned int size)
33{
34 s->buffer = buf;
35 s->size = size;
36 seq_buf_clear(s);
37}
38
39/*
 40 * A seq_buf can overflow its buffer. When this happens, len is
 41 * set to a value greater than size (see seq_buf_set_overflow()).
42 */
43static inline bool
44seq_buf_has_overflowed(struct seq_buf *s)
45{
46 return s->len > s->size;
47}
48
49static inline void
50seq_buf_set_overflow(struct seq_buf *s)
51{
52 s->len = s->size + 1;
53}
54
55/*
56 * How much buffer is left on the seq_buf?
57 */
58static inline unsigned int
59seq_buf_buffer_left(struct seq_buf *s)
60{
61 if (seq_buf_has_overflowed(s))
62 return 0;
63
64 return s->size - s->len;
65}
66
67/* How much buffer was written? */
68static inline unsigned int seq_buf_used(struct seq_buf *s)
69{
70 return min(s->len, s->size);
71}
72
73/**
74 * seq_buf_get_buf - get buffer to write arbitrary data to
75 * @s: the seq_buf handle
76 * @bufp: the beginning of the buffer is stored here
77 *
78 * Return the number of bytes available in the buffer, or zero if
79 * there's no space.
80 */
81static inline size_t seq_buf_get_buf(struct seq_buf *s, char **bufp)
82{
83 WARN_ON(s->len > s->size + 1);
84
85 if (s->len < s->size) {
86 *bufp = s->buffer + s->len;
87 return s->size - s->len;
88 }
89
90 *bufp = NULL;
91 return 0;
92}
93
94/**
95 * seq_buf_commit - commit data to the buffer
96 * @s: the seq_buf handle
97 * @num: the number of bytes to commit
98 *
99 * Commit @num bytes of data written to a buffer previously acquired
 100 * by seq_buf_get_buf(). To signal an error condition, or that the data
101 * didn't fit in the available space, pass a negative @num value.
102 */
103static inline void seq_buf_commit(struct seq_buf *s, int num)
104{
105 if (num < 0) {
106 seq_buf_set_overflow(s);
107 } else {
 108 /* the caller must pass a negative num on overflow */
109 BUG_ON(s->len + num > s->size);
110 s->len += num;
111 }
112}
113
114extern __printf(2, 3)
115int seq_buf_printf(struct seq_buf *s, const char *fmt, ...);
116extern __printf(2, 0)
117int seq_buf_vprintf(struct seq_buf *s, const char *fmt, va_list args);
118extern int seq_buf_print_seq(struct seq_file *m, struct seq_buf *s);
119extern int seq_buf_to_user(struct seq_buf *s, char __user *ubuf,
120 int cnt);
121extern int seq_buf_puts(struct seq_buf *s, const char *str);
122extern int seq_buf_putc(struct seq_buf *s, unsigned char c);
123extern int seq_buf_putmem(struct seq_buf *s, const void *mem, unsigned int len);
124extern int seq_buf_putmem_hex(struct seq_buf *s, const void *mem,
125 unsigned int len);
126extern int seq_buf_path(struct seq_buf *s, const struct path *path, const char *esc);
127
128extern int seq_buf_bitmask(struct seq_buf *s, const unsigned long *maskp,
129 int nmaskbits);
130
131#ifdef CONFIG_BINARY_PRINTF
132extern int
133seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary);
134#endif
135
136#endif /* _LINUX_SEQ_BUF_H */
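
Editor's note: as a usage illustration of the API above, here is a minimal userspace re-implementation of its core. Assumptions are labeled: only init/printf/overflow are modeled, the struct and names are stand-ins rather than the kernel types, and the fit check is simplified to require the full string plus its NUL to fit (the real seq_buf_vprintf() lives in lib/seq_buf.c below):

    #include <stdarg.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct sketch_seq_buf {            /* stand-in for struct seq_buf */
            char *buffer;
            size_t size;
            size_t len;
    };

    static void sketch_init(struct sketch_seq_buf *s, char *buf, size_t size)
    {
            s->buffer = buf;
            s->size = size;
            s->len = 0;
    }

    static bool sketch_overflowed(struct sketch_seq_buf *s)
    {
            return s->len > s->size;   /* same test as seq_buf_has_overflowed() */
    }

    static int sketch_printf(struct sketch_seq_buf *s, const char *fmt, ...)
    {
            va_list ap;
            int len;

            if (s->len < s->size) {
                    va_start(ap, fmt);
                    len = vsnprintf(s->buffer + s->len, s->size - s->len, fmt, ap);
                    va_end(ap);
                    if (len >= 0 && (size_t)len < s->size - s->len) {
                            s->len += len;
                            return 0;
                    }
            }
            s->len = s->size + 1;      /* cf. seq_buf_set_overflow() */
            return -1;
    }

    int main(void)
    {
            char buf[32];
            struct sketch_seq_buf s;

            sketch_init(&s, buf, sizeof(buf));
            sketch_printf(&s, "cpu %d: %s\n", 1, "ok");
            printf("used=%zu overflowed=%d\n", s.len, sketch_overflowed(&s));
            sketch_printf(&s, "%s", "this line is much too long to fit in what remains");
            printf("used=%zu overflowed=%d\n", s.len, sketch_overflowed(&s));
            return 0;
    }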
diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h
index db8a73224f1a..cfaf5a1d4bad 100644
--- a/include/linux/trace_seq.h
+++ b/include/linux/trace_seq.h
@@ -1,7 +1,7 @@
1#ifndef _LINUX_TRACE_SEQ_H 1#ifndef _LINUX_TRACE_SEQ_H
2#define _LINUX_TRACE_SEQ_H 2#define _LINUX_TRACE_SEQ_H
3 3
4#include <linux/fs.h> 4#include <linux/seq_buf.h>
5 5
6#include <asm/page.h> 6#include <asm/page.h>
7 7
@@ -12,20 +12,36 @@
12 12
13struct trace_seq { 13struct trace_seq {
14 unsigned char buffer[PAGE_SIZE]; 14 unsigned char buffer[PAGE_SIZE];
15 unsigned int len; 15 struct seq_buf seq;
16 unsigned int readpos;
17 int full; 16 int full;
18}; 17};
19 18
20static inline void 19static inline void
21trace_seq_init(struct trace_seq *s) 20trace_seq_init(struct trace_seq *s)
22{ 21{
23 s->len = 0; 22 seq_buf_init(&s->seq, s->buffer, PAGE_SIZE);
24 s->readpos = 0;
25 s->full = 0; 23 s->full = 0;
26} 24}
27 25
28/** 26/**
27 * trace_seq_used - amount of actual data written to buffer
28 * @s: trace sequence descriptor
29 *
30 * Returns the amount of data written to the buffer.
31 *
32 * IMPORTANT!
33 *
 34 * Use this instead of @s->seq.len if you need to pass the amount
 35 * of data from the buffer to another buffer (userspace or otherwise).
 36 * On overflow, @s->seq.len is bigger than the buffer size, and
 37 * using it directly can lead to out-of-bounds accesses.
38 */
39static inline int trace_seq_used(struct trace_seq *s)
40{
41 return seq_buf_used(&s->seq);
42}
43
44/**
29 * trace_seq_buffer_ptr - return pointer to next location in buffer 45 * trace_seq_buffer_ptr - return pointer to next location in buffer
30 * @s: trace sequence descriptor 46 * @s: trace sequence descriptor
31 * 47 *
@@ -37,7 +53,7 @@ trace_seq_init(struct trace_seq *s)
37static inline unsigned char * 53static inline unsigned char *
38trace_seq_buffer_ptr(struct trace_seq *s) 54trace_seq_buffer_ptr(struct trace_seq *s)
39{ 55{
40 return s->buffer + s->len; 56 return s->buffer + seq_buf_used(&s->seq);
41} 57}
42 58
43/** 59/**
@@ -49,7 +65,7 @@ trace_seq_buffer_ptr(struct trace_seq *s)
49 */ 65 */
50static inline bool trace_seq_has_overflowed(struct trace_seq *s) 66static inline bool trace_seq_has_overflowed(struct trace_seq *s)
51{ 67{
52 return s->full || s->len > PAGE_SIZE - 1; 68 return s->full || seq_buf_has_overflowed(&s->seq);
53} 69}
54 70
55/* 71/*
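
Editor's note: the arithmetic behind the warning in the trace_seq_used() kernel-doc, spelled out. After an overflow, seq.len is size + 1 (seq_buf_set_overflow() above), so using it as a byte count walks off the end of the buffer, while the min() inside seq_buf_used() clamps it. An illustrative standalone check, assuming a 4096-byte PAGE_SIZE:

    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
            size_t size = 4096;     /* PAGE_SIZE, assuming a 4K-page config */
            size_t len  = size + 1; /* what seq_buf_set_overflow() leaves  */

            /* Copying 'len' bytes would read one byte past the buffer;
             * the min() inside seq_buf_used() clamps the count to size. */
            printf("raw seq.len: %zu, trace_seq_used(): %zu\n",
                   len, MIN(len, size));
            return 0;
    }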
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index ea27c019655a..f900dc9f6822 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -1805,6 +1805,30 @@ asmlinkage int printk_emit(int facility, int level,
1805} 1805}
1806EXPORT_SYMBOL(printk_emit); 1806EXPORT_SYMBOL(printk_emit);
1807 1807
1808int vprintk_default(const char *fmt, va_list args)
1809{
1810 int r;
1811
1812#ifdef CONFIG_KGDB_KDB
1813 if (unlikely(kdb_trap_printk)) {
1814 r = vkdb_printf(fmt, args);
1815 return r;
1816 }
1817#endif
1818 r = vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, 0, fmt, args);
1819
1820 return r;
1821}
1822EXPORT_SYMBOL_GPL(vprintk_default);
1823
1824/*
1825 * This allows printk to be diverted to another function on a per-CPU
1826 * basis. It is useful for calling printk from NMI context without
1827 * worrying about the deadlocks that can occur if an NMI interrupts a
1828 * printk already in progress.
1829 */
1830DEFINE_PER_CPU(printk_func_t, printk_func) = vprintk_default;
1831
1808/** 1832/**
1809 * printk - print a kernel message 1833 * printk - print a kernel message
1810 * @fmt: format string 1834 * @fmt: format string
@@ -1828,19 +1852,15 @@ EXPORT_SYMBOL(printk_emit);
1828 */ 1852 */
1829asmlinkage __visible int printk(const char *fmt, ...) 1853asmlinkage __visible int printk(const char *fmt, ...)
1830{ 1854{
1855 printk_func_t vprintk_func;
1831 va_list args; 1856 va_list args;
1832 int r; 1857 int r;
1833 1858
1834#ifdef CONFIG_KGDB_KDB
1835 if (unlikely(kdb_trap_printk)) {
1836 va_start(args, fmt);
1837 r = vkdb_printf(fmt, args);
1838 va_end(args);
1839 return r;
1840 }
1841#endif
1842 va_start(args, fmt); 1859 va_start(args, fmt);
1843 r = vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, 0, fmt, args); 1860 preempt_disable();
1861 vprintk_func = this_cpu_read(printk_func);
1862 r = vprintk_func(fmt, args);
1863 preempt_enable();
1844 va_end(args); 1864 va_end(args);
1845 1865
1846 return r; 1866 return r;
@@ -1874,6 +1894,9 @@ static size_t msg_print_text(const struct printk_log *msg, enum log_flags prev,
1874 bool syslog, char *buf, size_t size) { return 0; } 1894 bool syslog, char *buf, size_t size) { return 0; }
1875static size_t cont_print_text(char *text, size_t size) { return 0; } 1895static size_t cont_print_text(char *text, size_t size) { return 0; }
1876 1896
1897/* Still needs to be defined for users */
1898DEFINE_PER_CPU(printk_func_t, printk_func);
1899
1877#endif /* CONFIG_PRINTK */ 1900#endif /* CONFIG_PRINTK */
1878 1901
1879#ifdef CONFIG_EARLY_PRINTK 1902#ifdef CONFIG_EARLY_PRINTK
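
Editor's note: the save/replace/restore discipline that hw_nmi.c applies to the per-CPU printk_func is easy to get wrong, so here is a reduced sketch of just that pattern. It is illustrative only: a plain global stands in for the per-CPU variable, so no preempt_disable() analogue is needed, whereas in the kernel the read and the call must happen with preemption disabled, as the new printk() body above shows:

    #include <stdarg.h>
    #include <stdio.h>

    typedef int (*printk_func_t)(const char *fmt, va_list args);

    /* A plain global here; the kernel uses a per-CPU variable instead. */
    static printk_func_t printk_func = vprintf;

    static int quiet_vprintk(const char *fmt, va_list args)
    {
            (void)fmt;
            (void)args;
            return 0;               /* an NMI handler would buffer instead */
    }

    static int printk_sketch(const char *fmt, ...)
    {
            va_list args;
            int r;

            va_start(args, fmt);
            r = printk_func(fmt, args);   /* dispatch through the pointer */
            va_end(args);
            return r;
    }

    int main(void)
    {
            printk_func_t saved = printk_func;  /* save...       */

            printk_sketch("reaches the console\n");
            printk_func = quiet_vprintk;        /* ...replace... */
            printk_sketch("diverted, never printed\n");
            printk_func = saved;                /* ...restore    */
            printk_sketch("reaches the console again\n");
            return 0;
    }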
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index ce11fa50a2f0..1af4f8f2ab5d 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -939,19 +939,20 @@ out:
939 return ret; 939 return ret;
940} 940}
941 941
942/* TODO add a seq_buf_to_buffer() */
942static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt) 943static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
943{ 944{
944 int len; 945 int len;
945 946
946 if (s->len <= s->readpos) 947 if (trace_seq_used(s) <= s->seq.readpos)
947 return -EBUSY; 948 return -EBUSY;
948 949
949 len = s->len - s->readpos; 950 len = trace_seq_used(s) - s->seq.readpos;
950 if (cnt > len) 951 if (cnt > len)
951 cnt = len; 952 cnt = len;
952 memcpy(buf, s->buffer + s->readpos, cnt); 953 memcpy(buf, s->buffer + s->seq.readpos, cnt);
953 954
954 s->readpos += cnt; 955 s->seq.readpos += cnt;
955 return cnt; 956 return cnt;
956} 957}
957 958
@@ -4313,6 +4314,8 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
4313 goto out; 4314 goto out;
4314 } 4315 }
4315 4316
4317 trace_seq_init(&iter->seq);
4318
4316 /* 4319 /*
4317 * We make a copy of the current tracer to avoid concurrent 4320 * We make a copy of the current tracer to avoid concurrent
4318 * changes on it while we are reading. 4321 * changes on it while we are reading.
@@ -4506,18 +4509,18 @@ waitagain:
4506 trace_access_lock(iter->cpu_file); 4509 trace_access_lock(iter->cpu_file);
4507 while (trace_find_next_entry_inc(iter) != NULL) { 4510 while (trace_find_next_entry_inc(iter) != NULL) {
4508 enum print_line_t ret; 4511 enum print_line_t ret;
4509 int len = iter->seq.len; 4512 int save_len = iter->seq.seq.len;
4510 4513
4511 ret = print_trace_line(iter); 4514 ret = print_trace_line(iter);
4512 if (ret == TRACE_TYPE_PARTIAL_LINE) { 4515 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4513 /* don't print partial lines */ 4516 /* don't print partial lines */
4514 iter->seq.len = len; 4517 iter->seq.seq.len = save_len;
4515 break; 4518 break;
4516 } 4519 }
4517 if (ret != TRACE_TYPE_NO_CONSUME) 4520 if (ret != TRACE_TYPE_NO_CONSUME)
4518 trace_consume(iter); 4521 trace_consume(iter);
4519 4522
4520 if (iter->seq.len >= cnt) 4523 if (trace_seq_used(&iter->seq) >= cnt)
4521 break; 4524 break;
4522 4525
4523 /* 4526 /*
@@ -4533,7 +4536,7 @@ waitagain:
4533 4536
4534 /* Now copy what we have to the user */ 4537 /* Now copy what we have to the user */
4535 sret = trace_seq_to_user(&iter->seq, ubuf, cnt); 4538 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4536 if (iter->seq.readpos >= iter->seq.len) 4539 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
4537 trace_seq_init(&iter->seq); 4540 trace_seq_init(&iter->seq);
4538 4541
4539 /* 4542 /*
@@ -4567,20 +4570,33 @@ static size_t
4567tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter) 4570tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
4568{ 4571{
4569 size_t count; 4572 size_t count;
4573 int save_len;
4570 int ret; 4574 int ret;
4571 4575
4572 /* Seq buffer is page-sized, exactly what we need. */ 4576 /* Seq buffer is page-sized, exactly what we need. */
4573 for (;;) { 4577 for (;;) {
4574 count = iter->seq.len; 4578 save_len = iter->seq.seq.len;
4575 ret = print_trace_line(iter); 4579 ret = print_trace_line(iter);
4576 count = iter->seq.len - count; 4580
4577 if (rem < count) { 4581 if (trace_seq_has_overflowed(&iter->seq)) {
4578 rem = 0; 4582 iter->seq.seq.len = save_len;
4579 iter->seq.len -= count;
4580 break; 4583 break;
4581 } 4584 }
4585
4586 /*
4587 * This should not be hit, because it should only
4588 * be set if the iter->seq overflowed. But check it
4589 * anyway to be safe.
4590 */
4582 if (ret == TRACE_TYPE_PARTIAL_LINE) { 4591 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4583 iter->seq.len -= count; 4592 iter->seq.seq.len = save_len;
4593 break;
4594 }
4595
4596 count = trace_seq_used(&iter->seq) - save_len;
4597 if (rem < count) {
4598 rem = 0;
4599 iter->seq.seq.len = save_len;
4584 break; 4600 break;
4585 } 4601 }
4586 4602
@@ -4661,13 +4677,13 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
4661 /* Copy the data into the page, so we can start over. */ 4677 /* Copy the data into the page, so we can start over. */
4662 ret = trace_seq_to_buffer(&iter->seq, 4678 ret = trace_seq_to_buffer(&iter->seq,
4663 page_address(spd.pages[i]), 4679 page_address(spd.pages[i]),
4664 iter->seq.len); 4680 trace_seq_used(&iter->seq));
4665 if (ret < 0) { 4681 if (ret < 0) {
4666 __free_page(spd.pages[i]); 4682 __free_page(spd.pages[i]);
4667 break; 4683 break;
4668 } 4684 }
4669 spd.partial[i].offset = 0; 4685 spd.partial[i].offset = 0;
4670 spd.partial[i].len = iter->seq.len; 4686 spd.partial[i].len = trace_seq_used(&iter->seq);
4671 4687
4672 trace_seq_init(&iter->seq); 4688 trace_seq_init(&iter->seq);
4673 } 4689 }
@@ -5667,7 +5683,8 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
5667 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu); 5683 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
5668 trace_seq_printf(s, "read events: %ld\n", cnt); 5684 trace_seq_printf(s, "read events: %ld\n", cnt);
5669 5685
5670 count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len); 5686 count = simple_read_from_buffer(ubuf, count, ppos,
5687 s->buffer, trace_seq_used(s));
5671 5688
5672 kfree(s); 5689 kfree(s);
5673 5690
@@ -6630,11 +6647,19 @@ void
6630trace_printk_seq(struct trace_seq *s) 6647trace_printk_seq(struct trace_seq *s)
6631{ 6648{
6632 /* Probably should print a warning here. */ 6649 /* Probably should print a warning here. */
6633 if (s->len >= TRACE_MAX_PRINT) 6650 if (s->seq.len >= TRACE_MAX_PRINT)
6634 s->len = TRACE_MAX_PRINT; 6651 s->seq.len = TRACE_MAX_PRINT;
6652
6653 /*
6654 * More paranoid code. Although the buffer size is set to
6655 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
6656 * an extra layer of protection.
6657 */
6658 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
6659 s->seq.len = s->seq.size - 1;
6635 6660
6636 /* should be zero ended, but we are paranoid. */ 6661 /* should be zero ended, but we are paranoid. */
6637 s->buffer[s->len] = 0; 6662 s->buffer[s->seq.len] = 0;
6638 6663
6639 printk(KERN_TRACE "%s", s->buffer); 6664 printk(KERN_TRACE "%s", s->buffer);
6640 6665
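
Editor's note: the "TODO add a seq_buf_to_buffer()" comment above suggests folding trace_seq_to_buffer() into the seq_buf layer. A hedged sketch of what such a helper might look like, hypothetical and not part of this patch, simply transplanting the readpos logic of seq_buf_to_user() (see lib/seq_buf.c below) with memcpy() in place of copy_to_user():

    /* Hypothetical: mirrors seq_buf_to_user(), minus the uaccess. */
    static ssize_t seq_buf_to_buffer(struct seq_buf *s, void *buf, size_t cnt)
    {
            size_t len;

            if (!cnt)
                    return 0;

            if (seq_buf_used(s) <= s->readpos)
                    return -EBUSY;

            len = seq_buf_used(s) - s->readpos;
            if (cnt > len)
                    cnt = len;
            memcpy(buf, s->buffer + s->readpos, cnt);

            s->readpos += cnt;
            return cnt;
    }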
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 139716bcef7a..d0e4f92b5eb6 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1044,7 +1044,8 @@ event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
1044 mutex_unlock(&event_mutex); 1044 mutex_unlock(&event_mutex);
1045 1045
1046 if (file) 1046 if (file)
1047 r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len); 1047 r = simple_read_from_buffer(ubuf, cnt, ppos,
1048 s->buffer, trace_seq_used(s));
1048 1049
1049 kfree(s); 1050 kfree(s);
1050 1051
@@ -1210,7 +1211,8 @@ subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
1210 trace_seq_init(s); 1211 trace_seq_init(s);
1211 1212
1212 print_subsystem_event_filter(system, s); 1213 print_subsystem_event_filter(system, s);
1213 r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len); 1214 r = simple_read_from_buffer(ubuf, cnt, ppos,
1215 s->buffer, trace_seq_used(s));
1214 1216
1215 kfree(s); 1217 kfree(s);
1216 1218
@@ -1265,7 +1267,8 @@ show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
1265 trace_seq_init(s); 1267 trace_seq_init(s);
1266 1268
1267 func(s); 1269 func(s);
1268 r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len); 1270 r = simple_read_from_buffer(ubuf, cnt, ppos,
1271 s->buffer, trace_seq_used(s));
1269 1272
1270 kfree(s); 1273 kfree(s);
1271 1274
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 6c2ab955018c..ba476009e5de 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -1136,14 +1136,17 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
1136 return ret; 1136 return ret;
1137 } 1137 }
1138 1138
1139 if (trace_seq_has_overflowed(s))
1140 goto out;
1141
1139 /* Strip ending newline */ 1142 /* Strip ending newline */
1140 if (s->buffer[s->len - 1] == '\n') { 1143 if (s->buffer[s->seq.len - 1] == '\n') {
1141 s->buffer[s->len - 1] = '\0'; 1144 s->buffer[s->seq.len - 1] = '\0';
1142 s->len--; 1145 s->seq.len--;
1143 } 1146 }
1144 1147
1145 trace_seq_puts(s, " */\n"); 1148 trace_seq_puts(s, " */\n");
1146 1149 out:
1147 return trace_handle_return(s); 1150 return trace_handle_return(s);
1148} 1151}
1149 1152
diff --git a/kernel/trace/trace_seq.c b/kernel/trace/trace_seq.c
index fabfa0f190a3..f8b45d8792f9 100644
--- a/kernel/trace/trace_seq.c
+++ b/kernel/trace/trace_seq.c
@@ -27,10 +27,19 @@
27#include <linux/trace_seq.h> 27#include <linux/trace_seq.h>
28 28
29/* How much buffer is left on the trace_seq? */ 29/* How much buffer is left on the trace_seq? */
30#define TRACE_SEQ_BUF_LEFT(s) ((PAGE_SIZE - 1) - (s)->len) 30#define TRACE_SEQ_BUF_LEFT(s) seq_buf_buffer_left(&(s)->seq)
31 31
32/* How much buffer is written? */ 32/* How much buffer is written? */
33#define TRACE_SEQ_BUF_USED(s) min((s)->len, (unsigned int)(PAGE_SIZE - 1)) 33#define TRACE_SEQ_BUF_USED(s) seq_buf_used(&(s)->seq)
34
35/*
 36 * A trace_seq must also work when it has only been zeroed, never trace_seq_init()'d.
37 */
38static inline void __trace_seq_init(struct trace_seq *s)
39{
40 if (unlikely(!s->seq.size))
41 trace_seq_init(s);
42}
34 43
35/** 44/**
36 * trace_print_seq - move the contents of trace_seq into a seq_file 45 * trace_print_seq - move the contents of trace_seq into a seq_file
@@ -43,10 +52,11 @@
43 */ 52 */
44int trace_print_seq(struct seq_file *m, struct trace_seq *s) 53int trace_print_seq(struct seq_file *m, struct trace_seq *s)
45{ 54{
46 unsigned int len = TRACE_SEQ_BUF_USED(s);
47 int ret; 55 int ret;
48 56
49 ret = seq_write(m, s->buffer, len); 57 __trace_seq_init(s);
58
59 ret = seq_buf_print_seq(m, &s->seq);
50 60
51 /* 61 /*
52 * Only reset this buffer if we successfully wrote to the 62 * Only reset this buffer if we successfully wrote to the
@@ -72,24 +82,23 @@ int trace_print_seq(struct seq_file *m, struct trace_seq *s)
72 */ 82 */
73void trace_seq_printf(struct trace_seq *s, const char *fmt, ...) 83void trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
74{ 84{
75 unsigned int len = TRACE_SEQ_BUF_LEFT(s); 85 unsigned int save_len = s->seq.len;
76 va_list ap; 86 va_list ap;
77 int ret;
78 87
79 if (s->full || !len) 88 if (s->full)
80 return; 89 return;
81 90
91 __trace_seq_init(s);
92
82 va_start(ap, fmt); 93 va_start(ap, fmt);
83 ret = vsnprintf(s->buffer + s->len, len, fmt, ap); 94 seq_buf_vprintf(&s->seq, fmt, ap);
84 va_end(ap); 95 va_end(ap);
85 96
86 /* If we can't write it all, don't bother writing anything */ 97 /* If we can't write it all, don't bother writing anything */
87 if (ret >= len) { 98 if (unlikely(seq_buf_has_overflowed(&s->seq))) {
99 s->seq.len = save_len;
88 s->full = 1; 100 s->full = 1;
89 return;
90 } 101 }
91
92 s->len += ret;
93} 102}
94EXPORT_SYMBOL_GPL(trace_seq_printf); 103EXPORT_SYMBOL_GPL(trace_seq_printf);
95 104
@@ -104,14 +113,19 @@ EXPORT_SYMBOL_GPL(trace_seq_printf);
104void trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp, 113void trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp,
105 int nmaskbits) 114 int nmaskbits)
106{ 115{
107 unsigned int len = TRACE_SEQ_BUF_LEFT(s); 116 unsigned int save_len = s->seq.len;
108 int ret;
109 117
110 if (s->full || !len) 118 if (s->full)
111 return; 119 return;
112 120
113 ret = bitmap_scnprintf(s->buffer + s->len, len, maskp, nmaskbits); 121 __trace_seq_init(s);
114 s->len += ret; 122
123 seq_buf_bitmask(&s->seq, maskp, nmaskbits);
124
125 if (unlikely(seq_buf_has_overflowed(&s->seq))) {
126 s->seq.len = save_len;
127 s->full = 1;
128 }
115} 129}
116EXPORT_SYMBOL_GPL(trace_seq_bitmask); 130EXPORT_SYMBOL_GPL(trace_seq_bitmask);
117 131
@@ -128,21 +142,20 @@ EXPORT_SYMBOL_GPL(trace_seq_bitmask);
128 */ 142 */
129void trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args) 143void trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args)
130{ 144{
131 unsigned int len = TRACE_SEQ_BUF_LEFT(s); 145 unsigned int save_len = s->seq.len;
132 int ret;
133 146
134 if (s->full || !len) 147 if (s->full)
135 return; 148 return;
136 149
137 ret = vsnprintf(s->buffer + s->len, len, fmt, args); 150 __trace_seq_init(s);
151
152 seq_buf_vprintf(&s->seq, fmt, args);
138 153
139 /* If we can't write it all, don't bother writing anything */ 154 /* If we can't write it all, don't bother writing anything */
140 if (ret >= len) { 155 if (unlikely(seq_buf_has_overflowed(&s->seq))) {
156 s->seq.len = save_len;
141 s->full = 1; 157 s->full = 1;
142 return;
143 } 158 }
144
145 s->len += ret;
146} 159}
147EXPORT_SYMBOL_GPL(trace_seq_vprintf); 160EXPORT_SYMBOL_GPL(trace_seq_vprintf);
148 161
@@ -163,21 +176,21 @@ EXPORT_SYMBOL_GPL(trace_seq_vprintf);
163 */ 176 */
164void trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary) 177void trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
165{ 178{
166 unsigned int len = TRACE_SEQ_BUF_LEFT(s); 179 unsigned int save_len = s->seq.len;
167 int ret;
168 180
169 if (s->full || !len) 181 if (s->full)
170 return; 182 return;
171 183
172 ret = bstr_printf(s->buffer + s->len, len, fmt, binary); 184 __trace_seq_init(s);
185
186 seq_buf_bprintf(&s->seq, fmt, binary);
173 187
174 /* If we can't write it all, don't bother writing anything */ 188 /* If we can't write it all, don't bother writing anything */
175 if (ret >= len) { 189 if (unlikely(seq_buf_has_overflowed(&s->seq))) {
190 s->seq.len = save_len;
176 s->full = 1; 191 s->full = 1;
177 return; 192 return;
178 } 193 }
179
180 s->len += ret;
181} 194}
182EXPORT_SYMBOL_GPL(trace_seq_bprintf); 195EXPORT_SYMBOL_GPL(trace_seq_bprintf);
183 196
@@ -198,13 +211,14 @@ void trace_seq_puts(struct trace_seq *s, const char *str)
198 if (s->full) 211 if (s->full)
199 return; 212 return;
200 213
214 __trace_seq_init(s);
215
201 if (len > TRACE_SEQ_BUF_LEFT(s)) { 216 if (len > TRACE_SEQ_BUF_LEFT(s)) {
202 s->full = 1; 217 s->full = 1;
203 return; 218 return;
204 } 219 }
205 220
206 memcpy(s->buffer + s->len, str, len); 221 seq_buf_putmem(&s->seq, str, len);
207 s->len += len;
208} 222}
209EXPORT_SYMBOL_GPL(trace_seq_puts); 223EXPORT_SYMBOL_GPL(trace_seq_puts);
210 224
@@ -223,12 +237,14 @@ void trace_seq_putc(struct trace_seq *s, unsigned char c)
223 if (s->full) 237 if (s->full)
224 return; 238 return;
225 239
240 __trace_seq_init(s);
241
226 if (TRACE_SEQ_BUF_LEFT(s) < 1) { 242 if (TRACE_SEQ_BUF_LEFT(s) < 1) {
227 s->full = 1; 243 s->full = 1;
228 return; 244 return;
229 } 245 }
230 246
231 s->buffer[s->len++] = c; 247 seq_buf_putc(&s->seq, c);
232} 248}
233EXPORT_SYMBOL_GPL(trace_seq_putc); 249EXPORT_SYMBOL_GPL(trace_seq_putc);
234 250
@@ -247,19 +263,17 @@ void trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len)
247 if (s->full) 263 if (s->full)
248 return; 264 return;
249 265
266 __trace_seq_init(s);
267
250 if (len > TRACE_SEQ_BUF_LEFT(s)) { 268 if (len > TRACE_SEQ_BUF_LEFT(s)) {
251 s->full = 1; 269 s->full = 1;
252 return; 270 return;
253 } 271 }
254 272
255 memcpy(s->buffer + s->len, mem, len); 273 seq_buf_putmem(&s->seq, mem, len);
256 s->len += len;
257} 274}
258EXPORT_SYMBOL_GPL(trace_seq_putmem); 275EXPORT_SYMBOL_GPL(trace_seq_putmem);
259 276
260#define MAX_MEMHEX_BYTES 8U
261#define HEX_CHARS (MAX_MEMHEX_BYTES*2 + 1)
262
263/** 277/**
264 * trace_seq_putmem_hex - write raw memory into the buffer in ASCII hex 278 * trace_seq_putmem_hex - write raw memory into the buffer in ASCII hex
265 * @s: trace sequence descriptor 279 * @s: trace sequence descriptor
@@ -273,32 +287,26 @@ EXPORT_SYMBOL_GPL(trace_seq_putmem);
273void trace_seq_putmem_hex(struct trace_seq *s, const void *mem, 287void trace_seq_putmem_hex(struct trace_seq *s, const void *mem,
274 unsigned int len) 288 unsigned int len)
275{ 289{
276 unsigned char hex[HEX_CHARS]; 290 unsigned int save_len = s->seq.len;
277 const unsigned char *data = mem;
278 unsigned int start_len;
279 int i, j;
280 291
281 if (s->full) 292 if (s->full)
282 return; 293 return;
283 294
284 while (len) { 295 __trace_seq_init(s);
285 start_len = min(len, HEX_CHARS - 1); 296
286#ifdef __BIG_ENDIAN 297 /* Each byte is represented by two chars */
287 for (i = 0, j = 0; i < start_len; i++) { 298 if (len * 2 > TRACE_SEQ_BUF_LEFT(s)) {
288#else 299 s->full = 1;
289 for (i = start_len-1, j = 0; i >= 0; i--) { 300 return;
290#endif 301 }
291 hex[j++] = hex_asc_hi(data[i]); 302
292 hex[j++] = hex_asc_lo(data[i]); 303 /* The added spaces can still cause an overflow */
293 } 304 seq_buf_putmem_hex(&s->seq, mem, len);
294 if (WARN_ON_ONCE(j == 0 || j/2 > len)) 305
295 break; 306 if (unlikely(seq_buf_has_overflowed(&s->seq))) {
296 307 s->seq.len = save_len;
297 /* j increments twice per loop */ 308 s->full = 1;
298 len -= j / 2; 309 return;
299 hex[j++] = ' ';
300
301 trace_seq_putmem(s, hex, j);
302 } 310 }
303} 311}
304EXPORT_SYMBOL_GPL(trace_seq_putmem_hex); 312EXPORT_SYMBOL_GPL(trace_seq_putmem_hex);
@@ -317,30 +325,27 @@ EXPORT_SYMBOL_GPL(trace_seq_putmem_hex);
317 */ 325 */
318int trace_seq_path(struct trace_seq *s, const struct path *path) 326int trace_seq_path(struct trace_seq *s, const struct path *path)
319{ 327{
320 unsigned char *p; 328 unsigned int save_len = s->seq.len;
321 329
322 if (s->full) 330 if (s->full)
323 return 0; 331 return 0;
324 332
333 __trace_seq_init(s);
334
325 if (TRACE_SEQ_BUF_LEFT(s) < 1) { 335 if (TRACE_SEQ_BUF_LEFT(s) < 1) {
326 s->full = 1; 336 s->full = 1;
327 return 0; 337 return 0;
328 } 338 }
329 339
330 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len); 340 seq_buf_path(&s->seq, path, "\n");
331 if (!IS_ERR(p)) { 341
332 p = mangle_path(s->buffer + s->len, p, "\n"); 342 if (unlikely(seq_buf_has_overflowed(&s->seq))) {
333 if (p) { 343 s->seq.len = save_len;
334 s->len = p - s->buffer; 344 s->full = 1;
335 return 1; 345 return 0;
336 }
337 } else {
338 s->buffer[s->len++] = '?';
339 return 1;
340 } 346 }
341 347
342 s->full = 1; 348 return 1;
343 return 0;
344} 349}
345EXPORT_SYMBOL_GPL(trace_seq_path); 350EXPORT_SYMBOL_GPL(trace_seq_path);
346 351
@@ -366,25 +371,7 @@ EXPORT_SYMBOL_GPL(trace_seq_path);
366 */ 371 */
367int trace_seq_to_user(struct trace_seq *s, char __user *ubuf, int cnt) 372int trace_seq_to_user(struct trace_seq *s, char __user *ubuf, int cnt)
368{ 373{
369 int len; 374 __trace_seq_init(s);
370 int ret; 375 return seq_buf_to_user(&s->seq, ubuf, cnt);
371
372 if (!cnt)
373 return 0;
374
375 if (s->len <= s->readpos)
376 return -EBUSY;
377
378 len = s->len - s->readpos;
379 if (cnt > len)
380 cnt = len;
381 ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
382 if (ret == cnt)
383 return -EFAULT;
384
385 cnt -= ret;
386
387 s->readpos += cnt;
388 return cnt;
389} 376}
390EXPORT_SYMBOL_GPL(trace_seq_to_user); 377EXPORT_SYMBOL_GPL(trace_seq_to_user);
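
Editor's note: every conversion in this file leans on two conventions worth calling out: the lazy __trace_seq_init() at each entry point, which makes a merely zeroed trace_seq usable, and the save_len/restore dance that preserves trace_seq's all-or-nothing semantics on top of seq_buf. A standalone illustration of the lazy-init half (userspace stand-in; the struct and names are simplifications, not the kernel types):

    #include <stdio.h>
    #include <string.h>

    struct sketch_seq {                /* stand-in, not the kernel types */
            char buf[64];
            size_t size;               /* stays 0 until first use */
            size_t len;
    };

    static void sketch_puts(struct sketch_seq *s, const char *str)
    {
            size_t n = strlen(str);

            if (!s->size)              /* cf. __trace_seq_init() */
                    s->size = sizeof(s->buf);
            if (s->len + n <= s->size) {
                    memcpy(s->buf + s->len, str, n);
                    s->len += n;
            }
    }

    int main(void)
    {
            struct sketch_seq s = { 0 };   /* zeroed, never explicitly initialized */

            sketch_puts(&s, "usable anyway\n");
            fwrite(s.buf, 1, s.len, stdout);
            return 0;
    }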
diff --git a/lib/Makefile b/lib/Makefile
index 0211d2bd5e17..923a191eaf71 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -13,7 +13,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
13 sha1.o md5.o irq_regs.o argv_split.o \ 13 sha1.o md5.o irq_regs.o argv_split.o \
14 proportions.o flex_proportions.o ratelimit.o show_mem.o \ 14 proportions.o flex_proportions.o ratelimit.o show_mem.o \
15 is_single_threaded.o plist.o decompress.o kobject_uevent.o \ 15 is_single_threaded.o plist.o decompress.o kobject_uevent.o \
16 earlycpio.o 16 earlycpio.o seq_buf.o
17 17
18obj-$(CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS) += usercopy.o 18obj-$(CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS) += usercopy.o
19lib-$(CONFIG_MMU) += ioremap.o 19lib-$(CONFIG_MMU) += ioremap.o
diff --git a/lib/seq_buf.c b/lib/seq_buf.c
new file mode 100644
index 000000000000..4eedfedb9e31
--- /dev/null
+++ b/lib/seq_buf.c
@@ -0,0 +1,359 @@
1/*
2 * seq_buf.c
3 *
4 * Copyright (C) 2014 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
5 *
 6 * The seq_buf is a handy tool that lets you pass around a descriptor
 7 * for a buffer that other functions can write to. It is similar to the
8 * seq_file functionality but has some differences.
9 *
10 * To use it, the seq_buf must be initialized with seq_buf_init().
11 * This will set up the counters within the descriptor. You can call
12 * seq_buf_init() more than once to reset the seq_buf to start
13 * from scratch.
14 */
15#include <linux/uaccess.h>
16#include <linux/seq_file.h>
17#include <linux/seq_buf.h>
18
19/**
20 * seq_buf_can_fit - can the new data fit in the current buffer?
21 * @s: the seq_buf descriptor
22 * @len: The length to see if it can fit in the current buffer
23 *
24 * Returns true if there's enough unused space in the seq_buf buffer
25 * to fit the amount of new data according to @len.
26 */
27static bool seq_buf_can_fit(struct seq_buf *s, size_t len)
28{
29 return s->len + len <= s->size;
30}
31
32/**
33 * seq_buf_print_seq - move the contents of seq_buf into a seq_file
34 * @m: the seq_file descriptor that is the destination
35 * @s: the seq_buf descriptor that is the source.
36 *
 37 * Returns zero on success, non-zero otherwise
38 */
39int seq_buf_print_seq(struct seq_file *m, struct seq_buf *s)
40{
41 unsigned int len = seq_buf_used(s);
42
43 return seq_write(m, s->buffer, len);
44}
45
46/**
47 * seq_buf_vprintf - sequence printing of information.
48 * @s: seq_buf descriptor
49 * @fmt: printf format string
50 * @args: va_list of arguments from a printf() type function
51 *
 52 * Writes a vsnprintf() formatted string into the sequence buffer.
53 *
54 * Returns zero on success, -1 on overflow.
55 */
56int seq_buf_vprintf(struct seq_buf *s, const char *fmt, va_list args)
57{
58 int len;
59
60 WARN_ON(s->size == 0);
61
62 if (s->len < s->size) {
63 len = vsnprintf(s->buffer + s->len, s->size - s->len, fmt, args);
64 if (seq_buf_can_fit(s, len)) {
65 s->len += len;
66 return 0;
67 }
68 }
69 seq_buf_set_overflow(s);
70 return -1;
71}
72
73/**
74 * seq_buf_printf - sequence printing of information
75 * @s: seq_buf descriptor
76 * @fmt: printf format string
77 *
78 * Writes a printf() format into the sequence buffer.
79 *
80 * Returns zero on success, -1 on overflow.
81 */
82int seq_buf_printf(struct seq_buf *s, const char *fmt, ...)
83{
84 va_list ap;
85 int ret;
86
87 va_start(ap, fmt);
88 ret = seq_buf_vprintf(s, fmt, ap);
89 va_end(ap);
90
91 return ret;
92}
93
94/**
95 * seq_buf_bitmask - write a bitmask array in its ASCII representation
96 * @s: seq_buf descriptor
97 * @maskp: points to an array of unsigned longs that represent a bitmask
98 * @nmaskbits: The number of bits that are valid in @maskp
99 *
 100 * Writes an ASCII representation of the bitmask into @s.
101 *
102 * Returns zero on success, -1 on overflow.
103 */
104int seq_buf_bitmask(struct seq_buf *s, const unsigned long *maskp,
105 int nmaskbits)
106{
107 unsigned int len = seq_buf_buffer_left(s);
108 int ret;
109
110 WARN_ON(s->size == 0);
111
112 /*
113 * Note, because bitmap_scnprintf() only returns the number of bytes
114 * written and not the number that would be written, we use the last
115 * byte of the buffer to let us know if we overflowed. There's a small
 116 * chance that the bitmap fit exactly inside the buffer; in that case
 117 * we falsely report an overflow, which is not critical.
118 */
119 if (len > 1) {
120 ret = bitmap_scnprintf(s->buffer + s->len, len, maskp, nmaskbits);
121 if (ret < len) {
122 s->len += ret;
123 return 0;
124 }
125 }
126 seq_buf_set_overflow(s);
127 return -1;
128}
129
130#ifdef CONFIG_BINARY_PRINTF
131/**
132 * seq_buf_bprintf - Write the printf string from binary arguments
133 * @s: seq_buf descriptor
134 * @fmt: The format string for the @binary arguments
135 * @binary: The binary arguments for @fmt.
136 *
137 * When recording in a fast path, a printf may be recorded with just
138 * saving the format and the arguments as they were passed to the
139 * function, instead of wasting cycles converting the arguments into
140 * ASCII characters. Instead, the arguments are saved in a 32 bit
141 * word array that is defined by the format string constraints.
142 *
143 * This function will take the format and the binary array and finish
144 * the conversion into the ASCII string within the buffer.
145 *
146 * Returns zero on success, -1 on overflow.
147 */
148int seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary)
149{
150 unsigned int len = seq_buf_buffer_left(s);
151 int ret;
152
153 WARN_ON(s->size == 0);
154
155 if (s->len < s->size) {
156 ret = bstr_printf(s->buffer + s->len, len, fmt, binary);
157 if (seq_buf_can_fit(s, ret)) {
158 s->len += ret;
159 return 0;
160 }
161 }
162 seq_buf_set_overflow(s);
163 return -1;
164}
165#endif /* CONFIG_BINARY_PRINTF */
166
167/**
168 * seq_buf_puts - sequence printing of simple string
169 * @s: seq_buf descriptor
170 * @str: simple string to record
171 *
172 * Copy a simple string into the sequence buffer.
173 *
174 * Returns zero on success, -1 on overflow
175 */
176int seq_buf_puts(struct seq_buf *s, const char *str)
177{
178 unsigned int len = strlen(str);
179
180 WARN_ON(s->size == 0);
181
182 if (seq_buf_can_fit(s, len)) {
183 memcpy(s->buffer + s->len, str, len);
184 s->len += len;
185 return 0;
186 }
187 seq_buf_set_overflow(s);
188 return -1;
189}
190
191/**
192 * seq_buf_putc - sequence printing of simple character
193 * @s: seq_buf descriptor
194 * @c: simple character to record
195 *
196 * Copy a single character into the sequence buffer.
197 *
198 * Returns zero on success, -1 on overflow
199 */
200int seq_buf_putc(struct seq_buf *s, unsigned char c)
201{
202 WARN_ON(s->size == 0);
203
204 if (seq_buf_can_fit(s, 1)) {
205 s->buffer[s->len++] = c;
206 return 0;
207 }
208 seq_buf_set_overflow(s);
209 return -1;
210}
211
212/**
 213 * seq_buf_putmem - write raw data into the sequence buffer
214 * @s: seq_buf descriptor
215 * @mem: The raw memory to copy into the buffer
216 * @len: The length of the raw memory to copy (in bytes)
217 *
218 * There may be cases where raw memory needs to be written into the
219 * buffer and a strcpy() would not work. Using this function allows
220 * for such cases.
221 *
222 * Returns zero on success, -1 on overflow
223 */
224int seq_buf_putmem(struct seq_buf *s, const void *mem, unsigned int len)
225{
226 WARN_ON(s->size == 0);
227
228 if (seq_buf_can_fit(s, len)) {
229 memcpy(s->buffer + s->len, mem, len);
230 s->len += len;
231 return 0;
232 }
233 seq_buf_set_overflow(s);
234 return -1;
235}
236
237#define MAX_MEMHEX_BYTES 8U
238#define HEX_CHARS (MAX_MEMHEX_BYTES*2 + 1)
239
240/**
241 * seq_buf_putmem_hex - write raw memory into the buffer in ASCII hex
242 * @s: seq_buf descriptor
243 * @mem: The raw memory to write its hex ASCII representation of
244 * @len: The length of the raw memory to copy (in bytes)
245 *
246 * This is similar to seq_buf_putmem() except instead of just copying the
247 * raw memory into the buffer it writes its ASCII representation of it
248 * in hex characters.
249 *
250 * Returns zero on success, -1 on overflow
251 */
252int seq_buf_putmem_hex(struct seq_buf *s, const void *mem,
253 unsigned int len)
254{
255 unsigned char hex[HEX_CHARS];
256 const unsigned char *data = mem;
257 unsigned int start_len;
258 int i, j;
259
260 WARN_ON(s->size == 0);
261
262 while (len) {
263 start_len = min(len, HEX_CHARS - 1);
264#ifdef __BIG_ENDIAN
265 for (i = 0, j = 0; i < start_len; i++) {
266#else
267 for (i = start_len-1, j = 0; i >= 0; i--) {
268#endif
269 hex[j++] = hex_asc_hi(data[i]);
270 hex[j++] = hex_asc_lo(data[i]);
271 }
272 if (WARN_ON_ONCE(j == 0 || j/2 > len))
273 break;
274
275 /* j increments twice per loop */
276 len -= j / 2;
277 hex[j++] = ' ';
278
279 seq_buf_putmem(s, hex, j);
280 if (seq_buf_has_overflowed(s))
281 return -1;
282 }
283 return 0;
284}
285
286/**
287 * seq_buf_path - copy a path into the sequence buffer
288 * @s: seq_buf descriptor
289 * @path: path to write into the sequence buffer.
290 * @esc: set of characters to escape in the output
291 *
292 * Write a path name into the sequence buffer.
293 *
 294 * Returns the number of bytes written on success, -1 on overflow
295 */
296int seq_buf_path(struct seq_buf *s, const struct path *path, const char *esc)
297{
298 char *buf;
299 size_t size = seq_buf_get_buf(s, &buf);
300 int res = -1;
301
302 WARN_ON(s->size == 0);
303
304 if (size) {
305 char *p = d_path(path, buf, size);
306 if (!IS_ERR(p)) {
307 char *end = mangle_path(buf, p, esc);
308 if (end)
309 res = end - buf;
310 }
311 }
312 seq_buf_commit(s, res);
313
314 return res;
315}
316
317/**
 318 * seq_buf_to_user - copy the sequence buffer to user space
319 * @s: seq_buf descriptor
320 * @ubuf: The userspace memory location to copy to
321 * @cnt: The amount to copy
322 *
323 * Copies the sequence buffer into the userspace memory pointed to
324 * by @ubuf. It starts from the last read position (@s->readpos)
 325 * and writes up to @cnt characters or until it reaches the end of
 326 * the content in the buffer (@s->len), whichever comes first.
327 *
 328 * On success, it returns the number of bytes copied (a positive
 329 * number).
330 *
331 * On failure it returns -EBUSY if all of the content in the
 332 * sequence has already been read, which includes the case of an
 333 * empty sequence (@s->len == @s->readpos).
334 *
335 * Returns -EFAULT if the copy to userspace fails.
336 */
337int seq_buf_to_user(struct seq_buf *s, char __user *ubuf, int cnt)
338{
339 int len;
340 int ret;
341
342 if (!cnt)
343 return 0;
344
345 if (s->len <= s->readpos)
346 return -EBUSY;
347
348 len = seq_buf_used(s) - s->readpos;
349 if (cnt > len)
350 cnt = len;
351 ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
352 if (ret == cnt)
353 return -EFAULT;
354
355 cnt -= ret;
356
357 s->readpos += cnt;
358 return cnt;
359}
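
Editor's note: seq_buf_path() above is the one caller of the seq_buf_get_buf()/seq_buf_commit() pair in this patch, and it shows the intended protocol: borrow the unused tail of the buffer, let a foreign formatter write into it, then commit the byte count it produced, or a negative count to record overflow. A hedged usage fragment of the same protocol (kernel-style, illustrative: 'pid' is a made-up value and scnprintf() merely stands in for any routine that writes at most the given size):

    char *p;
    size_t avail = seq_buf_get_buf(s, &p);  /* unused tail; may be 0 */
    int written = -1;                       /* negative => overflow  */

    if (avail) {
            /*
             * Any routine bounded by 'avail' may write here; one that
             * cannot fit its output should leave 'written' negative.
             */
            written = scnprintf(p, avail, "pid=%d", pid);
    }
    seq_buf_commit(s, written);     /* advance s->len or flag overflow */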