Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cgroup.c          |   4
-rw-r--r--  kernel/exit.c            |   7
-rw-r--r--  kernel/kexec.c           |   8
-rw-r--r--  kernel/kfifo.c           | 749
-rw-r--r--  kernel/panic.c           |  60
-rw-r--r--  kernel/pid.c             |  56
-rw-r--r--  kernel/power/block_io.c  |   2
-rw-r--r--  kernel/ptrace.c          |  12
-rw-r--r--  kernel/timer.c           |   1
-rw-r--r--  kernel/trace/blktrace.c  |  80
10 files changed, 611 insertions, 368 deletions
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index d83cab06da87..192f88c5b0f9 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1102,7 +1102,7 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
 			if (opts->release_agent)
 				return -EINVAL;
 			opts->release_agent =
-				kstrndup(token + 14, PATH_MAX, GFP_KERNEL);
+				kstrndup(token + 14, PATH_MAX - 1, GFP_KERNEL);
 			if (!opts->release_agent)
 				return -ENOMEM;
 		} else if (!strncmp(token, "name=", 5)) {
@@ -1123,7 +1123,7 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
 			if (opts->name)
 				return -EINVAL;
 			opts->name = kstrndup(name,
-					      MAX_CGROUP_ROOT_NAMELEN,
+					      MAX_CGROUP_ROOT_NAMELEN - 1,
 					      GFP_KERNEL);
 			if (!opts->name)
 				return -ENOMEM;
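A note on the fix above: kstrndup(s, max, gfp) copies at most max characters and always appends a terminating NUL, so the allocation can be max + 1 bytes. Passing PATH_MAX (or MAX_CGROUP_ROOT_NAMELEN) therefore allowed results one byte larger than the intended limit; passing the limit minus one keeps the string plus its NUL within bounds. A minimal sketch of the semantics (hypothetical helper, illustrative only):

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/limits.h>

/* Sketch: duplicate an option value so that the result, including the
 * trailing '\0' that kstrndup() appends, never exceeds PATH_MAX bytes. */
static char *dup_agent_path(const char *val)
{
	return kstrndup(val, PATH_MAX - 1, GFP_KERNEL);
}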
diff --git a/kernel/exit.c b/kernel/exit.c
index ceffc67b564a..671ed56e0a49 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -771,9 +771,12 @@ static void forget_original_parent(struct task_struct *father)
 	struct task_struct *p, *n, *reaper;
 	LIST_HEAD(dead_children);
 
-	exit_ptrace(father);
-
 	write_lock_irq(&tasklist_lock);
+	/*
+	 * Note that exit_ptrace() and find_new_reaper() might
+	 * drop tasklist_lock and reacquire it.
+	 */
+	exit_ptrace(father);
 	reaper = find_new_reaper(father);
 
 	list_for_each_entry_safe(p, n, &father->children, sibling) {
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 131b1703936f..c0613f7d6730 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -151,8 +151,10 @@ static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
 	image->nr_segments = nr_segments;
 	segment_bytes = nr_segments * sizeof(*segments);
 	result = copy_from_user(image->segment, segments, segment_bytes);
-	if (result)
+	if (result) {
+		result = -EFAULT;
 		goto out;
+	}
 
 	/*
 	 * Verify we have good destination addresses.  The caller is
@@ -827,7 +829,7 @@ static int kimage_load_normal_segment(struct kimage *image,
 		result = copy_from_user(ptr, buf, uchunk);
 		kunmap(page);
 		if (result) {
-			result = (result < 0) ? result : -EIO;
+			result = -EFAULT;
 			goto out;
 		}
 		ubytes -= uchunk;
@@ -882,7 +884,7 @@ static int kimage_load_crash_segment(struct kimage *image,
 		kexec_flush_icache_page(page);
 		kunmap(page);
 		if (result) {
-			result = (result < 0) ? result : -EIO;
+			result = -EFAULT;
 			goto out;
 		}
 		ubytes -= uchunk;
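The error-handling change relies on the copy_from_user() convention: it returns the number of bytes that could not be copied, never a negative errno, so the old (result < 0) test could never fire, and in the first hunk a positive residue could even escape as the return value. A sketch of the convention (hypothetical wrapper):

#include <linux/uaccess.h>

/* copy_from_user() returns 0 on success or the count of bytes left
 * uncopied; translate any nonzero residue into -EFAULT ourselves. */
static int copy_segments_checked(void *dst, const void __user *src, size_t len)
{
	if (copy_from_user(dst, src, len))
		return -EFAULT;
	return 0;
}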
diff --git a/kernel/kfifo.c b/kernel/kfifo.c
index 35edbe22e9a9..02192dd905cc 100644
--- a/kernel/kfifo.c
+++ b/kernel/kfifo.c
@@ -1,8 +1,7 @@
 /*
- * A generic kernel FIFO implementation.
+ * A generic kernel FIFO implementation
  *
- * Copyright (C) 2009 Stefani Seibold <stefani@seibold.net>
- * Copyright (C) 2004 Stelian Pop <stelian@popies.net>
+ * Copyright (C) 2009/2010 Stefani Seibold <stefani@seibold.net>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -24,422 +23,580 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/err.h>
-#include <linux/kfifo.h>
 #include <linux/log2.h>
 #include <linux/uaccess.h>
+#include <linux/kfifo.h>
 
-static void _kfifo_init(struct kfifo *fifo, void *buffer,
-		unsigned int size)
-{
-	fifo->buffer = buffer;
-	fifo->size = size;
-
-	kfifo_reset(fifo);
-}
-
-/**
- * kfifo_init - initialize a FIFO using a preallocated buffer
- * @fifo: the fifo to assign the buffer
- * @buffer: the preallocated buffer to be used.
- * @size: the size of the internal buffer, this has to be a power of 2.
- *
+/*
+ * internal helper to calculate the unused elements in a fifo
  */
-void kfifo_init(struct kfifo *fifo, void *buffer, unsigned int size)
+static inline unsigned int kfifo_unused(struct __kfifo *fifo)
 {
-	/* size must be a power of 2 */
-	BUG_ON(!is_power_of_2(size));
-
-	_kfifo_init(fifo, buffer, size);
+	return (fifo->mask + 1) - (fifo->in - fifo->out);
 }
-EXPORT_SYMBOL(kfifo_init);
 
-/**
- * kfifo_alloc - allocates a new FIFO internal buffer
- * @fifo: the fifo to assign then new buffer
- * @size: the size of the buffer to be allocated, this have to be a power of 2.
- * @gfp_mask: get_free_pages mask, passed to kmalloc()
- *
- * This function dynamically allocates a new fifo internal buffer
- *
- * The size will be rounded-up to a power of 2.
- * The buffer will be release with kfifo_free().
- * Return 0 if no error, otherwise the an error code
- */
-int kfifo_alloc(struct kfifo *fifo, unsigned int size, gfp_t gfp_mask)
+int __kfifo_alloc(struct __kfifo *fifo, unsigned int size,
+		size_t esize, gfp_t gfp_mask)
 {
-	unsigned char *buffer;
-
 	/*
-	 * round up to the next power of 2, since our 'let the indices
+	 * round down to the next power of 2, since our 'let the indices
 	 * wrap' technique works only in this case.
 	 */
-	if (!is_power_of_2(size)) {
-		BUG_ON(size > 0x80000000);
-		size = roundup_pow_of_two(size);
+	if (!is_power_of_2(size))
+		size = rounddown_pow_of_two(size);
+
+	fifo->in = 0;
+	fifo->out = 0;
+	fifo->esize = esize;
+
+	if (size < 2) {
+		fifo->data = NULL;
+		fifo->mask = 0;
+		return -EINVAL;
 	}
 
-	buffer = kmalloc(size, gfp_mask);
-	if (!buffer) {
-		_kfifo_init(fifo, NULL, 0);
+	fifo->data = kmalloc(size * esize, gfp_mask);
+
+	if (!fifo->data) {
+		fifo->mask = 0;
 		return -ENOMEM;
 	}
-
-	_kfifo_init(fifo, buffer, size);
+	fifo->mask = size - 1;
 
 	return 0;
 }
-EXPORT_SYMBOL(kfifo_alloc);
+EXPORT_SYMBOL(__kfifo_alloc);
 
-/**
- * kfifo_free - frees the FIFO internal buffer
- * @fifo: the fifo to be freed.
- */
-void kfifo_free(struct kfifo *fifo)
+void __kfifo_free(struct __kfifo *fifo)
 {
-	kfree(fifo->buffer);
-	_kfifo_init(fifo, NULL, 0);
+	kfree(fifo->data);
+	fifo->in = 0;
+	fifo->out = 0;
+	fifo->esize = 0;
+	fifo->data = NULL;
+	fifo->mask = 0;
 }
-EXPORT_SYMBOL(kfifo_free);
+EXPORT_SYMBOL(__kfifo_free);
 
-/**
- * kfifo_skip - skip output data
- * @fifo: the fifo to be used.
- * @len: number of bytes to skip
- */
-void kfifo_skip(struct kfifo *fifo, unsigned int len)
+int __kfifo_init(struct __kfifo *fifo, void *buffer,
+		unsigned int size, size_t esize)
 {
-	if (len < kfifo_len(fifo)) {
-		__kfifo_add_out(fifo, len);
-		return;
+	size /= esize;
+
+	if (!is_power_of_2(size))
+		size = rounddown_pow_of_two(size);
+
+	fifo->in = 0;
+	fifo->out = 0;
+	fifo->esize = esize;
+	fifo->data = buffer;
+
+	if (size < 2) {
+		fifo->mask = 0;
+		return -EINVAL;
 	}
-	kfifo_reset_out(fifo);
+	fifo->mask = size - 1;
+
+	return 0;
 }
-EXPORT_SYMBOL(kfifo_skip);
+EXPORT_SYMBOL(__kfifo_init);
 
-static inline void __kfifo_in_data(struct kfifo *fifo,
-		const void *from, unsigned int len, unsigned int off)
+static void kfifo_copy_in(struct __kfifo *fifo, const void *src,
+		unsigned int len, unsigned int off)
 {
+	unsigned int size = fifo->mask + 1;
+	unsigned int esize = fifo->esize;
 	unsigned int l;
 
+	off &= fifo->mask;
+	if (esize != 1) {
+		off *= esize;
+		size *= esize;
+		len *= esize;
+	}
+	l = min(len, size - off);
+
+	memcpy(fifo->data + off, src, l);
+	memcpy(fifo->data, src + l, len - l);
 	/*
-	 * Ensure that we sample the fifo->out index -before- we
-	 * start putting bytes into the kfifo.
+	 * make sure that the data in the fifo is up to date before
+	 * incrementing the fifo->in index counter
 	 */
+	smp_wmb();
+}
 
-	smp_mb();
-
-	off = __kfifo_off(fifo, fifo->in + off);
+unsigned int __kfifo_in(struct __kfifo *fifo,
+		const void *buf, unsigned int len)
+{
+	unsigned int l;
 
-	/* first put the data starting from fifo->in to buffer end */
-	l = min(len, fifo->size - off);
-	memcpy(fifo->buffer + off, from, l);
+	l = kfifo_unused(fifo);
+	if (len > l)
+		len = l;
 
-	/* then put the rest (if any) at the beginning of the buffer */
-	memcpy(fifo->buffer, from + l, len - l);
+	kfifo_copy_in(fifo, buf, len, fifo->in);
+	fifo->in += len;
+	return len;
 }
+EXPORT_SYMBOL(__kfifo_in);
 
-static inline void __kfifo_out_data(struct kfifo *fifo,
-		void *to, unsigned int len, unsigned int off)
+static void kfifo_copy_out(struct __kfifo *fifo, void *dst,
+		unsigned int len, unsigned int off)
 {
+	unsigned int size = fifo->mask + 1;
+	unsigned int esize = fifo->esize;
 	unsigned int l;
 
+	off &= fifo->mask;
+	if (esize != 1) {
+		off *= esize;
+		size *= esize;
+		len *= esize;
+	}
+	l = min(len, size - off);
+
+	memcpy(dst, fifo->data + off, l);
+	memcpy(dst + l, fifo->data, len - l);
 	/*
-	 * Ensure that we sample the fifo->in index -before- we
-	 * start removing bytes from the kfifo.
+	 * make sure that the data is copied before
+	 * incrementing the fifo->out index counter
 	 */
+	smp_wmb();
+}
 
-	smp_rmb();
+unsigned int __kfifo_out_peek(struct __kfifo *fifo,
+		void *buf, unsigned int len)
+{
+	unsigned int l;
 
-	off = __kfifo_off(fifo, fifo->out + off);
+	l = fifo->in - fifo->out;
+	if (len > l)
+		len = l;
 
-	/* first get the data from fifo->out until the end of the buffer */
-	l = min(len, fifo->size - off);
-	memcpy(to, fifo->buffer + off, l);
+	kfifo_copy_out(fifo, buf, len, fifo->out);
+	return len;
+}
+EXPORT_SYMBOL(__kfifo_out_peek);
 
-	/* then get the rest (if any) from the beginning of the buffer */
-	memcpy(to + l, fifo->buffer, len - l);
+unsigned int __kfifo_out(struct __kfifo *fifo,
+		void *buf, unsigned int len)
+{
+	len = __kfifo_out_peek(fifo, buf, len);
+	fifo->out += len;
+	return len;
 }
+EXPORT_SYMBOL(__kfifo_out);
 
-static inline int __kfifo_from_user_data(struct kfifo *fifo,
-	const void __user *from, unsigned int len, unsigned int off,
-	unsigned *lenout)
+static unsigned long kfifo_copy_from_user(struct __kfifo *fifo,
+	const void __user *from, unsigned int len, unsigned int off,
+	unsigned int *copied)
 {
+	unsigned int size = fifo->mask + 1;
+	unsigned int esize = fifo->esize;
 	unsigned int l;
-	int ret;
+	unsigned long ret;
 
+	off &= fifo->mask;
+	if (esize != 1) {
+		off *= esize;
+		size *= esize;
+		len *= esize;
+	}
+	l = min(len, size - off);
+
+	ret = copy_from_user(fifo->data + off, from, l);
+	if (unlikely(ret))
+		ret = DIV_ROUND_UP(ret + len - l, esize);
+	else {
+		ret = copy_from_user(fifo->data, from + l, len - l);
+		if (unlikely(ret))
+			ret = DIV_ROUND_UP(ret, esize);
+	}
 	/*
-	 * Ensure that we sample the fifo->out index -before- we
-	 * start putting bytes into the kfifo.
+	 * make sure that the data in the fifo is up to date before
+	 * incrementing the fifo->in index counter
 	 */
+	smp_wmb();
+	*copied = len - ret;
+	/* return the number of elements which are not copied */
+	return ret;
+}
 
-	smp_mb();
+int __kfifo_from_user(struct __kfifo *fifo, const void __user *from,
+		unsigned long len, unsigned int *copied)
+{
+	unsigned int l;
+	unsigned long ret;
+	unsigned int esize = fifo->esize;
+	int err;
 
-	off = __kfifo_off(fifo, fifo->in + off);
+	if (esize != 1)
+		len /= esize;
 
-	/* first put the data starting from fifo->in to buffer end */
-	l = min(len, fifo->size - off);
-	ret = copy_from_user(fifo->buffer + off, from, l);
-	if (unlikely(ret)) {
-		*lenout = ret;
-		return -EFAULT;
-	}
-	*lenout = l;
+	l = kfifo_unused(fifo);
+	if (len > l)
+		len = l;
 
-	/* then put the rest (if any) at the beginning of the buffer */
-	ret = copy_from_user(fifo->buffer, from + l, len - l);
-	*lenout += ret ? ret : len - l;
-	return ret ? -EFAULT : 0;
+	ret = kfifo_copy_from_user(fifo, from, len, fifo->in, copied);
+	if (unlikely(ret)) {
+		len -= ret;
+		err = -EFAULT;
+	} else
+		err = 0;
+	fifo->in += len;
+	return err;
 }
+EXPORT_SYMBOL(__kfifo_from_user);
 
-static inline int __kfifo_to_user_data(struct kfifo *fifo,
-	void __user *to, unsigned int len, unsigned int off, unsigned *lenout)
+static unsigned long kfifo_copy_to_user(struct __kfifo *fifo, void __user *to,
+	unsigned int len, unsigned int off, unsigned int *copied)
 {
 	unsigned int l;
-	int ret;
+	unsigned long ret;
+	unsigned int size = fifo->mask + 1;
+	unsigned int esize = fifo->esize;
+
+	off &= fifo->mask;
+	if (esize != 1) {
+		off *= esize;
+		size *= esize;
+		len *= esize;
+	}
+	l = min(len, size - off);
 
+	ret = copy_to_user(to, fifo->data + off, l);
+	if (unlikely(ret))
+		ret = DIV_ROUND_UP(ret + len - l, esize);
+	else {
+		ret = copy_to_user(to + l, fifo->data, len - l);
+		if (unlikely(ret))
+			ret = DIV_ROUND_UP(ret, esize);
+	}
 	/*
-	 * Ensure that we sample the fifo->in index -before- we
-	 * start removing bytes from the kfifo.
+	 * make sure that the data is copied before
+	 * incrementing the fifo->out index counter
 	 */
+	smp_wmb();
+	*copied = len - ret;
+	/* return the number of elements which are not copied */
+	return ret;
+}
 
-	smp_rmb();
+int __kfifo_to_user(struct __kfifo *fifo, void __user *to,
+		unsigned long len, unsigned int *copied)
+{
+	unsigned int l;
+	unsigned long ret;
+	unsigned int esize = fifo->esize;
+	int err;
 
-	off = __kfifo_off(fifo, fifo->out + off);
+	if (esize != 1)
+		len /= esize;
 
-	/* first get the data from fifo->out until the end of the buffer */
-	l = min(len, fifo->size - off);
-	ret = copy_to_user(to, fifo->buffer + off, l);
-	*lenout = l;
+	l = fifo->in - fifo->out;
+	if (len > l)
+		len = l;
+	ret = kfifo_copy_to_user(fifo, to, len, fifo->out, copied);
 	if (unlikely(ret)) {
-		*lenout -= ret;
-		return -EFAULT;
-	}
+		len -= ret;
+		err = -EFAULT;
+	} else
+		err = 0;
+	fifo->out += len;
+	return err;
+}
+EXPORT_SYMBOL(__kfifo_to_user);
 
-	/* then get the rest (if any) from the beginning of the buffer */
-	len -= l;
-	ret = copy_to_user(to + l, fifo->buffer, len);
-	if (unlikely(ret)) {
-		*lenout += len - ret;
-		return -EFAULT;
+static int setup_sgl_buf(struct scatterlist *sgl, void *buf,
+		int nents, unsigned int len)
+{
+	int n;
+	unsigned int l;
+	unsigned int off;
+	struct page *page;
+
+	if (!nents)
+		return 0;
+
+	if (!len)
+		return 0;
+
+	n = 0;
+	page = virt_to_page(buf);
+	off = offset_in_page(buf);
+	l = 0;
+
+	while (len >= l + PAGE_SIZE - off) {
+		struct page *npage;
+
+		l += PAGE_SIZE;
+		buf += PAGE_SIZE;
+		npage = virt_to_page(buf);
+		if (page_to_phys(page) != page_to_phys(npage) - l) {
+			sgl->page_link = 0;
+			sg_set_page(sgl++, page, l - off, off);
+			if (++n == nents)
+				return n;
+			page = npage;
+			len -= l - off;
+			l = off = 0;
+		}
 	}
-	*lenout += len;
-	return 0;
+	sgl->page_link = 0;
+	sg_set_page(sgl++, page, len, off);
+	return n + 1;
 }
 
-unsigned int __kfifo_in_n(struct kfifo *fifo,
-	const void *from, unsigned int len, unsigned int recsize)
+static unsigned int setup_sgl(struct __kfifo *fifo, struct scatterlist *sgl,
+		int nents, unsigned int len, unsigned int off)
 {
-	if (kfifo_avail(fifo) < len + recsize)
-		return len + 1;
+	unsigned int size = fifo->mask + 1;
+	unsigned int esize = fifo->esize;
+	unsigned int l;
+	unsigned int n;
 
-	__kfifo_in_data(fifo, from, len, recsize);
-	return 0;
+	off &= fifo->mask;
+	if (esize != 1) {
+		off *= esize;
+		size *= esize;
+		len *= esize;
+	}
+	l = min(len, size - off);
+
+	n = setup_sgl_buf(sgl, fifo->data + off, nents, l);
+	n += setup_sgl_buf(sgl + n, fifo->data, nents - n, len - l);
+
+	if (n)
+		sg_mark_end(sgl + n - 1);
+	return n;
 }
-EXPORT_SYMBOL(__kfifo_in_n);
 
-/**
- * kfifo_in - puts some data into the FIFO
- * @fifo: the fifo to be used.
- * @from: the data to be added.
- * @len: the length of the data to be added.
- *
- * This function copies at most @len bytes from the @from buffer into
- * the FIFO depending on the free space, and returns the number of
- * bytes copied.
- *
- * Note that with only one concurrent reader and one concurrent
- * writer, you don't need extra locking to use these functions.
- */
-unsigned int kfifo_in(struct kfifo *fifo, const void *from,
-				unsigned int len)
+unsigned int __kfifo_dma_in_prepare(struct __kfifo *fifo,
+		struct scatterlist *sgl, int nents, unsigned int len)
 {
-	len = min(kfifo_avail(fifo), len);
+	unsigned int l;
 
-	__kfifo_in_data(fifo, from, len, 0);
-	__kfifo_add_in(fifo, len);
-	return len;
+	l = kfifo_unused(fifo);
+	if (len > l)
+		len = l;
+
+	return setup_sgl(fifo, sgl, nents, len, fifo->in);
 }
-EXPORT_SYMBOL(kfifo_in);
+EXPORT_SYMBOL(__kfifo_dma_in_prepare);
 
-unsigned int __kfifo_in_generic(struct kfifo *fifo,
-	const void *from, unsigned int len, unsigned int recsize)
+unsigned int __kfifo_dma_out_prepare(struct __kfifo *fifo,
+		struct scatterlist *sgl, int nents, unsigned int len)
 {
-	return __kfifo_in_rec(fifo, from, len, recsize);
+	unsigned int l;
+
+	l = fifo->in - fifo->out;
+	if (len > l)
+		len = l;
+
+	return setup_sgl(fifo, sgl, nents, len, fifo->out);
 }
-EXPORT_SYMBOL(__kfifo_in_generic);
+EXPORT_SYMBOL(__kfifo_dma_out_prepare);
 
-unsigned int __kfifo_out_n(struct kfifo *fifo,
-	void *to, unsigned int len, unsigned int recsize)
+unsigned int __kfifo_max_r(unsigned int len, size_t recsize)
 {
-	if (kfifo_len(fifo) < len + recsize)
-		return len;
+	unsigned int max = (1 << (recsize << 3)) - 1;
 
-	__kfifo_out_data(fifo, to, len, recsize);
-	__kfifo_add_out(fifo, len + recsize);
-	return 0;
+	if (len > max)
+		return max;
+	return len;
 }
-EXPORT_SYMBOL(__kfifo_out_n);
 
-/**
- * kfifo_out - gets some data from the FIFO
- * @fifo: the fifo to be used.
- * @to: where the data must be copied.
- * @len: the size of the destination buffer.
- *
- * This function copies at most @len bytes from the FIFO into the
- * @to buffer and returns the number of copied bytes.
- *
- * Note that with only one concurrent reader and one concurrent
- * writer, you don't need extra locking to use these functions.
+#define	__KFIFO_PEEK(data, out, mask) \
+	((data)[(out) & (mask)])
+/*
+ * __kfifo_peek_n internal helper function for determinate the length of
+ * the next record in the fifo
  */
-unsigned int kfifo_out(struct kfifo *fifo, void *to, unsigned int len)
+static unsigned int __kfifo_peek_n(struct __kfifo *fifo, size_t recsize)
 {
-	len = min(kfifo_len(fifo), len);
+	unsigned int l;
+	unsigned int mask = fifo->mask;
+	unsigned char *data = fifo->data;
 
-	__kfifo_out_data(fifo, to, len, 0);
-	__kfifo_add_out(fifo, len);
+	l = __KFIFO_PEEK(data, fifo->out, mask);
 
-	return len;
+	if (--recsize)
+		l |= __KFIFO_PEEK(data, fifo->out + 1, mask) << 8;
+
+	return l;
 }
-EXPORT_SYMBOL(kfifo_out);
 
-/**
- * kfifo_out_peek - copy some data from the FIFO, but do not remove it
- * @fifo: the fifo to be used.
- * @to: where the data must be copied.
- * @len: the size of the destination buffer.
- * @offset: offset into the fifo
- *
- * This function copies at most @len bytes at @offset from the FIFO
- * into the @to buffer and returns the number of copied bytes.
- * The data is not removed from the FIFO.
+#define	__KFIFO_POKE(data, in, mask, val) \
+	( \
+	(data)[(in) & (mask)] = (unsigned char)(val) \
+	)
+
+/*
+ * __kfifo_poke_n internal helper function for storeing the length of
+ * the record into the fifo
  */
-unsigned int kfifo_out_peek(struct kfifo *fifo, void *to, unsigned int len,
-			    unsigned offset)
+static void __kfifo_poke_n(struct __kfifo *fifo, unsigned int n, size_t recsize)
 {
-	len = min(kfifo_len(fifo), len + offset);
+	unsigned int mask = fifo->mask;
+	unsigned char *data = fifo->data;
 
-	__kfifo_out_data(fifo, to, len, offset);
-	return len;
+	__KFIFO_POKE(data, fifo->in, mask, n);
+
+	if (recsize > 1)
+		__KFIFO_POKE(data, fifo->in + 1, mask, n >> 8);
 }
-EXPORT_SYMBOL(kfifo_out_peek);
 
-unsigned int __kfifo_out_generic(struct kfifo *fifo,
-	void *to, unsigned int len, unsigned int recsize,
-	unsigned int *total)
+unsigned int __kfifo_len_r(struct __kfifo *fifo, size_t recsize)
 {
-	return __kfifo_out_rec(fifo, to, len, recsize, total);
+	return __kfifo_peek_n(fifo, recsize);
 }
-EXPORT_SYMBOL(__kfifo_out_generic);
+EXPORT_SYMBOL(__kfifo_len_r);
 
-unsigned int __kfifo_from_user_n(struct kfifo *fifo,
-	const void __user *from, unsigned int len, unsigned int recsize)
+unsigned int __kfifo_in_r(struct __kfifo *fifo, const void *buf,
+		unsigned int len, size_t recsize)
 {
-	unsigned total;
+	if (len + recsize > kfifo_unused(fifo))
+		return 0;
 
-	if (kfifo_avail(fifo) < len + recsize)
-		return len + 1;
+	__kfifo_poke_n(fifo, len, recsize);
 
-	__kfifo_from_user_data(fifo, from, len, recsize, &total);
-	return total;
+	kfifo_copy_in(fifo, buf, len, fifo->in + recsize);
+	fifo->in += len + recsize;
+	return len;
 }
-EXPORT_SYMBOL(__kfifo_from_user_n);
+EXPORT_SYMBOL(__kfifo_in_r);
 
-/**
- * kfifo_from_user - puts some data from user space into the FIFO
- * @fifo: the fifo to be used.
- * @from: pointer to the data to be added.
- * @len: the length of the data to be added.
- * @total: the actual returned data length.
- *
- * This function copies at most @len bytes from the @from into the
- * FIFO depending and returns -EFAULT/0.
- *
- * Note that with only one concurrent reader and one concurrent
- * writer, you don't need extra locking to use these functions.
- */
-int kfifo_from_user(struct kfifo *fifo,
-	const void __user *from, unsigned int len, unsigned *total)
+static unsigned int kfifo_out_copy_r(struct __kfifo *fifo,
+	void *buf, unsigned int len, size_t recsize, unsigned int *n)
 {
-	int ret;
-	len = min(kfifo_avail(fifo), len);
-	ret = __kfifo_from_user_data(fifo, from, len, 0, total);
-	if (ret)
-		return ret;
-	__kfifo_add_in(fifo, len);
-	return 0;
+	*n = __kfifo_peek_n(fifo, recsize);
+
+	if (len > *n)
+		len = *n;
+
+	kfifo_copy_out(fifo, buf, len, fifo->out + recsize);
+	return len;
+}
+
+unsigned int __kfifo_out_peek_r(struct __kfifo *fifo, void *buf,
+		unsigned int len, size_t recsize)
+{
+	unsigned int n;
+
+	if (fifo->in == fifo->out)
+		return 0;
+
+	return kfifo_out_copy_r(fifo, buf, len, recsize, &n);
 }
-EXPORT_SYMBOL(kfifo_from_user);
+EXPORT_SYMBOL(__kfifo_out_peek_r);
 
-unsigned int __kfifo_from_user_generic(struct kfifo *fifo,
-	const void __user *from, unsigned int len, unsigned int recsize)
+unsigned int __kfifo_out_r(struct __kfifo *fifo, void *buf,
+		unsigned int len, size_t recsize)
 {
-	return __kfifo_from_user_rec(fifo, from, len, recsize);
+	unsigned int n;
+
+	if (fifo->in == fifo->out)
+		return 0;
+
+	len = kfifo_out_copy_r(fifo, buf, len, recsize, &n);
+	fifo->out += n + recsize;
+	return len;
 }
-EXPORT_SYMBOL(__kfifo_from_user_generic);
+EXPORT_SYMBOL(__kfifo_out_r);
 
-unsigned int __kfifo_to_user_n(struct kfifo *fifo,
-	void __user *to, unsigned int len, unsigned int reclen,
-	unsigned int recsize)
+int __kfifo_from_user_r(struct __kfifo *fifo, const void __user *from,
+	unsigned long len, unsigned int *copied, size_t recsize)
 {
-	unsigned int ret, total;
+	unsigned long ret;
 
-	if (kfifo_len(fifo) < reclen + recsize)
-		return len;
+	len = __kfifo_max_r(len, recsize);
 
-	ret = __kfifo_to_user_data(fifo, to, reclen, recsize, &total);
+	if (len + recsize > kfifo_unused(fifo)) {
+		*copied = 0;
+		return 0;
+	}
 
-	if (likely(ret == 0))
-		__kfifo_add_out(fifo, reclen + recsize);
+	__kfifo_poke_n(fifo, len, recsize);
 
-	return total;
+	ret = kfifo_copy_from_user(fifo, from, len, fifo->in + recsize, copied);
+	if (unlikely(ret)) {
+		*copied = 0;
+		return -EFAULT;
+	}
+	fifo->in += len + recsize;
+	return 0;
 }
-EXPORT_SYMBOL(__kfifo_to_user_n);
+EXPORT_SYMBOL(__kfifo_from_user_r);
 
-/**
- * kfifo_to_user - gets data from the FIFO and write it to user space
- * @fifo: the fifo to be used.
- * @to: where the data must be copied.
- * @len: the size of the destination buffer.
- * @lenout: pointer to output variable with copied data
- *
- * This function copies at most @len bytes from the FIFO into the
- * @to buffer and 0 or -EFAULT.
- *
- * Note that with only one concurrent reader and one concurrent
- * writer, you don't need extra locking to use these functions.
- */
-int kfifo_to_user(struct kfifo *fifo,
-	void __user *to, unsigned int len, unsigned *lenout)
+int __kfifo_to_user_r(struct __kfifo *fifo, void __user *to,
+	unsigned long len, unsigned int *copied, size_t recsize)
 {
-	int ret;
-	len = min(kfifo_len(fifo), len);
-	ret = __kfifo_to_user_data(fifo, to, len, 0, lenout);
-	__kfifo_add_out(fifo, *lenout);
-	return ret;
+	unsigned long ret;
+	unsigned int n;
+
+	if (fifo->in == fifo->out) {
+		*copied = 0;
+		return 0;
+	}
+
+	n = __kfifo_peek_n(fifo, recsize);
+	if (len > n)
+		len = n;
+
+	ret = kfifo_copy_to_user(fifo, to, len, fifo->out + recsize, copied);
+	if (unlikely(ret)) {
+		*copied = 0;
+		return -EFAULT;
+	}
+	fifo->out += n + recsize;
+	return 0;
 }
-EXPORT_SYMBOL(kfifo_to_user);
+EXPORT_SYMBOL(__kfifo_to_user_r);
 
-unsigned int __kfifo_to_user_generic(struct kfifo *fifo,
-	void __user *to, unsigned int len, unsigned int recsize,
-	unsigned int *total)
+unsigned int __kfifo_dma_in_prepare_r(struct __kfifo *fifo,
+	struct scatterlist *sgl, int nents, unsigned int len, size_t recsize)
 {
-	return __kfifo_to_user_rec(fifo, to, len, recsize, total);
+	if (!nents)
+		BUG();
+
+	len = __kfifo_max_r(len, recsize);
+
+	if (len + recsize > kfifo_unused(fifo))
+		return 0;
+
+	return setup_sgl(fifo, sgl, nents, len, fifo->in + recsize);
 }
-EXPORT_SYMBOL(__kfifo_to_user_generic);
+EXPORT_SYMBOL(__kfifo_dma_in_prepare_r);
 
-unsigned int __kfifo_peek_generic(struct kfifo *fifo, unsigned int recsize)
+void __kfifo_dma_in_finish_r(struct __kfifo *fifo,
+	unsigned int len, size_t recsize)
 {
-	if (recsize == 0)
-		return kfifo_avail(fifo);
-
-	return __kfifo_peek_n(fifo, recsize);
+	len = __kfifo_max_r(len, recsize);
+	__kfifo_poke_n(fifo, len, recsize);
+	fifo->in += len + recsize;
 }
-EXPORT_SYMBOL(__kfifo_peek_generic);
+EXPORT_SYMBOL(__kfifo_dma_in_finish_r);
 
-void __kfifo_skip_generic(struct kfifo *fifo, unsigned int recsize)
+unsigned int __kfifo_dma_out_prepare_r(struct __kfifo *fifo,
+	struct scatterlist *sgl, int nents, unsigned int len, size_t recsize)
 {
-	__kfifo_skip_rec(fifo, recsize);
+	if (!nents)
+		BUG();
+
+	len = __kfifo_max_r(len, recsize);
+
+	if (len + recsize > fifo->in - fifo->out)
+		return 0;
+
+	return setup_sgl(fifo, sgl, nents, len, fifo->out + recsize);
 }
-EXPORT_SYMBOL(__kfifo_skip_generic);
+EXPORT_SYMBOL(__kfifo_dma_out_prepare_r);
+
+void __kfifo_dma_out_finish_r(struct __kfifo *fifo, size_t recsize)
+{
+	unsigned int len;
 
+	len = __kfifo_peek_n(fifo, recsize);
+	fifo->out += len + recsize;
+}
+EXPORT_SYMBOL(__kfifo_dma_out_finish_r);
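The rewrite keeps fifo->in and fifo->out as free-running unsigned counters and masks them only on access, so kfifo_unused() and the emptiness test fifo->in == fifo->out work across wraparound; esize generalizes the buffer from bytes to fixed-size elements. A minimal byte-stream usage sketch against the macro layer that sits on top of these __kfifo_* helpers (names from the new <linux/kfifo.h>, sizes illustrative):

#include <linux/kfifo.h>

static DECLARE_KFIFO(test_fifo, unsigned char, 128);	/* must be a power of 2 */

static void kfifo_smoke_test(void)
{
	unsigned char buf[16];
	unsigned int n;

	INIT_KFIFO(test_fifo);

	/* kfifo_in()/kfifo_out() expand to __kfifo_in()/__kfifo_out() here */
	n = kfifo_in(&test_fifo, "hello", 5);		/* n == 5 if space was free */
	n = kfifo_out(&test_fifo, buf, sizeof(buf));	/* n == 5, fifo now empty */
}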
diff --git a/kernel/panic.c b/kernel/panic.c
index 3b16cd93fa7d..4c13b1a88ebb 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -24,6 +24,9 @@
 #include <linux/nmi.h>
 #include <linux/dmi.h>
 
+#define PANIC_TIMER_STEP 100
+#define PANIC_BLINK_SPD 18
+
 int panic_on_oops;
 static unsigned long tainted_mask;
 static int pause_on_oops;
@@ -36,36 +39,15 @@ ATOMIC_NOTIFIER_HEAD(panic_notifier_list);
 
 EXPORT_SYMBOL(panic_notifier_list);
 
-/* Returns how long it waited in ms */
-long (*panic_blink)(long time);
-EXPORT_SYMBOL(panic_blink);
-
-static void panic_blink_one_second(void)
+static long no_blink(int state)
 {
-	static long i = 0, end;
-
-	if (panic_blink) {
-		end = i + MSEC_PER_SEC;
-
-		while (i < end) {
-			i += panic_blink(i);
-			mdelay(1);
-			i++;
-		}
-	} else {
-		/*
-		 * When running under a hypervisor a small mdelay may get
-		 * rounded up to the hypervisor timeslice. For example, with
-		 * a 1ms in 10ms hypervisor timeslice we might inflate a
-		 * mdelay(1) loop by 10x.
-		 *
-		 * If we have nothing to blink, spin on 1 second calls to
-		 * mdelay to avoid this.
-		 */
-		mdelay(MSEC_PER_SEC);
-	}
+	return 0;
 }
 
+/* Returns how long it waited in ms */
+long (*panic_blink)(int state);
+EXPORT_SYMBOL(panic_blink);
+
 /**
  * panic - halt the system
  * @fmt: The text string to print
@@ -78,7 +60,8 @@ NORET_TYPE void panic(const char * fmt, ...)
 {
 	static char buf[1024];
 	va_list args;
-	long i;
+	long i, i_next = 0;
+	int state = 0;
 
 	/*
 	 * It's possible to come here directly from a panic-assertion and
@@ -117,6 +100,9 @@ NORET_TYPE void panic(const char * fmt, ...)
 
 	bust_spinlocks(0);
 
+	if (!panic_blink)
+		panic_blink = no_blink;
+
 	if (panic_timeout > 0) {
 		/*
 		 * Delay timeout seconds before rebooting the machine.
@@ -124,9 +110,13 @@ NORET_TYPE void panic(const char * fmt, ...)
 		 */
 		printk(KERN_EMERG "Rebooting in %d seconds..", panic_timeout);
 
-		for (i = 0; i < panic_timeout; i++) {
+		for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) {
 			touch_nmi_watchdog();
-			panic_blink_one_second();
+			if (i >= i_next) {
+				i += panic_blink(state ^= 1);
+				i_next = i + 3600 / PANIC_BLINK_SPD;
+			}
+			mdelay(PANIC_TIMER_STEP);
 		}
 		/*
 		 * This will not be a clean reboot, with everything
@@ -152,9 +142,13 @@ NORET_TYPE void panic(const char * fmt, ...)
 	}
 #endif
 	local_irq_enable();
-	while (1) {
+	for (i = 0; ; i += PANIC_TIMER_STEP) {
 		touch_softlockup_watchdog();
-		panic_blink_one_second();
+		if (i >= i_next) {
+			i += panic_blink(state ^= 1);
+			i_next = i + 3600 / PANIC_BLINK_SPD;
+		}
+		mdelay(PANIC_TIMER_STEP);
 	}
 }
 
@@ -344,7 +338,7 @@ static int init_oops_id(void)
 }
 late_initcall(init_oops_id);
 
-static void print_oops_end_marker(void)
+void print_oops_end_marker(void)
 {
 	init_oops_id();
 	printk(KERN_WARNING "---[ end trace %016llx ]---\n",
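With the changes above, panic() paces itself in PANIC_TIMER_STEP (100 ms) quanta and flips the blink state roughly every 3600 / PANIC_BLINK_SPD = 200 ms; the callback returns how many ms it consumed so the timeout accounting stays correct. A hypothetical driver-side hook under the new prototype might look like this (example_led_set() is an assumed board helper, not a real API):

/* Sketch of a panic_blink implementation: called with state
 * alternating 0/1, returns the time spent in the callback, in ms. */
static long example_panic_blink(int state)
{
	example_led_set(state);		/* assumed board-specific LED helper */
	return 0;			/* negligible time consumed */
}

/* registration, e.g. from board init code: */
/*	panic_blink = example_panic_blink;  */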
diff --git a/kernel/pid.c b/kernel/pid.c
index e9fd8c132d26..d55c6fb8d087 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -122,6 +122,43 @@ static void free_pidmap(struct upid *upid)
 	atomic_inc(&map->nr_free);
 }
 
+/*
+ * If we started walking pids at 'base', is 'a' seen before 'b'?
+ */
+static int pid_before(int base, int a, int b)
+{
+	/*
+	 * This is the same as saying
+	 *
+	 * (a - base + MAXUINT) % MAXUINT < (b - base + MAXUINT) % MAXUINT
+	 * and that mapping orders 'a' and 'b' with respect to 'base'.
+	 */
+	return (unsigned)(a - base) < (unsigned)(b - base);
+}
+
+/*
+ * We might be racing with someone else trying to set pid_ns->last_pid.
+ * We want the winner to have the "later" value, because if the
+ * "earlier" value prevails, then a pid may get reused immediately.
+ *
+ * Since pids rollover, it is not sufficient to just pick the bigger
+ * value.  We have to consider where we started counting from.
+ *
+ * 'base' is the value of pid_ns->last_pid that we observed when
+ * we started looking for a pid.
+ *
+ * 'pid' is the pid that we eventually found.
+ */
+static void set_last_pid(struct pid_namespace *pid_ns, int base, int pid)
+{
+	int prev;
+	int last_write = base;
+	do {
+		prev = last_write;
+		last_write = cmpxchg(&pid_ns->last_pid, prev, pid);
+	} while ((prev != last_write) && (pid_before(base, last_write, pid)));
+}
+
 static int alloc_pidmap(struct pid_namespace *pid_ns)
 {
 	int i, offset, max_scan, pid, last = pid_ns->last_pid;
@@ -132,7 +169,12 @@ static int alloc_pidmap(struct pid_namespace *pid_ns)
 		pid = RESERVED_PIDS;
 	offset = pid & BITS_PER_PAGE_MASK;
 	map = &pid_ns->pidmap[pid/BITS_PER_PAGE];
-	max_scan = (pid_max + BITS_PER_PAGE - 1)/BITS_PER_PAGE - !offset;
+	/*
+	 * If last_pid points into the middle of the map->page we
+	 * want to scan this bitmap block twice, the second time
+	 * we start with offset == 0 (or RESERVED_PIDS).
+	 */
+	max_scan = DIV_ROUND_UP(pid_max, BITS_PER_PAGE) - !offset;
 	for (i = 0; i <= max_scan; ++i) {
 		if (unlikely(!map->page)) {
 			void *page = kzalloc(PAGE_SIZE, GFP_KERNEL);
@@ -154,20 +196,12 @@ static int alloc_pidmap(struct pid_namespace *pid_ns)
 		do {
 			if (!test_and_set_bit(offset, map->page)) {
 				atomic_dec(&map->nr_free);
-				pid_ns->last_pid = pid;
+				set_last_pid(pid_ns, last, pid);
 				return pid;
 			}
 			offset = find_next_offset(map, offset);
 			pid = mk_pid(pid_ns, map, offset);
-			/*
-			 * find_next_offset() found a bit, the pid from it
-			 * is in-bounds, and if we fell back to the last
-			 * bitmap block and the final block was the same
-			 * as the starting point, pid is before last_pid.
-			 */
-		} while (offset < BITS_PER_PAGE && pid < pid_max &&
-				(i != max_scan || pid < last ||
-					!((last+1) & BITS_PER_PAGE_MASK)));
+		} while (offset < BITS_PER_PAGE && pid < pid_max);
 	}
 	if (map < &pid_ns->pidmap[(pid_max-1)/BITS_PER_PAGE]) {
 		++map;
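The cmpxchg loop in set_last_pid() only retries while some other writer stored a value that is still "earlier" than ours relative to the shared starting point; pid_before() encodes the circular ordering as an unsigned distance from base. A userspace worked example of that ordering (numbers illustrative, assuming the default pid_max of 32768 with wraparound back to the reserved range):

#include <stdio.h>

static int pid_before(int base, int a, int b)
{
	return (unsigned)(a - base) < (unsigned)(b - base);
}

int main(void)
{
	/* Two racing allocators started at base = 32760; one found 32765,
	 * the other wrapped past pid_max and found 301.  Walking upward
	 * from base, 32765 is reached first, so 301 is the "later" pid
	 * and should win the last_pid race. */
	printf("%d\n", pid_before(32760, 32765, 301));	/* prints 1 */
	return 0;
}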
diff --git a/kernel/power/block_io.c b/kernel/power/block_io.c
index 97024fd40cd5..83bbc7c02df9 100644
--- a/kernel/power/block_io.c
+++ b/kernel/power/block_io.c
@@ -28,7 +28,7 @@
 static int submit(int rw, struct block_device *bdev, sector_t sector,
 		struct page *page, struct bio **bio_chain)
 {
-	const int bio_rw = rw | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
+	const int bio_rw = rw | REQ_SYNC | REQ_UNPLUG;
 	struct bio *bio;
 
 	bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1);
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 74a3d693c196..f34d798ef4a2 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -324,26 +324,32 @@ int ptrace_detach(struct task_struct *child, unsigned int data)
 }
 
 /*
- * Detach all tasks we were using ptrace on.
+ * Detach all tasks we were using ptrace on. Called with tasklist held
+ * for writing, and returns with it held too. But note it can release
+ * and reacquire the lock.
  */
 void exit_ptrace(struct task_struct *tracer)
 {
 	struct task_struct *p, *n;
 	LIST_HEAD(ptrace_dead);
 
-	write_lock_irq(&tasklist_lock);
+	if (likely(list_empty(&tracer->ptraced)))
+		return;
+
 	list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
 		if (__ptrace_detach(tracer, p))
 			list_add(&p->ptrace_entry, &ptrace_dead);
 	}
-	write_unlock_irq(&tasklist_lock);
 
+	write_unlock_irq(&tasklist_lock);
 	BUG_ON(!list_empty(&tracer->ptraced));
 
 	list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_entry) {
 		list_del_init(&p->ptrace_entry);
 		release_task(p);
 	}
+
+	write_lock_irq(&tasklist_lock);
 }
 
 int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
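Together with the exit.c hunk earlier in this series, this changes the locking contract: forget_original_parent() now takes tasklist_lock once, and exit_ptrace() both expects it write-held and returns with it write-held, although it may drop and retake it in between (as may find_new_reaper()). A sketch of the resulting caller-side protocol (fragment, simplified from forget_original_parent()):

write_lock_irq(&tasklist_lock);
exit_ptrace(father);			/* may unlock and relock tasklist_lock */
reaper = find_new_reaper(father);	/* same caveat */
/* ... reparent father's children while the lock is held ... */
write_unlock_irq(&tasklist_lock);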
diff --git a/kernel/timer.c b/kernel/timer.c
index f1b8afe1ad86..97bf05baade7 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -326,6 +326,7 @@ EXPORT_SYMBOL_GPL(round_jiffies_up_relative);
 
 /**
  * set_timer_slack - set the allowed slack for a timer
+ * @timer: the timer to be modified
  * @slack_hz: the amount of time (in jiffies) allowed for rounding
  *
  * Set the amount of time, in jiffies, that a certain timer has
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 638711c17504..82499a5bdcb7 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -169,9 +169,12 @@ static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
 static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
 				 BLK_TC_ACT(BLK_TC_WRITE) };
 
+#define BLK_TC_HARDBARRIER	BLK_TC_BARRIER
+#define BLK_TC_RAHEAD		BLK_TC_AHEAD
+
 /* The ilog2() calls fall out because they're constant */
-#define MASK_TC_BIT(rw, __name) ((rw & (1 << BIO_RW_ ## __name)) << \
-	  (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - BIO_RW_ ## __name))
+#define MASK_TC_BIT(rw, __name) ((rw & REQ_ ## __name) << \
+	  (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - __REQ_ ## __name))
 
 /*
  * The worker for the various blk_add_trace*() types. Fills out a
@@ -194,9 +197,9 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 		return;
 
 	what |= ddir_act[rw & WRITE];
-	what |= MASK_TC_BIT(rw, BARRIER);
-	what |= MASK_TC_BIT(rw, SYNCIO);
-	what |= MASK_TC_BIT(rw, AHEAD);
+	what |= MASK_TC_BIT(rw, HARDBARRIER);
+	what |= MASK_TC_BIT(rw, SYNC);
+	what |= MASK_TC_BIT(rw, RAHEAD);
 	what |= MASK_TC_BIT(rw, META);
 	what |= MASK_TC_BIT(rw, DISCARD);
 
@@ -549,6 +552,41 @@ int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 }
 EXPORT_SYMBOL_GPL(blk_trace_setup);
 
+#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
+static int compat_blk_trace_setup(struct request_queue *q, char *name,
+				  dev_t dev, struct block_device *bdev,
+				  char __user *arg)
+{
+	struct blk_user_trace_setup buts;
+	struct compat_blk_user_trace_setup cbuts;
+	int ret;
+
+	if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
+		return -EFAULT;
+
+	buts = (struct blk_user_trace_setup) {
+		.act_mask = cbuts.act_mask,
+		.buf_size = cbuts.buf_size,
+		.buf_nr = cbuts.buf_nr,
+		.start_lba = cbuts.start_lba,
+		.end_lba = cbuts.end_lba,
+		.pid = cbuts.pid,
+	};
+	memcpy(&buts.name, &cbuts.name, 32);
+
+	ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
+	if (ret)
+		return ret;
+
+	if (copy_to_user(arg, &buts.name, 32)) {
+		blk_trace_remove(q);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+#endif
+
 int blk_trace_startstop(struct request_queue *q, int start)
 {
 	int ret;
@@ -601,6 +639,7 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
 	if (!q)
 		return -ENXIO;
 
+	lock_kernel();
 	mutex_lock(&bdev->bd_mutex);
 
 	switch (cmd) {
@@ -608,6 +647,12 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
 		bdevname(bdev, b);
 		ret = blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
 		break;
+#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
+	case BLKTRACESETUP32:
+		bdevname(bdev, b);
+		ret = compat_blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
+		break;
+#endif
 	case BLKTRACESTART:
 		start = 1;
 	case BLKTRACESTOP:
@@ -622,6 +667,7 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
 	}
 
 	mutex_unlock(&bdev->bd_mutex);
+	unlock_kernel();
 	return ret;
 }
 
@@ -661,10 +707,10 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
 	if (likely(!bt))
 		return;
 
-	if (blk_discard_rq(rq))
-		rw |= (1 << BIO_RW_DISCARD);
+	if (rq->cmd_flags & REQ_DISCARD)
+		rw |= REQ_DISCARD;
 
-	if (blk_pc_request(rq)) {
+	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
 		what |= BLK_TC_ACT(BLK_TC_PC);
 		__blk_add_trace(bt, 0, blk_rq_bytes(rq), rw,
 				what, rq->errors, rq->cmd_len, rq->cmd);
@@ -925,7 +971,7 @@ void blk_add_driver_data(struct request_queue *q,
 	if (likely(!bt))
 		return;
 
-	if (blk_pc_request(rq))
+	if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
 		__blk_add_trace(bt, 0, blk_rq_bytes(rq), 0,
 				BLK_TA_DRV_DATA, rq->errors, len, data);
 	else
@@ -1730,7 +1776,7 @@ void blk_dump_cmd(char *buf, struct request *rq)
 	int len = rq->cmd_len;
 	unsigned char *cmd = rq->cmd;
 
-	if (!blk_pc_request(rq)) {
+	if (rq->cmd_type != REQ_TYPE_BLOCK_PC) {
 		buf[0] = '\0';
 		return;
 	}
@@ -1755,20 +1801,20 @@ void blk_fill_rwbs(char *rwbs, u32 rw, int bytes)
 
 	if (rw & WRITE)
 		rwbs[i++] = 'W';
-	else if (rw & 1 << BIO_RW_DISCARD)
+	else if (rw & REQ_DISCARD)
 		rwbs[i++] = 'D';
 	else if (bytes)
 		rwbs[i++] = 'R';
 	else
 		rwbs[i++] = 'N';
 
-	if (rw & 1 << BIO_RW_AHEAD)
+	if (rw & REQ_RAHEAD)
 		rwbs[i++] = 'A';
-	if (rw & 1 << BIO_RW_BARRIER)
+	if (rw & REQ_HARDBARRIER)
 		rwbs[i++] = 'B';
-	if (rw & 1 << BIO_RW_SYNCIO)
+	if (rw & REQ_SYNC)
 		rwbs[i++] = 'S';
-	if (rw & 1 << BIO_RW_META)
+	if (rw & REQ_META)
 		rwbs[i++] = 'M';
 
 	rwbs[i] = '\0';
@@ -1779,8 +1825,8 @@ void blk_fill_rwbs_rq(char *rwbs, struct request *rq)
 	int rw = rq->cmd_flags & 0x03;
 	int bytes;
 
-	if (blk_discard_rq(rq))
-		rw |= (1 << BIO_RW_DISCARD);
+	if (rq->cmd_flags & REQ_DISCARD)
+		rw |= REQ_DISCARD;
 
 	bytes = blk_rq_bytes(rq);
 
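The reworked MASK_TC_BIT() is the core of the BIO_RW_* to REQ_* conversion: it isolates one request flag and shifts it by a compile-time constant into the matching BLK_TC_* position of the action mask. Expanded by hand for one flag (bit positions are whatever blk_types.h defines, shown symbolically):

/* Sketch: what MASK_TC_BIT(rw, SYNC) expands to after the change.
 * ilog2() of a constant folds at compile time, so each flag costs a
 * single AND plus a constant shift. */
static inline u32 mask_tc_bit_sync(u32 rw)
{
	return (rw & REQ_SYNC) <<
		(ilog2(BLK_TC_SYNC) + BLK_TC_SHIFT - __REQ_SYNC);
}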