Diffstat (limited to 'kernel')
 kernel/auditsc.c          |   9
 kernel/cgroup.c           |   4
 kernel/cred.c             |   4
 kernel/exit.c             |   7
 kernel/kexec.c            |   8
 kernel/kfifo.c            | 750
 kernel/panic.c            |  60
 kernel/pid.c              |  56
 kernel/ptrace.c           |  12
 kernel/time/timekeeping.c |  11
 kernel/trace/blktrace.c   |   8

11 files changed, 563 insertions(+), 366 deletions(-)
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index b87a63beb66c..1b31c130d034 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -1835,13 +1835,8 @@ void __audit_getname(const char *name)
 	context->names[context->name_count].ino = (unsigned long)-1;
 	context->names[context->name_count].osid = 0;
 	++context->name_count;
-	if (!context->pwd.dentry) {
-		read_lock(&current->fs->lock);
-		context->pwd = current->fs->pwd;
-		path_get(&current->fs->pwd);
-		read_unlock(&current->fs->lock);
-	}
-
+	if (!context->pwd.dentry)
+		get_fs_pwd(current->fs, &context->pwd);
 }
 
 /* audit_putname - intercept a putname request
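
The hunk above replaces an open-coded, locked copy of current->fs->pwd with
the get_fs_pwd() helper. A sketch of what the helper does, modelled on the
removed lines (the exact locking primitive inside fs_struct is an
assumption, since fs_struct locking was reworked around the same time):

    /* sketch: take a counted reference to a task's working directory */
    static inline void get_fs_pwd(struct fs_struct *fs, struct path *pwd)
    {
    	read_lock(&fs->lock);	/* assumed rwlock, as in the removed code */
    	*pwd = fs->pwd;
    	path_get(pwd);		/* pin the mnt and dentry refcounts */
    	read_unlock(&fs->lock);
    }

The behaviour is unchanged; the point is to keep the locking rules for
fs->pwd in one place instead of copying them into every caller.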
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index d83cab06da87..192f88c5b0f9 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1102,7 +1102,7 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
 			if (opts->release_agent)
 				return -EINVAL;
 			opts->release_agent =
-				kstrndup(token + 14, PATH_MAX, GFP_KERNEL);
+				kstrndup(token + 14, PATH_MAX - 1, GFP_KERNEL);
 			if (!opts->release_agent)
 				return -ENOMEM;
 		} else if (!strncmp(token, "name=", 5)) {
@@ -1123,7 +1123,7 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
 			if (opts->name)
 				return -EINVAL;
 			opts->name = kstrndup(name,
-					      MAX_CGROUP_ROOT_NAMELEN,
+					      MAX_CGROUP_ROOT_NAMELEN - 1,
 					      GFP_KERNEL);
 			if (!opts->name)
 				return -ENOMEM;
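
Both fixes pass "limit - 1" because kstrndup(s, max, gfp) copies up to max
characters and then appends a terminating NUL, so the old calls could hand
back a string occupying PATH_MAX + 1 (or MAX_CGROUP_ROOT_NAMELEN + 1)
bytes. A minimal illustration of the off-by-one, assuming a
PATH_MAX-sized destination somewhere downstream:

    /* kstrndup(s, n, ...) may allocate and fill n + 1 bytes (n chars + NUL) */
    char *agent = kstrndup(token + 14, PATH_MAX, GFP_KERNEL);
    /* strlen(agent) can now be PATH_MAX, so copying it into a
     * char buf[PATH_MAX] would overflow by one byte;
     * kstrndup(..., PATH_MAX - 1, ...) keeps the result within bounds. */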
diff --git a/kernel/cred.c b/kernel/cred.c
index 60bc8b1e32e6..9a3e22641fe7 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -22,10 +22,6 @@
 #define kdebug(FMT, ...) \
 	printk("[%-5.5s%5u] "FMT"\n", current->comm, current->pid ,##__VA_ARGS__)
 #else
-static inline __attribute__((format(printf, 1, 2)))
-void no_printk(const char *fmt, ...)
-{
-}
 #define kdebug(FMT, ...) \
 	no_printk("[%-5.5s%5u] "FMT"\n", current->comm, current->pid ,##__VA_ARGS__)
 #endif
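
The private no_printk() stub can go because an identical helper is now
provided by a shared header, so kdebug() still type-checks its arguments
while compiling to nothing in the !DEBUG case. Roughly, assuming the common
definition in linux/kernel.h:

    static inline __attribute__((format(printf, 1, 2)))
    int no_printk(const char *fmt, ...)
    {
    	return 0;	/* arguments are format-checked, nothing is printed */
    }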
diff --git a/kernel/exit.c b/kernel/exit.c
index ceffc67b564a..671ed56e0a49 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -771,9 +771,12 @@ static void forget_original_parent(struct task_struct *father)
 	struct task_struct *p, *n, *reaper;
 	LIST_HEAD(dead_children);
 
-	exit_ptrace(father);
-
 	write_lock_irq(&tasklist_lock);
+	/*
+	 * Note that exit_ptrace() and find_new_reaper() might
+	 * drop tasklist_lock and reacquire it.
+	 */
+	exit_ptrace(father);
 	reaper = find_new_reaper(father);
 
 	list_for_each_entry_safe(p, n, &father->children, sibling) {
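
The comment is the important part of this hunk: exit_ptrace() is now called
with tasklist_lock already write-held, and both it and find_new_reaper()
may drop and retake the lock. Sketched as a locking contract, with names
taken from the hunk above:

    write_lock_irq(&tasklist_lock);
    exit_ptrace(father);		/* may unlock/relock tasklist_lock */
    reaper = find_new_reaper(father);	/* same caveat */
    /* any state sampled before these calls must be revalidated here */
    write_unlock_irq(&tasklist_lock);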
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 131b1703936f..c0613f7d6730 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -151,8 +151,10 @@ static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
 	image->nr_segments = nr_segments;
 	segment_bytes = nr_segments * sizeof(*segments);
 	result = copy_from_user(image->segment, segments, segment_bytes);
-	if (result)
+	if (result) {
+		result = -EFAULT;
 		goto out;
+	}
 
 	/*
 	 * Verify we have good destination addresses. The caller is
@@ -827,7 +829,7 @@ static int kimage_load_normal_segment(struct kimage *image,
 		result = copy_from_user(ptr, buf, uchunk);
 		kunmap(page);
 		if (result) {
-			result = (result < 0) ? result : -EIO;
+			result = -EFAULT;
 			goto out;
 		}
 		ubytes -= uchunk;
@@ -882,7 +884,7 @@ static int kimage_load_crash_segment(struct kimage *image,
 		kexec_flush_icache_page(page);
 		kunmap(page);
 		if (result) {
-			result = (result < 0) ? result : -EIO;
+			result = -EFAULT;
 			goto out;
 		}
 		ubytes -= uchunk;
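
All three hunks fix the same error-code confusion: copy_from_user() (and
copy_to_user()) return the number of bytes left uncopied, zero on success,
and never a negative errno, so the old "result < 0" test was dead code and
-EIO was the wrong errno for a faulting user pointer. The convention, in
short:

    /* copy_from_user() returns bytes NOT copied; non-zero means fault */
    if (copy_from_user(dst, user_src, len))
    	return -EFAULT;	/* a bad user address maps to EFAULT, not EIO */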
diff --git a/kernel/kfifo.c b/kernel/kfifo.c
index 35edbe22e9a9..4502604ecadf 100644
--- a/kernel/kfifo.c
+++ b/kernel/kfifo.c
@@ -1,8 +1,7 @@
 /*
- * A generic kernel FIFO implementation.
+ * A generic kernel FIFO implementation
  *
- * Copyright (C) 2009 Stefani Seibold <stefani@seibold.net>
- * Copyright (C) 2004 Stelian Pop <stelian@popies.net>
+ * Copyright (C) 2009/2010 Stefani Seibold <stefani@seibold.net>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -11,7 +10,7 @@
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
@@ -24,422 +23,579 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/err.h>
-#include <linux/kfifo.h>
 #include <linux/log2.h>
 #include <linux/uaccess.h>
+#include <linux/kfifo.h>
 
-static void _kfifo_init(struct kfifo *fifo, void *buffer,
-		unsigned int size)
-{
-	fifo->buffer = buffer;
-	fifo->size = size;
-
-	kfifo_reset(fifo);
-}
-
-/**
- * kfifo_init - initialize a FIFO using a preallocated buffer
- * @fifo: the fifo to assign the buffer
- * @buffer: the preallocated buffer to be used.
- * @size: the size of the internal buffer, this has to be a power of 2.
- *
+/*
+ * internal helper to calculate the unused elements in a fifo
  */
-void kfifo_init(struct kfifo *fifo, void *buffer, unsigned int size)
+static inline unsigned int kfifo_unused(struct __kfifo *fifo)
 {
-	/* size must be a power of 2 */
-	BUG_ON(!is_power_of_2(size));
-
-	_kfifo_init(fifo, buffer, size);
+	return (fifo->mask + 1) - (fifo->in - fifo->out);
 }
-EXPORT_SYMBOL(kfifo_init);
 
-/**
- * kfifo_alloc - allocates a new FIFO internal buffer
- * @fifo: the fifo to assign then new buffer
- * @size: the size of the buffer to be allocated, this have to be a power of 2.
- * @gfp_mask: get_free_pages mask, passed to kmalloc()
- *
- * This function dynamically allocates a new fifo internal buffer
- *
- * The size will be rounded-up to a power of 2.
- * The buffer will be release with kfifo_free().
- * Return 0 if no error, otherwise the an error code
- */
-int kfifo_alloc(struct kfifo *fifo, unsigned int size, gfp_t gfp_mask)
+int __kfifo_alloc(struct __kfifo *fifo, unsigned int size,
+		size_t esize, gfp_t gfp_mask)
 {
-	unsigned char *buffer;
-
 	/*
-	 * round up to the next power of 2, since our 'let the indices
+	 * round down to the next power of 2, since our 'let the indices
 	 * wrap' technique works only in this case.
 	 */
-	if (!is_power_of_2(size)) {
-		BUG_ON(size > 0x80000000);
-		size = roundup_pow_of_two(size);
+	if (!is_power_of_2(size))
+		size = rounddown_pow_of_two(size);
+
+	fifo->in = 0;
+	fifo->out = 0;
+	fifo->esize = esize;
+
+	if (size < 2) {
+		fifo->data = NULL;
+		fifo->mask = 0;
+		return -EINVAL;
 	}
 
-	buffer = kmalloc(size, gfp_mask);
-	if (!buffer) {
-		_kfifo_init(fifo, NULL, 0);
+	fifo->data = kmalloc(size * esize, gfp_mask);
+
+	if (!fifo->data) {
+		fifo->mask = 0;
 		return -ENOMEM;
 	}
-
-	_kfifo_init(fifo, buffer, size);
+	fifo->mask = size - 1;
 
 	return 0;
 }
-EXPORT_SYMBOL(kfifo_alloc);
+EXPORT_SYMBOL(__kfifo_alloc);
 
-/**
- * kfifo_free - frees the FIFO internal buffer
- * @fifo: the fifo to be freed.
- */
-void kfifo_free(struct kfifo *fifo)
+void __kfifo_free(struct __kfifo *fifo)
 {
-	kfree(fifo->buffer);
-	_kfifo_init(fifo, NULL, 0);
+	kfree(fifo->data);
+	fifo->in = 0;
+	fifo->out = 0;
+	fifo->esize = 0;
+	fifo->data = NULL;
+	fifo->mask = 0;
 }
-EXPORT_SYMBOL(kfifo_free);
+EXPORT_SYMBOL(__kfifo_free);
 
-/**
- * kfifo_skip - skip output data
- * @fifo: the fifo to be used.
- * @len: number of bytes to skip
- */
-void kfifo_skip(struct kfifo *fifo, unsigned int len)
+int __kfifo_init(struct __kfifo *fifo, void *buffer,
+		unsigned int size, size_t esize)
 {
-	if (len < kfifo_len(fifo)) {
-		__kfifo_add_out(fifo, len);
-		return;
+	size /= esize;
+
+	if (!is_power_of_2(size))
+		size = rounddown_pow_of_two(size);
+
+	fifo->in = 0;
+	fifo->out = 0;
+	fifo->esize = esize;
+	fifo->data = buffer;
+
+	if (size < 2) {
+		fifo->mask = 0;
+		return -EINVAL;
 	}
-	kfifo_reset_out(fifo);
+	fifo->mask = size - 1;
+
+	return 0;
 }
-EXPORT_SYMBOL(kfifo_skip);
+EXPORT_SYMBOL(__kfifo_init);
 
-static inline void __kfifo_in_data(struct kfifo *fifo,
-		const void *from, unsigned int len, unsigned int off)
+static void kfifo_copy_in(struct __kfifo *fifo, const void *src,
+		unsigned int len, unsigned int off)
 {
+	unsigned int size = fifo->mask + 1;
+	unsigned int esize = fifo->esize;
 	unsigned int l;
 
+	off &= fifo->mask;
+	if (esize != 1) {
+		off *= esize;
+		size *= esize;
+		len *= esize;
+	}
+	l = min(len, size - off);
+
+	memcpy(fifo->data + off, src, l);
+	memcpy(fifo->data, src + l, len - l);
 	/*
-	 * Ensure that we sample the fifo->out index -before- we
-	 * start putting bytes into the kfifo.
+	 * make sure that the data in the fifo is up to date before
+	 * incrementing the fifo->in index counter
 	 */
+	smp_wmb();
+}
 
-	smp_mb();
-
-	off = __kfifo_off(fifo, fifo->in + off);
+unsigned int __kfifo_in(struct __kfifo *fifo,
+		const void *buf, unsigned int len)
+{
+	unsigned int l;
 
-	/* first put the data starting from fifo->in to buffer end */
-	l = min(len, fifo->size - off);
-	memcpy(fifo->buffer + off, from, l);
+	l = kfifo_unused(fifo);
+	if (len > l)
+		len = l;
 
-	/* then put the rest (if any) at the beginning of the buffer */
-	memcpy(fifo->buffer, from + l, len - l);
+	kfifo_copy_in(fifo, buf, len, fifo->in);
+	fifo->in += len;
+	return len;
 }
+EXPORT_SYMBOL(__kfifo_in);
 
-static inline void __kfifo_out_data(struct kfifo *fifo,
-		void *to, unsigned int len, unsigned int off)
+static void kfifo_copy_out(struct __kfifo *fifo, void *dst,
+		unsigned int len, unsigned int off)
 {
+	unsigned int size = fifo->mask + 1;
+	unsigned int esize = fifo->esize;
 	unsigned int l;
 
+	off &= fifo->mask;
+	if (esize != 1) {
+		off *= esize;
+		size *= esize;
+		len *= esize;
+	}
+	l = min(len, size - off);
+
+	memcpy(dst, fifo->data + off, l);
+	memcpy(dst + l, fifo->data, len - l);
 	/*
-	 * Ensure that we sample the fifo->in index -before- we
-	 * start removing bytes from the kfifo.
+	 * make sure that the data is copied before
+	 * incrementing the fifo->out index counter
 	 */
+	smp_wmb();
+}
 
-	smp_rmb();
+unsigned int __kfifo_out_peek(struct __kfifo *fifo,
+		void *buf, unsigned int len)
+{
+	unsigned int l;
 
-	off = __kfifo_off(fifo, fifo->out + off);
+	l = fifo->in - fifo->out;
+	if (len > l)
+		len = l;
 
-	/* first get the data from fifo->out until the end of the buffer */
-	l = min(len, fifo->size - off);
-	memcpy(to, fifo->buffer + off, l);
+	kfifo_copy_out(fifo, buf, len, fifo->out);
+	return len;
+}
+EXPORT_SYMBOL(__kfifo_out_peek);
 
-	/* then get the rest (if any) from the beginning of the buffer */
-	memcpy(to + l, fifo->buffer, len - l);
+unsigned int __kfifo_out(struct __kfifo *fifo,
+		void *buf, unsigned int len)
+{
+	len = __kfifo_out_peek(fifo, buf, len);
+	fifo->out += len;
+	return len;
 }
+EXPORT_SYMBOL(__kfifo_out);
 
-static inline int __kfifo_from_user_data(struct kfifo *fifo,
+static unsigned long kfifo_copy_from_user(struct __kfifo *fifo,
 	const void __user *from, unsigned int len, unsigned int off,
-	unsigned *lenout)
+	unsigned int *copied)
 {
+	unsigned int size = fifo->mask + 1;
+	unsigned int esize = fifo->esize;
 	unsigned int l;
-	int ret;
+	unsigned long ret;
 
+	off &= fifo->mask;
+	if (esize != 1) {
+		off *= esize;
+		size *= esize;
+		len *= esize;
+	}
+	l = min(len, size - off);
+
+	ret = copy_from_user(fifo->data + off, from, l);
+	if (unlikely(ret))
+		ret = DIV_ROUND_UP(ret + len - l, esize);
+	else {
+		ret = copy_from_user(fifo->data, from + l, len - l);
+		if (unlikely(ret))
+			ret = DIV_ROUND_UP(ret, esize);
+	}
 	/*
-	 * Ensure that we sample the fifo->out index -before- we
-	 * start putting bytes into the kfifo.
+	 * make sure that the data in the fifo is up to date before
+	 * incrementing the fifo->in index counter
 	 */
+	smp_wmb();
+	*copied = len - ret;
+	/* return the number of elements which are not copied */
+	return ret;
+}
 
-	smp_mb();
+int __kfifo_from_user(struct __kfifo *fifo, const void __user *from,
+		unsigned long len, unsigned int *copied)
+{
+	unsigned int l;
+	unsigned long ret;
+	unsigned int esize = fifo->esize;
+	int err;
 
-	off = __kfifo_off(fifo, fifo->in + off);
+	if (esize != 1)
+		len /= esize;
 
-	/* first put the data starting from fifo->in to buffer end */
-	l = min(len, fifo->size - off);
-	ret = copy_from_user(fifo->buffer + off, from, l);
-	if (unlikely(ret)) {
-		*lenout = ret;
-		return -EFAULT;
-	}
-	*lenout = l;
+	l = kfifo_unused(fifo);
+	if (len > l)
+		len = l;
 
-	/* then put the rest (if any) at the beginning of the buffer */
-	ret = copy_from_user(fifo->buffer, from + l, len - l);
-	*lenout += ret ? ret : len - l;
-	return ret ? -EFAULT : 0;
+	ret = kfifo_copy_from_user(fifo, from, len, fifo->in, copied);
+	if (unlikely(ret)) {
+		len -= ret;
+		err = -EFAULT;
+	} else
+		err = 0;
+	fifo->in += len;
+	return err;
 }
+EXPORT_SYMBOL(__kfifo_from_user);
 
-static inline int __kfifo_to_user_data(struct kfifo *fifo,
-	void __user *to, unsigned int len, unsigned int off, unsigned *lenout)
+static unsigned long kfifo_copy_to_user(struct __kfifo *fifo, void __user *to,
+	unsigned int len, unsigned int off, unsigned int *copied)
 {
 	unsigned int l;
-	int ret;
-
+	unsigned long ret;
+	unsigned int size = fifo->mask + 1;
+	unsigned int esize = fifo->esize;
+
+	off &= fifo->mask;
+	if (esize != 1) {
+		off *= esize;
+		size *= esize;
+		len *= esize;
+	}
+	l = min(len, size - off);
+
+	ret = copy_to_user(to, fifo->data + off, l);
+	if (unlikely(ret))
+		ret = DIV_ROUND_UP(ret + len - l, esize);
+	else {
+		ret = copy_to_user(to + l, fifo->data, len - l);
+		if (unlikely(ret))
+			ret = DIV_ROUND_UP(ret, esize);
+	}
 	/*
-	 * Ensure that we sample the fifo->in index -before- we
-	 * start removing bytes from the kfifo.
+	 * make sure that the data is copied before
+	 * incrementing the fifo->out index counter
 	 */
+	smp_wmb();
+	*copied = len - ret;
+	/* return the number of elements which are not copied */
+	return ret;
+}
 
-	smp_rmb();
+int __kfifo_to_user(struct __kfifo *fifo, void __user *to,
+		unsigned long len, unsigned int *copied)
+{
+	unsigned int l;
+	unsigned long ret;
+	unsigned int esize = fifo->esize;
+	int err;
 
-	off = __kfifo_off(fifo, fifo->out + off);
+	if (esize != 1)
+		len /= esize;
 
-	/* first get the data from fifo->out until the end of the buffer */
-	l = min(len, fifo->size - off);
-	ret = copy_to_user(to, fifo->buffer + off, l);
-	*lenout = l;
+	l = fifo->in - fifo->out;
+	if (len > l)
+		len = l;
+	ret = kfifo_copy_to_user(fifo, to, len, fifo->out, copied);
 	if (unlikely(ret)) {
-		*lenout -= ret;
-		return -EFAULT;
-	}
+		len -= ret;
+		err = -EFAULT;
+	} else
+		err = 0;
+	fifo->out += len;
+	return err;
+}
+EXPORT_SYMBOL(__kfifo_to_user);
 
-	/* then get the rest (if any) from the beginning of the buffer */
-	len -= l;
-	ret = copy_to_user(to + l, fifo->buffer, len);
-	if (unlikely(ret)) {
-		*lenout += len - ret;
-		return -EFAULT;
+static int setup_sgl_buf(struct scatterlist *sgl, void *buf,
+		int nents, unsigned int len)
+{
+	int n;
+	unsigned int l;
+	unsigned int off;
+	struct page *page;
+
+	if (!nents)
+		return 0;
+
+	if (!len)
+		return 0;
+
+	n = 0;
+	page = virt_to_page(buf);
+	off = offset_in_page(buf);
+	l = 0;
+
+	while (len >= l + PAGE_SIZE - off) {
+		struct page *npage;
+
+		l += PAGE_SIZE;
+		buf += PAGE_SIZE;
+		npage = virt_to_page(buf);
+		if (page_to_phys(page) != page_to_phys(npage) - l) {
+			sg_set_page(sgl, page, l - off, off);
+			sgl = sg_next(sgl);
+			if (++n == nents || sgl == NULL)
+				return n;
+			page = npage;
+			len -= l - off;
+			l = off = 0;
+		}
 	}
-	*lenout += len;
-	return 0;
+	sg_set_page(sgl, page, len, off);
+	return n + 1;
 }
 
-unsigned int __kfifo_in_n(struct kfifo *fifo,
-	const void *from, unsigned int len, unsigned int recsize)
+static unsigned int setup_sgl(struct __kfifo *fifo, struct scatterlist *sgl,
+		int nents, unsigned int len, unsigned int off)
 {
-	if (kfifo_avail(fifo) < len + recsize)
-		return len + 1;
+	unsigned int size = fifo->mask + 1;
+	unsigned int esize = fifo->esize;
+	unsigned int l;
+	unsigned int n;
 
-	__kfifo_in_data(fifo, from, len, recsize);
-	return 0;
+	off &= fifo->mask;
+	if (esize != 1) {
+		off *= esize;
+		size *= esize;
+		len *= esize;
+	}
+	l = min(len, size - off);
+
+	n = setup_sgl_buf(sgl, fifo->data + off, nents, l);
+	n += setup_sgl_buf(sgl + n, fifo->data, nents - n, len - l);
+
+	if (n)
+		sg_mark_end(sgl + n - 1);
+	return n;
 }
-EXPORT_SYMBOL(__kfifo_in_n);
 
-/**
- * kfifo_in - puts some data into the FIFO
- * @fifo: the fifo to be used.
- * @from: the data to be added.
- * @len: the length of the data to be added.
- *
- * This function copies at most @len bytes from the @from buffer into
- * the FIFO depending on the free space, and returns the number of
- * bytes copied.
- *
- * Note that with only one concurrent reader and one concurrent
- * writer, you don't need extra locking to use these functions.
- */
-unsigned int kfifo_in(struct kfifo *fifo, const void *from,
-		unsigned int len)
+unsigned int __kfifo_dma_in_prepare(struct __kfifo *fifo,
+		struct scatterlist *sgl, int nents, unsigned int len)
 {
-	len = min(kfifo_avail(fifo), len);
+	unsigned int l;
 
-	__kfifo_in_data(fifo, from, len, 0);
-	__kfifo_add_in(fifo, len);
-	return len;
+	l = kfifo_unused(fifo);
+	if (len > l)
+		len = l;
+
+	return setup_sgl(fifo, sgl, nents, len, fifo->in);
 }
-EXPORT_SYMBOL(kfifo_in);
+EXPORT_SYMBOL(__kfifo_dma_in_prepare);
 
-unsigned int __kfifo_in_generic(struct kfifo *fifo,
-	const void *from, unsigned int len, unsigned int recsize)
+unsigned int __kfifo_dma_out_prepare(struct __kfifo *fifo,
+		struct scatterlist *sgl, int nents, unsigned int len)
 {
-	return __kfifo_in_rec(fifo, from, len, recsize);
+	unsigned int l;
+
+	l = fifo->in - fifo->out;
+	if (len > l)
+		len = l;
+
+	return setup_sgl(fifo, sgl, nents, len, fifo->out);
 }
-EXPORT_SYMBOL(__kfifo_in_generic);
+EXPORT_SYMBOL(__kfifo_dma_out_prepare);
 
-unsigned int __kfifo_out_n(struct kfifo *fifo,
-	void *to, unsigned int len, unsigned int recsize)
+unsigned int __kfifo_max_r(unsigned int len, size_t recsize)
 {
-	if (kfifo_len(fifo) < len + recsize)
-		return len;
+	unsigned int max = (1 << (recsize << 3)) - 1;
 
-	__kfifo_out_data(fifo, to, len, recsize);
-	__kfifo_add_out(fifo, len + recsize);
-	return 0;
+	if (len > max)
+		return max;
+	return len;
 }
-EXPORT_SYMBOL(__kfifo_out_n);
 
-/**
- * kfifo_out - gets some data from the FIFO
- * @fifo: the fifo to be used.
- * @to: where the data must be copied.
- * @len: the size of the destination buffer.
- *
- * This function copies at most @len bytes from the FIFO into the
- * @to buffer and returns the number of copied bytes.
- *
- * Note that with only one concurrent reader and one concurrent
- * writer, you don't need extra locking to use these functions.
+#define	__KFIFO_PEEK(data, out, mask) \
+	((data)[(out) & (mask)])
+/*
+ * __kfifo_peek_n internal helper function for determinate the length of
+ * the next record in the fifo
  */
-unsigned int kfifo_out(struct kfifo *fifo, void *to, unsigned int len)
+static unsigned int __kfifo_peek_n(struct __kfifo *fifo, size_t recsize)
 {
-	len = min(kfifo_len(fifo), len);
+	unsigned int l;
+	unsigned int mask = fifo->mask;
+	unsigned char *data = fifo->data;
 
-	__kfifo_out_data(fifo, to, len, 0);
-	__kfifo_add_out(fifo, len);
+	l = __KFIFO_PEEK(data, fifo->out, mask);
 
-	return len;
+	if (--recsize)
+		l |= __KFIFO_PEEK(data, fifo->out + 1, mask) << 8;
+
+	return l;
 }
-EXPORT_SYMBOL(kfifo_out);
-
-/**
- * kfifo_out_peek - copy some data from the FIFO, but do not remove it
- * @fifo: the fifo to be used.
- * @to: where the data must be copied.
- * @len: the size of the destination buffer.
- * @offset: offset into the fifo
- *
- * This function copies at most @len bytes at @offset from the FIFO
- * into the @to buffer and returns the number of copied bytes.
- * The data is not removed from the FIFO.
+
+#define	__KFIFO_POKE(data, in, mask, val) \
+	( \
+	(data)[(in) & (mask)] = (unsigned char)(val) \
+	)
+
+/*
+ * __kfifo_poke_n internal helper function for storeing the length of
+ * the record into the fifo
  */
-unsigned int kfifo_out_peek(struct kfifo *fifo, void *to, unsigned int len,
-			    unsigned offset)
+static void __kfifo_poke_n(struct __kfifo *fifo, unsigned int n, size_t recsize)
 {
-	len = min(kfifo_len(fifo), len + offset);
+	unsigned int mask = fifo->mask;
+	unsigned char *data = fifo->data;
 
-	__kfifo_out_data(fifo, to, len, offset);
-	return len;
+	__KFIFO_POKE(data, fifo->in, mask, n);
+
+	if (recsize > 1)
+		__KFIFO_POKE(data, fifo->in + 1, mask, n >> 8);
 }
-EXPORT_SYMBOL(kfifo_out_peek);
 
-unsigned int __kfifo_out_generic(struct kfifo *fifo,
-	void *to, unsigned int len, unsigned int recsize,
-	unsigned int *total)
+unsigned int __kfifo_len_r(struct __kfifo *fifo, size_t recsize)
 {
-	return __kfifo_out_rec(fifo, to, len, recsize, total);
+	return __kfifo_peek_n(fifo, recsize);
 }
-EXPORT_SYMBOL(__kfifo_out_generic);
+EXPORT_SYMBOL(__kfifo_len_r);
 
-unsigned int __kfifo_from_user_n(struct kfifo *fifo,
-	const void __user *from, unsigned int len, unsigned int recsize)
+unsigned int __kfifo_in_r(struct __kfifo *fifo, const void *buf,
+		unsigned int len, size_t recsize)
 {
-	unsigned total;
+	if (len + recsize > kfifo_unused(fifo))
+		return 0;
 
-	if (kfifo_avail(fifo) < len + recsize)
-		return len + 1;
+	__kfifo_poke_n(fifo, len, recsize);
 
-	__kfifo_from_user_data(fifo, from, len, recsize, &total);
-	return total;
+	kfifo_copy_in(fifo, buf, len, fifo->in + recsize);
+	fifo->in += len + recsize;
+	return len;
 }
-EXPORT_SYMBOL(__kfifo_from_user_n);
+EXPORT_SYMBOL(__kfifo_in_r);
 
-/**
- * kfifo_from_user - puts some data from user space into the FIFO
- * @fifo: the fifo to be used.
- * @from: pointer to the data to be added.
- * @len: the length of the data to be added.
- * @total: the actual returned data length.
- *
- * This function copies at most @len bytes from the @from into the
- * FIFO depending and returns -EFAULT/0.
- *
- * Note that with only one concurrent reader and one concurrent
- * writer, you don't need extra locking to use these functions.
- */
-int kfifo_from_user(struct kfifo *fifo,
-	const void __user *from, unsigned int len, unsigned *total)
+static unsigned int kfifo_out_copy_r(struct __kfifo *fifo,
+	void *buf, unsigned int len, size_t recsize, unsigned int *n)
 {
-	int ret;
-	len = min(kfifo_avail(fifo), len);
-	ret = __kfifo_from_user_data(fifo, from, len, 0, total);
-	if (ret)
-		return ret;
-	__kfifo_add_in(fifo, len);
-	return 0;
+	*n = __kfifo_peek_n(fifo, recsize);
+
+	if (len > *n)
+		len = *n;
+
+	kfifo_copy_out(fifo, buf, len, fifo->out + recsize);
+	return len;
+}
+
+unsigned int __kfifo_out_peek_r(struct __kfifo *fifo, void *buf,
+		unsigned int len, size_t recsize)
+{
+	unsigned int n;
+
+	if (fifo->in == fifo->out)
+		return 0;
+
+	return kfifo_out_copy_r(fifo, buf, len, recsize, &n);
 }
-EXPORT_SYMBOL(kfifo_from_user);
+EXPORT_SYMBOL(__kfifo_out_peek_r);
 
-unsigned int __kfifo_from_user_generic(struct kfifo *fifo,
-	const void __user *from, unsigned int len, unsigned int recsize)
+unsigned int __kfifo_out_r(struct __kfifo *fifo, void *buf,
+		unsigned int len, size_t recsize)
 {
-	return __kfifo_from_user_rec(fifo, from, len, recsize);
+	unsigned int n;
+
+	if (fifo->in == fifo->out)
+		return 0;
+
+	len = kfifo_out_copy_r(fifo, buf, len, recsize, &n);
+	fifo->out += n + recsize;
+	return len;
 }
-EXPORT_SYMBOL(__kfifo_from_user_generic);
+EXPORT_SYMBOL(__kfifo_out_r);
 
-unsigned int __kfifo_to_user_n(struct kfifo *fifo,
-	void __user *to, unsigned int len, unsigned int reclen,
-	unsigned int recsize)
+int __kfifo_from_user_r(struct __kfifo *fifo, const void __user *from,
+	unsigned long len, unsigned int *copied, size_t recsize)
 {
-	unsigned int ret, total;
+	unsigned long ret;
 
-	if (kfifo_len(fifo) < reclen + recsize)
-		return len;
+	len = __kfifo_max_r(len, recsize);
 
-	ret = __kfifo_to_user_data(fifo, to, reclen, recsize, &total);
+	if (len + recsize > kfifo_unused(fifo)) {
+		*copied = 0;
+		return 0;
+	}
 
-	if (likely(ret == 0))
-		__kfifo_add_out(fifo, reclen + recsize);
+	__kfifo_poke_n(fifo, len, recsize);
 
-	return total;
+	ret = kfifo_copy_from_user(fifo, from, len, fifo->in + recsize, copied);
+	if (unlikely(ret)) {
+		*copied = 0;
+		return -EFAULT;
+	}
+	fifo->in += len + recsize;
+	return 0;
 }
-EXPORT_SYMBOL(__kfifo_to_user_n);
+EXPORT_SYMBOL(__kfifo_from_user_r);
 
-/**
- * kfifo_to_user - gets data from the FIFO and write it to user space
- * @fifo: the fifo to be used.
- * @to: where the data must be copied.
- * @len: the size of the destination buffer.
- * @lenout: pointer to output variable with copied data
- *
- * This function copies at most @len bytes from the FIFO into the
- * @to buffer and 0 or -EFAULT.
- *
- * Note that with only one concurrent reader and one concurrent
- * writer, you don't need extra locking to use these functions.
- */
-int kfifo_to_user(struct kfifo *fifo,
-	void __user *to, unsigned int len, unsigned *lenout)
+int __kfifo_to_user_r(struct __kfifo *fifo, void __user *to,
+	unsigned long len, unsigned int *copied, size_t recsize)
 {
-	int ret;
-	len = min(kfifo_len(fifo), len);
-	ret = __kfifo_to_user_data(fifo, to, len, 0, lenout);
-	__kfifo_add_out(fifo, *lenout);
-	return ret;
+	unsigned long ret;
+	unsigned int n;
+
+	if (fifo->in == fifo->out) {
+		*copied = 0;
+		return 0;
+	}
+
+	n = __kfifo_peek_n(fifo, recsize);
+	if (len > n)
+		len = n;
+
+	ret = kfifo_copy_to_user(fifo, to, len, fifo->out + recsize, copied);
+	if (unlikely(ret)) {
+		*copied = 0;
+		return -EFAULT;
+	}
+	fifo->out += n + recsize;
+	return 0;
 }
-EXPORT_SYMBOL(kfifo_to_user);
+EXPORT_SYMBOL(__kfifo_to_user_r);
 
-unsigned int __kfifo_to_user_generic(struct kfifo *fifo,
-	void __user *to, unsigned int len, unsigned int recsize,
-	unsigned int *total)
+unsigned int __kfifo_dma_in_prepare_r(struct __kfifo *fifo,
+	struct scatterlist *sgl, int nents, unsigned int len, size_t recsize)
 {
-	return __kfifo_to_user_rec(fifo, to, len, recsize, total);
+	if (!nents)
+		BUG();
+
+	len = __kfifo_max_r(len, recsize);
+
+	if (len + recsize > kfifo_unused(fifo))
+		return 0;
+
+	return setup_sgl(fifo, sgl, nents, len, fifo->in + recsize);
 }
-EXPORT_SYMBOL(__kfifo_to_user_generic);
+EXPORT_SYMBOL(__kfifo_dma_in_prepare_r);
 
-unsigned int __kfifo_peek_generic(struct kfifo *fifo, unsigned int recsize)
+void __kfifo_dma_in_finish_r(struct __kfifo *fifo,
+	unsigned int len, size_t recsize)
 {
-	if (recsize == 0)
-		return kfifo_avail(fifo);
-
-	return __kfifo_peek_n(fifo, recsize);
+	len = __kfifo_max_r(len, recsize);
+	__kfifo_poke_n(fifo, len, recsize);
+	fifo->in += len + recsize;
 }
-EXPORT_SYMBOL(__kfifo_peek_generic);
+EXPORT_SYMBOL(__kfifo_dma_in_finish_r);
 
-void __kfifo_skip_generic(struct kfifo *fifo, unsigned int recsize)
+unsigned int __kfifo_dma_out_prepare_r(struct __kfifo *fifo,
+	struct scatterlist *sgl, int nents, unsigned int len, size_t recsize)
 {
-	__kfifo_skip_rec(fifo, recsize);
+	if (!nents)
+		BUG();
+
+	len = __kfifo_max_r(len, recsize);
+
+	if (len + recsize > fifo->in - fifo->out)
+		return 0;
+
+	return setup_sgl(fifo, sgl, nents, len, fifo->out + recsize);
 }
-EXPORT_SYMBOL(__kfifo_skip_generic);
+EXPORT_SYMBOL(__kfifo_dma_out_prepare_r);
+
+void __kfifo_dma_out_finish_r(struct __kfifo *fifo, size_t recsize)
+{
+	unsigned int len;
 
+	len = __kfifo_peek_n(fifo, recsize);
+	fifo->out += len + recsize;
+}
+EXPORT_SYMBOL(__kfifo_dma_out_finish_r);
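
The rewrite replaces the byte-oriented struct kfifo with a generic
struct __kfifo that records an element size (esize), lets the in/out
indices wrap through a power-of-two mask, and grows scatter-gather (DMA)
and fixed-size-record (*_r) variants. The exported __kfifo_* functions are
back-ends for macros in <linux/kfifo.h>; a hedged usage sketch of that
macro front-end (the macro names are assumptions based on the header, which
is not part of this diff):

    #include <linux/kfifo.h>

    static DECLARE_KFIFO(test, unsigned char, 128);	/* size: power of two */

    static void kfifo_demo(void)
    {
    	unsigned char buf[16];
    	unsigned int n;

    	INIT_KFIFO(test);

    	n = kfifo_in(&test, "hello", 5);	/* front-end of __kfifo_in() */
    	n = kfifo_out(&test, buf, sizeof(buf));	/* front-end of __kfifo_out() */
    }

As before, the smp_wmb() pairing in the copy helpers keeps a single
producer and a single consumer safe without extra locking; anything more
concurrent still needs its own lock.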
diff --git a/kernel/panic.c b/kernel/panic.c
index 3b16cd93fa7d..4c13b1a88ebb 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -24,6 +24,9 @@
 #include <linux/nmi.h>
 #include <linux/dmi.h>
 
+#define PANIC_TIMER_STEP 100
+#define PANIC_BLINK_SPD 18
+
 int panic_on_oops;
 static unsigned long tainted_mask;
 static int pause_on_oops;
@@ -36,36 +39,15 @@ ATOMIC_NOTIFIER_HEAD(panic_notifier_list);
 
 EXPORT_SYMBOL(panic_notifier_list);
 
-/* Returns how long it waited in ms */
-long (*panic_blink)(long time);
-EXPORT_SYMBOL(panic_blink);
-
-static void panic_blink_one_second(void)
+static long no_blink(int state)
 {
-	static long i = 0, end;
-
-	if (panic_blink) {
-		end = i + MSEC_PER_SEC;
-
-		while (i < end) {
-			i += panic_blink(i);
-			mdelay(1);
-			i++;
-		}
-	} else {
-		/*
-		 * When running under a hypervisor a small mdelay may get
-		 * rounded up to the hypervisor timeslice. For example, with
-		 * a 1ms in 10ms hypervisor timeslice we might inflate a
-		 * mdelay(1) loop by 10x.
-		 *
-		 * If we have nothing to blink, spin on 1 second calls to
-		 * mdelay to avoid this.
-		 */
-		mdelay(MSEC_PER_SEC);
-	}
+	return 0;
 }
 
+/* Returns how long it waited in ms */
+long (*panic_blink)(int state);
+EXPORT_SYMBOL(panic_blink);
+
 /**
 * panic - halt the system
 * @fmt: The text string to print
@@ -78,7 +60,8 @@ NORET_TYPE void panic(const char * fmt, ...)
 {
 	static char buf[1024];
 	va_list args;
-	long i;
+	long i, i_next = 0;
+	int state = 0;
 
 	/*
 	 * It's possible to come here directly from a panic-assertion and
@@ -117,6 +100,9 @@
 
 	bust_spinlocks(0);
 
+	if (!panic_blink)
+		panic_blink = no_blink;
+
 	if (panic_timeout > 0) {
 		/*
 		 * Delay timeout seconds before rebooting the machine.
@@ -124,9 +110,13 @@
 		 */
 		printk(KERN_EMERG "Rebooting in %d seconds..", panic_timeout);
 
-		for (i = 0; i < panic_timeout; i++) {
+		for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) {
 			touch_nmi_watchdog();
-			panic_blink_one_second();
+			if (i >= i_next) {
+				i += panic_blink(state ^= 1);
+				i_next = i + 3600 / PANIC_BLINK_SPD;
+			}
+			mdelay(PANIC_TIMER_STEP);
 		}
 		/*
 		 * This will not be a clean reboot, with everything
@@ -152,9 +142,13 @@
 	}
 #endif
 	local_irq_enable();
-	while (1) {
+	for (i = 0; ; i += PANIC_TIMER_STEP) {
 		touch_softlockup_watchdog();
-		panic_blink_one_second();
+		if (i >= i_next) {
+			i += panic_blink(state ^= 1);
+			i_next = i + 3600 / PANIC_BLINK_SPD;
+		}
+		mdelay(PANIC_TIMER_STEP);
 	}
 }
 
@@ -344,7 +338,7 @@ static int init_oops_id(void)
 }
 late_initcall(init_oops_id);
 
-static void print_oops_end_marker(void)
+void print_oops_end_marker(void)
 {
 	init_oops_id();
 	printk(KERN_WARNING "---[ end trace %016llx ]---\n",
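
The arithmetic behind the new loop: panic_blink() is now called with an
on/off state and returns how many ms it consumed, and i_next advances by
3600 / PANIC_BLINK_SPD = 200 ms per call, so the LED toggles roughly every
200 ms while mdelay() ticks in PANIC_TIMER_STEP (100 ms) steps. A sketch
of a callback under the new contract (led_set() is a hypothetical helper):

    /* state alternates 0/1; the return value is time spent, in ms */
    static long example_blink(int state)
    {
    	led_set(state);		/* hypothetical: drive a panic LED */
    	return 0;		/* negligible time consumed */
    }

    /* registration, e.g. from a driver: */
    panic_blink = example_blink;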
diff --git a/kernel/pid.c b/kernel/pid.c
index e9fd8c132d26..d55c6fb8d087 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -122,6 +122,43 @@ static void free_pidmap(struct upid *upid)
 	atomic_inc(&map->nr_free);
 }
 
+/*
+ * If we started walking pids at 'base', is 'a' seen before 'b'?
+ */
+static int pid_before(int base, int a, int b)
+{
+	/*
+	 * This is the same as saying
+	 *
+	 * (a - base + MAXUINT) % MAXUINT < (b - base + MAXUINT) % MAXUINT
+	 * and that mapping orders 'a' and 'b' with respect to 'base'.
+	 */
+	return (unsigned)(a - base) < (unsigned)(b - base);
+}
+
+/*
+ * We might be racing with someone else trying to set pid_ns->last_pid.
+ * We want the winner to have the "later" value, because if the
+ * "earlier" value prevails, then a pid may get reused immediately.
+ *
+ * Since pids rollover, it is not sufficient to just pick the bigger
+ * value. We have to consider where we started counting from.
+ *
+ * 'base' is the value of pid_ns->last_pid that we observed when
+ * we started looking for a pid.
+ *
+ * 'pid' is the pid that we eventually found.
+ */
+static void set_last_pid(struct pid_namespace *pid_ns, int base, int pid)
+{
+	int prev;
+	int last_write = base;
+	do {
+		prev = last_write;
+		last_write = cmpxchg(&pid_ns->last_pid, prev, pid);
+	} while ((prev != last_write) && (pid_before(base, last_write, pid)));
+}
+
 static int alloc_pidmap(struct pid_namespace *pid_ns)
 {
 	int i, offset, max_scan, pid, last = pid_ns->last_pid;
@@ -132,7 +169,12 @@ static int alloc_pidmap(struct pid_namespace *pid_ns)
 		pid = RESERVED_PIDS;
 	offset = pid & BITS_PER_PAGE_MASK;
 	map = &pid_ns->pidmap[pid/BITS_PER_PAGE];
-	max_scan = (pid_max + BITS_PER_PAGE - 1)/BITS_PER_PAGE - !offset;
+	/*
+	 * If last_pid points into the middle of the map->page we
+	 * want to scan this bitmap block twice, the second time
+	 * we start with offset == 0 (or RESERVED_PIDS).
+	 */
+	max_scan = DIV_ROUND_UP(pid_max, BITS_PER_PAGE) - !offset;
 	for (i = 0; i <= max_scan; ++i) {
 		if (unlikely(!map->page)) {
 			void *page = kzalloc(PAGE_SIZE, GFP_KERNEL);
@@ -154,20 +196,12 @@
 		do {
 			if (!test_and_set_bit(offset, map->page)) {
 				atomic_dec(&map->nr_free);
-				pid_ns->last_pid = pid;
+				set_last_pid(pid_ns, last, pid);
 				return pid;
 			}
 			offset = find_next_offset(map, offset);
 			pid = mk_pid(pid_ns, map, offset);
-			/*
-			 * find_next_offset() found a bit, the pid from it
-			 * is in-bounds, and if we fell back to the last
-			 * bitmap block and the final block was the same
-			 * as the starting point, pid is before last_pid.
-			 */
-		} while (offset < BITS_PER_PAGE && pid < pid_max &&
-				(i != max_scan || pid < last ||
-				    !((last+1) & BITS_PER_PAGE_MASK)));
+		} while (offset < BITS_PER_PAGE && pid < pid_max);
 	}
 	if (map < &pid_ns->pidmap[(pid_max-1)/BITS_PER_PAGE]) {
 		++map;
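
set_last_pid() resolves races on last_pid without a lock: each loser of
the cmpxchg() retries only while the value the winner stored is still
"earlier" than its own candidate, where "earlier" is defined relative to
where the walk started so that pid wraparound is handled. A worked example
of pid_before() with assumed values near a 32768 pid_max:

    /*
     * base = 32000 (last_pid when the walk began); one walker wrapped
     * around and found 300, another walked further and stored 31950:
     *
     *   pid_before(32000, 300, 31950)
     *     => (unsigned)(300 - 32000) < (unsigned)(31950 - 32000)
     *     => 0xffff842c < 0xffffffce  => true
     *
     * so 300 is "before" 31950 when counting from 32000, and the cmpxchg
     * loop lets 31950, the later value, win, avoiding immediate reuse.
     */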
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 74a3d693c196..f34d798ef4a2 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -324,26 +324,32 @@ int ptrace_detach(struct task_struct *child, unsigned int data)
 }
 
 /*
- * Detach all tasks we were using ptrace on.
+ * Detach all tasks we were using ptrace on. Called with tasklist held
+ * for writing, and returns with it held too. But note it can release
+ * and reacquire the lock.
 */
 void exit_ptrace(struct task_struct *tracer)
 {
 	struct task_struct *p, *n;
 	LIST_HEAD(ptrace_dead);
 
-	write_lock_irq(&tasklist_lock);
+	if (likely(list_empty(&tracer->ptraced)))
+		return;
+
 	list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
 		if (__ptrace_detach(tracer, p))
 			list_add(&p->ptrace_entry, &ptrace_dead);
 	}
-	write_unlock_irq(&tasklist_lock);
 
+	write_unlock_irq(&tasklist_lock);
 	BUG_ON(!list_empty(&tracer->ptraced));
 
 	list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_entry) {
 		list_del_init(&p->ptrace_entry);
 		release_task(p);
 	}
+
+	write_lock_irq(&tasklist_lock);
 }
 
 int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
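
The lock juggling at the tail is deliberate: release_task() acquires
tasklist_lock internally, so it cannot run with the lock held. A sketch of
the resulting pattern, matching the hunk above:

    /* entered with tasklist_lock write-held; may drop and retake it */
    write_unlock_irq(&tasklist_lock);
    list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_entry) {
    	list_del_init(&p->ptrace_entry);
    	release_task(p);	/* takes tasklist_lock itself */
    }
    write_lock_irq(&tasklist_lock);	/* restore the caller's lock state */

The early list_empty() return is what lets callers such as
forget_original_parent() treat the common no-tracees case as lock-neutral.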
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index e14c839e9faa..e960d824263f 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -690,6 +690,7 @@ static void timekeeping_adjust(s64 offset)
 static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
 {
 	u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift;
+	u64 raw_nsecs;
 
 	/* If the offset is smaller then a shifted interval, do nothing */
 	if (offset < timekeeper.cycle_interval<<shift)
@@ -706,12 +707,14 @@
 		second_overflow();
 	}
 
-	/* Accumulate into raw time */
-	raw_time.tv_nsec += timekeeper.raw_interval << shift;;
-	while (raw_time.tv_nsec >= NSEC_PER_SEC) {
-		raw_time.tv_nsec -= NSEC_PER_SEC;
+	/* Accumulate raw time */
+	raw_nsecs = timekeeper.raw_interval << shift;
+	raw_nsecs += raw_time.tv_nsec;
+	while (raw_nsecs >= NSEC_PER_SEC) {
+		raw_nsecs -= NSEC_PER_SEC;
 		raw_time.tv_sec++;
 	}
+	raw_time.tv_nsec = raw_nsecs;
 
 	/* Accumulate error between NTP and clock interval */
 	timekeeper.ntp_error += tick_length << shift;
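
The fix widens the accumulation: tv_nsec is a long (32 bits on 32-bit
machines), while raw_interval << shift can exceed both NSEC_PER_SEC and
2^31 - 1. A worked example with an assumed HZ=1000, so raw_interval is
about 1,000,000 ns:

    /* shift = 12:  1,000,000 << 12 ~= 4.1e9 ns, which overflows a 32-bit
     * signed tv_nsec (max ~2.1e9). Accumulating in the u64 raw_nsecs
     * first, then storing only the < NSEC_PER_SEC remainder back into
     * tv_nsec, keeps every intermediate value in range. */

(The doubled ";;" on old line 710 was a harmless typo that disappears with
the rewrite.)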
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 82499a5bdcb7..959f8d6c8cc1 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -710,6 +710,9 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
 	if (rq->cmd_flags & REQ_DISCARD)
 		rw |= REQ_DISCARD;
 
+	if (rq->cmd_flags & REQ_SECURE)
+		rw |= REQ_SECURE;
+
 	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
 		what |= BLK_TC_ACT(BLK_TC_PC);
 		__blk_add_trace(bt, 0, blk_rq_bytes(rq), rw,
@@ -1816,6 +1819,8 @@ void blk_fill_rwbs(char *rwbs, u32 rw, int bytes)
 		rwbs[i++] = 'S';
 	if (rw & REQ_META)
 		rwbs[i++] = 'M';
+	if (rw & REQ_SECURE)
+		rwbs[i++] = 'E';
 
 	rwbs[i] = '\0';
 }
@@ -1828,6 +1833,9 @@ void blk_fill_rwbs_rq(char *rwbs, struct request *rq)
 	if (rq->cmd_flags & REQ_DISCARD)
 		rw |= REQ_DISCARD;
 
+	if (rq->cmd_flags & REQ_SECURE)
+		rw |= REQ_SECURE;
+
 	bytes = blk_rq_bytes(rq);
 
 	blk_fill_rwbs(rwbs, rw, bytes);
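
With REQ_SECURE propagated and rendered, secure-erase requests become
visible in blktrace output as an 'E' in the rwbs flags string. A hedged
decoding example (the exact string depends on the other request flags
set):

    char rwbs[8];

    /* assumed flags: a secure write request renders roughly as "WE"
     * ('W' write, 'E' secure erase) */
    blk_fill_rwbs(rwbs, REQ_WRITE | REQ_SECURE, 4096);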