Diffstat (limited to 'kernel')

 -rw-r--r--  kernel/Makefile            |  3
 -rw-r--r--  kernel/cred.c              |  6
 -rw-r--r--  kernel/events/core.c       | 67
 -rw-r--r--  kernel/fork.c              |  1
 -rw-r--r--  kernel/irq/generic-chip.c  |  4
 -rw-r--r--  kernel/irq/irqdesc.c       | 37
 -rw-r--r--  kernel/irq/manage.c        | 17
 -rw-r--r--  kernel/lockdep.c           |  8
 -rw-r--r--  kernel/power/Kconfig       |  4
 -rw-r--r--  kernel/printk.c            |  8
 -rw-r--r--  kernel/sched.c             | 43
 -rw-r--r--  kernel/sys.c               | 53
 -rw-r--r--  kernel/sys_ni.c            |  1
 -rw-r--r--  kernel/sysctl_binary.c     |  2
 -rw-r--r--  kernel/sysctl_check.c      |  2
 -rw-r--r--  kernel/trace/Kconfig       |  2
 -rw-r--r--  kernel/trace/blktrace.c    | 21

 17 files changed, 211 insertions, 68 deletions
diff --git a/kernel/Makefile b/kernel/Makefile
index d06467fc8f7..eca595e2fd5 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -10,7 +10,7 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o \
 	    kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
 	    hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
 	    notifier.o ksysfs.o pm_qos_params.o sched_clock.o cred.o \
-	    async.o range.o jump_label.o
+	    async.o range.o
 obj-y += groups.o
 
 ifdef CONFIG_FUNCTION_TRACER
@@ -107,6 +107,7 @@ obj-$(CONFIG_PERF_EVENTS) += events/
 obj-$(CONFIG_USER_RETURN_NOTIFIER) += user-return-notifier.o
 obj-$(CONFIG_PADATA) += padata.o
 obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
+obj-$(CONFIG_JUMP_LABEL) += jump_label.o
 
 ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
 # According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
diff --git a/kernel/cred.c b/kernel/cred.c
index 174fa84eca3..8ef31f53c44 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -508,10 +508,8 @@ int commit_creds(struct cred *new)
 	key_fsgid_changed(task);
 
 	/* do it
-	 * - What if a process setreuid()'s and this brings the
-	 *   new uid over his NPROC rlimit? We can check this now
-	 *   cheaply with the new uid cache, so if it matters
-	 *   we should be checking for it. -DaveM
+	 * RLIMIT_NPROC limits on user->processes have already been checked
+	 * in set_user().
 	 */
 	alter_cred_subscribers(new, 2);
 	if (new->user != old->user)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index b8785e26ee1..0f857782d06 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -399,14 +399,54 @@ void perf_cgroup_switch(struct task_struct *task, int mode)
 	local_irq_restore(flags);
 }
 
-static inline void perf_cgroup_sched_out(struct task_struct *task)
+static inline void perf_cgroup_sched_out(struct task_struct *task,
+					 struct task_struct *next)
 {
-	perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
+	struct perf_cgroup *cgrp1;
+	struct perf_cgroup *cgrp2 = NULL;
+
+	/*
+	 * we come here when we know perf_cgroup_events > 0
+	 */
+	cgrp1 = perf_cgroup_from_task(task);
+
+	/*
+	 * next is NULL when called from perf_event_enable_on_exec()
+	 * that will systematically cause a cgroup_switch()
+	 */
+	if (next)
+		cgrp2 = perf_cgroup_from_task(next);
+
+	/*
+	 * only schedule out current cgroup events if we know
+	 * that we are switching to a different cgroup. Otherwise,
+	 * do not touch the cgroup events.
+	 */
+	if (cgrp1 != cgrp2)
+		perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
 }
 
-static inline void perf_cgroup_sched_in(struct task_struct *task)
+static inline void perf_cgroup_sched_in(struct task_struct *prev,
+					struct task_struct *task)
 {
-	perf_cgroup_switch(task, PERF_CGROUP_SWIN);
+	struct perf_cgroup *cgrp1;
+	struct perf_cgroup *cgrp2 = NULL;
+
+	/*
+	 * we come here when we know perf_cgroup_events > 0
+	 */
+	cgrp1 = perf_cgroup_from_task(task);
+
+	/* prev can never be NULL */
+	cgrp2 = perf_cgroup_from_task(prev);
+
+	/*
+	 * only need to schedule in cgroup events if we are changing
+	 * cgroup during ctxsw. Cgroup events were not scheduled
+	 * out during ctxsw if that was not the case.
+	 */
+	if (cgrp1 != cgrp2)
+		perf_cgroup_switch(task, PERF_CGROUP_SWIN);
 }
 
 static inline int perf_cgroup_connect(int fd, struct perf_event *event,
@@ -518,11 +558,13 @@ static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
 {
 }
 
-static inline void perf_cgroup_sched_out(struct task_struct *task)
+static inline void perf_cgroup_sched_out(struct task_struct *task,
+					 struct task_struct *next)
 {
 }
 
-static inline void perf_cgroup_sched_in(struct task_struct *task)
+static inline void perf_cgroup_sched_in(struct task_struct *prev,
+					struct task_struct *task)
 {
 }
 
@@ -1988,7 +2030,7 @@ void __perf_event_task_sched_out(struct task_struct *task,
 	 * cgroup event are system-wide mode only
 	 */
 	if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
-		perf_cgroup_sched_out(task);
+		perf_cgroup_sched_out(task, next);
 }
 
 static void task_ctx_sched_out(struct perf_event_context *ctx)
@@ -2153,7 +2195,8 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
 * accessing the event control register. If a NMI hits, then it will
 * keep the event running.
 */
-void __perf_event_task_sched_in(struct task_struct *task)
+void __perf_event_task_sched_in(struct task_struct *prev,
+				struct task_struct *task)
 {
 	struct perf_event_context *ctx;
 	int ctxn;
@@ -2171,7 +2214,7 @@ void __perf_event_task_sched_in(struct task_struct *task)
 	 * cgroup event are system-wide mode only
 	 */
 	if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
-		perf_cgroup_sched_in(task);
+		perf_cgroup_sched_in(prev, task);
 }
 
 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
@@ -2427,7 +2470,7 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
 	 * ctxswin cgroup events which are already scheduled
 	 * in.
 	 */
-	perf_cgroup_sched_out(current);
+	perf_cgroup_sched_out(current, NULL);
 
 	raw_spin_lock(&ctx->lock);
 	task_ctx_sched_out(ctx);
@@ -3353,8 +3396,8 @@ static int perf_event_index(struct perf_event *event)
 }
 
 static void calc_timer_values(struct perf_event *event,
-				u64 *running,
-				u64 *enabled)
+				u64 *enabled,
+				u64 *running)
 {
 	u64 now, ctx_time;
 
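The calc_timer_values() hunk above fixes a silently swapped pair of out-parameters: judging by this change, callers pass the enabled time first, while the old prototype declared running first, so the two values came back exchanged. Both parameters are u64 *, so the compiler cannot catch the mismatch. A standalone illustration in plain C (not kernel code; names reused for clarity):

	#include <stdio.h>

	/* Two out-parameters of the same type give the compiler no way to
	 * detect a swapped argument order at the call site. */
	static void calc_timer_values(unsigned long long *enabled,
				      unsigned long long *running)
	{
		*enabled = 100;	/* ns the event was enabled */
		*running = 60;	/* ns it actually ran on the PMU */
	}

	int main(void)
	{
		unsigned long long enabled, running;

		/* A caller written against the old (running, enabled) order
		 * compiles cleanly but gets the values crosswise. */
		calc_timer_values(&running, &enabled);
		printf("enabled=%llu running=%llu\n", enabled, running);
		return 0;
	}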
diff --git a/kernel/fork.c b/kernel/fork.c
index e7ceaca8960..8e6b6f4fb27 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1111,6 +1111,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 		    p->real_cred->user != INIT_USER)
 			goto bad_fork_free;
 	}
+	current->flags &= ~PF_NPROC_EXCEEDED;
 
 	retval = copy_creds(p, clone_flags);
 	if (retval < 0)
diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c
index 3a2cab407b9..e38544dddb1 100644
--- a/kernel/irq/generic-chip.c
+++ b/kernel/irq/generic-chip.c
@@ -246,7 +246,7 @@ void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk,
 		gc->mask_cache = irq_reg_readl(gc->reg_base + ct->regs.mask);
 
 	for (i = gc->irq_base; msk; msk >>= 1, i++) {
-		if (!msk & 0x01)
+		if (!(msk & 0x01))
 			continue;
 
 		if (flags & IRQ_GC_INIT_NESTED_LOCK)
@@ -301,7 +301,7 @@ void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk,
 	raw_spin_unlock(&gc_lock);
 
 	for (; msk; msk >>= 1, i++) {
-		if (!msk & 0x01)
+		if (!(msk & 0x01))
 			continue;
 
 		/* Remove handler first. That will mask the irq line */
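Both hunks above fix the same C operator-precedence bug: `!` binds tighter than `&`, so `!msk & 0x01` parses as `(!msk) & 0x01`, which is 0 for every non-zero msk — the continue never fired and every irq in the range was handled as if its mask bit were set. A standalone demonstration in plain C (not kernel code):

	#include <stdio.h>

	int main(void)
	{
		unsigned int msk = 0x4;	/* bit 0 clear, bit 2 set */

		/* Buggy form: (!msk) is 0 for any non-zero msk, so the whole
		 * expression is constantly 0 and never skips anything. */
		printf("!msk & 0x01   = %u\n", !msk & 0x01);	/* prints 0 */

		/* Intended form: actually tests bit 0 of msk. */
		printf("!(msk & 0x01) = %u\n", !(msk & 0x01));	/* prints 1 */
		return 0;
	}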
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 4c60a50e66b..039b889ea05 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -70,7 +70,8 @@ static inline void desc_smp_init(struct irq_desc *desc, int node) { }
 static inline int desc_node(struct irq_desc *desc) { return 0; }
 #endif
 
-static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node)
+static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
+		struct module *owner)
 {
 	int cpu;
 
@@ -86,6 +87,7 @@ static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node)
 	desc->irq_count = 0;
 	desc->irqs_unhandled = 0;
 	desc->name = NULL;
+	desc->owner = owner;
 	for_each_possible_cpu(cpu)
 		*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
 	desc_smp_init(desc, node);
@@ -128,7 +130,7 @@ static void free_masks(struct irq_desc *desc)
 static inline void free_masks(struct irq_desc *desc) { }
 #endif
 
-static struct irq_desc *alloc_desc(int irq, int node)
+static struct irq_desc *alloc_desc(int irq, int node, struct module *owner)
 {
 	struct irq_desc *desc;
 	gfp_t gfp = GFP_KERNEL;
@@ -147,7 +149,7 @@ static struct irq_desc *alloc_desc(int irq, int node)
 	raw_spin_lock_init(&desc->lock);
 	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
 
-	desc_set_defaults(irq, desc, node);
+	desc_set_defaults(irq, desc, node, owner);
 
 	return desc;
 
@@ -173,13 +175,14 @@ static void free_desc(unsigned int irq)
 	kfree(desc);
 }
 
-static int alloc_descs(unsigned int start, unsigned int cnt, int node)
+static int alloc_descs(unsigned int start, unsigned int cnt, int node,
+		       struct module *owner)
 {
 	struct irq_desc *desc;
 	int i;
 
 	for (i = 0; i < cnt; i++) {
-		desc = alloc_desc(start + i, node);
+		desc = alloc_desc(start + i, node, owner);
 		if (!desc)
 			goto err;
 		mutex_lock(&sparse_irq_lock);
@@ -227,7 +230,7 @@ int __init early_irq_init(void)
 	nr_irqs = initcnt;
 
 	for (i = 0; i < initcnt; i++) {
-		desc = alloc_desc(i, node);
+		desc = alloc_desc(i, node, NULL);
 		set_bit(i, allocated_irqs);
 		irq_insert_desc(i, desc);
 	}
@@ -261,7 +264,7 @@ int __init early_irq_init(void)
 		alloc_masks(&desc[i], GFP_KERNEL, node);
 		raw_spin_lock_init(&desc[i].lock);
 		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
-		desc_set_defaults(i, &desc[i], node);
+		desc_set_defaults(i, &desc[i], node, NULL);
 	}
 	return arch_early_irq_init();
 }
@@ -276,8 +279,16 @@ static void free_desc(unsigned int irq)
 	dynamic_irq_cleanup(irq);
 }
 
-static inline int alloc_descs(unsigned int start, unsigned int cnt, int node)
+static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
+			      struct module *owner)
 {
+	u32 i;
+
+	for (i = 0; i < cnt; i++) {
+		struct irq_desc *desc = irq_to_desc(start + i);
+
+		desc->owner = owner;
+	}
 	return start;
 }
 
@@ -333,11 +344,13 @@ EXPORT_SYMBOL_GPL(irq_free_descs);
 * @from:	Start the search from this irq number
 * @cnt:	Number of consecutive irqs to allocate.
 * @node:	Preferred node on which the irq descriptor should be allocated
+ * @owner:	Owning module (can be NULL)
 *
 * Returns the first irq number or error code
 */
 int __ref
-irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node)
+__irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
+		  struct module *owner)
 {
 	int start, ret;
 
@@ -366,13 +379,13 @@ irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node)
 
 	bitmap_set(allocated_irqs, start, cnt);
 	mutex_unlock(&sparse_irq_lock);
-	return alloc_descs(start, cnt, node);
+	return alloc_descs(start, cnt, node, owner);
 
 err:
 	mutex_unlock(&sparse_irq_lock);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(irq_alloc_descs);
+EXPORT_SYMBOL_GPL(__irq_alloc_descs);
 
 /**
 * irq_reserve_irqs - mark irqs allocated
@@ -440,7 +453,7 @@ void dynamic_irq_cleanup(unsigned int irq)
 	unsigned long flags;
 
 	raw_spin_lock_irqsave(&desc->lock, flags);
-	desc_set_defaults(irq, desc, desc_node(desc));
+	desc_set_defaults(irq, desc, desc_node(desc), NULL);
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }
 
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 0a7840aeb0f..9b956fa2030 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -883,6 +883,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 
 	if (desc->irq_data.chip == &no_irq_chip)
 		return -ENOSYS;
+	if (!try_module_get(desc->owner))
+		return -ENODEV;
 	/*
 	 * Some drivers like serial.c use request_irq() heavily,
 	 * so we have to be careful not to interfere with a
@@ -906,8 +908,10 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 	 */
 	nested = irq_settings_is_nested_thread(desc);
 	if (nested) {
-		if (!new->thread_fn)
-			return -EINVAL;
+		if (!new->thread_fn) {
+			ret = -EINVAL;
+			goto out_mput;
+		}
 		/*
 		 * Replace the primary handler which was provided from
 		 * the driver for non nested interrupt handling by the
@@ -929,8 +933,10 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 
 		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
 				   new->name);
-		if (IS_ERR(t))
-			return PTR_ERR(t);
+		if (IS_ERR(t)) {
+			ret = PTR_ERR(t);
+			goto out_mput;
+		}
 		/*
 		 * We keep the reference to the task struct even if
 		 * the thread dies to avoid that the interrupt code
@@ -1095,6 +1101,8 @@ out_thread:
 		kthread_stop(t);
 		put_task_struct(t);
 	}
+out_mput:
+	module_put(desc->owner);
 	return ret;
 }
 
@@ -1203,6 +1211,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 		put_task_struct(action->thread);
 	}
 
+	module_put(desc->owner);
 	return action;
 }
 
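Together with the irqdesc.c change above, this establishes a simple lifetime rule: the module that allocated an irq descriptor stays pinned for as long as any handler is installed on it. A minimal sketch of the pairing (simplified from the hunks above, not complete kernel code):

	/* __setup_irq(): refuse to install a handler while the owning module
	 * is being unloaded; try_module_get(NULL) succeeds, so descriptors
	 * without an owner are unaffected. */
	if (!try_module_get(desc->owner))
		return -ENODEV;
	/* ... every later failure path must 'goto out_mput' to undo this ... */

	/* __free_irq(): drop the reference once the action is removed, so
	 * the owner can be unloaded again when no handlers remain. */
	module_put(desc->owner);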
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 8c24294e477..91d67ce3a8d 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -3111,7 +3111,13 @@ static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
 	if (!class)
 		class = look_up_lock_class(lock, 0);
 
-	if (DEBUG_LOCKS_WARN_ON(!class))
+	/*
+	 * If look_up_lock_class() failed to find a class, we're trying
+	 * to test if we hold a lock that has never yet been acquired.
+	 * Clearly if the lock hasn't been acquired _ever_, we're not
+	 * holding it either, so report failure.
+	 */
+	if (!class)
 		return 0;
 
 	if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock))
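The visible effect is on lockdep_is_held()-style queries. A minimal sketch, assuming a lock that is never acquired anywhere (illustrative, lockdep enabled):

	static DEFINE_MUTEX(never_taken);

	/* The mutex has never been acquired, so no lock class has been
	 * registered for it. Previously this query tripped
	 * DEBUG_LOCKS_WARN_ON(!class) inside match_held_lock(); now it
	 * simply reports "not held" and the assertion fails cleanly. */
	lockdep_assert_held(&never_taken);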
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index b1914cb9095..3744c594b19 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -231,3 +231,7 @@ config PM_CLK
 config PM_GENERIC_DOMAINS
 	bool
 	depends on PM
+
+config PM_GENERIC_DOMAINS_RUNTIME
+	def_bool y
+	depends on PM_RUNTIME && PM_GENERIC_DOMAINS
diff --git a/kernel/printk.c b/kernel/printk.c
index 37dff3429ad..28a40d8171b 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -318,8 +318,10 @@ static int check_syslog_permissions(int type, bool from_file)
 			return 0;
 		/* For historical reasons, accept CAP_SYS_ADMIN too, with a warning */
 		if (capable(CAP_SYS_ADMIN)) {
-			WARN_ONCE(1, "Attempt to access syslog with CAP_SYS_ADMIN "
-				 "but no CAP_SYSLOG (deprecated).\n");
+			printk_once(KERN_WARNING "%s (%d): "
+				 "Attempt to access syslog with CAP_SYS_ADMIN "
+				 "but no CAP_SYSLOG (deprecated).\n",
+				 current->comm, task_pid_nr(current));
 			return 0;
 		}
 		return -EPERM;
@@ -1602,7 +1604,7 @@ static int __init printk_late_init(void)
 	struct console *con;
 
 	for_each_console(con) {
-		if (con->flags & CON_BOOT) {
+		if (!keep_bootcon && con->flags & CON_BOOT) {
 			printk(KERN_INFO "turn off boot console %s%d\n",
 			       con->name, con->index);
 			unregister_console(con);
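For context: keep_bootcon is an existing kernel command-line parameter intended to keep the boot console registered for debugging console problems; before this fix, printk_late_init() unregistered boot consoles unconditionally, so the parameter lost its effect by late init. Illustrative command line (assumed setup):

	console=ttyS0,115200 keep_bootcon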
diff --git a/kernel/sched.c b/kernel/sched.c
index ccacdbdecf4..ec5f472bc5b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3065,7 +3065,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
 	local_irq_disable();
 #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
-	perf_event_task_sched_in(current);
+	perf_event_task_sched_in(prev, current);
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
 	local_irq_enable();
 #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
@@ -4279,9 +4279,9 @@ pick_next_task(struct rq *rq)
 }
 
 /*
- * schedule() is the main scheduler function.
+ * __schedule() is the main scheduler function.
 */
-asmlinkage void __sched schedule(void)
+static void __sched __schedule(void)
 {
 	struct task_struct *prev, *next;
 	unsigned long *switch_count;
@@ -4322,16 +4322,6 @@ need_resched:
 			if (to_wakeup)
 				try_to_wake_up_local(to_wakeup);
 			}
-
-			/*
-			 * If we are going to sleep and we have plugged IO
-			 * queued, make sure to submit it to avoid deadlocks.
-			 */
-			if (blk_needs_flush_plug(prev)) {
-				raw_spin_unlock(&rq->lock);
-				blk_schedule_flush_plug(prev);
-				raw_spin_lock(&rq->lock);
-			}
 		}
 		switch_count = &prev->nvcsw;
 	}
@@ -4369,6 +4359,26 @@ need_resched:
 	if (need_resched())
 		goto need_resched;
 }
+
+static inline void sched_submit_work(struct task_struct *tsk)
+{
+	if (!tsk->state)
+		return;
+	/*
+	 * If we are going to sleep and we have plugged IO queued,
+	 * make sure to submit it to avoid deadlocks.
+	 */
+	if (blk_needs_flush_plug(tsk))
+		blk_schedule_flush_plug(tsk);
+}
+
+asmlinkage void schedule(void)
+{
+	struct task_struct *tsk = current;
+
+	sched_submit_work(tsk);
+	__schedule();
+}
 EXPORT_SYMBOL(schedule);
 
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
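The split separates voluntary from involuntary entry into the scheduler, so plugged block I/O is now flushed with no scheduler locks held (the old code had to drop and retake rq->lock around the flush). A sketch of the resulting call structure:

	/* voluntary, possibly about to sleep: flush plugged I/O first */
	schedule()
	    -> sched_submit_work(tsk)	/* no-op while tsk->state == TASK_RUNNING */
	    -> __schedule()

	/* involuntary preemption: the task is not going to sleep, so these
	 * paths enter __schedule() directly and never touch the plug */
	preempt_schedule() / preempt_schedule_irq() / __cond_resched()
	    -> __schedule()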
@@ -4435,7 +4445,7 @@ asmlinkage void __sched notrace preempt_schedule(void)
 
 	do {
 		add_preempt_count_notrace(PREEMPT_ACTIVE);
-		schedule();
+		__schedule();
 		sub_preempt_count_notrace(PREEMPT_ACTIVE);
 
 		/*
@@ -4463,7 +4473,7 @@ asmlinkage void __sched preempt_schedule_irq(void)
 	do {
 		add_preempt_count(PREEMPT_ACTIVE);
 		local_irq_enable();
-		schedule();
+		__schedule();
 		local_irq_disable();
 		sub_preempt_count(PREEMPT_ACTIVE);
 
@@ -5588,7 +5598,7 @@ static inline int should_resched(void)
 static void __cond_resched(void)
 {
 	add_preempt_count(PREEMPT_ACTIVE);
-	schedule();
+	__schedule();
 	sub_preempt_count(PREEMPT_ACTIVE);
 }
 
@@ -7443,6 +7453,7 @@ static void __sdt_free(const struct cpumask *cpu_map)
 		struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
 		if (sd && (sd->flags & SD_OVERLAP))
 			free_sched_groups(sd->groups, 0);
+		kfree(*per_cpu_ptr(sdd->sd, j));
 		kfree(*per_cpu_ptr(sdd->sg, j));
 		kfree(*per_cpu_ptr(sdd->sgp, j));
 	}
diff --git a/kernel/sys.c b/kernel/sys.c
index a101ba36c44..18ee1d2f647 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -37,6 +37,8 @@
 #include <linux/fs_struct.h>
 #include <linux/gfp.h>
 #include <linux/syscore_ops.h>
+#include <linux/version.h>
+#include <linux/ctype.h>
 
 #include <linux/compat.h>
 #include <linux/syscalls.h>
@@ -44,6 +46,8 @@
 #include <linux/user_namespace.h>
 
 #include <linux/kmsg_dump.h>
+/* Move somewhere else to avoid recompiling? */
+#include <generated/utsrelease.h>
 
 #include <asm/uaccess.h>
 #include <asm/io.h>
@@ -621,11 +625,18 @@ static int set_user(struct cred *new)
 	if (!new_user)
 		return -EAGAIN;
 
+	/*
+	 * We don't fail in case of NPROC limit excess here because too many
+	 * poorly written programs don't check set*uid() return code, assuming
+	 * it never fails if called by root. We may still enforce NPROC limit
+	 * for programs doing set*uid()+execve() by harmlessly deferring the
+	 * failure to the execve() stage.
+	 */
 	if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) &&
-			new_user != INIT_USER) {
-		free_uid(new_user);
-		return -EAGAIN;
-	}
+			new_user != INIT_USER)
+		current->flags |= PF_NPROC_EXCEEDED;
+	else
+		current->flags &= ~PF_NPROC_EXCEEDED;
 
 	free_uid(new->user);
 	new->user = new_user;
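Userspace view of the deferred check (illustrative; run as root with RLIMIT_NPROC already exhausted for the target uid): set*uid() now succeeds and only marks the task, and the over-limit condition surfaces at the next execve() — that check lives in fs/exec.c, outside this diffstat, which is limited to 'kernel'. The fork.c hunk earlier keeps the flag from leaking into children by clearing it in copy_process().

	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		if (setuid(1000) != 0) {	/* used to fail with EAGAIN here */
			perror("setuid");
			return 1;
		}
		/* PF_NPROC_EXCEEDED is now set on this task ... */
		execl("/bin/true", "true", (char *)NULL);
		perror("execve");		/* ... so EAGAIN shows up here */
		return 1;
	}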
@@ -1154,6 +1165,34 @@ DECLARE_RWSEM(uts_sem);
 #define override_architecture(name)	0
 #endif
 
+/*
+ * Work around broken programs that cannot handle "Linux 3.0".
+ * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40
+ */
+static int override_release(char __user *release, int len)
+{
+	int ret = 0;
+	char buf[len];
+
+	if (current->personality & UNAME26) {
+		char *rest = UTS_RELEASE;
+		int ndots = 0;
+		unsigned v;
+
+		while (*rest) {
+			if (*rest == '.' && ++ndots >= 3)
+				break;
+			if (!isdigit(*rest) && *rest != '.')
+				break;
+			rest++;
+		}
+		v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
+		snprintf(buf, len, "2.6.%u%s", v, rest);
+		ret = copy_to_user(release, buf, len);
+	}
+	return ret;
+}
+
 SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
 {
 	int errno = 0;
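A standalone check of the UNAME26 arithmetic (plain C, not kernel code): 3.x maps to 2.6.40+x, so a 3.1 kernel reports itself as 2.6.41 to programs that opted into the legacy personality.

	#include <stdio.h>

	#define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))

	int main(void)
	{
		unsigned code = KERNEL_VERSION(3, 1, 0);	/* LINUX_VERSION_CODE for 3.1.0 */
		unsigned v = ((code >> 8) & 0xff) + 40;		/* minor + 40 */

		printf("2.6.%u\n", v);	/* prints 2.6.41 */
		return 0;
	}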
@@ -1163,6 +1202,8 @@ SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
 		errno = -EFAULT;
 	up_read(&uts_sem);
 
+	if (!errno && override_release(name->release, sizeof(name->release)))
+		errno = -EFAULT;
 	if (!errno && override_architecture(name))
 		errno = -EFAULT;
 	return errno;
@@ -1184,6 +1225,8 @@ SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
 		error = -EFAULT;
 	up_read(&uts_sem);
 
+	if (!error && override_release(name->release, sizeof(name->release)))
+		error = -EFAULT;
 	if (!error && override_architecture(name))
 		error = -EFAULT;
 	return error;
@@ -1218,6 +1261,8 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
 
 	if (!error && override_architecture(name))
 		error = -EFAULT;
+	if (!error && override_release(name->release, sizeof(name->release)))
+		error = -EFAULT;
 	return error ? -EFAULT : 0;
 }
 #endif
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 62cbc8877fe..a9a5de07c4f 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -16,7 +16,6 @@ asmlinkage long sys_ni_syscall(void)
 	return -ENOSYS;
 }
 
-cond_syscall(sys_nfsservctl);
 cond_syscall(sys_quotactl);
 cond_syscall(sys32_quotactl);
 cond_syscall(sys_acct);
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
index 3b8e028b960..e8bffbe2ba4 100644
--- a/kernel/sysctl_binary.c
+++ b/kernel/sysctl_binary.c
@@ -1,6 +1,6 @@
 #include <linux/stat.h>
 #include <linux/sysctl.h>
-#include "../fs/xfs/linux-2.6/xfs_sysctl.h"
+#include "../fs/xfs/xfs_sysctl.h"
 #include <linux/sunrpc/debug.h>
 #include <linux/string.h>
 #include <net/ip_vs.h>
diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
index 4e4932a7b36..362da653813 100644
--- a/kernel/sysctl_check.c
+++ b/kernel/sysctl_check.c
@@ -1,6 +1,6 @@
 #include <linux/stat.h>
 #include <linux/sysctl.h>
-#include "../fs/xfs/linux-2.6/xfs_sysctl.h"
+#include "../fs/xfs/xfs_sysctl.h"
 #include <linux/sunrpc/debug.h>
 #include <linux/string.h>
 #include <net/ip_vs.h>
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 2ad39e556cb..cd3134510f3 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -82,7 +82,7 @@ config EVENT_POWER_TRACING_DEPRECATED
 	  power:power_frequency
 	  This is for userspace compatibility
 	  and will vanish after 5 kernel iterations,
-	  namely 2.6.41.
+	  namely 3.1.
 
 config CONTEXT_SWITCH_TRACER
 	bool
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 6957aa298df..7c910a5593a 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -206,6 +206,8 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 	what |= MASK_TC_BIT(rw, RAHEAD);
 	what |= MASK_TC_BIT(rw, META);
 	what |= MASK_TC_BIT(rw, DISCARD);
+	what |= MASK_TC_BIT(rw, FLUSH);
+	what |= MASK_TC_BIT(rw, FUA);
 
 	pid = tsk->pid;
 	if (act_log_check(bt, what, sector, pid))
@@ -1054,6 +1056,9 @@ static void fill_rwbs(char *rwbs, const struct blk_io_trace *t)
 		goto out;
 	}
 
+	if (tc & BLK_TC_FLUSH)
+		rwbs[i++] = 'F';
+
 	if (tc & BLK_TC_DISCARD)
 		rwbs[i++] = 'D';
 	else if (tc & BLK_TC_WRITE)
@@ -1063,10 +1068,10 @@ static void fill_rwbs(char *rwbs, const struct blk_io_trace *t)
 	else
 		rwbs[i++] = 'N';
 
+	if (tc & BLK_TC_FUA)
+		rwbs[i++] = 'F';
 	if (tc & BLK_TC_AHEAD)
 		rwbs[i++] = 'A';
-	if (tc & BLK_TC_BARRIER)
-		rwbs[i++] = 'B';
 	if (tc & BLK_TC_SYNC)
 		rwbs[i++] = 'S';
 	if (tc & BLK_TC_META)
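With the flush bit rendered first and FUA folded in after the direction letter, a flush+FUA synchronous write now shows up as "FWFS" where the old code produced barrier-era strings like "WBS" — which is also why the fixed-size rwbs[6] buffers below grow to RWBS_LEN. Hypothetical call against the sibling helper at the end of this file:

	char rwbs[RWBS_LEN];

	/* flush + write + fua + sync renders as "FWFS" */
	blk_fill_rwbs(rwbs, WRITE | REQ_FLUSH | REQ_FUA | REQ_SYNC, 4096);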
@@ -1132,7 +1137,7 @@ typedef int (blk_log_action_t) (struct trace_iterator *iter, const char *act);
 
 static int blk_log_action_classic(struct trace_iterator *iter, const char *act)
 {
-	char rwbs[6];
+	char rwbs[RWBS_LEN];
 	unsigned long long ts = iter->ts;
 	unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC);
 	unsigned secs = (unsigned long)ts;
@@ -1148,7 +1153,7 @@ static int blk_log_action_classic(struct trace_iterator *iter, const char *act)
 
 static int blk_log_action(struct trace_iterator *iter, const char *act)
 {
-	char rwbs[6];
+	char rwbs[RWBS_LEN];
 	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);
 
 	fill_rwbs(rwbs, t);
@@ -1561,7 +1566,7 @@ static const struct {
 } mask_maps[] = {
 	{ BLK_TC_READ,		"read"		},
 	{ BLK_TC_WRITE,		"write"		},
-	{ BLK_TC_BARRIER,	"barrier"	},
+	{ BLK_TC_FLUSH,		"flush"		},
 	{ BLK_TC_SYNC,		"sync"		},
 	{ BLK_TC_QUEUE,		"queue"		},
 	{ BLK_TC_REQUEUE,	"requeue"	},
@@ -1573,6 +1578,7 @@ static const struct {
 	{ BLK_TC_META,		"meta"		},
 	{ BLK_TC_DISCARD,	"discard"	},
 	{ BLK_TC_DRV_DATA,	"drv_data"	},
+	{ BLK_TC_FUA,		"fua"		},
 };
 
 static int blk_trace_str2mask(const char *str)
@@ -1788,6 +1794,9 @@ void blk_fill_rwbs(char *rwbs, u32 rw, int bytes)
 {
 	int i = 0;
 
+	if (rw & REQ_FLUSH)
+		rwbs[i++] = 'F';
+
 	if (rw & WRITE)
 		rwbs[i++] = 'W';
 	else if (rw & REQ_DISCARD)
@@ -1797,6 +1806,8 @@ void blk_fill_rwbs(char *rwbs, u32 rw, int bytes)
 	else
 		rwbs[i++] = 'N';
 
+	if (rw & REQ_FUA)
+		rwbs[i++] = 'F';
 	if (rw & REQ_RAHEAD)
 		rwbs[i++] = 'A';
 	if (rw & REQ_SYNC)