author     David S. Miller <davem@davemloft.net>  2014-11-14 01:01:12 -0500
committer  David S. Miller <davem@davemloft.net>  2014-11-14 01:01:12 -0500
commit     076ce4482569ea1a2c27b4ca71a309adaf91d398 (patch)
tree       2ae9e42612f35be897f190983fc292d7af781cd2 /kernel
parent     d649a7a81f3b5bacb1d60abd7529894d8234a666 (diff)
parent     b23dc5a7cc6ebc9a0d57351da7a0e8454c9ffea3 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
The sge.c conflict was between two overlapping changes: one switching to
the new __dev_alloc_page() in net-next, and one using s->fl_pg_order in net.
The ixgbe_phy.c conflict was a set of overlapping whitespace changes.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/audit.c              |  2
-rw-r--r--  kernel/audit_tree.c         |  1
-rw-r--r--  kernel/panic.c              |  1
-rw-r--r--  kernel/trace/ring_buffer.c  | 81
-rw-r--r--  kernel/trace/trace.c        | 33
5 files changed, 72 insertions(+), 46 deletions(-)
diff --git a/kernel/audit.c b/kernel/audit.c
index 80983df92cd4..cebb11db4d34 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -739,7 +739,7 @@ static void audit_log_feature_change(int which, u32 old_feature, u32 new_feature
 
 	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_FEATURE_CHANGE);
 	audit_log_task_info(ab, current);
-	audit_log_format(ab, "feature=%s old=%u new=%u old_lock=%u new_lock=%u res=%d",
+	audit_log_format(ab, " feature=%s old=%u new=%u old_lock=%u new_lock=%u res=%d",
			  audit_feature_names[which], !!old_feature, !!new_feature,
			  !!old_lock, !!new_lock, res);
 	audit_log_end(ab);
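Note: the only change above is the leading space in the format string. audit_log_task_info() has already appended fields to the same record, so without the separator the "feature=" key would run straight into the previous field's value. As a minimal userspace sketch of the same append-style record building, with a hypothetical log_append() standing in for the kernel's audit_log_format():

	#include <stdarg.h>
	#include <stdio.h>
	#include <string.h>

	/* Hypothetical stand-in for audit_log_format(): append printf-style
	 * text to the end of a fixed-size record buffer. */
	static void log_append(char *buf, size_t size, const char *fmt, ...)
	{
		size_t len = strlen(buf);
		va_list ap;

		va_start(ap, fmt);
		vsnprintf(buf + len, size - len, fmt, ap);
		va_end(ap);
	}

	int main(void)
	{
		char record[128] = "";

		log_append(record, sizeof(record), "comm=%s", "bash");
		/* Leading space: keeps this key from fusing with the field above. */
		log_append(record, sizeof(record), " feature=%s old=%u new=%u",
			   "loginuid_immutable", 0u, 1u);

		puts(record);	/* comm=bash feature=loginuid_immutable old=0 new=1 */
		return 0;
	}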
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index e242e3a9864a..80f29e015570 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -154,6 +154,7 @@ static struct audit_chunk *alloc_chunk(int count)
 		chunk->owners[i].index = i;
 	}
 	fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch);
+	chunk->mark.mask = FS_IN_IGNORED;
 	return chunk;
 }
 
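Note: the new line follows the common init-then-configure pattern: fsnotify_init_mark() does the generic setup, and the caller then sets the one field the subsystem needs, here the event mask. A schematic userspace sketch of that pattern, with hypothetical mark, mark_init() and EV_IGNORED names (not the fsnotify API):

	#include <stdio.h>

	#define EV_IGNORED 0x20u	/* hypothetical stand-in for FS_IN_IGNORED */

	/* Hypothetical miniature of an fsnotify-style mark. */
	struct mark {
		unsigned int mask;	/* events this mark is interested in */
		void (*free_mark)(struct mark *);
	};

	/* Generic initializer: zeroes the mask, records the destructor. */
	static void mark_init(struct mark *m, void (*free_mark)(struct mark *))
	{
		m->mask = 0;
		m->free_mark = free_mark;
	}

	static void destroy_watch(struct mark *m) { (void)m; }

	int main(void)
	{
		struct mark m;

		mark_init(&m, destroy_watch);	/* generic setup... */
		m.mask = EV_IGNORED;		/* ...then the caller-specific field */
		printf("mask=%#x\n", m.mask);
		return 0;
	}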
diff --git a/kernel/panic.c b/kernel/panic.c
index d09dc5c32c67..cf80672b7924 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -244,6 +244,7 @@ static const struct tnt tnts[] = {
  * 'I' - Working around severe firmware bug.
  * 'O' - Out-of-tree module has been loaded.
  * 'E' - Unsigned module has been loaded.
+ * 'L' - A soft lockup has previously occurred.
  *
  * The string is overwritten by the next call to print_tainted().
  */
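Note: these letters are emitted by print_tainted(), which walks the tnts[] table named in the hunk header and prints one character per taint flag. A minimal sketch of that table-driven construction, assuming an abbreviated table with hypothetical bit numbers (the real TAINT_* values live in include/linux/kernel.h):

	#include <stdio.h>

	/* Abbreviated, hypothetical taint table; the real tnts[] is in kernel/panic.c. */
	struct tnt {
		unsigned int bit;	/* taint bit to test in the mask */
		char true_c;		/* letter printed when the bit is set */
		char false_c;		/* letter printed when it is clear */
	};

	static const struct tnt tnts[] = {
		{ 0, 'P', 'G' },	/* proprietary vs. GPL-compatible modules only */
		{ 1, 'O', ' ' },	/* out-of-tree module loaded */
		{ 2, 'E', ' ' },	/* unsigned module loaded */
		{ 3, 'L', ' ' },	/* soft lockup previously occurred (the new letter) */
	};

	#define NR_TNTS (sizeof(tnts) / sizeof(tnts[0]))

	/* Build the summary the same way the kernel's print_tainted() does. */
	static const char *print_tainted(unsigned long mask)
	{
		static char buf[sizeof("Tainted: ") + NR_TNTS];
		char *s;
		size_t i;

		if (!mask)
			return "Not tainted";

		s = buf + sprintf(buf, "Tainted: ");
		for (i = 0; i < NR_TNTS; i++)
			*s++ = (mask & (1UL << tnts[i].bit)) ? tnts[i].true_c
							     : tnts[i].false_c;
		*s = '\0';
		return buf;
	}

	int main(void)
	{
		/* Out-of-tree module plus a past soft lockup -> "Tainted: GO L" */
		printf("%s\n", print_tainted((1UL << 1) | (1UL << 3)));
		return 0;
	}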
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 2d75c94ae87d..a56e07c8d15b 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -538,16 +538,18 @@ static void rb_wake_up_waiters(struct irq_work *work)
  * ring_buffer_wait - wait for input to the ring buffer
  * @buffer: buffer to wait on
  * @cpu: the cpu buffer to wait on
+ * @full: wait until a full page is available, if @cpu != RING_BUFFER_ALL_CPUS
  *
  * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
  * as data is added to any of the @buffer's cpu buffers. Otherwise
  * it will wait for data to be added to a specific cpu buffer.
  */
-int ring_buffer_wait(struct ring_buffer *buffer, int cpu)
+int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
 {
-	struct ring_buffer_per_cpu *cpu_buffer;
+	struct ring_buffer_per_cpu *uninitialized_var(cpu_buffer);
 	DEFINE_WAIT(wait);
 	struct rb_irq_work *work;
+	int ret = 0;
 
 	/*
	 * Depending on what the caller is waiting for, either any
@@ -564,36 +566,61 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu)
 	}
 
 
-	prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
+	while (true) {
+		prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
 
-	/*
-	 * The events can happen in critical sections where
-	 * checking a work queue can cause deadlocks.
-	 * After adding a task to the queue, this flag is set
-	 * only to notify events to try to wake up the queue
-	 * using irq_work.
-	 *
-	 * We don't clear it even if the buffer is no longer
-	 * empty. The flag only causes the next event to run
-	 * irq_work to do the work queue wake up. The worse
-	 * that can happen if we race with !trace_empty() is that
-	 * an event will cause an irq_work to try to wake up
-	 * an empty queue.
-	 *
-	 * There's no reason to protect this flag either, as
-	 * the work queue and irq_work logic will do the necessary
-	 * synchronization for the wake ups. The only thing
-	 * that is necessary is that the wake up happens after
-	 * a task has been queued. It's OK for spurious wake ups.
-	 */
-	work->waiters_pending = true;
+		/*
+		 * The events can happen in critical sections where
+		 * checking a work queue can cause deadlocks.
+		 * After adding a task to the queue, this flag is set
+		 * only to notify events to try to wake up the queue
+		 * using irq_work.
+		 *
+		 * We don't clear it even if the buffer is no longer
+		 * empty. The flag only causes the next event to run
+		 * irq_work to do the work queue wake up. The worse
+		 * that can happen if we race with !trace_empty() is that
+		 * an event will cause an irq_work to try to wake up
+		 * an empty queue.
+		 *
+		 * There's no reason to protect this flag either, as
+		 * the work queue and irq_work logic will do the necessary
+		 * synchronization for the wake ups. The only thing
+		 * that is necessary is that the wake up happens after
+		 * a task has been queued. It's OK for spurious wake ups.
+		 */
+		work->waiters_pending = true;
+
+		if (signal_pending(current)) {
+			ret = -EINTR;
+			break;
+		}
+
+		if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer))
+			break;
+
+		if (cpu != RING_BUFFER_ALL_CPUS &&
+		    !ring_buffer_empty_cpu(buffer, cpu)) {
+			unsigned long flags;
+			bool pagebusy;
+
+			if (!full)
+				break;
+
+			raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+			pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
+			raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+
+			if (!pagebusy)
+				break;
+		}
 
-	if ((cpu == RING_BUFFER_ALL_CPUS && ring_buffer_empty(buffer)) ||
-	    (cpu != RING_BUFFER_ALL_CPUS && ring_buffer_empty_cpu(buffer, cpu)))
 		schedule();
+	}
 
 	finish_wait(&work->waiters, &wait);
-	return 0;
+
+	return ret;
 }
 
 /**
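Note: the rewrite above turns a single prepare_to_wait()/schedule() shot into the canonical wait loop: re-arm, re-check every wake condition, and only sleep when none holds, so a wakeup racing with the checks is never lost, and a pending signal is reported as -EINTR from inside the wait itself. A minimal userspace analogue of that loop using POSIX condition variables; buffer_wait(), producer(), data_available, full_page_ready and interrupted are hypothetical stand-ins, not the kernel API:

	#include <errno.h>
	#include <pthread.h>
	#include <signal.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <unistd.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t waiters = PTHREAD_COND_INITIALIZER;
	static bool data_available;		/* cf. !ring_buffer_empty() */
	static bool full_page_ready;		/* cf. the reader-page/commit-page check */
	static volatile sig_atomic_t interrupted;	/* cf. signal_pending(current) */

	/* Wait until data (or, if full, a complete page) is ready; -EINTR if interrupted. */
	static int buffer_wait(bool full)
	{
		int ret = 0;

		pthread_mutex_lock(&lock);
		while (true) {
			if (interrupted) {
				ret = -EINTR;
				break;
			}
			if (data_available && (!full || full_page_ready))
				break;
			/* Atomically drops the lock and sleeps; the loop re-checks the
			 * conditions on every wakeup, so spurious wakeups are harmless. */
			pthread_cond_wait(&waiters, &lock);
		}
		pthread_mutex_unlock(&lock);
		return ret;
	}

	/* Producer side: publish data, then wake waiters (cf. rb_wake_up_waiters()). */
	static void *producer(void *arg)
	{
		(void)arg;
		usleep(100 * 1000);		/* pretend data arrives a bit later */
		pthread_mutex_lock(&lock);
		data_available = true;
		full_page_ready = true;
		pthread_cond_broadcast(&waiters);
		pthread_mutex_unlock(&lock);
		return NULL;
	}

	int main(void)
	{
		pthread_t tid;

		pthread_create(&tid, NULL, producer, NULL);
		printf("buffer_wait(full=true) = %d\n", buffer_wait(true));	/* 0 */
		pthread_join(tid, NULL);
		return 0;
	}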
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 8a528392b1f4..92f4a6cee172 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1076,13 +1076,14 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 }
 #endif /* CONFIG_TRACER_MAX_TRACE */
 
-static int wait_on_pipe(struct trace_iterator *iter)
+static int wait_on_pipe(struct trace_iterator *iter, bool full)
 {
 	/* Iterators are static, they should be filled or empty */
 	if (trace_buffer_iter(iter, iter->cpu_file))
 		return 0;
 
-	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
+	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
+				full);
 }
 
 #ifdef CONFIG_FTRACE_STARTUP_TEST
@@ -4434,15 +4435,12 @@ static int tracing_wait_pipe(struct file *filp)
 
 		mutex_unlock(&iter->mutex);
 
-		ret = wait_on_pipe(iter);
+		ret = wait_on_pipe(iter, false);
 
 		mutex_lock(&iter->mutex);
 
 		if (ret)
 			return ret;
-
-		if (signal_pending(current))
-			return -EINTR;
 	}
 
 	return 1;
@@ -5372,16 +5370,12 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
 			goto out_unlock;
 		}
 		mutex_unlock(&trace_types_lock);
-		ret = wait_on_pipe(iter);
+		ret = wait_on_pipe(iter, false);
 		mutex_lock(&trace_types_lock);
 		if (ret) {
 			size = ret;
 			goto out_unlock;
 		}
-		if (signal_pending(current)) {
-			size = -EINTR;
-			goto out_unlock;
-		}
 		goto again;
 	}
 	size = 0;
@@ -5500,7 +5494,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 	};
 	struct buffer_ref *ref;
 	int entries, size, i;
-	ssize_t ret;
+	ssize_t ret = 0;
 
 	mutex_lock(&trace_types_lock);
 
@@ -5538,13 +5532,16 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 		int r;
 
 		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
-		if (!ref)
+		if (!ref) {
+			ret = -ENOMEM;
 			break;
+		}
 
 		ref->ref = 1;
 		ref->buffer = iter->trace_buffer->buffer;
 		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
 		if (!ref->page) {
+			ret = -ENOMEM;
 			kfree(ref);
 			break;
 		}
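Note: both allocation-failure paths above now record -ENOMEM in ret before breaking out, which lets the "did we read anything?" check further down distinguish "no data yet, keep waiting" from "allocation failed, return the error". A minimal sketch of the same record-why-we-stopped pattern; collect_refs() and this struct buffer_ref are hypothetical reductions of the splice loop:

	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/types.h>

	struct buffer_ref {
		int refcount;
		void *page;
	};

	/* Gather up to max page refs; on allocation failure remember -ENOMEM and stop. */
	static ssize_t collect_refs(struct buffer_ref **refs, int max, int *nr_pages)
	{
		ssize_t ret = 0;	/* 0 means "stopped without an error" */
		int i;

		for (i = 0; i < max; i++) {
			struct buffer_ref *ref = calloc(1, sizeof(*ref));

			if (!ref) {
				ret = -ENOMEM;	/* record why we stopped */
				break;
			}

			ref->page = malloc(4096);
			if (!ref->page) {
				ret = -ENOMEM;
				free(ref);	/* don't leak the half-built ref */
				break;
			}

			ref->refcount = 1;
			refs[(*nr_pages)++] = ref;
		}

		return ret;
	}

	int main(void)
	{
		struct buffer_ref *refs[8];
		int nr_pages = 0;
		ssize_t ret = collect_refs(refs, 8, &nr_pages);

		/* Mirrors the !spd.nr_pages test below: only an empty result plus a
		 * recorded error is treated as failure. */
		if (!nr_pages && ret)
			return 1;
		printf("collected %d pages (ret=%zd)\n", nr_pages, ret);
		return 0;
	}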
@@ -5582,19 +5579,19 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 
 	/* did we read anything? */
 	if (!spd.nr_pages) {
+		if (ret)
+			goto out;
+
 		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
 			ret = -EAGAIN;
 			goto out;
 		}
 		mutex_unlock(&trace_types_lock);
-		ret = wait_on_pipe(iter);
+		ret = wait_on_pipe(iter, true);
 		mutex_lock(&trace_types_lock);
 		if (ret)
 			goto out;
-		if (signal_pending(current)) {
-			ret = -EINTR;
-			goto out;
-		}
+
 		goto again;
 	}
 
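Note: the common thread through these trace.c hunks is that the signal check moved into the callee: ring_buffer_wait() now returns -EINTR itself, so every wait_on_pipe() caller simply forwards a non-zero return instead of re-testing signal_pending(). A minimal sketch of that push-the-check-down shape, with hypothetical wait_for_data() and read_pipe():

	#include <errno.h>
	#include <stdbool.h>
	#include <stdio.h>

	static bool pending_signal;	/* cf. signal_pending(current) */
	static bool have_data;

	/* The callee owns the interruption check and returns a negative errno. */
	static int wait_for_data(void)
	{
		while (!have_data) {
			if (pending_signal)
				return -EINTR;	/* previously every caller's job */
			have_data = true;	/* pretend data arrives so the sketch ends */
		}
		return 0;
	}

	/* Callers shrink to "forward any non-zero return". */
	static int read_pipe(void)
	{
		int ret = wait_for_data();

		if (ret)
			return ret;	/* -EINTR (or any future error) propagates as-is */
		return 1;		/* data ready */
	}

	int main(void)
	{
		pending_signal = true;
		printf("read_pipe() = %d (-EINTR is %d)\n", read_pipe(), -EINTR);
		return 0;
	}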