author		Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2013-06-09 00:27:51 -0400
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2013-06-09 00:27:51 -0400
commit		141dc40ee343ab532717b235dd645e2d25ae3092 (patch)
tree		2289e5d1ec20df5e465b9b629e291877f4d70485 /kernel
parent		1c83d94ff646001f9ee83f0330a3933b55660927 (diff)
parent		317ddd256b9c24b0d78fa8018f80f1e495481a10 (diff)
Merge 3.10-rc5 into usb-next
We need the changes in this branch.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/cgroup.c			31
-rw-r--r--	kernel/irq/irqdomain.c		9
-rw-r--r--	kernel/range.c			8
-rw-r--r--	kernel/time/ntp.c		1
-rw-r--r--	kernel/time/tick-broadcast.c	8
-rw-r--r--	kernel/time/timekeeping.c	8
-rw-r--r--	kernel/trace/ftrace.c		18
-rw-r--r--	kernel/trace/ring_buffer.c	3
-rw-r--r--	kernel/trace/trace.c		19
-rw-r--r--	kernel/trace/trace_selftest.c	2
10 files changed, 72 insertions(+), 35 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 2a9926275f80..a7c9e6ddb979 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1686,11 +1686,14 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
 	 */
 	cgroup_drop_root(opts.new_root);
 
-	if (((root->flags | opts.flags) & CGRP_ROOT_SANE_BEHAVIOR) &&
-	    root->flags != opts.flags) {
-		pr_err("cgroup: sane_behavior: new mount options should match the existing superblock\n");
-		ret = -EINVAL;
-		goto drop_new_super;
+	if (root->flags != opts.flags) {
+		if ((root->flags | opts.flags) & CGRP_ROOT_SANE_BEHAVIOR) {
+			pr_err("cgroup: sane_behavior: new mount options should match the existing superblock\n");
+			ret = -EINVAL;
+			goto drop_new_super;
+		} else {
+			pr_warning("cgroup: new mount options do not match the existing superblock, will be ignored\n");
+		}
 	}
 
 	/* no subsys rebinding, so refcounts don't change */
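The restructured check above separates "do the options differ?" from "is the mismatch fatal?": sane_behavior roots reject the mount, legacy roots keep their existing options and only warn. A standalone sketch of that control flow, with an illustrative flag value rather than the kernel's definitions:

    /*
     * Standalone sketch: mismatching options are always detected, but
     * only sane_behavior mounts treat them as fatal. The flag value is
     * illustrative, not the kernel's CGRP_ROOT_SANE_BEHAVIOR.
     */
    #include <stdio.h>

    #define ROOT_SANE_BEHAVIOR (1u << 0)

    static int check_remount_flags(unsigned int root_flags, unsigned int opts_flags)
    {
        if (root_flags != opts_flags) {
            if ((root_flags | opts_flags) & ROOT_SANE_BEHAVIOR) {
                /* strict mode: a mismatch is an error */
                fprintf(stderr, "new mount options should match the existing superblock\n");
                return -1;
            }
            /* legacy mode: keep the old options, just warn */
            fprintf(stderr, "new mount options do not match the existing superblock, will be ignored\n");
        }
        return 0;
    }

    int main(void)
    {
        /* sane_behavior root + differing options: expect the error path */
        return check_remount_flags(ROOT_SANE_BEHAVIOR, 0) ? 0 : 1;
    }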
@@ -2699,13 +2702,14 @@ static int cgroup_add_file(struct cgroup *cgrp, struct cgroup_subsys *subsys,
 		goto out;
 	}
 
+	cfe->type = (void *)cft;
+	cfe->dentry = dentry;
+	dentry->d_fsdata = cfe;
+	simple_xattrs_init(&cfe->xattrs);
+
 	mode = cgroup_file_mode(cft);
 	error = cgroup_create_file(dentry, mode | S_IFREG, cgrp->root->sb);
 	if (!error) {
-		cfe->type = (void *)cft;
-		cfe->dentry = dentry;
-		dentry->d_fsdata = cfe;
-		simple_xattrs_init(&cfe->xattrs);
 		list_add_tail(&cfe->node, &parent->files);
 		cfe = NULL;
 	}
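The moved lines follow a common ordering rule: finish initializing an object before the call that can publish it, here plausibly because cgroup_create_file() instantiates the dentry and makes d_fsdata reachable to concurrent lookups. A minimal user-space sketch of the pattern, with made-up names:

    /* Sketch of init-before-publish; names are illustrative, not the kernel's. */
    #include <stdlib.h>

    struct file_entry {
        const void *type;
        void *dentry;
    };

    /*
     * Stands in for cgroup_create_file()/d_instantiate(): after this call,
     * other threads may look up dentry and follow its private data.
     */
    static int create_and_publish(void *dentry)
    {
        (void)dentry;
        return 0;	/* pretend success */
    }

    static int add_file(const void *cft, void *dentry)
    {
        struct file_entry *cfe = calloc(1, sizeof(*cfe));

        if (!cfe)
            return -1;

        /* initialize everything a concurrent lookup might touch ... */
        cfe->type = cft;
        cfe->dentry = dentry;

        /* ... before the call that makes it visible */
        if (create_and_publish(dentry)) {
            free(cfe);
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        int dummy;
        return add_file("cft", &dummy) ? 1 : 0;
    }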
@@ -2953,11 +2957,8 @@ struct cgroup *cgroup_next_descendant_pre(struct cgroup *pos,
 	WARN_ON_ONCE(!rcu_read_lock_held());
 
 	/* if first iteration, pretend we just visited @cgroup */
-	if (!pos) {
-		if (list_empty(&cgroup->children))
-			return NULL;
+	if (!pos)
 		pos = cgroup;
-	}
 
 	/* visit the first child if exists */
 	next = list_first_or_null_rcu(&pos->children, struct cgroup, sibling);
@@ -2965,14 +2966,14 @@ struct cgroup *cgroup_next_descendant_pre(struct cgroup *pos,
 		return next;
 
 	/* no child, visit my or the closest ancestor's next sibling */
-	do {
+	while (pos != cgroup) {
 		next = list_entry_rcu(pos->sibling.next, struct cgroup,
 				      sibling);
 		if (&next->sibling != &pos->parent->children)
 			return next;
 
 		pos = pos->parent;
-	} while (pos != cgroup);
+	}
 
 	return NULL;
 }
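The rewrite removes the special case for a childless root (the first-child probe already returns NULL for it) and converts the do/while into a while so the sibling scan runs zero times when the walk starts at @cgroup itself. A self-contained user-space version of the same pre-order "next" step over parent/first-child/next-sibling links:

    /* Pre-order "next" over a tree; same shape as the fixed kernel walk. */
    #include <stddef.h>

    struct node {
        struct node *parent;
        struct node *first_child;
        struct node *next_sibling;
    };

    static struct node *next_descendant_pre(struct node *pos, struct node *root)
    {
        /* on the first iteration, pretend we just visited the root */
        if (!pos)
            pos = root;

        /* visit the first child if it exists */
        if (pos->first_child)
            return pos->first_child;

        /* no child: visit my or the closest ancestor's next sibling */
        while (pos != root) {
            if (pos->next_sibling)
                return pos->next_sibling;
            pos = pos->parent;
        }
        return NULL;	/* walk complete (also covers a childless root) */
    }

    int main(void)
    {
        struct node root = {0}, a = {0}, b = {0};
        struct node *pos = NULL;
        int count = 0;

        root.first_child = &a;
        a.parent = &root;
        a.next_sibling = &b;
        b.parent = &root;

        while ((pos = next_descendant_pre(pos, &root)))
            count++;	/* visits a, then b */
        return count == 2 ? 0 : 1;
    }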
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 5a83dde8ca0c..54a4d5223238 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -143,7 +143,10 @@ static unsigned int irq_domain_legacy_revmap(struct irq_domain *domain,
  * irq_domain_add_simple() - Allocate and register a simple irq_domain.
  * @of_node: pointer to interrupt controller's device tree node.
  * @size: total number of irqs in mapping
- * @first_irq: first number of irq block assigned to the domain
+ * @first_irq: first number of irq block assigned to the domain,
+ *	pass zero to assign irqs on-the-fly. This will result in a
+ *	linear IRQ domain so it is important to use irq_create_mapping()
+ *	for each used IRQ, especially when SPARSE_IRQ is enabled.
  * @ops: map/unmap domain callbacks
  * @host_data: Controller private data pointer
  *
@@ -191,6 +194,7 @@ struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
 	/* A linear domain is the default */
 	return irq_domain_add_linear(of_node, size, ops, host_data);
 }
+EXPORT_SYMBOL_GPL(irq_domain_add_simple);
 
 /**
  * irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain.
@@ -397,11 +401,12 @@ static void irq_domain_disassociate_many(struct irq_domain *domain,
 	while (count--) {
 		int irq = irq_base + count;
 		struct irq_data *irq_data = irq_get_irq_data(irq);
-		irq_hw_number_t hwirq = irq_data->hwirq;
+		irq_hw_number_t hwirq;
 
 		if (WARN_ON(!irq_data || irq_data->domain != domain))
 			continue;
 
+		hwirq = irq_data->hwirq;
 		irq_set_status_flags(irq, IRQ_NOREQUEST);
 
 		/* remove chip and handler */
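The hwirq change fixes a classic check-after-use bug: the initializer dereferenced irq_data before the WARN_ON(!irq_data) test, so the NULL check could never fire in time. Reduced to its essence (plain C, illustrative names):

    /* Validate a pointer before reading through it, not after. */
    #include <stdio.h>

    struct irq_data {
        unsigned long hwirq;
    };

    static void disassociate_one(struct irq_data *irq_data)
    {
        unsigned long hwirq;

        if (!irq_data) {		/* validate first ... */
            fprintf(stderr, "missing irq_data\n");
            return;
        }
        hwirq = irq_data->hwirq;	/* ... dereference second */
        printf("tearing down hwirq %lu\n", hwirq);
    }

    int main(void)
    {
        struct irq_data d = { .hwirq = 42 };

        disassociate_one(&d);
        disassociate_one(NULL);	/* safely rejected, no crash */
        return 0;
    }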
diff --git a/kernel/range.c b/kernel/range.c
index 071b0ab455cb..eb911dbce267 100644
--- a/kernel/range.c
+++ b/kernel/range.c
@@ -48,9 +48,11 @@ int add_range_with_merge(struct range *range, int az, int nr_range,
 		final_start = min(range[i].start, start);
 		final_end = max(range[i].end, end);
 
-		range[i].start = final_start;
-		range[i].end = final_end;
-		return nr_range;
+		/* clear it and add it back for further merge */
+		range[i].start = 0;
+		range[i].end = 0;
+		return add_range_with_merge(range, az, nr_range,
+					    final_start, final_end);
 	}
 
 	/* Need to add it: */
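Writing the union back in place was insufficient because the merged range can now overlap other entries further along the array; clearing the slot and re-adding the union lets the merge cascade until no overlaps remain. A standalone sketch of the recursive merge with simplified types:

    /* Recursive range merge; simplified ranges, not the kernel's types. */
    #include <stdio.h>

    struct range {
        unsigned long start, end;
    };

    static int add_range_merge(struct range *r, int max, int n,
                               unsigned long start, unsigned long end)
    {
        int i;

        for (i = 0; i < n; i++) {
            unsigned long fs, fe;

            if (!r[i].end)				/* cleared slot */
                continue;
            if (start > r[i].end || r[i].start > end)	/* no overlap */
                continue;

            fs = r[i].start < start ? r[i].start : start;
            fe = r[i].end > end ? r[i].end : end;

            /* clear it and add the union back for further merging */
            r[i].start = r[i].end = 0;
            return add_range_merge(r, max, n, fs, fe);
        }

        if (n >= max)
            return n;
        r[n].start = start;
        r[n].end = end;
        return n + 1;
    }

    int main(void)
    {
        struct range r[8] = { { 1, 10 }, { 20, 30 } };
        /* [5,25] bridges both entries; the cascade merges all three */
        int n = add_range_merge(r, 8, 2, 5, 25);

        printf("nr_range=%d merged=[%lu,%lu]\n", n, r[2].start, r[2].end);
        return 0;
    }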
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 12ff13a838c6..8f5b3b98577b 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -874,7 +874,6 @@ static void hardpps_update_phase(long error)
 void __hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
 {
 	struct pps_normtime pts_norm, freq_norm;
-	unsigned long flags;
 
 	pts_norm = pps_normalize_ts(*phase_ts);
 
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 24938d577669..0c739423b0f9 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -511,6 +511,12 @@ again:
 		}
 	}
 
+	/*
+	 * Remove the current cpu from the pending mask. The event is
+	 * delivered immediately in tick_do_broadcast() !
+	 */
+	cpumask_clear_cpu(smp_processor_id(), tick_broadcast_pending_mask);
+
 	/* Take care of enforced broadcast requests */
 	cpumask_or(tmpmask, tmpmask, tick_broadcast_force_mask);
 	cpumask_clear(tick_broadcast_force_mask);
@@ -575,8 +581,8 @@ void tick_broadcast_oneshot_control(unsigned long reason)
 
 	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
 	if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
-		WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));
 		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
+			WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));
 			clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
 			/*
 			 * We only reprogram the broadcast timer if we
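Moving the WARN_ON_ONCE() under the test_and_set ties the sanity check to the first entry into broadcast mode rather than to every call, which is the usual shape of this idiom. A toy reduction using a plain bitmask in place of a cpumask:

    /* Toy bitmask version of "do the transition work only on first entry". */
    #include <assert.h>
    #include <stdio.h>

    static unsigned long oneshot_mask, pending_mask;

    static int test_and_set_bit(int bit, unsigned long *mask)
    {
        int was_set = !!(*mask & (1UL << bit));

        *mask |= 1UL << bit;
        return was_set;
    }

    static void broadcast_enter(int cpu)
    {
        if (!test_and_set_bit(cpu, &oneshot_mask)) {
            /* first entry only: pending must not already be set */
            assert(!(pending_mask & (1UL << cpu)));
            printf("cpu%d: shutting down local clockevent\n", cpu);
        }
    }

    int main(void)
    {
        broadcast_enter(1);
        broadcast_enter(1);	/* re-entry: no check, no double shutdown */
        return 0;
    }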
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 98cd470bbe49..baeeb5c87cf1 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -975,6 +975,14 @@ static int timekeeping_suspend(void)
 
 	read_persistent_clock(&timekeeping_suspend_time);
 
+	/*
+	 * On some systems the persistent_clock can not be detected at
+	 * timekeeping_init by its return value, so if we see a valid
+	 * value returned, update the persistent_clock_exists flag.
+	 */
+	if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec)
+		persistent_clock_exist = true;
+
 	raw_spin_lock_irqsave(&timekeeper_lock, flags);
 	write_seqcount_begin(&timekeeper_seq);
 	timekeeping_forward_now(tk);
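The added block infers the presence of a persistent clock from a non-zero suspend timestamp when init-time detection was inconclusive. The decision itself is just this, in illustrative standalone form:

    /* Late detection of a persistent clock; types and names are illustrative. */
    #include <stdbool.h>
    #include <stdio.h>

    struct ts {
        long tv_sec, tv_nsec;
    };

    static bool persistent_clock_exists;

    static void note_suspend_time(struct ts t)
    {
        /* any non-zero reading proves the clock is really there */
        if (t.tv_sec || t.tv_nsec)
            persistent_clock_exists = true;
    }

    int main(void)
    {
        note_suspend_time((struct ts){ 0, 0 });
        printf("%d\n", persistent_clock_exists);	/* still 0 */
        note_suspend_time((struct ts){ 1370736000, 0 });
        printf("%d\n", persistent_clock_exists);	/* now 1 */
        return 0;
    }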
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index b549b0f5b977..6c508ff33c62 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -120,22 +120,22 @@ static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
 
 /*
  * Traverse the ftrace_global_list, invoking all entries.  The reason that we
- * can use rcu_dereference_raw() is that elements removed from this list
+ * can use rcu_dereference_raw_notrace() is that elements removed from this list
  * are simply leaked, so there is no need to interact with a grace-period
- * mechanism.  The rcu_dereference_raw() calls are needed to handle
+ * mechanism.  The rcu_dereference_raw_notrace() calls are needed to handle
  * concurrent insertions into the ftrace_global_list.
  *
  * Silly Alpha and silly pointer-speculation compiler optimizations!
  */
 #define do_for_each_ftrace_op(op, list)			\
-	op = rcu_dereference_raw(list);			\
+	op = rcu_dereference_raw_notrace(list);			\
 	do
 
 /*
  * Optimized for just a single item in the list (as that is the normal case).
  */
 #define while_for_each_ftrace_op(op)				\
-	while (likely(op = rcu_dereference_raw((op)->next)) &&	\
+	while (likely(op = rcu_dereference_raw_notrace((op)->next)) &&	\
 	       unlikely((op) != &ftrace_list_end))
 
 static inline void ftrace_ops_init(struct ftrace_ops *ops)
@@ -779,7 +779,7 @@ ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
 	if (hlist_empty(hhd))
 		return NULL;
 
-	hlist_for_each_entry_rcu(rec, hhd, node) {
+	hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
 		if (rec->ip == ip)
 			return rec;
 	}
@@ -1165,7 +1165,7 @@ ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
 
 	hhd = &hash->buckets[key];
 
-	hlist_for_each_entry_rcu(entry, hhd, hlist) {
+	hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
 		if (entry->ip == ip)
 			return entry;
 	}
@@ -1422,8 +1422,8 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
 	struct ftrace_hash *notrace_hash;
 	int ret;
 
-	filter_hash = rcu_dereference_raw(ops->filter_hash);
-	notrace_hash = rcu_dereference_raw(ops->notrace_hash);
+	filter_hash = rcu_dereference_raw_notrace(ops->filter_hash);
+	notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash);
 
 	if ((ftrace_hash_empty(filter_hash) ||
 	     ftrace_lookup_ip(filter_hash, ip)) &&
@@ -2920,7 +2920,7 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
 	 * on the hash. rcu_read_lock is too dangerous here.
 	 */
 	preempt_disable_notrace();
-	hlist_for_each_entry_rcu(entry, hhd, node) {
+	hlist_for_each_entry_rcu_notrace(entry, hhd, node) {
 		if (entry->ip == ip)
 			entry->ops->func(ip, parent_ip, &entry->data);
 	}
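All of the ftrace changes in this file swap RCU helpers for their _notrace variants, presumably because the instrumented versions can themselves be traced and recurse back into the function tracer. A contrived user-space reduction of that hazard, with invented helper names:

    /*
     * Contrived reduction: if the list-walk accessor is itself
     * instrumented, a tracer walking its own op list would recurse.
     * "_notrace" here just means "no instrumentation inside".
     */
    #include <stdio.h>

    struct op {
        struct op *next;
        void (*func)(void);
    };

    static void trace_hook(const char *what)
    {
        printf("traced: %s\n", what);
    }

    /* instrumented accessor: unsafe to call from inside the tracer */
    static struct op *deref_traced(struct op *p)
    {
        trace_hook("deref");
        return p;
    }

    /* notrace accessor: a plain load, safe inside the tracer */
    static struct op *deref_notrace(struct op *p)
    {
        return p;
    }

    #define for_each_op_notrace(op, list) \
        for ((op) = deref_notrace(list); (op); (op) = deref_notrace((op)->next))

    static void hello(void) { puts("hello"); }

    int main(void)
    {
        struct op a = { NULL, hello };
        struct op *op;

        (void)deref_traced(&a);		/* fine outside the tracer */
        for_each_op_notrace(op, &a)	/* what the tracer itself must use */
            op->func();
        return 0;
    }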
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index b59aea2c48c2..e444ff88f0a4 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -620,6 +620,9 @@ int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
 	if (cpu == RING_BUFFER_ALL_CPUS)
 		work = &buffer->irq_work;
 	else {
+		if (!cpumask_test_cpu(cpu, buffer->cpumask))
+			return -EINVAL;
+
 		cpu_buffer = buffer->buffers[cpu];
 		work = &cpu_buffer->irq_work;
 	}
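The new check validates the cpu argument against buffer->cpumask before indexing buffer->buffers[], instead of trusting the caller. The shape of the guard, as a standalone sketch with an array standing in for the cpumask:

    /* Bounds-and-membership check before array indexing; toy cpumask. */
    #include <stdio.h>

    #define NCPU 4

    static int cpu_has_buffer[NCPU] = { 1, 1, 0, 1 };
    static int buffers[NCPU] = { 10, 11, 0, 13 };

    static int poll_wait(int cpu)
    {
        if (cpu < 0 || cpu >= NCPU || !cpu_has_buffer[cpu])
            return -22;		/* -EINVAL */
        return buffers[cpu];	/* safe: cpu is known-valid here */
    }

    int main(void)
    {
        printf("%d\n", poll_wait(2));	/* no buffer: -22 */
        printf("%d\n", poll_wait(9));	/* out of range: -22 */
        printf("%d\n", poll_wait(1));	/* ok: 11 */
        return 0;
    }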
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index ae6fa2d1cdf7..1a41023a1f88 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -843,7 +843,15 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 
 	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
 	max_data->pid = tsk->pid;
-	max_data->uid = task_uid(tsk);
+	/*
+	 * If tsk == current, then use current_uid(), as that does not use
+	 * RCU. The irq tracer can be called out of RCU scope.
+	 */
+	if (tsk == current)
+		max_data->uid = current_uid();
+	else
+		max_data->uid = task_uid(tsk);
+
 	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
 	max_data->policy = tsk->policy;
 	max_data->rt_priority = tsk->rt_priority;
@@ -6216,10 +6224,15 @@ __init static int tracer_alloc_buffers(void)
 
 	trace_init_cmdlines();
 
-	register_tracer(&nop_trace);
-
+	/*
+	 * register_tracer() might reference current_trace, so it
+	 * needs to be set before we register anything. This is
+	 * just a bootstrap of current_trace anyway.
+	 */
 	global_trace.current_trace = &nop_trace;
 
+	register_tracer(&nop_trace);
+
 	/* All seems OK, enable tracing */
 	tracing_disabled = 0;
 
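The reorder makes global_trace.current_trace valid before register_tracer() runs, since registration may consult it. A minimal illustration of why such bootstrap assignments must precede the first registration, using an invented, reduced API:

    /* Bootstrap ordering: set the pointer before the call that reads it. */
    #include <assert.h>
    #include <stddef.h>

    struct tracer {
        const char *name;
    };

    static struct tracer nop_trace = { "nop" };
    static struct tracer *current_trace;

    static void register_tracer(struct tracer *t)
    {
        /* may consult current_trace, e.g. to run selftests against it */
        assert(current_trace != NULL);
        (void)t;
    }

    int main(void)
    {
        current_trace = &nop_trace;	/* bootstrap first ... */
        register_tracer(&nop_trace);	/* ... then register */
        return 0;
    }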
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 55e2cf66967b..2901e3b88590 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -1159,7 +1159,7 @@ trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
 	/* stop the tracing. */
 	tracing_stop();
 	/* check the trace buffer */
-	ret = trace_test_buffer(tr, &count);
+	ret = trace_test_buffer(&tr->trace_buffer, &count);
 	trace->reset(tr);
 	tracing_start();
 