 include/linux/rculist.h       | 20 ++++++++++++++++++++
 include/linux/rcupdate.h      |  9 +++++++++
 kernel/trace/ftrace.c         | 18 +++++++++---------
 kernel/trace/trace.c          | 10 +++++++++-
 kernel/trace/trace_selftest.c |  2 +-
 5 files changed, 48 insertions(+), 11 deletions(-)
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 8089e35d47ac..f4b1001a4676 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -461,6 +461,26 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
 		&(pos)->member)), typeof(*(pos)), member))
 
 /**
+ * hlist_for_each_entry_rcu_notrace - iterate over rcu list of given type (for tracing)
+ * @pos:	the type * to use as a loop cursor.
+ * @head:	the head for your list.
+ * @member:	the name of the hlist_node within the struct.
+ *
+ * This list-traversal primitive may safely run concurrently with
+ * the _rcu list-mutation primitives such as hlist_add_head_rcu()
+ * as long as the traversal is guarded by rcu_read_lock().
+ *
+ * This is the same as hlist_for_each_entry_rcu() except that it does
+ * not do any RCU debugging or tracing.
+ */
+#define hlist_for_each_entry_rcu_notrace(pos, head, member)			\
+	for (pos = hlist_entry_safe (rcu_dereference_raw_notrace(hlist_first_rcu(head)),\
+			typeof(*(pos)), member);				\
+		pos;								\
+		pos = hlist_entry_safe(rcu_dereference_raw_notrace(hlist_next_rcu(\
+			&(pos)->member)), typeof(*(pos)), member))
+
+/**
  * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type
  * @pos:	the type * to use as a loop cursor.
  * @head:	the head for your list.
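The new iterator has the same shape as hlist_for_each_entry_rcu(); only the dereference changes. A minimal sketch of how a caller might use it, assuming linux/rculist.h (the struct and function names are invented for illustration and are not part of this commit):

/* Hypothetical example, not in this patch. */
struct traced_func {
	unsigned long		ip;
	struct hlist_node	node;
};

/* Look up an entry by ip; safe against concurrent hlist_add_head_rcu(). */
static struct traced_func *lookup_func(struct hlist_head *bucket,
				       unsigned long ip)
{
	struct traced_func *f;

	hlist_for_each_entry_rcu_notrace(f, bucket, node) {
		if (f->ip == ip)
			return f;
	}
	return NULL;
}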
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 4ccd68e49b00..ddcc7826d907 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -640,6 +640,15 @@ static inline void rcu_preempt_sleep_check(void)
 
 #define rcu_dereference_raw(p) rcu_dereference_check(p, 1) /*@@@ needed? @@@*/
 
+/*
+ * The tracing infrastructure traces RCU (we want that), but unfortunately
+ * some of the RCU checks causes tracing to lock up the system.
+ *
+ * The tracing version of rcu_dereference_raw() must not call
+ * rcu_read_lock_held().
+ */
+#define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu)
+
 /**
  * rcu_access_index() - fetch RCU index with no dereferencing
  * @p: The index to read
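The point of the new macro is that expanding straight to __rcu_dereference_check((p), 1, __rcu) skips the lockdep rcu_read_lock_held() machinery, which is itself traceable and could recurse back into the tracer. A hedged sketch of a notrace consumer (all names here are invented for illustration):

/* Hypothetical example, not in this patch. */
struct my_ops {
	void (*func)(unsigned long ip);
	struct my_ops __rcu *next;
};

static struct my_ops __rcu *my_ops_list;

/*
 * Called from the function tracer itself, so it must not be traced and
 * must not enter the lockdep-checked RCU paths.
 */
static notrace void call_my_ops(unsigned long ip)
{
	struct my_ops *op;

	for (op = rcu_dereference_raw_notrace(my_ops_list); op;
	     op = rcu_dereference_raw_notrace(op->next))
		op->func(ip);
}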
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index b549b0f5b977..6c508ff33c62 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -120,22 +120,22 @@ static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
 
 /*
  * Traverse the ftrace_global_list, invoking all entries.  The reason that we
- * can use rcu_dereference_raw() is that elements removed from this list
+ * can use rcu_dereference_raw_notrace() is that elements removed from this list
  * are simply leaked, so there is no need to interact with a grace-period
- * mechanism.  The rcu_dereference_raw() calls are needed to handle
+ * mechanism.  The rcu_dereference_raw_notrace() calls are needed to handle
  * concurrent insertions into the ftrace_global_list.
  *
  * Silly Alpha and silly pointer-speculation compiler optimizations!
  */
 #define do_for_each_ftrace_op(op, list)			\
-	op = rcu_dereference_raw(list);			\
+	op = rcu_dereference_raw_notrace(list);		\
 	do
 
 /*
  * Optimized for just a single item in the list (as that is the normal case).
  */
 #define while_for_each_ftrace_op(op)				\
-	while (likely(op = rcu_dereference_raw((op)->next)) &&	\
+	while (likely(op = rcu_dereference_raw_notrace((op)->next)) && \
 	       unlikely((op) != &ftrace_list_end))
 
 static inline void ftrace_ops_init(struct ftrace_ops *ops)
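For reference, the macro pair above is consumed like a do/while loop, with the sentinel ftrace_list_end terminating the walk. A hypothetical in-file helper (not part of this patch) would look like:

/* Illustrative only: count the registered ops, skipping the sentinel. */
static int count_ftrace_ops(void)
{
	struct ftrace_ops *op;
	int cnt = 0;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op != &ftrace_list_end)
			cnt++;
	} while_for_each_ftrace_op(op);

	return cnt;
}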
@@ -779,7 +779,7 @@ ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
 	if (hlist_empty(hhd))
 		return NULL;
 
-	hlist_for_each_entry_rcu(rec, hhd, node) {
+	hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
 		if (rec->ip == ip)
 			return rec;
 	}
@@ -1165,7 +1165,7 @@ ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
 
 	hhd = &hash->buckets[key];
 
-	hlist_for_each_entry_rcu(entry, hhd, hlist) {
+	hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
 		if (entry->ip == ip)
 			return entry;
 	}
@@ -1422,8 +1422,8 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
 	struct ftrace_hash *notrace_hash;
 	int ret;
 
-	filter_hash = rcu_dereference_raw(ops->filter_hash);
-	notrace_hash = rcu_dereference_raw(ops->notrace_hash);
+	filter_hash = rcu_dereference_raw_notrace(ops->filter_hash);
+	notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash);
 
 	if ((ftrace_hash_empty(filter_hash) ||
 	     ftrace_lookup_ip(filter_hash, ip)) &&
@@ -2920,7 +2920,7 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
 	 * on the hash. rcu_read_lock is too dangerous here.
 	 */
 	preempt_disable_notrace();
-	hlist_for_each_entry_rcu(entry, hhd, node) {
+	hlist_for_each_entry_rcu_notrace(entry, hhd, node) {
 		if (entry->ip == ip)
 			entry->ops->func(ip, parent_ip, &entry->data);
 	}
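The last hunk also shows the guard pattern these conversions rely on: the probe walker pins the CPU with preempt_disable_notrace() rather than rcu_read_lock(), because the rcu_read_lock() bookkeeping can itself be traced. The pattern in isolation, with invented names (a sketch, not code from this patch):

struct my_probe {
	unsigned long		ip;
	void			(*func)(unsigned long ip, unsigned long parent_ip,
					void **data);
	void			*data;
	struct hlist_node	node;
};

static notrace void my_probe_call(struct hlist_head *hhd,
				  unsigned long ip, unsigned long parent_ip)
{
	struct my_probe *entry;

	preempt_disable_notrace();	/* classic-RCU read side, no tracing */
	hlist_for_each_entry_rcu_notrace(entry, hhd, node) {
		if (entry->ip == ip)
			entry->func(ip, parent_ip, &entry->data);
	}
	preempt_enable_notrace();
}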
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 4d79485b3237..1a41023a1f88 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -843,7 +843,15 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 
 	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
 	max_data->pid = tsk->pid;
-	max_data->uid = task_uid(tsk);
+	/*
+	 * If tsk == current, then use current_uid(), as that does not use
+	 * RCU. The irq tracer can be called out of RCU scope.
+	 */
+	if (tsk == current)
+		max_data->uid = current_uid();
+	else
+		max_data->uid = task_uid(tsk);
+
 	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
 	max_data->policy = tsk->policy;
 	max_data->rt_priority = tsk->rt_priority;
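This hunk works because current_uid() reads the task's own credentials without RCU, while task_uid() on another task opens an rcu_read_lock() section, which is unsafe when the irq tracer fires outside RCU scope. The same guard, factored into a hypothetical helper (the name is invented; this is not part of the patch):

/* Fetch a task's uid without entering an RCU read-side section
 * when tsk == current. */
static kuid_t tracing_safe_uid(struct task_struct *tsk)
{
	if (tsk == current)
		return current_uid();	/* no RCU: task's own cred */
	return task_uid(tsk);		/* rcu_read_lock()-based path */
}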
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 55e2cf66967b..2901e3b88590 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -1159,7 +1159,7 @@ trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
 	/* stop the tracing. */
 	tracing_stop();
 	/* check the trace buffer */
-	ret = trace_test_buffer(tr, &count);
+	ret = trace_test_buffer(&tr->trace_buffer, &count);
 	trace->reset(tr);
 	tracing_start();
