author    Arnaldo Carvalho de Melo <acme@redhat.com>  2009-02-05 01:13:37 -0500
committer Ingo Molnar <mingo@elte.hu>                 2009-02-05 08:35:47 -0500
commit    7be421510b91491d5aa5a29fa1005712039b95af (patch)
tree      43c85cd688ea4e33b32816bb3f363215d572f052
parent    97e5b191ae7dc0f4f5b82b9db29782928b103b4d (diff)
trace: Remove unused trace_array_cpu parameter
Impact: cleanup

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
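The shape of the change, at the prototype level: each tracing helper below loses its struct trace_array_cpu argument. A minimal before/after sketch, using trace_function as the representative (the other helpers in the hunks below change the same way):

    /* before: the per-CPU slot was threaded through every helper,
     * although the helpers never read it */
    void trace_function(struct trace_array *tr, struct trace_array_cpu *data,
                        unsigned long ip, unsigned long parent_ip,
                        unsigned long flags, int pc);

    /* after: callers keep their trace_array_cpu pointer only for the
     * local ->disabled guard; events are written through tr->buffer */
    void trace_function(struct trace_array *tr,
                        unsigned long ip, unsigned long parent_ip,
                        unsigned long flags, int pc);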
Diffstat:
 block/blktrace.c                  |  2
 kernel/trace/trace.c              | 47
 kernel/trace/trace.h              |  4
 kernel/trace/trace_functions.c    |  8
 kernel/trace/trace_irqsoff.c      | 10
 kernel/trace/trace_sched_switch.c |  4
 kernel/trace/trace_sched_wakeup.c | 12
 7 files changed, 35 insertions(+), 52 deletions(-)
diff --git a/block/blktrace.c b/block/blktrace.c
index 1ebd068061ec..d9d7146ee023 100644
--- a/block/blktrace.c
+++ b/block/blktrace.c
@@ -245,7 +245,7 @@ record_it:
 	if (pid != 0 &&
 	    !(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC) &&
 	    (trace_flags & TRACE_ITER_STACKTRACE) != 0)
-		__trace_stack(blk_tr, NULL, flags, 5, pc);
+		__trace_stack(blk_tr, flags, 5, pc);
 	trace_wake_up();
 	return;
 }
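Note that blktrace was passing NULL for the parameter, which is the clearest sign that __trace_stack() never dereferences it; the call site simply drops the placeholder:

    __trace_stack(blk_tr, NULL, flags, 5, pc);  /* before */
    __trace_stack(blk_tr, flags, 5, pc);        /* after  */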
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index a5e4c0af9bb0..1d4ff568cc4d 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -776,7 +776,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
 }
 
 void
-trace_function(struct trace_array *tr, struct trace_array_cpu *data,
+trace_function(struct trace_array *tr,
 	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
 	       int pc)
 {
@@ -802,7 +802,6 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data,
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 static void __trace_graph_entry(struct trace_array *tr,
-				struct trace_array_cpu *data,
 				struct ftrace_graph_ent *trace,
 				unsigned long flags,
 				int pc)
@@ -826,7 +825,6 @@ static void __trace_graph_entry(struct trace_array *tr,
 }
 
 static void __trace_graph_return(struct trace_array *tr,
-				 struct trace_array_cpu *data,
 				 struct ftrace_graph_ret *trace,
 				 unsigned long flags,
 				 int pc)
@@ -856,11 +854,10 @@ ftrace(struct trace_array *tr, struct trace_array_cpu *data,
 	 int pc)
 {
 	if (likely(!atomic_read(&data->disabled)))
-		trace_function(tr, data, ip, parent_ip, flags, pc);
+		trace_function(tr, ip, parent_ip, flags, pc);
 }
 
 static void __ftrace_trace_stack(struct trace_array *tr,
-				 struct trace_array_cpu *data,
 				 unsigned long flags,
 				 int skip, int pc)
 {
@@ -891,27 +888,24 @@ static void __ftrace_trace_stack(struct trace_array *tr,
 }
 
 static void ftrace_trace_stack(struct trace_array *tr,
-			       struct trace_array_cpu *data,
 			       unsigned long flags,
 			       int skip, int pc)
 {
 	if (!(trace_flags & TRACE_ITER_STACKTRACE))
 		return;
 
-	__ftrace_trace_stack(tr, data, flags, skip, pc);
+	__ftrace_trace_stack(tr, flags, skip, pc);
 }
 
 void __trace_stack(struct trace_array *tr,
-		   struct trace_array_cpu *data,
 		   unsigned long flags,
 		   int skip, int pc)
 {
-	__ftrace_trace_stack(tr, data, flags, skip, pc);
+	__ftrace_trace_stack(tr, flags, skip, pc);
 }
 
 static void ftrace_trace_userstack(struct trace_array *tr,
-				   struct trace_array_cpu *data,
-				   unsigned long flags, int pc)
+				   unsigned long flags, int pc)
 {
 #ifdef CONFIG_STACKTRACE
 	struct ring_buffer_event *event;
@@ -942,20 +936,17 @@ static void ftrace_trace_userstack(struct trace_array *tr,
 #endif
 }
 
-void __trace_userstack(struct trace_array *tr,
-		       struct trace_array_cpu *data,
-		       unsigned long flags)
+void __trace_userstack(struct trace_array *tr, unsigned long flags)
 {
-	ftrace_trace_userstack(tr, data, flags, preempt_count());
+	ftrace_trace_userstack(tr, flags, preempt_count());
 }
 
 static void
-ftrace_trace_special(void *__tr, void *__data,
+ftrace_trace_special(void *__tr,
 		     unsigned long arg1, unsigned long arg2, unsigned long arg3,
 		     int pc)
 {
 	struct ring_buffer_event *event;
-	struct trace_array_cpu *data = __data;
 	struct trace_array *tr = __tr;
 	struct special_entry *entry;
 	unsigned long irq_flags;
@@ -971,8 +962,8 @@ ftrace_trace_special(void *__tr, void *__data,
 	entry->arg2 = arg2;
 	entry->arg3 = arg3;
 	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-	ftrace_trace_stack(tr, data, irq_flags, 4, pc);
-	ftrace_trace_userstack(tr, data, irq_flags, pc);
+	ftrace_trace_stack(tr, irq_flags, 4, pc);
+	ftrace_trace_userstack(tr, irq_flags, pc);
 
 	trace_wake_up();
 }
@@ -981,12 +972,11 @@ void
 __trace_special(void *__tr, void *__data,
 		unsigned long arg1, unsigned long arg2, unsigned long arg3)
 {
-	ftrace_trace_special(__tr, __data, arg1, arg2, arg3, preempt_count());
+	ftrace_trace_special(__tr, arg1, arg2, arg3, preempt_count());
 }
 
 void
 tracing_sched_switch_trace(struct trace_array *tr,
-			   struct trace_array_cpu *data,
 			   struct task_struct *prev,
 			   struct task_struct *next,
 			   unsigned long flags, int pc)
@@ -1010,13 +1000,12 @@ tracing_sched_switch_trace(struct trace_array *tr,
 	entry->next_state = next->state;
 	entry->next_cpu = task_cpu(next);
 	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-	ftrace_trace_stack(tr, data, flags, 5, pc);
-	ftrace_trace_userstack(tr, data, flags, pc);
+	ftrace_trace_stack(tr, flags, 5, pc);
+	ftrace_trace_userstack(tr, flags, pc);
 }
 
 void
 tracing_sched_wakeup_trace(struct trace_array *tr,
-			   struct trace_array_cpu *data,
 			   struct task_struct *wakee,
 			   struct task_struct *curr,
 			   unsigned long flags, int pc)
@@ -1040,8 +1029,8 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
 	entry->next_state = wakee->state;
 	entry->next_cpu = task_cpu(wakee);
 	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-	ftrace_trace_stack(tr, data, flags, 6, pc);
-	ftrace_trace_userstack(tr, data, flags, pc);
+	ftrace_trace_stack(tr, flags, 6, pc);
+	ftrace_trace_userstack(tr, flags, pc);
 
 	trace_wake_up();
 }
@@ -1064,7 +1053,7 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
 	data = tr->data[cpu];
 
 	if (likely(atomic_inc_return(&data->disabled) == 1))
-		ftrace_trace_special(tr, data, arg1, arg2, arg3, pc);
+		ftrace_trace_special(tr, arg1, arg2, arg3, pc);
 
 	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
@@ -1092,7 +1081,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
 	disabled = atomic_inc_return(&data->disabled);
 	if (likely(disabled == 1)) {
 		pc = preempt_count();
-		__trace_graph_entry(tr, data, trace, flags, pc);
+		__trace_graph_entry(tr, trace, flags, pc);
 	}
 	/* Only do the atomic if it is not already set */
 	if (!test_tsk_trace_graph(current))
@@ -1118,7 +1107,7 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
 	disabled = atomic_inc_return(&data->disabled);
 	if (likely(disabled == 1)) {
 		pc = preempt_count();
-		__trace_graph_return(tr, data, trace, flags, pc);
+		__trace_graph_return(tr, trace, flags, pc);
 	}
 	if (!trace->depth)
 		clear_tsk_trace_graph(current);
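All of the trace.c call sites reduce to the same pattern: the per-CPU slot is still fetched, but only as the re-entrancy guard, while the writer takes just the trace_array. A composite sketch of that pattern, assembled from the hunks above (example_hook is a made-up name, not kernel code):

    static void example_hook(struct trace_array *tr,
                             unsigned long ip, unsigned long parent_ip)
    {
            struct trace_array_cpu *data;
            unsigned long flags;
            int cpu;

            local_irq_save(flags);
            cpu = raw_smp_processor_id();
            data = tr->data[cpu];           /* still needed... */

            /* ...but only for the guard, not for the writer */
            if (likely(atomic_inc_return(&data->disabled) == 1))
                    trace_function(tr, ip, parent_ip, flags, preempt_count());

            atomic_dec(&data->disabled);
            local_irq_restore(flags);
    }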
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index f0c7a0f08cac..df627a948694 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -419,14 +419,12 @@ void ftrace(struct trace_array *tr,
 	    unsigned long parent_ip,
 	    unsigned long flags, int pc);
 void tracing_sched_switch_trace(struct trace_array *tr,
-				struct trace_array_cpu *data,
 				struct task_struct *prev,
 				struct task_struct *next,
 				unsigned long flags, int pc);
 void tracing_record_cmdline(struct task_struct *tsk);
 
 void tracing_sched_wakeup_trace(struct trace_array *tr,
-				struct trace_array_cpu *data,
 				struct task_struct *wakee,
 				struct task_struct *cur,
 				unsigned long flags, int pc);
@@ -436,7 +434,6 @@ void trace_special(struct trace_array *tr,
 		   unsigned long arg2,
 		   unsigned long arg3, int pc);
 void trace_function(struct trace_array *tr,
-		    struct trace_array_cpu *data,
 		    unsigned long ip,
 		    unsigned long parent_ip,
 		    unsigned long flags, int pc);
@@ -462,7 +459,6 @@ void update_max_tr_single(struct trace_array *tr,
 			  struct task_struct *tsk, int cpu);
 
 void __trace_stack(struct trace_array *tr,
-		   struct trace_array_cpu *data,
 		   unsigned long flags,
 		   int skip, int pc);
 
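For reference, the four affected prototypes in trace.h as they read after the patch (assembled from the hunks above):

    void tracing_sched_switch_trace(struct trace_array *tr,
                                    struct task_struct *prev,
                                    struct task_struct *next,
                                    unsigned long flags, int pc);
    void tracing_sched_wakeup_trace(struct trace_array *tr,
                                    struct task_struct *wakee,
                                    struct task_struct *cur,
                                    unsigned long flags, int pc);
    void trace_function(struct trace_array *tr,
                        unsigned long ip,
                        unsigned long parent_ip,
                        unsigned long flags, int pc);
    void __trace_stack(struct trace_array *tr,
                       unsigned long flags,
                       int skip, int pc);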
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index b3a320f8aba7..d067cea2ccc3 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -78,7 +78,7 @@ function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
 	disabled = atomic_inc_return(&data->disabled);
 
 	if (likely(disabled == 1))
-		trace_function(tr, data, ip, parent_ip, flags, pc);
+		trace_function(tr, ip, parent_ip, flags, pc);
 
 	atomic_dec(&data->disabled);
 	ftrace_preempt_enable(resched);
@@ -108,7 +108,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
 
 	if (likely(disabled == 1)) {
 		pc = preempt_count();
-		trace_function(tr, data, ip, parent_ip, flags, pc);
+		trace_function(tr, ip, parent_ip, flags, pc);
 	}
 
 	atomic_dec(&data->disabled);
@@ -139,7 +139,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
 
 	if (likely(disabled == 1)) {
 		pc = preempt_count();
-		trace_function(tr, data, ip, parent_ip, flags, pc);
+		trace_function(tr, ip, parent_ip, flags, pc);
 		/*
 		 * skip over 5 funcs:
 		 *    __ftrace_trace_stack,
@@ -148,7 +148,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
 		 *    ftrace_list_func
 		 *    ftrace_call
 		 */
-		__trace_stack(tr, data, flags, 5, pc);
+		__trace_stack(tr, flags, 5, pc);
 	}
 
 	atomic_dec(&data->disabled);
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index ed344b022a14..c6b442d88de8 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -95,7 +95,7 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
 	disabled = atomic_inc_return(&data->disabled);
 
 	if (likely(disabled == 1))
-		trace_function(tr, data, ip, parent_ip, flags, preempt_count());
+		trace_function(tr, ip, parent_ip, flags, preempt_count());
 
 	atomic_dec(&data->disabled);
 }
@@ -153,7 +153,7 @@ check_critical_timing(struct trace_array *tr,
 	if (!report_latency(delta))
 		goto out_unlock;
 
-	trace_function(tr, data, CALLER_ADDR0, parent_ip, flags, pc);
+	trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
 
 	latency = nsecs_to_usecs(delta);
 
@@ -177,7 +177,7 @@ out:
 	data->critical_sequence = max_sequence;
 	data->preempt_timestamp = ftrace_now(cpu);
 	tracing_reset(tr, cpu);
-	trace_function(tr, data, CALLER_ADDR0, parent_ip, flags, pc);
+	trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
 }
 
 static inline void
@@ -210,7 +210,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
 
 	local_save_flags(flags);
 
-	trace_function(tr, data, ip, parent_ip, flags, preempt_count());
+	trace_function(tr, ip, parent_ip, flags, preempt_count());
 
 	per_cpu(tracing_cpu, cpu) = 1;
 
@@ -244,7 +244,7 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip)
 	atomic_inc(&data->disabled);
 
 	local_save_flags(flags);
-	trace_function(tr, data, ip, parent_ip, flags, preempt_count());
+	trace_function(tr, ip, parent_ip, flags, preempt_count());
 	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
 	data->critical_start = 0;
 	atomic_dec(&data->disabled);
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index df175cb4564f..c4f9add5ec90 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -43,7 +43,7 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
 	data = ctx_trace->data[cpu];
 
 	if (likely(!atomic_read(&data->disabled)))
-		tracing_sched_switch_trace(ctx_trace, data, prev, next, flags, pc);
+		tracing_sched_switch_trace(ctx_trace, prev, next, flags, pc);
 
 	local_irq_restore(flags);
 }
@@ -66,7 +66,7 @@ probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee, int success)
 	data = ctx_trace->data[cpu];
 
 	if (likely(!atomic_read(&data->disabled)))
-		tracing_sched_wakeup_trace(ctx_trace, data, wakee, current,
+		tracing_sched_wakeup_trace(ctx_trace, wakee, current,
 					   flags, pc);
 
 	local_irq_restore(flags);
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index a48c9b4b0c85..96d716485898 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -72,7 +72,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
 	if (task_cpu(wakeup_task) != cpu)
 		goto unlock;
 
-	trace_function(tr, data, ip, parent_ip, flags, pc);
+	trace_function(tr, ip, parent_ip, flags, pc);
 
  unlock:
 	__raw_spin_unlock(&wakeup_lock);
@@ -152,8 +152,8 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
 	if (unlikely(!tracer_enabled || next != wakeup_task))
 		goto out_unlock;
 
-	trace_function(wakeup_trace, data, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
-	tracing_sched_switch_trace(wakeup_trace, data, prev, next, flags, pc);
+	trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
+	tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);
 
 	/*
 	 * usecs conversion is slow so we try to delay the conversion
@@ -254,10 +254,8 @@ probe_wakeup(struct rq *rq, struct task_struct *p, int success)
 
 	data = wakeup_trace->data[wakeup_cpu];
 	data->preempt_timestamp = ftrace_now(cpu);
-	tracing_sched_wakeup_trace(wakeup_trace, data, p, current,
-				   flags, pc);
-	trace_function(wakeup_trace, data, CALLER_ADDR1, CALLER_ADDR2,
-		       flags, pc);
+	tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);
+	trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
 
 out_locked:
 	__raw_spin_unlock(&wakeup_lock);