Diffstat (limited to 'kernel/trace/trace_sched_wakeup.c')
 kernel/trace/trace_sched_wakeup.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 26185d727676..0271742abb8d 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -28,8 +28,8 @@ static int wakeup_current_cpu;
 static unsigned wakeup_prio = -1;
 static int wakeup_rt;
 
-static raw_spinlock_t wakeup_lock =
-	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t wakeup_lock =
+	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 static void __wakeup_reset(struct trace_array *tr);
 
@@ -143,7 +143,7 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
 		goto out;
 
 	local_irq_save(flags);
-	__raw_spin_lock(&wakeup_lock);
+	arch_spin_lock(&wakeup_lock);
 
 	/* We could race with grabbing wakeup_lock */
 	if (unlikely(!tracer_enabled || next != wakeup_task))
@@ -169,7 +169,7 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
 
 out_unlock:
 	__wakeup_reset(wakeup_trace);
-	__raw_spin_unlock(&wakeup_lock);
+	arch_spin_unlock(&wakeup_lock);
 	local_irq_restore(flags);
 out:
 	atomic_dec(&wakeup_trace->data[cpu]->disabled);
@@ -193,9 +193,9 @@ static void wakeup_reset(struct trace_array *tr)
 	tracing_reset_online_cpus(tr);
 
 	local_irq_save(flags);
-	__raw_spin_lock(&wakeup_lock);
+	arch_spin_lock(&wakeup_lock);
 	__wakeup_reset(tr);
-	__raw_spin_unlock(&wakeup_lock);
+	arch_spin_unlock(&wakeup_lock);
 	local_irq_restore(flags);
 }
 
@@ -225,7 +225,7 @@ probe_wakeup(struct rq *rq, struct task_struct *p, int success)
 		goto out;
 
 	/* interrupts should be off from try_to_wake_up */
-	__raw_spin_lock(&wakeup_lock);
+	arch_spin_lock(&wakeup_lock);
 
 	/* check for races. */
 	if (!tracer_enabled || p->prio >= wakeup_prio)
@@ -255,7 +255,7 @@ probe_wakeup(struct rq *rq, struct task_struct *p, int success)
 	trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
 
 out_locked:
-	__raw_spin_unlock(&wakeup_lock);
+	arch_spin_unlock(&wakeup_lock);
 out:
 	atomic_dec(&wakeup_trace->data[cpu]->disabled);
 }
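
This patch is a mechanical rename: arch_spinlock_t is the architecture-level spinlock that sits beneath the kernel's lockdep-tracked locks. arch_spin_lock() performs no lockdep tracking and does not disable interrupts or preemption by itself, which is why every lock site above keeps its explicit local_irq_save()/local_irq_restore() pairing. A minimal sketch of that pattern follows; example_update() and example_counter are hypothetical names, but the lock/unlock sequence mirrors wakeup_reset() in the diff:

	#include <linux/spinlock.h>
	#include <linux/irqflags.h>

	static arch_spinlock_t example_lock =
		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	static int example_counter;	/* hypothetical data guarded by the lock */

	static void example_update(void)
	{
		unsigned long flags;

		/*
		 * arch_spin_lock() bypasses lockdep and leaves IRQ and
		 * preemption state untouched, so interrupts must be
		 * disabled explicitly, exactly as the tracer does.
		 */
		local_irq_save(flags);
		arch_spin_lock(&example_lock);

		example_counter++;

		arch_spin_unlock(&example_lock);
		local_irq_restore(flags);
	}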
