 include/linux/sched/task.h        | 6 +++++-
 kernel/events/core.c              | 9 +++------
 kernel/irq/manage.c               | 3 +--
 kernel/locking/rtmutex.c          | 6 ++----
 kernel/trace/trace_sched_wakeup.c | 3 +--
 5 files changed, 12 insertions(+), 15 deletions(-)
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index 0497091e40c1..3d90ed8f75f0 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -105,7 +105,11 @@ extern void sched_exec(void);
 #define sched_exec() {}
 #endif
 
-#define get_task_struct(tsk) do { refcount_inc(&(tsk)->usage); } while(0)
+static inline struct task_struct *get_task_struct(struct task_struct *t)
+{
+	refcount_inc(&t->usage);
+	return t;
+}
 
 extern void __put_task_struct(struct task_struct *t);
 
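For illustration, a minimal user-space sketch (not kernel code; task_struct and the refcount below are simplified stand-ins, and main() is only a hypothetical driver) of the call-site pattern the new inline enables: because get_task_struct() now returns its argument, taking the reference and storing the pointer collapse into a single expression, as the caller updates below show.

/* Simplified model of the pattern: a reference-taking helper that returns its argument. */
#include <stdatomic.h>
#include <stdio.h>

struct task_struct {
	atomic_int usage;	/* stand-in for the kernel's refcount_t */
	const char *comm;
};

static inline struct task_struct *get_task_struct(struct task_struct *t)
{
	atomic_fetch_add(&t->usage, 1);	/* take a reference ... */
	return t;			/* ... and hand the pointer back to the caller */
}

int main(void)
{
	struct task_struct task = { .comm = "demo" };
	struct task_struct *target;

	atomic_init(&task.usage, 1);

	/* Old style needed two statements: get_task_struct(&task); target = &task; */
	target = get_task_struct(&task);	/* new style: grab and assign in one expression */

	printf("%s usage=%d\n", target->comm, atomic_load(&task.usage));
	return 0;
}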
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 026a14541a38..ea5e8139fe62 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4089,10 +4089,8 @@ alloc_perf_context(struct pmu *pmu, struct task_struct *task)
 		return NULL;
 
 	__perf_event_init_context(ctx);
-	if (task) {
-		ctx->task = task;
-		get_task_struct(task);
-	}
+	if (task)
+		ctx->task = get_task_struct(task);
 	ctx->pmu = pmu;
 
 	return ctx;
@@ -10355,8 +10353,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 		 * and we cannot use the ctx information because we need the
 		 * pmu before we get a ctx.
 		 */
-		get_task_struct(task);
-		event->hw.target = task;
+		event->hw.target = get_task_struct(task);
 	}
 
 	event->clock = &local_clock;
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index e8f7f179bf77..9d50fbe5531a 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1255,8 +1255,7 @@ setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
 	 * the thread dies to avoid that the interrupt code
 	 * references an already freed task_struct.
 	 */
-	get_task_struct(t);
-	new->thread = t;
+	new->thread = get_task_struct(t);
 	/*
 	 * Tell the thread to set its affinity. This is
 	 * important for shared interrupt handlers as we do
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index fa83d36e30c6..2874bf556162 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -628,8 +628,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 	}
 
 	/* [10] Grab the next task, i.e. owner of @lock */
-	task = rt_mutex_owner(lock);
-	get_task_struct(task);
+	task = get_task_struct(rt_mutex_owner(lock));
 	raw_spin_lock(&task->pi_lock);
 
 	/*
@@ -709,8 +708,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 	}
 
 	/* [10] Grab the next task, i.e. the owner of @lock */
-	task = rt_mutex_owner(lock);
-	get_task_struct(task);
+	task = get_task_struct(rt_mutex_owner(lock));
 	raw_spin_lock(&task->pi_lock);
 
 	/* [11] requeue the pi waiters if necessary */
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 743b2b520d34..5e43b9664eca 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -579,8 +579,7 @@ probe_wakeup(void *ignore, struct task_struct *p)
 	else
 		tracing_dl = 0;
 
-	wakeup_task = p;
-	get_task_struct(wakeup_task);
+	wakeup_task = get_task_struct(p);
 
 	local_save_flags(flags);
 
