author	Roland McGrath <roland@redhat.com>	2008-07-25 22:45:58 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-07-26 15:00:09 -0400
commit	85ba2d862e521375a8ee01526c5c46b1f24bb4af
tree	f098555cb580b2864f145aa845b574f07d2cf694
parent	1f5a4ad97a0e7b663d527ecc02aabe203d000d91
tracehook: wait_task_inactive
This extends wait_task_inactive() with a new argument so it can be used in
a "soft" mode where it will check for the task changing state unexpectedly
and back off.  There is no change to existing callers.

This lays the groundwork to allow robust, noninvasive tracing that can try
to sample a blocked thread but back off safely if it wakes up.

Signed-off-by: Roland McGrath <roland@redhat.com>
Cc: Oleg Nesterov <oleg@tv-sign.ru>
Reviewed-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
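For illustration, a minimal sketch of the two-call pattern this enables
(hypothetical caller, not part of the patch; take_sample() is an invented
placeholder):

#include <linux/sched.h>

/*
 * Hypothetical sampler built on the new interface: sample a blocked
 * task only if it stays blocked across the whole sample.
 */
static int sample_if_still_blocked(struct task_struct *p, long state)
{
	unsigned long ncsw;

	/* Returns 0 if @p's state changed from @state: back off. */
	ncsw = wait_task_inactive(p, state);
	if (!ncsw)
		return -EAGAIN;

	take_sample(p);		/* invented placeholder: read regs/stack */

	/* An unchanged switch count means @p never ran in between. */
	if (wait_task_inactive(p, state) != ncsw)
		return -EAGAIN;

	return 0;
}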
 arch/ia64/kernel/perfmon.c |  4 ++--
 include/linux/sched.h      |  8 ++++++--
 kernel/kthread.c           |  2 +-
 kernel/ptrace.c            |  2 +-
 kernel/sched.c             | 29 ++++++++++++++++++++++++++---
 5 files changed, 37 insertions(+), 8 deletions(-)
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 19d4493c6193..fc8f3509df27 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -2626,7 +2626,7 @@ pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
 	/*
 	 * make sure the task is off any CPU
 	 */
-	wait_task_inactive(task);
+	wait_task_inactive(task, 0);
 
 	/* more to come... */
 
@@ -4774,7 +4774,7 @@ recheck:
 
 	UNPROTECT_CTX(ctx, flags);
 
-	wait_task_inactive(task);
+	wait_task_inactive(task, 0);
 
 	PROTECT_CTX(ctx, flags);
 
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a95d84d0da95..f59318a0099b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1882,9 +1882,13 @@ extern void set_task_comm(struct task_struct *tsk, char *from);
 extern char *get_task_comm(char *to, struct task_struct *tsk);
 
 #ifdef CONFIG_SMP
-extern void wait_task_inactive(struct task_struct * p);
+extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
 #else
-#define wait_task_inactive(p)	do { } while (0)
+static inline unsigned long wait_task_inactive(struct task_struct *p,
+					       long match_state)
+{
+	return 1;
+}
 #endif
 
 #define next_task(p)	list_entry(rcu_dereference((p)->tasks.next), struct task_struct, tasks)
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 6111c27491b1..96cff2f8710b 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -176,7 +176,7 @@ void kthread_bind(struct task_struct *k, unsigned int cpu)
 		return;
 	}
 	/* Must have done schedule() in kthread() before we set_task_cpu */
-	wait_task_inactive(k);
+	wait_task_inactive(k, 0);
 	set_task_cpu(k, cpu);
 	k->cpus_allowed = cpumask_of_cpu(cpu);
 	k->rt.nr_cpus_allowed = 1;
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 8392a9da6450..082b3fcb32a0 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -107,7 +107,7 @@ int ptrace_check_attach(struct task_struct *child, int kill)
 	read_unlock(&tasklist_lock);
 
 	if (!ret && !kill)
-		wait_task_inactive(child);
+		ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH;
 
 	/* All systems go.. */
 	return ret;
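One caller-visible consequence of the ptrace_check_attach() change above,
sketched from user space (hypothetical helper; the ptrace(2) call and its
ESRCH error are real, the handling policy is illustrative): a request that
races with the tracee leaving TASK_TRACED now fails with ESRCH instead of
proceeding against a task that may have woken back up.

#include <errno.h>
#include <sys/ptrace.h>
#include <sys/types.h>

/* Hypothetical helper: read one word from a tracee we believe is stopped. */
static long peek_word(pid_t pid, void *addr)
{
	errno = 0;
	long word = ptrace(PTRACE_PEEKDATA, pid, addr, 0);
	if (word == -1 && errno == ESRCH)
		return -1;	/* tracee raced out of the stop; re-wait, then retry */
	return word;
}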
diff --git a/kernel/sched.c b/kernel/sched.c
index fde1a1026359..0236958addcb 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1867,16 +1867,24 @@ migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
 /*
  * wait_task_inactive - wait for a thread to unschedule.
  *
+ * If @match_state is nonzero, it's the @p->state value just checked and
+ * not expected to change.  If it changes, i.e. @p might have woken up,
+ * then return zero.  When we succeed in waiting for @p to be off its CPU,
+ * we return a positive number (its total switch count).  If a second call
+ * a short while later returns the same number, the caller can be sure that
+ * @p has remained unscheduled the whole time.
+ *
  * The caller must ensure that the task *will* unschedule sometime soon,
  * else this function might spin for a *long* time. This function can't
  * be called with interrupts off, or it may introduce deadlock with
  * smp_call_function() if an IPI is sent by the same process we are
  * waiting to become inactive.
  */
-void wait_task_inactive(struct task_struct *p)
+unsigned long wait_task_inactive(struct task_struct *p, long match_state)
 {
 	unsigned long flags;
 	int running, on_rq;
+	unsigned long ncsw;
 	struct rq *rq;
 
 	for (;;) {
@@ -1899,8 +1907,11 @@ void wait_task_inactive(struct task_struct *p)
 		 * return false if the runqueue has changed and p
 		 * is actually now running somewhere else!
 		 */
-		while (task_running(rq, p))
+		while (task_running(rq, p)) {
+			if (match_state && unlikely(p->state != match_state))
+				return 0;
 			cpu_relax();
+		}
 
 		/*
 		 * Ok, time to look more closely! We need the rq
@@ -1910,9 +1921,21 @@ void wait_task_inactive(struct task_struct *p)
 		rq = task_rq_lock(p, &flags);
 		running = task_running(rq, p);
 		on_rq = p->se.on_rq;
+		ncsw = 0;
+		if (!match_state || p->state == match_state) {
+			ncsw = p->nivcsw + p->nvcsw;
+			if (unlikely(!ncsw))
+				ncsw = 1;
+		}
 		task_rq_unlock(rq, &flags);
 
 		/*
+		 * If it changed from the expected state, bail out now.
+		 */
+		if (unlikely(!ncsw))
+			break;
+
+		/*
 		 * Was it really running after all now that we
 		 * checked with the proper locks actually held?
 		 *
@@ -1944,6 +1967,8 @@ void wait_task_inactive(struct task_struct *p)
 		 */
 		break;
 	}
+
+	return ncsw;
 }
 
 /***