author		Roland McGrath <roland@redhat.com>	2008-07-25 22:45:58 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-07-26 15:00:09 -0400
commit		85ba2d862e521375a8ee01526c5c46b1f24bb4af (patch)
tree		f098555cb580b2864f145aa845b574f07d2cf694 /kernel/sched.c
parent		1f5a4ad97a0e7b663d527ecc02aabe203d000d91 (diff)
tracehook: wait_task_inactive
This extends wait_task_inactive() with a new argument so it can be used in
a "soft" mode where it will check for the task changing state unexpectedly
and back off. There is no change to existing callers.

This lays the groundwork to allow robust, noninvasive tracing that can try
to sample a blocked thread but back off safely if it wakes up.

Signed-off-by: Roland McGrath <roland@redhat.com>
Cc: Oleg Nesterov <oleg@tv-sign.ru>
Reviewed-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
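To make the new contract concrete, here is a minimal caller sketch. It is an
illustration only: sample_blocked_task() and its error conventions are
hypothetical, not code from this patch; the sketch relies solely on the
return-value contract described above.

/*
 * Illustrative sketch (not part of this patch): sample a thread
 * believed to be blocked in 'state', backing off safely if it wakes
 * up. A return of 0 from wait_task_inactive() means the state
 * changed; a nonzero switch count means the task was off its CPU.
 */
static int sample_blocked_task(struct task_struct *p, long state)
{
	unsigned long ncsw;

	ncsw = wait_task_inactive(p, state);
	if (!ncsw)
		return -EAGAIN;		/* it woke up; back off */

	/* ... inspect the quiescent task here ... */

	/*
	 * An unchanged switch count on a second call means the task
	 * never ran in between, so the inspection above still holds.
	 */
	if (wait_task_inactive(p, state) != ncsw)
		return -EAGAIN;

	return 0;
}

The double-call check works because nvcsw + nivcsw only grows when the task
is scheduled, so an unchanged count proves the task never got back on a CPU
between the two calls.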
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	29
1 file changed, 27 insertions(+), 2 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index fde1a1026359..0236958addcb 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1867,16 +1867,24 @@ migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
 /*
  * wait_task_inactive - wait for a thread to unschedule.
  *
+ * If @match_state is nonzero, it's the @p->state value just checked and
+ * not expected to change. If it changes, i.e. @p might have woken up,
+ * then return zero. When we succeed in waiting for @p to be off its CPU,
+ * we return a positive number (its total switch count). If a second call
+ * a short while later returns the same number, the caller can be sure that
+ * @p has remained unscheduled the whole time.
+ *
  * The caller must ensure that the task *will* unschedule sometime soon,
  * else this function might spin for a *long* time. This function can't
  * be called with interrupts off, or it may introduce deadlock with
  * smp_call_function() if an IPI is sent by the same process we are
  * waiting to become inactive.
  */
-void wait_task_inactive(struct task_struct *p)
+unsigned long wait_task_inactive(struct task_struct *p, long match_state)
 {
 	unsigned long flags;
 	int running, on_rq;
+	unsigned long ncsw;
 	struct rq *rq;
 
 	for (;;) {
@@ -1899,8 +1907,11 @@ void wait_task_inactive(struct task_struct *p)
 		 * return false if the runqueue has changed and p
 		 * is actually now running somewhere else!
 		 */
-		while (task_running(rq, p))
+		while (task_running(rq, p)) {
+			if (match_state && unlikely(p->state != match_state))
+				return 0;
 			cpu_relax();
+		}
 
 		/*
 		 * Ok, time to look more closely! We need the rq
@@ -1910,9 +1921,21 @@ void wait_task_inactive(struct task_struct *p)
 		rq = task_rq_lock(p, &flags);
 		running = task_running(rq, p);
 		on_rq = p->se.on_rq;
+		ncsw = 0;
+		if (!match_state || p->state == match_state) {
+			ncsw = p->nivcsw + p->nvcsw;
+			if (unlikely(!ncsw))
+				ncsw = 1;
+		}
 		task_rq_unlock(rq, &flags);
 
 		/*
+		 * If it changed from the expected state, bail out now.
+		 */
+		if (unlikely(!ncsw))
+			break;
+
+		/*
 		 * Was it really running after all now that we
 		 * checked with the proper locks actually held?
 		 *
@@ -1944,6 +1967,8 @@ void wait_task_inactive(struct task_struct *p)
 		 */
 		break;
 	}
+
+	return ncsw;
 }
 
 /***
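A usage note on the applied result: pre-existing callers (updated outside
this kernel/sched.c-limited view) pass 0 for match_state, which can never
return 0 and so preserves the old spin-until-descheduled behavior. The soft
mode is the new piece; a tracer-side check might look roughly like the
following, where the TASK_TRACED caller is a hypothetical illustration, not
code from this patch:

/* Hard mode, matching pre-patch behavior: never bails out early. */
wait_task_inactive(p, 0);

/*
 * Hypothetical soft-mode use: the thread was just observed in
 * TASK_TRACED. If it has since woken, p->state no longer equals
 * match_state and wait_task_inactive() returns 0, so back off.
 */
if (!wait_task_inactive(child, TASK_TRACED))
	return -ESRCH;		/* it resumed; do not touch it */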