| author | Ingo Molnar <mingo@elte.hu> | 2008-07-28 15:14:43 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2008-07-28 15:14:43 -0400 |
| commit | 414f746d232d41ed6ae8632c4495ae795373c44b | |
| tree | 167f9bc8f139c6e82e6732b38c7a938b8a9d31cd /kernel/sched.c | |
| parent | 5a7a201c51c324876d00a54e7208af6af12d1ca4 | |
| parent | c9272c4f9fbe2087beb3392f526dc5b19efaa56b | |
Merge branch 'linus' into cpus4096
Diffstat (limited to 'kernel/sched.c')
| -rw-r--r-- | kernel/sched.c | 34 |
1 file changed, 31 insertions(+), 3 deletions(-)
```diff
diff --git a/kernel/sched.c b/kernel/sched.c
index 0047bd9b96aa..0236958addcb 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1867,16 +1867,24 @@ migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
 /*
  * wait_task_inactive - wait for a thread to unschedule.
  *
+ * If @match_state is nonzero, it's the @p->state value just checked and
+ * not expected to change. If it changes, i.e. @p might have woken up,
+ * then return zero. When we succeed in waiting for @p to be off its CPU,
+ * we return a positive number (its total switch count). If a second call
+ * a short while later returns the same number, the caller can be sure that
+ * @p has remained unscheduled the whole time.
+ *
  * The caller must ensure that the task *will* unschedule sometime soon,
  * else this function might spin for a *long* time. This function can't
  * be called with interrupts off, or it may introduce deadlock with
  * smp_call_function() if an IPI is sent by the same process we are
  * waiting to become inactive.
  */
-void wait_task_inactive(struct task_struct *p)
+unsigned long wait_task_inactive(struct task_struct *p, long match_state)
 {
         unsigned long flags;
         int running, on_rq;
+        unsigned long ncsw;
         struct rq *rq;
 
         for (;;) {
@@ -1899,8 +1907,11 @@ void wait_task_inactive(struct task_struct *p)
                  * return false if the runqueue has changed and p
                  * is actually now running somewhere else!
                  */
-                while (task_running(rq, p))
+                while (task_running(rq, p)) {
+                        if (match_state && unlikely(p->state != match_state))
+                                return 0;
                         cpu_relax();
+                }
 
                 /*
                  * Ok, time to look more closely! We need the rq
@@ -1910,9 +1921,21 @@ void wait_task_inactive(struct task_struct *p)
                 rq = task_rq_lock(p, &flags);
                 running = task_running(rq, p);
                 on_rq = p->se.on_rq;
+                ncsw = 0;
+                if (!match_state || p->state == match_state) {
+                        ncsw = p->nivcsw + p->nvcsw;
+                        if (unlikely(!ncsw))
+                                ncsw = 1;
+                }
                 task_rq_unlock(rq, &flags);
 
                 /*
+                 * If it changed from the expected state, bail out now.
+                 */
+                if (unlikely(!ncsw))
+                        break;
+
+                /*
                  * Was it really running after all now that we
                  * checked with the proper locks actually held?
                  *
@@ -1944,6 +1967,8 @@ void wait_task_inactive(struct task_struct *p)
                  */
                 break;
         }
+
+        return ncsw;
 }
 
 /***
```
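The kernel-doc added above defines a small protocol for callers: a return of 0 means @p's state stopped matching @match_state (the task may have woken up), while a nonzero return is the task's total context-switch count, and two calls that return the same count prove the task never ran in between. Below is a minimal sketch of that two-call pattern; the function wait_traced_child_settled() and the use of TASK_TRACED are illustrative assumptions, not code from this commit:

```c
#include <linux/sched.h>
#include <linux/errno.h>

/*
 * Hypothetical caller (not part of this commit): wait for a traced
 * child to get off its CPU, then verify it stayed off while we
 * inspected it.
 */
static int wait_traced_child_settled(struct task_struct *child)
{
        unsigned long ncsw, again;

        /* 0 means child->state no longer matched TASK_TRACED. */
        ncsw = wait_task_inactive(child, TASK_TRACED);
        if (!ncsw)
                return -EAGAIN;

        /* ... safely inspect the descheduled child here ... */

        /*
         * An identical switch count on the second call proves the
         * child never scheduled between the two calls.
         */
        again = wait_task_inactive(child, TASK_TRACED);
        if (again != ncsw)
                return -EAGAIN;

        return 0;
}
```

The merge also brings in the conversion of migration_init() from an externally called __init function into an early initcall: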
```diff
@@ -6389,7 +6414,7 @@ static struct notifier_block __cpuinitdata migration_notifier = {
         .priority = 10
 };
 
-void __init migration_init(void)
+static int __init migration_init(void)
 {
         void *cpu = (void *)(long)smp_processor_id();
         int err;
@@ -6399,7 +6424,10 @@ void __init migration_init(void)
         BUG_ON(err == NOTIFY_BAD);
         migration_call(&migration_notifier, CPU_ONLINE, cpu);
         register_cpu_notifier(&migration_notifier);
+
+        return err;
 }
+early_initcall(migration_init);
 #endif
 
 #ifdef CONFIG_SMP
```
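The early_initcall(migration_init) line is what now triggers the function during boot: early initcalls run before the normal initcall levels and before secondary CPUs are brought up, which is why the function can become static and return an int status instead of being called by name; its old explicit call site lives outside kernel/sched.c and is not visible in this diffstat-limited view. A generic sketch of the pattern, with my_subsys_init() as a made-up name:

```c
#include <linux/init.h>

/*
 * Illustrative early_initcall() pattern only; my_subsys_init is a
 * made-up name. The function must be static, take void, and return
 * int (0 on success); it registers itself rather than being called
 * explicitly from init code.
 */
static int __init my_subsys_init(void)
{
        /* set up state needed before normal initcalls run */
        return 0;
}
early_initcall(my_subsys_init);
```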
