diff options
| author | Chen, Kenneth W <kenneth.w.chen@intel.com> | 2006-01-26 21:24:59 -0500 |
|---|---|---|
| committer | Tony Luck <tony.luck@intel.com> | 2006-02-02 16:20:42 -0500 |
| commit | f8efa27662532ad5adb2790bfc3f4c78e019cfad (patch) | |
| tree | 848c4935674ed965256ef328a6b69316ebad6a8c | |
| parent | df080e7c94c8f4b8334614159fa079aaeece5670 (diff) | |
[IA64] remove stale comments in asm/system.h
With the recent optimization made to wrap_mmu_context function,
we don't hold tasklist_lock anymore when wrapping context id.
The comments in asm/system.h must have fallen through the cracks earlier.
Remove the stale comments.
I believe it is still beneficial to unlock the runqueue lock
across context switch. So leave __ARCH_WANT_UNLOCKED_CTXSW on.
Signed-off-by: Ken Chen <kenneth.w.chen@intel.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
| -rw-r--r-- | include/asm-ia64/system.h | 25 |
1 files changed, 0 insertions, 25 deletions
diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h index 80c5a234e259..062538715623 100644 --- a/include/asm-ia64/system.h +++ b/include/asm-ia64/system.h | |||
| @@ -249,32 +249,7 @@ extern void ia64_load_extra (struct task_struct *task); | |||
| 249 | # define switch_to(prev,next,last) __switch_to(prev, next, last) | 249 | # define switch_to(prev,next,last) __switch_to(prev, next, last) |
| 250 | #endif | 250 | #endif |
| 251 | 251 | ||
| 252 | /* | ||
| 253 | * On IA-64, we don't want to hold the runqueue's lock during the low-level context-switch, | ||
| 254 | * because that could cause a deadlock. Here is an example by Erich Focht: | ||
| 255 | * | ||
| 256 | * Example: | ||
| 257 | * CPU#0: | ||
| 258 | * schedule() | ||
| 259 | * -> spin_lock_irq(&rq->lock) | ||
| 260 | * -> context_switch() | ||
| 261 | * -> wrap_mmu_context() | ||
| 262 | * -> read_lock(&tasklist_lock) | ||
| 263 | * | ||
| 264 | * CPU#1: | ||
| 265 | * sys_wait4() or release_task() or forget_original_parent() | ||
| 266 | * -> write_lock(&tasklist_lock) | ||
| 267 | * -> do_notify_parent() | ||
| 268 | * -> wake_up_parent() | ||
| 269 | * -> try_to_wake_up() | ||
| 270 | * -> spin_lock_irq(&parent_rq->lock) | ||
| 271 | * | ||
| 272 | * If the parent's rq happens to be on CPU#0, we'll wait for the rq->lock | ||
| 273 | * of that CPU which will not be released, because there we wait for the | ||
| 274 | * tasklist_lock to become available. | ||
| 275 | */ | ||
| 276 | #define __ARCH_WANT_UNLOCKED_CTXSW | 252 | #define __ARCH_WANT_UNLOCKED_CTXSW |
| 277 | |||
| 278 | #define ARCH_HAS_PREFETCH_SWITCH_STACK | 253 | #define ARCH_HAS_PREFETCH_SWITCH_STACK |
| 279 | #define ia64_platform_is(x) (strcmp(x, platform_name) == 0) | 254 | #define ia64_platform_is(x) (strcmp(x, platform_name) == 0) |
| 280 | 255 | ||
