author		Brent Casavant <bcasavan@sgi.com>	2006-01-26 18:55:52 -0500
committer	Tony Luck <tony.luck@intel.com>		2006-01-26 18:55:52 -0500
commit		e08e6c521355cd33e647b2f739885bc3050eead6 (patch)
tree		251dd80647bd3a0140f5f31c35c125094c035f9c	/include/asm-ia64/system.h
parent		3ee68c4af3fd7228c1be63254b9f884614f9ebb2 (diff)
[IA64] hooks to wait for mmio writes to drain when migrating processes
On SN2, MMIO writes which are issued from separate processors are not guaranteed to arrive in any particular order at the IO hardware. When performing such writes from the kernel this is not a problem, as a kernel thread will not migrate to another CPU during execution, and mmiowb() calls can guarantee write ordering when control of the IO resource is allowed to move between threads.

However, when MMIO writes can be performed from user space (e.g. DRM) there are no such guarantees and mechanisms, as the process may context-switch at any time, and may migrate to a different CPU as part of the switch. For such programs/hardware to operate correctly, it is required that the MMIO writes from the old CPU be accepted by the IO hardware before subsequent writes from the new CPU can be issued.

The following patch implements this behavior on SN2 by waiting for a Shub register to indicate that these writes have been accepted. This is placed in the context switch-in path, and only performs the wait when the newly scheduled task changes CPUs.

Signed-off-by: Prarit Bhargava <prarit@sgi.com>
Signed-off-by: Brent Casavant <bcasavan@sgi.com>
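For context, a rough sketch of the kernel-side pattern the message alludes to: a driver that hands an IO resource between CPUs under a lock orders its MMIO writes with mmiowb() before releasing the lock. The struct my_dev, its fields, and the MY_DOORBELL offset are hypothetical; only writel(), mmiowb(), and the spinlock calls are real kernel interfaces.

#include <linux/spinlock.h>
#include <linux/types.h>
#include <asm/io.h>

struct my_dev {				/* hypothetical device, for illustration only */
	spinlock_t	lock;		/* serializes ownership of the IO resource */
	void __iomem	*regs;		/* mapped MMIO registers */
};

#define MY_DOORBELL	0x10		/* hypothetical register offset */

static void my_dev_ring_doorbell(struct my_dev *dev, u32 val)
{
	spin_lock(&dev->lock);
	writel(val, dev->regs + MY_DOORBELL);	/* post the MMIO write */
	mmiowb();				/* ensure it reaches the device before the
						 * next lock owner (possibly another CPU)
						 * issues its own writes */
	spin_unlock(&dev->lock);
}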
Diffstat (limited to 'include/asm-ia64/system.h')
-rw-r--r--	include/asm-ia64/system.h | 7
1 file changed, 7 insertions(+), 0 deletions(-)
diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h
index 80c5a234e259..99b6f307e94b 100644
--- a/include/asm-ia64/system.h
+++ b/include/asm-ia64/system.h
@@ -244,6 +244,13 @@ extern void ia64_load_extra (struct task_struct *task);
 		__ia64_save_fpu((prev)->thread.fph);			\
 	}								\
 	__switch_to(prev, next, last);					\
+	/* "next" in old context is "current" in new context */	\
+	if (unlikely((current->thread.flags & IA64_THREAD_MIGRATION) &&	\
+		     (task_cpu(current) !=				\
+		      task_thread_info(current)->last_cpu))) {		\
+		platform_migrate(current);				\
+		task_thread_info(current)->last_cpu = task_cpu(current);	\
+	}								\
 } while (0)
 #else
 # define switch_to(prev,next,last) __switch_to(prev, next, last)
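As a rough sketch (an assumption, not the code added by this series) of what the platform_migrate() hook could expand to on SN2: spin until the Shub reports that all PIO/MMIO writes issued from the task's previous CPU have been accepted. The pdacpu() accessor usage, the per-CPU pio_write_status_addr/pio_write_status_val fields, and the PENDING_WRITE_COUNT mask name are assumptions for illustration.

/*
 * Sketch only: drain the previous CPU's outstanding MMIO writes before the
 * migrated task runs on its new CPU.  Assumes SN2-style per-CPU data and
 * Shub MMR definitions (e.g. headers along the lines of <asm/sn/pda.h> and
 * <asm/sn/shub_mmr.h>).
 */
void sn_migrate(struct task_struct *task)
{
	int last_cpu = task_thread_info(task)->last_cpu;
	volatile unsigned long *sts = pdacpu(last_cpu)->pio_write_status_addr;	/* assumed field */
	unsigned long drained = pdacpu(last_cpu)->pio_write_status_val;		/* assumed field */

	/* Wait until the Shub write-status register shows no pending writes. */
	while ((*sts & SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK) != drained)
		cpu_relax();
}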