diff options
| author | Tony Luck <tony.luck@intel.com> | 2006-03-21 11:21:26 -0500 |
|---|---|---|
| committer | Tony Luck <tony.luck@intel.com> | 2006-03-21 11:21:26 -0500 |
| commit | 409761bb6a06bd61e2d8e27a1af534371d9537ed (patch) | |
| tree | 2c07068558092a01d70f737de9b9a34212a44d4b /arch/ia64/sn/kernel/sn2 | |
| parent | a4e817ba24d2a52f0332c2ddcdbf77ddd6a92bbe (diff) | |
| parent | e08e6c521355cd33e647b2f739885bc3050eead6 (diff) | |
Pull sn2-mmio-writes into release branch
Hand-fixed conflicts:
include/asm-ia64/machvec_sn2.h
Signed-off-by: Tony Luck <tony.luck@intel.com>
Diffstat (limited to 'arch/ia64/sn/kernel/sn2')
| -rw-r--r-- | arch/ia64/sn/kernel/sn2/sn2_smp.c | 21 |
1 file changed, 21 insertions, 0 deletions
diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c index b2e1e746b47f..d9d306c79f2d 100644 --- a/arch/ia64/sn/kernel/sn2/sn2_smp.c +++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c | |||
| @@ -93,6 +93,27 @@ static inline unsigned long wait_piowc(void) | |||
| 93 | return (ws & SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_MASK) != 0; | 93 | return (ws & SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_MASK) != 0; |
| 94 | } | 94 | } |
| 95 | 95 | ||
| 96 | /** | ||
| 97 | * sn_migrate - SN-specific task migration actions | ||
| 98 | * @task: Task being migrated to new CPU | ||
| 99 | * | ||
| 100 | * SN2 PIO writes from separate CPUs are not guaranteed to arrive in order. | ||
| 101 | * Context switching user threads which have memory-mapped MMIO may cause | ||
| 102 | * PIOs to issue from separate CPUs, thus the PIO writes must be drained | ||
| 103 | * from the previous CPU's Shub before execution resumes on the new CPU. | ||
| 104 | */ | ||
| 105 | void sn_migrate(struct task_struct *task) | ||
| 106 | { | ||
| 107 | pda_t *last_pda = pdacpu(task_thread_info(task)->last_cpu); | ||
| 108 | volatile unsigned long *adr = last_pda->pio_write_status_addr; | ||
| 109 | unsigned long val = last_pda->pio_write_status_val; | ||
| 110 | |||
| 111 | /* Drain PIO writes from old CPU's Shub */ | ||
| 112 | while (unlikely((*adr & SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK) | ||
| 113 | != val)) | ||
| 114 | cpu_relax(); | ||
| 115 | } | ||
| 116 | |||
| 96 | void sn_tlb_migrate_finish(struct mm_struct *mm) | 117 | void sn_tlb_migrate_finish(struct mm_struct *mm) |
| 97 | { | 118 | { |
| 98 | /* flush_tlb_mm is inefficient if more than 1 users of mm */ | 119 | /* flush_tlb_mm is inefficient if more than 1 users of mm */ |
