Diffstat (limited to 'arch/ia64/sn')

 -rw-r--r--  arch/ia64/sn/kernel/Makefile         |   3
 -rw-r--r--  arch/ia64/sn/kernel/pio_phys.S       |  71
 -rw-r--r--  arch/ia64/sn/kernel/setup.c          |   6
 -rw-r--r--  arch/ia64/sn/kernel/sn2/sn2_smp.c    |  21
 -rw-r--r--  arch/ia64/sn/kernel/xpc_channel.c    | 102
 -rw-r--r--  arch/ia64/sn/kernel/xpc_main.c       |   1
 -rw-r--r--  arch/ia64/sn/kernel/xpc_partition.c  |  28
 -rw-r--r--  arch/ia64/sn/pci/tioce_provider.c    | 326

8 files changed, 486 insertions, 72 deletions
diff --git a/arch/ia64/sn/kernel/Makefile b/arch/ia64/sn/kernel/Makefile
index 3e9b4eea7418..ab9c48c88012 100644
--- a/arch/ia64/sn/kernel/Makefile
+++ b/arch/ia64/sn/kernel/Makefile
@@ -10,7 +10,8 @@
 CPPFLAGS += -I$(srctree)/arch/ia64/sn/include
 
 obj-y += setup.o bte.o bte_error.o irq.o mca.o idle.o \
-	 huberror.o io_init.o iomv.o klconflib.o sn2/
+	 huberror.o io_init.o iomv.o klconflib.o pio_phys.o \
+	 sn2/
 obj-$(CONFIG_IA64_GENERIC) += machvec.o
 obj-$(CONFIG_SGI_TIOCX) += tiocx.o
 obj-$(CONFIG_IA64_SGI_SN_XP) += xp.o
diff --git a/arch/ia64/sn/kernel/pio_phys.S b/arch/ia64/sn/kernel/pio_phys.S
new file mode 100644
index 000000000000..3c7d48d6ecb8
--- /dev/null
+++ b/arch/ia64/sn/kernel/pio_phys.S
@@ -0,0 +1,71 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved.
+ *
+ * This file contains macros used to access MMR registers via
+ * uncached physical addresses.
+ *      pio_phys_read_mmr  - read an MMR
+ *      pio_phys_write_mmr - write an MMR
+ *      pio_atomic_phys_write_mmrs - atomically write 1 or 2 MMRs with psr.ic=0
+ *              Second MMR will be skipped if address is NULL
+ *
+ * Addresses passed to these routines should be uncached physical addresses,
+ * i.e., 0x80000....
+ */
+
+
+
+#include <asm/asmmacro.h>
+#include <asm/page.h>
+
+GLOBAL_ENTRY(pio_phys_read_mmr)
+	.prologue
+	.regstk 1,0,0,0
+	.body
+	mov r2=psr
+	rsm psr.i | psr.dt
+	;;
+	srlz.d
+	ld8.acq r8=[r32]
+	;;
+	mov psr.l=r2;;
+	srlz.d
+	br.ret.sptk.many rp
+END(pio_phys_read_mmr)
+
+GLOBAL_ENTRY(pio_phys_write_mmr)
+	.prologue
+	.regstk 2,0,0,0
+	.body
+	mov r2=psr
+	rsm psr.i | psr.dt
+	;;
+	srlz.d
+	st8.rel [r32]=r33
+	;;
+	mov psr.l=r2;;
+	srlz.d
+	br.ret.sptk.many rp
+END(pio_phys_write_mmr)
+
+GLOBAL_ENTRY(pio_atomic_phys_write_mmrs)
+	.prologue
+	.regstk 4,0,0,0
+	.body
+	mov r2=psr
+	cmp.ne p9,p0=r34,r0;
+	rsm psr.i | psr.dt | psr.ic
+	;;
+	srlz.d
+	st8.rel [r32]=r33
+(p9)	st8.rel [r34]=r35
+	;;
+	mov psr.l=r2;;
+	srlz.d
+	br.ret.sptk.many rp
+END(pio_atomic_phys_write_mmrs)
+
+
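The new entry points are reached from C through extern declarations that live in a header outside this diff. The signatures below are a sketch inferred from the register usage above (one stacked argument for the read, two for the write, four for the atomic variant, with r8 as the return register), so treat the exact types as assumptions:

    /* Assumed C-side declarations for the routines above; addresses must be
     * uncached physical (0x80000...-form) per the file header comment. */
    extern long pio_phys_read_mmr(volatile long *mmr);
    extern void pio_phys_write_mmr(volatile long *mmr, long val);
    extern void pio_atomic_phys_write_mmrs(volatile long *mmr1, long val1,
                                           volatile long *mmr2, long val2);

    /* Example: update two MMRs as one unit with interrupt collection off;
     * per the header comment, pass NULL for mmr2 to write only the first. */
    pio_atomic_phys_write_mmrs(mmr1, val1, NULL, 0);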
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index 5b84836c2171..8b6d5c844708 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -3,7 +3,7 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 1999,2001-2005 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1999,2001-2006 Silicon Graphics, Inc. All rights reserved.
  */
 
 #include <linux/config.h>
@@ -498,6 +498,7 @@ void __init sn_setup(char **cmdline_p)
 	 * for sn.
 	 */
 	pm_power_off = ia64_sn_power_down;
+	current->thread.flags |= IA64_THREAD_MIGRATION;
 }
 
 /**
@@ -660,7 +661,8 @@ void __init sn_cpu_init(void)
 			SH2_PIO_WRITE_STATUS_1, SH2_PIO_WRITE_STATUS_3};
 		u64 *pio;
 		pio = is_shub1() ? pio1 : pio2;
-		pda->pio_write_status_addr = (volatile unsigned long *) LOCAL_MMR_ADDR(pio[slice]);
+		pda->pio_write_status_addr =
+		   (volatile unsigned long *)GLOBAL_MMR_ADDR(nasid, pio[slice]);
 		pda->pio_write_status_val = is_shub1() ? SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK : 0;
 	}
 
diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c
index b2e1e746b47f..d9d306c79f2d 100644
--- a/arch/ia64/sn/kernel/sn2/sn2_smp.c
+++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c
@@ -93,6 +93,27 @@ static inline unsigned long wait_piowc(void)
 	return (ws & SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_MASK) != 0;
 }
 
+/**
+ * sn_migrate - SN-specific task migration actions
+ * @task: Task being migrated to new CPU
+ *
+ * SN2 PIO writes from separate CPUs are not guaranteed to arrive in order.
+ * Context switching user threads which have memory-mapped MMIO may cause
+ * PIOs to issue from separate CPUs, thus the PIO writes must be drained
+ * from the previous CPU's Shub before execution resumes on the new CPU.
+ */
+void sn_migrate(struct task_struct *task)
+{
+	pda_t *last_pda = pdacpu(task_thread_info(task)->last_cpu);
+	volatile unsigned long *adr = last_pda->pio_write_status_addr;
+	unsigned long val = last_pda->pio_write_status_val;
+
+	/* Drain PIO writes from old CPU's Shub */
+	while (unlikely((*adr & SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK)
+			!= val))
+		cpu_relax();
+}
+
 void sn_tlb_migrate_finish(struct mm_struct *mm)
 {
 	/* flush_tlb_mm is inefficient if more than 1 users of mm */
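Nothing in this directory calls sn_migrate() directly; together with the IA64_THREAD_MIGRATION flag set in sn_setup() above, it is evidently meant to be driven from the generic ia64 context-switch path, which is outside the scope of this diff. A sketch of how such a hook could be wired up, with the machvec name platform_migrate assumed (on sn2 it would resolve to sn_migrate):

    /* Hypothetical switch_to() wrapper: if the outgoing task is flagged for
     * migration draining and is now on a different CPU than last time, drain
     * the old CPU's Shub PIO writes before resuming it on the new CPU. */
    #define switch_to(prev, next, last) do {                                \
            if (unlikely((current->thread.flags & IA64_THREAD_MIGRATION) && \
                         task_cpu(current) !=                               \
                         task_thread_info(current)->last_cpu)) {            \
                    platform_migrate(current);                              \
                    task_thread_info(current)->last_cpu =                   \
                            task_cpu(current);                              \
            }                                                               \
            __switch_to(prev, next, last);                                  \
    } while (0)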
diff --git a/arch/ia64/sn/kernel/xpc_channel.c b/arch/ia64/sn/kernel/xpc_channel.c
index cdf6856ce089..d0abddd9ffe6 100644
--- a/arch/ia64/sn/kernel/xpc_channel.c
+++ b/arch/ia64/sn/kernel/xpc_channel.c
@@ -21,7 +21,6 @@
 #include <linux/sched.h>
 #include <linux/cache.h>
 #include <linux/interrupt.h>
-#include <linux/slab.h>
 #include <linux/mutex.h>
 #include <linux/completion.h>
 #include <asm/sn/bte.h>
@@ -30,6 +29,31 @@
 
 
 /*
+ * Guarantee that the kzalloc'd memory is cacheline aligned.
+ */
+static void *
+xpc_kzalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
+{
+	/* see if kzalloc will give us cacheline aligned memory by default */
+	*base = kzalloc(size, flags);
+	if (*base == NULL) {
+		return NULL;
+	}
+	if ((u64) *base == L1_CACHE_ALIGN((u64) *base)) {
+		return *base;
+	}
+	kfree(*base);
+
+	/* nope, we'll have to do it ourselves */
+	*base = kzalloc(size + L1_CACHE_BYTES, flags);
+	if (*base == NULL) {
+		return NULL;
+	}
+	return (void *) L1_CACHE_ALIGN((u64) *base);
+}
+
+
+/*
  * Set up the initial values for the XPartition Communication channels.
  */
 static void
@@ -93,20 +117,19 @@ xpc_setup_infrastructure(struct xpc_partition *part)
 	 * Allocate all of the channel structures as a contiguous chunk of
 	 * memory.
 	 */
-	part->channels = kmalloc(sizeof(struct xpc_channel) * XPC_NCHANNELS,
+	part->channels = kzalloc(sizeof(struct xpc_channel) * XPC_NCHANNELS,
 								GFP_KERNEL);
 	if (part->channels == NULL) {
 		dev_err(xpc_chan, "can't get memory for channels\n");
 		return xpcNoMemory;
 	}
-	memset(part->channels, 0, sizeof(struct xpc_channel) * XPC_NCHANNELS);
 
 	part->nchannels = XPC_NCHANNELS;
 
 
 	/* allocate all the required GET/PUT values */
 
-	part->local_GPs = xpc_kmalloc_cacheline_aligned(XPC_GP_SIZE,
+	part->local_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE,
 					GFP_KERNEL, &part->local_GPs_base);
 	if (part->local_GPs == NULL) {
 		kfree(part->channels);
@@ -115,55 +138,51 @@ xpc_setup_infrastructure(struct xpc_partition *part)
 			"values\n");
 		return xpcNoMemory;
 	}
-	memset(part->local_GPs, 0, XPC_GP_SIZE);
 
-	part->remote_GPs = xpc_kmalloc_cacheline_aligned(XPC_GP_SIZE,
+	part->remote_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE,
 					GFP_KERNEL, &part->remote_GPs_base);
 	if (part->remote_GPs == NULL) {
-		kfree(part->channels);
-		part->channels = NULL;
-		kfree(part->local_GPs_base);
-		part->local_GPs = NULL;
 		dev_err(xpc_chan, "can't get memory for remote get/put "
 			"values\n");
+		kfree(part->local_GPs_base);
+		part->local_GPs = NULL;
+		kfree(part->channels);
+		part->channels = NULL;
 		return xpcNoMemory;
 	}
-	memset(part->remote_GPs, 0, XPC_GP_SIZE);
 
 
 	/* allocate all the required open and close args */
 
-	part->local_openclose_args = xpc_kmalloc_cacheline_aligned(
+	part->local_openclose_args = xpc_kzalloc_cacheline_aligned(
 					XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL,
 					&part->local_openclose_args_base);
 	if (part->local_openclose_args == NULL) {
-		kfree(part->channels);
-		part->channels = NULL;
-		kfree(part->local_GPs_base);
-		part->local_GPs = NULL;
+		dev_err(xpc_chan, "can't get memory for local connect args\n");
 		kfree(part->remote_GPs_base);
 		part->remote_GPs = NULL;
-		dev_err(xpc_chan, "can't get memory for local connect args\n");
+		kfree(part->local_GPs_base);
+		part->local_GPs = NULL;
+		kfree(part->channels);
+		part->channels = NULL;
 		return xpcNoMemory;
 	}
-	memset(part->local_openclose_args, 0, XPC_OPENCLOSE_ARGS_SIZE);
 
-	part->remote_openclose_args = xpc_kmalloc_cacheline_aligned(
+	part->remote_openclose_args = xpc_kzalloc_cacheline_aligned(
 					XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL,
 					&part->remote_openclose_args_base);
 	if (part->remote_openclose_args == NULL) {
-		kfree(part->channels);
-		part->channels = NULL;
-		kfree(part->local_GPs_base);
-		part->local_GPs = NULL;
-		kfree(part->remote_GPs_base);
-		part->remote_GPs = NULL;
+		dev_err(xpc_chan, "can't get memory for remote connect args\n");
 		kfree(part->local_openclose_args_base);
 		part->local_openclose_args = NULL;
-		dev_err(xpc_chan, "can't get memory for remote connect args\n");
+		kfree(part->remote_GPs_base);
+		part->remote_GPs = NULL;
+		kfree(part->local_GPs_base);
+		part->local_GPs = NULL;
+		kfree(part->channels);
+		part->channels = NULL;
 		return xpcNoMemory;
 	}
-	memset(part->remote_openclose_args, 0, XPC_OPENCLOSE_ARGS_SIZE);
 
 
 	xpc_initialize_channels(part, partid);
@@ -186,18 +205,18 @@ xpc_setup_infrastructure(struct xpc_partition *part)
 	ret = request_irq(SGI_XPC_NOTIFY, xpc_notify_IRQ_handler, SA_SHIRQ,
 				part->IPI_owner, (void *) (u64) partid);
 	if (ret != 0) {
-		kfree(part->channels);
-		part->channels = NULL;
-		kfree(part->local_GPs_base);
-		part->local_GPs = NULL;
-		kfree(part->remote_GPs_base);
-		part->remote_GPs = NULL;
-		kfree(part->local_openclose_args_base);
-		part->local_openclose_args = NULL;
-		kfree(part->remote_openclose_args_base);
-		part->remote_openclose_args = NULL;
 		dev_err(xpc_chan, "can't register NOTIFY IRQ handler, "
 			"errno=%d\n", -ret);
+		kfree(part->remote_openclose_args_base);
+		part->remote_openclose_args = NULL;
+		kfree(part->local_openclose_args_base);
+		part->local_openclose_args = NULL;
+		kfree(part->remote_GPs_base);
+		part->remote_GPs = NULL;
+		kfree(part->local_GPs_base);
+		part->local_GPs = NULL;
+		kfree(part->channels);
+		part->channels = NULL;
 		return xpcLackOfResources;
 	}
 
@@ -446,22 +465,20 @@ xpc_allocate_local_msgqueue(struct xpc_channel *ch)
 	for (nentries = ch->local_nentries; nentries > 0; nentries--) {
 
 		nbytes = nentries * ch->msg_size;
-		ch->local_msgqueue = xpc_kmalloc_cacheline_aligned(nbytes,
+		ch->local_msgqueue = xpc_kzalloc_cacheline_aligned(nbytes,
 						GFP_KERNEL,
 						&ch->local_msgqueue_base);
 		if (ch->local_msgqueue == NULL) {
 			continue;
 		}
-		memset(ch->local_msgqueue, 0, nbytes);
 
 		nbytes = nentries * sizeof(struct xpc_notify);
-		ch->notify_queue = kmalloc(nbytes, GFP_KERNEL);
+		ch->notify_queue = kzalloc(nbytes, GFP_KERNEL);
 		if (ch->notify_queue == NULL) {
 			kfree(ch->local_msgqueue_base);
 			ch->local_msgqueue = NULL;
 			continue;
 		}
-		memset(ch->notify_queue, 0, nbytes);
 
 		spin_lock_irqsave(&ch->lock, irq_flags);
 		if (nentries < ch->local_nentries) {
@@ -501,13 +518,12 @@ xpc_allocate_remote_msgqueue(struct xpc_channel *ch)
 	for (nentries = ch->remote_nentries; nentries > 0; nentries--) {
 
 		nbytes = nentries * ch->msg_size;
-		ch->remote_msgqueue = xpc_kmalloc_cacheline_aligned(nbytes,
+		ch->remote_msgqueue = xpc_kzalloc_cacheline_aligned(nbytes,
 						GFP_KERNEL,
 						&ch->remote_msgqueue_base);
 		if (ch->remote_msgqueue == NULL) {
 			continue;
 		}
-		memset(ch->remote_msgqueue, 0, nbytes);
 
 		spin_lock_irqsave(&ch->lock, irq_flags);
 		if (nentries < ch->remote_nentries) {
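The xpc_kzalloc_cacheline_aligned() helper added above depends only on L1_CACHE_ALIGN() rounding an address up to the next cacheline boundary. A minimal userspace analogue of the same try-then-overallocate pattern, with the cacheline size chosen arbitrarily for illustration:

    #include <stdint.h>
    #include <stdlib.h>

    #define CACHELINE 128   /* stand-in for L1_CACHE_BYTES; an assumption */
    #define CACHE_ALIGN(x) (((x) + CACHELINE - 1) & ~(uintptr_t)(CACHELINE - 1))

    /* Try a plain zeroed allocation first; if it happens to be aligned, use
     * it.  Otherwise over-allocate by one cacheline and round the pointer
     * up.  *base keeps the raw pointer that must later go to free(). */
    static void *zalloc_cacheline_aligned(size_t size, void **base)
    {
            *base = calloc(1, size);
            if (*base == NULL)
                    return NULL;
            if ((uintptr_t)*base == CACHE_ALIGN((uintptr_t)*base))
                    return *base;
            free(*base);

            *base = calloc(1, size + CACHELINE);
            if (*base == NULL)
                    return NULL;
            return (void *)CACHE_ALIGN((uintptr_t)*base);
    }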
diff --git a/arch/ia64/sn/kernel/xpc_main.c b/arch/ia64/sn/kernel/xpc_main.c
index 8cbf16432570..99b123a6421a 100644
--- a/arch/ia64/sn/kernel/xpc_main.c
+++ b/arch/ia64/sn/kernel/xpc_main.c
@@ -52,7 +52,6 @@
 #include <linux/syscalls.h>
 #include <linux/cache.h>
 #include <linux/interrupt.h>
-#include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/reboot.h>
 #include <linux/completion.h>
diff --git a/arch/ia64/sn/kernel/xpc_partition.c b/arch/ia64/sn/kernel/xpc_partition.c
index 88a730e6cfdb..94211429fd0c 100644
--- a/arch/ia64/sn/kernel/xpc_partition.c
+++ b/arch/ia64/sn/kernel/xpc_partition.c
@@ -81,6 +81,31 @@ char ____cacheline_aligned xpc_remote_copy_buffer[XPC_RP_HEADER_SIZE +
 
 
 /*
+ * Guarantee that the kmalloc'd memory is cacheline aligned.
+ */
+static void *
+xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
+{
+	/* see if kmalloc will give us cacheline aligned memory by default */
+	*base = kmalloc(size, flags);
+	if (*base == NULL) {
+		return NULL;
+	}
+	if ((u64) *base == L1_CACHE_ALIGN((u64) *base)) {
+		return *base;
+	}
+	kfree(*base);
+
+	/* nope, we'll have to do it ourselves */
+	*base = kmalloc(size + L1_CACHE_BYTES, flags);
+	if (*base == NULL) {
+		return NULL;
+	}
+	return (void *) L1_CACHE_ALIGN((u64) *base);
+}
+
+
+/*
  * Given a nasid, get the physical address of the partition's reserved page
  * for that nasid.  This function returns 0 on any error.
  */
@@ -1038,13 +1063,12 @@ xpc_discovery(void)
 	remote_vars = (struct xpc_vars *) remote_rp;
 
 
-	discovered_nasids = kmalloc(sizeof(u64) * xp_nasid_mask_words,
+	discovered_nasids = kzalloc(sizeof(u64) * xp_nasid_mask_words,
 							GFP_KERNEL);
 	if (discovered_nasids == NULL) {
 		kfree(remote_rp_base);
 		return;
 	}
-	memset(discovered_nasids, 0, sizeof(u64) * xp_nasid_mask_words);
 
 	rp = (struct xpc_rsvd_page *) xpc_rsvd_page;
 
diff --git a/arch/ia64/sn/pci/tioce_provider.c b/arch/ia64/sn/pci/tioce_provider.c
index e52831ed93eb..fa073cc4b565 100644
--- a/arch/ia64/sn/pci/tioce_provider.c
+++ b/arch/ia64/sn/pci/tioce_provider.c
@@ -15,6 +15,124 @@
 #include <asm/sn/pcidev.h>
 #include <asm/sn/pcibus_provider_defs.h>
 #include <asm/sn/tioce_provider.h>
+#include <asm/sn/sn2/sn_hwperf.h>
+
+/*
+ * 1/26/2006
+ *
+ * WAR for SGI PV 944642.  For revA TIOCE, need to use the following recipe
+ * (taken from the above PV) before and after accessing tioce internal MMR's
+ * to avoid tioce lockups.
+ *
+ * The recipe as taken from the PV:
+ *
+ *	if(mmr address < 0x45000) {
+ *		if(mmr address == 0 or 0x80)
+ *			mmr wrt or read address 0xc0
+ *		else if(mmr address == 0x148 or 0x200)
+ *			mmr wrt or read address 0x28
+ *		else
+ *			mmr wrt or read address 0x158
+ *
+ *		do desired mmr access (rd or wrt)
+ *
+ *		if(mmr address == 0x100)
+ *			mmr wrt or read address 0x38
+ *		mmr wrt or read address 0xb050
+ *	} else
+ *		do desired mmr access
+ *
+ * According to hw, we can use reads instead of writes to the above addresses
+ *
+ * Note this WAR can only be used for accessing internal MMR's in the
+ * TIOCE Coretalk Address Range 0x0 - 0x07ff_ffff.  This includes the
+ * "Local CE Registers and Memories" and "PCI Compatible Config Space" address
+ * spaces from table 2-1 of the "CE Programmer's Reference Overview" document.
+ *
+ * All registers defined in struct tioce will meet that criteria.
+ */
+
+static void inline
+tioce_mmr_war_pre(struct tioce_kernel *kern, void *mmr_addr)
+{
+	u64 mmr_base;
+	u64 mmr_offset;
+
+	if (kern->ce_common->ce_rev != TIOCE_REV_A)
+		return;
+
+	mmr_base = kern->ce_common->ce_pcibus.bs_base;
+	mmr_offset = (u64)mmr_addr - mmr_base;
+
+	if (mmr_offset < 0x45000) {
+		u64 mmr_war_offset;
+
+		if (mmr_offset == 0 || mmr_offset == 0x80)
+			mmr_war_offset = 0xc0;
+		else if (mmr_offset == 0x148 || mmr_offset == 0x200)
+			mmr_war_offset = 0x28;
+		else
+			mmr_war_offset = 0x158;
+
+		readq_relaxed((void *)(mmr_base + mmr_war_offset));
+	}
+}
+
+static void inline
+tioce_mmr_war_post(struct tioce_kernel *kern, void *mmr_addr)
+{
+	u64 mmr_base;
+	u64 mmr_offset;
+
+	if (kern->ce_common->ce_rev != TIOCE_REV_A)
+		return;
+
+	mmr_base = kern->ce_common->ce_pcibus.bs_base;
+	mmr_offset = (u64)mmr_addr - mmr_base;
+
+	if (mmr_offset < 0x45000) {
+		if (mmr_offset == 0x100)
+			readq_relaxed((void *)(mmr_base + 0x38));
+		readq_relaxed((void *)(mmr_base + 0xb050));
+	}
+}
+
+/* load mmr contents into a variable */
+#define tioce_mmr_load(kern, mmrp, varp) do {\
+	tioce_mmr_war_pre(kern, mmrp); \
+	*(varp) = readq_relaxed(mmrp); \
+	tioce_mmr_war_post(kern, mmrp); \
+} while (0)
+
+/* store variable contents into mmr */
+#define tioce_mmr_store(kern, mmrp, varp) do {\
+	tioce_mmr_war_pre(kern, mmrp); \
+	writeq(*varp, mmrp); \
+	tioce_mmr_war_post(kern, mmrp); \
+} while (0)
+
+/* store immediate value into mmr */
+#define tioce_mmr_storei(kern, mmrp, val) do {\
+	tioce_mmr_war_pre(kern, mmrp); \
+	writeq(val, mmrp); \
+	tioce_mmr_war_post(kern, mmrp); \
+} while (0)
+
+/* set bits (immediate value) into mmr */
+#define tioce_mmr_seti(kern, mmrp, bits) do {\
+	u64 tmp; \
+	tioce_mmr_load(kern, mmrp, &tmp); \
+	tmp |= (bits); \
+	tioce_mmr_store(kern, mmrp, &tmp); \
+} while (0)
+
+/* clear bits (immediate value) into mmr */
+#define tioce_mmr_clri(kern, mmrp, bits) do { \
+	u64 tmp; \
+	tioce_mmr_load(kern, mmrp, &tmp); \
+	tmp &= ~(bits); \
+	tioce_mmr_store(kern, mmrp, &tmp); \
+} while (0)
 
 /**
  * Bus address ranges for the 5 flavors of TIOCE DMA
@@ -62,9 +180,9 @@ | |||
62 | #define TIOCE_ATE_M40 2 | 180 | #define TIOCE_ATE_M40 2 |
63 | #define TIOCE_ATE_M40S 3 | 181 | #define TIOCE_ATE_M40S 3 |
64 | 182 | ||
65 | #define KB(x) ((x) << 10) | 183 | #define KB(x) ((u64)(x) << 10) |
66 | #define MB(x) ((x) << 20) | 184 | #define MB(x) ((u64)(x) << 20) |
67 | #define GB(x) ((x) << 30) | 185 | #define GB(x) ((u64)(x) << 30) |
68 | 186 | ||
69 | /** | 187 | /** |
70 | * tioce_dma_d64 - create a DMA mapping using 64-bit direct mode | 188 | * tioce_dma_d64 - create a DMA mapping using 64-bit direct mode |
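The new (u64) casts matter because these macros are usually invoked with plain int arguments: the shift is then performed in 32-bit arithmetic and the high bits are lost before any widening takes place. A small standalone demonstration:

    #include <stdint.h>
    #include <stdio.h>

    #define GB_OLD(x) ((x) << 30)            /* 32-bit shift: high bits lost */
    #define GB_NEW(x) ((uint64_t)(x) << 30)  /* widen first, then shift */

    int main(void)
    {
            /* 4u << 30 wraps to 0 in 32-bit arithmetic; the cast version
             * yields the intended 0x100000000 (4 GiB). */
            printf("old: 0x%llx\n", (unsigned long long)GB_OLD(4u));
            printf("new: 0x%llx\n", (unsigned long long)GB_NEW(4u));
            return 0;
    }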
@@ -151,7 +269,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
 	int last;
 	int entries;
 	int nates;
-	int pagesize;
+	u64 pagesize;
 	u64 *ate_shadow;
 	u64 *ate_reg;
 	u64 addr;
@@ -228,7 +346,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
 
 		ate = ATE_MAKE(addr, pagesize);
 		ate_shadow[i + j] = ate;
-		writeq(ate, &ate_reg[i + j]);
+		tioce_mmr_storei(ce_kern, &ate_reg[i + j], ate);
 		addr += pagesize;
 	}
 
@@ -272,7 +390,8 @@ tioce_dma_d32(struct pci_dev *pdev, u64 ct_addr)
 		u64 tmp;
 
 		ce_kern->ce_port[port].dirmap_shadow = ct_upper;
-		writeq(ct_upper, &ce_mmr->ce_ure_dir_map[port]);
+		tioce_mmr_storei(ce_kern, &ce_mmr->ce_ure_dir_map[port],
+				 ct_upper);
 		tmp = ce_mmr->ce_ure_dir_map[port];
 		dma_ok = 1;
 	} else
@@ -344,7 +463,8 @@ tioce_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
 	if (TIOCE_D32_ADDR(bus_addr)) {
 		if (--ce_kern->ce_port[port].dirmap_refcnt == 0) {
 			ce_kern->ce_port[port].dirmap_shadow = 0;
-			writeq(0, &ce_mmr->ce_ure_dir_map[port]);
+			tioce_mmr_storei(ce_kern, &ce_mmr->ce_ure_dir_map[port],
+					 0);
 		}
 	} else {
 		struct tioce_dmamap *map;
@@ -365,7 +485,7 @@ tioce_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
 		} else if (--map->refcnt == 0) {
 			for (i = 0; i < map->ate_count; i++) {
 				map->ate_shadow[i] = 0;
-				map->ate_hw[i] = 0;
+				tioce_mmr_storei(ce_kern, &map->ate_hw[i], 0);
 			}
 
 			list_del(&map->ce_dmamap_list);
@@ -486,7 +606,7 @@ tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count,
 	spin_unlock_irqrestore(&ce_kern->ce_lock, flags);
 
 dma_map_done:
-	if (mapaddr & barrier)
+	if (mapaddr && barrier)
 		mapaddr = tioce_dma_barrier(mapaddr, 1);
 
 	return mapaddr;
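The dma_map_done change above is a logic fix rather than a relaxation: barrier holds 0 or 1, so the old test mapaddr & barrier examined only bit 0 of the bus address instead of asking whether both values are nonzero. A two-line illustration:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t mapaddr = 0x80000000ULL; /* hypothetical even bus address */
            int barrier = 1;

            assert((mapaddr & barrier) == 0);  /* old: barrier wrongly skipped */
            assert((mapaddr && barrier) == 1); /* new: true when both nonzero */
            return 0;
    }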
@@ -541,17 +661,61 @@ tioce_error_intr_handler(int irq, void *arg, struct pt_regs *pt)
 			soft->ce_pcibus.bs_persist_segment,
 			soft->ce_pcibus.bs_persist_busnum, 0, 0, 0, 0, 0);
 
+	if (ret_stuff.v0)
+		panic("tioce_error_intr_handler: Fatal TIOCE error");
+
 	return IRQ_HANDLED;
 }
 
 /**
+ * tioce_reserve_m32 - reserve M32 ate's for the indicated address range
+ * @tioce_kernel: TIOCE context to reserve ate's for
+ * @base: starting bus address to reserve
+ * @limit: last bus address to reserve
+ *
+ * If base/limit falls within the range of bus space mapped through the
+ * M32 space, reserve the resources corresponding to the range.
+ */
+static void
+tioce_reserve_m32(struct tioce_kernel *ce_kern, u64 base, u64 limit)
+{
+	int ate_index, last_ate, ps;
+	struct tioce *ce_mmr;
+
+	if (!TIOCE_M32_ADDR(base))
+		return;
+
+	ce_mmr = (struct tioce *)ce_kern->ce_common->ce_pcibus.bs_base;
+	ps = ce_kern->ce_ate3240_pagesize;
+	ate_index = ATE_PAGE(base, ps);
+	last_ate = ate_index + ATE_NPAGES(base, limit-base+1, ps) - 1;
+
+	if (ate_index < 64)
+		ate_index = 64;
+
+	while (ate_index <= last_ate) {
+		u64 ate;
+
+		ate = ATE_MAKE(0xdeadbeef, ps);
+		ce_kern->ce_ate3240_shadow[ate_index] = ate;
+		tioce_mmr_storei(ce_kern, &ce_mmr->ce_ure_ate3240[ate_index],
+				 ate);
+		ate_index++;
+	}
+}
+
+/**
  * tioce_kern_init - init kernel structures related to a given TIOCE
  * @tioce_common: ptr to a cached tioce_common struct that originated in prom
- */ static struct tioce_kernel *
+ */
+static struct tioce_kernel *
 tioce_kern_init(struct tioce_common *tioce_common)
 {
 	int i;
+	int ps;
+	int dev;
 	u32 tmp;
+	unsigned int seg, bus;
 	struct tioce *tioce_mmr;
 	struct tioce_kernel *tioce_kern;
 
@@ -572,9 +736,10 @@ tioce_kern_init(struct tioce_common *tioce_common)
 	 * here to use pci_read_config_xxx() so use the raw_pci_ops vector.
 	 */
 
-	raw_pci_ops->read(tioce_common->ce_pcibus.bs_persist_segment,
-			  tioce_common->ce_pcibus.bs_persist_busnum,
-			  PCI_DEVFN(2, 0), PCI_SECONDARY_BUS, 1, &tmp);
+	seg = tioce_common->ce_pcibus.bs_persist_segment;
+	bus = tioce_common->ce_pcibus.bs_persist_busnum;
+
+	raw_pci_ops->read(seg, bus, PCI_DEVFN(2, 0), PCI_SECONDARY_BUS, 1,&tmp);
 	tioce_kern->ce_port1_secondary = (u8) tmp;
 
 	/*
@@ -583,18 +748,76 @@ tioce_kern_init(struct tioce_common *tioce_common)
 	 */
 
 	tioce_mmr = (struct tioce *)tioce_common->ce_pcibus.bs_base;
-	__sn_clrq_relaxed(&tioce_mmr->ce_ure_page_map, CE_URE_PAGESIZE_MASK);
-	__sn_setq_relaxed(&tioce_mmr->ce_ure_page_map, CE_URE_256K_PAGESIZE);
-	tioce_kern->ce_ate3240_pagesize = KB(256);
+	tioce_mmr_clri(tioce_kern, &tioce_mmr->ce_ure_page_map,
+		       CE_URE_PAGESIZE_MASK);
+	tioce_mmr_seti(tioce_kern, &tioce_mmr->ce_ure_page_map,
+		       CE_URE_256K_PAGESIZE);
+	ps = tioce_kern->ce_ate3240_pagesize = KB(256);
 
 	for (i = 0; i < TIOCE_NUM_M40_ATES; i++) {
 		tioce_kern->ce_ate40_shadow[i] = 0;
-		writeq(0, &tioce_mmr->ce_ure_ate40[i]);
+		tioce_mmr_storei(tioce_kern, &tioce_mmr->ce_ure_ate40[i], 0);
 	}
 
 	for (i = 0; i < TIOCE_NUM_M3240_ATES; i++) {
 		tioce_kern->ce_ate3240_shadow[i] = 0;
-		writeq(0, &tioce_mmr->ce_ure_ate3240[i]);
+		tioce_mmr_storei(tioce_kern, &tioce_mmr->ce_ure_ate3240[i], 0);
+	}
+
+	/*
+	 * Reserve ATE's corresponding to reserved address ranges.  These
+	 * include:
+	 *
+	 *	Memory space covered by each PPB mem base/limit register
+	 *	Memory space covered by each PPB prefetch base/limit register
+	 *
+	 * These bus ranges are for pio (downstream) traffic only, and so
+	 * cannot be used for DMA.
+	 */
+
+	for (dev = 1; dev <= 2; dev++) {
+		u64 base, limit;
+
+		/* mem base/limit */
+
+		raw_pci_ops->read(seg, bus, PCI_DEVFN(dev, 0),
+				  PCI_MEMORY_BASE, 2, &tmp);
+		base = (u64)tmp << 16;
+
+		raw_pci_ops->read(seg, bus, PCI_DEVFN(dev, 0),
+				  PCI_MEMORY_LIMIT, 2, &tmp);
+		limit = (u64)tmp << 16;
+		limit |= 0xfffffUL;
+
+		if (base < limit)
+			tioce_reserve_m32(tioce_kern, base, limit);
+
+		/*
+		 * prefetch mem base/limit.  The tioce ppb's have 64-bit
+		 * decoders, so read the upper portions w/o checking the
+		 * attributes.
+		 */
+
+		raw_pci_ops->read(seg, bus, PCI_DEVFN(dev, 0),
+				  PCI_PREF_MEMORY_BASE, 2, &tmp);
+		base = ((u64)tmp & PCI_PREF_RANGE_MASK) << 16;
+
+		raw_pci_ops->read(seg, bus, PCI_DEVFN(dev, 0),
+				  PCI_PREF_BASE_UPPER32, 4, &tmp);
+		base |= (u64)tmp << 32;
+
+		raw_pci_ops->read(seg, bus, PCI_DEVFN(dev, 0),
+				  PCI_PREF_MEMORY_LIMIT, 2, &tmp);
+
+		limit = ((u64)tmp & PCI_PREF_RANGE_MASK) << 16;
+		limit |= 0xfffffUL;
+
+		raw_pci_ops->read(seg, bus, PCI_DEVFN(dev, 0),
+				  PCI_PREF_LIMIT_UPPER32, 4, &tmp);
+		limit |= (u64)tmp << 32;
+
+		if ((base < limit) && TIOCE_M32_ADDR(base))
+			tioce_reserve_m32(tioce_kern, base, limit);
 	}
 
 	return tioce_kern;
@@ -614,6 +837,7 @@ tioce_force_interrupt(struct sn_irq_info *sn_irq_info)
 {
 	struct pcidev_info *pcidev_info;
 	struct tioce_common *ce_common;
+	struct tioce_kernel *ce_kern;
 	struct tioce *ce_mmr;
 	u64 force_int_val;
 
@@ -629,6 +853,29 @@ tioce_force_interrupt(struct sn_irq_info *sn_irq_info)
 
 	ce_common = (struct tioce_common *)pcidev_info->pdi_pcibus_info;
 	ce_mmr = (struct tioce *)ce_common->ce_pcibus.bs_base;
+	ce_kern = (struct tioce_kernel *)ce_common->ce_kernel_private;
+
+	/*
+	 * TIOCE Rev A workaround (PV 945826), force an interrupt by writing
+	 * the TIO_INTx register directly (1/26/2006)
+	 */
+	if (ce_common->ce_rev == TIOCE_REV_A) {
+		u64 int_bit_mask = (1ULL << sn_irq_info->irq_int_bit);
+		u64 status;
+
+		tioce_mmr_load(ce_kern, &ce_mmr->ce_adm_int_status, &status);
+		if (status & int_bit_mask) {
+			u64 force_irq = (1 << 8) | sn_irq_info->irq_irq;
+			u64 ctalk = sn_irq_info->irq_xtalkaddr;
+			u64 nasid, offset;
+
+			nasid = (ctalk & CTALK_NASID_MASK) >> CTALK_NASID_SHFT;
+			offset = (ctalk & CTALK_NODE_OFFSET);
+			HUB_S(TIO_IOSPACE_ADDR(nasid, offset), force_irq);
+		}
+
+		return;
+	}
 
 	/*
 	 * irq_int_bit is originally set up by prom, and holds the interrupt
@@ -666,7 +913,7 @@ tioce_force_interrupt(struct sn_irq_info *sn_irq_info)
 	default:
 		return;
 	}
-	writeq(force_int_val, &ce_mmr->ce_adm_force_int);
+	tioce_mmr_storei(ce_kern, &ce_mmr->ce_adm_force_int, force_int_val);
 }
 
 /**
@@ -685,6 +932,7 @@ tioce_target_interrupt(struct sn_irq_info *sn_irq_info)
 {
 	struct pcidev_info *pcidev_info;
 	struct tioce_common *ce_common;
+	struct tioce_kernel *ce_kern;
 	struct tioce *ce_mmr;
 	int bit;
 	u64 vector;
@@ -695,14 +943,15 @@ tioce_target_interrupt(struct sn_irq_info *sn_irq_info)
 
 	ce_common = (struct tioce_common *)pcidev_info->pdi_pcibus_info;
 	ce_mmr = (struct tioce *)ce_common->ce_pcibus.bs_base;
+	ce_kern = (struct tioce_kernel *)ce_common->ce_kernel_private;
 
 	bit = sn_irq_info->irq_int_bit;
 
-	__sn_setq_relaxed(&ce_mmr->ce_adm_int_mask, (1UL << bit));
+	tioce_mmr_seti(ce_kern, &ce_mmr->ce_adm_int_mask, (1UL << bit));
 	vector = (u64)sn_irq_info->irq_irq << INTR_VECTOR_SHFT;
 	vector |= sn_irq_info->irq_xtalkaddr;
-	writeq(vector, &ce_mmr->ce_adm_int_dest[bit]);
-	__sn_clrq_relaxed(&ce_mmr->ce_adm_int_mask, (1UL << bit));
+	tioce_mmr_storei(ce_kern, &ce_mmr->ce_adm_int_dest[bit], vector);
+	tioce_mmr_clri(ce_kern, &ce_mmr->ce_adm_int_mask, (1UL << bit));
 
 	tioce_force_interrupt(sn_irq_info);
 }
@@ -721,7 +970,11 @@ tioce_target_interrupt(struct sn_irq_info *sn_irq_info)
 static void *
 tioce_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *controller)
 {
+	int my_nasid;
+	cnodeid_t my_cnode, mem_cnode;
 	struct tioce_common *tioce_common;
+	struct tioce_kernel *tioce_kern;
+	struct tioce *tioce_mmr;
 
 	/*
 	 * Allocate kernel bus soft and copy from prom.
@@ -734,11 +987,23 @@ tioce_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
 	memcpy(tioce_common, prom_bussoft, sizeof(struct tioce_common));
 	tioce_common->ce_pcibus.bs_base |= __IA64_UNCACHED_OFFSET;
 
-	if (tioce_kern_init(tioce_common) == NULL) {
+	tioce_kern = tioce_kern_init(tioce_common);
+	if (tioce_kern == NULL) {
 		kfree(tioce_common);
 		return NULL;
 	}
 
+	/*
+	 * Clear out any transient errors before registering the error
+	 * interrupt handler.
+	 */
+
+	tioce_mmr = (struct tioce *)tioce_common->ce_pcibus.bs_base;
+	tioce_mmr_seti(tioce_kern, &tioce_mmr->ce_adm_int_status_alias, ~0ULL);
+	tioce_mmr_seti(tioce_kern, &tioce_mmr->ce_adm_error_summary_alias,
+		       ~0ULL);
+	tioce_mmr_seti(tioce_kern, &tioce_mmr->ce_dre_comp_err_addr, ~0ULL);
+
 	if (request_irq(SGI_PCIASIC_ERROR,
 			tioce_error_intr_handler,
 			SA_SHIRQ, "TIOCE error", (void *)tioce_common))
@@ -750,6 +1015,21 @@ tioce_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
 			tioce_common->ce_pcibus.bs_persist_segment,
 			tioce_common->ce_pcibus.bs_persist_busnum);
 
+	/*
+	 * identify closest nasid for memory allocations
+	 */
+
+	my_nasid = NASID_GET(tioce_common->ce_pcibus.bs_base);
+	my_cnode = nasid_to_cnodeid(my_nasid);
+
+	if (sn_hwperf_get_nearest_node(my_cnode, &mem_cnode, NULL) < 0) {
+		printk(KERN_WARNING "tioce_bus_fixup: failed to find "
+		       "closest node with MEM to TIO node %d\n", my_cnode);
+		mem_cnode = (cnodeid_t)-1; /* use any node */
+	}
+
+	controller->node = mem_cnode;
+
 	return tioce_common;
 }
 