aboutsummaryrefslogtreecommitdiffstats
path: root/arch/ia64/sn/kernel
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@ppc970.osdl.org>2005-04-27 13:05:42 -0400
committerLinus Torvalds <torvalds@ppc970.osdl.org>2005-04-27 13:05:42 -0400
commitfc67b16ecaf6ebde04096030c268adddade023f1 (patch)
tree1cce42cdca1fc9e4ec41b9f7f72c60e343cebca7 /arch/ia64/sn/kernel
parente8108c98dd6d65613fa0ec9d2300f89c48d554bf (diff)
parent2d29306b231a1a0e7a70166c10e4c0f917b21334 (diff)
Automatic merge of rsync://rsync.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6.git
Diffstat (limited to 'arch/ia64/sn/kernel')
-rw-r--r--arch/ia64/sn/kernel/Makefile1
-rw-r--r--arch/ia64/sn/kernel/bte.c20
-rw-r--r--arch/ia64/sn/kernel/bte_error.c76
-rw-r--r--arch/ia64/sn/kernel/huberror.c9
-rw-r--r--arch/ia64/sn/kernel/io_init.c78
-rw-r--r--arch/ia64/sn/kernel/irq.c19
-rw-r--r--arch/ia64/sn/kernel/setup.c9
-rw-r--r--arch/ia64/sn/kernel/sn2/sn_hwperf.c112
-rw-r--r--arch/ia64/sn/kernel/tiocx.c548
9 files changed, 798 insertions, 74 deletions
diff --git a/arch/ia64/sn/kernel/Makefile b/arch/ia64/sn/kernel/Makefile
index 6c7f4d9e8ea0..4f381fb25049 100644
--- a/arch/ia64/sn/kernel/Makefile
+++ b/arch/ia64/sn/kernel/Makefile
@@ -10,3 +10,4 @@
10obj-y += setup.o bte.o bte_error.o irq.o mca.o idle.o \ 10obj-y += setup.o bte.o bte_error.o irq.o mca.o idle.o \
11 huberror.o io_init.o iomv.o klconflib.o sn2/ 11 huberror.o io_init.o iomv.o klconflib.o sn2/
12obj-$(CONFIG_IA64_GENERIC) += machvec.o 12obj-$(CONFIG_IA64_GENERIC) += machvec.o
13obj-$(CONFIG_SGI_TIOCX) += tiocx.o
diff --git a/arch/ia64/sn/kernel/bte.c b/arch/ia64/sn/kernel/bte.c
index ce0bc4085eae..647deae9bfcd 100644
--- a/arch/ia64/sn/kernel/bte.c
+++ b/arch/ia64/sn/kernel/bte.c
@@ -3,7 +3,7 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. 6 * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
7 */ 7 */
8 8
9#include <linux/config.h> 9#include <linux/config.h>
@@ -170,10 +170,6 @@ retry_bteop:
170 /* Initialize the notification to a known value. */ 170 /* Initialize the notification to a known value. */
171 *bte->most_rcnt_na = BTE_WORD_BUSY; 171 *bte->most_rcnt_na = BTE_WORD_BUSY;
172 172
173 /* Set the status reg busy bit and transfer length */
174 BTE_PRINTKV(("IBLS = 0x%lx\n", IBLS_BUSY | transfer_size));
175 BTE_LNSTAT_STORE(bte, IBLS_BUSY | transfer_size);
176
177 /* Set the source and destination registers */ 173 /* Set the source and destination registers */
178 BTE_PRINTKV(("IBSA = 0x%lx)\n", (TO_PHYS(src)))); 174 BTE_PRINTKV(("IBSA = 0x%lx)\n", (TO_PHYS(src))));
179 BTE_SRC_STORE(bte, TO_PHYS(src)); 175 BTE_SRC_STORE(bte, TO_PHYS(src));
@@ -188,7 +184,7 @@ retry_bteop:
188 184
189 /* Initiate the transfer */ 185 /* Initiate the transfer */
190 BTE_PRINTK(("IBCT = 0x%lx)\n", BTE_VALID_MODE(mode))); 186 BTE_PRINTK(("IBCT = 0x%lx)\n", BTE_VALID_MODE(mode)));
191 BTE_CTRL_STORE(bte, BTE_VALID_MODE(mode)); 187 BTE_START_TRANSFER(bte, transfer_size, BTE_VALID_MODE(mode));
192 188
193 itc_end = ia64_get_itc() + (40000000 * local_cpu_data->cyc_per_usec); 189 itc_end = ia64_get_itc() + (40000000 * local_cpu_data->cyc_per_usec);
194 190
@@ -429,10 +425,16 @@ void bte_init_node(nodepda_t * mynodepda, cnodeid_t cnode)
429 mynodepda->bte_recovery_timer.data = (unsigned long)mynodepda; 425 mynodepda->bte_recovery_timer.data = (unsigned long)mynodepda;
430 426
431 for (i = 0; i < BTES_PER_NODE; i++) { 427 for (i = 0; i < BTES_PER_NODE; i++) {
428 u64 *base_addr;
429
432 /* Which link status register should we use? */ 430 /* Which link status register should we use? */
433 unsigned long link_status = (i == 0 ? IIO_IBLS0 : IIO_IBLS1); 431 base_addr = (u64 *)
434 mynodepda->bte_if[i].bte_base_addr = (u64 *) 432 REMOTE_HUB_ADDR(cnodeid_to_nasid(cnode), BTE_BASE_ADDR(i));
435 REMOTE_HUB_ADDR(cnodeid_to_nasid(cnode), link_status); 433 mynodepda->bte_if[i].bte_base_addr = base_addr;
434 mynodepda->bte_if[i].bte_source_addr = BTE_SOURCE_ADDR(base_addr);
435 mynodepda->bte_if[i].bte_destination_addr = BTE_DEST_ADDR(base_addr);
436 mynodepda->bte_if[i].bte_control_addr = BTE_CTRL_ADDR(base_addr);
437 mynodepda->bte_if[i].bte_notify_addr = BTE_NOTIF_ADDR(base_addr);
436 438
437 /* 439 /*
438 * Initialize the notification and spinlock 440 * Initialize the notification and spinlock
diff --git a/arch/ia64/sn/kernel/bte_error.c b/arch/ia64/sn/kernel/bte_error.c
index fd104312c6bd..fcbc748ae433 100644
--- a/arch/ia64/sn/kernel/bte_error.c
+++ b/arch/ia64/sn/kernel/bte_error.c
@@ -3,7 +3,7 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved. 6 * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
7 */ 7 */
8 8
9#include <linux/types.h> 9#include <linux/types.h>
@@ -33,48 +33,28 @@ void bte_error_handler(unsigned long);
33 * Wait until all BTE related CRBs are completed 33 * Wait until all BTE related CRBs are completed
34 * and then reset the interfaces. 34 * and then reset the interfaces.
35 */ 35 */
36void bte_error_handler(unsigned long _nodepda) 36void shub1_bte_error_handler(unsigned long _nodepda)
37{ 37{
38 struct nodepda_s *err_nodepda = (struct nodepda_s *)_nodepda; 38 struct nodepda_s *err_nodepda = (struct nodepda_s *)_nodepda;
39 spinlock_t *recovery_lock = &err_nodepda->bte_recovery_lock;
40 struct timer_list *recovery_timer = &err_nodepda->bte_recovery_timer; 39 struct timer_list *recovery_timer = &err_nodepda->bte_recovery_timer;
41 nasid_t nasid; 40 nasid_t nasid;
42 int i; 41 int i;
43 int valid_crbs; 42 int valid_crbs;
44 unsigned long irq_flags;
45 volatile u64 *notify;
46 bte_result_t bh_error;
47 ii_imem_u_t imem; /* II IMEM Register */ 43 ii_imem_u_t imem; /* II IMEM Register */
48 ii_icrb0_d_u_t icrbd; /* II CRB Register D */ 44 ii_icrb0_d_u_t icrbd; /* II CRB Register D */
49 ii_ibcr_u_t ibcr; 45 ii_ibcr_u_t ibcr;
50 ii_icmr_u_t icmr; 46 ii_icmr_u_t icmr;
51 ii_ieclr_u_t ieclr; 47 ii_ieclr_u_t ieclr;
52 48
53 BTE_PRINTK(("bte_error_handler(%p) - %d\n", err_nodepda, 49 BTE_PRINTK(("shub1_bte_error_handler(%p) - %d\n", err_nodepda,
54 smp_processor_id())); 50 smp_processor_id()));
55 51
56 spin_lock_irqsave(recovery_lock, irq_flags);
57
58 if ((err_nodepda->bte_if[0].bh_error == BTE_SUCCESS) && 52 if ((err_nodepda->bte_if[0].bh_error == BTE_SUCCESS) &&
59 (err_nodepda->bte_if[1].bh_error == BTE_SUCCESS)) { 53 (err_nodepda->bte_if[1].bh_error == BTE_SUCCESS)) {
60 BTE_PRINTK(("eh:%p:%d Nothing to do.\n", err_nodepda, 54 BTE_PRINTK(("eh:%p:%d Nothing to do.\n", err_nodepda,
61 smp_processor_id())); 55 smp_processor_id()));
62 spin_unlock_irqrestore(recovery_lock, irq_flags);
63 return; 56 return;
64 } 57 }
65 /*
66 * Lock all interfaces on this node to prevent new transfers
67 * from being queued.
68 */
69 for (i = 0; i < BTES_PER_NODE; i++) {
70 if (err_nodepda->bte_if[i].cleanup_active) {
71 continue;
72 }
73 spin_lock(&err_nodepda->bte_if[i].spinlock);
74 BTE_PRINTK(("eh:%p:%d locked %d\n", err_nodepda,
75 smp_processor_id(), i));
76 err_nodepda->bte_if[i].cleanup_active = 1;
77 }
78 58
79 /* Determine information about our hub */ 59 /* Determine information about our hub */
80 nasid = cnodeid_to_nasid(err_nodepda->bte_if[0].bte_cnode); 60 nasid = cnodeid_to_nasid(err_nodepda->bte_if[0].bte_cnode);
@@ -101,7 +81,6 @@ void bte_error_handler(unsigned long _nodepda)
101 mod_timer(recovery_timer, HZ * 5); 81 mod_timer(recovery_timer, HZ * 5);
102 BTE_PRINTK(("eh:%p:%d Marked Giving up\n", err_nodepda, 82 BTE_PRINTK(("eh:%p:%d Marked Giving up\n", err_nodepda,
103 smp_processor_id())); 83 smp_processor_id()));
104 spin_unlock_irqrestore(recovery_lock, irq_flags);
105 return; 84 return;
106 } 85 }
107 if (icmr.ii_icmr_fld_s.i_crb_vld != 0) { 86 if (icmr.ii_icmr_fld_s.i_crb_vld != 0) {
@@ -120,8 +99,6 @@ void bte_error_handler(unsigned long _nodepda)
120 BTE_PRINTK(("eh:%p:%d Valid %d, Giving up\n", 99 BTE_PRINTK(("eh:%p:%d Valid %d, Giving up\n",
121 err_nodepda, smp_processor_id(), 100 err_nodepda, smp_processor_id(),
122 i)); 101 i));
123 spin_unlock_irqrestore(recovery_lock,
124 irq_flags);
125 return; 102 return;
126 } 103 }
127 } 104 }
@@ -146,6 +123,51 @@ void bte_error_handler(unsigned long _nodepda)
146 ibcr.ii_ibcr_fld_s.i_soft_reset = 1; 123 ibcr.ii_ibcr_fld_s.i_soft_reset = 1;
147 REMOTE_HUB_S(nasid, IIO_IBCR, ibcr.ii_ibcr_regval); 124 REMOTE_HUB_S(nasid, IIO_IBCR, ibcr.ii_ibcr_regval);
148 125
126 del_timer(recovery_timer);
127}
128
129/*
130 * Wait until all BTE related CRBs are completed
131 * and then reset the interfaces.
132 */
133void bte_error_handler(unsigned long _nodepda)
134{
135 struct nodepda_s *err_nodepda = (struct nodepda_s *)_nodepda;
136 spinlock_t *recovery_lock = &err_nodepda->bte_recovery_lock;
137 int i;
138 nasid_t nasid;
139 unsigned long irq_flags;
140 volatile u64 *notify;
141 bte_result_t bh_error;
142
143 BTE_PRINTK(("bte_error_handler(%p) - %d\n", err_nodepda,
144 smp_processor_id()));
145
146 spin_lock_irqsave(recovery_lock, irq_flags);
147
148 /*
149 * Lock all interfaces on this node to prevent new transfers
150 * from being queued.
151 */
152 for (i = 0; i < BTES_PER_NODE; i++) {
153 if (err_nodepda->bte_if[i].cleanup_active) {
154 continue;
155 }
156 spin_lock(&err_nodepda->bte_if[i].spinlock);
157 BTE_PRINTK(("eh:%p:%d locked %d\n", err_nodepda,
158 smp_processor_id(), i));
159 err_nodepda->bte_if[i].cleanup_active = 1;
160 }
161
162 if (is_shub1()) {
163 shub1_bte_error_handler(_nodepda);
164 } else {
165 nasid = cnodeid_to_nasid(err_nodepda->bte_if[0].bte_cnode);
166
167 if (ia64_sn_bte_recovery(nasid))
168 panic("bte_error_handler(): Fatal BTE Error");
169 }
170
149 for (i = 0; i < BTES_PER_NODE; i++) { 171 for (i = 0; i < BTES_PER_NODE; i++) {
150 bh_error = err_nodepda->bte_if[i].bh_error; 172 bh_error = err_nodepda->bte_if[i].bh_error;
151 if (bh_error != BTE_SUCCESS) { 173 if (bh_error != BTE_SUCCESS) {
@@ -165,8 +187,6 @@ void bte_error_handler(unsigned long _nodepda)
165 spin_unlock(&err_nodepda->bte_if[i].spinlock); 187 spin_unlock(&err_nodepda->bte_if[i].spinlock);
166 } 188 }
167 189
168 del_timer(recovery_timer);
169
170 spin_unlock_irqrestore(recovery_lock, irq_flags); 190 spin_unlock_irqrestore(recovery_lock, irq_flags);
171} 191}
172 192
diff --git a/arch/ia64/sn/kernel/huberror.c b/arch/ia64/sn/kernel/huberror.c
index 2bdf684c5066..5c39b43ba3c0 100644
--- a/arch/ia64/sn/kernel/huberror.c
+++ b/arch/ia64/sn/kernel/huberror.c
@@ -3,7 +3,7 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (C) 1992 - 1997, 2000,2002-2004 Silicon Graphics, Inc. All rights reserved. 6 * Copyright (C) 1992 - 1997, 2000,2002-2005 Silicon Graphics, Inc. All rights reserved.
7 */ 7 */
8 8
9#include <linux/types.h> 9#include <linux/types.h>
@@ -38,8 +38,11 @@ static irqreturn_t hub_eint_handler(int irq, void *arg, struct pt_regs *ep)
38 if ((int)ret_stuff.v0) 38 if ((int)ret_stuff.v0)
39 panic("hubii_eint_handler(): Fatal TIO Error"); 39 panic("hubii_eint_handler(): Fatal TIO Error");
40 40
41 if (!(nasid & 1)) /* Not a TIO, handle CRB errors */ 41 if (is_shub1()) {
42 (void)hubiio_crb_error_handler(hubdev_info); 42 if (!(nasid & 1)) /* Not a TIO, handle CRB errors */
43 (void)hubiio_crb_error_handler(hubdev_info);
44 } else
45 bte_error_handler((unsigned long)NODEPDA(nasid_to_cnodeid(nasid)));
43 46
44 return IRQ_HANDLED; 47 return IRQ_HANDLED;
45} 48}
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c
index 001880812b7c..18160a06a8c9 100644
--- a/arch/ia64/sn/kernel/io_init.c
+++ b/arch/ia64/sn/kernel/io_init.c
@@ -11,14 +11,15 @@
11#include <asm/sn/types.h> 11#include <asm/sn/types.h>
12#include <asm/sn/sn_sal.h> 12#include <asm/sn/sn_sal.h>
13#include <asm/sn/addrs.h> 13#include <asm/sn/addrs.h>
14#include "pci/pcibus_provider_defs.h" 14#include <asm/sn/pcibus_provider_defs.h>
15#include "pci/pcidev.h" 15#include <asm/sn/pcidev.h>
16#include "pci/pcibr_provider.h" 16#include "pci/pcibr_provider.h"
17#include "xtalk/xwidgetdev.h" 17#include "xtalk/xwidgetdev.h"
18#include <asm/sn/geo.h> 18#include <asm/sn/geo.h>
19#include "xtalk/hubdev.h" 19#include "xtalk/hubdev.h"
20#include <asm/sn/io.h> 20#include <asm/sn/io.h>
21#include <asm/sn/simulator.h> 21#include <asm/sn/simulator.h>
22#include <asm/sn/tioca_provider.h>
22 23
23char master_baseio_wid; 24char master_baseio_wid;
24nasid_t master_nasid = INVALID_NASID; /* Partition Master */ 25nasid_t master_nasid = INVALID_NASID; /* Partition Master */
@@ -34,6 +35,37 @@ struct brick {
34 35
35int sn_ioif_inited = 0; /* SN I/O infrastructure initialized? */ 36int sn_ioif_inited = 0; /* SN I/O infrastructure initialized? */
36 37
38struct sn_pcibus_provider *sn_pci_provider[PCIIO_ASIC_MAX_TYPES]; /* indexed by asic type */
39
40/*
41 * Hooks and struct for unsupported pci providers
42 */
43
44static dma_addr_t
45sn_default_pci_map(struct pci_dev *pdev, unsigned long paddr, size_t size)
46{
47 return 0;
48}
49
50static void
51sn_default_pci_unmap(struct pci_dev *pdev, dma_addr_t addr, int direction)
52{
53 return;
54}
55
56static void *
57sn_default_pci_bus_fixup(struct pcibus_bussoft *soft)
58{
59 return NULL;
60}
61
62static struct sn_pcibus_provider sn_pci_default_provider = {
63 .dma_map = sn_default_pci_map,
64 .dma_map_consistent = sn_default_pci_map,
65 .dma_unmap = sn_default_pci_unmap,
66 .bus_fixup = sn_default_pci_bus_fixup,
67};
68
37/* 69/*
38 * Retrieve the DMA Flush List given nasid. This list is needed 70 * Retrieve the DMA Flush List given nasid. This list is needed
39 * to implement the WAR - Flush DMA data on PIO Reads. 71 * to implement the WAR - Flush DMA data on PIO Reads.
@@ -201,6 +233,7 @@ static void sn_pci_fixup_slot(struct pci_dev *dev)
201 struct sn_irq_info *sn_irq_info; 233 struct sn_irq_info *sn_irq_info;
202 struct pci_dev *host_pci_dev; 234 struct pci_dev *host_pci_dev;
203 int status = 0; 235 int status = 0;
236 struct pcibus_bussoft *bs;
204 237
205 dev->sysdata = kmalloc(sizeof(struct pcidev_info), GFP_KERNEL); 238 dev->sysdata = kmalloc(sizeof(struct pcidev_info), GFP_KERNEL);
206 if (SN_PCIDEV_INFO(dev) <= 0) 239 if (SN_PCIDEV_INFO(dev) <= 0)
@@ -241,6 +274,7 @@ static void sn_pci_fixup_slot(struct pci_dev *dev)
241 } 274 }
242 275
243 /* set up host bus linkages */ 276 /* set up host bus linkages */
277 bs = SN_PCIBUS_BUSSOFT(dev->bus);
244 host_pci_dev = 278 host_pci_dev =
245 pci_find_slot(SN_PCIDEV_INFO(dev)->pdi_slot_host_handle >> 32, 279 pci_find_slot(SN_PCIDEV_INFO(dev)->pdi_slot_host_handle >> 32,
246 SN_PCIDEV_INFO(dev)-> 280 SN_PCIDEV_INFO(dev)->
@@ -248,10 +282,16 @@ static void sn_pci_fixup_slot(struct pci_dev *dev)
248 SN_PCIDEV_INFO(dev)->pdi_host_pcidev_info = 282 SN_PCIDEV_INFO(dev)->pdi_host_pcidev_info =
249 SN_PCIDEV_INFO(host_pci_dev); 283 SN_PCIDEV_INFO(host_pci_dev);
250 SN_PCIDEV_INFO(dev)->pdi_linux_pcidev = dev; 284 SN_PCIDEV_INFO(dev)->pdi_linux_pcidev = dev;
251 SN_PCIDEV_INFO(dev)->pdi_pcibus_info = SN_PCIBUS_BUSSOFT(dev->bus); 285 SN_PCIDEV_INFO(dev)->pdi_pcibus_info = bs;
286
287 if (bs && bs->bs_asic_type < PCIIO_ASIC_MAX_TYPES) {
288 SN_PCIDEV_BUSPROVIDER(dev) = sn_pci_provider[bs->bs_asic_type];
289 } else {
290 SN_PCIDEV_BUSPROVIDER(dev) = &sn_pci_default_provider;
291 }
252 292
253 /* Only set up IRQ stuff if this device has a host bus context */ 293 /* Only set up IRQ stuff if this device has a host bus context */
254 if (SN_PCIDEV_BUSSOFT(dev) && sn_irq_info->irq_irq) { 294 if (bs && sn_irq_info->irq_irq) {
255 SN_PCIDEV_INFO(dev)->pdi_sn_irq_info = sn_irq_info; 295 SN_PCIDEV_INFO(dev)->pdi_sn_irq_info = sn_irq_info;
256 dev->irq = SN_PCIDEV_INFO(dev)->pdi_sn_irq_info->irq_irq; 296 dev->irq = SN_PCIDEV_INFO(dev)->pdi_sn_irq_info->irq_irq;
257 sn_irq_fixup(dev, sn_irq_info); 297 sn_irq_fixup(dev, sn_irq_info);
@@ -271,6 +311,7 @@ static void sn_pci_controller_fixup(int segment, int busnum)
271 struct pcibus_bussoft *prom_bussoft_ptr; 311 struct pcibus_bussoft *prom_bussoft_ptr;
272 struct hubdev_info *hubdev_info; 312 struct hubdev_info *hubdev_info;
273 void *provider_soft; 313 void *provider_soft;
314 struct sn_pcibus_provider *provider;
274 315
275 status = 316 status =
276 sal_get_pcibus_info((u64) segment, (u64) busnum, 317 sal_get_pcibus_info((u64) segment, (u64) busnum,
@@ -291,16 +332,22 @@ static void sn_pci_controller_fixup(int segment, int busnum)
291 /* 332 /*
292 * Per-provider fixup. Copies the contents from prom to local 333 * Per-provider fixup. Copies the contents from prom to local
293 * area and links SN_PCIBUS_BUSSOFT(). 334 * area and links SN_PCIBUS_BUSSOFT().
294 *
295 * Note: Provider is responsible for ensuring that prom_bussoft_ptr
296 * represents an asic-type that it can handle.
297 */ 335 */
298 336
299 if (prom_bussoft_ptr->bs_asic_type == PCIIO_ASIC_TYPE_PPB) { 337 if (prom_bussoft_ptr->bs_asic_type >= PCIIO_ASIC_MAX_TYPES) {
300 return; /* no further fixup necessary */ 338 return; /* unsupported asic type */
339 }
340
341 provider = sn_pci_provider[prom_bussoft_ptr->bs_asic_type];
342 if (provider == NULL) {
343 return; /* no provider registerd for this asic */
344 }
345
346 provider_soft = NULL;
347 if (provider->bus_fixup) {
348 provider_soft = (*provider->bus_fixup) (prom_bussoft_ptr);
301 } 349 }
302 350
303 provider_soft = pcibr_bus_fixup(prom_bussoft_ptr);
304 if (provider_soft == NULL) { 351 if (provider_soft == NULL) {
305 return; /* fixup failed or not applicable */ 352 return; /* fixup failed or not applicable */
306 } 353 }
@@ -339,6 +386,17 @@ static int __init sn_pci_init(void)
339 return 0; 386 return 0;
340 387
341 /* 388 /*
389 * prime sn_pci_provider[]. Individial provider init routines will
390 * override their respective default entries.
391 */
392
393 for (i = 0; i < PCIIO_ASIC_MAX_TYPES; i++)
394 sn_pci_provider[i] = &sn_pci_default_provider;
395
396 pcibr_init_provider();
397 tioca_init_provider();
398
399 /*
342 * This is needed to avoid bounce limit checks in the blk layer 400 * This is needed to avoid bounce limit checks in the blk layer
343 */ 401 */
344 ia64_max_iommu_merge_mask = ~PAGE_MASK; 402 ia64_max_iommu_merge_mask = ~PAGE_MASK;
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
index 3be44724f6c8..0f4e8138658f 100644
--- a/arch/ia64/sn/kernel/irq.c
+++ b/arch/ia64/sn/kernel/irq.c
@@ -13,8 +13,8 @@
13#include <asm/sn/addrs.h> 13#include <asm/sn/addrs.h>
14#include <asm/sn/arch.h> 14#include <asm/sn/arch.h>
15#include "xtalk/xwidgetdev.h" 15#include "xtalk/xwidgetdev.h"
16#include "pci/pcibus_provider_defs.h" 16#include <asm/sn/pcibus_provider_defs.h>
17#include "pci/pcidev.h" 17#include <asm/sn/pcidev.h>
18#include "pci/pcibr_provider.h" 18#include "pci/pcibr_provider.h"
19#include <asm/sn/shub_mmr.h> 19#include <asm/sn/shub_mmr.h>
20#include <asm/sn/sn_sal.h> 20#include <asm/sn/sn_sal.h>
@@ -82,20 +82,9 @@ static void sn_ack_irq(unsigned int irq)
82 nasid = get_nasid(); 82 nasid = get_nasid();
83 event_occurred = 83 event_occurred =
84 HUB_L((uint64_t *) GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED)); 84 HUB_L((uint64_t *) GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED));
85 if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) { 85 mask = event_occurred & SH_ALL_INT_MASK;
86 mask |= (1 << SH_EVENT_OCCURRED_UART_INT_SHFT);
87 }
88 if (event_occurred & SH_EVENT_OCCURRED_IPI_INT_MASK) {
89 mask |= (1 << SH_EVENT_OCCURRED_IPI_INT_SHFT);
90 }
91 if (event_occurred & SH_EVENT_OCCURRED_II_INT0_MASK) {
92 mask |= (1 << SH_EVENT_OCCURRED_II_INT0_SHFT);
93 }
94 if (event_occurred & SH_EVENT_OCCURRED_II_INT1_MASK) {
95 mask |= (1 << SH_EVENT_OCCURRED_II_INT1_SHFT);
96 }
97 HUB_S((uint64_t *) GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED_ALIAS), 86 HUB_S((uint64_t *) GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED_ALIAS),
98 mask); 87 mask);
99 __set_bit(irq, (volatile void *)pda->sn_in_service_ivecs); 88 __set_bit(irq, (volatile void *)pda->sn_in_service_ivecs);
100 89
101 move_irq(irq); 90 move_irq(irq);
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index f0306b516afb..d35f2a6f9c94 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -29,6 +29,7 @@
29#include <linux/sched.h> 29#include <linux/sched.h>
30#include <linux/root_dev.h> 30#include <linux/root_dev.h>
31#include <linux/nodemask.h> 31#include <linux/nodemask.h>
32#include <linux/pm.h>
32 33
33#include <asm/io.h> 34#include <asm/io.h>
34#include <asm/sal.h> 35#include <asm/sal.h>
@@ -353,6 +354,14 @@ void __init sn_setup(char **cmdline_p)
353 screen_info = sn_screen_info; 354 screen_info = sn_screen_info;
354 355
355 sn_timer_init(); 356 sn_timer_init();
357
358 /*
359 * set pm_power_off to a SAL call to allow
360 * sn machines to power off. The SAL call can be replaced
361 * by an ACPI interface call when ACPI is fully implemented
362 * for sn.
363 */
364 pm_power_off = ia64_sn_power_down;
356} 365}
357 366
358/** 367/**
diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
index 197356460ee1..833e700fdac9 100644
--- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c
+++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
@@ -28,6 +28,7 @@
28#include <linux/vmalloc.h> 28#include <linux/vmalloc.h>
29#include <linux/seq_file.h> 29#include <linux/seq_file.h>
30#include <linux/miscdevice.h> 30#include <linux/miscdevice.h>
31#include <linux/utsname.h>
31#include <linux/cpumask.h> 32#include <linux/cpumask.h>
32#include <linux/smp_lock.h> 33#include <linux/smp_lock.h>
33#include <linux/nodemask.h> 34#include <linux/nodemask.h>
@@ -43,6 +44,7 @@
43#include <asm/sn/module.h> 44#include <asm/sn/module.h>
44#include <asm/sn/geo.h> 45#include <asm/sn/geo.h>
45#include <asm/sn/sn2/sn_hwperf.h> 46#include <asm/sn/sn2/sn_hwperf.h>
47#include <asm/sn/addrs.h>
46 48
47static void *sn_hwperf_salheap = NULL; 49static void *sn_hwperf_salheap = NULL;
48static int sn_hwperf_obj_cnt = 0; 50static int sn_hwperf_obj_cnt = 0;
@@ -81,26 +83,45 @@ out:
81 return e; 83 return e;
82} 84}
83 85
86static int sn_hwperf_location_to_bpos(char *location,
87 int *rack, int *bay, int *slot, int *slab)
88{
89 char type;
90
91 /* first scan for an old style geoid string */
92 if (sscanf(location, "%03d%c%02d#%d",
93 rack, &type, bay, slab) == 4)
94 *slot = 0;
95 else /* scan for a new bladed geoid string */
96 if (sscanf(location, "%03d%c%02d^%02d#%d",
97 rack, &type, bay, slot, slab) != 5)
98 return -1;
99 /* success */
100 return 0;
101}
102
84static int sn_hwperf_geoid_to_cnode(char *location) 103static int sn_hwperf_geoid_to_cnode(char *location)
85{ 104{
86 int cnode; 105 int cnode;
87 geoid_t geoid; 106 geoid_t geoid;
88 moduleid_t module_id; 107 moduleid_t module_id;
89 char type; 108 int rack, bay, slot, slab;
90 int rack, slot, slab; 109 int this_rack, this_bay, this_slot, this_slab;
91 int this_rack, this_slot, this_slab;
92 110
93 if (sscanf(location, "%03d%c%02d#%d", &rack, &type, &slot, &slab) != 4) 111 if (sn_hwperf_location_to_bpos(location, &rack, &bay, &slot, &slab))
94 return -1; 112 return -1;
95 113
96 for (cnode = 0; cnode < numionodes; cnode++) { 114 for (cnode = 0; cnode < numionodes; cnode++) {
97 geoid = cnodeid_get_geoid(cnode); 115 geoid = cnodeid_get_geoid(cnode);
98 module_id = geo_module(geoid); 116 module_id = geo_module(geoid);
99 this_rack = MODULE_GET_RACK(module_id); 117 this_rack = MODULE_GET_RACK(module_id);
100 this_slot = MODULE_GET_BPOS(module_id); 118 this_bay = MODULE_GET_BPOS(module_id);
119 this_slot = geo_slot(geoid);
101 this_slab = geo_slab(geoid); 120 this_slab = geo_slab(geoid);
102 if (rack == this_rack && slot == this_slot && slab == this_slab) 121 if (rack == this_rack && bay == this_bay &&
122 slot == this_slot && slab == this_slab) {
103 break; 123 break;
124 }
104 } 125 }
105 126
106 return cnode < numionodes ? cnode : -1; 127 return cnode < numionodes ? cnode : -1;
@@ -153,11 +174,36 @@ static const char *sn_hwperf_get_slabname(struct sn_hwperf_object_info *obj,
153 return slabname; 174 return slabname;
154} 175}
155 176
177static void print_pci_topology(struct seq_file *s,
178 struct sn_hwperf_object_info *obj, int *ordinal,
179 u64 rack, u64 bay, u64 slot, u64 slab)
180{
181 char *p1;
182 char *p2;
183 char *pg;
184
185 if (!(pg = (char *)get_zeroed_page(GFP_KERNEL)))
186 return; /* ignore */
187 if (ia64_sn_ioif_get_pci_topology(rack, bay, slot, slab,
188 __pa(pg), PAGE_SIZE) == SN_HWPERF_OP_OK) {
189 for (p1=pg; *p1 && p1 < pg + PAGE_SIZE;) {
190 if (!(p2 = strchr(p1, '\n')))
191 break;
192 *p2 = '\0';
193 seq_printf(s, "pcibus %d %s-%s\n",
194 *ordinal, obj->location, p1);
195 (*ordinal)++;
196 p1 = p2 + 1;
197 }
198 }
199 free_page((unsigned long)pg);
200}
201
156static int sn_topology_show(struct seq_file *s, void *d) 202static int sn_topology_show(struct seq_file *s, void *d)
157{ 203{
158 int sz; 204 int sz;
159 int pt; 205 int pt;
160 int e; 206 int e = 0;
161 int i; 207 int i;
162 int j; 208 int j;
163 const char *slabname; 209 const char *slabname;
@@ -169,11 +215,44 @@ static int sn_topology_show(struct seq_file *s, void *d)
169 struct sn_hwperf_object_info *p; 215 struct sn_hwperf_object_info *p;
170 struct sn_hwperf_object_info *obj = d; /* this object */ 216 struct sn_hwperf_object_info *obj = d; /* this object */
171 struct sn_hwperf_object_info *objs = s->private; /* all objects */ 217 struct sn_hwperf_object_info *objs = s->private; /* all objects */
218 int rack, bay, slot, slab;
219 u8 shubtype;
220 u8 system_size;
221 u8 sharing_size;
222 u8 partid;
223 u8 coher;
224 u8 nasid_shift;
225 u8 region_size;
226 u16 nasid_mask;
227 int nasid_msb;
228 int pci_bus_ordinal = 0;
172 229
173 if (obj == objs) { 230 if (obj == objs) {
174 seq_printf(s, "# sn_topology version 1\n"); 231 seq_printf(s, "# sn_topology version 2\n");
175 seq_printf(s, "# objtype ordinal location partition" 232 seq_printf(s, "# objtype ordinal location partition"
176 " [attribute value [, ...]]\n"); 233 " [attribute value [, ...]]\n");
234
235 if (ia64_sn_get_sn_info(0,
236 &shubtype, &nasid_mask, &nasid_shift, &system_size,
237 &sharing_size, &partid, &coher, &region_size))
238 BUG();
239 for (nasid_msb=63; nasid_msb > 0; nasid_msb--) {
240 if (((u64)nasid_mask << nasid_shift) & (1ULL << nasid_msb))
241 break;
242 }
243 seq_printf(s, "partition %u %s local "
244 "shubtype %s, "
245 "nasid_mask 0x%016lx, "
246 "nasid_bits %d:%d, "
247 "system_size %d, "
248 "sharing_size %d, "
249 "coherency_domain %d, "
250 "region_size %d\n",
251
252 partid, system_utsname.nodename,
253 shubtype ? "shub2" : "shub1",
254 (u64)nasid_mask << nasid_shift, nasid_msb, nasid_shift,
255 system_size, sharing_size, coher, region_size);
177 } 256 }
178 257
179 if (SN_HWPERF_FOREIGN(obj)) { 258 if (SN_HWPERF_FOREIGN(obj)) {
@@ -181,7 +260,7 @@ static int sn_topology_show(struct seq_file *s, void *d)
181 return 0; 260 return 0;
182 } 261 }
183 262
184 for (i = 0; obj->name[i]; i++) { 263 for (i = 0; i < SN_HWPERF_MAXSTRING && obj->name[i]; i++) {
185 if (obj->name[i] == ' ') 264 if (obj->name[i] == ' ')
186 obj->name[i] = '_'; 265 obj->name[i] = '_';
187 } 266 }
@@ -221,6 +300,17 @@ static int sn_topology_show(struct seq_file *s, void *d)
221 seq_putc(s, '\n'); 300 seq_putc(s, '\n');
222 } 301 }
223 } 302 }
303
304 /*
305 * PCI busses attached to this node, if any
306 */
307 if (sn_hwperf_location_to_bpos(obj->location,
308 &rack, &bay, &slot, &slab)) {
309 /* export pci bus info */
310 print_pci_topology(s, obj, &pci_bus_ordinal,
311 rack, bay, slot, slab);
312
313 }
224 } 314 }
225 315
226 if (obj->ports) { 316 if (obj->ports) {
@@ -397,6 +487,9 @@ static int sn_hwperf_map_err(int hwperf_err)
397 break; 487 break;
398 488
399 case SN_HWPERF_OP_BUSY: 489 case SN_HWPERF_OP_BUSY:
490 e = -EBUSY;
491 break;
492
400 case SN_HWPERF_OP_RECONFIGURE: 493 case SN_HWPERF_OP_RECONFIGURE:
401 e = -EAGAIN; 494 e = -EAGAIN;
402 break; 495 break;
@@ -549,6 +642,7 @@ sn_hwperf_ioctl(struct inode *in, struct file *fp, u32 op, u64 arg)
549 r = sn_hwperf_op_cpu(&op_info); 642 r = sn_hwperf_op_cpu(&op_info);
550 if (r) { 643 if (r) {
551 r = sn_hwperf_map_err(r); 644 r = sn_hwperf_map_err(r);
645 a.v0 = v0;
552 goto error; 646 goto error;
553 } 647 }
554 break; 648 break;
diff --git a/arch/ia64/sn/kernel/tiocx.c b/arch/ia64/sn/kernel/tiocx.c
new file mode 100644
index 000000000000..66190d7e492d
--- /dev/null
+++ b/arch/ia64/sn/kernel/tiocx.c
@@ -0,0 +1,548 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (c) 2005 Silicon Graphics, Inc. All rights reserved.
7 */
8
9#include <linux/module.h>
10#include <linux/kernel.h>
11#include <linux/version.h>
12#include <linux/slab.h>
13#include <linux/spinlock.h>
14#include <linux/proc_fs.h>
15#include <linux/device.h>
16#include <linux/delay.h>
17#include <asm/uaccess.h>
18#include <asm/sn/sn_sal.h>
19#include <asm/sn/addrs.h>
20#include <asm/sn/io.h>
21#include <asm/sn/types.h>
22#include <asm/sn/shubio.h>
23#include <asm/sn/tiocx.h>
24#include "tio.h"
25#include "xtalk/xwidgetdev.h"
26#include "xtalk/hubdev.h"
27
28#define CX_DEV_NONE 0
29#define DEVICE_NAME "tiocx"
30#define WIDGET_ID 0
31#define TIOCX_DEBUG 0
32
33#if TIOCX_DEBUG
34#define DBG(fmt...) printk(KERN_ALERT fmt)
35#else
36#define DBG(fmt...)
37#endif
38
39struct device_attribute dev_attr_cxdev_control;
40
41/**
42 * tiocx_match - Try to match driver id list with device.
43 * @dev: device pointer
44 * @drv: driver pointer
45 *
46 * Returns 1 if match, 0 otherwise.
47 */
48static int tiocx_match(struct device *dev, struct device_driver *drv)
49{
50 struct cx_dev *cx_dev = to_cx_dev(dev);
51 struct cx_drv *cx_drv = to_cx_driver(drv);
52 const struct cx_device_id *ids = cx_drv->id_table;
53
54 if (!ids)
55 return 0;
56
57 while (ids->part_num) {
58 if (ids->part_num == cx_dev->cx_id.part_num)
59 return 1;
60 ids++;
61 }
62 return 0;
63
64}
65
/*
 * Hotplug callback for the tiocx bus.  TIOCX devices supply no hotplug
 * environment data, so always report -ENODEV (no event generated).
 */
static int tiocx_hotplug(struct device *dev, char **envp, int num_envp,
			 char *buffer, int buffer_size)
{
	return -ENODEV;
}
71
/*
 * Release callback: frees the cx_dev allocated in cx_device_register()
 * once the last reference to its embedded struct device is dropped.
 */
static void tiocx_bus_release(struct device *dev)
{
	kfree(to_cx_dev(dev));
}
76
/* Bus type for TIO CX (FPGA corelet) devices. */
struct bus_type tiocx_bus_type = {
	.name = "tiocx",
	.match = tiocx_match,
	.hotplug = tiocx_hotplug,
};
82
83/**
84 * cx_device_match - Find cx_device in the id table.
85 * @ids: id table from driver
86 * @cx_device: part/mfg id for the device
87 *
88 */
89static const struct cx_device_id *cx_device_match(const struct cx_device_id
90 *ids,
91 struct cx_dev *cx_device)
92{
93 /*
94 * NOTES: We may want to check for CX_ANY_ID too.
95 * Do we want to match against nasid too?
96 * CX_DEV_NONE == 0, if the driver tries to register for
97 * part/mfg == 0 we should return no-match (NULL) here.
98 */
99 while (ids->part_num && ids->mfg_num) {
100 if (ids->part_num == cx_device->cx_id.part_num &&
101 ids->mfg_num == cx_device->cx_id.mfg_num)
102 return ids;
103 ids++;
104 }
105
106 return NULL;
107}
108
109/**
110 * cx_device_probe - Look for matching device.
111 * Call driver probe routine if found.
112 * @cx_driver: driver table (cx_drv struct) from driver
113 * @cx_device: part/mfg id for the device
114 */
115static int cx_device_probe(struct device *dev)
116{
117 const struct cx_device_id *id;
118 struct cx_drv *cx_drv = to_cx_driver(dev->driver);
119 struct cx_dev *cx_dev = to_cx_dev(dev);
120 int error = 0;
121
122 if (!cx_dev->driver && cx_drv->probe) {
123 id = cx_device_match(cx_drv->id_table, cx_dev);
124 if (id) {
125 if ((error = cx_drv->probe(cx_dev, id)) < 0)
126 return error;
127 else
128 cx_dev->driver = cx_drv;
129 }
130 }
131
132 return error;
133}
134
135/**
136 * cx_driver_remove - Remove driver from device struct.
137 * @dev: device
138 */
139static int cx_driver_remove(struct device *dev)
140{
141 struct cx_dev *cx_dev = to_cx_dev(dev);
142 struct cx_drv *cx_drv = cx_dev->driver;
143 if (cx_drv->remove)
144 cx_drv->remove(cx_dev);
145 cx_dev->driver = NULL;
146 return 0;
147}
148
/**
 * cx_driver_register - Register the driver.
 * @cx_driver: driver table (cx_drv struct) from driver
 *
 * Called from the driver init routine to register a driver.
 * The cx_drv struct contains the driver name, a pointer to
 * a table of part/mfg numbers and a pointer to the driver's
 * probe/attach routine.
 *
 * Returns the result of driver_register(): 0 on success, negative
 * errno on failure.
 */
int cx_driver_register(struct cx_drv *cx_driver)
{
	/* Wire the generic driver-model callbacks to the tiocx bus. */
	cx_driver->driver.name = cx_driver->name;
	cx_driver->driver.bus = &tiocx_bus_type;
	cx_driver->driver.probe = cx_device_probe;
	cx_driver->driver.remove = cx_driver_remove;

	return driver_register(&cx_driver->driver);
}
167
/**
 * cx_driver_unregister - Unregister the driver.
 * @cx_driver: driver table (cx_drv struct) from driver
 *
 * Always returns 0.
 */
int cx_driver_unregister(struct cx_drv *cx_driver)
{
	driver_unregister(&cx_driver->driver);
	return 0;
}
177
178/**
179 * cx_device_register - Register a device.
180 * @nasid: device's nasid
181 * @part_num: device's part number
182 * @mfg_num: device's manufacturer number
183 * @hubdev: hub info associated with this device
184 *
185 */
186int
187cx_device_register(nasid_t nasid, int part_num, int mfg_num,
188 struct hubdev_info *hubdev)
189{
190 struct cx_dev *cx_dev;
191
192 cx_dev = kcalloc(1, sizeof(struct cx_dev), GFP_KERNEL);
193 DBG("cx_dev= 0x%p\n", cx_dev);
194 if (cx_dev == NULL)
195 return -ENOMEM;
196
197 cx_dev->cx_id.part_num = part_num;
198 cx_dev->cx_id.mfg_num = mfg_num;
199 cx_dev->cx_id.nasid = nasid;
200 cx_dev->hubdev = hubdev;
201
202 cx_dev->dev.parent = NULL;
203 cx_dev->dev.bus = &tiocx_bus_type;
204 cx_dev->dev.release = tiocx_bus_release;
205 snprintf(cx_dev->dev.bus_id, BUS_ID_SIZE, "%d.0x%x",
206 cx_dev->cx_id.nasid, cx_dev->cx_id.part_num);
207 device_register(&cx_dev->dev);
208 get_device(&cx_dev->dev);
209
210 device_create_file(&cx_dev->dev, &dev_attr_cxdev_control);
211
212 return 0;
213}
214
/**
 * cx_device_unregister - Unregister a device.
 * @cx_dev: part/mfg id for the device
 *
 * Drops the extra reference taken in cx_device_register(), then removes
 * the device from the bus; the final put frees cx_dev via
 * tiocx_bus_release().  Always returns 0.
 */
int cx_device_unregister(struct cx_dev *cx_dev)
{
	put_device(&cx_dev->dev);
	device_unregister(&cx_dev->dev);
	return 0;
}
225
/**
 * cx_device_reload - Reload the device.
 * @cx_dev: device to reload
 *
 * Remove the device associated with cx_dev's nasid from the device list
 * and then re-register it with its current part/mfg numbers (used when
 * the FPGA bitstream may have changed).
 */
static int cx_device_reload(struct cx_dev *cx_dev)
{
	/* Remove the sysfs control file before tearing the device down. */
	device_remove_file(&cx_dev->dev, &dev_attr_cxdev_control);
	cx_device_unregister(cx_dev);
	return cx_device_register(cx_dev->cx_id.nasid, cx_dev->cx_id.part_num,
				  cx_dev->cx_id.mfg_num, cx_dev->hubdev);
}
242
243static inline uint64_t tiocx_intr_alloc(nasid_t nasid, int widget,
244 u64 sn_irq_info,
245 int req_irq, nasid_t req_nasid,
246 int req_slice)
247{
248 struct ia64_sal_retval rv;
249 rv.status = 0;
250 rv.v0 = 0;
251
252 ia64_sal_oemcall_nolock(&rv, SN_SAL_IOIF_INTERRUPT,
253 SAL_INTR_ALLOC, nasid,
254 widget, sn_irq_info, req_irq,
255 req_nasid, req_slice);
256 return rv.status;
257}
258
259static inline void tiocx_intr_free(nasid_t nasid, int widget,
260 struct sn_irq_info *sn_irq_info)
261{
262 struct ia64_sal_retval rv;
263 rv.status = 0;
264 rv.v0 = 0;
265
266 ia64_sal_oemcall_nolock(&rv, SN_SAL_IOIF_INTERRUPT,
267 SAL_INTR_FREE, nasid,
268 widget, sn_irq_info->irq_irq,
269 sn_irq_info->irq_cookie, 0, 0);
270}
271
272struct sn_irq_info *tiocx_irq_alloc(nasid_t nasid, int widget, int irq,
273 nasid_t req_nasid, int slice)
274{
275 struct sn_irq_info *sn_irq_info;
276 int status;
277 int sn_irq_size = sizeof(struct sn_irq_info);
278
279 if ((nasid & 1) == 0)
280 return NULL;
281
282 sn_irq_info = kmalloc(sn_irq_size, GFP_KERNEL);
283 if (sn_irq_info == NULL)
284 return NULL;
285
286 memset(sn_irq_info, 0x0, sn_irq_size);
287
288 status = tiocx_intr_alloc(nasid, widget, __pa(sn_irq_info), irq,
289 req_nasid, slice);
290 if (status) {
291 kfree(sn_irq_info);
292 return NULL;
293 } else {
294 return sn_irq_info;
295 }
296}
297
298void tiocx_irq_free(struct sn_irq_info *sn_irq_info)
299{
300 uint64_t bridge = (uint64_t) sn_irq_info->irq_bridge;
301 nasid_t nasid = NASID_GET(bridge);
302 int widget;
303
304 if (nasid & 1) {
305 widget = TIO_SWIN_WIDGETNUM(bridge);
306 tiocx_intr_free(nasid, widget, sn_irq_info);
307 kfree(sn_irq_info);
308 }
309}
310
/* Convert a physical address to a TIO DMA address. */
uint64_t
tiocx_dma_addr(uint64_t addr)
{
	return PHYS_TO_TIODMA(addr);
}
316
/* Return the small-window MMIO base of the CX corelet on @nasid. */
uint64_t
tiocx_swin_base(int nasid)
{
	return TIO_SWIN_BASE(nasid, TIOCX_CORELET);
}
322
/* Exported interface used by CX (FPGA corelet) device drivers. */
EXPORT_SYMBOL(cx_driver_register);
EXPORT_SYMBOL(cx_driver_unregister);
EXPORT_SYMBOL(cx_device_register);
EXPORT_SYMBOL(cx_device_unregister);
EXPORT_SYMBOL(tiocx_irq_alloc);
EXPORT_SYMBOL(tiocx_irq_free);
EXPORT_SYMBOL(tiocx_bus_type);
EXPORT_SYMBOL(tiocx_dma_addr);
EXPORT_SYMBOL(tiocx_swin_base);
332
333static uint64_t tiocx_get_hubdev_info(u64 handle, u64 address)
334{
335
336 struct ia64_sal_retval ret_stuff;
337 ret_stuff.status = 0;
338 ret_stuff.v0 = 0;
339
340 ia64_sal_oemcall_nolock(&ret_stuff,
341 SN_SAL_IOIF_GET_HUBDEV_INFO,
342 handle, address, 0, 0, 0, 0, 0);
343 return ret_stuff.v0;
344}
345
346static void tio_conveyor_set(nasid_t nasid, int enable_flag)
347{
348 uint64_t ice_frz;
349 uint64_t disable_cb = (1ull << 61);
350
351 if (!(nasid & 1))
352 return;
353
354 ice_frz = REMOTE_HUB_L(nasid, TIO_ICE_FRZ_CFG);
355 if (enable_flag) {
356 if (!(ice_frz & disable_cb)) /* already enabled */
357 return;
358 ice_frz &= ~disable_cb;
359 } else {
360 if (ice_frz & disable_cb) /* already disabled */
361 return;
362 ice_frz |= disable_cb;
363 }
364 DBG(KERN_ALERT "TIO_ICE_FRZ_CFG= 0x%lx\n", ice_frz);
365 REMOTE_HUB_S(nasid, TIO_ICE_FRZ_CFG, ice_frz);
366}
367
/* Convenience wrappers around tio_conveyor_set(). */
#define tio_conveyor_enable(nasid) tio_conveyor_set(nasid, 1)
#define tio_conveyor_disable(nasid) tio_conveyor_set(nasid, 0)
370
371static void tio_corelet_reset(nasid_t nasid, int corelet)
372{
373 if (!(nasid & 1))
374 return;
375
376 REMOTE_HUB_S(nasid, TIO_ICE_PMI_TX_CFG, 1 << corelet);
377 udelay(2000);
378 REMOTE_HUB_S(nasid, TIO_ICE_PMI_TX_CFG, 0);
379 udelay(2000);
380}
381
382static int fpga_attached(nasid_t nasid)
383{
384 uint64_t cx_credits;
385
386 cx_credits = REMOTE_HUB_L(nasid, TIO_ICE_PMI_TX_DYN_CREDIT_STAT_CB3);
387 cx_credits &= TIO_ICE_PMI_TX_DYN_CREDIT_STAT_CB3_CREDIT_CNT_MASK;
388 DBG("cx_credits= 0x%lx\n", cx_credits);
389
390 return (cx_credits == 0xf) ? 1 : 0;
391}
392
/*
 * tiocx_reload - Re-probe the FPGA on cx_dev's nasid and re-register
 * the device with whatever part/mfg ids are now present.
 *
 * If no FPGA is attached (or it is a CE ASIC), part/mfg stay at
 * CX_DEV_NONE so the device is still listed, just marked empty.
 * Returns 0 (CE ignored) or the result of cx_device_reload().
 */
static int tiocx_reload(struct cx_dev *cx_dev)
{
	int part_num = CX_DEV_NONE;
	int mfg_num = CX_DEV_NONE;
	nasid_t nasid = cx_dev->cx_id.nasid;

	if (fpga_attached(nasid)) {
		uint64_t cx_id;

		/* Read the widget id register via the small window (MMIO). */
		cx_id =
		    *(volatile int32_t *)(TIO_SWIN_BASE(nasid, TIOCX_CORELET) +
					  WIDGET_ID);
		part_num = XWIDGET_PART_NUM(cx_id);
		mfg_num = XWIDGET_MFG_NUM(cx_id);
		DBG("part= 0x%x, mfg= 0x%x\n", part_num, mfg_num);
		/* just ignore it if it's a CE */
		if (part_num == TIO_CE_ASIC_PARTNUM)
			return 0;
	}

	cx_dev->cx_id.part_num = part_num;
	cx_dev->cx_id.mfg_num = mfg_num;

	/*
	 * Delete old device and register the new one.  It's ok if
	 * part_num/mfg_num == CX_DEV_NONE.  We want to register
	 * devices in the table even if a bitstream isn't loaded.
	 * That allows use to see that a bitstream isn't loaded via
	 * TIOCX_IOCTL_DEV_LIST.
	 */
	return cx_device_reload(cx_dev);
}
425
426static ssize_t show_cxdev_control(struct device *dev, char *buf)
427{
428 struct cx_dev *cx_dev = to_cx_dev(dev);
429
430 return sprintf(buf, "0x%x 0x%x 0x%x\n",
431 cx_dev->cx_id.nasid,
432 cx_dev->cx_id.part_num, cx_dev->cx_id.mfg_num);
433}
434
435static ssize_t store_cxdev_control(struct device *dev, const char *buf,
436 size_t count)
437{
438 int n;
439 struct cx_dev *cx_dev = to_cx_dev(dev);
440
441 if (!capable(CAP_SYS_ADMIN))
442 return -EPERM;
443
444 if (count <= 0)
445 return 0;
446
447 n = simple_strtoul(buf, NULL, 0);
448
449 switch (n) {
450 case 1:
451 tiocx_reload(cx_dev);
452 break;
453 case 3:
454 tio_corelet_reset(cx_dev->cx_id.nasid, TIOCX_CORELET);
455 break;
456 default:
457 break;
458 }
459
460 return count;
461}
462
/* sysfs file "cxdev_control" (0644): read device ids; write 1=reload, 3=reset. */
DEVICE_ATTR(cxdev_control, 0644, show_cxdev_control, store_cxdev_control);
465static int __init tiocx_init(void)
466{
467 cnodeid_t cnodeid;
468 int found_tiocx_device = 0;
469
470 bus_register(&tiocx_bus_type);
471
472 for (cnodeid = 0; cnodeid < MAX_COMPACT_NODES; cnodeid++) {
473 nasid_t nasid;
474
475 if ((nasid = cnodeid_to_nasid(cnodeid)) < 0)
476 break; /* No more nasids .. bail out of loop */
477
478 if (nasid & 0x1) { /* TIO's are always odd */
479 struct hubdev_info *hubdev;
480 uint64_t status;
481 struct xwidget_info *widgetp;
482
483 DBG("Found TIO at nasid 0x%x\n", nasid);
484
485 hubdev =
486 (struct hubdev_info *)(NODEPDA(cnodeid)->pdinfo);
487 status =
488 tiocx_get_hubdev_info(nasid,
489 (uint64_t) __pa(hubdev));
490 if (status)
491 continue;
492
493 widgetp = &hubdev->hdi_xwidget_info[TIOCX_CORELET];
494
495 /* The CE hangs off of the CX port but is not an FPGA */
496 if (widgetp->xwi_hwid.part_num == TIO_CE_ASIC_PARTNUM)
497 continue;
498
499 tio_corelet_reset(nasid, TIOCX_CORELET);
500 tio_conveyor_enable(nasid);
501
502 if (cx_device_register
503 (nasid, widgetp->xwi_hwid.part_num,
504 widgetp->xwi_hwid.mfg_num, hubdev) < 0)
505 return -ENXIO;
506 else
507 found_tiocx_device++;
508 }
509 }
510
511 /* It's ok if we find zero devices. */
512 DBG("found_tiocx_device= %d\n", found_tiocx_device);
513
514 return 0;
515}
516
/*
 * tiocx_exit - Module teardown: unregister every device on the tiocx
 * bus, then the bus itself.
 */
static void __exit tiocx_exit(void)
{
	struct device *dev;
	struct device *tdev;

	DBG("tiocx_exit\n");

	/*
	 * Unregister devices.
	 *
	 * NOTE(review): this walks tiocx_bus_type.devices.list directly
	 * rather than via a driver-core iterator; presumably safe only
	 * because nothing adds/removes devices concurrently at module
	 * exit -- confirm against the driver-model locking rules.
	 */
	list_for_each_entry_safe(dev, tdev, &tiocx_bus_type.devices.list,
				 bus_list) {
		if (dev) {
			struct cx_dev *cx_dev = to_cx_dev(dev);
			device_remove_file(dev, &dev_attr_cxdev_control);
			cx_device_unregister(cx_dev);
		}
	}

	bus_unregister(&tiocx_bus_type);
}
538
/* Module entry/exit points. */
module_init(tiocx_init);
module_exit(tiocx_exit);

/************************************************************************
 * Module licensing and description
 ************************************************************************/
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Bruce Losure <blosure@sgi.com>");
MODULE_DESCRIPTION("TIOCX module");
MODULE_SUPPORTED_DEVICE(DEVICE_NAME);