Diffstat (limited to 'arch/ia64/sn')
-rw-r--r--  arch/ia64/sn/include/pci/pcibr_provider.h        |   6
-rw-r--r--  arch/ia64/sn/include/pci/pcibus_provider_defs.h  |  43
-rw-r--r--  arch/ia64/sn/include/pci/pcidev.h                |  54
-rw-r--r--  arch/ia64/sn/kernel/Makefile                     |   1
-rw-r--r--  arch/ia64/sn/kernel/bte.c                        |  20
-rw-r--r--  arch/ia64/sn/kernel/bte_error.c                  |  76
-rw-r--r--  arch/ia64/sn/kernel/huberror.c                   |   9
-rw-r--r--  arch/ia64/sn/kernel/io_init.c                    |  78
-rw-r--r--  arch/ia64/sn/kernel/irq.c                        |  19
-rw-r--r--  arch/ia64/sn/kernel/setup.c                      |   9
-rw-r--r--  arch/ia64/sn/kernel/sn2/sn_hwperf.c              | 112
-rw-r--r--  arch/ia64/sn/kernel/tiocx.c                      | 548
-rw-r--r--  arch/ia64/sn/pci/Makefile                        |   2
-rw-r--r--  arch/ia64/sn/pci/pci_dma.c                       |  39
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_ate.c               |   4
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_dma.c               | 107
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_provider.c          |  24
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_reg.c               |   4
-rw-r--r--  arch/ia64/sn/pci/tioca_provider.c                | 668
19 files changed, 1572 insertions(+), 251 deletions(-)
diff --git a/arch/ia64/sn/include/pci/pcibr_provider.h b/arch/ia64/sn/include/pci/pcibr_provider.h
index b1f05ffec70b..1cd291d8badd 100644
--- a/arch/ia64/sn/include/pci/pcibr_provider.h
+++ b/arch/ia64/sn/include/pci/pcibr_provider.h
@@ -123,9 +123,11 @@ pcibr_lock(struct pcibus_info *pcibus_info)
 }
 #define pcibr_unlock(pcibus_info, flag)  spin_unlock_irqrestore(&pcibus_info->pbi_lock, flag)
 
+extern int pcibr_init_provider(void);
 extern void *pcibr_bus_fixup(struct pcibus_bussoft *);
-extern uint64_t pcibr_dma_map(struct pcidev_info *, unsigned long, size_t, unsigned int);
-extern void pcibr_dma_unmap(struct pcidev_info *, dma_addr_t, int);
+extern dma_addr_t pcibr_dma_map(struct pci_dev *, unsigned long, size_t);
+extern dma_addr_t pcibr_dma_map_consistent(struct pci_dev *, unsigned long, size_t);
+extern void pcibr_dma_unmap(struct pci_dev *, dma_addr_t, int);
 
 /*
  * prototypes for the bridge asic register access routines in pcibr_reg.c
diff --git a/arch/ia64/sn/include/pci/pcibus_provider_defs.h b/arch/ia64/sn/include/pci/pcibus_provider_defs.h
deleted file mode 100644
index 07065615bbea..000000000000
--- a/arch/ia64/sn/include/pci/pcibus_provider_defs.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
- */
-#ifndef _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H
-#define _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H
-
-/*
- * SN pci asic types.  Do not ever renumber these or reuse values.  The
- * values must agree with what prom thinks they are.
- */
-
-#define PCIIO_ASIC_TYPE_UNKNOWN	0
-#define PCIIO_ASIC_TYPE_PPB	1
-#define PCIIO_ASIC_TYPE_PIC	2
-#define PCIIO_ASIC_TYPE_TIOCP	3
-
-/*
- * Common pciio bus provider data.  There should be one of these as the
- * first field in any pciio based provider soft structure (e.g. pcibr_soft
- * tioca_soft, etc).
- */
-
-struct pcibus_bussoft {
-	uint32_t		bs_asic_type;		/* chipset type */
-	uint32_t		bs_xid;			/* xwidget id */
-	uint64_t		bs_persist_busnum;	/* Persistent Bus Number */
-	uint64_t		bs_legacy_io;		/* legacy io pio addr */
-	uint64_t		bs_legacy_mem;		/* legacy mem pio addr */
-	uint64_t		bs_base;		/* widget base */
-	struct xwidget_info	*bs_xwidget_info;
-};
-
-/*
- * DMA mapping flags
- */
-
-#define SN_PCIDMA_CONSISTENT	0x0001
-
-#endif				/* _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H */
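
Note: this header moves to <asm/sn/pcibus_provider_defs.h> (see the #include changes below), where the per-asic function table that replaces the SN_PCIDMA_CONSISTENT flag now lives. A minimal sketch of that table, inferred from the default provider initialized in io_init.c later in this patch -- the field order and exact declaration are assumptions, not part of this diff:

    /* Sketch of struct sn_pcibus_provider as implied by this patch;
     * the real definition is in the relocated header, not in any hunk here. */
    struct sn_pcibus_provider {
    	dma_addr_t	(*dma_map)(struct pci_dev *, unsigned long, size_t);
    	dma_addr_t	(*dma_map_consistent)(struct pci_dev *,
    					      unsigned long, size_t);
    	void		(*dma_unmap)(struct pci_dev *, dma_addr_t, int);
    	void *		(*bus_fixup)(struct pcibus_bussoft *);
    };

Each asic type registers one of these in the sn_pci_provider[] array indexed by the PCIIO_ASIC_TYPE_* values above.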
diff --git a/arch/ia64/sn/include/pci/pcidev.h b/arch/ia64/sn/include/pci/pcidev.h
deleted file mode 100644
index 81eb95d3bf47..000000000000
--- a/arch/ia64/sn/include/pci/pcidev.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
- */
-#ifndef _ASM_IA64_SN_PCI_PCIDEV_H
-#define _ASM_IA64_SN_PCI_PCIDEV_H
-
-#include <linux/pci.h>
-
-extern struct sn_irq_info **sn_irq;
-
-#define SN_PCIDEV_INFO(pci_dev) \
-	((struct pcidev_info *)(pci_dev)->sysdata)
-
-/*
- * Given a pci_bus, return the sn pcibus_bussoft struct.  Note that
- * this only works for root busses, not for busses represented by PPB's.
- */
-
-#define SN_PCIBUS_BUSSOFT(pci_bus) \
-	((struct pcibus_bussoft *)(PCI_CONTROLLER((pci_bus))->platform_data))
-
-/*
- * Given a struct pci_dev, return the sn pcibus_bussoft struct.  Note
- * that this is not equivalent to SN_PCIBUS_BUSSOFT(pci_dev->bus) due
- * to possible PPB's in the path.
- */
-
-#define SN_PCIDEV_BUSSOFT(pci_dev) \
-	(SN_PCIDEV_INFO(pci_dev)->pdi_host_pcidev_info->pdi_pcibus_info)
-
-#define PCIIO_BUS_NONE		255	/* bus 255 reserved */
-#define PCIIO_SLOT_NONE		255
-#define PCIIO_FUNC_NONE		255
-#define PCIIO_VENDOR_ID_NONE	(-1)
-
-struct pcidev_info {
-	uint64_t	pdi_pio_mapped_addr[7];	/* 6 BARs PLUS 1 ROM */
-	uint64_t	pdi_slot_host_handle;	/* Bus and devfn Host pci_dev */
-
-	struct pcibus_bussoft	*pdi_pcibus_info;	/* Kernel common bus soft */
-	struct pcidev_info	*pdi_host_pcidev_info;	/* Kernel Host pci_dev */
-	struct pci_dev		*pdi_linux_pcidev;	/* Kernel pci_dev */
-
-	struct sn_irq_info	*pdi_sn_irq_info;
-};
-
-extern void sn_irq_fixup(struct pci_dev *pci_dev,
-			 struct sn_irq_info *sn_irq_info);
-
-#endif				/* _ASM_IA64_SN_PCI_PCIDEV_H */
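
Note: sn_pci_fixup_slot() below assigns through a SN_PCIDEV_BUSPROVIDER(dev) macro whose definition is not shown in this patch, so the relocated <asm/sn/pcidev.h> presumably grows a provider back-pointer next to pdi_pcibus_info. A hedged sketch only; the field name pdi_provider is an assumption:

    /* Assumed addition to struct pcidev_info in the relocated header. */
    struct sn_pcibus_provider *pdi_provider;	/* sn provider ops */

    #define SN_PCIDEV_BUSPROVIDER(pci_dev) \
    	(SN_PCIDEV_INFO(pci_dev)->pdi_provider)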
diff --git a/arch/ia64/sn/kernel/Makefile b/arch/ia64/sn/kernel/Makefile
index 6c7f4d9e8ea0..4f381fb25049 100644
--- a/arch/ia64/sn/kernel/Makefile
+++ b/arch/ia64/sn/kernel/Makefile
@@ -10,3 +10,4 @@
 obj-y				+= setup.o bte.o bte_error.o irq.o mca.o idle.o \
 				   huberror.o io_init.o iomv.o klconflib.o sn2/
 obj-$(CONFIG_IA64_GENERIC)	+= machvec.o
+obj-$(CONFIG_SGI_TIOCX)	+= tiocx.o
diff --git a/arch/ia64/sn/kernel/bte.c b/arch/ia64/sn/kernel/bte.c
index ce0bc4085eae..647deae9bfcd 100644
--- a/arch/ia64/sn/kernel/bte.c
+++ b/arch/ia64/sn/kernel/bte.c
@@ -3,7 +3,7 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2000-2005 Silicon Graphics, Inc.  All Rights Reserved.
  */
 
 #include <linux/config.h>
@@ -170,10 +170,6 @@ retry_bteop:
 	/* Initialize the notification to a known value. */
 	*bte->most_rcnt_na = BTE_WORD_BUSY;
 
-	/* Set the status reg busy bit and transfer length */
-	BTE_PRINTKV(("IBLS = 0x%lx\n", IBLS_BUSY | transfer_size));
-	BTE_LNSTAT_STORE(bte, IBLS_BUSY | transfer_size);
-
 	/* Set the source and destination registers */
 	BTE_PRINTKV(("IBSA = 0x%lx)\n", (TO_PHYS(src))));
 	BTE_SRC_STORE(bte, TO_PHYS(src));
@@ -188,7 +184,7 @@ retry_bteop:
 
 	/* Initiate the transfer */
 	BTE_PRINTK(("IBCT = 0x%lx)\n", BTE_VALID_MODE(mode)));
-	BTE_CTRL_STORE(bte, BTE_VALID_MODE(mode));
+	BTE_START_TRANSFER(bte, transfer_size, BTE_VALID_MODE(mode));
 
 	itc_end = ia64_get_itc() + (40000000 * local_cpu_data->cyc_per_usec);
 
@@ -429,10 +425,16 @@ void bte_init_node(nodepda_t * mynodepda, cnodeid_t cnode)
 	mynodepda->bte_recovery_timer.data = (unsigned long)mynodepda;
 
 	for (i = 0; i < BTES_PER_NODE; i++) {
+		u64 *base_addr;
+
 		/* Which link status register should we use? */
-		unsigned long link_status = (i == 0 ? IIO_IBLS0 : IIO_IBLS1);
-		mynodepda->bte_if[i].bte_base_addr = (u64 *)
-		    REMOTE_HUB_ADDR(cnodeid_to_nasid(cnode), link_status);
+		base_addr = (u64 *)
+		    REMOTE_HUB_ADDR(cnodeid_to_nasid(cnode), BTE_BASE_ADDR(i));
+		mynodepda->bte_if[i].bte_base_addr = base_addr;
+		mynodepda->bte_if[i].bte_source_addr = BTE_SOURCE_ADDR(base_addr);
+		mynodepda->bte_if[i].bte_destination_addr = BTE_DEST_ADDR(base_addr);
+		mynodepda->bte_if[i].bte_control_addr = BTE_CTRL_ADDR(base_addr);
+		mynodepda->bte_if[i].bte_notify_addr = BTE_NOTIF_ADDR(base_addr);
 
 		/*
 		 * Initialize the notification and spinlock
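
Note: bte_transfer() now delegates the final register kick to BTE_START_TRANSFER, which also absorbs the IBLS busy/length store deleted above. The macro body lives in <asm/sn/bte.h> and is not part of this diff; the following is a sketch of the shub1 case only, reconstructed from the removed open-coded sequence:

    /* Hedged sketch: BTE_START_TRANSFER must at least reproduce the two
     * stores the removed code performed on shub1; the shub2 encoding is
     * not shown in this patch. */
    #define BTE_START_TRANSFER(bte, len, mode)				\
    do {								\
    	BTE_LNSTAT_STORE(bte, IBLS_BUSY | (len));  /* busy bit + length */ \
    	BTE_CTRL_STORE(bte, (mode));               /* kick the transfer */ \
    } while (0)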
diff --git a/arch/ia64/sn/kernel/bte_error.c b/arch/ia64/sn/kernel/bte_error.c
index fd104312c6bd..fcbc748ae433 100644
--- a/arch/ia64/sn/kernel/bte_error.c
+++ b/arch/ia64/sn/kernel/bte_error.c
@@ -3,7 +3,7 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2000-2005 Silicon Graphics, Inc.  All Rights Reserved.
  */
 
 #include <linux/types.h>
@@ -33,48 +33,28 @@ void bte_error_handler(unsigned long);
  * Wait until all BTE related CRBs are completed
  * and then reset the interfaces.
  */
-void bte_error_handler(unsigned long _nodepda)
+void shub1_bte_error_handler(unsigned long _nodepda)
 {
 	struct nodepda_s *err_nodepda = (struct nodepda_s *)_nodepda;
-	spinlock_t *recovery_lock = &err_nodepda->bte_recovery_lock;
 	struct timer_list *recovery_timer = &err_nodepda->bte_recovery_timer;
 	nasid_t nasid;
 	int i;
 	int valid_crbs;
-	unsigned long irq_flags;
-	volatile u64 *notify;
-	bte_result_t bh_error;
 	ii_imem_u_t imem;	/* II IMEM Register */
 	ii_icrb0_d_u_t icrbd;	/* II CRB Register D */
 	ii_ibcr_u_t ibcr;
 	ii_icmr_u_t icmr;
 	ii_ieclr_u_t ieclr;
 
-	BTE_PRINTK(("bte_error_handler(%p) - %d\n", err_nodepda,
+	BTE_PRINTK(("shub1_bte_error_handler(%p) - %d\n", err_nodepda,
 		    smp_processor_id()));
 
-	spin_lock_irqsave(recovery_lock, irq_flags);
-
 	if ((err_nodepda->bte_if[0].bh_error == BTE_SUCCESS) &&
 	    (err_nodepda->bte_if[1].bh_error == BTE_SUCCESS)) {
 		BTE_PRINTK(("eh:%p:%d Nothing to do.\n", err_nodepda,
 			    smp_processor_id()));
-		spin_unlock_irqrestore(recovery_lock, irq_flags);
 		return;
 	}
-	/*
-	 * Lock all interfaces on this node to prevent new transfers
-	 * from being queued.
-	 */
-	for (i = 0; i < BTES_PER_NODE; i++) {
-		if (err_nodepda->bte_if[i].cleanup_active) {
-			continue;
-		}
-		spin_lock(&err_nodepda->bte_if[i].spinlock);
-		BTE_PRINTK(("eh:%p:%d locked %d\n", err_nodepda,
-			    smp_processor_id(), i));
-		err_nodepda->bte_if[i].cleanup_active = 1;
-	}
 
 	/* Determine information about our hub */
 	nasid = cnodeid_to_nasid(err_nodepda->bte_if[0].bte_cnode);
@@ -101,7 +81,6 @@ void bte_error_handler(unsigned long _nodepda)
 		mod_timer(recovery_timer, HZ * 5);
 		BTE_PRINTK(("eh:%p:%d Marked Giving up\n", err_nodepda,
 			    smp_processor_id()));
-		spin_unlock_irqrestore(recovery_lock, irq_flags);
 		return;
 	}
 	if (icmr.ii_icmr_fld_s.i_crb_vld != 0) {
@@ -120,8 +99,6 @@ void bte_error_handler(unsigned long _nodepda)
 				BTE_PRINTK(("eh:%p:%d Valid %d, Giving up\n",
 					    err_nodepda, smp_processor_id(),
 					    i));
-				spin_unlock_irqrestore(recovery_lock,
-						       irq_flags);
 				return;
 			}
 		}
@@ -146,6 +123,51 @@ void bte_error_handler(unsigned long _nodepda)
 	ibcr.ii_ibcr_fld_s.i_soft_reset = 1;
 	REMOTE_HUB_S(nasid, IIO_IBCR, ibcr.ii_ibcr_regval);
 
+	del_timer(recovery_timer);
+}
+
+/*
+ * Wait until all BTE related CRBs are completed
+ * and then reset the interfaces.
+ */
+void bte_error_handler(unsigned long _nodepda)
+{
+	struct nodepda_s *err_nodepda = (struct nodepda_s *)_nodepda;
+	spinlock_t *recovery_lock = &err_nodepda->bte_recovery_lock;
+	int i;
+	nasid_t nasid;
+	unsigned long irq_flags;
+	volatile u64 *notify;
+	bte_result_t bh_error;
+
+	BTE_PRINTK(("bte_error_handler(%p) - %d\n", err_nodepda,
+		    smp_processor_id()));
+
+	spin_lock_irqsave(recovery_lock, irq_flags);
+
+	/*
+	 * Lock all interfaces on this node to prevent new transfers
+	 * from being queued.
+	 */
+	for (i = 0; i < BTES_PER_NODE; i++) {
+		if (err_nodepda->bte_if[i].cleanup_active) {
+			continue;
+		}
+		spin_lock(&err_nodepda->bte_if[i].spinlock);
+		BTE_PRINTK(("eh:%p:%d locked %d\n", err_nodepda,
+			    smp_processor_id(), i));
+		err_nodepda->bte_if[i].cleanup_active = 1;
+	}
+
+	if (is_shub1()) {
+		shub1_bte_error_handler(_nodepda);
+	} else {
+		nasid = cnodeid_to_nasid(err_nodepda->bte_if[0].bte_cnode);
+
+		if (ia64_sn_bte_recovery(nasid))
+			panic("bte_error_handler(): Fatal BTE Error");
+	}
+
 	for (i = 0; i < BTES_PER_NODE; i++) {
 		bh_error = err_nodepda->bte_if[i].bh_error;
 		if (bh_error != BTE_SUCCESS) {
@@ -165,8 +187,6 @@ void bte_error_handler(unsigned long _nodepda)
 		spin_unlock(&err_nodepda->bte_if[i].spinlock);
 	}
 
-	del_timer(recovery_timer);
-
 	spin_unlock_irqrestore(recovery_lock, irq_flags);
 }
 
diff --git a/arch/ia64/sn/kernel/huberror.c b/arch/ia64/sn/kernel/huberror.c
index 2bdf684c5066..5c39b43ba3c0 100644
--- a/arch/ia64/sn/kernel/huberror.c
+++ b/arch/ia64/sn/kernel/huberror.c
@@ -3,7 +3,7 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 1992 - 1997, 2000,2002-2004 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992 - 1997, 2000,2002-2005 Silicon Graphics, Inc. All rights reserved.
  */
 
 #include <linux/types.h>
@@ -38,8 +38,11 @@ static irqreturn_t hub_eint_handler(int irq, void *arg, struct pt_regs *ep)
 	if ((int)ret_stuff.v0)
 		panic("hubii_eint_handler(): Fatal TIO Error");
 
-	if (!(nasid & 1))	/* Not a TIO, handle CRB errors */
-		(void)hubiio_crb_error_handler(hubdev_info);
+	if (is_shub1()) {
+		if (!(nasid & 1))	/* Not a TIO, handle CRB errors */
+			(void)hubiio_crb_error_handler(hubdev_info);
+	} else
+		bte_error_handler((unsigned long)NODEPDA(nasid_to_cnodeid(nasid)));
 
 	return IRQ_HANDLED;
 }
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c
index 001880812b7c..18160a06a8c9 100644
--- a/arch/ia64/sn/kernel/io_init.c
+++ b/arch/ia64/sn/kernel/io_init.c
@@ -11,14 +11,15 @@
 #include <asm/sn/types.h>
 #include <asm/sn/sn_sal.h>
 #include <asm/sn/addrs.h>
-#include "pci/pcibus_provider_defs.h"
-#include "pci/pcidev.h"
+#include <asm/sn/pcibus_provider_defs.h>
+#include <asm/sn/pcidev.h>
 #include "pci/pcibr_provider.h"
 #include "xtalk/xwidgetdev.h"
 #include <asm/sn/geo.h>
 #include "xtalk/hubdev.h"
 #include <asm/sn/io.h>
 #include <asm/sn/simulator.h>
+#include <asm/sn/tioca_provider.h>
 
 char master_baseio_wid;
 nasid_t master_nasid = INVALID_NASID;	/* Partition Master */
@@ -34,6 +35,37 @@ struct brick {
 
 int sn_ioif_inited = 0;		/* SN I/O infrastructure initialized? */
 
+struct sn_pcibus_provider *sn_pci_provider[PCIIO_ASIC_MAX_TYPES];	/* indexed by asic type */
+
+/*
+ * Hooks and struct for unsupported pci providers
+ */
+
+static dma_addr_t
+sn_default_pci_map(struct pci_dev *pdev, unsigned long paddr, size_t size)
+{
+	return 0;
+}
+
+static void
+sn_default_pci_unmap(struct pci_dev *pdev, dma_addr_t addr, int direction)
+{
+	return;
+}
+
+static void *
+sn_default_pci_bus_fixup(struct pcibus_bussoft *soft)
+{
+	return NULL;
+}
+
+static struct sn_pcibus_provider sn_pci_default_provider = {
+	.dma_map = sn_default_pci_map,
+	.dma_map_consistent = sn_default_pci_map,
+	.dma_unmap = sn_default_pci_unmap,
+	.bus_fixup = sn_default_pci_bus_fixup,
+};
+
 /*
  * Retrieve the DMA Flush List given nasid.  This list is needed
  * to implement the WAR - Flush DMA data on PIO Reads.
@@ -201,6 +233,7 @@ static void sn_pci_fixup_slot(struct pci_dev *dev)
 	struct sn_irq_info *sn_irq_info;
 	struct pci_dev *host_pci_dev;
 	int status = 0;
+	struct pcibus_bussoft *bs;
 
 	dev->sysdata = kmalloc(sizeof(struct pcidev_info), GFP_KERNEL);
 	if (SN_PCIDEV_INFO(dev) <= 0)
@@ -241,6 +274,7 @@ static void sn_pci_fixup_slot(struct pci_dev *dev)
 	}
 
 	/* set up host bus linkages */
+	bs = SN_PCIBUS_BUSSOFT(dev->bus);
 	host_pci_dev =
 	    pci_find_slot(SN_PCIDEV_INFO(dev)->pdi_slot_host_handle >> 32,
 			  SN_PCIDEV_INFO(dev)->
@@ -248,10 +282,16 @@ static void sn_pci_fixup_slot(struct pci_dev *dev)
 	SN_PCIDEV_INFO(dev)->pdi_host_pcidev_info =
 	    SN_PCIDEV_INFO(host_pci_dev);
 	SN_PCIDEV_INFO(dev)->pdi_linux_pcidev = dev;
-	SN_PCIDEV_INFO(dev)->pdi_pcibus_info = SN_PCIBUS_BUSSOFT(dev->bus);
+	SN_PCIDEV_INFO(dev)->pdi_pcibus_info = bs;
+
+	if (bs && bs->bs_asic_type < PCIIO_ASIC_MAX_TYPES) {
+		SN_PCIDEV_BUSPROVIDER(dev) = sn_pci_provider[bs->bs_asic_type];
+	} else {
+		SN_PCIDEV_BUSPROVIDER(dev) = &sn_pci_default_provider;
+	}
 
 	/* Only set up IRQ stuff if this device has a host bus context */
-	if (SN_PCIDEV_BUSSOFT(dev) && sn_irq_info->irq_irq) {
+	if (bs && sn_irq_info->irq_irq) {
 		SN_PCIDEV_INFO(dev)->pdi_sn_irq_info = sn_irq_info;
 		dev->irq = SN_PCIDEV_INFO(dev)->pdi_sn_irq_info->irq_irq;
 		sn_irq_fixup(dev, sn_irq_info);
@@ -271,6 +311,7 @@ static void sn_pci_controller_fixup(int segment, int busnum)
 	struct pcibus_bussoft *prom_bussoft_ptr;
 	struct hubdev_info *hubdev_info;
 	void *provider_soft;
+	struct sn_pcibus_provider *provider;
 
 	status =
 	    sal_get_pcibus_info((u64) segment, (u64) busnum,
@@ -291,16 +332,22 @@ static void sn_pci_controller_fixup(int segment, int busnum)
 	/*
 	 * Per-provider fixup.  Copies the contents from prom to local
 	 * area and links SN_PCIBUS_BUSSOFT().
-	 *
-	 * Note:  Provider is responsible for ensuring that prom_bussoft_ptr
-	 * represents an asic-type that it can handle.
 	 */
 
-	if (prom_bussoft_ptr->bs_asic_type == PCIIO_ASIC_TYPE_PPB) {
-		return;		/* no further fixup necessary */
+	if (prom_bussoft_ptr->bs_asic_type >= PCIIO_ASIC_MAX_TYPES) {
+		return;		/* unsupported asic type */
+	}
+
+	provider = sn_pci_provider[prom_bussoft_ptr->bs_asic_type];
+	if (provider == NULL) {
+		return;		/* no provider registered for this asic */
+	}
+
+	provider_soft = NULL;
+	if (provider->bus_fixup) {
+		provider_soft = (*provider->bus_fixup) (prom_bussoft_ptr);
 	}
 
-	provider_soft = pcibr_bus_fixup(prom_bussoft_ptr);
 	if (provider_soft == NULL) {
 		return;		/* fixup failed or not applicable */
 	}
@@ -339,6 +386,17 @@ static int __init sn_pci_init(void)
 		return 0;
 
 	/*
+	 * prime sn_pci_provider[].  Individual provider init routines will
+	 * override their respective default entries.
+	 */
+
+	for (i = 0; i < PCIIO_ASIC_MAX_TYPES; i++)
+		sn_pci_provider[i] = &sn_pci_default_provider;
+
+	pcibr_init_provider();
+	tioca_init_provider();
+
+	/*
 	 * This is needed to avoid bounce limit checks in the blk layer
 	 */
 	ia64_max_iommu_merge_mask = ~PAGE_MASK;
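
Note: sn_pci_init() expects each provider's init routine to overwrite its own slots in sn_pci_provider[]. Neither pcibr_init_provider() nor tioca_init_provider() is shown in this section, so the following is a hedged sketch of what such a routine presumably does; the PIC/TIOCP pairing is an assumption based on the asic types the pcibr driver serves, and the ops named are the real entry points declared in pcibr_provider.h above:

    /* Sketch: register the pcibr ops for the asics this driver handles. */
    static struct sn_pcibus_provider pcibr_provider = {
    	.dma_map		= pcibr_dma_map,
    	.dma_map_consistent	= pcibr_dma_map_consistent,
    	.dma_unmap		= pcibr_dma_unmap,
    	.bus_fixup		= pcibr_bus_fixup,
    };

    int pcibr_init_provider(void)
    {
    	sn_pci_provider[PCIIO_ASIC_TYPE_PIC] = &pcibr_provider;
    	sn_pci_provider[PCIIO_ASIC_TYPE_TIOCP] = &pcibr_provider;
    	return 0;
    }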
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
index 3be44724f6c8..0f4e8138658f 100644
--- a/arch/ia64/sn/kernel/irq.c
+++ b/arch/ia64/sn/kernel/irq.c
@@ -13,8 +13,8 @@
 #include <asm/sn/addrs.h>
 #include <asm/sn/arch.h>
 #include "xtalk/xwidgetdev.h"
-#include "pci/pcibus_provider_defs.h"
-#include "pci/pcidev.h"
+#include <asm/sn/pcibus_provider_defs.h>
+#include <asm/sn/pcidev.h>
 #include "pci/pcibr_provider.h"
 #include <asm/sn/shub_mmr.h>
 #include <asm/sn/sn_sal.h>
@@ -82,20 +82,9 @@ static void sn_ack_irq(unsigned int irq)
 	nasid = get_nasid();
 	event_occurred =
 	    HUB_L((uint64_t *) GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED));
-	if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) {
-		mask |= (1 << SH_EVENT_OCCURRED_UART_INT_SHFT);
-	}
-	if (event_occurred & SH_EVENT_OCCURRED_IPI_INT_MASK) {
-		mask |= (1 << SH_EVENT_OCCURRED_IPI_INT_SHFT);
-	}
-	if (event_occurred & SH_EVENT_OCCURRED_II_INT0_MASK) {
-		mask |= (1 << SH_EVENT_OCCURRED_II_INT0_SHFT);
-	}
-	if (event_occurred & SH_EVENT_OCCURRED_II_INT1_MASK) {
-		mask |= (1 << SH_EVENT_OCCURRED_II_INT1_SHFT);
-	}
+	mask = event_occurred & SH_ALL_INT_MASK;
 	HUB_S((uint64_t *) GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED_ALIAS),
 	      mask);
 	__set_bit(irq, (volatile void *)pda->sn_in_service_ivecs);
 
 	move_irq(irq);
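
Note: SH_ALL_INT_MASK is not defined anywhere in this patch (it belongs in <asm/sn/shub_mmr.h>). For the one-line replacement to be equivalent to the removed per-bit tests, it must cover at least the four event bits handled before; a hedged sketch of its assumed shape:

    /* Assumed composition of SH_ALL_INT_MASK; not taken from this diff. */
    #define SH_ALL_INT_MASK \
    	(SH_EVENT_OCCURRED_UART_INT_MASK | SH_EVENT_OCCURRED_IPI_INT_MASK | \
    	 SH_EVENT_OCCURRED_II_INT0_MASK | SH_EVENT_OCCURRED_II_INT1_MASK)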
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index f0306b516afb..d35f2a6f9c94 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -29,6 +29,7 @@
 #include <linux/sched.h>
 #include <linux/root_dev.h>
 #include <linux/nodemask.h>
+#include <linux/pm.h>
 
 #include <asm/io.h>
 #include <asm/sal.h>
@@ -353,6 +354,14 @@ void __init sn_setup(char **cmdline_p)
 	screen_info = sn_screen_info;
 
 	sn_timer_init();
+
+	/*
+	 * set pm_power_off to a SAL call to allow
+	 * sn machines to power off. The SAL call can be replaced
+	 * by an ACPI interface call when ACPI is fully implemented
+	 * for sn.
+	 */
+	pm_power_off = ia64_sn_power_down;
 }
 
 /**
diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
index 197356460ee1..833e700fdac9 100644
--- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c
+++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
@@ -28,6 +28,7 @@
 #include <linux/vmalloc.h>
 #include <linux/seq_file.h>
 #include <linux/miscdevice.h>
+#include <linux/utsname.h>
 #include <linux/cpumask.h>
 #include <linux/smp_lock.h>
 #include <linux/nodemask.h>
@@ -43,6 +44,7 @@
 #include <asm/sn/module.h>
 #include <asm/sn/geo.h>
 #include <asm/sn/sn2/sn_hwperf.h>
+#include <asm/sn/addrs.h>
 
 static void *sn_hwperf_salheap = NULL;
 static int sn_hwperf_obj_cnt = 0;
@@ -81,26 +83,45 @@ out:
 	return e;
 }
 
+static int sn_hwperf_location_to_bpos(char *location,
+	int *rack, int *bay, int *slot, int *slab)
+{
+	char type;
+
+	/* first scan for an old style geoid string */
+	if (sscanf(location, "%03d%c%02d#%d",
+		rack, &type, bay, slab) == 4)
+		*slot = 0;
+	else /* scan for a new bladed geoid string */
+	if (sscanf(location, "%03d%c%02d^%02d#%d",
+		rack, &type, bay, slot, slab) != 5)
+		return -1;
+	/* success */
+	return 0;
+}
+
 static int sn_hwperf_geoid_to_cnode(char *location)
 {
 	int cnode;
 	geoid_t geoid;
 	moduleid_t module_id;
-	char type;
-	int rack, slot, slab;
-	int this_rack, this_slot, this_slab;
+	int rack, bay, slot, slab;
+	int this_rack, this_bay, this_slot, this_slab;
 
-	if (sscanf(location, "%03d%c%02d#%d", &rack, &type, &slot, &slab) != 4)
+	if (sn_hwperf_location_to_bpos(location, &rack, &bay, &slot, &slab))
 		return -1;
 
 	for (cnode = 0; cnode < numionodes; cnode++) {
 		geoid = cnodeid_get_geoid(cnode);
 		module_id = geo_module(geoid);
 		this_rack = MODULE_GET_RACK(module_id);
-		this_slot = MODULE_GET_BPOS(module_id);
+		this_bay = MODULE_GET_BPOS(module_id);
+		this_slot = geo_slot(geoid);
 		this_slab = geo_slab(geoid);
-		if (rack == this_rack && slot == this_slot && slab == this_slab)
+		if (rack == this_rack && bay == this_bay &&
+			slot == this_slot && slab == this_slab) {
 			break;
+		}
 	}
 
 	return cnode < numionodes ? cnode : -1;
@@ -153,11 +174,36 @@ static const char *sn_hwperf_get_slabname(struct sn_hwperf_object_info *obj,
 	return slabname;
 }
 
+static void print_pci_topology(struct seq_file *s,
+	struct sn_hwperf_object_info *obj, int *ordinal,
+	u64 rack, u64 bay, u64 slot, u64 slab)
+{
+	char *p1;
+	char *p2;
+	char *pg;
+
+	if (!(pg = (char *)get_zeroed_page(GFP_KERNEL)))
+		return; /* ignore */
+	if (ia64_sn_ioif_get_pci_topology(rack, bay, slot, slab,
+		__pa(pg), PAGE_SIZE) == SN_HWPERF_OP_OK) {
+		for (p1=pg; *p1 && p1 < pg + PAGE_SIZE;) {
+			if (!(p2 = strchr(p1, '\n')))
+				break;
+			*p2 = '\0';
+			seq_printf(s, "pcibus %d %s-%s\n",
+				*ordinal, obj->location, p1);
+			(*ordinal)++;
+			p1 = p2 + 1;
+		}
+	}
+	free_page((unsigned long)pg);
+}
+
 static int sn_topology_show(struct seq_file *s, void *d)
 {
 	int sz;
 	int pt;
-	int e;
+	int e = 0;
 	int i;
 	int j;
 	const char *slabname;
@@ -169,11 +215,44 @@ static int sn_topology_show(struct seq_file *s, void *d)
 	struct sn_hwperf_object_info *p;
 	struct sn_hwperf_object_info *obj = d;	/* this object */
 	struct sn_hwperf_object_info *objs = s->private; /* all objects */
+	int rack, bay, slot, slab;
+	u8 shubtype;
+	u8 system_size;
+	u8 sharing_size;
+	u8 partid;
+	u8 coher;
+	u8 nasid_shift;
+	u8 region_size;
+	u16 nasid_mask;
+	int nasid_msb;
+	int pci_bus_ordinal = 0;
 
 	if (obj == objs) {
-		seq_printf(s, "# sn_topology version 1\n");
+		seq_printf(s, "# sn_topology version 2\n");
 		seq_printf(s, "# objtype ordinal location partition"
 			" [attribute value [, ...]]\n");
+
+		if (ia64_sn_get_sn_info(0,
+			&shubtype, &nasid_mask, &nasid_shift, &system_size,
+			&sharing_size, &partid, &coher, &region_size))
+			BUG();
+		for (nasid_msb=63; nasid_msb > 0; nasid_msb--) {
+			if (((u64)nasid_mask << nasid_shift) & (1ULL << nasid_msb))
+				break;
+		}
+		seq_printf(s, "partition %u %s local "
+			"shubtype %s, "
+			"nasid_mask 0x%016lx, "
+			"nasid_bits %d:%d, "
+			"system_size %d, "
+			"sharing_size %d, "
+			"coherency_domain %d, "
+			"region_size %d\n",
+
+			partid, system_utsname.nodename,
+			shubtype ? "shub2" : "shub1",
+			(u64)nasid_mask << nasid_shift, nasid_msb, nasid_shift,
+			system_size, sharing_size, coher, region_size);
 	}
 
 	if (SN_HWPERF_FOREIGN(obj)) {
@@ -181,7 +260,7 @@ static int sn_topology_show(struct seq_file *s, void *d)
 		return 0;
 	}
 
-	for (i = 0; obj->name[i]; i++) {
+	for (i = 0; i < SN_HWPERF_MAXSTRING && obj->name[i]; i++) {
 		if (obj->name[i] == ' ')
 			obj->name[i] = '_';
 	}
@@ -221,6 +300,17 @@ static int sn_topology_show(struct seq_file *s, void *d)
 			seq_putc(s, '\n');
 		}
 	}
+
+	/*
+	 * PCI busses attached to this node, if any
+	 */
+	if (sn_hwperf_location_to_bpos(obj->location,
+		&rack, &bay, &slot, &slab)) {
+		/* export pci bus info */
+		print_pci_topology(s, obj, &pci_bus_ordinal,
+			rack, bay, slot, slab);
+
+	}
 	}
 
 	if (obj->ports) {
@@ -397,6 +487,9 @@ static int sn_hwperf_map_err(int hwperf_err)
 		break;
 
 	case SN_HWPERF_OP_BUSY:
+		e = -EBUSY;
+		break;
+
 	case SN_HWPERF_OP_RECONFIGURE:
 		e = -EAGAIN;
 		break;
@@ -549,6 +642,7 @@ sn_hwperf_ioctl(struct inode *in, struct file *fp, u32 op, u64 arg)
 		r = sn_hwperf_op_cpu(&op_info);
 		if (r) {
 			r = sn_hwperf_map_err(r);
+			a.v0 = v0;
 			goto error;
 		}
 		break;
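
Note: the two sscanf patterns in sn_hwperf_location_to_bpos() accept an old-style geoid such as "001c14#0" (rack 1, brick type 'c', bay 14, slab 0, with slot defaulting to 0) and a new bladed geoid such as "001c14^02#0" (the "^02" adding an explicit slot). A hedged usage sketch; the location strings are illustrative, not taken from real hardware:

    int rack, bay, slot, slab;

    /* old-style geoid: slot is implied 0 */
    sn_hwperf_location_to_bpos("001c14#0", &rack, &bay, &slot, &slab);

    /* bladed geoid: explicit slot field after '^' */
    sn_hwperf_location_to_bpos("001c14^02#0", &rack, &bay, &slot, &slab);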
diff --git a/arch/ia64/sn/kernel/tiocx.c b/arch/ia64/sn/kernel/tiocx.c
new file mode 100644
index 000000000000..66190d7e492d
--- /dev/null
+++ b/arch/ia64/sn/kernel/tiocx.c
@@ -0,0 +1,548 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2005 Silicon Graphics, Inc.  All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/proc_fs.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <asm/uaccess.h>
+#include <asm/sn/sn_sal.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/io.h>
+#include <asm/sn/types.h>
+#include <asm/sn/shubio.h>
+#include <asm/sn/tiocx.h>
+#include "tio.h"
+#include "xtalk/xwidgetdev.h"
+#include "xtalk/hubdev.h"
+
+#define CX_DEV_NONE 0
+#define DEVICE_NAME "tiocx"
+#define WIDGET_ID 0
+#define TIOCX_DEBUG 0
+
+#if TIOCX_DEBUG
+#define DBG(fmt...)	printk(KERN_ALERT fmt)
+#else
+#define DBG(fmt...)
+#endif
+
+struct device_attribute dev_attr_cxdev_control;
+
+/**
+ * tiocx_match - Try to match driver id list with device.
+ * @dev: device pointer
+ * @drv: driver pointer
+ *
+ * Returns 1 if match, 0 otherwise.
+ */
+static int tiocx_match(struct device *dev, struct device_driver *drv)
+{
+	struct cx_dev *cx_dev = to_cx_dev(dev);
+	struct cx_drv *cx_drv = to_cx_driver(drv);
+	const struct cx_device_id *ids = cx_drv->id_table;
+
+	if (!ids)
+		return 0;
+
+	while (ids->part_num) {
+		if (ids->part_num == cx_dev->cx_id.part_num)
+			return 1;
+		ids++;
+	}
+	return 0;
+
+}
+
+static int tiocx_hotplug(struct device *dev, char **envp, int num_envp,
+			 char *buffer, int buffer_size)
+{
+	return -ENODEV;
+}
+
+static void tiocx_bus_release(struct device *dev)
+{
+	kfree(to_cx_dev(dev));
+}
+
+struct bus_type tiocx_bus_type = {
+	.name = "tiocx",
+	.match = tiocx_match,
+	.hotplug = tiocx_hotplug,
+};
+
+/**
+ * cx_device_match - Find cx_device in the id table.
+ * @ids: id table from driver
+ * @cx_device: part/mfg id for the device
+ *
+ */
+static const struct cx_device_id *cx_device_match(const struct cx_device_id
+						  *ids,
+						  struct cx_dev *cx_device)
+{
+	/*
+	 * NOTES: We may want to check for CX_ANY_ID too.
+	 *        Do we want to match against nasid too?
+	 *        CX_DEV_NONE == 0, if the driver tries to register for
+	 *        part/mfg == 0 we should return no-match (NULL) here.
+	 */
+	while (ids->part_num && ids->mfg_num) {
+		if (ids->part_num == cx_device->cx_id.part_num &&
+		    ids->mfg_num == cx_device->cx_id.mfg_num)
+			return ids;
+		ids++;
+	}
+
+	return NULL;
+}
+
+/**
+ * cx_device_probe - Look for matching device.
+ *			Call driver probe routine if found.
+ * @cx_driver: driver table (cx_drv struct) from driver
+ * @cx_device: part/mfg id for the device
+ */
+static int cx_device_probe(struct device *dev)
+{
+	const struct cx_device_id *id;
+	struct cx_drv *cx_drv = to_cx_driver(dev->driver);
+	struct cx_dev *cx_dev = to_cx_dev(dev);
+	int error = 0;
+
+	if (!cx_dev->driver && cx_drv->probe) {
+		id = cx_device_match(cx_drv->id_table, cx_dev);
+		if (id) {
+			if ((error = cx_drv->probe(cx_dev, id)) < 0)
+				return error;
+			else
+				cx_dev->driver = cx_drv;
+		}
+	}
+
+	return error;
+}
+
+/**
+ * cx_driver_remove - Remove driver from device struct.
+ * @dev: device
+ */
+static int cx_driver_remove(struct device *dev)
+{
+	struct cx_dev *cx_dev = to_cx_dev(dev);
+	struct cx_drv *cx_drv = cx_dev->driver;
+	if (cx_drv->remove)
+		cx_drv->remove(cx_dev);
+	cx_dev->driver = NULL;
+	return 0;
+}
+
+/**
+ * cx_driver_register - Register the driver.
+ * @cx_driver: driver table (cx_drv struct) from driver
+ *
+ * Called from the driver init routine to register a driver.
+ * The cx_drv struct contains the driver name, a pointer to
+ * a table of part/mfg numbers and a pointer to the driver's
+ * probe/attach routine.
+ */
+int cx_driver_register(struct cx_drv *cx_driver)
+{
+	cx_driver->driver.name = cx_driver->name;
+	cx_driver->driver.bus = &tiocx_bus_type;
+	cx_driver->driver.probe = cx_device_probe;
+	cx_driver->driver.remove = cx_driver_remove;
+
+	return driver_register(&cx_driver->driver);
+}
+
+/**
+ * cx_driver_unregister - Unregister the driver.
+ * @cx_driver: driver table (cx_drv struct) from driver
+ */
+int cx_driver_unregister(struct cx_drv *cx_driver)
+{
+	driver_unregister(&cx_driver->driver);
+	return 0;
+}
+
+/**
+ * cx_device_register - Register a device.
+ * @nasid: device's nasid
+ * @part_num: device's part number
+ * @mfg_num: device's manufacturer number
+ * @hubdev: hub info associated with this device
+ *
+ */
+int
+cx_device_register(nasid_t nasid, int part_num, int mfg_num,
+		   struct hubdev_info *hubdev)
+{
+	struct cx_dev *cx_dev;
+
+	cx_dev = kcalloc(1, sizeof(struct cx_dev), GFP_KERNEL);
+	DBG("cx_dev= 0x%p\n", cx_dev);
+	if (cx_dev == NULL)
+		return -ENOMEM;
+
+	cx_dev->cx_id.part_num = part_num;
+	cx_dev->cx_id.mfg_num = mfg_num;
+	cx_dev->cx_id.nasid = nasid;
+	cx_dev->hubdev = hubdev;
+
+	cx_dev->dev.parent = NULL;
+	cx_dev->dev.bus = &tiocx_bus_type;
+	cx_dev->dev.release = tiocx_bus_release;
+	snprintf(cx_dev->dev.bus_id, BUS_ID_SIZE, "%d.0x%x",
+		 cx_dev->cx_id.nasid, cx_dev->cx_id.part_num);
+	device_register(&cx_dev->dev);
+	get_device(&cx_dev->dev);
+
+	device_create_file(&cx_dev->dev, &dev_attr_cxdev_control);
+
+	return 0;
+}
+
+/**
+ * cx_device_unregister - Unregister a device.
+ * @cx_dev: part/mfg id for the device
+ */
+int cx_device_unregister(struct cx_dev *cx_dev)
+{
+	put_device(&cx_dev->dev);
+	device_unregister(&cx_dev->dev);
+	return 0;
+}
+
+/**
+ * cx_device_reload - Reload the device.
+ * @nasid: device's nasid
+ * @part_num: device's part number
+ * @mfg_num: device's manufacturer number
+ *
+ * Remove the device associated with 'nasid' from device list and then
+ * call device-register with the given part/mfg numbers.
+ */
+static int cx_device_reload(struct cx_dev *cx_dev)
+{
+	device_remove_file(&cx_dev->dev, &dev_attr_cxdev_control);
+	cx_device_unregister(cx_dev);
+	return cx_device_register(cx_dev->cx_id.nasid, cx_dev->cx_id.part_num,
+				  cx_dev->cx_id.mfg_num, cx_dev->hubdev);
+}
+
+static inline uint64_t tiocx_intr_alloc(nasid_t nasid, int widget,
+					u64 sn_irq_info,
+					int req_irq, nasid_t req_nasid,
+					int req_slice)
+{
+	struct ia64_sal_retval rv;
+	rv.status = 0;
+	rv.v0 = 0;
+
+	ia64_sal_oemcall_nolock(&rv, SN_SAL_IOIF_INTERRUPT,
+				SAL_INTR_ALLOC, nasid,
+				widget, sn_irq_info, req_irq,
+				req_nasid, req_slice);
+	return rv.status;
+}
+
+static inline void tiocx_intr_free(nasid_t nasid, int widget,
+				   struct sn_irq_info *sn_irq_info)
+{
+	struct ia64_sal_retval rv;
+	rv.status = 0;
+	rv.v0 = 0;
+
+	ia64_sal_oemcall_nolock(&rv, SN_SAL_IOIF_INTERRUPT,
+				SAL_INTR_FREE, nasid,
+				widget, sn_irq_info->irq_irq,
+				sn_irq_info->irq_cookie, 0, 0);
+}
+
+struct sn_irq_info *tiocx_irq_alloc(nasid_t nasid, int widget, int irq,
+				    nasid_t req_nasid, int slice)
+{
+	struct sn_irq_info *sn_irq_info;
+	int status;
+	int sn_irq_size = sizeof(struct sn_irq_info);
+
+	if ((nasid & 1) == 0)
+		return NULL;
+
+	sn_irq_info = kmalloc(sn_irq_size, GFP_KERNEL);
+	if (sn_irq_info == NULL)
+		return NULL;
+
+	memset(sn_irq_info, 0x0, sn_irq_size);
+
+	status = tiocx_intr_alloc(nasid, widget, __pa(sn_irq_info), irq,
+				  req_nasid, slice);
+	if (status) {
+		kfree(sn_irq_info);
+		return NULL;
+	} else {
+		return sn_irq_info;
+	}
+}
+
+void tiocx_irq_free(struct sn_irq_info *sn_irq_info)
+{
+	uint64_t bridge = (uint64_t) sn_irq_info->irq_bridge;
+	nasid_t nasid = NASID_GET(bridge);
+	int widget;
+
+	if (nasid & 1) {
+		widget = TIO_SWIN_WIDGETNUM(bridge);
+		tiocx_intr_free(nasid, widget, sn_irq_info);
+		kfree(sn_irq_info);
+	}
+}
+
+uint64_t
+tiocx_dma_addr(uint64_t addr)
+{
+	return PHYS_TO_TIODMA(addr);
+}
+
+uint64_t
+tiocx_swin_base(int nasid)
+{
+	return TIO_SWIN_BASE(nasid, TIOCX_CORELET);
+}
+
+EXPORT_SYMBOL(cx_driver_register);
+EXPORT_SYMBOL(cx_driver_unregister);
+EXPORT_SYMBOL(cx_device_register);
+EXPORT_SYMBOL(cx_device_unregister);
+EXPORT_SYMBOL(tiocx_irq_alloc);
+EXPORT_SYMBOL(tiocx_irq_free);
+EXPORT_SYMBOL(tiocx_bus_type);
+EXPORT_SYMBOL(tiocx_dma_addr);
+EXPORT_SYMBOL(tiocx_swin_base);
+
+static uint64_t tiocx_get_hubdev_info(u64 handle, u64 address)
+{
+
+	struct ia64_sal_retval ret_stuff;
+	ret_stuff.status = 0;
+	ret_stuff.v0 = 0;
+
+	ia64_sal_oemcall_nolock(&ret_stuff,
+				SN_SAL_IOIF_GET_HUBDEV_INFO,
+				handle, address, 0, 0, 0, 0, 0);
+	return ret_stuff.v0;
+}
+
+static void tio_conveyor_set(nasid_t nasid, int enable_flag)
+{
+	uint64_t ice_frz;
+	uint64_t disable_cb = (1ull << 61);
+
+	if (!(nasid & 1))
+		return;
+
+	ice_frz = REMOTE_HUB_L(nasid, TIO_ICE_FRZ_CFG);
+	if (enable_flag) {
+		if (!(ice_frz & disable_cb))	/* already enabled */
+			return;
+		ice_frz &= ~disable_cb;
+	} else {
+		if (ice_frz & disable_cb)	/* already disabled */
+			return;
+		ice_frz |= disable_cb;
+	}
+	DBG(KERN_ALERT "TIO_ICE_FRZ_CFG= 0x%lx\n", ice_frz);
+	REMOTE_HUB_S(nasid, TIO_ICE_FRZ_CFG, ice_frz);
+}
+
+#define tio_conveyor_enable(nasid) tio_conveyor_set(nasid, 1)
+#define tio_conveyor_disable(nasid) tio_conveyor_set(nasid, 0)
+
+static void tio_corelet_reset(nasid_t nasid, int corelet)
+{
+	if (!(nasid & 1))
+		return;
+
+	REMOTE_HUB_S(nasid, TIO_ICE_PMI_TX_CFG, 1 << corelet);
+	udelay(2000);
+	REMOTE_HUB_S(nasid, TIO_ICE_PMI_TX_CFG, 0);
+	udelay(2000);
+}
+
+static int fpga_attached(nasid_t nasid)
+{
+	uint64_t cx_credits;
+
+	cx_credits = REMOTE_HUB_L(nasid, TIO_ICE_PMI_TX_DYN_CREDIT_STAT_CB3);
+	cx_credits &= TIO_ICE_PMI_TX_DYN_CREDIT_STAT_CB3_CREDIT_CNT_MASK;
+	DBG("cx_credits= 0x%lx\n", cx_credits);
+
+	return (cx_credits == 0xf) ? 1 : 0;
+}
+
+static int tiocx_reload(struct cx_dev *cx_dev)
+{
+	int part_num = CX_DEV_NONE;
+	int mfg_num = CX_DEV_NONE;
+	nasid_t nasid = cx_dev->cx_id.nasid;
+
+	if (fpga_attached(nasid)) {
+		uint64_t cx_id;
+
+		cx_id =
+		    *(volatile int32_t *)(TIO_SWIN_BASE(nasid, TIOCX_CORELET) +
+					  WIDGET_ID);
+		part_num = XWIDGET_PART_NUM(cx_id);
+		mfg_num = XWIDGET_MFG_NUM(cx_id);
+		DBG("part= 0x%x, mfg= 0x%x\n", part_num, mfg_num);
+		/* just ignore it if it's a CE */
+		if (part_num == TIO_CE_ASIC_PARTNUM)
+			return 0;
+	}
+
+	cx_dev->cx_id.part_num = part_num;
+	cx_dev->cx_id.mfg_num = mfg_num;
+
+	/*
+	 * Delete old device and register the new one.  It's ok if
+	 * part_num/mfg_num == CX_DEV_NONE.  We want to register
+	 * devices in the table even if a bitstream isn't loaded.
+	 * That allows us to see that a bitstream isn't loaded via
+	 * TIOCX_IOCTL_DEV_LIST.
+	 */
+	return cx_device_reload(cx_dev);
+}
+
+static ssize_t show_cxdev_control(struct device *dev, char *buf)
+{
+	struct cx_dev *cx_dev = to_cx_dev(dev);
+
+	return sprintf(buf, "0x%x 0x%x 0x%x\n",
+		       cx_dev->cx_id.nasid,
+		       cx_dev->cx_id.part_num, cx_dev->cx_id.mfg_num);
+}
+
+static ssize_t store_cxdev_control(struct device *dev, const char *buf,
+				   size_t count)
+{
+	int n;
+	struct cx_dev *cx_dev = to_cx_dev(dev);
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	if (count <= 0)
+		return 0;
+
+	n = simple_strtoul(buf, NULL, 0);
+
+	switch (n) {
+	case 1:
+		tiocx_reload(cx_dev);
+		break;
+	case 3:
+		tio_corelet_reset(cx_dev->cx_id.nasid, TIOCX_CORELET);
+		break;
+	default:
+		break;
+	}
+
+	return count;
+}
+
+DEVICE_ATTR(cxdev_control, 0644, show_cxdev_control, store_cxdev_control);
+
+static int __init tiocx_init(void)
+{
+	cnodeid_t cnodeid;
+	int found_tiocx_device = 0;
+
+	bus_register(&tiocx_bus_type);
+
+	for (cnodeid = 0; cnodeid < MAX_COMPACT_NODES; cnodeid++) {
+		nasid_t nasid;
+
+		if ((nasid = cnodeid_to_nasid(cnodeid)) < 0)
+			break;	/* No more nasids .. bail out of loop */
+
+		if (nasid & 0x1) {	/* TIO's are always odd */
+			struct hubdev_info *hubdev;
+			uint64_t status;
+			struct xwidget_info *widgetp;
+
+			DBG("Found TIO at nasid 0x%x\n", nasid);
+
+			hubdev =
+			    (struct hubdev_info *)(NODEPDA(cnodeid)->pdinfo);
+			status =
+			    tiocx_get_hubdev_info(nasid,
+						  (uint64_t) __pa(hubdev));
+			if (status)
+				continue;
+
+			widgetp = &hubdev->hdi_xwidget_info[TIOCX_CORELET];
+
+			/* The CE hangs off of the CX port but is not an FPGA */
+			if (widgetp->xwi_hwid.part_num == TIO_CE_ASIC_PARTNUM)
+				continue;
+
+			tio_corelet_reset(nasid, TIOCX_CORELET);
+			tio_conveyor_enable(nasid);
+
+			if (cx_device_register
+			    (nasid, widgetp->xwi_hwid.part_num,
+			     widgetp->xwi_hwid.mfg_num, hubdev) < 0)
+				return -ENXIO;
+			else
+				found_tiocx_device++;
+		}
+	}
+
+	/* It's ok if we find zero devices. */
+	DBG("found_tiocx_device= %d\n", found_tiocx_device);
+
+	return 0;
+}
+
+static void __exit tiocx_exit(void)
+{
+	struct device *dev;
+	struct device *tdev;
+
+	DBG("tiocx_exit\n");
+
+	/*
+	 * Unregister devices.
+	 */
+	list_for_each_entry_safe(dev, tdev, &tiocx_bus_type.devices.list,
+				 bus_list) {
+		if (dev) {
+			struct cx_dev *cx_dev = to_cx_dev(dev);
+			device_remove_file(dev, &dev_attr_cxdev_control);
+			cx_device_unregister(cx_dev);
+		}
+	}
+
+	bus_unregister(&tiocx_bus_type);
+}
+
+module_init(tiocx_init);
+module_exit(tiocx_exit);
+
+/************************************************************************
+ * Module licensing and description
+ ************************************************************************/
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Bruce Losure <blosure@sgi.com>");
+MODULE_DESCRIPTION("TIOCX module");
+MODULE_SUPPORTED_DEVICE(DEVICE_NAME);
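
Note: a TIOCX client registers a cx_drv carrying a part/mfg id table plus probe/remove hooks, exactly the fields cx_driver_register() and cx_device_probe() consume above. A minimal hedged sketch of such a client; the driver name, part number 0x1234, and mfg number 0x56 are placeholders, not real hardware ids:

    /* Sketch of a minimal TIOCX client module. */
    static const struct cx_device_id my_fpga_ids[] = {
    	{ .part_num = 0x1234, .mfg_num = 0x56 },
    	{ 0, 0 }	/* terminator: part_num == 0 ends the match scan */
    };

    static int my_fpga_probe(struct cx_dev *dev, const struct cx_device_id *id)
    {
    	/* locate the corelet's small window for PIO access */
    	uint64_t base = tiocx_swin_base(dev->cx_id.nasid);
    	return base ? 0 : -ENODEV;
    }

    static void my_fpga_remove(struct cx_dev *dev)
    {
    	/* nothing to tear down in this sketch */
    }

    static struct cx_drv my_fpga_driver = {
    	.name = "my_fpga",
    	.id_table = my_fpga_ids,
    	.probe = my_fpga_probe,
    	.remove = my_fpga_remove,
    };

    /* module init would then call: cx_driver_register(&my_fpga_driver); */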
diff --git a/arch/ia64/sn/pci/Makefile b/arch/ia64/sn/pci/Makefile
index b5dca0097a8e..2f915bce25f9 100644
--- a/arch/ia64/sn/pci/Makefile
+++ b/arch/ia64/sn/pci/Makefile
@@ -7,4 +7,4 @@
 #
 # Makefile for the sn pci general routines.
 
-obj-y := pci_dma.o pcibr/
+obj-y := pci_dma.o tioca_provider.o pcibr/
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index f680824f819d..5da9bdbde7cb 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -12,9 +12,8 @@
 #include <linux/module.h>
 #include <asm/dma.h>
 #include <asm/sn/sn_sal.h>
-#include "pci/pcibus_provider_defs.h"
-#include "pci/pcidev.h"
-#include "pci/pcibr_provider.h"
+#include <asm/sn/pcibus_provider_defs.h>
+#include <asm/sn/pcidev.h>
 
 #define SG_ENT_VIRT_ADDRESS(sg)	(page_address((sg)->page) + (sg)->offset)
 #define SG_ENT_PHYS_ADDRESS(SG)	virt_to_phys(SG_ENT_VIRT_ADDRESS(SG))
@@ -79,7 +78,8 @@ void *sn_dma_alloc_coherent(struct device *dev, size_t size,
 {
 	void *cpuaddr;
 	unsigned long phys_addr;
-	struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev));
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
 
 	BUG_ON(dev->bus != &pci_bus_type);
 
@@ -102,8 +102,7 @@ void *sn_dma_alloc_coherent(struct device *dev, size_t size,
 	 * resources.
 	 */
 
-	*dma_handle = pcibr_dma_map(pcidev_info, phys_addr, size,
-				    SN_PCIDMA_CONSISTENT);
+	*dma_handle = provider->dma_map_consistent(pdev, phys_addr, size);
 	if (!*dma_handle) {
 		printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
 		free_pages((unsigned long)cpuaddr, get_order(size));
@@ -127,11 +126,12 @@ EXPORT_SYMBOL(sn_dma_alloc_coherent);
 void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
 			  dma_addr_t dma_handle)
 {
-	struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev));
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
 
 	BUG_ON(dev->bus != &pci_bus_type);
 
-	pcibr_dma_unmap(pcidev_info, dma_handle, 0);
+	provider->dma_unmap(pdev, dma_handle, 0);
 	free_pages((unsigned long)cpu_addr, get_order(size));
 }
 EXPORT_SYMBOL(sn_dma_free_coherent);
@@ -159,12 +159,13 @@ dma_addr_t sn_dma_map_single(struct device *dev, void *cpu_addr, size_t size,
 {
 	dma_addr_t dma_addr;
 	unsigned long phys_addr;
-	struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev));
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
 
 	BUG_ON(dev->bus != &pci_bus_type);
 
 	phys_addr = __pa(cpu_addr);
-	dma_addr = pcibr_dma_map(pcidev_info, phys_addr, size, 0);
+	dma_addr = provider->dma_map(pdev, phys_addr, size);
 	if (!dma_addr) {
 		printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
 		return 0;
@@ -187,10 +188,12 @@ EXPORT_SYMBOL(sn_dma_map_single);
 void sn_dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 			 int direction)
 {
-	struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev));
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
 
 	BUG_ON(dev->bus != &pci_bus_type);
-	pcibr_dma_unmap(pcidev_info, dma_addr, direction);
+
+	provider->dma_unmap(pdev, dma_addr, direction);
 }
 EXPORT_SYMBOL(sn_dma_unmap_single);
 
207 int nhwentries, int direction) 210 int nhwentries, int direction)
208{ 211{
209 int i; 212 int i;
210 struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev)); 213 struct pci_dev *pdev = to_pci_dev(dev);
214 struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
211 215
212 BUG_ON(dev->bus != &pci_bus_type); 216 BUG_ON(dev->bus != &pci_bus_type);
213 217
214 for (i = 0; i < nhwentries; i++, sg++) { 218 for (i = 0; i < nhwentries; i++, sg++) {
215 pcibr_dma_unmap(pcidev_info, sg->dma_address, direction); 219 provider->dma_unmap(pdev, sg->dma_address, direction);
216 sg->dma_address = (dma_addr_t) NULL; 220 sg->dma_address = (dma_addr_t) NULL;
217 sg->dma_length = 0; 221 sg->dma_length = 0;
218 } 222 }
@@ -233,7 +237,8 @@ int sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
 {
 	unsigned long phys_addr;
 	struct scatterlist *saved_sg = sg;
-	struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev));
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
 	int i;
 
 	BUG_ON(dev->bus != &pci_bus_type);
@@ -243,8 +248,8 @@ int sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
 	 */
 	for (i = 0; i < nhwentries; i++, sg++) {
 		phys_addr = SG_ENT_PHYS_ADDRESS(sg);
-		sg->dma_address = pcibr_dma_map(pcidev_info, phys_addr,
-						sg->length, 0);
+		sg->dma_address = provider->dma_map(pdev,
+						    phys_addr, sg->length);
 
 		if (!sg->dma_address) {
 			printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_ate.c b/arch/ia64/sn/pci/pcibr/pcibr_ate.c
index 9d6854666f9b..0e47bce85f2d 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_ate.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_ate.c
@@ -8,8 +8,8 @@
 
 #include <linux/types.h>
 #include <asm/sn/sn_sal.h>
-#include "pci/pcibus_provider_defs.h"
-#include "pci/pcidev.h"
+#include <asm/sn/pcibus_provider_defs.h>
+#include <asm/sn/pcidev.h>
 #include "pci/pcibr_provider.h"
 
 int pcibr_invalidate_ate = 0;	/* by default don't invalidate ATE on free */
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_dma.c b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
index b1d66ac065c8..c90685985d81 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_dma.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
@@ -12,8 +12,8 @@
 #include <asm/sn/geo.h>
 #include "xtalk/xwidgetdev.h"
 #include "xtalk/hubdev.h"
-#include "pci/pcibus_provider_defs.h"
-#include "pci/pcidev.h"
+#include <asm/sn/pcibus_provider_defs.h>
+#include <asm/sn/pcidev.h>
 #include "pci/tiocp.h"
 #include "pci/pic.h"
 #include "pci/pcibr_provider.h"
@@ -40,7 +40,7 @@ extern int sn_ioif_inited;
  * we do not have to allocate entries in the PMU.
  */
 
-static uint64_t
+static dma_addr_t
 pcibr_dmamap_ate32(struct pcidev_info *info,
 		   uint64_t paddr, size_t req_size, uint64_t flags)
 {
@@ -109,7 +109,7 @@ pcibr_dmamap_ate32(struct pcidev_info *info,
 	return pci_addr;
 }
 
-static uint64_t
+static dma_addr_t
 pcibr_dmatrans_direct64(struct pcidev_info * info, uint64_t paddr,
 			uint64_t dma_attributes)
 {
@@ -141,7 +141,7 @@ pcibr_dmatrans_direct64(struct pcidev_info * info, uint64_t paddr,
 
 }
 
-static uint64_t
+static dma_addr_t
 pcibr_dmatrans_direct32(struct pcidev_info * info,
 			uint64_t paddr, size_t req_size, uint64_t flags)
 {
@@ -180,11 +180,11 @@ pcibr_dmatrans_direct32(struct pcidev_info * info,
180 * DMA mappings for Direct 64 and 32 do not have any DMA maps. 180 * DMA mappings for Direct 64 and 32 do not have any DMA maps.
181 */ 181 */
182void 182void
183pcibr_dma_unmap(struct pcidev_info *pcidev_info, dma_addr_t dma_handle, 183pcibr_dma_unmap(struct pci_dev *hwdev, dma_addr_t dma_handle, int direction)
184 int direction)
185{ 184{
186 struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info-> 185 struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
187 pdi_pcibus_info; 186 struct pcibus_info *pcibus_info =
187 (struct pcibus_info *)pcidev_info->pdi_pcibus_info;
188 188
189 if (IS_PCI32_MAPPED(dma_handle)) { 189 if (IS_PCI32_MAPPED(dma_handle)) {
190 int ate_index; 190 int ate_index;
@@ -316,64 +316,63 @@ void sn_dma_flush(uint64_t addr)
316} 316}
317 317
318/* 318/*
319 * Wrapper DMA interface. Called from pci_dma.c routines. 319 * DMA interfaces. Called from pci_dma.c routines.
320 */ 320 */
321 321
322uint64_t 322dma_addr_t
323pcibr_dma_map(struct pcidev_info * pcidev_info, unsigned long phys_addr, 323pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size)
324 size_t size, unsigned int flags)
325{ 324{
326 dma_addr_t dma_handle; 325 dma_addr_t dma_handle;
327 struct pci_dev *pcidev = pcidev_info->pdi_linux_pcidev; 326 struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
328
329 if (flags & SN_PCIDMA_CONSISTENT) {
330 /* sn_pci_alloc_consistent interfaces */
331 if (pcidev->dev.coherent_dma_mask == ~0UL) {
332 dma_handle =
333 pcibr_dmatrans_direct64(pcidev_info, phys_addr,
334 PCI64_ATTR_BAR);
335 } else {
336 dma_handle =
337 (dma_addr_t) pcibr_dmamap_ate32(pcidev_info,
338 phys_addr, size,
339 PCI32_ATE_BAR);
340 }
341 } else {
342 /* map_sg/map_single interfaces */
343 327
344 /* SN cannot support DMA addresses smaller than 32 bits. */ 328 /* SN cannot support DMA addresses smaller than 32 bits. */
345 if (pcidev->dma_mask < 0x7fffffff) { 329 if (hwdev->dma_mask < 0x7fffffff) {
346 return 0; 330 return 0;
347 } 331 }
348 332
349 if (pcidev->dma_mask == ~0UL) { 333 if (hwdev->dma_mask == ~0UL) {
334 /*
335 * Handle the most common case: 64 bit cards. This
336 * call should always succeed.
337 */
338
339 dma_handle = pcibr_dmatrans_direct64(pcidev_info, phys_addr,
340 PCI64_ATTR_PREF);
341 } else {
342 /* Handle 32-63 bit cards via direct mapping */
343 dma_handle = pcibr_dmatrans_direct32(pcidev_info, phys_addr,
344 size, 0);
345 if (!dma_handle) {
350 /* 346 /*
351 * Handle the most common case: 64 bit cards. This 347 * It is a 32 bit card and we cannot do direct mapping,
352 * call should always succeed. 348 * so we use an ATE.
353 */ 349 */
354 350
355 dma_handle = 351 dma_handle = pcibr_dmamap_ate32(pcidev_info, phys_addr,
356 pcibr_dmatrans_direct64(pcidev_info, phys_addr, 352 size, PCI32_ATE_PREF);
357 PCI64_ATTR_PREF);
358 } else {
359 /* Handle 32-63 bit cards via direct mapping */
360 dma_handle =
361 pcibr_dmatrans_direct32(pcidev_info, phys_addr,
362 size, 0);
363 if (!dma_handle) {
364 /*
365 * It is a 32 bit card and we cannot do direct mapping,
366 * so we use an ATE.
367 */
368
369 dma_handle =
370 pcibr_dmamap_ate32(pcidev_info, phys_addr,
371 size, PCI32_ATE_PREF);
372 }
373 } 353 }
374 } 354 }
375 355
376 return dma_handle; 356 return dma_handle;
377} 357}
378 358
359dma_addr_t
360pcibr_dma_map_consistent(struct pci_dev * hwdev, unsigned long phys_addr,
361 size_t size)
362{
363 dma_addr_t dma_handle;
364 struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
365
366 if (hwdev->dev.coherent_dma_mask == ~0UL) {
367 dma_handle = pcibr_dmatrans_direct64(pcidev_info, phys_addr,
368 PCI64_ATTR_BAR);
369 } else {
370 dma_handle = (dma_addr_t) pcibr_dmamap_ate32(pcidev_info,
371 phys_addr, size,
372 PCI32_ATE_BAR);
373 }
374
375 return dma_handle;
376}
377
379EXPORT_SYMBOL(sn_dma_flush); 378EXPORT_SYMBOL(sn_dma_flush);
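[After this refactor, pcibr_dma_map() chooses among three modes keyed off the device's dma_mask: direct 64-bit for full-mask cards, direct 32-bit for 32-63 bit cards, and a 32-bit ATE entry as the fallback. A sketch of that ladder under assumed helper names; direct64/direct32/ate32 are stand-ins and the addresses they return are fabricated, but the control flow mirrors the patch.]

#include <stddef.h>
#include <stdint.h>

typedef uint64_t dma_addr_t;

/* Stand-in translators; the real ones program bridge registers or ATEs. */
static dma_addr_t direct64(unsigned long paddr)
{
	return (dma_addr_t)paddr | (1ULL << 63);	/* fabricated */
}
static dma_addr_t direct32(unsigned long paddr)
{
	return paddr < (1UL << 31) ? (dma_addr_t)paddr : 0;
}
static dma_addr_t ate32(unsigned long paddr, size_t size)
{
	(void)size;
	return 0x40000000ULL | (paddr & 0xfff);		/* fabricated */
}

dma_addr_t sketch_dma_map(uint64_t dma_mask, unsigned long paddr, size_t size)
{
	if (dma_mask < 0x7fffffffULL)	/* SN can't serve sub-32-bit masks */
		return 0;
	if (dma_mask == ~0ULL)		/* 64-bit card: direct, always works */
		return direct64(paddr);
	/* 32-63 bit card: try direct32, fall back to an ATE mapping */
	dma_addr_t handle = direct32(paddr);
	return handle ? handle : ate32(paddr, size);
}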
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_provider.c b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
index 92bd278cf7ff..3893999d23d8 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_provider.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
@@ -13,8 +13,8 @@
13#include "xtalk/xwidgetdev.h" 13#include "xtalk/xwidgetdev.h"
14#include <asm/sn/geo.h> 14#include <asm/sn/geo.h>
15#include "xtalk/hubdev.h" 15#include "xtalk/hubdev.h"
16#include "pci/pcibus_provider_defs.h" 16#include <asm/sn/pcibus_provider_defs.h>
17#include "pci/pcidev.h" 17#include <asm/sn/pcidev.h>
18#include "pci/pcibr_provider.h" 18#include "pci/pcibr_provider.h"
19#include <asm/sn/addrs.h> 19#include <asm/sn/addrs.h>
20 20
@@ -168,3 +168,23 @@ void pcibr_change_devices_irq(struct sn_irq_info *sn_irq_info)
168 pcibr_force_interrupt(sn_irq_info); 168 pcibr_force_interrupt(sn_irq_info);
169 } 169 }
170} 170}
171
172/*
173 * Provider entries for PIC/CP
174 */
175
176struct sn_pcibus_provider pcibr_provider = {
177 .dma_map = pcibr_dma_map,
178 .dma_map_consistent = pcibr_dma_map_consistent,
179 .dma_unmap = pcibr_dma_unmap,
180 .bus_fixup = pcibr_bus_fixup,
181};
182
183int
184pcibr_init_provider(void)
185{
186 sn_pci_provider[PCIIO_ASIC_TYPE_PIC] = &pcibr_provider;
187 sn_pci_provider[PCIIO_ASIC_TYPE_TIOCP] = &pcibr_provider;
188
189 return 0;
190}
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_reg.c b/arch/ia64/sn/pci/pcibr/pcibr_reg.c
index 74a74a7d2a13..865c11c3b50a 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_reg.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_reg.c
@@ -8,8 +8,8 @@
8 8
9#include <linux/types.h> 9#include <linux/types.h>
10#include <linux/interrupt.h> 10#include <linux/interrupt.h>
11#include "pci/pcibus_provider_defs.h" 11#include <asm/sn/pcibus_provider_defs.h>
12#include "pci/pcidev.h" 12#include <asm/sn/pcidev.h>
13#include "pci/tiocp.h" 13#include "pci/tiocp.h"
14#include "pci/pic.h" 14#include "pci/pic.h"
15#include "pci/pcibr_provider.h" 15#include "pci/pcibr_provider.h"
diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c
new file mode 100644
index 000000000000..54a0dd447e76
--- /dev/null
+++ b/arch/ia64/sn/pci/tioca_provider.c
@@ -0,0 +1,668 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2003-2005 Silicon Graphics, Inc. All Rights Reserved.
7 */
8
9#include <linux/types.h>
10#include <linux/interrupt.h>
11#include <linux/pci.h>
12#include <asm/sn/sn_sal.h>
13#include <asm/sn/addrs.h>
14#include <asm/sn/pcidev.h>
15#include <asm/sn/pcibus_provider_defs.h>
16#include <asm/sn/tioca_provider.h>
17
18uint32_t tioca_gart_found;
19EXPORT_SYMBOL(tioca_gart_found); /* used by agp-sgi */
20
21LIST_HEAD(tioca_list);
22EXPORT_SYMBOL(tioca_list); /* used by agp-sgi */
23
24static int tioca_gart_init(struct tioca_kernel *);
25
26/**
27 * tioca_gart_init - Initialize SGI TIOCA GART
28 * @tioca_kern: ptr to the tioca_kernel struct identifying the CA
29 *
30 * If the indicated tioca has devices present, initialize its associated
31 * GART MMRs and kernel memory.
32 */
33static int
34tioca_gart_init(struct tioca_kernel *tioca_kern)
35{
36 uint64_t ap_reg;
37 uint64_t offset;
38 struct page *tmp;
39 struct tioca_common *tioca_common;
40 volatile struct tioca *ca_base;
41
42 tioca_common = tioca_kern->ca_common;
43 ca_base = (struct tioca *)tioca_common->ca_common.bs_base;
44
45 if (list_empty(tioca_kern->ca_devices))
46 return 0;
47
48 ap_reg = 0;
49
50 /*
51 * Validate aperture size
52 */
53
54 switch (CA_APERATURE_SIZE >> 20) {
55 case 4:
56 ap_reg |= (0x3ff << CA_GART_AP_SIZE_SHFT); /* 4MB */
57 break;
58 case 8:
59 ap_reg |= (0x3fe << CA_GART_AP_SIZE_SHFT); /* 8MB */
60 break;
61 case 16:
62 ap_reg |= (0x3fc << CA_GART_AP_SIZE_SHFT); /* 16MB */
63 break;
64 case 32:
65 ap_reg |= (0x3f8 << CA_GART_AP_SIZE_SHFT); /* 32 MB */
66 break;
67 case 64:
68 ap_reg |= (0x3f0 << CA_GART_AP_SIZE_SHFT); /* 64 MB */
69 break;
70 case 128:
71 ap_reg |= (0x3e0 << CA_GART_AP_SIZE_SHFT); /* 128 MB */
72 break;
73 case 256:
74 ap_reg |= (0x3c0 << CA_GART_AP_SIZE_SHFT); /* 256 MB */
75 break;
76 case 512:
77 ap_reg |= (0x380 << CA_GART_AP_SIZE_SHFT); /* 512 MB */
78 break;
79 case 1024:
80 ap_reg |= (0x300 << CA_GART_AP_SIZE_SHFT); /* 1GB */
81 break;
82 case 2048:
83 ap_reg |= (0x200 << CA_GART_AP_SIZE_SHFT); /* 2GB */
84 break;
85 case 4096:
86 ap_reg |= (0x000 << CA_GART_AP_SIZE_SHFT); /* 4 GB */
87 break;
88 default:
89 printk(KERN_ERR "%s: Invalid CA_APERATURE_SIZE "
90 "0x%lx\n", __FUNCTION__, (ulong) CA_APERATURE_SIZE);
91 return -1;
92 }
93
94 /*
95 * Set up other aperture parameters
96 */
97
98 if (PAGE_SIZE >= 16384) {
99 tioca_kern->ca_ap_pagesize = 16384;
100 ap_reg |= CA_GART_PAGE_SIZE;
101 } else {
102 tioca_kern->ca_ap_pagesize = 4096;
103 }
104
105 tioca_kern->ca_ap_size = CA_APERATURE_SIZE;
106 tioca_kern->ca_ap_bus_base = CA_APERATURE_BASE;
107 tioca_kern->ca_gart_entries =
108 tioca_kern->ca_ap_size / tioca_kern->ca_ap_pagesize;
109
110 ap_reg |= (CA_GART_AP_ENB_AGP | CA_GART_AP_ENB_PCI);
111 ap_reg |= tioca_kern->ca_ap_bus_base;
112
113 /*
114 * Allocate and set up the GART
115 */
116
117 tioca_kern->ca_gart_size = tioca_kern->ca_gart_entries * sizeof(u64);
118 tmp =
119 alloc_pages_node(tioca_kern->ca_closest_node,
120 GFP_KERNEL | __GFP_ZERO,
121 get_order(tioca_kern->ca_gart_size));
122
123 if (!tmp) {
124 printk(KERN_ERR "%s: Could not allocate "
125 "%lu bytes (order %d) for GART\n",
126 __FUNCTION__,
127 tioca_kern->ca_gart_size,
128 get_order(tioca_kern->ca_gart_size));
129 return -ENOMEM;
130 }
131
132 tioca_kern->ca_gart = page_address(tmp);
133 tioca_kern->ca_gart_coretalk_addr =
134 PHYS_TO_TIODMA(virt_to_phys(tioca_kern->ca_gart));
135
136 /*
137 * Compute PCI/AGP convenience fields
138 */
139
140 offset = CA_PCI32_MAPPED_BASE - CA_APERATURE_BASE;
141 tioca_kern->ca_pciap_base = CA_PCI32_MAPPED_BASE;
142 tioca_kern->ca_pciap_size = CA_PCI32_MAPPED_SIZE;
143 tioca_kern->ca_pcigart_start = offset / tioca_kern->ca_ap_pagesize;
144 tioca_kern->ca_pcigart_base =
145 tioca_kern->ca_gart_coretalk_addr + offset;
146 tioca_kern->ca_pcigart =
147 &tioca_kern->ca_gart[tioca_kern->ca_pcigart_start];
148 tioca_kern->ca_pcigart_entries =
149 tioca_kern->ca_pciap_size / tioca_kern->ca_ap_pagesize;
150 tioca_kern->ca_pcigart_pagemap =
151 kcalloc(1, tioca_kern->ca_pcigart_entries / 8, GFP_KERNEL);
152 if (!tioca_kern->ca_pcigart_pagemap) {
153 free_pages((unsigned long)tioca_kern->ca_gart,
154 get_order(tioca_kern->ca_gart_size));
155 return -1;
156 }
157
158 offset = CA_AGP_MAPPED_BASE - CA_APERATURE_BASE;
159 tioca_kern->ca_gfxap_base = CA_AGP_MAPPED_BASE;
160 tioca_kern->ca_gfxap_size = CA_AGP_MAPPED_SIZE;
161 tioca_kern->ca_gfxgart_start = offset / tioca_kern->ca_ap_pagesize;
162 tioca_kern->ca_gfxgart_base =
163 tioca_kern->ca_gart_coretalk_addr + offset;
164 tioca_kern->ca_gfxgart =
165 &tioca_kern->ca_gart[tioca_kern->ca_gfxgart_start];
166 tioca_kern->ca_gfxgart_entries =
167 tioca_kern->ca_gfxap_size / tioca_kern->ca_ap_pagesize;
168
169 /*
170 * various control settings:
171 * use agp op-combining
172 * use GET semantics to fetch memory
173 * participate in coherency domain
174 * DISABLE GART PREFETCHING due to hw bug tracked in SGI PV930029
175 */
176
177 ca_base->ca_control1 |= CA_AGPDMA_OP_ENB_COMBDELAY; /* PV895469 ? */
178 ca_base->ca_control2 &= ~(CA_GART_MEM_PARAM);
179 ca_base->ca_control2 |= (0x2ull << CA_GART_MEM_PARAM_SHFT);
180 tioca_kern->ca_gart_iscoherent = 1;
181 ca_base->ca_control2 &=
182 ~(CA_GART_WR_PREFETCH_ENB | CA_GART_RD_PREFETCH_ENB);
183
184 /*
185 * Unmask GART fetch error interrupts. Clear residual errors first.
186 */
187
188 ca_base->ca_int_status_alias = CA_GART_FETCH_ERR;
189 ca_base->ca_mult_error_alias = CA_GART_FETCH_ERR;
190 ca_base->ca_int_mask &= ~CA_GART_FETCH_ERR;
191
192 /*
193 * Program the aperture and GART registers in TIOCA
194 */
195
196 ca_base->ca_gart_aperature = ap_reg;
197 ca_base->ca_gart_ptr_table = tioca_kern->ca_gart_coretalk_addr | 1;
198
199 return 0;
200}
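[One observation on the aperture switch above: every case fits the closed form 0x400 - (size_MB / 4) in the AP_SIZE field. The standalone check below confirms that against a few table entries; this is an editor's observation, and the kernel keeps the explicit table.]

#include <assert.h>
#include <stdint.h>

static uint32_t ap_size_field(uint32_t size_mb)
{
	return (0x400u - (size_mb >> 2)) & 0x3ffu;
}

int main(void)
{
	assert(ap_size_field(4)    == 0x3ff);	/* 4MB   */
	assert(ap_size_field(8)    == 0x3fe);	/* 8MB   */
	assert(ap_size_field(32)   == 0x3f8);	/* 32MB  */
	assert(ap_size_field(256)  == 0x3c0);	/* 256MB */
	assert(ap_size_field(4096) == 0x000);	/* 4GB   */
	return 0;
}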
201
202/**
203 * tioca_fastwrite_enable - enable AGP FW for a tioca and its functions
204 * @tioca_kernel: structure representing the CA
205 *
206 * Given a CA, scan all attached functions making sure they all support
207 * FastWrite. If so, enable FastWrite for all functions and the CA itself.
208 */
209
210void
211tioca_fastwrite_enable(struct tioca_kernel *tioca_kern)
212{
213 int cap_ptr;
214 uint64_t ca_control1;
215 uint32_t reg;
216 struct tioca *tioca_base;
217 struct pci_dev *pdev;
218 struct tioca_common *common;
219
220 common = tioca_kern->ca_common;
221
222 /*
223 * Scan all VGA controllers on this bus, making sure they all
224 * support FW. If not, return.
225 */
226
227 list_for_each_entry(pdev, tioca_kern->ca_devices, bus_list) {
228 if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8))
229 continue;
230
231 cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
232 if (!cap_ptr)
233 return; /* no AGP CAP means no FW */
234
235 pci_read_config_dword(pdev, cap_ptr + PCI_AGP_STATUS, &reg);
236 if (!(reg & PCI_AGP_STATUS_FW))
237 return; /* function doesn't support FW */
238 }
239
240 /*
241 * Set FW for all VGA functions
242 */
243
244 list_for_each_entry(pdev, tioca_kern->ca_devices, bus_list) {
245 if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8))
246 continue;
247
248 cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
249 pci_read_config_dword(pdev, cap_ptr + PCI_AGP_COMMAND, &reg);
250 reg |= PCI_AGP_COMMAND_FW;
251 pci_write_config_dword(pdev, cap_ptr + PCI_AGP_COMMAND, reg);
252 }
253
254 /*
255 * Set the CA's FW to match
256 */
257
258 tioca_base = (struct tioca *)common->ca_common.bs_base;
259 ca_control1 = tioca_base->ca_control1;
260 ca_control1 |= CA_AGP_FW_ENABLE;
261 tioca_base->ca_control1 = ca_control1;
262}
263
264EXPORT_SYMBOL(tioca_fastwrite_enable); /* used by agp-sgi */
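[tioca_fastwrite_enable() uses a verify-then-commit structure so FastWrite is enabled only if every VGA function on the bus supports it. The same all-or-nothing shape in a self-contained sketch; struct fn and its fields are invented.]

#include <stdbool.h>
#include <stddef.h>

struct fn { bool supports_fw; bool fw_enabled; };

/* Pass 1 verifies every function; pass 2 commits only if pass 1 passed. */
static void enable_fw_all_or_nothing(struct fn *v, size_t n)
{
	for (size_t i = 0; i < n; i++)
		if (!v[i].supports_fw)
			return;		/* one holdout vetoes the whole bus */
	for (size_t i = 0; i < n; i++)
		v[i].fw_enabled = true;
}

int main(void)
{
	struct fn bus[2] = { { true, false }, { false, false } };
	enable_fw_all_or_nothing(bus, 2);
	return bus[0].fw_enabled;	/* 0: the second fn vetoed the enable */
}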
265
266/**
267 * tioca_dma_d64 - create a DMA mapping using 64-bit direct mode
268 * @paddr: system physical address
269 *
270 * Map @paddr into 64-bit CA bus space. No device context is necessary.
271 * Bits 53:0 come from the coretalk address. We just need to mask in the
272 * following optional bits of the 64-bit pci address:
273 *
274 * 63:60 - Coretalk Packet Type - 0x1 for Mem Get/Put (coherent)
275 * 0x2 for PIO (non-coherent)
276 * We will always use 0x1
277 * 55:55 - Swap bytes. Currently unused.
278 */
279static uint64_t
280tioca_dma_d64(unsigned long paddr)
281{
282 dma_addr_t bus_addr;
283
284 bus_addr = PHYS_TO_TIODMA(paddr);
285
286 BUG_ON(!bus_addr);
287 BUG_ON(bus_addr >> 54);
288
289 /* Set upper nibble to Cache Coherent Memory op */
290 bus_addr |= (1UL << 60);
291
292 return bus_addr;
293}
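[The d64 construction reduces to: take the coretalk address, check it fits in 54 bits, and set bit 60 to select a coherent Mem Get/Put. A sketch, where FAKE_TIODMA is an invented stand-in for PHYS_TO_TIODMA.]

#include <assert.h>
#include <stdint.h>

/* FAKE_TIODMA is an invented stand-in for PHYS_TO_TIODMA. */
#define FAKE_TIODMA(p) ((uint64_t)(p) | (1ULL << 40))

static uint64_t d64_sketch(unsigned long paddr)
{
	uint64_t bus_addr = FAKE_TIODMA(paddr);

	assert(bus_addr && !(bus_addr >> 54));	/* must fit in 54 bits */
	return bus_addr | (1ULL << 60);		/* coherent Mem Get/Put op */
}

int main(void)
{
	return (d64_sketch(0x2000) >> 60) == 0x1 ? 0 : 1;
}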
294
295/**
296 * tioca_dma_d48 - create a DMA mapping using 48-bit direct mode
297 * @pdev: linux pci_dev representing the function
298 * @paddr: system physical address
299 *
300 * Map @paddr into the 48-bit direct bus space of the CA associated with @pdev.
301 *
302 * The CA agp 48 bit direct address falls out as follows:
303 *
304 * When direct mapping AGP addresses, the 48 bit AGP address is
305 * constructed as follows:
306 *
307 * [47:40] - Low 8 bits of the page Node ID extracted from coretalk
308 * address [47:40]. The upper 8 node bits are fixed
309 * and come from the xxx register bits [5:0]
310 * [39:38] - Chiplet ID extracted from coretalk address [39:38]
311 * [37:00] - node offset extracted from coretalk address [37:00]
312 *
313 * Since the node id in general will be non-zero, and the chiplet id
314 * will always be non-zero, it follows that the device must support
315 * a dma mask of at least 0xffffffffff (40 bits) to target node 0
316 * and in general should be 0xffffffffffff (48 bits) to target nodes
317 * up to 255. Nodes above 255 need the support of the xxx register,
318 * and so a given CA can only directly target nodes in the range
319 * xxx - xxx+255.
320 */
321static uint64_t
322tioca_dma_d48(struct pci_dev *pdev, uint64_t paddr)
323{
324 struct tioca_common *tioca_common;
325 struct tioca *ca_base;
326 uint64_t ct_addr;
327 dma_addr_t bus_addr;
328 uint32_t node_upper;
329 uint64_t agp_dma_extn;
330 struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev);
331
332 tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info;
333 ca_base = (struct tioca *)tioca_common->ca_common.bs_base;
334
335 ct_addr = PHYS_TO_TIODMA(paddr);
336 if (!ct_addr)
337 return 0;
338
339 bus_addr = (dma_addr_t) (ct_addr & 0xffffffffffff);
340 node_upper = ct_addr >> 48;
341
342 if (node_upper > 64) {
343 printk(KERN_ERR "%s: coretalk addr 0x%p node id out "
344 "of range\n", __FUNCTION__, (void *)ct_addr);
345 return 0;
346 }
347
348 agp_dma_extn = ca_base->ca_agp_dma_addr_extn;
349 if (node_upper != (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT)) {
350 printk(KERN_ERR "%s: coretalk upper node (%u) "
351 "mismatch with ca_agp_dma_addr_extn (%lu)\n",
352 __FUNCTION__,
353 node_upper, (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT));
354 return 0;
355 }
356
357 return bus_addr;
358}
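[To make the mask arithmetic in the comment block concrete: with node bits at [47:40] and chiplet bits at [39:38], node 0 traffic fits under a 40-bit mask while node 255 pushes the address up to bit 47 and so needs 48 bits. A small standalone check; the field packing here is invented to match the comment, not taken from hardware docs.]

#include <assert.h>
#include <stdint.h>

/* Packing invented to match the comment: node [47:40], chiplet [39:38],
 * node offset [37:0]. */
static uint64_t d48_addr(uint32_t node, uint32_t chiplet, uint64_t offset)
{
	return ((uint64_t)(node & 0xff) << 40) |
	       ((uint64_t)(chiplet & 0x3) << 38) |
	       (offset & ((1ULL << 38) - 1));
}

int main(void)
{
	/* chiplet is always non-zero, so node 0 still needs a 40-bit mask */
	assert(d48_addr(0, 2, 0x1000) < (1ULL << 40));
	/* targeting node 255 sets bit 47, hence the 48-bit mask in general */
	assert(d48_addr(255, 2, 0x1000) < (1ULL << 48));
	assert(d48_addr(255, 2, 0x1000) >= (1ULL << 47));
	return 0;
}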
359
360/**
361 * tioca_dma_mapped - create a DMA mapping using a CA GART
362 * @pdev: linux pci_dev representing the function
363 * @paddr: host physical address to map
364 * @req_size: len (bytes) to map
365 *
366 * Map @paddr into CA address space using the GART mechanism. The mapped
367 * dma_addr_t is guaranteed to be contiguous in CA bus space.
368 */
369static dma_addr_t
370tioca_dma_mapped(struct pci_dev *pdev, uint64_t paddr, size_t req_size)
371{
372 int i, ps, ps_shift, entry, entries, mapsize, last_entry;
373 uint64_t xio_addr, end_xio_addr;
374 struct tioca_common *tioca_common;
375 struct tioca_kernel *tioca_kern;
376 dma_addr_t bus_addr = 0;
377 struct tioca_dmamap *ca_dmamap;
378 void *map;
379 unsigned long flags;
380 struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev);
381
382 tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info;
383 tioca_kern = (struct tioca_kernel *)tioca_common->ca_kernel_private;
384
385 xio_addr = PHYS_TO_TIODMA(paddr);
386 if (!xio_addr)
387 return 0;
388
389 spin_lock_irqsave(&tioca_kern->ca_lock, flags);
390
391 /*
392 * allocate a map struct
393 */
394
395 ca_dmamap = kcalloc(1, sizeof(struct tioca_dmamap), GFP_ATOMIC);
396 if (!ca_dmamap)
397 goto map_return;
398
399 /*
400 * Locate free entries that can hold req_size. Account for
401 * unaligned start/length when allocating.
402 */
403
404 ps = tioca_kern->ca_ap_pagesize; /* will be power of 2 */
405 ps_shift = ffs(ps) - 1;
406 end_xio_addr = xio_addr + req_size - 1;
407
408 entries = (end_xio_addr >> ps_shift) - (xio_addr >> ps_shift) + 1;
409
410 map = tioca_kern->ca_pcigart_pagemap;
411 mapsize = tioca_kern->ca_pcigart_entries;
412
413 entry = find_first_zero_bit(map, mapsize);
414 while (entry < mapsize) {
415 last_entry = find_next_bit(map, mapsize, entry);
416
417 if (last_entry - entry >= entries)
418 break;
419
420 entry = find_next_zero_bit(map, mapsize, last_entry);
421 }
422
423 if (entry >= mapsize)
424 goto map_return;
425
426 for (i = 0; i < entries; i++)
427 set_bit(entry + i, map);
428
429 bus_addr = tioca_kern->ca_pciap_base + (entry * ps);
430
431 ca_dmamap->cad_dma_addr = bus_addr;
432 ca_dmamap->cad_gart_size = entries;
433 ca_dmamap->cad_gart_entry = entry;
434 list_add(&ca_dmamap->cad_list, &tioca_kern->ca_list);
435
436 if (xio_addr % ps) {
437 tioca_kern->ca_pcigart[entry] = tioca_paddr_to_gart(xio_addr);
438 bus_addr += xio_addr & (ps - 1);
439 xio_addr &= ~(ps - 1);
440 xio_addr += ps;
441 entry++;
442 }
443
444 while (xio_addr < end_xio_addr) {
445 tioca_kern->ca_pcigart[entry] = tioca_paddr_to_gart(xio_addr);
446 xio_addr += ps;
447 entry++;
448 }
449
450 tioca_tlbflush(tioca_kern);
451
452map_return:
453 spin_unlock_irqrestore(&tioca_kern->ca_lock, flags);
454 return bus_addr;
455}
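[The allocation loop above is a first-fit search for a run of free GART entries over a bitmap, walking free/used boundaries with find_first_zero_bit, find_next_bit, and find_next_zero_bit. The same search modeled in plain C with a bool array, returning n as the "not found" sentinel just as the bitmap helpers return mapsize.]

#include <stdbool.h>
#include <stddef.h>

/* First index of a run of `want` free slots, or `n` if no run exists
 * (same sentinel convention as find_first_zero_bit returning the size). */
static size_t find_free_run(const bool *used, size_t n, size_t want)
{
	size_t run = 0;

	for (size_t i = 0; i < n; i++) {
		run = used[i] ? 0 : run + 1;
		if (run == want)
			return i + 1 - want;
	}
	return n;
}

int main(void)
{
	bool map[8] = { true, false, true, false, false, false, true, false };
	return find_free_run(map, 8, 3) == 3 ? 0 : 1;
}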
456
457/**
458 * tioca_dma_unmap - release CA mapping resources
459 * @pdev: linux pci_dev representing the function
460 * @bus_addr: bus address returned by an earlier tioca_dma_map
461 * @dir: mapping direction (unused)
462 *
463 * Locate mapping resources associated with @bus_addr and release them.
464 * For mappings created using the direct modes (64 or 48) there are no
465 * resources to release.
466 */
467void
468tioca_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
469{
470 int i, entry;
471 struct tioca_common *tioca_common;
472 struct tioca_kernel *tioca_kern;
473 struct tioca_dmamap *map;
474 struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev);
475 unsigned long flags;
476
477 tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info;
478 tioca_kern = (struct tioca_kernel *)tioca_common->ca_kernel_private;
479
480 /* return straight away if this isn't a mapped address */
481
482 if (bus_addr < tioca_kern->ca_pciap_base ||
483 bus_addr >= (tioca_kern->ca_pciap_base + tioca_kern->ca_pciap_size))
484 return;
485
486 spin_lock_irqsave(&tioca_kern->ca_lock, flags);
487
488 list_for_each_entry(map, &tioca_kern->ca_dmamaps, cad_list)
489 if (map->cad_dma_addr == bus_addr)
490 break;
491
492 BUG_ON(map == NULL);
493
494 entry = map->cad_gart_entry;
495
496 for (i = 0; i < map->cad_gart_size; i++, entry++) {
497 clear_bit(entry, tioca_kern->ca_pcigart_pagemap);
498 tioca_kern->ca_pcigart[entry] = 0;
499 }
500 tioca_tlbflush(tioca_kern);
501
502 list_del(&map->cad_list);
503 spin_unlock_irqrestore(&tioca_kern->ca_lock, flags);
504 kfree(map);
505}
506
507/**
508 * tioca_dma_map - map pages for PCI DMA
509 * @pdev: linux pci_dev representing the function
510 * @paddr: host physical address to map
511 * @byte_count: bytes to map
512 *
513 * This is the main wrapper for mapping host physical pages to CA PCI space.
514 * The mapping mode used is based on the device's dma_mask. As a last resort
515 * use the GART mapped mode.
516 */
517uint64_t
518tioca_dma_map(struct pci_dev *pdev, uint64_t paddr, size_t byte_count)
519{
520 uint64_t mapaddr;
521
522 /*
523 * If the card is 64- or 48-bit addressable, use a direct mapping. 32
524 * bit direct is so restrictive w.r.t. where the memory resides that
525 * we don't use it even though CA has some support.
526 */
527
528 if (pdev->dma_mask == ~0UL)
529 mapaddr = tioca_dma_d64(paddr);
530 else if (pdev->dma_mask == 0xffffffffffffUL)
531 mapaddr = tioca_dma_d48(pdev, paddr);
532 else
533 mapaddr = 0;
534
535 /* Last resort ... use PCI portion of CA GART */
536
537 if (mapaddr == 0)
538 mapaddr = tioca_dma_mapped(pdev, paddr, byte_count);
539
540 return mapaddr;
541}
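[In table form, tioca_dma_map() selects: full 64-bit mask -> d64, exact 48-bit mask -> d48, anything else gets no direct mapping; and whenever the direct attempt yields 0, it falls back to the GART. A sketch of the selection only; ca_path is invented.]

#include <stdint.h>
#include <stdio.h>

/* ca_path is invented; it reports which branch tioca_dma_map() takes
 * before any GART fallback. */
static const char *ca_path(uint64_t dma_mask)
{
	if (dma_mask == ~0ULL)
		return "d64 direct";
	if (dma_mask == 0xffffffffffffULL)
		return "d48 direct";
	return "GART only";
}

int main(void)
{
	printf("%s\n", ca_path(~0ULL));			/* 64-bit card */
	printf("%s\n", ca_path(0xffffffffffffULL));	/* 48-bit card */
	printf("%s\n", ca_path(0xffffffffULL));		/* 32-bit card */
	return 0;
}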
542
543/**
544 * tioca_error_intr_handler - SGI TIO CA error interrupt handler
545 * @irq: unused
546 * @arg: pointer to tioca_common struct for the given CA
547 * @pt: unused
548 *
549 * Handle a CA error interrupt. Simply a wrapper around a SAL call which
550 * defers processing to the SGI prom.
551 */
552static irqreturn_t
553tioca_error_intr_handler(int irq, void *arg, struct pt_regs *pt)
554{
555 struct tioca_common *soft = arg;
556 struct ia64_sal_retval ret_stuff;
557 uint64_t segment;
558 uint64_t busnum;
559 ret_stuff.status = 0;
560 ret_stuff.v0 = 0;
561
562 segment = 0;
563 busnum = soft->ca_common.bs_persist_busnum;
564
565 SAL_CALL_NOLOCK(ret_stuff,
566 (u64) SN_SAL_IOIF_ERROR_INTERRUPT,
567 segment, busnum, 0, 0, 0, 0, 0);
568
569 return IRQ_HANDLED;
570}
571
572/**
573 * tioca_bus_fixup - perform final PCI fixup for a TIO CA bus
574 * @prom_bussoft: Common prom/kernel struct representing the bus
575 *
576 * Replicates the tioca_common pointed to by @prom_bussoft in kernel
577 * space. Allocates and initializes a kernel-only area for a given CA,
578 * and sets up an irq for handling CA error interrupts.
579 *
580 * On successful setup, returns the kernel version of tioca_common back to
581 * the caller.
582 */
583void *
584tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft)
585{
586 struct tioca_common *tioca_common;
587 struct tioca_kernel *tioca_kern;
588 struct pci_bus *bus;
589
590 /* sanity check prom rev */
591
592 if (sn_sal_rev_major() < 4 ||
593 (sn_sal_rev_major() == 4 && sn_sal_rev_minor() < 6)) {
594 printk
595 (KERN_ERR "%s: SGI prom rev 4.06 or greater required "
596 "for tioca support\n", __FUNCTION__);
597 return NULL;
598 }
599
600 /*
601 * Allocate kernel bus soft and copy from prom.
602 */
603
604 tioca_common = kcalloc(1, sizeof(struct tioca_common), GFP_KERNEL);
605 if (!tioca_common)
606 return NULL;
607
608 memcpy(tioca_common, prom_bussoft, sizeof(struct tioca_common));
609 tioca_common->ca_common.bs_base |= __IA64_UNCACHED_OFFSET;
610
611 /* init kernel-private area */
612
613 tioca_kern = kcalloc(1, sizeof(struct tioca_kernel), GFP_KERNEL);
614 if (!tioca_kern) {
615 kfree(tioca_common);
616 return NULL;
617 }
618
619 tioca_kern->ca_common = tioca_common;
620 spin_lock_init(&tioca_kern->ca_lock);
621 INIT_LIST_HEAD(&tioca_kern->ca_dmamaps);
622 tioca_kern->ca_closest_node =
623 nasid_to_cnodeid(tioca_common->ca_closest_nasid);
624 tioca_common->ca_kernel_private = (uint64_t) tioca_kern;
625
626 bus = pci_find_bus(0, tioca_common->ca_common.bs_persist_busnum);
627 BUG_ON(!bus);
628 tioca_kern->ca_devices = &bus->devices;
629
630 /* init GART */
631
632 if (tioca_gart_init(tioca_kern) < 0) {
633 kfree(tioca_kern);
634 kfree(tioca_common);
635 return NULL;
636 }
637
638 tioca_gart_found++;
639 list_add(&tioca_kern->ca_list, &tioca_list);
640
641 if (request_irq(SGI_TIOCA_ERROR,
642 tioca_error_intr_handler,
643 SA_SHIRQ, "TIOCA error", (void *)tioca_common))
644 printk(KERN_WARNING
645 "%s: Unable to get irq %d. "
646 "Error interrupts won't be routed for TIOCA bus %d\n",
647 __FUNCTION__, SGI_TIOCA_ERROR,
648 (int)tioca_common->ca_common.bs_persist_busnum);
649
650 return tioca_common;
651}
652
653static struct sn_pcibus_provider tioca_pci_interfaces = {
654 .dma_map = tioca_dma_map,
655 .dma_map_consistent = tioca_dma_map,
656 .dma_unmap = tioca_dma_unmap,
657 .bus_fixup = tioca_bus_fixup,
658};
659
660/**
661 * tioca_init_provider - init SN PCI provider ops for TIO CA
662 */
663int
664tioca_init_provider(void)
665{
666 sn_pci_provider[PCIIO_ASIC_TYPE_TIOCA] = &tioca_pci_interfaces;
667 return 0;
668}