-rw-r--r--  arch/ia64/sn/kernel/io_init.c               |   9
-rw-r--r--  arch/ia64/sn/kernel/irq.c                   | 135
-rw-r--r--  arch/ia64/sn/pci/pci_dma.c                  |  10
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_dma.c          |  62
-rw-r--r--  arch/ia64/sn/pci/tioca_provider.c           |   8
-rw-r--r--  arch/ia64/sn/pci/tioce_provider.c           |  65
-rw-r--r--  drivers/pci/msi-altix.c                     | 200
-rw-r--r--  include/asm-ia64/sn/intr.h                  |   8
-rw-r--r--  include/asm-ia64/sn/pcibr_provider.h        |   5
-rw-r--r--  include/asm-ia64/sn/pcibus_provider_defs.h  |  17
-rw-r--r--  include/asm-ia64/sn/tiocp.h                 |   3
11 files changed, 401 insertions(+), 121 deletions(-)
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c
index 5101ac462643..dc09a6a28a37 100644
--- a/arch/ia64/sn/kernel/io_init.c
+++ b/arch/ia64/sn/kernel/io_init.c
@@ -58,7 +58,7 @@ static int max_pcibus_number = 255; /* Default highest pci bus number */
  */

 static dma_addr_t
-sn_default_pci_map(struct pci_dev *pdev, unsigned long paddr, size_t size)
+sn_default_pci_map(struct pci_dev *pdev, unsigned long paddr, size_t size, int type)
 {
     return 0;
 }
@@ -457,13 +457,6 @@ void sn_pci_fixup_slot(struct pci_dev *dev)
         pcidev_info->pdi_sn_irq_info = NULL;
         kfree(sn_irq_info);
     }
-
-    /*
-     * MSI currently not supported on altix.  Remove this when
-     * the MSI abstraction patches are integrated into the kernel
-     * (sometime after 2.6.16 releases)
-     */
-    dev->no_msi = 1;
 }

 /*
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
index db187f5cdae1..dc8e2b696713 100644
--- a/arch/ia64/sn/kernel/irq.c
+++ b/arch/ia64/sn/kernel/irq.c
@@ -26,11 +26,11 @@ static void unregister_intr_pda(struct sn_irq_info *sn_irq_info);

 int sn_force_interrupt_flag = 1;
 extern int sn_ioif_inited;
-static struct list_head **sn_irq_lh;
+struct list_head **sn_irq_lh;
 static spinlock_t sn_irq_info_lock = SPIN_LOCK_UNLOCKED; /* non-IRQ lock */

-static inline u64 sn_intr_alloc(nasid_t local_nasid, int local_widget,
-                u64 sn_irq_info,
+u64 sn_intr_alloc(nasid_t local_nasid, int local_widget,
+                struct sn_irq_info *sn_irq_info,
                 int req_irq, nasid_t req_nasid,
                 int req_slice)
 {
@@ -40,12 +40,13 @@ static inline u64 sn_intr_alloc(nasid_t local_nasid, int local_widget,

     SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
             (u64) SAL_INTR_ALLOC, (u64) local_nasid,
-            (u64) local_widget, (u64) sn_irq_info, (u64) req_irq,
+            (u64) local_widget, __pa(sn_irq_info), (u64) req_irq,
             (u64) req_nasid, (u64) req_slice);
+
     return ret_stuff.status;
 }

-static inline void sn_intr_free(nasid_t local_nasid, int local_widget,
+void sn_intr_free(nasid_t local_nasid, int local_widget,
                 struct sn_irq_info *sn_irq_info)
 {
     struct ia64_sal_retval ret_stuff;
@@ -112,73 +113,91 @@ static void sn_end_irq(unsigned int irq)

 static void sn_irq_info_free(struct rcu_head *head);

-static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
+struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info,
+                                       nasid_t nasid, int slice)
 {
-    struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
-    int cpuid, cpuphys;
+    int vector;
+    int cpuphys;
+    int64_t bridge;
+    int local_widget, status;
+    nasid_t local_nasid;
+    struct sn_irq_info *new_irq_info;
+    struct sn_pcibus_provider *pci_provider;

-    cpuid = first_cpu(mask);
-    cpuphys = cpu_physical_id(cpuid);
+    new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC);
+    if (new_irq_info == NULL)
+        return NULL;

-    list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
-                 sn_irq_lh[irq], list) {
-        u64 bridge;
-        int local_widget, status;
-        nasid_t local_nasid;
-        struct sn_irq_info *new_irq_info;
-        struct sn_pcibus_provider *pci_provider;
-
-        new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC);
-        if (new_irq_info == NULL)
-            break;
-        memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info));
-
-        bridge = (u64) new_irq_info->irq_bridge;
-        if (!bridge) {
-            kfree(new_irq_info);
-            break; /* irq is not a device interrupt */
-        }
+    memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info));

-        local_nasid = NASID_GET(bridge);
+    bridge = (u64) new_irq_info->irq_bridge;
+    if (!bridge) {
+        kfree(new_irq_info);
+        return NULL; /* irq is not a device interrupt */
+    }

-        if (local_nasid & 1)
-            local_widget = TIO_SWIN_WIDGETNUM(bridge);
-        else
-            local_widget = SWIN_WIDGETNUM(bridge);
+    local_nasid = NASID_GET(bridge);

-        /* Free the old PROM new_irq_info structure */
-        sn_intr_free(local_nasid, local_widget, new_irq_info);
-        /* Update kernels new_irq_info with new target info */
-        unregister_intr_pda(new_irq_info);
+    if (local_nasid & 1)
+        local_widget = TIO_SWIN_WIDGETNUM(bridge);
+    else
+        local_widget = SWIN_WIDGETNUM(bridge);

-        /* allocate a new PROM new_irq_info struct */
-        status = sn_intr_alloc(local_nasid, local_widget,
-                       __pa(new_irq_info), irq,
-                       cpuid_to_nasid(cpuid),
-                       cpuid_to_slice(cpuid));
+    vector = sn_irq_info->irq_irq;
+    /* Free the old PROM new_irq_info structure */
+    sn_intr_free(local_nasid, local_widget, new_irq_info);
+    /* Update kernels new_irq_info with new target info */
+    unregister_intr_pda(new_irq_info);

-        /* SAL call failed */
-        if (status) {
-            kfree(new_irq_info);
-            break;
-        }
+    /* allocate a new PROM new_irq_info struct */
+    status = sn_intr_alloc(local_nasid, local_widget,
+                   new_irq_info, vector,
+                   nasid, slice);
+
+    /* SAL call failed */
+    if (status) {
+        kfree(new_irq_info);
+        return NULL;
+    }

-        new_irq_info->irq_cpuid = cpuid;
-        register_intr_pda(new_irq_info);
+    cpuphys = nasid_slice_to_cpuid(nasid, slice);
+    new_irq_info->irq_cpuid = cpuphys;
+    register_intr_pda(new_irq_info);

-        pci_provider = sn_pci_provider[new_irq_info->irq_bridge_type];
-        if (pci_provider && pci_provider->target_interrupt)
-            (pci_provider->target_interrupt)(new_irq_info);
+    pci_provider = sn_pci_provider[new_irq_info->irq_bridge_type];
+
+    /*
+     * If this represents a line interrupt, target it.  If it's
+     * an msi (irq_int_bit < 0), it's already targeted.
+     */
+    if (new_irq_info->irq_int_bit >= 0 &&
+        pci_provider && pci_provider->target_interrupt)
+        (pci_provider->target_interrupt)(new_irq_info);

-        spin_lock(&sn_irq_info_lock);
-        list_replace_rcu(&sn_irq_info->list, &new_irq_info->list);
-        spin_unlock(&sn_irq_info_lock);
-        call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
+    spin_lock(&sn_irq_info_lock);
+    list_replace_rcu(&sn_irq_info->list, &new_irq_info->list);
+    spin_unlock(&sn_irq_info_lock);
+    call_rcu(&sn_irq_info->rcu, sn_irq_info_free);

 #ifdef CONFIG_SMP
-    set_irq_affinity_info((irq & 0xff), cpuphys, 0);
+    set_irq_affinity_info((vector & 0xff), cpuphys, 0);
 #endif
-    }
+
+    return new_irq_info;
+}
+
+static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
+{
+    struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
+    nasid_t nasid;
+    int slice;
+
+    nasid = cpuid_to_nasid(first_cpu(mask));
+    slice = cpuid_to_slice(first_cpu(mask));
+
+    list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
+                 sn_irq_lh[irq], list)
+        (void)sn_retarget_vector(sn_irq_info, nasid, slice);
 }

 struct hw_interrupt_type irq_type_sn = {
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index b4b84c269210..7a291a271511 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -11,7 +11,7 @@

 #include <linux/module.h>
 #include <asm/dma.h>
-#include <asm/sn/pcibr_provider.h>
+#include <asm/sn/intr.h>
 #include <asm/sn/pcibus_provider_defs.h>
 #include <asm/sn/pcidev.h>
 #include <asm/sn/sn_sal.h>
@@ -113,7 +113,8 @@ void *sn_dma_alloc_coherent(struct device *dev, size_t size,
      * resources.
      */

-    *dma_handle = provider->dma_map_consistent(pdev, phys_addr, size);
+    *dma_handle = provider->dma_map_consistent(pdev, phys_addr, size,
+                           SN_DMA_ADDR_PHYS);
     if (!*dma_handle) {
         printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
         free_pages((unsigned long)cpuaddr, get_order(size));
@@ -176,7 +177,7 @@ dma_addr_t sn_dma_map_single(struct device *dev, void *cpu_addr, size_t size,
     BUG_ON(dev->bus != &pci_bus_type);

     phys_addr = __pa(cpu_addr);
-    dma_addr = provider->dma_map(pdev, phys_addr, size);
+    dma_addr = provider->dma_map(pdev, phys_addr, size, SN_DMA_ADDR_PHYS);
     if (!dma_addr) {
         printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
         return 0;
@@ -260,7 +261,8 @@ int sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
     for (i = 0; i < nhwentries; i++, sg++) {
         phys_addr = SG_ENT_PHYS_ADDRESS(sg);
         sg->dma_address = provider->dma_map(pdev,
-                            phys_addr, sg->length);
+                            phys_addr, sg->length,
+                            SN_DMA_ADDR_PHYS);

         if (!sg->dma_address) {
             printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_dma.c b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
index 9f86bb6519aa..a86c7b945962 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_dma.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
@@ -41,7 +41,7 @@ extern int sn_ioif_inited;

 static dma_addr_t
 pcibr_dmamap_ate32(struct pcidev_info *info,
-           u64 paddr, size_t req_size, u64 flags)
+           u64 paddr, size_t req_size, u64 flags, int dma_flags)
 {

     struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info;
@@ -81,9 +81,12 @@ pcibr_dmamap_ate32(struct pcidev_info *info,
     if (IS_PCIX(pcibus_info))
         ate_flags &= ~(PCI32_ATE_PREF);

-    xio_addr =
-        IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
-        PHYS_TO_TIODMA(paddr);
+    if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
+        xio_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
+                   PHYS_TO_TIODMA(paddr);
+    else
+        xio_addr = paddr;
+
     offset = IOPGOFF(xio_addr);
     ate = ate_flags | (xio_addr - offset);

@@ -91,6 +94,13 @@ pcibr_dmamap_ate32(struct pcidev_info *info,
     if (IS_PIC_SOFT(pcibus_info)) {
         ate |= (pcibus_info->pbi_hub_xid << PIC_ATE_TARGETID_SHFT);
     }
+
+    /*
+     * If we're mapping for MSI, set the MSI bit in the ATE
+     */
+    if (dma_flags & SN_DMA_MSI)
+        ate |= PCI32_ATE_MSI;
+
     ate_write(pcibus_info, ate_index, ate_count, ate);

     /*
@@ -105,20 +115,27 @@ pcibr_dmamap_ate32(struct pcidev_info *info,
     if (pcibus_info->pbi_devreg[internal_device] & PCIBR_DEV_SWAP_DIR)
         ATE_SWAP_ON(pci_addr);

+
     return pci_addr;
 }

 static dma_addr_t
 pcibr_dmatrans_direct64(struct pcidev_info * info, u64 paddr,
-            u64 dma_attributes)
+            u64 dma_attributes, int dma_flags)
 {
     struct pcibus_info *pcibus_info = (struct pcibus_info *)
         ((info->pdi_host_pcidev_info)->pdi_pcibus_info);
     u64 pci_addr;

     /* Translate to Crosstalk View of Physical Address */
-    pci_addr = (IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
-            PHYS_TO_TIODMA(paddr)) | dma_attributes;
+    if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
+        pci_addr = IS_PIC_SOFT(pcibus_info) ?
+            PHYS_TO_DMA(paddr) :
+            PHYS_TO_TIODMA(paddr) | dma_attributes;
+    else
+        pci_addr = IS_PIC_SOFT(pcibus_info) ?
+            paddr :
+            paddr | dma_attributes;

     /* Handle Bus mode */
     if (IS_PCIX(pcibus_info))
@@ -130,7 +147,9 @@ pcibr_dmatrans_direct64(struct pcidev_info * info, u64 paddr,
         ((u64) pcibus_info->
          pbi_hub_xid << PIC_PCI64_ATTR_TARG_SHFT);
     } else
-        pci_addr |= TIOCP_PCI64_CMDTYPE_MEM;
+        pci_addr |= (dma_flags & SN_DMA_MSI) ?
+                TIOCP_PCI64_CMDTYPE_MSI :
+                TIOCP_PCI64_CMDTYPE_MEM;

     /* If PCI mode, func zero uses VCHAN0, every other func uses VCHAN1 */
     if (!IS_PCIX(pcibus_info) && PCI_FUNC(info->pdi_linux_pcidev->devfn))
@@ -141,7 +160,7 @@ pcibr_dmatrans_direct64(struct pcidev_info * info, u64 paddr,

 static dma_addr_t
 pcibr_dmatrans_direct32(struct pcidev_info * info,
-            u64 paddr, size_t req_size, u64 flags)
+            u64 paddr, size_t req_size, u64 flags, int dma_flags)
 {
     struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info;
     struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info->
@@ -156,8 +175,14 @@ pcibr_dmatrans_direct32(struct pcidev_info * info,
         return 0;
     }

-    xio_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
-        PHYS_TO_TIODMA(paddr);
+    if (dma_flags & SN_DMA_MSI)
+        return 0;
+
+    if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
+        xio_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
+                   PHYS_TO_TIODMA(paddr);
+    else
+        xio_addr = paddr;

     xio_base = pcibus_info->pbi_dir_xbase;
     offset = xio_addr - xio_base;
@@ -327,7 +352,7 @@ void sn_dma_flush(u64 addr)
  */

 dma_addr_t
-pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size)
+pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size, int dma_flags)
 {
     dma_addr_t dma_handle;
     struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
@@ -344,11 +369,11 @@ pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size)
      */

         dma_handle = pcibr_dmatrans_direct64(pcidev_info, phys_addr,
-                             PCI64_ATTR_PREF);
+                             PCI64_ATTR_PREF, dma_flags);
     } else {
         /* Handle 32-63 bit cards via direct mapping */
         dma_handle = pcibr_dmatrans_direct32(pcidev_info, phys_addr,
-                             size, 0);
+                             size, 0, dma_flags);
         if (!dma_handle) {
             /*
              * It is a 32 bit card and we cannot do direct mapping,
@@ -356,7 +381,8 @@ pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size)
              */

             dma_handle = pcibr_dmamap_ate32(pcidev_info, phys_addr,
-                            size, PCI32_ATE_PREF);
+                            size, PCI32_ATE_PREF,
+                            dma_flags);
         }
     }

@@ -365,18 +391,18 @@ pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size)

 dma_addr_t
 pcibr_dma_map_consistent(struct pci_dev * hwdev, unsigned long phys_addr,
-             size_t size)
+             size_t size, int dma_flags)
 {
     dma_addr_t dma_handle;
     struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);

     if (hwdev->dev.coherent_dma_mask == ~0UL) {
         dma_handle = pcibr_dmatrans_direct64(pcidev_info, phys_addr,
-                             PCI64_ATTR_BAR);
+                             PCI64_ATTR_BAR, dma_flags);
     } else {
         dma_handle = (dma_addr_t) pcibr_dmamap_ate32(pcidev_info,
                                  phys_addr, size,
-                                 PCI32_ATE_BAR);
+                                 PCI32_ATE_BAR, dma_flags);
     }

     return dma_handle;
diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c
index be0176912968..20de72791b97 100644
--- a/arch/ia64/sn/pci/tioca_provider.c
+++ b/arch/ia64/sn/pci/tioca_provider.c
@@ -515,11 +515,17 @@ tioca_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
  * use the GART mapped mode.
  */
 static u64
-tioca_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count)
+tioca_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags)
 {
     u64 mapaddr;

     /*
+     * Not supported for now ...
+     */
+    if (dma_flags & SN_DMA_MSI)
+        return 0;
+
+    /*
      * If card is 64 or 48 bit addresable, use a direct mapping.  32
      * bit direct is so restrictive w.r.t. where the memory resides that
      * we don't use it even though CA has some support.
diff --git a/arch/ia64/sn/pci/tioce_provider.c b/arch/ia64/sn/pci/tioce_provider.c
index 833295624e5d..4cac7bdc7c7f 100644
--- a/arch/ia64/sn/pci/tioce_provider.c
+++ b/arch/ia64/sn/pci/tioce_provider.c
@@ -170,7 +170,8 @@ tioce_mmr_war_post(struct tioce_kernel *kern, void *mmr_addr)
     (ATE_PAGE((start)+(len)-1, pagesize) - ATE_PAGE(start, pagesize) + 1)

 #define ATE_VALID(ate)    ((ate) & (1UL << 63))
-#define ATE_MAKE(addr, ps)    (((addr) & ~ATE_PAGEMASK(ps)) | (1UL << 63))
+#define ATE_MAKE(addr, ps, msi) \
+    (((addr) & ~ATE_PAGEMASK(ps)) | (1UL << 63) | ((msi)?(1UL << 62):0))

 /*
  * Flavors of ate-based mapping supported by tioce_alloc_map()
@@ -196,15 +197,17 @@ tioce_mmr_war_post(struct tioce_kernel *kern, void *mmr_addr)
  *
  *      63    - must be 1 to indicate d64 mode to CE hardware
  *      62    - barrier bit ... controlled with tioce_dma_barrier()
- *      61    - 0 since this is not an MSI transaction
+ *      61    - msi bit ... specified through dma_flags
  *      60:54 - reserved, MBZ
  */
 static u64
-tioce_dma_d64(unsigned long ct_addr)
+tioce_dma_d64(unsigned long ct_addr, int dma_flags)
 {
     u64 bus_addr;

     bus_addr = ct_addr | (1UL << 63);
+    if (dma_flags & SN_DMA_MSI)
+        bus_addr |= (1UL << 61);

     return bus_addr;
 }
@@ -261,7 +264,7 @@ pcidev_to_tioce(struct pci_dev *pdev, struct tioce **base,
  */
 static u64
 tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
-        u64 ct_addr, int len)
+        u64 ct_addr, int len, int dma_flags)
 {
     int i;
     int j;
@@ -270,6 +273,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
     int entries;
     int nates;
     u64 pagesize;
+    int msi_capable, msi_wanted;
     u64 *ate_shadow;
     u64 *ate_reg;
     u64 addr;
@@ -291,6 +295,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
         ate_reg = ce_mmr->ce_ure_ate3240;
         pagesize = ce_kern->ce_ate3240_pagesize;
         bus_base = TIOCE_M32_MIN;
+        msi_capable = 1;
         break;
     case TIOCE_ATE_M40:
         first = 0;
@@ -299,6 +304,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
         ate_reg = ce_mmr->ce_ure_ate40;
         pagesize = MB(64);
         bus_base = TIOCE_M40_MIN;
+        msi_capable = 0;
         break;
     case TIOCE_ATE_M40S:
         /*
@@ -311,11 +317,16 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
         ate_reg = ce_mmr->ce_ure_ate3240;
         pagesize = GB(16);
         bus_base = TIOCE_M40S_MIN;
+        msi_capable = 0;
         break;
     default:
         return 0;
     }

+    msi_wanted = dma_flags & SN_DMA_MSI;
+    if (msi_wanted && !msi_capable)
+        return 0;
+
     nates = ATE_NPAGES(ct_addr, len, pagesize);
     if (nates > entries)
         return 0;
@@ -344,7 +355,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
     for (j = 0; j < nates; j++) {
         u64 ate;

-        ate = ATE_MAKE(addr, pagesize);
+        ate = ATE_MAKE(addr, pagesize, msi_wanted);
         ate_shadow[i + j] = ate;
         tioce_mmr_storei(ce_kern, &ate_reg[i + j], ate);
         addr += pagesize;
@@ -371,7 +382,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
  * Map @paddr into 32-bit bus space of the CE associated with @pcidev_info.
  */
 static u64
-tioce_dma_d32(struct pci_dev *pdev, u64 ct_addr)
+tioce_dma_d32(struct pci_dev *pdev, u64 ct_addr, int dma_flags)
 {
     int dma_ok;
     int port;
@@ -381,6 +392,9 @@ tioce_dma_d32(struct pci_dev *pdev, u64 ct_addr)
     u64 ct_lower;
     dma_addr_t bus_addr;

+    if (dma_flags & SN_DMA_MSI)
+        return 0;
+
     ct_upper = ct_addr & ~0x3fffffffUL;
     ct_lower = ct_addr & 0x3fffffffUL;

@@ -507,7 +521,7 @@ tioce_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
  */
 static u64
 tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count,
-         int barrier)
+         int barrier, int dma_flags)
 {
     unsigned long flags;
     u64 ct_addr;
@@ -523,15 +537,18 @@ tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count,
     if (dma_mask < 0x7fffffffUL)
         return 0;

-    ct_addr = PHYS_TO_TIODMA(paddr);
+    if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
+        ct_addr = PHYS_TO_TIODMA(paddr);
+    else
+        ct_addr = paddr;

     /*
      * If the device can generate 64 bit addresses, create a D64 map.
-     * Since this should never fail, bypass the rest of the checks.
      */
     if (dma_mask == ~0UL) {
-        mapaddr = tioce_dma_d64(ct_addr);
-        goto dma_map_done;
+        mapaddr = tioce_dma_d64(ct_addr, dma_flags);
+        if (mapaddr)
+            goto dma_map_done;
     }

     pcidev_to_tioce(pdev, NULL, &ce_kern, &port);
@@ -574,18 +591,22 @@ tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count,

         if (byte_count > MB(64)) {
             mapaddr = tioce_alloc_map(ce_kern, TIOCE_ATE_M40S,
-                          port, ct_addr, byte_count);
+                          port, ct_addr, byte_count,
+                          dma_flags);
             if (!mapaddr)
                 mapaddr =
                     tioce_alloc_map(ce_kern, TIOCE_ATE_M40, -1,
-                            ct_addr, byte_count);
+                            ct_addr, byte_count,
+                            dma_flags);
         } else {
             mapaddr = tioce_alloc_map(ce_kern, TIOCE_ATE_M40, -1,
-                          ct_addr, byte_count);
+                          ct_addr, byte_count,
+                          dma_flags);
             if (!mapaddr)
                 mapaddr =
                     tioce_alloc_map(ce_kern, TIOCE_ATE_M40S,
-                            port, ct_addr, byte_count);
+                            port, ct_addr, byte_count,
+                            dma_flags);
         }
     }

@@ -593,7 +614,7 @@ tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count,
      * 32-bit direct is the next mode to try
      */
     if (!mapaddr && dma_mask >= 0xffffffffUL)
-        mapaddr = tioce_dma_d32(pdev, ct_addr);
+        mapaddr = tioce_dma_d32(pdev, ct_addr, dma_flags);

     /*
      * Last resort, try 32-bit ATE-based map.
@@ -601,7 +622,7 @@ tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count,
     if (!mapaddr)
         mapaddr =
             tioce_alloc_map(ce_kern, TIOCE_ATE_M32, -1, ct_addr,
-                    byte_count);
+                    byte_count, dma_flags);

     spin_unlock_irqrestore(&ce_kern->ce_lock, flags);

@@ -622,9 +643,9 @@ dma_map_done:
  * in the address.
  */
 static u64
-tioce_dma(struct pci_dev *pdev, u64 paddr, size_t byte_count)
+tioce_dma(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags)
 {
-    return tioce_do_dma_map(pdev, paddr, byte_count, 0);
+    return tioce_do_dma_map(pdev, paddr, byte_count, 0, dma_flags);
 }

 /**
@@ -636,9 +657,9 @@ tioce_dma(struct pci_dev *pdev, u64 paddr, size_t byte_count)
  * Simply call tioce_do_dma_map() to create a map with the barrier bit set
  * in the address.
  */ static u64
-tioce_dma_consistent(struct pci_dev *pdev, u64 paddr, size_t byte_count)
+tioce_dma_consistent(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags)
 {
-    return tioce_do_dma_map(pdev, paddr, byte_count, 1);
+    return tioce_do_dma_map(pdev, paddr, byte_count, 1, dma_flags);
 }

 /**
@@ -696,7 +717,7 @@ tioce_reserve_m32(struct tioce_kernel *ce_kern, u64 base, u64 limit)
     while (ate_index <= last_ate) {
         u64 ate;

-        ate = ATE_MAKE(0xdeadbeef, ps);
+        ate = ATE_MAKE(0xdeadbeef, ps, 0);
         ce_kern->ce_ate3240_shadow[ate_index] = ate;
         tioce_mmr_storei(ce_kern, &ce_mmr->ce_ure_ate3240[ate_index],
                  ate);
diff --git a/drivers/pci/msi-altix.c b/drivers/pci/msi-altix.c
index 9bd240602c1e..bed4183a5e39 100644
--- a/drivers/pci/msi-altix.c
+++ b/drivers/pci/msi-altix.c
@@ -6,13 +6,205 @@
  * Copyright (C) 2006 Silicon Graphics, Inc.  All Rights Reserved.
  */

-#include <asm/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/cpumask.h>
+
+#include <asm/sn/addrs.h>
+#include <asm/sn/intr.h>
+#include <asm/sn/pcibus_provider_defs.h>
+#include <asm/sn/pcidev.h>
+#include <asm/sn/nodepda.h>
+
+#include "msi.h"
+
+struct sn_msi_info {
+    u64 pci_addr;
+    struct sn_irq_info *sn_irq_info;
+};
+
+static struct sn_msi_info *sn_msi_info;
+
+static void
+sn_msi_teardown(unsigned int vector)
+{
+    nasid_t nasid;
+    int widget;
+    struct pci_dev *pdev;
+    struct pcidev_info *sn_pdev;
+    struct sn_irq_info *sn_irq_info;
+    struct pcibus_bussoft *bussoft;
+    struct sn_pcibus_provider *provider;
+
+    sn_irq_info = sn_msi_info[vector].sn_irq_info;
+    if (sn_irq_info == NULL || sn_irq_info->irq_int_bit >= 0)
+        return;
+
+    sn_pdev = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
+    pdev = sn_pdev->pdi_linux_pcidev;
+    provider = SN_PCIDEV_BUSPROVIDER(pdev);
+
+    (*provider->dma_unmap)(pdev,
+                   sn_msi_info[vector].pci_addr,
+                   PCI_DMA_FROMDEVICE);
+    sn_msi_info[vector].pci_addr = 0;
+
+    bussoft = SN_PCIDEV_BUSSOFT(pdev);
+    nasid = NASID_GET(bussoft->bs_base);
+    widget = (nasid & 1) ?
+        TIO_SWIN_WIDGETNUM(bussoft->bs_base) :
+        SWIN_WIDGETNUM(bussoft->bs_base);
+
+    sn_intr_free(nasid, widget, sn_irq_info);
+    sn_msi_info[vector].sn_irq_info = NULL;
+
+    return;
+}

 int
-sn_msi_init(void)
+sn_msi_setup(struct pci_dev *pdev, unsigned int vector,
+         u32 *addr_hi, u32 *addr_lo, u32 *data)
 {
+    int widget;
+    int status;
+    nasid_t nasid;
+    u64 bus_addr;
+    struct sn_irq_info *sn_irq_info;
+    struct pcibus_bussoft *bussoft = SN_PCIDEV_BUSSOFT(pdev);
+    struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
+
+    if (bussoft == NULL)
+        return -EINVAL;
+
+    if (provider == NULL || provider->dma_map_consistent == NULL)
+        return -EINVAL;
+
+    /*
+     * Set up the vector plumbing.  Let the prom (via sn_intr_alloc)
+     * decide which cpu to direct this msi at by default.
+     */
+
+    nasid = NASID_GET(bussoft->bs_base);
+    widget = (nasid & 1) ?
+        TIO_SWIN_WIDGETNUM(bussoft->bs_base) :
+        SWIN_WIDGETNUM(bussoft->bs_base);
+
+    sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
+    if (! sn_irq_info)
+        return -ENOMEM;
+
+    status = sn_intr_alloc(nasid, widget, sn_irq_info, vector, -1, -1);
+    if (status) {
+        kfree(sn_irq_info);
+        return -ENOMEM;
+    }
+
+    sn_irq_info->irq_int_bit = -1;    /* mark this as an MSI irq */
+    sn_irq_fixup(pdev, sn_irq_info);
+
+    /* Prom probably should fill these in, but doesn't ... */
+    sn_irq_info->irq_bridge_type = bussoft->bs_asic_type;
+    sn_irq_info->irq_bridge = (void *)bussoft->bs_base;
+
     /*
-     * return error until MSI is supported on altix platforms
+     * Map the xio address into bus space
      */
-    return -EINVAL;
+    bus_addr = (*provider->dma_map_consistent)(pdev,
+                    sn_irq_info->irq_xtalkaddr,
+                    sizeof(sn_irq_info->irq_xtalkaddr),
+                    SN_DMA_MSI|SN_DMA_ADDR_XIO);
+    if (! bus_addr) {
+        sn_intr_free(nasid, widget, sn_irq_info);
+        kfree(sn_irq_info);
+        return -ENOMEM;
+    }
+
+    sn_msi_info[vector].sn_irq_info = sn_irq_info;
+    sn_msi_info[vector].pci_addr = bus_addr;
+
+    *addr_hi = (u32)(bus_addr >> 32);
+    *addr_lo = (u32)(bus_addr & 0x00000000ffffffff);
+
+    /*
+     * In the SN platform, bit 16 is a "send vector" bit which
+     * must be present in order to move the vector through the system.
+     */
+    *data = 0x100 + (unsigned int)vector;
+
+#ifdef CONFIG_SMP
+    set_irq_affinity_info((vector & 0xff), sn_irq_info->irq_cpuid, 0);
+#endif
+
+    return 0;
+}
+
+static void
+sn_msi_target(unsigned int vector, unsigned int cpu,
+          u32 *addr_hi, u32 *addr_lo)
+{
+    int slice;
+    nasid_t nasid;
+    u64 bus_addr;
+    struct pci_dev *pdev;
+    struct pcidev_info *sn_pdev;
+    struct sn_irq_info *sn_irq_info;
+    struct sn_irq_info *new_irq_info;
+    struct sn_pcibus_provider *provider;
+
+    sn_irq_info = sn_msi_info[vector].sn_irq_info;
+    if (sn_irq_info == NULL || sn_irq_info->irq_int_bit >= 0)
+        return;
+
+    /*
+     * Release XIO resources for the old MSI PCI address
+     */
+
+    sn_pdev = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
+    pdev = sn_pdev->pdi_linux_pcidev;
+    provider = SN_PCIDEV_BUSPROVIDER(pdev);
+
+    bus_addr = (u64)(*addr_hi) << 32 | (u64)(*addr_lo);
+    (*provider->dma_unmap)(pdev, bus_addr, PCI_DMA_FROMDEVICE);
+    sn_msi_info[vector].pci_addr = 0;
+
+    nasid = cpuid_to_nasid(cpu);
+    slice = cpuid_to_slice(cpu);
+
+    new_irq_info = sn_retarget_vector(sn_irq_info, nasid, slice);
+    sn_msi_info[vector].sn_irq_info = new_irq_info;
+    if (new_irq_info == NULL)
+        return;
+
+    /*
+     * Map the xio address into bus space
+     */
+
+    bus_addr = (*provider->dma_map_consistent)(pdev,
+                    new_irq_info->irq_xtalkaddr,
+                    sizeof(new_irq_info->irq_xtalkaddr),
+                    SN_DMA_MSI|SN_DMA_ADDR_XIO);
+
+    sn_msi_info[vector].pci_addr = bus_addr;
+    *addr_hi = (u32)(bus_addr >> 32);
+    *addr_lo = (u32)(bus_addr & 0x00000000ffffffff);
+}
+
+struct msi_ops sn_msi_ops = {
+    .setup = sn_msi_setup,
+    .teardown = sn_msi_teardown,
+#ifdef CONFIG_SMP
+    .target = sn_msi_target,
+#endif
+};
+
+int
+sn_msi_init(void)
+{
+    sn_msi_info =
+        kzalloc(sizeof(struct sn_msi_info) * NR_VECTORS, GFP_KERNEL);
+    if (! sn_msi_info)
+        return -ENOMEM;
+
+    msi_register(&sn_msi_ops);
+    return 0;
 }
diff --git a/include/asm-ia64/sn/intr.h b/include/asm-ia64/sn/intr.h
index 60a51a406eec..12b54ddb06be 100644
--- a/include/asm-ia64/sn/intr.h
+++ b/include/asm-ia64/sn/intr.h
@@ -10,6 +10,7 @@
 #define _ASM_IA64_SN_INTR_H

 #include <linux/rcupdate.h>
+#include <asm/sn/types.h>

 #define SGI_UART_VECTOR        0xe9

@@ -40,6 +41,7 @@ struct sn_irq_info {
     int    irq_cpuid;      /* kernel logical cpuid */
     int    irq_irq;        /* the IRQ number */
     int    irq_int_bit;    /* Bridge interrupt pin */
+                           /* <0 means MSI */
     u64    irq_xtalkaddr;  /* xtalkaddr IRQ is sent to */
     int    irq_bridge_type;/* pciio asic type (pciio.h) */
     void   *irq_bridge;    /* bridge generating irq */
@@ -53,6 +55,12 @@ struct sn_irq_info {
 };

 extern void sn_send_IPI_phys(int, long, int, int);
+extern u64 sn_intr_alloc(nasid_t, int,
+             struct sn_irq_info *,
+             int, nasid_t, int);
+extern void sn_intr_free(nasid_t, int, struct sn_irq_info *);
+extern struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *, nasid_t, int);
+extern struct list_head **sn_irq_lh;

 #define CPU_VECTOR_TO_IRQ(cpuid,vector) (vector)

diff --git a/include/asm-ia64/sn/pcibr_provider.h b/include/asm-ia64/sn/pcibr_provider.h
index 51260ab70d91..e3b0c3fe5eed 100644
--- a/include/asm-ia64/sn/pcibr_provider.h
+++ b/include/asm-ia64/sn/pcibr_provider.h
@@ -55,6 +55,7 @@
 #define PCI32_ATE_V            (0x1 << 0)
 #define PCI32_ATE_CO           (0x1 << 1)
 #define PCI32_ATE_PREC         (0x1 << 2)
+#define PCI32_ATE_MSI          (0x1 << 2)
 #define PCI32_ATE_PREF         (0x1 << 3)
 #define PCI32_ATE_BAR          (0x1 << 4)
 #define PCI32_ATE_ADDR_SHFT    12
@@ -117,8 +118,8 @@ struct pcibus_info {

 extern int  pcibr_init_provider(void);
 extern void *pcibr_bus_fixup(struct pcibus_bussoft *, struct pci_controller *);
-extern dma_addr_t pcibr_dma_map(struct pci_dev *, unsigned long, size_t);
-extern dma_addr_t pcibr_dma_map_consistent(struct pci_dev *, unsigned long, size_t);
+extern dma_addr_t pcibr_dma_map(struct pci_dev *, unsigned long, size_t, int type);
+extern dma_addr_t pcibr_dma_map_consistent(struct pci_dev *, unsigned long, size_t, int type);
 extern void pcibr_dma_unmap(struct pci_dev *, dma_addr_t, int);

 /*
diff --git a/include/asm-ia64/sn/pcibus_provider_defs.h b/include/asm-ia64/sn/pcibus_provider_defs.h
index ce3f6c328241..8f7c83d0f6d3 100644
--- a/include/asm-ia64/sn/pcibus_provider_defs.h
+++ b/include/asm-ia64/sn/pcibus_provider_defs.h
@@ -3,7 +3,7 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
  */
 #ifndef _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H
 #define _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H
@@ -45,13 +45,24 @@ struct pci_controller;
  */

 struct sn_pcibus_provider {
-    dma_addr_t    (*dma_map)(struct pci_dev *, unsigned long, size_t);
-    dma_addr_t    (*dma_map_consistent)(struct pci_dev *, unsigned long, size_t);
+    dma_addr_t    (*dma_map)(struct pci_dev *, unsigned long, size_t, int flags);
+    dma_addr_t    (*dma_map_consistent)(struct pci_dev *, unsigned long, size_t, int flags);
     void          (*dma_unmap)(struct pci_dev *, dma_addr_t, int);
     void *        (*bus_fixup)(struct pcibus_bussoft *, struct pci_controller *);
     void          (*force_interrupt)(struct sn_irq_info *);
     void          (*target_interrupt)(struct sn_irq_info *);
 };

+/*
+ * Flags used by the map interfaces
+ * bits 3:0 specifies format of passed in address
+ * bit  4 specifies that address is to be used for MSI
+ */
+
+#define SN_DMA_ADDRTYPE(x)      ((x) & 0xf)
+#define     SN_DMA_ADDR_PHYS    1     /* address is phys memory */
+#define     SN_DMA_ADDR_XIO     2     /* address is an xio address */
+#define     SN_DMA_MSI          0x10  /* Bus address is to be used for MSI */
+
 extern struct sn_pcibus_provider *sn_pci_provider[];
 #endif                /* _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H */
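
For orientation only, a minimal sketch (not part of the patch) of how callers are expected to combine these flags, mirroring the provider calls made in pci_dma.c and msi-altix.c above; pdev, cpu_addr, size and xtalk_addr stand in for whatever the caller already has:

    /* streaming DMA for a driver buffer: the address handed in is physical */
    dma_addr = provider->dma_map(pdev, __pa(cpu_addr), size, SN_DMA_ADDR_PHYS);

    /* MSI target: the address is already a Crosstalk (XIO) address, and
     * SN_DMA_MSI asks the bridge provider for an MSI-capable mapping */
    bus_addr = provider->dma_map_consistent(pdev, xtalk_addr, size,
                        SN_DMA_MSI | SN_DMA_ADDR_XIO);
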
diff --git a/include/asm-ia64/sn/tiocp.h b/include/asm-ia64/sn/tiocp.h
index f47c08ab483c..e8ad0bb5b6c5 100644
--- a/include/asm-ia64/sn/tiocp.h
+++ b/include/asm-ia64/sn/tiocp.h
@@ -3,13 +3,14 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2003-2004 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2003-2005 Silicon Graphics, Inc. All rights reserved.
  */
 #ifndef _ASM_IA64_SN_PCI_TIOCP_H
 #define _ASM_IA64_SN_PCI_TIOCP_H

 #define TIOCP_HOST_INTR_ADDR           0x003FFFFFFFFFFFFFUL
 #define TIOCP_PCI64_CMDTYPE_MEM        (0x1ull << 60)
+#define TIOCP_PCI64_CMDTYPE_MSI        (0x3ull << 60)


 /*****************************************************************************