author     Tony Luck <tony.luck@intel.com>    2006-06-23 16:46:23 -0400
committer  Tony Luck <tony.luck@intel.com>    2006-06-23 16:46:23 -0400
commit     8cf60e04a131310199d5776e2f9e915f0c468899
tree       373a68e88e6737713a0a5723d552cdeefffff929  /arch/ia64/sn/pci
parent     1323523f505606cfd24af6122369afddefc3b09d
parent     95eaa5fa8eb2c345244acd5f65b200b115ae8c65
Auto-update from upstream
Diffstat (limited to 'arch/ia64/sn/pci')
 arch/ia64/sn/pci/pci_dma.c         | 10
 arch/ia64/sn/pci/pcibr/pcibr_dma.c | 62
 arch/ia64/sn/pci/tioca_provider.c  |  8
 arch/ia64/sn/pci/tioce_provider.c  | 65
 4 files changed, 100 insertions(+), 45 deletions(-)
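
In short, every hunk below threads a new int dma_flags argument through the SN PCI DMA providers (pcibr, tioca, tioce): callers now state whether the address they pass is a plain physical address (SN_DMA_ADDR_PHYS) and whether the mapping is for an MSI transaction (SN_DMA_MSI), and each provider either honors the MSI request or fails the map so another mode can be tried. A minimal sketch of the new calling convention, assuming only the flag names visible in this diff plus the provider struct from pcibus_provider_defs.h; the helper name is hypothetical:

#include <asm/sn/intr.h>                 /* SN_DMA_ADDR_PHYS, SN_DMA_MSI (per the include change below) */
#include <asm/sn/pcibus_provider_defs.h> /* struct sn_pcibus_provider */
#include <asm/sn/pcidev.h>

/* Hypothetical helper, for illustration only. */
static dma_addr_t
example_msi_map(struct sn_pcibus_provider *provider, struct pci_dev *pdev,
		unsigned long phys_addr, size_t size)
{
	/* Before this change: provider->dma_map(pdev, phys_addr, size); */
	return provider->dma_map(pdev, phys_addr, size,
				 SN_DMA_ADDR_PHYS | SN_DMA_MSI);
}

Passing SN_DMA_ADDR_PHYS alone reproduces the old behavior, which is exactly how sn_dma_map_single(), sn_dma_map_sg(), and sn_dma_alloc_coherent() are updated in pci_dma.c below.
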
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index b4b84c269210..7a291a271511 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -11,7 +11,7 @@
 
 #include <linux/module.h>
 #include <asm/dma.h>
-#include <asm/sn/pcibr_provider.h>
+#include <asm/sn/intr.h>
 #include <asm/sn/pcibus_provider_defs.h>
 #include <asm/sn/pcidev.h>
 #include <asm/sn/sn_sal.h>
@@ -113,7 +113,8 @@ void *sn_dma_alloc_coherent(struct device *dev, size_t size,
 	 * resources.
 	 */
 
-	*dma_handle = provider->dma_map_consistent(pdev, phys_addr, size);
+	*dma_handle = provider->dma_map_consistent(pdev, phys_addr, size,
+						   SN_DMA_ADDR_PHYS);
 	if (!*dma_handle) {
 		printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
 		free_pages((unsigned long)cpuaddr, get_order(size));
@@ -176,7 +177,7 @@ dma_addr_t sn_dma_map_single(struct device *dev, void *cpu_addr, size_t size,
 	BUG_ON(dev->bus != &pci_bus_type);
 
 	phys_addr = __pa(cpu_addr);
-	dma_addr = provider->dma_map(pdev, phys_addr, size);
+	dma_addr = provider->dma_map(pdev, phys_addr, size, SN_DMA_ADDR_PHYS);
 	if (!dma_addr) {
 		printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
 		return 0;
@@ -260,7 +261,8 @@ int sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
 	for (i = 0; i < nhwentries; i++, sg++) {
 		phys_addr = SG_ENT_PHYS_ADDRESS(sg);
 		sg->dma_address = provider->dma_map(pdev,
-						    phys_addr, sg->length);
+						    phys_addr, sg->length,
+						    SN_DMA_ADDR_PHYS);
 
 		if (!sg->dma_address) {
 			printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_dma.c b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
index 9f86bb6519aa..a86c7b945962 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_dma.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
@@ -41,7 +41,7 @@ extern int sn_ioif_inited;
 
 static dma_addr_t
 pcibr_dmamap_ate32(struct pcidev_info *info,
-		   u64 paddr, size_t req_size, u64 flags)
+		   u64 paddr, size_t req_size, u64 flags, int dma_flags)
 {
 
 	struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info;
@@ -81,9 +81,12 @@ pcibr_dmamap_ate32(struct pcidev_info *info,
 	if (IS_PCIX(pcibus_info))
 		ate_flags &= ~(PCI32_ATE_PREF);
 
-	xio_addr =
-	    IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
-	    PHYS_TO_TIODMA(paddr);
+	if (SN_DMA_ADDRTYPE(dma_flags == SN_DMA_ADDR_PHYS))
+		xio_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
+			   PHYS_TO_TIODMA(paddr);
+	else
+		xio_addr = paddr;
+
 	offset = IOPGOFF(xio_addr);
 	ate = ate_flags | (xio_addr - offset);
 
@@ -91,6 +94,13 @@ pcibr_dmamap_ate32(struct pcidev_info *info,
 	if (IS_PIC_SOFT(pcibus_info)) {
 		ate |= (pcibus_info->pbi_hub_xid << PIC_ATE_TARGETID_SHFT);
 	}
+
+	/*
+	 * If we're mapping for MSI, set the MSI bit in the ATE
+	 */
+	if (dma_flags & SN_DMA_MSI)
+		ate |= PCI32_ATE_MSI;
+
 	ate_write(pcibus_info, ate_index, ate_count, ate);
 
 	/*
@@ -105,20 +115,27 @@ pcibr_dmamap_ate32(struct pcidev_info *info,
 	if (pcibus_info->pbi_devreg[internal_device] & PCIBR_DEV_SWAP_DIR)
 		ATE_SWAP_ON(pci_addr);
 
+
 	return pci_addr;
 }
 
 static dma_addr_t
 pcibr_dmatrans_direct64(struct pcidev_info * info, u64 paddr,
-			u64 dma_attributes)
+			u64 dma_attributes, int dma_flags)
 {
 	struct pcibus_info *pcibus_info = (struct pcibus_info *)
 	    ((info->pdi_host_pcidev_info)->pdi_pcibus_info);
 	u64 pci_addr;
 
 	/* Translate to Crosstalk View of Physical Address */
-	pci_addr = (IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
-		    PHYS_TO_TIODMA(paddr)) | dma_attributes;
+	if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
+		pci_addr = IS_PIC_SOFT(pcibus_info) ?
+				PHYS_TO_DMA(paddr) :
+				PHYS_TO_TIODMA(paddr) | dma_attributes;
+	else
+		pci_addr = IS_PIC_SOFT(pcibus_info) ?
+				paddr :
+				paddr | dma_attributes;
 
 	/* Handle Bus mode */
 	if (IS_PCIX(pcibus_info))
@@ -130,7 +147,9 @@ pcibr_dmatrans_direct64(struct pcidev_info * info, u64 paddr,
 		    ((u64) pcibus_info->
 		     pbi_hub_xid << PIC_PCI64_ATTR_TARG_SHFT);
 	} else
-		pci_addr |= TIOCP_PCI64_CMDTYPE_MEM;
+		pci_addr |= (dma_flags & SN_DMA_MSI) ?
+				TIOCP_PCI64_CMDTYPE_MSI :
+				TIOCP_PCI64_CMDTYPE_MEM;
 
 	/* If PCI mode, func zero uses VCHAN0, every other func uses VCHAN1 */
 	if (!IS_PCIX(pcibus_info) && PCI_FUNC(info->pdi_linux_pcidev->devfn))
@@ -141,7 +160,7 @@ pcibr_dmatrans_direct64(struct pcidev_info * info, u64 paddr,
 
 static dma_addr_t
 pcibr_dmatrans_direct32(struct pcidev_info * info,
-			u64 paddr, size_t req_size, u64 flags)
+			u64 paddr, size_t req_size, u64 flags, int dma_flags)
 {
 	struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info;
 	struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info->
@@ -156,8 +175,14 @@ pcibr_dmatrans_direct32(struct pcidev_info * info,
 		return 0;
 	}
 
-	xio_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
-	    PHYS_TO_TIODMA(paddr);
+	if (dma_flags & SN_DMA_MSI)
+		return 0;
+
+	if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
+		xio_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
+			   PHYS_TO_TIODMA(paddr);
+	else
+		xio_addr = paddr;
 
 	xio_base = pcibus_info->pbi_dir_xbase;
 	offset = xio_addr - xio_base;
@@ -327,7 +352,7 @@ void sn_dma_flush(u64 addr)
  */
 
 dma_addr_t
-pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size)
+pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size, int dma_flags)
 {
 	dma_addr_t dma_handle;
 	struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
@@ -344,11 +369,11 @@ pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size)
 		 */
 
 		dma_handle = pcibr_dmatrans_direct64(pcidev_info, phys_addr,
-						     PCI64_ATTR_PREF);
+						     PCI64_ATTR_PREF, dma_flags);
 	} else {
 		/* Handle 32-63 bit cards via direct mapping */
 		dma_handle = pcibr_dmatrans_direct32(pcidev_info, phys_addr,
-						     size, 0);
+						     size, 0, dma_flags);
 		if (!dma_handle) {
 			/*
 			 * It is a 32 bit card and we cannot do direct mapping,
@@ -356,7 +381,8 @@ pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size)
 			 */
 
 			dma_handle = pcibr_dmamap_ate32(pcidev_info, phys_addr,
-							size, PCI32_ATE_PREF);
+							size, PCI32_ATE_PREF,
+							dma_flags);
 		}
 	}
 
@@ -365,18 +391,18 @@ pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size)
 
 dma_addr_t
 pcibr_dma_map_consistent(struct pci_dev * hwdev, unsigned long phys_addr,
-			 size_t size)
+			 size_t size, int dma_flags)
 {
 	dma_addr_t dma_handle;
 	struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
 
 	if (hwdev->dev.coherent_dma_mask == ~0UL) {
 		dma_handle = pcibr_dmatrans_direct64(pcidev_info, phys_addr,
-						     PCI64_ATTR_BAR);
+						     PCI64_ATTR_BAR, dma_flags);
 	} else {
 		dma_handle = (dma_addr_t) pcibr_dmamap_ate32(pcidev_info,
 							     phys_addr, size,
-							     PCI32_ATE_BAR);
+							     PCI32_ATE_BAR, dma_flags);
 	}
 
 	return dma_handle;
diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c
index be0176912968..20de72791b97 100644
--- a/arch/ia64/sn/pci/tioca_provider.c
+++ b/arch/ia64/sn/pci/tioca_provider.c
@@ -515,11 +515,17 @@ tioca_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
  * use the GART mapped mode.
  */
 static u64
-tioca_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count)
+tioca_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags)
 {
 	u64 mapaddr;
 
 	/*
+	 * Not supported for now ...
+	 */
+	if (dma_flags & SN_DMA_MSI)
+		return 0;
+
+	/*
 	 * If card is 64 or 48 bit addresable, use a direct mapping.  32
 	 * bit direct is so restrictive w.r.t. where the memory resides that
 	 * we don't use it even though CA has some support.
diff --git a/arch/ia64/sn/pci/tioce_provider.c b/arch/ia64/sn/pci/tioce_provider.c
index 85f3b3d4c606..2d7948567ebc 100644
--- a/arch/ia64/sn/pci/tioce_provider.c
+++ b/arch/ia64/sn/pci/tioce_provider.c
@@ -170,7 +170,8 @@ tioce_mmr_war_post(struct tioce_kernel *kern, void *mmr_addr)
 	(ATE_PAGE((start)+(len)-1, pagesize) - ATE_PAGE(start, pagesize) + 1)
 
 #define ATE_VALID(ate)	((ate) & (1UL << 63))
-#define ATE_MAKE(addr, ps) (((addr) & ~ATE_PAGEMASK(ps)) | (1UL << 63))
+#define ATE_MAKE(addr, ps, msi)	\
+	(((addr) & ~ATE_PAGEMASK(ps)) | (1UL << 63) | ((msi)?(1UL << 62):0))
 
 /*
  * Flavors of ate-based mapping supported by tioce_alloc_map()
@@ -196,15 +197,17 @@ tioce_mmr_war_post(struct tioce_kernel *kern, void *mmr_addr)
  *
  * 63 - must be 1 to indicate d64 mode to CE hardware
  * 62 - barrier bit ... controlled with tioce_dma_barrier()
- * 61 - 0 since this is not an MSI transaction
+ * 61 - msi bit ... specified through dma_flags
  * 60:54 - reserved, MBZ
  */
 static u64
-tioce_dma_d64(unsigned long ct_addr)
+tioce_dma_d64(unsigned long ct_addr, int dma_flags)
 {
 	u64 bus_addr;
 
 	bus_addr = ct_addr | (1UL << 63);
+	if (dma_flags & SN_DMA_MSI)
+		bus_addr |= (1UL << 61);
 
 	return bus_addr;
 }
@@ -261,7 +264,7 @@ pcidev_to_tioce(struct pci_dev *pdev, struct tioce **base,
  */
 static u64
 tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
-		u64 ct_addr, int len)
+		u64 ct_addr, int len, int dma_flags)
 {
 	int i;
 	int j;
@@ -270,6 +273,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
 	int entries;
 	int nates;
 	u64 pagesize;
+	int msi_capable, msi_wanted;
 	u64 *ate_shadow;
 	u64 *ate_reg;
 	u64 addr;
@@ -291,6 +295,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
 		ate_reg = ce_mmr->ce_ure_ate3240;
 		pagesize = ce_kern->ce_ate3240_pagesize;
 		bus_base = TIOCE_M32_MIN;
+		msi_capable = 1;
 		break;
 	case TIOCE_ATE_M40:
 		first = 0;
@@ -299,6 +304,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
 		ate_reg = ce_mmr->ce_ure_ate40;
 		pagesize = MB(64);
 		bus_base = TIOCE_M40_MIN;
+		msi_capable = 0;
 		break;
 	case TIOCE_ATE_M40S:
 		/*
@@ -311,11 +317,16 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
 		ate_reg = ce_mmr->ce_ure_ate3240;
 		pagesize = GB(16);
 		bus_base = TIOCE_M40S_MIN;
+		msi_capable = 0;
 		break;
 	default:
 		return 0;
 	}
 
+	msi_wanted = dma_flags & SN_DMA_MSI;
+	if (msi_wanted && !msi_capable)
+		return 0;
+
 	nates = ATE_NPAGES(ct_addr, len, pagesize);
 	if (nates > entries)
 		return 0;
@@ -344,7 +355,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
 	for (j = 0; j < nates; j++) {
 		u64 ate;
 
-		ate = ATE_MAKE(addr, pagesize);
+		ate = ATE_MAKE(addr, pagesize, msi_wanted);
 		ate_shadow[i + j] = ate;
 		tioce_mmr_storei(ce_kern, &ate_reg[i + j], ate);
 		addr += pagesize;
@@ -371,7 +382,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
  * Map @paddr into 32-bit bus space of the CE associated with @pcidev_info.
  */
 static u64
-tioce_dma_d32(struct pci_dev *pdev, u64 ct_addr)
+tioce_dma_d32(struct pci_dev *pdev, u64 ct_addr, int dma_flags)
 {
 	int dma_ok;
 	int port;
@@ -381,6 +392,9 @@ tioce_dma_d32(struct pci_dev *pdev, u64 ct_addr)
 	u64 ct_lower;
 	dma_addr_t bus_addr;
 
+	if (dma_flags & SN_DMA_MSI)
+		return 0;
+
 	ct_upper = ct_addr & ~0x3fffffffUL;
 	ct_lower = ct_addr & 0x3fffffffUL;
 
@@ -507,7 +521,7 @@ tioce_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
  */
 static u64
 tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count,
-		 int barrier)
+		 int barrier, int dma_flags)
 {
 	unsigned long flags;
 	u64 ct_addr;
@@ -523,15 +537,18 @@ tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count,
 	if (dma_mask < 0x7fffffffUL)
 		return 0;
 
-	ct_addr = PHYS_TO_TIODMA(paddr);
+	if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
+		ct_addr = PHYS_TO_TIODMA(paddr);
+	else
+		ct_addr = paddr;
 
 	/*
 	 * If the device can generate 64 bit addresses, create a D64 map.
-	 * Since this should never fail, bypass the rest of the checks.
 	 */
 	if (dma_mask == ~0UL) {
-		mapaddr = tioce_dma_d64(ct_addr);
-		goto dma_map_done;
+		mapaddr = tioce_dma_d64(ct_addr, dma_flags);
+		if (mapaddr)
+			goto dma_map_done;
 	}
 
 	pcidev_to_tioce(pdev, NULL, &ce_kern, &port);
@@ -574,18 +591,22 @@ tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count,
 
 		if (byte_count > MB(64)) {
 			mapaddr = tioce_alloc_map(ce_kern, TIOCE_ATE_M40S,
-						  port, ct_addr, byte_count);
+						  port, ct_addr, byte_count,
+						  dma_flags);
 			if (!mapaddr)
 				mapaddr =
 				    tioce_alloc_map(ce_kern, TIOCE_ATE_M40, -1,
-						    ct_addr, byte_count);
+						    ct_addr, byte_count,
+						    dma_flags);
 		} else {
 			mapaddr = tioce_alloc_map(ce_kern, TIOCE_ATE_M40, -1,
-						  ct_addr, byte_count);
+						  ct_addr, byte_count,
+						  dma_flags);
 			if (!mapaddr)
 				mapaddr =
 				    tioce_alloc_map(ce_kern, TIOCE_ATE_M40S,
-						    port, ct_addr, byte_count);
+						    port, ct_addr, byte_count,
+						    dma_flags);
 		}
 	}
 
@@ -593,7 +614,7 @@ tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count,
 	 * 32-bit direct is the next mode to try
 	 */
 	if (!mapaddr && dma_mask >= 0xffffffffUL)
-		mapaddr = tioce_dma_d32(pdev, ct_addr);
+		mapaddr = tioce_dma_d32(pdev, ct_addr, dma_flags);
 
 	/*
 	 * Last resort, try 32-bit ATE-based map.
@@ -601,7 +622,7 @@ tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count,
 	if (!mapaddr)
 		mapaddr =
 		    tioce_alloc_map(ce_kern, TIOCE_ATE_M32, -1, ct_addr,
-				    byte_count);
+				    byte_count, dma_flags);
 
 	spin_unlock_irqrestore(&ce_kern->ce_lock, flags);
 
@@ -622,9 +643,9 @@ dma_map_done:
  * in the address.
  */
 static u64
-tioce_dma(struct pci_dev *pdev, u64 paddr, size_t byte_count)
+tioce_dma(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags)
 {
-	return tioce_do_dma_map(pdev, paddr, byte_count, 0);
+	return tioce_do_dma_map(pdev, paddr, byte_count, 0, dma_flags);
 }
 
 /**
@@ -636,9 +657,9 @@ tioce_dma(struct pci_dev *pdev, u64 paddr, size_t byte_count)
  * Simply call tioce_do_dma_map() to create a map with the barrier bit set
  * in the address.
  */ static u64
-tioce_dma_consistent(struct pci_dev *pdev, u64 paddr, size_t byte_count)
+tioce_dma_consistent(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags)
 {
-	return tioce_do_dma_map(pdev, paddr, byte_count, 1);
+	return tioce_do_dma_map(pdev, paddr, byte_count, 1, dma_flags);
 }
 
 /**
@@ -696,7 +717,7 @@ tioce_reserve_m32(struct tioce_kernel *ce_kern, u64 base, u64 limit)
 	while (ate_index <= last_ate) {
 		u64 ate;
 
-		ate = ATE_MAKE(0xdeadbeef, ps);
+		ate = ATE_MAKE(0xdeadbeef, ps, 0);
 		ce_kern->ce_ate3240_shadow[ate_index] = ate;
 		tioce_mmr_storei(ce_kern, &ce_mmr->ce_ure_ate3240[ate_index],
 				 ate);
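
For reference, the D64 bit layout documented in the tioce comment above (bit 63 = d64 mode, bit 62 = barrier, bit 61 = MSI) means the patched tioce_dma_d64() composes an MSI-capable bus address with two OR operations. Below is a standalone sketch of that bit composition; the flag value and the sample CT address are made-up placeholders for illustration, and the real SN_DMA_MSI definition lives in <asm/sn/intr.h>:

#include <stdint.h>
#include <stdio.h>

#define EX_SN_DMA_MSI 0x1	/* placeholder value, for illustration only */

/* Mirrors the patched tioce_dma_d64(): set bit 63 (d64 mode), plus bit 61 for MSI. */
static uint64_t example_tioce_dma_d64(uint64_t ct_addr, int dma_flags)
{
	uint64_t bus_addr = ct_addr | (1ULL << 63);

	if (dma_flags & EX_SN_DMA_MSI)
		bus_addr |= (1ULL << 61);

	return bus_addr;
}

int main(void)
{
	uint64_t ct_addr = 0x123456789000ULL;	/* made-up CT address */

	printf("d64:     %#llx\n",
	       (unsigned long long)example_tioce_dma_d64(ct_addr, 0));
	printf("d64+msi: %#llx\n",
	       (unsigned long long)example_tioce_dma_d64(ct_addr, EX_SN_DMA_MSI));
	return 0;
}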