author     Tony Luck <tony.luck@intel.com>    2006-06-23 16:46:23 -0400
committer  Tony Luck <tony.luck@intel.com>    2006-06-23 16:46:23 -0400
commit     8cf60e04a131310199d5776e2f9e915f0c468899 (patch)
tree       373a68e88e6737713a0a5723d552cdeefffff929 /arch/ia64/sn/pci/tioce_provider.c
parent     1323523f505606cfd24af6122369afddefc3b09d (diff)
parent     95eaa5fa8eb2c345244acd5f65b200b115ae8c65 (diff)
Auto-update from upstream
Diffstat (limited to 'arch/ia64/sn/pci/tioce_provider.c')
-rw-r--r--  arch/ia64/sn/pci/tioce_provider.c  |  65
1 file changed, 43 insertions(+), 22 deletions(-)
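This merge pulls in the SN2 MSI groundwork for the TIOCE provider: a dma_flags argument is threaded through tioce_dma_d64(), tioce_dma_d32(), tioce_alloc_map() and the tioce_dma()/tioce_dma_consistent() entry points so callers can flag MSI transactions, ATE_MAKE() gains an msi parameter, and map types that cannot carry MSI (M40/M40S, and the D32 direct path) now refuse such requests. Below is a minimal standalone sketch of the D64 bus-address encoding the patch implements; the SN_DMA_MSI value is a placeholder for illustration only (the real flag is defined in the ia64 SN headers).

#include <stdint.h>

/* Placeholder value for illustration; the real SN_DMA_MSI comes from the ia64 SN headers. */
#define SN_DMA_MSI 0x1

/* Mirrors the patched tioce_dma_d64(): bit 63 = d64 mode, bit 61 = MSI transaction. */
static uint64_t sketch_dma_d64(uint64_t ct_addr, int dma_flags)
{
        uint64_t bus_addr = ct_addr | (1ULL << 63);     /* d64 mode */

        if (dma_flags & SN_DMA_MSI)
                bus_addr |= (1ULL << 61);               /* mark as MSI */

        return bus_addr;
}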
diff --git a/arch/ia64/sn/pci/tioce_provider.c b/arch/ia64/sn/pci/tioce_provider.c
index 85f3b3d4c60..2d7948567eb 100644
--- a/arch/ia64/sn/pci/tioce_provider.c
+++ b/arch/ia64/sn/pci/tioce_provider.c
@@ -170,7 +170,8 @@ tioce_mmr_war_post(struct tioce_kernel *kern, void *mmr_addr)
         (ATE_PAGE((start)+(len)-1, pagesize) - ATE_PAGE(start, pagesize) + 1)
 
 #define ATE_VALID(ate) ((ate) & (1UL << 63))
-#define ATE_MAKE(addr, ps) (((addr) & ~ATE_PAGEMASK(ps)) | (1UL << 63))
+#define ATE_MAKE(addr, ps, msi) \
+        (((addr) & ~ATE_PAGEMASK(ps)) | (1UL << 63) | ((msi)?(1UL << 62):0))
 
 /*
  * Flavors of ate-based mapping supported by tioce_alloc_map()
@@ -196,15 +197,17 @@ tioce_mmr_war_post(struct tioce_kernel *kern, void *mmr_addr)
  *
  * 63    - must be 1 to indicate d64 mode to CE hardware
  * 62    - barrier bit ... controlled with tioce_dma_barrier()
- * 61    - 0 since this is not an MSI transaction
+ * 61    - msi bit ... specified through dma_flags
  * 60:54 - reserved, MBZ
  */
 static u64
-tioce_dma_d64(unsigned long ct_addr)
+tioce_dma_d64(unsigned long ct_addr, int dma_flags)
 {
         u64 bus_addr;
 
         bus_addr = ct_addr | (1UL << 63);
+        if (dma_flags & SN_DMA_MSI)
+                bus_addr |= (1UL << 61);
 
         return bus_addr;
 }
@@ -261,7 +264,7 @@ pcidev_to_tioce(struct pci_dev *pdev, struct tioce **base,
  */
 static u64
 tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
-                u64 ct_addr, int len)
+                u64 ct_addr, int len, int dma_flags)
 {
         int i;
         int j;
@@ -270,6 +273,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
         int entries;
         int nates;
         u64 pagesize;
+        int msi_capable, msi_wanted;
         u64 *ate_shadow;
         u64 *ate_reg;
         u64 addr;
@@ -291,6 +295,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
                 ate_reg = ce_mmr->ce_ure_ate3240;
                 pagesize = ce_kern->ce_ate3240_pagesize;
                 bus_base = TIOCE_M32_MIN;
+                msi_capable = 1;
                 break;
         case TIOCE_ATE_M40:
                 first = 0;
@@ -299,6 +304,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
                 ate_reg = ce_mmr->ce_ure_ate40;
                 pagesize = MB(64);
                 bus_base = TIOCE_M40_MIN;
+                msi_capable = 0;
                 break;
         case TIOCE_ATE_M40S:
                 /*
@@ -311,11 +317,16 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
                 ate_reg = ce_mmr->ce_ure_ate3240;
                 pagesize = GB(16);
                 bus_base = TIOCE_M40S_MIN;
+                msi_capable = 0;
                 break;
         default:
                 return 0;
         }
 
+        msi_wanted = dma_flags & SN_DMA_MSI;
+        if (msi_wanted && !msi_capable)
+                return 0;
+
         nates = ATE_NPAGES(ct_addr, len, pagesize);
         if (nates > entries)
                 return 0;
@@ -344,7 +355,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
         for (j = 0; j < nates; j++) {
                 u64 ate;
 
-                ate = ATE_MAKE(addr, pagesize);
+                ate = ATE_MAKE(addr, pagesize, msi_wanted);
                 ate_shadow[i + j] = ate;
                 tioce_mmr_storei(ce_kern, &ate_reg[i + j], ate);
                 addr += pagesize;
@@ -371,7 +382,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
  * Map @paddr into 32-bit bus space of the CE associated with @pcidev_info.
  */
 static u64
-tioce_dma_d32(struct pci_dev *pdev, u64 ct_addr)
+tioce_dma_d32(struct pci_dev *pdev, u64 ct_addr, int dma_flags)
 {
         int dma_ok;
         int port;
@@ -381,6 +392,9 @@ tioce_dma_d32(struct pci_dev *pdev, u64 ct_addr)
         u64 ct_lower;
         dma_addr_t bus_addr;
 
+        if (dma_flags & SN_DMA_MSI)
+                return 0;
+
         ct_upper = ct_addr & ~0x3fffffffUL;
         ct_lower = ct_addr & 0x3fffffffUL;
 
@@ -507,7 +521,7 @@ tioce_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
  */
 static u64
 tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count,
-                 int barrier)
+                 int barrier, int dma_flags)
 {
         unsigned long flags;
         u64 ct_addr;
@@ -523,15 +537,18 @@ tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count,
         if (dma_mask < 0x7fffffffUL)
                 return 0;
 
-        ct_addr = PHYS_TO_TIODMA(paddr);
+        if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
+                ct_addr = PHYS_TO_TIODMA(paddr);
+        else
+                ct_addr = paddr;
 
         /*
          * If the device can generate 64 bit addresses, create a D64 map.
-         * Since this should never fail, bypass the rest of the checks.
          */
         if (dma_mask == ~0UL) {
-                mapaddr = tioce_dma_d64(ct_addr);
-                goto dma_map_done;
+                mapaddr = tioce_dma_d64(ct_addr, dma_flags);
+                if (mapaddr)
+                        goto dma_map_done;
         }
 
         pcidev_to_tioce(pdev, NULL, &ce_kern, &port);
@@ -574,18 +591,22 @@ tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count,
 
         if (byte_count > MB(64)) {
                 mapaddr = tioce_alloc_map(ce_kern, TIOCE_ATE_M40S,
-                                          port, ct_addr, byte_count);
+                                          port, ct_addr, byte_count,
+                                          dma_flags);
                 if (!mapaddr)
                         mapaddr =
                             tioce_alloc_map(ce_kern, TIOCE_ATE_M40, -1,
-                                            ct_addr, byte_count);
+                                            ct_addr, byte_count,
+                                            dma_flags);
         } else {
                 mapaddr = tioce_alloc_map(ce_kern, TIOCE_ATE_M40, -1,
-                                          ct_addr, byte_count);
+                                          ct_addr, byte_count,
+                                          dma_flags);
                 if (!mapaddr)
                         mapaddr =
                             tioce_alloc_map(ce_kern, TIOCE_ATE_M40S,
-                                            port, ct_addr, byte_count);
+                                            port, ct_addr, byte_count,
+                                            dma_flags);
                 }
         }
 
@@ -593,7 +614,7 @@ tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count,
          * 32-bit direct is the next mode to try
          */
         if (!mapaddr && dma_mask >= 0xffffffffUL)
-                mapaddr = tioce_dma_d32(pdev, ct_addr);
+                mapaddr = tioce_dma_d32(pdev, ct_addr, dma_flags);
 
         /*
          * Last resort, try 32-bit ATE-based map.
@@ -601,7 +622,7 @@ tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count,
         if (!mapaddr)
                 mapaddr =
                     tioce_alloc_map(ce_kern, TIOCE_ATE_M32, -1, ct_addr,
-                                    byte_count);
+                                    byte_count, dma_flags);
 
         spin_unlock_irqrestore(&ce_kern->ce_lock, flags);
 
@@ -622,9 +643,9 @@ dma_map_done:
  * in the address.
  */
 static u64
-tioce_dma(struct pci_dev *pdev, u64 paddr, size_t byte_count)
+tioce_dma(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags)
 {
-        return tioce_do_dma_map(pdev, paddr, byte_count, 0);
+        return tioce_do_dma_map(pdev, paddr, byte_count, 0, dma_flags);
 }
 
 /**
@@ -636,9 +657,9 @@ tioce_dma(struct pci_dev *pdev, u64 paddr, size_t byte_count)
  * Simply call tioce_do_dma_map() to create a map with the barrier bit set
  * in the address.
  */ static u64
-tioce_dma_consistent(struct pci_dev *pdev, u64 paddr, size_t byte_count)
+tioce_dma_consistent(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags)
 {
-        return tioce_do_dma_map(pdev, paddr, byte_count, 1);
+        return tioce_do_dma_map(pdev, paddr, byte_count, 1, dma_flags);
 }
 
 /**
@@ -696,7 +717,7 @@ tioce_reserve_m32(struct tioce_kernel *ce_kern, u64 base, u64 limit)
         while (ate_index <= last_ate) {
                 u64 ate;
 
-                ate = ATE_MAKE(0xdeadbeef, ps);
+                ate = ATE_MAKE(0xdeadbeef, ps, 0);
                 ce_kern->ce_ate3240_shadow[ate_index] = ate;
                 tioce_mmr_storei(ce_kern, &ce_mmr->ce_ure_ate3240[ate_index],
                                  ate);
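For reference, a small self-contained sketch of the updated ATE encoding (valid bit 63, MSI bit 62, as in the new ATE_MAKE() above). The page-mask macro below is a simplified stand-in for the driver's ATE_PAGEMASK(), and the addresses are arbitrary example values.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the driver's ATE_PAGEMASK(ps). */
#define ATE_PAGEMASK(ps)        ((uint64_t)(ps) - 1)

/* Same shape as the patched ATE_MAKE(): valid bit 63, MSI bit 62. */
#define ATE_MAKE(addr, ps, msi) \
        (((addr) & ~ATE_PAGEMASK(ps)) | (1ULL << 63) | ((msi) ? (1ULL << 62) : 0))

int main(void)
{
        uint64_t msi_ate   = ATE_MAKE(0x123456789ULL, 1 << 12, 1);
        uint64_t plain_ate = ATE_MAKE(0x123456789ULL, 1 << 12, 0);

        printf("msi ate:   0x%016llx\n", (unsigned long long)msi_ate);
        printf("plain ate: 0x%016llx\n", (unsigned long long)plain_ate);
        return 0;
}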