Diffstat (limited to 'arch/sparc64/kernel')
-rw-r--r--   arch/sparc64/kernel/pci_iommu.c    221
-rw-r--r--   arch/sparc64/kernel/pci_psycho.c     2
-rw-r--r--   arch/sparc64/kernel/pci_sabre.c      2
-rw-r--r--   arch/sparc64/kernel/pci_schizo.c     2
-rw-r--r--   arch/sparc64/kernel/sbus.c          41
-rw-r--r--   arch/sparc64/kernel/setup.c         11
-rw-r--r--   arch/sparc64/kernel/smp.c            3
-rw-r--r--   arch/sparc64/kernel/traps.c         19
8 files changed, 186 insertions(+), 115 deletions(-)
diff --git a/arch/sparc64/kernel/pci_iommu.c b/arch/sparc64/kernel/pci_iommu.c
index 292983413ae2..2803bc7c2c79 100644
--- a/arch/sparc64/kernel/pci_iommu.c
+++ b/arch/sparc64/kernel/pci_iommu.c
@@ -8,6 +8,7 @@
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
+#include <linux/delay.h>
 
 #include <asm/pbm.h>
 
@@ -195,6 +196,34 @@ static iopte_t *alloc_consistent_cluster(struct pci_iommu *iommu, unsigned long
 	return NULL;
 }
 
+static int iommu_alloc_ctx(struct pci_iommu *iommu)
+{
+	int lowest = iommu->ctx_lowest_free;
+	int sz = IOMMU_NUM_CTXS - lowest;
+	int n = find_next_zero_bit(iommu->ctx_bitmap, sz, lowest);
+
+	if (unlikely(n == sz)) {
+		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
+		if (unlikely(n == lowest)) {
+			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
+			n = 0;
+		}
+	}
+	if (n)
+		__set_bit(n, iommu->ctx_bitmap);
+
+	return n;
+}
+
+static inline void iommu_free_ctx(struct pci_iommu *iommu, int ctx)
+{
+	if (likely(ctx)) {
+		__clear_bit(ctx, iommu->ctx_bitmap);
+		if (ctx < iommu->ctx_lowest_free)
+			iommu->ctx_lowest_free = ctx;
+	}
+}
+
 /* Allocate and map kernel buffer of size SIZE using consistent mode
  * DMA for PCI device PDEV. Return non-NULL cpu-side address if
  * successful and set *DMA_ADDRP to the PCI side dma address.
@@ -235,7 +264,7 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_ad
 	npages = size >> IO_PAGE_SHIFT;
 	ctx = 0;
 	if (iommu->iommu_ctxflush)
-		ctx = iommu->iommu_cur_ctx++;
+		ctx = iommu_alloc_ctx(iommu);
 	first_page = __pa(first_page);
 	while (npages--) {
 		iopte_val(*iopte) = (IOPTE_CONSISTENT(ctx) |
@@ -316,6 +345,8 @@ void pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_
 		}
 	}
 
+	iommu_free_ctx(iommu, ctx);
+
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
 	order = get_order(size);
@@ -359,7 +390,7 @@ dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direct
 	base_paddr = __pa(oaddr & IO_PAGE_MASK);
 	ctx = 0;
 	if (iommu->iommu_ctxflush)
-		ctx = iommu->iommu_cur_ctx++;
+		ctx = iommu_alloc_ctx(iommu);
 	if (strbuf->strbuf_enabled)
 		iopte_protection = IOPTE_STREAMING(ctx);
 	else
@@ -379,6 +410,70 @@ bad:
 	return PCI_DMA_ERROR_CODE;
 }
 
+static void pci_strbuf_flush(struct pci_strbuf *strbuf, struct pci_iommu *iommu, u32 vaddr, unsigned long ctx, unsigned long npages, int direction)
+{
+	int limit;
+
+	if (strbuf->strbuf_ctxflush &&
+	    iommu->iommu_ctxflush) {
+		unsigned long matchreg, flushreg;
+		u64 val;
+
+		flushreg = strbuf->strbuf_ctxflush;
+		matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
+
+		pci_iommu_write(flushreg, ctx);
+		val = pci_iommu_read(matchreg);
+		val &= 0xffff;
+		if (!val)
+			goto do_flush_sync;
+
+		while (val) {
+			if (val & 0x1)
+				pci_iommu_write(flushreg, ctx);
+			val >>= 1;
+		}
+		val = pci_iommu_read(matchreg);
+		if (unlikely(val)) {
+			printk(KERN_WARNING "pci_strbuf_flush: ctx flush "
+			       "timeout matchreg[%lx] ctx[%lx]\n",
+			       val, ctx);
+			goto do_page_flush;
+		}
+	} else {
+		unsigned long i;
+
+	do_page_flush:
+		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
+			pci_iommu_write(strbuf->strbuf_pflush, vaddr);
+	}
+
+do_flush_sync:
+	/* If the device could not have possibly put dirty data into
+	 * the streaming cache, no flush-flag synchronization needs
+	 * to be performed.
+	 */
+	if (direction == PCI_DMA_TODEVICE)
+		return;
+
+	PCI_STC_FLUSHFLAG_INIT(strbuf);
+	pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
+	(void) pci_iommu_read(iommu->write_complete_reg);
+
+	limit = 100000;
+	while (!PCI_STC_FLUSHFLAG_SET(strbuf)) {
+		limit--;
+		if (!limit)
+			break;
+		udelay(1);
+		membar("#LoadLoad");
+	}
+	if (!limit)
+		printk(KERN_WARNING "pci_strbuf_flush: flushflag timeout "
+		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
+		       vaddr, ctx, npages);
+}
+
 /* Unmap a single streaming mode DMA translation. */
 void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
 {
@@ -386,7 +481,7 @@ void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int
 	struct pci_iommu *iommu;
 	struct pci_strbuf *strbuf;
 	iopte_t *base;
-	unsigned long flags, npages, i, ctx;
+	unsigned long flags, npages, ctx;
 
 	if (direction == PCI_DMA_NONE)
 		BUG();
@@ -414,29 +509,8 @@ void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int
 	ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
 
 	/* Step 1: Kick data out of streaming buffers if necessary. */
-	if (strbuf->strbuf_enabled) {
-		u32 vaddr = bus_addr;
-
-		PCI_STC_FLUSHFLAG_INIT(strbuf);
-		if (strbuf->strbuf_ctxflush &&
-		    iommu->iommu_ctxflush) {
-			unsigned long matchreg, flushreg;
-
-			flushreg = strbuf->strbuf_ctxflush;
-			matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
-			do {
-				pci_iommu_write(flushreg, ctx);
-			} while(((long)pci_iommu_read(matchreg)) < 0L);
-		} else {
-			for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
-				pci_iommu_write(strbuf->strbuf_pflush, vaddr);
-		}
-
-		pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
-		(void) pci_iommu_read(iommu->write_complete_reg);
-		while (!PCI_STC_FLUSHFLAG_SET(strbuf))
-			membar("#LoadLoad");
-	}
+	if (strbuf->strbuf_enabled)
+		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
 
 	/* Step 2: Clear out first TSB entry. */
 	iopte_make_dummy(iommu, base);
@@ -444,6 +518,8 @@ void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int
 	free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
 			       npages, ctx);
 
+	iommu_free_ctx(iommu, ctx);
+
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -583,7 +659,7 @@ int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int
 	/* Step 4: Choose a context if necessary. */
 	ctx = 0;
 	if (iommu->iommu_ctxflush)
-		ctx = iommu->iommu_cur_ctx++;
+		ctx = iommu_alloc_ctx(iommu);
 
 	/* Step 5: Create the mappings. */
 	if (strbuf->strbuf_enabled)
@@ -647,29 +723,8 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
 	ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
 
 	/* Step 1: Kick data out of streaming buffers if necessary. */
-	if (strbuf->strbuf_enabled) {
-		u32 vaddr = (u32) bus_addr;
-
-		PCI_STC_FLUSHFLAG_INIT(strbuf);
-		if (strbuf->strbuf_ctxflush &&
-		    iommu->iommu_ctxflush) {
-			unsigned long matchreg, flushreg;
-
-			flushreg = strbuf->strbuf_ctxflush;
-			matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
-			do {
-				pci_iommu_write(flushreg, ctx);
-			} while(((long)pci_iommu_read(matchreg)) < 0L);
-		} else {
-			for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
-				pci_iommu_write(strbuf->strbuf_pflush, vaddr);
-		}
-
-		pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
-		(void) pci_iommu_read(iommu->write_complete_reg);
-		while (!PCI_STC_FLUSHFLAG_SET(strbuf))
-			membar("#LoadLoad");
-	}
+	if (strbuf->strbuf_enabled)
+		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
 
 	/* Step 2: Clear out first TSB entry. */
 	iopte_make_dummy(iommu, base);
@@ -677,6 +732,8 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
 	free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
 			       npages, ctx);
 
+	iommu_free_ctx(iommu, ctx);
+
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -715,28 +772,7 @@ void pci_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size
 	}
 
 	/* Step 2: Kick data out of streaming buffers. */
-	PCI_STC_FLUSHFLAG_INIT(strbuf);
-	if (iommu->iommu_ctxflush &&
-	    strbuf->strbuf_ctxflush) {
-		unsigned long matchreg, flushreg;
-
-		flushreg = strbuf->strbuf_ctxflush;
-		matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
-		do {
-			pci_iommu_write(flushreg, ctx);
-		} while(((long)pci_iommu_read(matchreg)) < 0L);
-	} else {
-		unsigned long i;
-
-		for (i = 0; i < npages; i++, bus_addr += IO_PAGE_SIZE)
-			pci_iommu_write(strbuf->strbuf_pflush, bus_addr);
-	}
-
-	/* Step 3: Perform flush synchronization sequence. */
-	pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
-	(void) pci_iommu_read(iommu->write_complete_reg);
-	while (!PCI_STC_FLUSHFLAG_SET(strbuf))
-		membar("#LoadLoad");
+	pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
 
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
@@ -749,7 +785,8 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, i
 	struct pcidev_cookie *pcp;
 	struct pci_iommu *iommu;
 	struct pci_strbuf *strbuf;
-	unsigned long flags, ctx;
+	unsigned long flags, ctx, npages, i;
+	u32 bus_addr;
 
 	pcp = pdev->sysdata;
 	iommu = pcp->pbm->iommu;
@@ -772,36 +809,14 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, i
 	}
 
 	/* Step 2: Kick data out of streaming buffers. */
-	PCI_STC_FLUSHFLAG_INIT(strbuf);
-	if (iommu->iommu_ctxflush &&
-	    strbuf->strbuf_ctxflush) {
-		unsigned long matchreg, flushreg;
-
-		flushreg = strbuf->strbuf_ctxflush;
-		matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
-		do {
-			pci_iommu_write(flushreg, ctx);
-		} while (((long)pci_iommu_read(matchreg)) < 0L);
-	} else {
-		unsigned long i, npages;
-		u32 bus_addr;
-
-		bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
-
-		for(i = 1; i < nelems; i++)
-			if (!sglist[i].dma_length)
-				break;
-		i--;
-		npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) - bus_addr) >> IO_PAGE_SHIFT;
-		for (i = 0; i < npages; i++, bus_addr += IO_PAGE_SIZE)
-			pci_iommu_write(strbuf->strbuf_pflush, bus_addr);
-	}
-
-	/* Step 3: Perform flush synchronization sequence. */
-	pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
-	(void) pci_iommu_read(iommu->write_complete_reg);
-	while (!PCI_STC_FLUSHFLAG_SET(strbuf))
-		membar("#LoadLoad");
+	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
+	for(i = 1; i < nelems; i++)
+		if (!sglist[i].dma_length)
+			break;
+	i--;
+	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length)
+		  - bus_addr) >> IO_PAGE_SHIFT;
+	pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
 
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
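
Note (not part of the patch): the pci_iommu.c hunks above replace the monotonically increasing iommu_cur_ctx counter with a real allocator over a context bitmap: search for a free ID starting at a lowest-free hint, wrap around to the low IDs on failure, and fall back to ID 0 (the shared context) when the pool is exhausted. The standalone C sketch below models that same pattern so it can be compiled and run anywhere; the names ctx_pool_alloc, find_zero_bit, and NUM_CTXS are hypothetical stand-ins, not kernel APIs.

#include <stdio.h>

#define NUM_CTXS 32  /* hypothetical pool size; the real hardware limit differs */

struct ctx_pool {
	unsigned long bitmap;   /* bit n set => context n in use */
	int lowest_free;        /* hint: no free context below this ID */
};

/* Find the first clear bit in [start, end); return end if none. */
static int find_zero_bit(unsigned long map, int start, int end)
{
	for (int n = start; n < end; n++)
		if (!(map & (1UL << n)))
			return n;
	return end;
}

/* Allocate a context ID; 0 means "no private context available". */
static int ctx_pool_alloc(struct ctx_pool *p)
{
	int n = find_zero_bit(p->bitmap, p->lowest_free, NUM_CTXS);

	if (n == NUM_CTXS) {
		/* Wrap around, but never hand out context 0. */
		n = find_zero_bit(p->bitmap, 1, p->lowest_free);
		if (n == p->lowest_free)
			n = 0;  /* pool exhausted: fall back to the shared context */
	}
	if (n)
		p->bitmap |= 1UL << n;
	return n;
}

static void ctx_pool_free(struct ctx_pool *p, int ctx)
{
	if (ctx) {
		p->bitmap &= ~(1UL << ctx);
		if (ctx < p->lowest_free)
			p->lowest_free = ctx;  /* keep the hint accurate */
	}
}

int main(void)
{
	struct ctx_pool pool = { .bitmap = 0, .lowest_free = 1 };
	int a = ctx_pool_alloc(&pool);
	int b = ctx_pool_alloc(&pool);

	printf("allocated %d and %d\n", a, b);              /* 1 and 2 */
	ctx_pool_free(&pool, a);
	printf("reallocated %d\n", ctx_pool_alloc(&pool));  /* 1 again */
	return 0;
}

Reserving ID 0 mirrors the diff: context 0 still works, it just loses the benefit of a private context for flushing, which is why iommu_free_ctx() only touches the bitmap for non-zero IDs.
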
diff --git a/arch/sparc64/kernel/pci_psycho.c b/arch/sparc64/kernel/pci_psycho.c
index 3567fa879e1f..534320ef0db2 100644
--- a/arch/sparc64/kernel/pci_psycho.c
+++ b/arch/sparc64/kernel/pci_psycho.c
@@ -1212,7 +1212,7 @@ static void __init psycho_iommu_init(struct pci_controller_info *p)
 
 	/* Setup initial software IOMMU state. */
 	spin_lock_init(&iommu->lock);
-	iommu->iommu_cur_ctx = 0;
+	iommu->ctx_lowest_free = 1;
 
 	/* Register addresses. */
 	iommu->iommu_control = p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL;
diff --git a/arch/sparc64/kernel/pci_sabre.c b/arch/sparc64/kernel/pci_sabre.c
index 5525d1ec4af8..53d333b4a4e8 100644
--- a/arch/sparc64/kernel/pci_sabre.c
+++ b/arch/sparc64/kernel/pci_sabre.c
@@ -1265,7 +1265,7 @@ static void __init sabre_iommu_init(struct pci_controller_info *p,
 
 	/* Setup initial software IOMMU state. */
 	spin_lock_init(&iommu->lock);
-	iommu->iommu_cur_ctx = 0;
+	iommu->ctx_lowest_free = 1;
 
 	/* Register addresses. */
 	iommu->iommu_control = p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL;
diff --git a/arch/sparc64/kernel/pci_schizo.c b/arch/sparc64/kernel/pci_schizo.c
index e93fcadc3722..5753175b94e6 100644
--- a/arch/sparc64/kernel/pci_schizo.c
+++ b/arch/sparc64/kernel/pci_schizo.c
@@ -1753,7 +1753,7 @@ static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm)
 
 	/* Setup initial software IOMMU state. */
 	spin_lock_init(&iommu->lock);
-	iommu->iommu_cur_ctx = 0;
+	iommu->ctx_lowest_free = 1;
 
 	/* Register addresses, SCHIZO has iommu ctx flushing. */
 	iommu->iommu_control = pbm->pbm_regs + SCHIZO_IOMMU_CONTROL;
diff --git a/arch/sparc64/kernel/sbus.c b/arch/sparc64/kernel/sbus.c
index 14d9c3a21b9a..89f5e019f24c 100644
--- a/arch/sparc64/kernel/sbus.c
+++ b/arch/sparc64/kernel/sbus.c
@@ -117,19 +117,42 @@ static void iommu_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages
 
 #define STRBUF_TAG_VALID	0x02UL
 
-static void strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages)
+static void sbus_strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages, int direction)
 {
-	iommu->strbuf_flushflag = 0UL;
-	while (npages--)
-		upa_writeq(base + (npages << IO_PAGE_SHIFT),
+	unsigned long n;
+	int limit;
+
+	n = npages;
+	while (n--)
+		upa_writeq(base + (n << IO_PAGE_SHIFT),
 			   iommu->strbuf_regs + STRBUF_PFLUSH);
 
+	/* If the device could not have possibly put dirty data into
+	 * the streaming cache, no flush-flag synchronization needs
+	 * to be performed.
+	 */
+	if (direction == SBUS_DMA_TODEVICE)
+		return;
+
+	iommu->strbuf_flushflag = 0UL;
+
 	/* Whoopee cushion! */
 	upa_writeq(__pa(&iommu->strbuf_flushflag),
 		   iommu->strbuf_regs + STRBUF_FSYNC);
 	upa_readq(iommu->sbus_control_reg);
-	while (iommu->strbuf_flushflag == 0UL)
+
+	limit = 100000;
+	while (iommu->strbuf_flushflag == 0UL) {
+		limit--;
+		if (!limit)
+			break;
+		udelay(1);
 		membar("#LoadLoad");
+	}
+	if (!limit)
+		printk(KERN_WARNING "sbus_strbuf_flush: flushflag timeout "
+		       "vaddr[%08x] npages[%ld]\n",
+		       base, npages);
 }
 
 static iopte_t *alloc_streaming_cluster(struct sbus_iommu *iommu, unsigned long npages)
@@ -406,7 +429,7 @@ void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t dma_addr, size_t size,
 
 	spin_lock_irqsave(&iommu->lock, flags);
 	free_streaming_cluster(iommu, dma_base, size >> IO_PAGE_SHIFT);
-	strbuf_flush(iommu, dma_base, size >> IO_PAGE_SHIFT);
+	sbus_strbuf_flush(iommu, dma_base, size >> IO_PAGE_SHIFT, direction);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -569,7 +592,7 @@ void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int
 	iommu = sdev->bus->iommu;
 	spin_lock_irqsave(&iommu->lock, flags);
 	free_streaming_cluster(iommu, dvma_base, size >> IO_PAGE_SHIFT);
-	strbuf_flush(iommu, dvma_base, size >> IO_PAGE_SHIFT);
+	sbus_strbuf_flush(iommu, dvma_base, size >> IO_PAGE_SHIFT, direction);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -581,7 +604,7 @@ void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev, dma_addr_t base, size_t
 	size = (IO_PAGE_ALIGN(base + size) - (base & IO_PAGE_MASK));
 
 	spin_lock_irqsave(&iommu->lock, flags);
-	strbuf_flush(iommu, base & IO_PAGE_MASK, size >> IO_PAGE_SHIFT);
+	sbus_strbuf_flush(iommu, base & IO_PAGE_MASK, size >> IO_PAGE_SHIFT, direction);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -605,7 +628,7 @@ void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sg, int
 	size = IO_PAGE_ALIGN(sg[i].dma_address + sg[i].dma_length) - base;
 
 	spin_lock_irqsave(&iommu->lock, flags);
-	strbuf_flush(iommu, base, size >> IO_PAGE_SHIFT);
+	sbus_strbuf_flush(iommu, base, size >> IO_PAGE_SHIFT, direction);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
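
Note (not part of the patch): both the PCI and SBUS flush paths above turn an unbounded busy-wait on the flush flag into a poll that gives up after a fixed number of 1-microsecond delays and logs a warning, and both skip the flush-flag sync entirely for DMA_TODEVICE mappings. A minimal userspace sketch of that bounded-polling shape is shown below; the flag is set by a thread standing in for the hardware, usleep(1) stands in for udelay(1), and the 100000-iteration limit mirrors the value used in the patch.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* Stand-in for the flush flag the streaming buffer writes back. */
static volatile unsigned long flushflag;

static void *device_thread(void *arg)
{
	(void)arg;
	usleep(5000);       /* the "hardware" completes after a while */
	flushflag = 1UL;
	return NULL;
}

/* Poll the flag, but never spin forever: bail out and warn eventually. */
static int wait_for_flush(void)
{
	int limit = 100000;

	while (flushflag == 0UL) {
		if (!--limit)
			break;
		usleep(1);  /* udelay(1) in the kernel version */
	}
	if (!limit) {
		fprintf(stderr, "flush: flushflag timeout\n");
		return -1;
	}
	return 0;
}

int main(void)
{
	pthread_t t;

	flushflag = 0UL;
	pthread_create(&t, NULL, device_thread, NULL);
	printf("flush %s\n", wait_for_flush() ? "timed out" : "completed");
	pthread_join(&t, NULL);
	return 0;
}

The point of the bound is the same as in the diff: a wedged streaming buffer produces a warning and a possibly stale buffer instead of a silently hung CPU.
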
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index 12c3d84b7460..b7e6a91952b2 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -383,6 +383,17 @@ static void __init process_switch(char c)
 		/* Use PROM debug console. */
 		register_console(&prom_debug_console);
 		break;
+	case 'P':
+		/* Force UltraSPARC-III P-Cache on. */
+		if (tlb_type != cheetah) {
+			printk("BOOT: Ignoring P-Cache force option.\n");
+			break;
+		}
+		cheetah_pcache_forced_on = 1;
+		add_taint(TAINT_MACHINE_CHECK);
+		cheetah_enable_pcache();
+		break;
+
 	default:
 		printk("Unknown boot switch (-%c)\n", c);
 		break;
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 6dff06a44e76..e5b9c7a27789 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -123,6 +123,9 @@ void __init smp_callin(void)
 
 	smp_setup_percpu_timer();
 
+	if (cheetah_pcache_forced_on)
+		cheetah_enable_pcache();
+
 	local_irq_enable();
 
 	calibrate_delay();
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index 56b203a2af69..a9f4596d7c2b 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -421,6 +421,25 @@ asmlinkage void cee_log(unsigned long ce_status,
 	}
 }
 
+int cheetah_pcache_forced_on;
+
+void cheetah_enable_pcache(void)
+{
+	unsigned long dcr;
+
+	printk("CHEETAH: Enabling P-Cache on cpu %d.\n",
+	       smp_processor_id());
+
+	__asm__ __volatile__("ldxa [%%g0] %1, %0"
+			     : "=r" (dcr)
+			     : "i" (ASI_DCU_CONTROL_REG));
+	dcr |= (DCU_PE | DCU_HPE | DCU_SPE | DCU_SL);
+	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
+			     "membar #Sync"
+			     : /* no outputs */
+			     : "r" (dcr), "i" (ASI_DCU_CONTROL_REG));
+}
+
 /* Cheetah error trap handling. */
 static unsigned long ecache_flush_physbase;
 static unsigned long ecache_flush_linesize;
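
Note (not part of the patch): cheetah_enable_pcache() above is a read-modify-write of the UltraSPARC-III Data Cache Unit control register via ldxa/stxa followed by membar #Sync. The same set-bits-then-write-back pattern, modeled on an ordinary memory location so it can run anywhere, might look like the sketch below; the CTRL_* bit names and the fake register are invented for illustration and do not correspond to the real DCU layout.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical enable bits, analogous in spirit to DCU_PE/DCU_HPE/DCU_SPE/DCU_SL. */
#define CTRL_PE   (1UL << 0)
#define CTRL_HPE  (1UL << 1)
#define CTRL_SPE  (1UL << 2)
#define CTRL_SL   (1UL << 3)

/* Read-modify-write: set the cache-enable bits, leave everything else alone. */
static void enable_pcache(volatile uint64_t *ctrl_reg)
{
	uint64_t val = *ctrl_reg;            /* ldxa in the real code */

	val |= CTRL_PE | CTRL_HPE | CTRL_SPE | CTRL_SL;
	*ctrl_reg = val;                     /* stxa + membar #Sync in the real code */
}

int main(void)
{
	/* A plain variable stands in for the ASI-mapped control register. */
	volatile uint64_t fake_reg = 0x100;  /* some unrelated bit already set */

	enable_pcache(&fake_reg);
	printf("control register: 0x%llx\n", (unsigned long long)fake_reg);
	return 0;
}

The boot switch in setup.c runs this once on the boot CPU, and the smp.c hook repeats it on each secondary CPU as it comes up, so the forced-on setting applies machine-wide.
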