author     Linus Torvalds <torvalds@ppc970.osdl.org>   2005-06-01 16:20:35 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>   2005-06-01 16:20:35 -0400
commit     f9a22239257561df80ef54fc8f31880e2fb2a27b (patch)
tree       01e6be9b66ecf68f3c01ba76289007d983ebe162 /arch
parent     eff910a91ac04ab1d9e210d4f721484af3b39c8d (diff)
parent     88314ee73fd75eb32abdcb3119cd303c116d4500 (diff)
Automatic merge of rsync://rsync.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6
Diffstat (limited to 'arch')
-rw-r--r--  arch/sparc64/kernel/pci_iommu.c  | 88
-rw-r--r--  arch/sparc64/kernel/pci_psycho.c |  2
-rw-r--r--  arch/sparc64/kernel/pci_sabre.c  |  2
-rw-r--r--  arch/sparc64/kernel/pci_schizo.c |  2
-rw-r--r--  arch/sparc64/kernel/sbus.c       | 20
5 files changed, 85 insertions(+), 29 deletions(-)
diff --git a/arch/sparc64/kernel/pci_iommu.c b/arch/sparc64/kernel/pci_iommu.c
index 33ca56c90da2..2803bc7c2c79 100644
--- a/arch/sparc64/kernel/pci_iommu.c
+++ b/arch/sparc64/kernel/pci_iommu.c
@@ -196,6 +196,34 @@ static iopte_t *alloc_consistent_cluster(struct pci_iommu *iommu, unsigned long
 	return NULL;
 }
 
+static int iommu_alloc_ctx(struct pci_iommu *iommu)
+{
+	int lowest = iommu->ctx_lowest_free;
+	int sz = IOMMU_NUM_CTXS - lowest;
+	int n = find_next_zero_bit(iommu->ctx_bitmap, sz, lowest);
+
+	if (unlikely(n == sz)) {
+		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
+		if (unlikely(n == lowest)) {
+			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
+			n = 0;
+		}
+	}
+	if (n)
+		__set_bit(n, iommu->ctx_bitmap);
+
+	return n;
+}
+
+static inline void iommu_free_ctx(struct pci_iommu *iommu, int ctx)
+{
+	if (likely(ctx)) {
+		__clear_bit(ctx, iommu->ctx_bitmap);
+		if (ctx < iommu->ctx_lowest_free)
+			iommu->ctx_lowest_free = ctx;
+	}
+}
+
 /* Allocate and map kernel buffer of size SIZE using consistent mode
  * DMA for PCI device PDEV. Return non-NULL cpu-side address if
  * successful and set *DMA_ADDRP to the PCI side dma address.
@@ -236,7 +264,7 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_ad
 	npages = size >> IO_PAGE_SHIFT;
 	ctx = 0;
 	if (iommu->iommu_ctxflush)
-		ctx = iommu->iommu_cur_ctx++;
+		ctx = iommu_alloc_ctx(iommu);
 	first_page = __pa(first_page);
 	while (npages--) {
 		iopte_val(*iopte) = (IOPTE_CONSISTENT(ctx) |
@@ -317,6 +345,8 @@ void pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_
 		}
 	}
 
+	iommu_free_ctx(iommu, ctx);
+
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
 	order = get_order(size);
@@ -360,7 +390,7 @@ dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direct
 	base_paddr = __pa(oaddr & IO_PAGE_MASK);
 	ctx = 0;
 	if (iommu->iommu_ctxflush)
-		ctx = iommu->iommu_cur_ctx++;
+		ctx = iommu_alloc_ctx(iommu);
 	if (strbuf->strbuf_enabled)
 		iopte_protection = IOPTE_STREAMING(ctx);
 	else
@@ -380,39 +410,53 @@ bad:
 	return PCI_DMA_ERROR_CODE;
 }
 
-static void pci_strbuf_flush(struct pci_strbuf *strbuf, struct pci_iommu *iommu, u32 vaddr, unsigned long ctx, unsigned long npages)
+static void pci_strbuf_flush(struct pci_strbuf *strbuf, struct pci_iommu *iommu, u32 vaddr, unsigned long ctx, unsigned long npages, int direction)
 {
 	int limit;
 
-	PCI_STC_FLUSHFLAG_INIT(strbuf);
 	if (strbuf->strbuf_ctxflush &&
 	    iommu->iommu_ctxflush) {
 		unsigned long matchreg, flushreg;
+		u64 val;
 
 		flushreg = strbuf->strbuf_ctxflush;
 		matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
 
-		limit = 100000;
 		pci_iommu_write(flushreg, ctx);
-		for(;;) {
-			if (((long)pci_iommu_read(matchreg)) >= 0L)
-				break;
-			limit--;
-			if (!limit)
-				break;
-			udelay(1);
+		val = pci_iommu_read(matchreg);
+		val &= 0xffff;
+		if (!val)
+			goto do_flush_sync;
+
+		while (val) {
+			if (val & 0x1)
+				pci_iommu_write(flushreg, ctx);
+			val >>= 1;
 		}
-		if (!limit)
+		val = pci_iommu_read(matchreg);
+		if (unlikely(val)) {
 			printk(KERN_WARNING "pci_strbuf_flush: ctx flush "
-			       "timeout vaddr[%08x] ctx[%lx]\n",
-			       vaddr, ctx);
+			       "timeout matchreg[%lx] ctx[%lx]\n",
+			       val, ctx);
+			goto do_page_flush;
+		}
 	} else {
 		unsigned long i;
 
+	do_page_flush:
 		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
 			pci_iommu_write(strbuf->strbuf_pflush, vaddr);
 	}
 
+do_flush_sync:
+	/* If the device could not have possibly put dirty data into
+	 * the streaming cache, no flush-flag synchronization needs
+	 * to be performed.
+	 */
+	if (direction == PCI_DMA_TODEVICE)
+		return;
+
+	PCI_STC_FLUSHFLAG_INIT(strbuf);
 	pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
 	(void) pci_iommu_read(iommu->write_complete_reg);
 
@@ -466,7 +510,7 @@ void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int
 
 	/* Step 1: Kick data out of streaming buffers if necessary. */
 	if (strbuf->strbuf_enabled)
-		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages);
+		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
 
 	/* Step 2: Clear out first TSB entry. */
 	iopte_make_dummy(iommu, base);
@@ -474,6 +518,8 @@ void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int
 	free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
 			       npages, ctx);
 
+	iommu_free_ctx(iommu, ctx);
+
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -613,7 +659,7 @@ int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int
 	/* Step 4: Choose a context if necessary. */
 	ctx = 0;
 	if (iommu->iommu_ctxflush)
-		ctx = iommu->iommu_cur_ctx++;
+		ctx = iommu_alloc_ctx(iommu);
 
 	/* Step 5: Create the mappings. */
 	if (strbuf->strbuf_enabled)
@@ -678,7 +724,7 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
 
 	/* Step 1: Kick data out of streaming buffers if necessary. */
 	if (strbuf->strbuf_enabled)
-		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages);
+		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
 
 	/* Step 2: Clear out first TSB entry. */
 	iopte_make_dummy(iommu, base);
@@ -686,6 +732,8 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
 	free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
 			       npages, ctx);
 
+	iommu_free_ctx(iommu, ctx);
+
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -724,7 +772,7 @@ void pci_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size
 	}
 
 	/* Step 2: Kick data out of streaming buffers. */
-	pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages);
+	pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
 
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
@@ -768,7 +816,7 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, i
 	i--;
 	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length)
 		  - bus_addr) >> IO_PAGE_SHIFT;
-	pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages);
+	pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
 
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
diff --git a/arch/sparc64/kernel/pci_psycho.c b/arch/sparc64/kernel/pci_psycho.c
index 3567fa879e1f..534320ef0db2 100644
--- a/arch/sparc64/kernel/pci_psycho.c
+++ b/arch/sparc64/kernel/pci_psycho.c
@@ -1212,7 +1212,7 @@ static void __init psycho_iommu_init(struct pci_controller_info *p)
 
 	/* Setup initial software IOMMU state. */
 	spin_lock_init(&iommu->lock);
-	iommu->iommu_cur_ctx = 0;
+	iommu->ctx_lowest_free = 1;
 
 	/* Register addresses. */
 	iommu->iommu_control = p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL;
diff --git a/arch/sparc64/kernel/pci_sabre.c b/arch/sparc64/kernel/pci_sabre.c
index 5525d1ec4af8..53d333b4a4e8 100644
--- a/arch/sparc64/kernel/pci_sabre.c
+++ b/arch/sparc64/kernel/pci_sabre.c
@@ -1265,7 +1265,7 @@ static void __init sabre_iommu_init(struct pci_controller_info *p,
 
 	/* Setup initial software IOMMU state. */
 	spin_lock_init(&iommu->lock);
-	iommu->iommu_cur_ctx = 0;
+	iommu->ctx_lowest_free = 1;
 
 	/* Register addresses. */
 	iommu->iommu_control = p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL;
diff --git a/arch/sparc64/kernel/pci_schizo.c b/arch/sparc64/kernel/pci_schizo.c
index e93fcadc3722..5753175b94e6 100644
--- a/arch/sparc64/kernel/pci_schizo.c
+++ b/arch/sparc64/kernel/pci_schizo.c
@@ -1753,7 +1753,7 @@ static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm)
 
 	/* Setup initial software IOMMU state. */
 	spin_lock_init(&iommu->lock);
-	iommu->iommu_cur_ctx = 0;
+	iommu->ctx_lowest_free = 1;
 
 	/* Register addresses, SCHIZO has iommu ctx flushing. */
 	iommu->iommu_control = pbm->pbm_regs + SCHIZO_IOMMU_CONTROL;
diff --git a/arch/sparc64/kernel/sbus.c b/arch/sparc64/kernel/sbus.c
index 76ea6455433f..89f5e019f24c 100644
--- a/arch/sparc64/kernel/sbus.c
+++ b/arch/sparc64/kernel/sbus.c
@@ -117,17 +117,25 @@ static void iommu_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages
 
 #define STRBUF_TAG_VALID 0x02UL
 
-static void sbus_strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages)
+static void sbus_strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages, int direction)
 {
 	unsigned long n;
 	int limit;
 
-	iommu->strbuf_flushflag = 0UL;
 	n = npages;
 	while (n--)
 		upa_writeq(base + (n << IO_PAGE_SHIFT),
 			   iommu->strbuf_regs + STRBUF_PFLUSH);
 
+	/* If the device could not have possibly put dirty data into
+	 * the streaming cache, no flush-flag synchronization needs
+	 * to be performed.
+	 */
+	if (direction == SBUS_DMA_TODEVICE)
+		return;
+
+	iommu->strbuf_flushflag = 0UL;
+
 	/* Whoopee cushion! */
 	upa_writeq(__pa(&iommu->strbuf_flushflag),
 		   iommu->strbuf_regs + STRBUF_FSYNC);
@@ -421,7 +429,7 @@ void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t dma_addr, size_t size,
 
 	spin_lock_irqsave(&iommu->lock, flags);
 	free_streaming_cluster(iommu, dma_base, size >> IO_PAGE_SHIFT);
-	sbus_strbuf_flush(iommu, dma_base, size >> IO_PAGE_SHIFT);
+	sbus_strbuf_flush(iommu, dma_base, size >> IO_PAGE_SHIFT, direction);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -584,7 +592,7 @@ void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int
 	iommu = sdev->bus->iommu;
 	spin_lock_irqsave(&iommu->lock, flags);
 	free_streaming_cluster(iommu, dvma_base, size >> IO_PAGE_SHIFT);
-	sbus_strbuf_flush(iommu, dvma_base, size >> IO_PAGE_SHIFT);
+	sbus_strbuf_flush(iommu, dvma_base, size >> IO_PAGE_SHIFT, direction);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -596,7 +604,7 @@ void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev, dma_addr_t base, size_t
 	size = (IO_PAGE_ALIGN(base + size) - (base & IO_PAGE_MASK));
 
 	spin_lock_irqsave(&iommu->lock, flags);
-	sbus_strbuf_flush(iommu, base & IO_PAGE_MASK, size >> IO_PAGE_SHIFT);
+	sbus_strbuf_flush(iommu, base & IO_PAGE_MASK, size >> IO_PAGE_SHIFT, direction);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -620,7 +628,7 @@ void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sg, int
 	size = IO_PAGE_ALIGN(sg[i].dma_address + sg[i].dma_length) - base;
 
 	spin_lock_irqsave(&iommu->lock, flags);
-	sbus_strbuf_flush(iommu, base, size >> IO_PAGE_SHIFT);
+	sbus_strbuf_flush(iommu, base, size >> IO_PAGE_SHIFT, direction);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
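The substance of this merge is twofold: pci_strbuf_flush()/sbus_strbuf_flush() now take the DMA direction and skip flush-flag synchronization entirely for TODEVICE transfers (the device cannot have dirtied the streaming cache), and the ever-incrementing iommu_cur_ctx counter is replaced by a recycling bitmap allocator, iommu_alloc_ctx()/iommu_free_ctx(), seeded by the ctx_lowest_free = 1 hunks in the psycho/sabre/schizo init paths. The following self-contained C sketch illustrates that hint-plus-wraparound bitmap pattern outside the kernel; the names (ctx_alloc, ctx_free, next_zero_bit, NUM_CTXS) are hypothetical stand-ins, and next_zero_bit() is a linear substitute for the kernel's find_next_zero_bit(), so this is an illustration of the technique rather than the committed code.

/* Standalone sketch (not kernel code) of the context-allocation pattern
 * above: one bit per context, context 0 reserved as a shared fallback,
 * allocation scanning upward from a lowest-free hint and wrapping to 1,
 * freeing lowering the hint.  Build with: cc -o ctx ctx.c
 */
#include <stdio.h>

#define NUM_CTXS 16	/* stand-in for the hardware context count */

struct ctx_allocator {
	unsigned long bitmap[(NUM_CTXS + 63) / 64];	/* one bit per context */
	int lowest_free;				/* hint: lowest possibly-free ctx */
};

static int test_bit_sketch(const unsigned long *map, int n)
{
	return (int)((map[n / 64] >> (n % 64)) & 1UL);
}

/* Linear stand-in for the kernel's find_next_zero_bit(): returns the
 * first clear bit in [start, limit), or limit if every bit is set. */
static int next_zero_bit(const unsigned long *map, int limit, int start)
{
	int n;

	for (n = start; n < limit; n++)
		if (!test_bit_sketch(map, n))
			return n;
	return limit;
}

static int ctx_alloc(struct ctx_allocator *a)
{
	int n = next_zero_bit(a->bitmap, NUM_CTXS, a->lowest_free);

	if (n == NUM_CTXS) {
		/* Nothing free at or above the hint: wrap and retry
		 * from 1, since context 0 is never handed out here. */
		n = next_zero_bit(a->bitmap, a->lowest_free, 1);
		if (n == a->lowest_free) {
			fprintf(stderr, "ran out of contexts\n");
			n = 0;	/* fall back to the shared context 0 */
		}
	}
	if (n)
		a->bitmap[n / 64] |= 1UL << (n % 64);
	return n;
}

static void ctx_free(struct ctx_allocator *a, int ctx)
{
	if (ctx) {
		a->bitmap[ctx / 64] &= ~(1UL << (ctx % 64));
		if (ctx < a->lowest_free)
			a->lowest_free = ctx;	/* lower the hint on free */
	}
}

int main(void)
{
	/* lowest_free starts at 1, mirroring the *_iommu_init() hunks above */
	struct ctx_allocator a = { .bitmap = {0}, .lowest_free = 1 };
	int c1 = ctx_alloc(&a);		/* 1 */
	int c2 = ctx_alloc(&a);		/* 2 */

	printf("allocated ctx %d and %d\n", c1, c2);
	ctx_free(&a, c1);
	printf("reallocated ctx %d\n", ctx_alloc(&a));	/* 1 again */
	return 0;
}

Note the trade this pattern makes: freeing only lowers the hint and allocation never raises it, so the hint can go stale-low and cost a longer scan, but it never skips over a free context; and on exhaustion the allocator degrades to the shared context 0 rather than failing the mapping.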