-rw-r--r--   arch/sparc64/kernel/iommu.c     | 231
-rw-r--r--   arch/sparc64/kernel/pci_sun4v.c | 206
-rw-r--r--   include/asm-sparc64/io.h        |   2
3 files changed, 291 insertions(+), 148 deletions(-)
diff --git a/arch/sparc64/kernel/iommu.c b/arch/sparc64/kernel/iommu.c
index 90a5907080a1..d3276ebcfb47 100644
--- a/arch/sparc64/kernel/iommu.c
+++ b/arch/sparc64/kernel/iommu.c
@@ -512,124 +512,209 @@ static void dma_4u_unmap_single(struct device *dev, dma_addr_t bus_addr,
512 | static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist, | 512 | static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist, |
513 | int nelems, enum dma_data_direction direction) | 513 | int nelems, enum dma_data_direction direction) |
514 | { | 514 | { |
515 | unsigned long flags, ctx, i, npages, iopte_protection; | 515 | struct scatterlist *s, *outs, *segstart; |
516 | struct scatterlist *sg; | 516 | unsigned long flags, handle, prot, ctx; |
517 | dma_addr_t dma_next = 0, dma_addr; | ||
518 | unsigned int max_seg_size; | ||
519 | int outcount, incount, i; | ||
517 | struct strbuf *strbuf; | 520 | struct strbuf *strbuf; |
518 | struct iommu *iommu; | 521 | struct iommu *iommu; |
519 | iopte_t *base; | 522 | |
520 | u32 dma_base; | 523 | BUG_ON(direction == DMA_NONE); |
521 | |||
522 | /* Fast path single entry scatterlists. */ | ||
523 | if (nelems == 1) { | ||
524 | sglist->dma_address = | ||
525 | dma_4u_map_single(dev, sg_virt(sglist), | ||
526 | sglist->length, direction); | ||
527 | if (unlikely(sglist->dma_address == DMA_ERROR_CODE)) | ||
528 | return 0; | ||
529 | sglist->dma_length = sglist->length; | ||
530 | return 1; | ||
531 | } | ||
532 | 524 | ||
533 | iommu = dev->archdata.iommu; | 525 | iommu = dev->archdata.iommu; |
534 | strbuf = dev->archdata.stc; | 526 | strbuf = dev->archdata.stc; |
535 | 527 | if (nelems == 0 || !iommu) | |
536 | if (unlikely(direction == DMA_NONE)) | 528 | return 0; |
537 | goto bad_no_ctx; | ||
538 | |||
539 | npages = calc_npages(sglist, nelems); | ||
540 | 529 | ||
541 | spin_lock_irqsave(&iommu->lock, flags); | 530 | spin_lock_irqsave(&iommu->lock, flags); |
542 | 531 | ||
543 | base = alloc_npages(dev, iommu, npages); | ||
544 | ctx = 0; | 532 | ctx = 0; |
545 | if (iommu->iommu_ctxflush) | 533 | if (iommu->iommu_ctxflush) |
546 | ctx = iommu_alloc_ctx(iommu); | 534 | ctx = iommu_alloc_ctx(iommu); |
547 | 535 | ||
548 | spin_unlock_irqrestore(&iommu->lock, flags); | ||
549 | |||
550 | if (base == NULL) | ||
551 | goto bad; | ||
552 | |||
553 | dma_base = iommu->page_table_map_base + | ||
554 | ((base - iommu->page_table) << IO_PAGE_SHIFT); | ||
555 | |||
556 | if (strbuf->strbuf_enabled) | 536 | if (strbuf->strbuf_enabled) |
557 | iopte_protection = IOPTE_STREAMING(ctx); | 537 | prot = IOPTE_STREAMING(ctx); |
558 | else | 538 | else |
559 | iopte_protection = IOPTE_CONSISTENT(ctx); | 539 | prot = IOPTE_CONSISTENT(ctx); |
560 | if (direction != DMA_TO_DEVICE) | 540 | if (direction != DMA_TO_DEVICE) |
561 | iopte_protection |= IOPTE_WRITE; | 541 | prot |= IOPTE_WRITE; |
562 | 542 | ||
563 | for_each_sg(sglist, sg, nelems, i) { | 543 | outs = s = segstart = &sglist[0]; |
564 | unsigned long paddr = SG_ENT_PHYS_ADDRESS(sg); | 544 | outcount = 1; |
565 | unsigned long slen = sg->length; | 545 | incount = nelems; |
566 | unsigned long this_npages; | 546 | handle = 0; |
547 | |||
548 | /* Init first segment length for backout at failure */ | ||
549 | outs->dma_length = 0; | ||
550 | |||
551 | max_seg_size = dma_get_max_seg_size(dev); | ||
552 | for_each_sg(sglist, s, nelems, i) { | ||
553 | unsigned long paddr, npages, entry, slen; | ||
554 | iopte_t *base; | ||
555 | |||
556 | slen = s->length; | ||
557 | /* Sanity check */ | ||
558 | if (slen == 0) { | ||
559 | dma_next = 0; | ||
560 | continue; | ||
561 | } | ||
562 | /* Allocate iommu entries for that segment */ | ||
563 | paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s); | ||
564 | npages = iommu_num_pages(paddr, slen); | ||
565 | entry = iommu_range_alloc(dev, iommu, npages, &handle); | ||
566 | |||
567 | /* Handle failure */ | ||
568 | if (unlikely(entry == DMA_ERROR_CODE)) { | ||
569 | if (printk_ratelimit()) | ||
570 | printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx" | ||
571 | " npages %lx\n", iommu, paddr, npages); | ||
572 | goto iommu_map_failed; | ||
573 | } | ||
567 | 574 | ||
568 | this_npages = iommu_num_pages(paddr, slen); | 575 | base = iommu->page_table + entry; |
569 | 576 | ||
570 | sg->dma_address = dma_base | (paddr & ~IO_PAGE_MASK); | 577 | /* Convert entry to a dma_addr_t */ |
571 | sg->dma_length = slen; | 578 | dma_addr = iommu->page_table_map_base + |
579 | (entry << IO_PAGE_SHIFT); | ||
580 | dma_addr |= (s->offset & ~IO_PAGE_MASK); | ||
572 | 581 | ||
582 | /* Insert into HW table */ | ||
573 | paddr &= IO_PAGE_MASK; | 583 | paddr &= IO_PAGE_MASK; |
574 | while (this_npages--) { | 584 | while (npages--) { |
575 | iopte_val(*base) = iopte_protection | paddr; | 585 | iopte_val(*base) = prot | paddr; |
576 | |||
577 | base++; | 586 | base++; |
578 | paddr += IO_PAGE_SIZE; | 587 | paddr += IO_PAGE_SIZE; |
579 | dma_base += IO_PAGE_SIZE; | ||
580 | } | 588 | } |
589 | |||
590 | /* If we are in an open segment, try merging */ | ||
591 | if (segstart != s) { | ||
592 | /* We cannot merge if: | ||
593 | * - allocated dma_addr isn't contiguous to previous allocation | ||
594 | */ | ||
595 | if ((dma_addr != dma_next) || | ||
596 | (outs->dma_length + s->length > max_seg_size)) { | ||
597 | /* Can't merge: create a new segment */ | ||
598 | segstart = s; | ||
599 | outcount++; | ||
600 | outs = sg_next(outs); | ||
601 | } else { | ||
602 | outs->dma_length += s->length; | ||
603 | } | ||
604 | } | ||
605 | |||
606 | if (segstart == s) { | ||
607 | /* This is a new segment, fill entries */ | ||
608 | outs->dma_address = dma_addr; | ||
609 | outs->dma_length = slen; | ||
610 | } | ||
611 | |||
612 | /* Calculate next page pointer for contiguous check */ | ||
613 | dma_next = dma_addr + slen; | ||
581 | } | 614 | } |
582 | 615 | ||
583 | return nelems; | 616 | spin_unlock_irqrestore(&iommu->lock, flags); |
617 | |||
618 | if (outcount < incount) { | ||
619 | outs = sg_next(outs); | ||
620 | outs->dma_address = DMA_ERROR_CODE; | ||
621 | outs->dma_length = 0; | ||
622 | } | ||
623 | |||
624 | return outcount; | ||
625 | |||
626 | iommu_map_failed: | ||
627 | for_each_sg(sglist, s, nelems, i) { | ||
628 | if (s->dma_length != 0) { | ||
629 | unsigned long vaddr, npages, entry, i; | ||
630 | iopte_t *base; | ||
631 | |||
632 | vaddr = s->dma_address & IO_PAGE_MASK; | ||
633 | npages = iommu_num_pages(s->dma_address, s->dma_length); | ||
634 | iommu_range_free(iommu, vaddr, npages); | ||
635 | |||
636 | entry = (vaddr - iommu->page_table_map_base) | ||
637 | >> IO_PAGE_SHIFT; | ||
638 | base = iommu->page_table + entry; | ||
639 | |||
640 | for (i = 0; i < npages; i++) | ||
641 | iopte_make_dummy(iommu, base + i); | ||
642 | |||
643 | s->dma_address = DMA_ERROR_CODE; | ||
644 | s->dma_length = 0; | ||
645 | } | ||
646 | if (s == outs) | ||
647 | break; | ||
648 | } | ||
649 | spin_unlock_irqrestore(&iommu->lock, flags); | ||
584 | 650 | ||
585 | bad: | ||
586 | iommu_free_ctx(iommu, ctx); | ||
587 | bad_no_ctx: | ||
588 | if (printk_ratelimit()) | ||
589 | WARN_ON(1); | ||
590 | return 0; | 651 | return 0; |
591 | } | 652 | } |
592 | 653 | ||
654 | /* If contexts are being used, they are the same in all of the mappings | ||
655 | * we make for a particular SG. | ||
656 | */ | ||
657 | static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg) | ||
658 | { | ||
659 | unsigned long ctx = 0; | ||
660 | |||
661 | if (iommu->iommu_ctxflush) { | ||
662 | iopte_t *base; | ||
663 | u32 bus_addr; | ||
664 | |||
665 | bus_addr = sg->dma_address & IO_PAGE_MASK; | ||
666 | base = iommu->page_table + | ||
667 | ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT); | ||
668 | |||
669 | ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL; | ||
670 | } | ||
671 | return ctx; | ||
672 | } | ||
673 | |||
593 | static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist, | 674 | static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist, |
594 | int nelems, enum dma_data_direction direction) | 675 | int nelems, enum dma_data_direction direction) |
595 | { | 676 | { |
596 | unsigned long flags, ctx, i, npages; | 677 | unsigned long flags, ctx; |
678 | struct scatterlist *sg; | ||
597 | struct strbuf *strbuf; | 679 | struct strbuf *strbuf; |
598 | struct iommu *iommu; | 680 | struct iommu *iommu; |
599 | iopte_t *base; | ||
600 | u32 bus_addr; | ||
601 | 681 | ||
602 | if (unlikely(direction == DMA_NONE)) { | 682 | BUG_ON(direction == DMA_NONE); |
603 | if (printk_ratelimit()) | ||
604 | WARN_ON(1); | ||
605 | } | ||
606 | 683 | ||
607 | iommu = dev->archdata.iommu; | 684 | iommu = dev->archdata.iommu; |
608 | strbuf = dev->archdata.stc; | 685 | strbuf = dev->archdata.stc; |
609 | 686 | ||
610 | bus_addr = sglist->dma_address & IO_PAGE_MASK; | 687 | ctx = fetch_sg_ctx(iommu, sglist); |
611 | 688 | ||
612 | npages = calc_npages(sglist, nelems); | 689 | spin_lock_irqsave(&iommu->lock, flags); |
613 | 690 | ||
614 | base = iommu->page_table + | 691 | sg = sglist; |
615 | ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT); | 692 | while (nelems--) { |
693 | dma_addr_t dma_handle = sg->dma_address; | ||
694 | unsigned int len = sg->dma_length; | ||
695 | unsigned long npages, entry; | ||
696 | iopte_t *base; | ||
697 | int i; | ||
616 | 698 | ||
617 | spin_lock_irqsave(&iommu->lock, flags); | 699 | if (!len) |
700 | break; | ||
701 | npages = iommu_num_pages(dma_handle, len); | ||
702 | iommu_range_free(iommu, dma_handle, npages); | ||
618 | 703 | ||
619 | /* Record the context, if any. */ | 704 | entry = ((dma_handle - iommu->page_table_map_base) |
620 | ctx = 0; | 705 | >> IO_PAGE_SHIFT); |
621 | if (iommu->iommu_ctxflush) | 706 | base = iommu->page_table + entry; |
622 | ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL; | ||
623 | 707 | ||
624 | /* Step 1: Kick data out of streaming buffers if necessary. */ | 708 | dma_handle &= IO_PAGE_MASK; |
625 | if (strbuf->strbuf_enabled) | 709 | if (strbuf->strbuf_enabled) |
626 | strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction); | 710 | strbuf_flush(strbuf, iommu, dma_handle, ctx, |
711 | npages, direction); | ||
627 | 712 | ||
628 | /* Step 2: Clear out the TSB entries. */ | 713 | for (i = 0; i < npages; i++) |
629 | for (i = 0; i < npages; i++) | 714 | iopte_make_dummy(iommu, base + i); |
630 | iopte_make_dummy(iommu, base + i); | ||
631 | 715 | ||
632 | iommu_range_free(iommu, bus_addr, npages); | 716 | sg = sg_next(sg); |
717 | } | ||
633 | 718 | ||
634 | iommu_free_ctx(iommu, ctx); | 719 | iommu_free_ctx(iommu, ctx); |
635 | 720 | ||
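
The rewritten dma_4u_map_sg() above no longer maps the whole scatterlist as one large IOMMU allocation. It allocates IOMMU entries per segment and then coalesces segments whose DMA addresses come out contiguous, as long as the combined length stays within dma_get_max_seg_size(); on allocation failure it walks back through the list and frees everything it had mapped. Below is a minimal, standalone sketch of just that merge decision. All names here (struct seg, merge_segments) are invented for illustration; this is not the kernel code.

#include <stdio.h>

struct seg {
	unsigned long dma_addr;
	unsigned long len;
};

/* Fold each input segment into the previous output segment when its DMA
 * address continues exactly where the previous one ended and the merged
 * length still fits under max_seg_size; otherwise open a new segment. */
static int merge_segments(const struct seg *in, int nelems,
			  struct seg *out, unsigned long max_seg_size)
{
	unsigned long dma_next = 0;
	int outcount = 0;
	int i;

	for (i = 0; i < nelems; i++) {
		if (in[i].len == 0)
			continue;

		if (outcount != 0 &&
		    in[i].dma_addr == dma_next &&
		    out[outcount - 1].len + in[i].len <= max_seg_size) {
			out[outcount - 1].len += in[i].len;
		} else {
			out[outcount].dma_addr = in[i].dma_addr;
			out[outcount].len = in[i].len;
			outcount++;
		}
		dma_next = in[i].dma_addr + in[i].len;
	}
	return outcount;
}

int main(void)
{
	/* Two adjacent 8K mappings followed by one discontiguous 4K mapping. */
	struct seg in[] = {
		{ 0x80000000UL, 8192 },
		{ 0x80002000UL, 8192 },
		{ 0x80010000UL, 4096 },
	};
	struct seg out[3];
	int i, n;

	n = merge_segments(in, 3, out, 65536);
	for (i = 0; i < n; i++)
		printf("segment %d: dma 0x%lx len %lu\n",
		       i, out[i].dma_addr, out[i].len);
	return 0;
}

Running this merges the first two 8K mappings into a single 16K segment and leaves the discontiguous third mapping as its own segment, which is the same shape of result the new outcount/outs bookkeeping produces.
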
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c
index c8b6199a5dc4..ddca6c6c0b49 100644
--- a/arch/sparc64/kernel/pci_sun4v.c
+++ b/arch/sparc64/kernel/pci_sun4v.c
@@ -89,6 +89,17 @@ static long iommu_batch_flush(struct iommu_batch *p)
89 | return 0; | 89 | return 0; |
90 | } | 90 | } |
91 | 91 | ||
92 | static inline void iommu_batch_new_entry(unsigned long entry) | ||
93 | { | ||
94 | struct iommu_batch *p = &__get_cpu_var(iommu_batch); | ||
95 | |||
96 | if (p->entry + p->npages == entry) | ||
97 | return; | ||
98 | if (p->entry != ~0UL) | ||
99 | iommu_batch_flush(p); | ||
100 | p->entry = entry; | ||
101 | } | ||
102 | |||
92 | /* Interrupts must be disabled. */ | 103 | /* Interrupts must be disabled. */ |
93 | static inline long iommu_batch_add(u64 phys_page) | 104 | static inline long iommu_batch_add(u64 phys_page) |
94 | { | 105 | { |
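
The iommu_batch_new_entry() helper added above lets dma_4v_map_sg() keep the per-CPU hypervisor batch open across consecutive scatterlist segments: the pending batch is flushed only when the newly allocated TSB entry does not continue the batched run. A tiny standalone model of that rule follows; struct batch and the printf stand in for the real per-CPU state and the hypervisor flush, purely for illustration.

#include <stdio.h>

struct batch {
	unsigned long entry;	/* first TSB entry of the open batch */
	unsigned long npages;	/* pages batched so far */
};

static void batch_new_entry(struct batch *b, unsigned long entry)
{
	if (b->entry + b->npages == entry)
		return;			/* contiguous: keep batching */
	if (b->entry != ~0UL)
		printf("flush entries [%lu, %lu)\n",
		       b->entry, b->entry + b->npages);
	b->entry = entry;
	b->npages = 0;
}

int main(void)
{
	struct batch b = { ~0UL, 0 };	/* ~0UL marks "no batch open" */

	batch_new_entry(&b, 100);
	b.npages = 4;			/* four pages added at entries 100..103 */
	batch_new_entry(&b, 104);	/* adjacent run: no flush */
	b.npages += 2;			/* batch now covers 100..105 */
	batch_new_entry(&b, 300);	/* gap: the previous run is flushed */
	return 0;
}

In the kernel the flush work is done by iommu_batch_flush(), which pushes the pending pages to the hypervisor and resets the batched page count.
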
@@ -320,88 +331,131 @@ static void dma_4v_unmap_single(struct device *dev, dma_addr_t bus_addr,
320 | static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, | 331 | static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, |
321 | int nelems, enum dma_data_direction direction) | 332 | int nelems, enum dma_data_direction direction) |
322 | { | 333 | { |
323 | unsigned long flags, npages, i, prot; | 334 | struct scatterlist *s, *outs, *segstart; |
324 | u32 dma_base, orig_dma_base; | 335 | unsigned long flags, handle, prot; |
325 | struct scatterlist *sg; | 336 | dma_addr_t dma_next = 0, dma_addr; |
337 | unsigned int max_seg_size; | ||
338 | int outcount, incount, i; | ||
326 | struct iommu *iommu; | 339 | struct iommu *iommu; |
327 | long entry, err; | 340 | long err; |
328 | 341 | ||
329 | /* Fast path single entry scatterlists. */ | 342 | BUG_ON(direction == DMA_NONE); |
330 | if (nelems == 1) { | ||
331 | sglist->dma_address = | ||
332 | dma_4v_map_single(dev, sg_virt(sglist), | ||
333 | sglist->length, direction); | ||
334 | if (unlikely(sglist->dma_address == DMA_ERROR_CODE)) | ||
335 | return 0; | ||
336 | sglist->dma_length = sglist->length; | ||
337 | return 1; | ||
338 | } | ||
339 | 343 | ||
340 | iommu = dev->archdata.iommu; | 344 | iommu = dev->archdata.iommu; |
345 | if (nelems == 0 || !iommu) | ||
346 | return 0; | ||
341 | 347 | ||
342 | if (unlikely(direction == DMA_NONE)) | 348 | prot = HV_PCI_MAP_ATTR_READ; |
343 | goto bad; | 349 | if (direction != DMA_TO_DEVICE) |
344 | 350 | prot |= HV_PCI_MAP_ATTR_WRITE; | |
345 | npages = calc_npages(sglist, nelems); | ||
346 | 351 | ||
347 | spin_lock_irqsave(&iommu->lock, flags); | 352 | outs = s = segstart = &sglist[0]; |
348 | entry = iommu_range_alloc(dev, iommu, npages, NULL); | 353 | outcount = 1; |
349 | spin_unlock_irqrestore(&iommu->lock, flags); | 354 | incount = nelems; |
355 | handle = 0; | ||
350 | 356 | ||
351 | if (unlikely(entry == DMA_ERROR_CODE)) | 357 | /* Init first segment length for backout at failure */ |
352 | goto bad; | 358 | outs->dma_length = 0; |
353 | 359 | ||
354 | orig_dma_base = dma_base = iommu->page_table_map_base + | 360 | spin_lock_irqsave(&iommu->lock, flags); |
355 | (entry << IO_PAGE_SHIFT); | ||
356 | 361 | ||
357 | prot = HV_PCI_MAP_ATTR_READ; | 362 | iommu_batch_start(dev, prot, ~0UL); |
358 | if (direction != DMA_TO_DEVICE) | ||
359 | prot |= HV_PCI_MAP_ATTR_WRITE; | ||
360 | 363 | ||
361 | local_irq_save(flags); | 364 | max_seg_size = dma_get_max_seg_size(dev); |
365 | for_each_sg(sglist, s, nelems, i) { | ||
366 | unsigned long paddr, npages, entry, slen; | ||
362 | 367 | ||
363 | iommu_batch_start(dev, prot, entry); | 368 | slen = s->length; |
369 | /* Sanity check */ | ||
370 | if (slen == 0) { | ||
371 | dma_next = 0; | ||
372 | continue; | ||
373 | } | ||
374 | /* Allocate iommu entries for that segment */ | ||
375 | paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s); | ||
376 | npages = iommu_num_pages(paddr, slen); | ||
377 | entry = iommu_range_alloc(dev, iommu, npages, &handle); | ||
364 | 378 | ||
365 | for_each_sg(sglist, sg, nelems, i) { | 379 | /* Handle failure */ |
366 | unsigned long paddr = SG_ENT_PHYS_ADDRESS(sg); | 380 | if (unlikely(entry == DMA_ERROR_CODE)) { |
367 | unsigned long slen = sg->length; | 381 | if (printk_ratelimit()) |
368 | unsigned long this_npages; | 382 | printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx" |
383 | " npages %lx\n", iommu, paddr, npages); | ||
384 | goto iommu_map_failed; | ||
385 | } | ||
369 | 386 | ||
370 | this_npages = iommu_num_pages(paddr, slen); | 387 | iommu_batch_new_entry(entry); |
371 | 388 | ||
372 | sg->dma_address = dma_base | (paddr & ~IO_PAGE_MASK); | 389 | /* Convert entry to a dma_addr_t */ |
373 | sg->dma_length = slen; | 390 | dma_addr = iommu->page_table_map_base + |
391 | (entry << IO_PAGE_SHIFT); | ||
392 | dma_addr |= (s->offset & ~IO_PAGE_MASK); | ||
374 | 393 | ||
394 | /* Insert into HW table */ | ||
375 | paddr &= IO_PAGE_MASK; | 395 | paddr &= IO_PAGE_MASK; |
376 | while (this_npages--) { | 396 | while (npages--) { |
377 | err = iommu_batch_add(paddr); | 397 | err = iommu_batch_add(paddr); |
378 | if (unlikely(err < 0L)) { | 398 | if (unlikely(err < 0L)) |
379 | local_irq_restore(flags); | ||
380 | goto iommu_map_failed; | 399 | goto iommu_map_failed; |
400 | paddr += IO_PAGE_SIZE; | ||
401 | } | ||
402 | |||
403 | /* If we are in an open segment, try merging */ | ||
404 | if (segstart != s) { | ||
405 | /* We cannot merge if: | ||
406 | * - allocated dma_addr isn't contiguous to previous allocation | ||
407 | */ | ||
408 | if ((dma_addr != dma_next) || | ||
409 | (outs->dma_length + s->length > max_seg_size)) { | ||
410 | /* Can't merge: create a new segment */ | ||
411 | segstart = s; | ||
412 | outcount++; | ||
413 | outs = sg_next(outs); | ||
414 | } else { | ||
415 | outs->dma_length += s->length; | ||
381 | } | 416 | } |
417 | } | ||
382 | 418 | ||
383 | paddr += IO_PAGE_SIZE; | 419 | if (segstart == s) { |
384 | dma_base += IO_PAGE_SIZE; | 420 | /* This is a new segment, fill entries */ |
421 | outs->dma_address = dma_addr; | ||
422 | outs->dma_length = slen; | ||
385 | } | 423 | } |
424 | |||
425 | /* Calculate next page pointer for contiguous check */ | ||
426 | dma_next = dma_addr + slen; | ||
386 | } | 427 | } |
387 | 428 | ||
388 | err = iommu_batch_end(); | 429 | err = iommu_batch_end(); |
389 | 430 | ||
390 | local_irq_restore(flags); | ||
391 | |||
392 | if (unlikely(err < 0L)) | 431 | if (unlikely(err < 0L)) |
393 | goto iommu_map_failed; | 432 | goto iommu_map_failed; |
394 | 433 | ||
395 | return nelems; | 434 | spin_unlock_irqrestore(&iommu->lock, flags); |
396 | 435 | ||
397 | bad: | 436 | if (outcount < incount) { |
398 | if (printk_ratelimit()) | 437 | outs = sg_next(outs); |
399 | WARN_ON(1); | 438 | outs->dma_address = DMA_ERROR_CODE; |
400 | return 0; | 439 | outs->dma_length = 0; |
440 | } | ||
441 | |||
442 | return outcount; | ||
401 | 443 | ||
402 | iommu_map_failed: | 444 | iommu_map_failed: |
403 | spin_lock_irqsave(&iommu->lock, flags); | 445 | for_each_sg(sglist, s, nelems, i) { |
404 | iommu_range_free(iommu, orig_dma_base, npages); | 446 | if (s->dma_length != 0) { |
447 | unsigned long vaddr, npages; | ||
448 | |||
449 | vaddr = s->dma_address & IO_PAGE_MASK; | ||
450 | npages = iommu_num_pages(s->dma_address, s->dma_length); | ||
451 | iommu_range_free(iommu, vaddr, npages); | ||
452 | /* XXX demap? XXX */ | ||
453 | s->dma_address = DMA_ERROR_CODE; | ||
454 | s->dma_length = 0; | ||
455 | } | ||
456 | if (s == outs) | ||
457 | break; | ||
458 | } | ||
405 | spin_unlock_irqrestore(&iommu->lock, flags); | 459 | spin_unlock_irqrestore(&iommu->lock, flags); |
406 | 460 | ||
407 | return 0; | 461 | return 0; |
@@ -410,39 +464,43 @@ iommu_map_failed:
410 | static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist, | 464 | static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist, |
411 | int nelems, enum dma_data_direction direction) | 465 | int nelems, enum dma_data_direction direction) |
412 | { | 466 | { |
413 | unsigned long flags, npages; | ||
414 | struct pci_pbm_info *pbm; | 467 | struct pci_pbm_info *pbm; |
415 | u32 devhandle, bus_addr; | 468 | struct scatterlist *sg; |
416 | struct iommu *iommu; | 469 | struct iommu *iommu; |
417 | long entry; | 470 | unsigned long flags; |
471 | u32 devhandle; | ||
418 | 472 | ||
419 | if (unlikely(direction == DMA_NONE)) { | 473 | BUG_ON(direction == DMA_NONE); |
420 | if (printk_ratelimit()) | ||
421 | WARN_ON(1); | ||
422 | } | ||
423 | 474 | ||
424 | iommu = dev->archdata.iommu; | 475 | iommu = dev->archdata.iommu; |
425 | pbm = dev->archdata.host_controller; | 476 | pbm = dev->archdata.host_controller; |
426 | devhandle = pbm->devhandle; | 477 | devhandle = pbm->devhandle; |
427 | 478 | ||
428 | bus_addr = sglist->dma_address & IO_PAGE_MASK; | ||
429 | |||
430 | npages = calc_npages(sglist, nelems); | ||
431 | |||
432 | entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT); | ||
433 | |||
434 | spin_lock_irqsave(&iommu->lock, flags); | 479 | spin_lock_irqsave(&iommu->lock, flags); |
435 | 480 | ||
436 | iommu_range_free(iommu, bus_addr, npages); | 481 | sg = sglist; |
437 | 482 | while (nelems--) { | |
438 | do { | 483 | dma_addr_t dma_handle = sg->dma_address; |
439 | unsigned long num; | 484 | unsigned int len = sg->dma_length; |
485 | unsigned long npages, entry; | ||
486 | |||
487 | if (!len) | ||
488 | break; | ||
489 | npages = iommu_num_pages(dma_handle, len); | ||
490 | iommu_range_free(iommu, dma_handle, npages); | ||
491 | |||
492 | entry = ((dma_handle - iommu->page_table_map_base) >> IO_PAGE_SHIFT); | ||
493 | while (npages) { | ||
494 | unsigned long num; | ||
495 | |||
496 | num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry), | ||
497 | npages); | ||
498 | entry += num; | ||
499 | npages -= num; | ||
500 | } | ||
440 | 501 | ||
441 | num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry), | 502 | sg = sg_next(sg); |
442 | npages); | 503 | } |
443 | entry += num; | ||
444 | npages -= num; | ||
445 | } while (npages != 0); | ||
446 | 504 | ||
447 | spin_unlock_irqrestore(&iommu->lock, flags); | 505 | spin_unlock_irqrestore(&iommu->lock, flags); |
448 | } | 506 | } |
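
One driver-visible consequence of both map_sg rewrites is worth noting: dma_map_sg() can now legitimately return fewer segments than it was handed, and the entry after the last valid one is terminated with DMA_ERROR_CODE and a zero length. Callers therefore have to iterate over the returned count rather than the original nelems. A hedged fragment showing that intended usage; setup_dma(), struct my_hw and program_hw_segment() are placeholders invented for this example, not kernel APIs.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

struct my_hw;						/* placeholder device state */
void program_hw_segment(struct my_hw *hw,		/* placeholder helper */
			dma_addr_t addr, unsigned int len);

static int setup_dma(struct device *dev, struct my_hw *hw,
		     struct scatterlist *sglist, int nelems)
{
	struct scatterlist *sg;
	int count, i;

	count = dma_map_sg(dev, sglist, nelems, DMA_TO_DEVICE);
	if (count == 0)
		return -ENOMEM;

	/* Program only the merged segments the IOMMU layer handed back. */
	for_each_sg(sglist, sg, count, i)
		program_hw_segment(hw, sg_dma_address(sg), sg_dma_len(sg));

	return 0;
}
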
diff --git a/include/asm-sparc64/io.h b/include/asm-sparc64/io.h
index b6ece223562d..c299b853b5ba 100644
--- a/include/asm-sparc64/io.h
+++ b/include/asm-sparc64/io.h
@@ -16,7 +16,7 @@
16 | /* BIO layer definitions. */ | 16 | /* BIO layer definitions. */ |
17 | extern unsigned long kern_base, kern_size; | 17 | extern unsigned long kern_base, kern_size; |
18 | #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) | 18 | #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) |
19 | #define BIO_VMERGE_BOUNDARY 0 | 19 | #define BIO_VMERGE_BOUNDARY 8192 |
20 | 20 | ||
21 | static inline u8 _inb(unsigned long addr) | 21 | static inline u8 _inb(unsigned long addr) |
22 | { | 22 | { |
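
The io.h change goes with the map_sg rewrite: BIO_VMERGE_BOUNDARY moves from 0 (virtual merging disabled) to 8192, the sparc64 IO page size, so the block layer may again count on the IOMMU producing bus-contiguous mappings at IO-page granularity. Roughly, two biovecs become candidates for virtual merging when the end of the first and the start of the second both sit on that boundary. An illustrative check of that idea, not the bio.h macro itself:

/* Illustrative only: mirrors the idea behind the block layer's virtual
 * merge test, using the 8192-byte boundary set above. */
static int virt_mergeable(unsigned long prev_end_phys,
			  unsigned long next_start_phys,
			  unsigned long boundary)
{
	return ((prev_end_phys | next_start_phys) & (boundary - 1)) == 0;
}
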