author    Alex Williamson <alex.williamson@hp.com>  2005-04-25 16:14:36 -0400
committer Tony Luck <tony.luck@intel.com>  2005-04-25 16:14:36 -0400
commit    5f6602a101993592b437b801c401443bec65d0cf (patch)
tree      603ce367a349b9454a7f6b77953b3ecdf194ec68 /arch
parent    fde740e4dd4a05ca8957490d468fa9b2770f5bd6 (diff)
[IA64] sba_iommu bug fixes
This fixes a couple of bugs in the zx1/sx1000 sba_iommu.  These all have a
pretty low likelihood of being hit.

The first problem is a simple off-by-one deep in the sba_alloc_range() error
path.  Surrounding that was a lock-ordering problem that could potentially
deadlock against the order in which the locks are grabbed in
sba_unmap_single(); I moved the resource locking into sba_search_bitmap() to
prevent this.  Finally, there's a potential race between unmapping pdir
entries and marking incoming DMA pages clean.

If you see any oddities, please let me know, but I've tested it pretty
thoroughly here.  Tony, please apply.  Thanks.

BTW, many of the options in this driver that are not on by default are
becoming more and more broken.  I'll be working on some patches to clean
them out, but I wanted to get this bug fix out first.

Signed-off-by: Alex Williamson <alex.williamson@hp.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
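For readers who want the first bug in isolation: a hypothetical, stand-alone
reduction of the off-by-one in the saved-resource queue, where
&saved[saved_cnt] points one element past the last valid entry.  The struct
and names below only mimic the driver's ioc->saved array; this is not the
driver code itself.

#include <assert.h>
#include <stddef.h>

/* Illustrative stand-in for the driver's struct sba_dma_pair queue. */
struct dma_pair { unsigned long iova; size_t size; };

int main(void)
{
	struct dma_pair saved[8];
	int saved_cnt = 8;	/* the deferred-free queue is full */

	/* Buggy form: indexes one past the last stored element. */
	struct dma_pair *bad  = &saved[saved_cnt];
	/* Fixed form, as in the patch: the last entry is saved_cnt - 1. */
	struct dma_pair *good = &saved[saved_cnt - 1];

	assert(good == bad - 1);	/* the two differ by exactly one slot */
	return 0;
}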
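The lock-ordering fix is easier to see as the classic ABBA pattern.  Below is
a minimal user-space sketch: pthread mutexes stand in for the kernel
spinlocks res_lock and saved_lock, and the two thread functions mirror the
old sba_alloc_range() and sba_unmap_single() acquisition orders.  Everything
here is illustrative, not driver code.

#include <pthread.h>

static pthread_mutex_t res_lock   = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t saved_lock = PTHREAD_MUTEX_INITIALIZER;

/* Old allocation path: res_lock (A) first, then saved_lock (B). */
static void *alloc_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&res_lock);
	pthread_mutex_lock(&saved_lock);
	/* ... replay deferred frees, then search the bitmap ... */
	pthread_mutex_unlock(&saved_lock);
	pthread_mutex_unlock(&res_lock);
	return NULL;
}

/* Unmap path: saved_lock (B) first, then res_lock (A). */
static void *unmap_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&saved_lock);
	pthread_mutex_lock(&res_lock);	/* B then A: can deadlock vs. alloc_path */
	/* ... queue the dead IOVA, flush purges ... */
	pthread_mutex_unlock(&res_lock);
	pthread_mutex_unlock(&saved_lock);
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;
	pthread_create(&t1, NULL, alloc_path, NULL);
	pthread_create(&t2, NULL, unmap_path, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;
}

After the patch, both paths take saved_lock before res_lock:
sba_alloc_range() no longer wraps the whole allocation in res_lock,
sba_search_bitmap() takes it internally, and the retry path nests res_lock
under saved_lock, matching sba_unmap_single().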
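The third fix is purely an ordering change: mark_clean() derives the page
address by reading it back out of the IO pdir entry, so that read has to
happen before the entry is invalidated and its slot returned to the
allocator.  A hypothetical single-threaded reduction of the bad interleaving
(names and values are made up for illustration):

#include <stdint.h>
#include <stdio.h>

static uint64_t pdir_entry = 0xdead1000ULL;	/* stand-in for ioc->pdir_base[off] */

static void mark_clean_page(uint64_t pa)
{
	printf("marking PA %#llx clean\n", (unsigned long long)pa);
}

int main(void)
{
	/* Old order: invalidate/free first, mark clean afterwards.  In the
	 * window between the two, another CPU can reuse the freed slot and
	 * rewrite the entry, so the late read sees someone else's page. */
	uint64_t saved_pa = pdir_entry;
	pdir_entry = 0;			/* sba_mark_invalid() + sba_free_range() */
	pdir_entry = 0xbeef2000ULL;	/* racing sba_map_single() reuses the slot */
	mark_clean_page(pdir_entry);	/* wrong page: 0xbeef2000 */

	/* New order, as in the patch: read and mark clean before freeing. */
	mark_clean_page(saved_pa);	/* correct page: 0xdead1000 */
	return 0;
}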
Diffstat (limited to 'arch')
-rw-r--r--  arch/ia64/hp/common/sba_iommu.c  96
1 file changed, 56 insertions(+), 40 deletions(-)
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 017c9ab5fc1b..6a8fcba7a853 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -1,9 +1,9 @@
 /*
 ** IA64 System Bus Adapter (SBA) I/O MMU manager
 **
-** (c) Copyright 2002-2004 Alex Williamson
+** (c) Copyright 2002-2005 Alex Williamson
 ** (c) Copyright 2002-2003 Grant Grundler
-** (c) Copyright 2002-2004 Hewlett-Packard Company
+** (c) Copyright 2002-2005 Hewlett-Packard Company
 **
 ** Portions (c) 2000 Grant Grundler (from parisc I/O MMU code)
 ** Portions (c) 1999 Dave S. Miller (from sparc64 I/O MMU code)
@@ -459,21 +459,32 @@ get_iovp_order (unsigned long size)
  * sba_search_bitmap - find free space in IO PDIR resource bitmap
  * @ioc: IO MMU structure which owns the pdir we are interested in.
  * @bits_wanted: number of entries we need.
+ * @use_hint: use res_hint to indicate where to start looking
  *
  * Find consecutive free bits in resource bitmap.
  * Each bit represents one entry in the IO Pdir.
  * Cool perf optimization: search for log2(size) bits at a time.
  */
 static SBA_INLINE unsigned long
-sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted)
+sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted, int use_hint)
 {
-	unsigned long *res_ptr = ioc->res_hint;
+	unsigned long *res_ptr;
 	unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
-	unsigned long pide = ~0UL;
+	unsigned long flags, pide = ~0UL;
 
 	ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0);
 	ASSERT(res_ptr < res_end);
 
+	spin_lock_irqsave(&ioc->res_lock, flags);
+
+	/* Allow caller to force a search through the entire resource space */
+	if (likely(use_hint)) {
+		res_ptr = ioc->res_hint;
+	} else {
+		res_ptr = (ulong *)ioc->res_map;
+		ioc->res_bitshift = 0;
+	}
+
 	/*
 	 * N.B. REO/Grande defect AR2305 can cause TLB fetch timeouts
 	 * if a TLB entry is purged while in use. sba_mark_invalid()
@@ -570,10 +581,12 @@ not_found:
 	prefetch(ioc->res_map);
 	ioc->res_hint = (unsigned long *) ioc->res_map;
 	ioc->res_bitshift = 0;
+	spin_unlock_irqrestore(&ioc->res_lock, flags);
 	return (pide);
 
 found_it:
 	ioc->res_hint = res_ptr;
+	spin_unlock_irqrestore(&ioc->res_lock, flags);
 	return (pide);
 }
 
@@ -594,36 +607,36 @@ sba_alloc_range(struct ioc *ioc, size_t size)
 	unsigned long itc_start;
 #endif
 	unsigned long pide;
-	unsigned long flags;
 
 	ASSERT(pages_needed);
 	ASSERT(0 == (size & ~iovp_mask));
 
-	spin_lock_irqsave(&ioc->res_lock, flags);
-
 #ifdef PDIR_SEARCH_TIMING
 	itc_start = ia64_get_itc();
 #endif
 	/*
 	** "seek and ye shall find"...praying never hurts either...
 	*/
-	pide = sba_search_bitmap(ioc, pages_needed);
+	pide = sba_search_bitmap(ioc, pages_needed, 1);
 	if (unlikely(pide >= (ioc->res_size << 3))) {
-		pide = sba_search_bitmap(ioc, pages_needed);
+		pide = sba_search_bitmap(ioc, pages_needed, 0);
 		if (unlikely(pide >= (ioc->res_size << 3))) {
 #if DELAYED_RESOURCE_CNT > 0
+			unsigned long flags;
+
 			/*
 			** With delayed resource freeing, we can give this one more shot.  We're
 			** getting close to being in trouble here, so do what we can to make this
 			** one count.
 			*/
-			spin_lock(&ioc->saved_lock);
+			spin_lock_irqsave(&ioc->saved_lock, flags);
 			if (ioc->saved_cnt > 0) {
 				struct sba_dma_pair *d;
 				int cnt = ioc->saved_cnt;
 
-				d = &(ioc->saved[ioc->saved_cnt]);
+				d = &(ioc->saved[ioc->saved_cnt - 1]);
 
+				spin_lock(&ioc->res_lock);
 				while (cnt--) {
 					sba_mark_invalid(ioc, d->iova, d->size);
 					sba_free_range(ioc, d->iova, d->size);
@@ -631,10 +644,11 @@ sba_alloc_range(struct ioc *ioc, size_t size)
 				}
 				ioc->saved_cnt = 0;
 				READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
+				spin_unlock(&ioc->res_lock);
 			}
-			spin_unlock(&ioc->saved_lock);
+			spin_unlock_irqrestore(&ioc->saved_lock, flags);
 
-			pide = sba_search_bitmap(ioc, pages_needed);
+			pide = sba_search_bitmap(ioc, pages_needed, 0);
 			if (unlikely(pide >= (ioc->res_size << 3)))
 				panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n",
 				      ioc->ioc_hpa);
@@ -664,8 +678,6 @@ sba_alloc_range(struct ioc *ioc, size_t size)
 		(uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
 		ioc->res_bitshift );
 
-	spin_unlock_irqrestore(&ioc->res_lock, flags);
-
 	return (pide);
 }
 
@@ -950,6 +962,30 @@ sba_map_single(struct device *dev, void *addr, size_t size, int dir)
 	return SBA_IOVA(ioc, iovp, offset);
 }
 
+#ifdef ENABLE_MARK_CLEAN
+static SBA_INLINE void
+sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
+{
+	u32 iovp = (u32) SBA_IOVP(ioc,iova);
+	int off = PDIR_INDEX(iovp);
+	void *addr;
+
+	if (size <= iovp_size) {
+		addr = phys_to_virt(ioc->pdir_base[off] &
+				    ~0xE000000000000FFFULL);
+		mark_clean(addr, size);
+	} else {
+		do {
+			addr = phys_to_virt(ioc->pdir_base[off] &
+					    ~0xE000000000000FFFULL);
+			mark_clean(addr, min(size, iovp_size));
+			off++;
+			size -= iovp_size;
+		} while (size > 0);
+	}
+}
+#endif
+
 /**
  * sba_unmap_single - unmap one IOVA and free resources
  * @dev: instance of PCI owned by the driver that's asking.
@@ -995,6 +1031,10 @@ void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir)
 	size += offset;
 	size = ROUNDUP(size, iovp_size);
 
+#ifdef ENABLE_MARK_CLEAN
+	if (dir == DMA_FROM_DEVICE)
+		sba_mark_clean(ioc, iova, size);
+#endif
 
 #if DELAYED_RESOURCE_CNT > 0
 	spin_lock_irqsave(&ioc->saved_lock, flags);
@@ -1021,30 +1061,6 @@ void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir)
 	READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif /* DELAYED_RESOURCE_CNT == 0 */
-#ifdef ENABLE_MARK_CLEAN
-	if (dir == DMA_FROM_DEVICE) {
-		u32 iovp = (u32) SBA_IOVP(ioc,iova);
-		int off = PDIR_INDEX(iovp);
-		void *addr;
-
-		if (size <= iovp_size) {
-			addr = phys_to_virt(ioc->pdir_base[off] &
-					    ~0xE000000000000FFFULL);
-			mark_clean(addr, size);
-		} else {
-			size_t byte_cnt = size;
-
-			do {
-				addr = phys_to_virt(ioc->pdir_base[off] &
-						    ~0xE000000000000FFFULL);
-				mark_clean(addr, min(byte_cnt, iovp_size));
-				off++;
-				byte_cnt -= iovp_size;
-
-			} while (byte_cnt > 0);
-		}
-	}
-#endif
 }
 
 