author		Grant Grundler <grundler@parisc-linux.org>	2005-10-21 22:37:20 -0400
committer	Kyle McMartin <kyle@parisc-linux.org>	2005-10-21 22:37:20 -0400
commit		64908ad95c34f25849412d6d4735ac10f8fb6575 (patch)
tree		544173d34dadfb80339e3d4ba147875ad1f5c731 /drivers
parent		53f01bba49938f115237fe43a261c31ac13ae5c6 (diff)
[PARISC] Update sba_iommu from parisc tree
revert use of %%sr0 in fdc asm.
Thanks to Joel Soete for pointing out this oversight.
Signed-off-by: Grant Grundler <grundler@parisc-linux.org>
2.6.14-rc2-pa3 move "sync" outside the main loop that fills IO Pdir.
Signed-off-by: Grant Grundler <grundler@parisc-linux.org>
remove explicit use of sr0 in fdc ops.
Thanks to Joel Soete for reminding me where I added those...
Signed-off-by: Grant Grundler <grundler@parisc-linux.org>
2.6.14-rc2-pa2 - make SBA more anal about invalidating pdir entries
Previous code cleared the valid flag of a pdir entry but did NOT
guarantee this change was visible to the PDIR before writing
the PCOM register. I.e. the SBA could pick up a stale entry if
the write happened to hit the SBA before the cacheline was flushed
from the cache.
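For illustration, here is a minimal sketch of the ordering the -pa2/-pa3 changes enforce: clear the valid byte, fdc the cacheline, and issue a single sync before the IO TLB is purged. The helper name is hypothetical; the real code is open-coded in sba_mark_invalid()/sba_io_pdir_entry() in the diff below.

```c
#include <linux/types.h>

/* Hypothetical helper, sketching the required ordering only. */
static void sba_clear_pdir_entries(u64 *pdir_ptr, unsigned int count,
				   int ioc_needs_fdc)
{
	unsigned int i;

	for (i = 0; i < count; i++, pdir_ptr++) {
		/* 1. Clear the "valid" byte.  IO PDIR entries are little
		 *    endian, so the valid bit lives in byte +7 of the u64. */
		((u8 *) pdir_ptr)[7] = 0;

		/* 2. Flush the now-dirty cacheline so the cleared bit can
		 *    reach memory where the SBA will see it. */
		if (ioc_needs_fdc)
			asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr));
	}

	/* 3. One sync after the loop: make all the fdc's visible before
	 *    the caller pokes the PCOM register to purge the IO TLB. */
	if (ioc_needs_fdc)
		asm volatile("sync" : : );
}
```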
Long term, I think I want to make this a compile time flag.
The development tree should enable anal pdir checking by default,
and Debian can disable it with either a CONFIG option
or a one-line patch. The fdc/sync options can only negatively affect
performance, though I haven't measured how much yet.
If someone can run netperf TCP_RR across gige and compare
-pa1 and -pa2, that would be sufficient.
Cleaned up the use of "fdc" to make sure it's using the "kernel"
space id (specify sr0, which maps to sr4-7). It seems a bit fragile
to assume "sr1" gets loaded with KERNEL_SPACE, which is how the
code works today.
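To make the difference concrete, the two flush forms are contrasted below. The wrapper functions are hypothetical (the driver open-codes the asm), and the note on implicit space selection follows the description above: with sr0 in the s-field, the space comes from sr4-7 based on the upper bits of the base register.

```c
#include <linux/types.h>

/* Old form: names %sr1 explicitly, so it depends on %sr1 already
 * holding KERNEL_SPACE. */
static inline void pdir_flush_old(u64 *pdir_ptr)
{
	asm volatile("fdc 0(%%sr1,%0)\n\tsync" : : "r" (pdir_ptr));
}

/* New form: sr0 in the s-field selects the space implicitly from the
 * upper bits of the base register (sr4-7), so a kernel virtual address
 * is flushed in kernel space no matter what %sr1 happens to hold.
 * The "sync" is issued separately, once, outside the loop. */
static inline void pdir_flush_new(u64 *pdir_ptr)
{
	asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr));
}
```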
Tested on 32 and 64-bit SMP kernels on j6k.
Signed-off-by: Grant Grundler <grundler@parisc-linux.org>
remove PDC_NARROW from SBA and document the history of PDC_NARROW a bit.
It will still show up in an older kernel's .config file.
Signed-off-by: Grant Grundler <grundler@parisc-linux.org>
if/ifdef cleanups from Joel Soete.
Signed-off-by: Grant Grundler <grundler@parisc-linux.org>
2.6.12-rc4-pa2 fix 32-bit support for Astro platforms
o Since my last SBA code change, SBA could allocate more than 1GB of IOVA
space on Astro boxes with more than 1GB of RAM when running a 32-bit kernel.
This is bad since the IOMMU can only talk to the first 1GB at most.
Kudos to jejb for quickly spotting that bug.
o jejb also noted SBA should *always* reject DMA masks > 32 bits, since
DMA-mapping.txt indicates the caller should try again with 32 bits.
o Fixed an off-by-one error when comparing the mask to the IOVA space size
(see the sketch below).
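A rough sketch of the resulting sba_dma_supported() logic, condensed into a hypothetical helper; struct ioc, its ibase/pdir_size fields, and IOVP_SIZE come from sba_iommu.c as shown in the diff below.

```c
static int sba_mask_ok(struct ioc *ioc, u64 mask)
{
	u64 iova_top;

	/* Per Documentation/DMA-mapping.txt, a driver whose 64-bit mask
	 * is rejected will retry with a 32-bit mask, so never accept
	 * more than 32 bits -- IOMMU bypass isn't supported here. */
	if (mask > ~0U)
		return 0;

	/* Largest IO virtual address the IOC can hand out.  The "- 1"
	 * is the off-by-one fix: ibase + size is one past the end. */
	iova_top = ioc->ibase - 1 +
		(ioc->pdir_size / sizeof(u64) * IOVP_SIZE);

	return mask >= iova_top;
}
```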
Signed-off-by: Grant Grundler <grundler@parisc-linux.org>
Signed-off-by: Kyle McMartin <kyle@parisc-linux.org>
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/parisc/sba_iommu.c	140
1 file changed, 91 insertions(+), 49 deletions(-)
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index 6256ad365d0b..48591badd4ce 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -91,8 +91,8 @@ extern struct proc_dir_entry * proc_mckinley_root;
 #define DBG_RES(x...)
 #endif
 
-#if defined(__LP64__) && !defined(CONFIG_PDC_NARROW)
-/* "low end" PA8800 machines use ZX1 chipset */
+#if defined(CONFIG_64BIT)
+/* "low end" PA8800 machines use ZX1 chipset: PAT PDC and only run 64-bit */
 #define ZX1_SUPPORT
 #endif
 
@@ -231,7 +231,7 @@ struct ioc {
 	spinlock_t res_lock;
 	unsigned int res_bitshift;	/* from the LEFT! */
 	unsigned int res_size;		/* size of resource map in bytes */
-#if SBA_HINT_SUPPORT
+#ifdef SBA_HINT_SUPPORT
 /* FIXME : DMA HINTs not used */
 	unsigned long hint_mask_pdir;	/* bits used for DMA hints */
 	unsigned int hint_shift_pdir;
@@ -294,7 +294,7 @@ static unsigned long piranha_bad_128k = 0;
 /* Looks nice and keeps the compiler happy */
 #define SBA_DEV(d) ((struct sba_device *) (d))
 
-#if SBA_AGP_SUPPORT
+#ifdef SBA_AGP_SUPPORT
 static int reserve_sba_gart = 1;
 #endif
 
@@ -314,7 +314,7 @@ static int reserve_sba_gart = 1;
 #define WRITE_REG32(val, addr) __raw_writel(cpu_to_le32(val), addr)
 #define WRITE_REG64(val, addr) __raw_writeq(cpu_to_le64(val), addr)
 
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 #define READ_REG(addr) READ_REG64(addr)
 #define WRITE_REG(value, addr) WRITE_REG64(value, addr)
 #else
@@ -324,7 +324,7 @@ static int reserve_sba_gart = 1;
 
 #ifdef DEBUG_SBA_INIT
 
-/* NOTE: When __LP64__ isn't defined, READ_REG64() is two 32-bit reads */
+/* NOTE: When CONFIG_64BIT isn't defined, READ_REG64() is two 32-bit reads */
 
 /**
  * sba_dump_ranges - debugging only - print ranges assigned to this IOA
@@ -364,7 +364,7 @@ static void sba_dump_tlb(void __iomem *hpa)
 #else
 #define sba_dump_ranges(x)
 #define sba_dump_tlb(x)
-#endif
+#endif	/* DEBUG_SBA_INIT */
 
 
 #ifdef ASSERT_PDIR_SANITY
@@ -674,7 +674,7 @@ sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
 *
 ***************************************************************/
 
-#if SBA_HINT_SUPPORT
+#ifdef SBA_HINT_SUPPORT
 #define SBA_DMA_HINT(ioc, val) ((val) << (ioc)->hint_shift_pdir)
 #endif
 
@@ -743,9 +743,8 @@ sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
 	 * (bit #61, big endian), we have to flush and sync every time
 	 * IO-PDIR is changed in Ike/Astro.
 	 */
-	if (ioc_needs_fdc) {
-		asm volatile("fdc 0(%%sr1,%0)\n\tsync" : : "r" (pdir_ptr));
-	}
+	if (ioc_needs_fdc)
+		asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr));
 }
 
 
@@ -769,42 +768,57 @@ static SBA_INLINE void
 sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
 {
 	u32 iovp = (u32) SBA_IOVP(ioc,iova);
-
-	/* Even though this is a big-endian machine, the entries
-	** in the iopdir are little endian. That's why we clear the byte
-	** at +7 instead of at +0.
-	*/
-	int off = PDIR_INDEX(iovp)*sizeof(u64)+7;
+	u64 *pdir_ptr = &ioc->pdir_base[PDIR_INDEX(iovp)];
 
 #ifdef ASSERT_PDIR_SANITY
-	/* Assert first pdir entry is set */
-	if (0x80 != (((u8 *) ioc->pdir_base)[off])) {
+	/* Assert first pdir entry is set.
+	**
+	** Even though this is a big-endian machine, the entries
+	** in the iopdir are little endian. That's why we look at
+	** the byte at +7 instead of at +0.
+	*/
+	if (0x80 != (((u8 *) pdir_ptr)[7])) {
 		sba_dump_pdir_entry(ioc,"sba_mark_invalid()", PDIR_INDEX(iovp));
 	}
 #endif
 
-	if (byte_cnt <= IOVP_SIZE)
+	if (byte_cnt > IOVP_SIZE)
 	{
-		iovp |= IOVP_SHIFT;	/* set "size" field for PCOM */
+#if 0
+		unsigned long entries_per_cacheline = ioc_needs_fdc ?
+				L1_CACHE_ALIGN(((unsigned long) pdir_ptr))
+					- (unsigned long) pdir_ptr;
+				: 262144;
+#endif
 
-		/*
-		** clear I/O PDIR entry "valid" bit
-		** Do NOT clear the rest - save it for debugging.
-		** We should only clear bits that have previously
-		** been enabled.
-		*/
-		((u8 *)(ioc->pdir_base))[off] = 0;
-	} else {
-		u32 t = get_order(byte_cnt) + PAGE_SHIFT;
+		/* set "size" field for PCOM */
+		iovp |= get_order(byte_cnt) + PAGE_SHIFT;
 
-		iovp |= t;
 		do {
 			/* clear I/O Pdir entry "valid" bit first */
-			((u8 *)(ioc->pdir_base))[off] = 0;
-			off += sizeof(u64);
+			((u8 *) pdir_ptr)[7] = 0;
+			if (ioc_needs_fdc) {
+				asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr));
+#if 0
+				entries_per_cacheline = L1_CACHE_SHIFT - 3;
+#endif
+			}
+			pdir_ptr++;
 			byte_cnt -= IOVP_SIZE;
-		} while (byte_cnt > 0);
-	}
+		} while (byte_cnt > IOVP_SIZE);
+	} else
+		iovp |= IOVP_SHIFT;	/* set "size" field for PCOM */
+
+	/*
+	** clear I/O PDIR entry "valid" bit.
+	** We have to R/M/W the cacheline regardless how much of the
+	** pdir entry that we clobber.
+	** The rest of the entry would be useful for debugging if we
+	** could dump core on HPMC.
+	*/
+	((u8 *) pdir_ptr)[7] = 0;
+	if (ioc_needs_fdc)
+		asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr));
 
 	WRITE_REG( SBA_IOVA(ioc, iovp, 0, 0), ioc->ioc_hpa+IOC_PCOM);
 }
@@ -819,18 +833,29 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
 static int sba_dma_supported( struct device *dev, u64 mask)
 {
 	struct ioc *ioc;
+
 	if (dev == NULL) {
 		printk(KERN_ERR MODULE_NAME ": EISA/ISA/et al not supported\n");
 		BUG();
 		return(0);
 	}
 
-	ioc = GET_IOC(dev);
+	/* Documentation/DMA-mapping.txt tells drivers to try 64-bit first,
+	 * then fall back to 32-bit if that fails.
+	 * We are just "encouraging" 32-bit DMA masks here since we can
+	 * never allow IOMMU bypass unless we add special support for ZX1.
+	 */
+	if (mask > ~0U)
+		return 0;
 
-	/* check if mask is > than the largest IO Virt Address */
+	ioc = GET_IOC(dev);
 
-	return((int) (mask >= (ioc->ibase +
-			(ioc->pdir_size / sizeof(u64) * IOVP_SIZE) )));
+	/*
+	 * check if mask is >= than the current max IO Virt Address
+	 * The max IO Virt address will *always* < 30 bits.
+	 */
+	return((int)(mask >= (ioc->ibase - 1 +
+			(ioc->pdir_size / sizeof(u64) * IOVP_SIZE) )));
 }
 
 
@@ -898,11 +923,17 @@ sba_map_single(struct device *dev, void *addr, size_t size,
 		size -= IOVP_SIZE;
 		pdir_start++;
 	}
-	/* form complete address */
+
+	/* force FDC ops in io_pdir_entry() to be visible to IOMMU */
+	if (ioc_needs_fdc)
+		asm volatile("sync" : : );
+
 #ifdef ASSERT_PDIR_SANITY
 	sba_check_pdir(ioc,"Check after sba_map_single()");
 #endif
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
+
+	/* form complete address */
 	return SBA_IOVA(ioc, iovp, offset, DEFAULT_DMA_HINT_REG);
 }
 
@@ -958,12 +989,19 @@ sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
 			d--;
 		}
 		ioc->saved_cnt = 0;
+
 		READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
 	}
 #else /* DELAYED_RESOURCE_CNT == 0 */
 	sba_free_range(ioc, iova, size);
+
+	/* If fdc's were issued, force fdc's to be visible now */
+	if (ioc_needs_fdc)
+		asm volatile("sync" : : );
+
 	READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
 #endif /* DELAYED_RESOURCE_CNT == 0 */
+
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
 
 	/* XXX REVISIT for 2.5 Linux - need syncdma for zero-copy support.
@@ -1106,6 +1144,10 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
 	*/
 	filled = iommu_fill_pdir(ioc, sglist, nents, 0, sba_io_pdir_entry);
 
+	/* force FDC ops in io_pdir_entry() to be visible to IOMMU */
+	if (ioc_needs_fdc)
+		asm volatile("sync" : : );
+
 #ifdef ASSERT_PDIR_SANITY
 	if (sba_check_pdir(ioc,"Check after sba_map_sg()"))
 	{
@@ -1234,8 +1276,10 @@ sba_alloc_pdir(unsigned int pdir_size)
 	unsigned long pdir_order = get_order(pdir_size);
 
 	pdir_base = __get_free_pages(GFP_KERNEL, pdir_order);
-	if (NULL == (void *) pdir_base)
-		panic("sba_ioc_init() could not allocate I/O Page Table\n");
+	if (NULL == (void *) pdir_base) {
+		panic("%s() could not allocate I/O Page Table\n",
+			__FUNCTION__);
+	}
 
 	/* If this is not PA8700 (PCX-W2)
 	** OR newer than ver 2.2
@@ -1353,7 +1397,7 @@ sba_ioc_init_pluto(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
 	u32 iova_space_mask;
 	u32 iova_space_size;
 	int iov_order, tcnfg;
-#if SBA_AGP_SUPPORT
+#ifdef SBA_AGP_SUPPORT
 	int agp_found = 0;
 #endif
 	/*
@@ -1390,7 +1434,7 @@ sba_ioc_init_pluto(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
 	DBG_INIT("%s() pdir %p size %x\n",
 			__FUNCTION__, ioc->pdir_base, ioc->pdir_size);
 
-#if SBA_HINT_SUPPORT
+#ifdef SBA_HINT_SUPPORT
 	ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
 	ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));
 
@@ -1414,7 +1458,7 @@ sba_ioc_init_pluto(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
 
 	WRITE_REG(ioc->imask, ioc->ioc_hpa + IOC_IMASK);
 
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	/*
 	** Setting the upper bits makes checking for bypass addresses
 	** a little faster later on.
@@ -1447,7 +1491,7 @@ sba_ioc_init_pluto(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
 	*/
 	WRITE_REG(ioc->ibase | 31, ioc->ioc_hpa + IOC_PCOM);
 
-#if SBA_AGP_SUPPORT
+#ifdef SBA_AGP_SUPPORT
 	/*
 	** If an AGP device is present, only use half of the IOV space
 	** for PCI DMA. Unfortunately we can't know ahead of time
@@ -1499,11 +1543,9 @@ sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
 	if (iova_space_size < (1 << (20 - PAGE_SHIFT))) {
 		iova_space_size = 1 << (20 - PAGE_SHIFT);
 	}
-#ifdef __LP64__
 	else if (iova_space_size > (1 << (30 - PAGE_SHIFT))) {
 		iova_space_size = 1 << (30 - PAGE_SHIFT);
 	}
-#endif
 
 	/*
 	** iova space must be log2() in size.
@@ -1529,7 +1571,7 @@ sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
 	DBG_INIT("%s() pdir %p size %x\n",
 			__FUNCTION__, ioc->pdir_base, pdir_size);
 
-#if SBA_HINT_SUPPORT
+#ifdef SBA_HINT_SUPPORT
 	/* FIXME : DMA HINTs not used */
 	ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
 	ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));