author     Harvey Harrison <harvey.harrison@gmail.com>    2008-05-14 19:21:56 -0400
committer  Kyle McMartin <kyle@mcmartin.ca>                2008-05-15 10:38:54 -0400
commit     a8043ecb17bd2e4b034006bee315efeea3936278 (patch)
tree       db1780acb16d95cc619d901bd9f850c5cae45c50 /drivers/parisc/sba_iommu.c
parent     91bae23ce185b74c9b6dda86b92bb204a1c951c3 (diff)
drivers/parisc: replace remaining __FUNCTION__ occurrences
__FUNCTION__ is gcc-specific, use __func__

Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
Cc: Kyle McMartin <kyle@mcmartin.ca>
Cc: Matthew Wilcox <willy@debian.org>
Cc: Grant Grundler <grundler@parisc-linux.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Kyle McMartin <kyle@mcmartin.ca>
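Background note (not part of the patch itself): __func__ is the C99-standard predefined identifier for the enclosing function's name, while __FUNCTION__ is a GCC extension retained only for backwards compatibility, which is why the kernel tree prefers __func__. A minimal standalone sketch of the two, assuming an ordinary C99 toolchain rather than the kernel's DBG_* macros:

    #include <stdio.h>

    /*
     * C99 (6.4.2.2) defines __func__ as if each function body began with
     *     static const char __func__[] = "function-name";
     * __FUNCTION__ is a GCC-specific alias for the same string.
     */
    static void report(void)
    {
            printf("standard: %s\n", __func__);      /* portable, prints "report" */
    #ifdef __GNUC__
            printf("gcc only: %s\n", __FUNCTION__);  /* same output, non-standard */
    #endif
    }

    int main(void)
    {
            report();
            return 0;
    }

Since every occurrence in this file merely passes the function name to a printf-style debug or panic message, the substitution is mechanical and produces identical output.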
Diffstat (limited to 'drivers/parisc/sba_iommu.c')
-rw-r--r--    drivers/parisc/sba_iommu.c    42
1 file changed, 21 insertions(+), 21 deletions(-)
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index afc849bd3f5..bc73b96346f 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -384,7 +384,7 @@ sba_search_bitmap(struct ioc *ioc, struct device *dev,
 	}
 	mask = RESMAP_MASK(bits_wanted) >> bitshiftcnt;
 
-	DBG_RES("%s() o %ld %p", __FUNCTION__, o, res_ptr);
+	DBG_RES("%s() o %ld %p", __func__, o, res_ptr);
 	while(res_ptr < res_end)
 	{
 		DBG_RES(" %p %lx %lx\n", res_ptr, mask, *res_ptr);
@@ -454,7 +454,7 @@ sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
 #endif
 
 	DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
-		__FUNCTION__, size, pages_needed, pide,
+		__func__, size, pages_needed, pide,
 		(uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
 		ioc->res_bitshift );
 
@@ -497,7 +497,7 @@ sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
 	unsigned long m = RESMAP_MASK(bits_not_wanted) >> (pide & (BITS_PER_LONG - 1));
 
 	DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n",
-		__FUNCTION__, (uint) iova, size,
+		__func__, (uint) iova, size,
 		bits_not_wanted, m, pide, res_ptr, *res_ptr);
 
 #ifdef SBA_COLLECT_STATS
@@ -740,7 +740,7 @@ sba_map_single(struct device *dev, void *addr, size_t size,
 	iovp = (dma_addr_t) pide << IOVP_SHIFT;
 
 	DBG_RUN("%s() 0x%p -> 0x%lx\n",
-		__FUNCTION__, addr, (long) iovp | offset);
+		__func__, addr, (long) iovp | offset);
 
 	pdir_start = &(ioc->pdir_base[pide]);
 
@@ -798,7 +798,7 @@ sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
 	unsigned long flags;
 	dma_addr_t offset;
 
-	DBG_RUN("%s() iovp 0x%lx/%x\n", __FUNCTION__, (long) iova, size);
+	DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size);
 
 	ioc = GET_IOC(dev);
 	offset = iova & ~IOVP_MASK;
@@ -937,7 +937,7 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
 	int coalesced, filled = 0;
 	unsigned long flags;
 
-	DBG_RUN_SG("%s() START %d entries\n", __FUNCTION__, nents);
+	DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
 
 	ioc = GET_IOC(dev);
 
@@ -998,7 +998,7 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
 
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
 
-	DBG_RUN_SG("%s() DONE %d mappings\n", __FUNCTION__, filled);
+	DBG_RUN_SG("%s() DONE %d mappings\n", __func__, filled);
 
 	return filled;
 }
@@ -1023,7 +1023,7 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
 #endif
 
 	DBG_RUN_SG("%s() START %d entries, %p,%x\n",
-		__FUNCTION__, nents, sg_virt_addr(sglist), sglist->length);
+		__func__, nents, sg_virt_addr(sglist), sglist->length);
 
 	ioc = GET_IOC(dev);
 
@@ -1047,7 +1047,7 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
 		++sglist;
 	}
 
-	DBG_RUN_SG("%s() DONE (nents %d)\n", __FUNCTION__, nents);
+	DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
 
 #ifdef ASSERT_PDIR_SANITY
 	spin_lock_irqsave(&ioc->res_lock, flags);
@@ -1118,7 +1118,7 @@ sba_alloc_pdir(unsigned int pdir_size)
 	pdir_base = __get_free_pages(GFP_KERNEL, pdir_order);
 	if (NULL == (void *) pdir_base) {
 		panic("%s() could not allocate I/O Page Table\n",
-			__FUNCTION__);
+			__func__);
 	}
 
 	/* If this is not PA8700 (PCX-W2)
@@ -1261,7 +1261,7 @@ sba_ioc_init_pluto(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
 	ioc->pdir_size = (iova_space_size / IOVP_SIZE) * sizeof(u64);
 
 	DBG_INIT("%s() hpa 0x%p IOV %dMB (%d bits)\n",
-		__FUNCTION__, ioc->ioc_hpa, iova_space_size >> 20,
+		__func__, ioc->ioc_hpa, iova_space_size >> 20,
 		iov_order + PAGE_SHIFT);
 
 	ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL,
@@ -1272,7 +1272,7 @@ sba_ioc_init_pluto(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
 	memset(ioc->pdir_base, 0, ioc->pdir_size);
 
 	DBG_INIT("%s() pdir %p size %x\n",
-		__FUNCTION__, ioc->pdir_base, ioc->pdir_size);
+		__func__, ioc->pdir_base, ioc->pdir_size);
 
 #ifdef SBA_HINT_SUPPORT
 	ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
@@ -1354,7 +1354,7 @@ sba_ioc_init_pluto(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
 
 	if (agp_found && sba_reserve_agpgart) {
 		printk(KERN_INFO "%s: reserving %dMb of IOVA space for agpgart\n",
-			__FUNCTION__, (iova_space_size/2) >> 20);
+			__func__, (iova_space_size/2) >> 20);
 		ioc->pdir_size /= 2;
 		ioc->pdir_base[PDIR_INDEX(iova_space_size/2)] = SBA_AGPGART_COOKIE;
 	}
@@ -1406,7 +1406,7 @@ sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
 	ioc->pdir_size = pdir_size = (iova_space_size/IOVP_SIZE) * sizeof(u64);
 
 	DBG_INIT("%s() hpa 0x%lx mem %ldMB IOV %dMB (%d bits)\n",
-		__FUNCTION__,
+		__func__,
 		ioc->ioc_hpa,
 		(unsigned long) num_physpages >> (20 - PAGE_SHIFT),
 		iova_space_size>>20,
@@ -1415,7 +1415,7 @@ sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
 	ioc->pdir_base = sba_alloc_pdir(pdir_size);
 
 	DBG_INIT("%s() pdir %p size %x\n",
-		__FUNCTION__, ioc->pdir_base, pdir_size);
+		__func__, ioc->pdir_base, pdir_size);
 
 #ifdef SBA_HINT_SUPPORT
 	/* FIXME : DMA HINTs not used */
@@ -1443,7 +1443,7 @@ sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
 #endif
 
 	DBG_INIT("%s() IOV base 0x%lx mask 0x%0lx\n",
-		__FUNCTION__, ioc->ibase, ioc->imask);
+		__func__, ioc->ibase, ioc->imask);
 
 	/*
 	** FIXME: Hint registers are programmed with default hint
@@ -1470,7 +1470,7 @@ sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
 
 	ioc->ibase = 0; /* used by SBA_IOVA and related macros */
 
-	DBG_INIT("%s() DONE\n", __FUNCTION__);
+	DBG_INIT("%s() DONE\n", __func__);
 }
 
 
@@ -1544,7 +1544,7 @@ printk("sba_hw_init(): mem_boot 0x%x 0x%x 0x%x 0x%x\n", PAGE0->mem_boot.hpa,
 	if (!IS_PLUTO(sba_dev->dev)) {
 		ioc_ctl = READ_REG(sba_dev->sba_hpa+IOC_CTRL);
 		DBG_INIT("%s() hpa 0x%lx ioc_ctl 0x%Lx ->",
-			__FUNCTION__, sba_dev->sba_hpa, ioc_ctl);
+			__func__, sba_dev->sba_hpa, ioc_ctl);
 		ioc_ctl &= ~(IOC_CTRL_RM | IOC_CTRL_NC | IOC_CTRL_CE);
 		ioc_ctl |= IOC_CTRL_DD | IOC_CTRL_D4 | IOC_CTRL_TC;
 		/* j6700 v1.6 firmware sets 0x294f */
@@ -1675,7 +1675,7 @@ sba_common_init(struct sba_device *sba_dev)
 
 	res_size >>= 3; /* convert bit count to byte count */
 	DBG_INIT("%s() res_size 0x%x\n",
-		__FUNCTION__, res_size);
+		__func__, res_size);
 
 	sba_dev->ioc[i].res_size = res_size;
 	sba_dev->ioc[i].res_map = (char *) __get_free_pages(GFP_KERNEL, get_order(res_size));
@@ -1688,7 +1688,7 @@ sba_common_init(struct sba_device *sba_dev)
 	if (NULL == sba_dev->ioc[i].res_map)
 	{
 		panic("%s:%s() could not allocate resource map\n",
-			__FILE__, __FUNCTION__ );
+			__FILE__, __func__ );
 	}
 
 	memset(sba_dev->ioc[i].res_map, 0, res_size);
@@ -1725,7 +1725,7 @@ sba_common_init(struct sba_device *sba_dev)
 #endif
 
 	DBG_INIT("%s() %d res_map %x %p\n",
-		__FUNCTION__, i, res_size, sba_dev->ioc[i].res_map);
+		__func__, i, res_size, sba_dev->ioc[i].res_map);
 	}
 
 	spin_lock_init(&sba_dev->sba_lock);