-rw-r--r--  drivers/parisc/asp.c        2
-rw-r--r--  drivers/parisc/ccio-dma.c  36
-rw-r--r--  drivers/parisc/dino.c      14
-rw-r--r--  drivers/parisc/gsc.c        4
-rw-r--r--  drivers/parisc/lasi.c       2
-rw-r--r--  drivers/parisc/lba_pci.c   22
-rw-r--r--  drivers/parisc/led.c        2
-rw-r--r--  drivers/parisc/sba_iommu.c 42
-rw-r--r--  drivers/parisc/wax.c        2
9 files changed, 63 insertions(+), 63 deletions(-)
diff --git a/drivers/parisc/asp.c b/drivers/parisc/asp.c
index 558420bc9f8..82136913536 100644
--- a/drivers/parisc/asp.c
+++ b/drivers/parisc/asp.c
@@ -88,7 +88,7 @@ asp_init_chip(struct parisc_device *dev)
 	ret = -EBUSY;
 	dev->irq = gsc_claim_irq(&gsc_irq, ASP_GSC_IRQ);
 	if (dev->irq < 0) {
-		printk(KERN_ERR "%s(): cannot get GSC irq\n", __FUNCTION__);
+		printk(KERN_ERR "%s(): cannot get GSC irq\n", __func__);
 		goto out;
 	}
 
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index 07d2a8d4498..b30e38f3a50 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -359,7 +359,7 @@ ccio_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
 	BUG_ON((pages_needed * IOVP_SIZE) > DMA_CHUNK_SIZE);
 
 	DBG_RES("%s() size: %d pages_needed %d\n",
-			__FUNCTION__, size, pages_needed);
+			__func__, size, pages_needed);
 
 	/*
 	** "seek and ye shall find"...praying never hurts either...
@@ -395,16 +395,16 @@ ccio_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
 #endif
 	} else {
 		panic("%s: %s() Too many pages to map. pages_needed: %u\n",
-			__FILE__, __FUNCTION__, pages_needed);
+			__FILE__, __func__, pages_needed);
 	}
 
 	panic("%s: %s() I/O MMU is out of mapping resources.\n", __FILE__,
-			__FUNCTION__);
+			__func__);
 
 resource_found:
 
 	DBG_RES("%s() res_idx %d res_hint: %d\n",
-		__FUNCTION__, res_idx, ioc->res_hint);
+		__func__, res_idx, ioc->res_hint);
 
 #ifdef CCIO_SEARCH_TIME
 	{
@@ -450,7 +450,7 @@ ccio_free_range(struct ioc *ioc, dma_addr_t iova, unsigned long pages_mapped)
 	BUG_ON(pages_mapped > BITS_PER_LONG);
 
 	DBG_RES("%s(): res_idx: %d pages_mapped %d\n",
-		__FUNCTION__, res_idx, pages_mapped);
+		__func__, res_idx, pages_mapped);
 
 #ifdef CCIO_MAP_STATS
 	ioc->used_pages -= pages_mapped;
@@ -474,7 +474,7 @@ ccio_free_range(struct ioc *ioc, dma_addr_t iova, unsigned long pages_mapped)
 #endif
 	} else {
 		panic("%s:%s() Too many pages to unmap.\n", __FILE__,
-			__FUNCTION__);
+			__func__);
 	}
 }
 
@@ -775,7 +775,7 @@ ccio_map_single(struct device *dev, void *addr, size_t size,
 	pdir_start = &(ioc->pdir_base[idx]);
 
 	DBG_RUN("%s() 0x%p -> 0x%lx size: %0x%x\n",
-		__FUNCTION__, addr, (long)iovp | offset, size);
+		__func__, addr, (long)iovp | offset, size);
 
 	/* If not cacheline aligned, force SAFE_DMA on the whole mess */
 	if((size % L1_CACHE_BYTES) || ((unsigned long)addr % L1_CACHE_BYTES))
@@ -820,7 +820,7 @@ ccio_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
 	ioc = GET_IOC(dev);
 
 	DBG_RUN("%s() iovp 0x%lx/%x\n",
-		__FUNCTION__, (long)iova, size);
+		__func__, (long)iova, size);
 
 	iova ^= offset;	/* clear offset bits */
 	size += offset;
@@ -922,7 +922,7 @@ ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
 	BUG_ON(!dev);
 	ioc = GET_IOC(dev);
 
-	DBG_RUN_SG("%s() START %d entries\n", __FUNCTION__, nents);
+	DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
 
 	/* Fast path single entry scatterlists. */
 	if (nents == 1) {
@@ -966,7 +966,7 @@ ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
 
 	BUG_ON(coalesced != filled);
 
-	DBG_RUN_SG("%s() DONE %d mappings\n", __FUNCTION__, filled);
+	DBG_RUN_SG("%s() DONE %d mappings\n", __func__, filled);
 
 	for (i = 0; i < filled; i++)
 		current_len += sg_dma_len(sglist + i);
@@ -995,7 +995,7 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
 	ioc = GET_IOC(dev);
 
 	DBG_RUN_SG("%s() START %d entries, %08lx,%x\n",
-		__FUNCTION__, nents, sg_virt_addr(sglist), sglist->length);
+		__func__, nents, sg_virt_addr(sglist), sglist->length);
 
 #ifdef CCIO_MAP_STATS
 	ioc->usg_calls++;
@@ -1011,7 +1011,7 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
 		++sglist;
 	}
 
-	DBG_RUN_SG("%s() DONE (nents %d)\n", __FUNCTION__, nents);
+	DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
 }
 
 static struct hppa_dma_ops ccio_ops = {
@@ -1225,7 +1225,7 @@ static int
 ccio_get_iotlb_size(struct parisc_device *dev)
 {
 	if (dev->spa_shift == 0) {
-		panic("%s() : Can't determine I/O TLB size.\n", __FUNCTION__);
+		panic("%s() : Can't determine I/O TLB size.\n", __func__);
 	}
 	return (1 << dev->spa_shift);
 }
@@ -1315,7 +1315,7 @@ ccio_ioc_init(struct ioc *ioc)
 	BUG_ON((1 << get_order(ioc->pdir_size)) != (ioc->pdir_size >> PAGE_SHIFT));
 
 	DBG_INIT("%s() hpa 0x%p mem %luMB IOV %dMB (%d bits)\n",
-			__FUNCTION__, ioc->ioc_regs,
+			__func__, ioc->ioc_regs,
 			(unsigned long) num_physpages >> (20 - PAGE_SHIFT),
 			iova_space_size>>20,
 			iov_order + PAGE_SHIFT);
@@ -1323,7 +1323,7 @@ ccio_ioc_init(struct ioc *ioc)
 	ioc->pdir_base = (u64 *)__get_free_pages(GFP_KERNEL,
 						 get_order(ioc->pdir_size));
 	if(NULL == ioc->pdir_base) {
-		panic("%s() could not allocate I/O Page Table\n", __FUNCTION__);
+		panic("%s() could not allocate I/O Page Table\n", __func__);
 	}
 	memset(ioc->pdir_base, 0, ioc->pdir_size);
 
@@ -1332,12 +1332,12 @@ ccio_ioc_init(struct ioc *ioc)
 
 	/* resource map size dictated by pdir_size */
 	ioc->res_size = (ioc->pdir_size / sizeof(u64)) >> 3;
-	DBG_INIT("%s() res_size 0x%x\n", __FUNCTION__, ioc->res_size);
+	DBG_INIT("%s() res_size 0x%x\n", __func__, ioc->res_size);
 
 	ioc->res_map = (u8 *)__get_free_pages(GFP_KERNEL,
 					      get_order(ioc->res_size));
 	if(NULL == ioc->res_map) {
-		panic("%s() could not allocate resource map\n", __FUNCTION__);
+		panic("%s() could not allocate resource map\n", __func__);
 	}
 	memset(ioc->res_map, 0, ioc->res_size);
 
@@ -1409,7 +1409,7 @@ ccio_init_resource(struct resource *res, char *name, void __iomem *ioaddr)
 	result = insert_resource(&iomem_resource, res);
 	if (result < 0) {
 		printk(KERN_ERR "%s() failed to claim CCIO bus address space (%08lx,%08lx)\n",
-			__FUNCTION__, res->start, res->end);
+			__func__, res->start, res->end);
 	}
 }
 
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index d9c6322a721..fd56128525d 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -180,7 +180,7 @@ static int dino_cfg_read(struct pci_bus *bus, unsigned int devfn, int where,
 	void __iomem *base_addr = d->hba.base_addr;
 	unsigned long flags;
 
-	DBG("%s: %p, %d, %d, %d\n", __FUNCTION__, base_addr, devfn, where,
+	DBG("%s: %p, %d, %d, %d\n", __func__, base_addr, devfn, where,
 			size);
 	spin_lock_irqsave(&d->dinosaur_pen, flags);
 
@@ -215,7 +215,7 @@ static int dino_cfg_write(struct pci_bus *bus, unsigned int devfn, int where,
 	void __iomem *base_addr = d->hba.base_addr;
 	unsigned long flags;
 
-	DBG("%s: %p, %d, %d, %d\n", __FUNCTION__, base_addr, devfn, where,
+	DBG("%s: %p, %d, %d, %d\n", __func__, base_addr, devfn, where,
 			size);
 	spin_lock_irqsave(&d->dinosaur_pen, flags);
 
@@ -301,7 +301,7 @@ static void dino_disable_irq(unsigned int irq)
 	struct dino_device *dino_dev = irq_desc[irq].chip_data;
 	int local_irq = gsc_find_local_irq(irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
 
-	DBG(KERN_WARNING "%s(0x%p, %d)\n", __FUNCTION__, dino_dev, irq);
+	DBG(KERN_WARNING "%s(0x%p, %d)\n", __func__, dino_dev, irq);
 
 	/* Clear the matching bit in the IMR register */
 	dino_dev->imr &= ~(DINO_MASK_IRQ(local_irq));
@@ -314,7 +314,7 @@ static void dino_enable_irq(unsigned int irq)
 	int local_irq = gsc_find_local_irq(irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
 	u32 tmp;
 
-	DBG(KERN_WARNING "%s(0x%p, %d)\n", __FUNCTION__, dino_dev, irq);
+	DBG(KERN_WARNING "%s(0x%p, %d)\n", __func__, dino_dev, irq);
 
 	/*
 	** clear pending IRQ bits
@@ -340,7 +340,7 @@ static void dino_enable_irq(unsigned int irq)
 	tmp = __raw_readl(dino_dev->hba.base_addr+DINO_ILR);
 	if (tmp & DINO_MASK_IRQ(local_irq)) {
 		DBG(KERN_WARNING "%s(): IRQ asserted! (ILR 0x%x)\n",
-			__FUNCTION__, tmp);
+			__func__, tmp);
 		gsc_writel(dino_dev->txn_data, dino_dev->txn_addr);
 	}
 }
@@ -388,7 +388,7 @@ ilr_again:
 		int local_irq = __ffs(mask);
 		int irq = dino_dev->global_irq[local_irq];
 		DBG(KERN_DEBUG "%s(%d, %p) mask 0x%x\n",
-			__FUNCTION__, irq, intr_dev, mask);
+			__func__, irq, intr_dev, mask);
 		__do_IRQ(irq);
 		mask &= ~(1 << local_irq);
 	} while (mask);
@@ -566,7 +566,7 @@ dino_fixup_bus(struct pci_bus *bus)
 	int port_base = HBA_PORT_BASE(dino_dev->hba.hba_num);
 
 	DBG(KERN_WARNING "%s(0x%p) bus %d platform_data 0x%p\n",
-		__FUNCTION__, bus, bus->secondary,
+		__func__, bus, bus->secondary,
 		bus->bridge->platform_data);
 
 	/* Firmware doesn't set up card-mode dino, so we have to */
diff --git a/drivers/parisc/gsc.c b/drivers/parisc/gsc.c
index 1b3e3fd12d9..f7d088b897e 100644
--- a/drivers/parisc/gsc.c
+++ b/drivers/parisc/gsc.c
@@ -112,7 +112,7 @@ static void gsc_asic_disable_irq(unsigned int irq)
 	int local_irq = gsc_find_local_irq(irq, irq_dev->global_irq, 32);
 	u32 imr;
 
-	DEBPRINTK(KERN_DEBUG "%s(%d) %s: IMR 0x%x\n", __FUNCTION__, irq,
+	DEBPRINTK(KERN_DEBUG "%s(%d) %s: IMR 0x%x\n", __func__, irq,
 			irq_dev->name, imr);
 
 	/* Disable the IRQ line by clearing the bit in the IMR */
@@ -127,7 +127,7 @@ static void gsc_asic_enable_irq(unsigned int irq)
 	int local_irq = gsc_find_local_irq(irq, irq_dev->global_irq, 32);
 	u32 imr;
 
-	DEBPRINTK(KERN_DEBUG "%s(%d) %s: IMR 0x%x\n", __FUNCTION__, irq,
+	DEBPRINTK(KERN_DEBUG "%s(%d) %s: IMR 0x%x\n", __func__, irq,
 			irq_dev->name, imr);
 
 	/* Enable the IRQ line by setting the bit in the IMR */
diff --git a/drivers/parisc/lasi.c b/drivers/parisc/lasi.c
index cb3d2817612..bee510098ce 100644
--- a/drivers/parisc/lasi.c
+++ b/drivers/parisc/lasi.c
@@ -193,7 +193,7 @@ lasi_init_chip(struct parisc_device *dev)
 	dev->irq = gsc_alloc_irq(&gsc_irq);
 	if (dev->irq < 0) {
 		printk(KERN_ERR "%s(): cannot get GSC irq\n",
-				__FUNCTION__);
+				__func__);
 		kfree(lasi);
 		return -EBUSY;
 	}
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index 66ce6104836..a28c8946dea 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -377,12 +377,12 @@ static int elroy_cfg_read(struct pci_bus *bus, unsigned int devfn, int pos, int
 		/* original - Generate config cycle on broken elroy
 		   with risk we will miss PCI bus errors. */
 		*data = lba_rd_cfg(d, tok, pos, size);
-		DBG_CFG("%s(%x+%2x) -> 0x%x (a)\n", __FUNCTION__, tok, pos, *data);
+		DBG_CFG("%s(%x+%2x) -> 0x%x (a)\n", __func__, tok, pos, *data);
 		return 0;
 	}
 
 	if (LBA_SKIP_PROBE(d) && !lba_device_present(bus->secondary, devfn, d)) {
-		DBG_CFG("%s(%x+%2x) -> -1 (b)\n", __FUNCTION__, tok, pos);
+		DBG_CFG("%s(%x+%2x) -> -1 (b)\n", __func__, tok, pos);
 		/* either don't want to look or know device isn't present. */
 		*data = ~0U;
 		return(0);
@@ -398,7 +398,7 @@ static int elroy_cfg_read(struct pci_bus *bus, unsigned int devfn, int pos, int
 	case 2: *data = READ_REG16(data_reg + (pos & 2)); break;
 	case 4: *data = READ_REG32(data_reg); break;
 	}
-	DBG_CFG("%s(%x+%2x) -> 0x%x (c)\n", __FUNCTION__, tok, pos, *data);
+	DBG_CFG("%s(%x+%2x) -> 0x%x (c)\n", __func__, tok, pos, *data);
 	return 0;
 }
 
@@ -441,16 +441,16 @@ static int elroy_cfg_write(struct pci_bus *bus, unsigned int devfn, int pos, int
 	if (!LBA_SKIP_PROBE(d)) {
 		/* Original Workaround */
 		lba_wr_cfg(d, tok, pos, (u32) data, size);
-		DBG_CFG("%s(%x+%2x) = 0x%x (a)\n", __FUNCTION__, tok, pos,data);
+		DBG_CFG("%s(%x+%2x) = 0x%x (a)\n", __func__, tok, pos,data);
 		return 0;
 	}
 
 	if (LBA_SKIP_PROBE(d) && (!lba_device_present(bus->secondary, devfn, d))) {
-		DBG_CFG("%s(%x+%2x) = 0x%x (b)\n", __FUNCTION__, tok, pos,data);
+		DBG_CFG("%s(%x+%2x) = 0x%x (b)\n", __func__, tok, pos,data);
 		return 1; /* New Workaround */
 	}
 
-	DBG_CFG("%s(%x+%2x) = 0x%x (c)\n", __FUNCTION__, tok, pos, data);
+	DBG_CFG("%s(%x+%2x) = 0x%x (c)\n", __func__, tok, pos, data);
 
 	/* Basic Algorithm */
 	LBA_CFG_ADDR_SETUP(d, tok | pos);
@@ -521,7 +521,7 @@ static int mercury_cfg_write(struct pci_bus *bus, unsigned int devfn, int pos, i
 	if ((pos > 255) || (devfn > 255))
 		return -EINVAL;
 
-	DBG_CFG("%s(%x+%2x) <- 0x%x (c)\n", __FUNCTION__, tok, pos, data);
+	DBG_CFG("%s(%x+%2x) <- 0x%x (c)\n", __func__, tok, pos, data);
 
 	LBA_CFG_TR4_ADDR_SETUP(d, tok | pos);
 	switch(size) {
@@ -890,7 +890,7 @@ LBA_PORT_IN(32, 0)
 #define LBA_PORT_OUT(size, mask) \
 static void lba_astro_out##size (struct pci_hba_data *d, u16 addr, u##size val) \
 { \
-	DBG_PORT("%s(0x%p, 0x%x, 0x%x)\n", __FUNCTION__, d, addr, val); \
+	DBG_PORT("%s(0x%p, 0x%x, 0x%x)\n", __func__, d, addr, val); \
 	WRITE_REG##size(val, astro_iop_base + addr); \
 	if (LBA_DEV(d)->hw_rev < 3) \
 		lba_t32 = READ_U32(d->base_addr + LBA_FUNC_ID); \
@@ -932,7 +932,7 @@ static struct pci_port_ops lba_astro_port_ops = {
 static u##size lba_pat_in##size (struct pci_hba_data *l, u16 addr) \
 { \
 	u##size t; \
-	DBG_PORT("%s(0x%p, 0x%x) ->", __FUNCTION__, l, addr); \
+	DBG_PORT("%s(0x%p, 0x%x) ->", __func__, l, addr); \
 	t = READ_REG##size(PIOP_TO_GMMIO(LBA_DEV(l), addr)); \
 	DBG_PORT(" 0x%x\n", t); \
 	return (t); \
@@ -948,7 +948,7 @@ LBA_PORT_IN(32, 0)
 static void lba_pat_out##size (struct pci_hba_data *l, u16 addr, u##size val) \
 { \
 	void __iomem *where = PIOP_TO_GMMIO(LBA_DEV(l), addr); \
-	DBG_PORT("%s(0x%p, 0x%x, 0x%x)\n", __FUNCTION__, l, addr, val); \
+	DBG_PORT("%s(0x%p, 0x%x, 0x%x)\n", __func__, l, addr, val); \
 	WRITE_REG##size(val, where); \
 	/* flush the I/O down to the elroy at least */ \
 	lba_t32 = READ_U32(l->base_addr + LBA_FUNC_ID); \
@@ -1584,7 +1584,7 @@ void lba_set_iregs(struct parisc_device *lba, u32 ibase, u32 imask)
 	WARN_ON((ibase & 0x001fffff) != 0);
 	WARN_ON((imask & 0x001fffff) != 0);
 
-	DBG("%s() ibase 0x%x imask 0x%x\n", __FUNCTION__, ibase, imask);
+	DBG("%s() ibase 0x%x imask 0x%x\n", __func__, ibase, imask);
 	WRITE_REG32( imask, base_addr + LBA_IMASK);
 	WRITE_REG32( ibase, base_addr + LBA_IBASE);
 	iounmap(base_addr);
diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c
index 703b85edb00..f9b12664f9f 100644
--- a/drivers/parisc/led.c
+++ b/drivers/parisc/led.c
@@ -569,7 +569,7 @@ int __init register_led_driver(int model, unsigned long cmd_reg, unsigned long d
 
 	default:
 		printk(KERN_ERR "%s: Wrong LCD/LED model %d !\n",
-		       __FUNCTION__, lcd_info.model);
+		       __func__, lcd_info.model);
 		return 1;
 	}
 
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index afc849bd3f5..bc73b96346f 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -384,7 +384,7 @@ sba_search_bitmap(struct ioc *ioc, struct device *dev,
 	}
 	mask = RESMAP_MASK(bits_wanted) >> bitshiftcnt;
 
-	DBG_RES("%s() o %ld %p", __FUNCTION__, o, res_ptr);
+	DBG_RES("%s() o %ld %p", __func__, o, res_ptr);
 	while(res_ptr < res_end)
 	{
 		DBG_RES(" %p %lx %lx\n", res_ptr, mask, *res_ptr);
@@ -454,7 +454,7 @@ sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
 #endif
 
 	DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
-		__FUNCTION__, size, pages_needed, pide,
+		__func__, size, pages_needed, pide,
 		(uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
 		ioc->res_bitshift );
 
@@ -497,7 +497,7 @@ sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
 	unsigned long m = RESMAP_MASK(bits_not_wanted) >> (pide & (BITS_PER_LONG - 1));
 
 	DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n",
-		__FUNCTION__, (uint) iova, size,
+		__func__, (uint) iova, size,
 		bits_not_wanted, m, pide, res_ptr, *res_ptr);
 
 #ifdef SBA_COLLECT_STATS
@@ -740,7 +740,7 @@ sba_map_single(struct device *dev, void *addr, size_t size,
 	iovp = (dma_addr_t) pide << IOVP_SHIFT;
 
 	DBG_RUN("%s() 0x%p -> 0x%lx\n",
-		__FUNCTION__, addr, (long) iovp | offset);
+		__func__, addr, (long) iovp | offset);
 
 	pdir_start = &(ioc->pdir_base[pide]);
 
@@ -798,7 +798,7 @@ sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
 	unsigned long flags;
 	dma_addr_t offset;
 
-	DBG_RUN("%s() iovp 0x%lx/%x\n", __FUNCTION__, (long) iova, size);
+	DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size);
 
 	ioc = GET_IOC(dev);
 	offset = iova & ~IOVP_MASK;
@@ -937,7 +937,7 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
 	int coalesced, filled = 0;
 	unsigned long flags;
 
-	DBG_RUN_SG("%s() START %d entries\n", __FUNCTION__, nents);
+	DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
 
 	ioc = GET_IOC(dev);
 
@@ -998,7 +998,7 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
 
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
 
-	DBG_RUN_SG("%s() DONE %d mappings\n", __FUNCTION__, filled);
+	DBG_RUN_SG("%s() DONE %d mappings\n", __func__, filled);
 
 	return filled;
 }
@@ -1023,7 +1023,7 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
 #endif
 
 	DBG_RUN_SG("%s() START %d entries, %p,%x\n",
-		__FUNCTION__, nents, sg_virt_addr(sglist), sglist->length);
+		__func__, nents, sg_virt_addr(sglist), sglist->length);
 
 	ioc = GET_IOC(dev);
 
@@ -1047,7 +1047,7 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
 		++sglist;
 	}
 
-	DBG_RUN_SG("%s() DONE (nents %d)\n", __FUNCTION__, nents);
+	DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
 
 #ifdef ASSERT_PDIR_SANITY
 	spin_lock_irqsave(&ioc->res_lock, flags);
@@ -1118,7 +1118,7 @@ sba_alloc_pdir(unsigned int pdir_size)
 	pdir_base = __get_free_pages(GFP_KERNEL, pdir_order);
 	if (NULL == (void *) pdir_base) {
 		panic("%s() could not allocate I/O Page Table\n",
-			__FUNCTION__);
+			__func__);
 	}
 
 	/* If this is not PA8700 (PCX-W2)
@@ -1261,7 +1261,7 @@ sba_ioc_init_pluto(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
 	ioc->pdir_size = (iova_space_size / IOVP_SIZE) * sizeof(u64);
 
 	DBG_INIT("%s() hpa 0x%p IOV %dMB (%d bits)\n",
-		__FUNCTION__, ioc->ioc_hpa, iova_space_size >> 20,
+		__func__, ioc->ioc_hpa, iova_space_size >> 20,
 		iov_order + PAGE_SHIFT);
 
 	ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL,
@@ -1272,7 +1272,7 @@ sba_ioc_init_pluto(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
 	memset(ioc->pdir_base, 0, ioc->pdir_size);
 
 	DBG_INIT("%s() pdir %p size %x\n",
-			__FUNCTION__, ioc->pdir_base, ioc->pdir_size);
+			__func__, ioc->pdir_base, ioc->pdir_size);
 
 #ifdef SBA_HINT_SUPPORT
 	ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
@@ -1354,7 +1354,7 @@ sba_ioc_init_pluto(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
 
 	if (agp_found && sba_reserve_agpgart) {
 		printk(KERN_INFO "%s: reserving %dMb of IOVA space for agpgart\n",
-			__FUNCTION__, (iova_space_size/2) >> 20);
+			__func__, (iova_space_size/2) >> 20);
 		ioc->pdir_size /= 2;
 		ioc->pdir_base[PDIR_INDEX(iova_space_size/2)] = SBA_AGPGART_COOKIE;
 	}
@@ -1406,7 +1406,7 @@ sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
 	ioc->pdir_size = pdir_size = (iova_space_size/IOVP_SIZE) * sizeof(u64);
 
 	DBG_INIT("%s() hpa 0x%lx mem %ldMB IOV %dMB (%d bits)\n",
-			__FUNCTION__,
+			__func__,
 			ioc->ioc_hpa,
 			(unsigned long) num_physpages >> (20 - PAGE_SHIFT),
 			iova_space_size>>20,
@@ -1415,7 +1415,7 @@ sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
 	ioc->pdir_base = sba_alloc_pdir(pdir_size);
 
 	DBG_INIT("%s() pdir %p size %x\n",
-			__FUNCTION__, ioc->pdir_base, pdir_size);
+			__func__, ioc->pdir_base, pdir_size);
 
 #ifdef SBA_HINT_SUPPORT
 	/* FIXME : DMA HINTs not used */
@@ -1443,7 +1443,7 @@ sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
 #endif
 
 	DBG_INIT("%s() IOV base 0x%lx mask 0x%0lx\n",
-		__FUNCTION__, ioc->ibase, ioc->imask);
+		__func__, ioc->ibase, ioc->imask);
 
 	/*
 	** FIXME: Hint registers are programmed with default hint
@@ -1470,7 +1470,7 @@ sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
 
 	ioc->ibase = 0; /* used by SBA_IOVA and related macros */
 
-	DBG_INIT("%s() DONE\n", __FUNCTION__);
+	DBG_INIT("%s() DONE\n", __func__);
 }
 
 
@@ -1544,7 +1544,7 @@ printk("sba_hw_init(): mem_boot 0x%x 0x%x 0x%x 0x%x\n", PAGE0->mem_boot.hpa,
 	if (!IS_PLUTO(sba_dev->dev)) {
 		ioc_ctl = READ_REG(sba_dev->sba_hpa+IOC_CTRL);
 		DBG_INIT("%s() hpa 0x%lx ioc_ctl 0x%Lx ->",
-			__FUNCTION__, sba_dev->sba_hpa, ioc_ctl);
+			__func__, sba_dev->sba_hpa, ioc_ctl);
 		ioc_ctl &= ~(IOC_CTRL_RM | IOC_CTRL_NC | IOC_CTRL_CE);
 		ioc_ctl |= IOC_CTRL_DD | IOC_CTRL_D4 | IOC_CTRL_TC;
 		/* j6700 v1.6 firmware sets 0x294f */
@@ -1675,7 +1675,7 @@ sba_common_init(struct sba_device *sba_dev)
 
 		res_size >>= 3;  /* convert bit count to byte count */
 		DBG_INIT("%s() res_size 0x%x\n",
-			__FUNCTION__, res_size);
+			__func__, res_size);
 
 		sba_dev->ioc[i].res_size = res_size;
 		sba_dev->ioc[i].res_map = (char *) __get_free_pages(GFP_KERNEL, get_order(res_size));
@@ -1688,7 +1688,7 @@ sba_common_init(struct sba_device *sba_dev)
 		if (NULL == sba_dev->ioc[i].res_map)
 		{
 			panic("%s:%s() could not allocate resource map\n",
-			      __FILE__, __FUNCTION__ );
+			      __FILE__, __func__ );
 		}
 
 		memset(sba_dev->ioc[i].res_map, 0, res_size);
@@ -1725,7 +1725,7 @@ sba_common_init(struct sba_device *sba_dev)
 #endif
 
 		DBG_INIT("%s() %d res_map %x %p\n",
-			__FUNCTION__, i, res_size, sba_dev->ioc[i].res_map);
+			__func__, i, res_size, sba_dev->ioc[i].res_map);
 	}
 
 	spin_lock_init(&sba_dev->sba_lock);
diff --git a/drivers/parisc/wax.c b/drivers/parisc/wax.c
index 813c2c24ab1..892a83bbe73 100644
--- a/drivers/parisc/wax.c
+++ b/drivers/parisc/wax.c
@@ -93,7 +93,7 @@ wax_init_chip(struct parisc_device *dev)
 	dev->irq = gsc_claim_irq(&gsc_irq, WAX_GSC_IRQ);
 	if (dev->irq < 0) {
 		printk(KERN_ERR "%s(): cannot get GSC irq\n",
-				__FUNCTION__);
+				__func__);
 		kfree(wax);
 		return -EBUSY;
 	}