Diffstat (limited to 'drivers/net/irda/vlsi_ir.c')
-rw-r--r-- | drivers/net/irda/vlsi_ir.c | 92
1 file changed, 46 insertions, 46 deletions
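For context: the patch replaces the GCC-specific __FUNCTION__ identifier with __func__, the predefined identifier standardized in C99, throughout the driver's logging calls. A minimal, hypothetical userspace sketch (not code from the driver) illustrating what __func__ expands to:

#include <stdio.h>

/* __func__ evaluates to the name of the enclosing function ("demo_debug" here),
 * which is exactly what the kernel logging calls in the patch rely on. */
static void demo_debug(void)
{
	printf("%s: rx fifo not empty(%d)\n", __func__, 0);
}

int main(void)
{
	demo_debug();
	return 0;
}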
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index d15e00b8591e..18f4b3a96aed 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -140,15 +140,15 @@ static void vlsi_ring_debug(struct vlsi_ring *r)
 	unsigned i;

 	printk(KERN_DEBUG "%s - ring %p / size %u / mask 0x%04x / len %u / dir %d / hw %p\n",
-		__FUNCTION__, r, r->size, r->mask, r->len, r->dir, r->rd[0].hw);
-	printk(KERN_DEBUG "%s - head = %d / tail = %d\n", __FUNCTION__,
+		__func__, r, r->size, r->mask, r->len, r->dir, r->rd[0].hw);
+	printk(KERN_DEBUG "%s - head = %d / tail = %d\n", __func__,
 		atomic_read(&r->head) & r->mask, atomic_read(&r->tail) & r->mask);
 	for (i = 0; i < r->size; i++) {
 		rd = &r->rd[i];
-		printk(KERN_DEBUG "%s - ring descr %u: ", __FUNCTION__, i);
+		printk(KERN_DEBUG "%s - ring descr %u: ", __func__, i);
 		printk("skb=%p data=%p hw=%p\n", rd->skb, rd->buf, rd->hw);
 		printk(KERN_DEBUG "%s - hw: status=%02x count=%u addr=0x%08x\n",
-			__FUNCTION__, (unsigned) rd_get_status(rd),
+			__func__, (unsigned) rd_get_status(rd),
 			(unsigned) rd_get_count(rd), (unsigned) rd_get_addr(rd));
 	}
 }
@@ -435,7 +435,7 @@ static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr
 	    || !(busaddr = pci_map_single(pdev, rd->buf, len, dir))) {
 		if (rd->buf) {
 			IRDA_ERROR("%s: failed to create PCI-MAP for %p",
-				__FUNCTION__, rd->buf);
+				__func__, rd->buf);
 			kfree(rd->buf);
 			rd->buf = NULL;
 		}
@@ -489,7 +489,7 @@ static int vlsi_create_hwif(vlsi_irda_dev_t *idev)
 	ringarea = pci_alloc_consistent(idev->pdev, HW_RING_AREA_SIZE, &idev->busaddr);
 	if (!ringarea) {
 		IRDA_ERROR("%s: insufficient memory for descriptor rings\n",
-			__FUNCTION__);
+			__func__);
 		goto out;
 	}
 	memset(ringarea, 0, HW_RING_AREA_SIZE);
@@ -564,7 +564,7 @@ static int vlsi_process_rx(struct vlsi_ring *r, struct ring_descr *rd)
 	crclen = (idev->mode==IFF_FIR) ? sizeof(u32) : sizeof(u16);
 	len -= crclen; /* remove trailing CRC */
 	if (len <= 0) {
-		IRDA_DEBUG(0, "%s: strange frame (len=%d)\n", __FUNCTION__, len);
+		IRDA_DEBUG(0, "%s: strange frame (len=%d)\n", __func__, len);
 		ret |= VLSI_RX_DROP;
 		goto done;
 	}
@@ -579,14 +579,14 @@ static int vlsi_process_rx(struct vlsi_ring *r, struct ring_descr *rd)
 		 */
 		le16_to_cpus(rd->buf+len);
 		if (irda_calc_crc16(INIT_FCS,rd->buf,len+crclen) != GOOD_FCS) {
-			IRDA_DEBUG(0, "%s: crc error\n", __FUNCTION__);
+			IRDA_DEBUG(0, "%s: crc error\n", __func__);
 			ret |= VLSI_RX_CRC;
 			goto done;
 		}
 	}

 	if (!rd->skb) {
-		IRDA_WARNING("%s: rx packet lost\n", __FUNCTION__);
+		IRDA_WARNING("%s: rx packet lost\n", __func__);
 		ret |= VLSI_RX_DROP;
 		goto done;
 	}
@@ -617,7 +617,7 @@ static void vlsi_fill_rx(struct vlsi_ring *r)
 	for (rd = ring_last(r); rd != NULL; rd = ring_put(r)) {
 		if (rd_is_active(rd)) {
 			IRDA_WARNING("%s: driver bug: rx descr race with hw\n",
-				__FUNCTION__);
+				__func__);
 			vlsi_ring_debug(r);
 			break;
 		}
@@ -676,7 +676,7 @@ static void vlsi_rx_interrupt(struct net_device *ndev)

 	if (ring_first(r) == NULL) {
 		/* we are in big trouble, if this should ever happen */
-		IRDA_ERROR("%s: rx ring exhausted!\n", __FUNCTION__);
+		IRDA_ERROR("%s: rx ring exhausted!\n", __func__);
 		vlsi_ring_debug(r);
 	}
 	else
@@ -697,7 +697,7 @@ static void vlsi_unarm_rx(vlsi_irda_dev_t *idev)
 		if (rd_is_active(rd)) {
 			rd_set_status(rd, 0);
 			if (rd_get_count(rd)) {
-				IRDA_DEBUG(0, "%s - dropping rx packet\n", __FUNCTION__);
+				IRDA_DEBUG(0, "%s - dropping rx packet\n", __func__);
 				ret = -VLSI_RX_DROP;
 			}
 			rd_set_count(rd, 0);
@@ -772,7 +772,7 @@ static int vlsi_set_baud(vlsi_irda_dev_t *idev, unsigned iobase)
 	int fifocnt;

 	baudrate = idev->new_baud;
-	IRDA_DEBUG(2, "%s: %d -> %d\n", __FUNCTION__, idev->baud, idev->new_baud);
+	IRDA_DEBUG(2, "%s: %d -> %d\n", __func__, idev->baud, idev->new_baud);
 	if (baudrate == 4000000) {
 		mode = IFF_FIR;
 		config = IRCFG_FIR;
@@ -789,7 +789,7 @@ static int vlsi_set_baud(vlsi_irda_dev_t *idev, unsigned iobase)
 		switch(baudrate) {
 		default:
 			IRDA_WARNING("%s: undefined baudrate %d - fallback to 9600!\n",
-				__FUNCTION__, baudrate);
+				__func__, baudrate);
 			baudrate = 9600;
 			/* fallthru */
 		case 2400:
@@ -806,7 +806,7 @@ static int vlsi_set_baud(vlsi_irda_dev_t *idev, unsigned iobase)

 	fifocnt = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
 	if (fifocnt != 0) {
-		IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n", __FUNCTION__, fifocnt);
+		IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n", __func__, fifocnt);
 	}

 	outw(0, iobase+VLSI_PIO_IRENABLE);
@@ -830,14 +830,14 @@ static int vlsi_set_baud(vlsi_irda_dev_t *idev, unsigned iobase)
 	config ^= IRENABLE_SIR_ON;

 	if (config != (IRENABLE_PHYANDCLOCK|IRENABLE_ENRXST)) {
-		IRDA_WARNING("%s: failed to set %s mode!\n", __FUNCTION__,
+		IRDA_WARNING("%s: failed to set %s mode!\n", __func__,
 			(mode==IFF_SIR)?"SIR":((mode==IFF_MIR)?"MIR":"FIR"));
 		ret = -1;
 	}
 	else {
 		if (inw(iobase+VLSI_PIO_PHYCTL) != nphyctl) {
 			IRDA_WARNING("%s: failed to apply baudrate %d\n",
-				__FUNCTION__, baudrate);
+				__func__, baudrate);
 			ret = -1;
 		}
 		else {
@@ -849,7 +849,7 @@ static int vlsi_set_baud(vlsi_irda_dev_t *idev, unsigned iobase)
 	}

 	if (ret)
-		vlsi_reg_debug(iobase,__FUNCTION__);
+		vlsi_reg_debug(iobase,__func__);

 	return ret;
 }
@@ -982,7 +982,7 @@ static int vlsi_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)

 		if (len >= r->len-5)
 			IRDA_WARNING("%s: possible buffer overflow with SIR wrapping!\n",
-				__FUNCTION__);
+				__func__);
 	}
 	else {
 		/* hw deals with MIR/FIR mode wrapping */
@@ -1027,7 +1027,7 @@ static int vlsi_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)

 		fifocnt = inw(ndev->base_addr+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
 		if (fifocnt != 0) {
-			IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n", __FUNCTION__, fifocnt);
+			IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n", __func__, fifocnt);
 		}

 		config = inw(iobase+VLSI_PIO_IRCFG);
@@ -1040,7 +1040,7 @@ static int vlsi_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)

 	if (ring_put(r) == NULL) {
 		netif_stop_queue(ndev);
-		IRDA_DEBUG(3, "%s: tx ring full - queue stopped\n", __FUNCTION__);
+		IRDA_DEBUG(3, "%s: tx ring full - queue stopped\n", __func__);
 	}
 	spin_unlock_irqrestore(&idev->lock, flags);

@@ -1049,7 +1049,7 @@ static int vlsi_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 drop_unlock:
 	spin_unlock_irqrestore(&idev->lock, flags);
 drop:
-	IRDA_WARNING("%s: dropping packet - %s\n", __FUNCTION__, msg);
+	IRDA_WARNING("%s: dropping packet - %s\n", __func__, msg);
 	dev_kfree_skb_any(skb);
 	idev->stats.tx_errors++;
 	idev->stats.tx_dropped++;
@@ -1106,7 +1106,7 @@ static void vlsi_tx_interrupt(struct net_device *ndev)
 		fifocnt = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
 		if (fifocnt != 0) {
 			IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n",
-				__FUNCTION__, fifocnt);
+				__func__, fifocnt);
 		}
 		outw(config | IRCFG_ENTX, iobase+VLSI_PIO_IRCFG);
 	}
@@ -1115,7 +1115,7 @@ static void vlsi_tx_interrupt(struct net_device *ndev)

 	if (netif_queue_stopped(ndev) && !idev->new_baud) {
 		netif_wake_queue(ndev);
-		IRDA_DEBUG(3, "%s: queue awoken\n", __FUNCTION__);
+		IRDA_DEBUG(3, "%s: queue awoken\n", __func__);
 	}
 }

@@ -1138,7 +1138,7 @@ static void vlsi_unarm_tx(vlsi_irda_dev_t *idev)
 				dev_kfree_skb_any(rd->skb);
 				rd->skb = NULL;
 			}
 			IRDA_DEBUG(0, "%s - dropping tx packet\n", __FUNCTION__);
-			IRDA_DEBUG(0, "%s - dropping tx packet\n", __FUNCTION__);
+			IRDA_DEBUG(0, "%s - dropping tx packet\n", __func__);
 			ret = -VLSI_TX_DROP;
 		}
 		else
@@ -1188,7 +1188,7 @@ static int vlsi_start_clock(struct pci_dev *pdev)
 	if (count < 3) {
 		if (clksrc == 1) { /* explicitly asked for PLL hence bail out */
 			IRDA_ERROR("%s: no PLL or failed to lock!\n",
-				__FUNCTION__);
+				__func__);
 			clkctl = CLKCTL_CLKSTP;
 			pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
 			return -1;
@@ -1197,7 +1197,7 @@ static int vlsi_start_clock(struct pci_dev *pdev)
 			clksrc = 3; /* fallback to 40MHz XCLK (OB800) */

 		IRDA_DEBUG(0, "%s: PLL not locked, fallback to clksrc=%d\n",
-			__FUNCTION__, clksrc);
+			__func__, clksrc);
 	}
 	else
 		clksrc = 1; /* got successful PLL lock */
@@ -1269,7 +1269,7 @@ static int vlsi_init_chip(struct pci_dev *pdev)
 	/* start the clock and clean the registers */

 	if (vlsi_start_clock(pdev)) {
-		IRDA_ERROR("%s: no valid clock source\n", __FUNCTION__);
+		IRDA_ERROR("%s: no valid clock source\n", __func__);
 		return -1;
 	}
 	iobase = ndev->base_addr;
@@ -1386,7 +1386,7 @@ static void vlsi_tx_timeout(struct net_device *ndev)
 	vlsi_irda_dev_t *idev = ndev->priv;


-	vlsi_reg_debug(ndev->base_addr, __FUNCTION__);
+	vlsi_reg_debug(ndev->base_addr, __func__);
 	vlsi_ring_debug(idev->tx_ring);

 	if (netif_running(ndev))
@@ -1401,7 +1401,7 @@ static void vlsi_tx_timeout(struct net_device *ndev)

 	if (vlsi_start_hw(idev))
 		IRDA_ERROR("%s: failed to restart hw - %s(%s) unusable!\n",
-			__FUNCTION__, pci_name(idev->pdev), ndev->name);
+			__func__, pci_name(idev->pdev), ndev->name);
 	else
 		netif_start_queue(ndev);
 }
@@ -1446,7 +1446,7 @@ static int vlsi_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
 		break;
 	default:
 		IRDA_WARNING("%s: notsupp - cmd=%04x\n",
-			__FUNCTION__, cmd);
+			__func__, cmd);
 		ret = -EOPNOTSUPP;
 	}

@@ -1491,7 +1491,7 @@ static irqreturn_t vlsi_interrupt(int irq, void *dev_instance)

 	if (boguscount <= 0)
 		IRDA_MESSAGE("%s: too much work in interrupt!\n",
-			__FUNCTION__);
+			__func__);
 	return IRQ_RETVAL(handled);
 }

@@ -1504,7 +1504,7 @@ static int vlsi_open(struct net_device *ndev)
 	char hwname[32];

 	if (pci_request_regions(idev->pdev, drivername)) {
-		IRDA_WARNING("%s: io resource busy\n", __FUNCTION__);
+		IRDA_WARNING("%s: io resource busy\n", __func__);
 		goto errout;
 	}
 	ndev->base_addr = pci_resource_start(idev->pdev,0);
@@ -1519,7 +1519,7 @@ static int vlsi_open(struct net_device *ndev)
 	if (request_irq(ndev->irq, vlsi_interrupt, IRQF_SHARED,
 			drivername, ndev)) {
 		IRDA_WARNING("%s: couldn't get IRQ: %d\n",
-			__FUNCTION__, ndev->irq);
+			__func__, ndev->irq);
 		goto errout_io;
 	}

@@ -1540,7 +1540,7 @@ static int vlsi_open(struct net_device *ndev)

 	netif_start_queue(ndev);

-	IRDA_MESSAGE("%s: device %s operational\n", __FUNCTION__, ndev->name);
+	IRDA_MESSAGE("%s: device %s operational\n", __func__, ndev->name);

 	return 0;

@@ -1574,7 +1574,7 @@ static int vlsi_close(struct net_device *ndev)

 	pci_release_regions(idev->pdev);

-	IRDA_MESSAGE("%s: device %s stopped\n", __FUNCTION__, ndev->name);
+	IRDA_MESSAGE("%s: device %s stopped\n", __func__, ndev->name);

 	return 0;
 }
@@ -1593,7 +1593,7 @@ static int vlsi_irda_init(struct net_device *ndev)

 	if (pci_set_dma_mask(pdev,DMA_MASK_USED_BY_HW)
 	    || pci_set_dma_mask(pdev,DMA_MASK_MSTRPAGE)) {
-		IRDA_ERROR("%s: aborting due to PCI BM-DMA address limitations\n", __FUNCTION__);
+		IRDA_ERROR("%s: aborting due to PCI BM-DMA address limitations\n", __func__);
 		return -1;
 	}

@@ -1645,14 +1645,14 @@ vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)

 	if ( !pci_resource_start(pdev,0)
 	     || !(pci_resource_flags(pdev,0) & IORESOURCE_IO) ) {
-		IRDA_ERROR("%s: bar 0 invalid", __FUNCTION__);
+		IRDA_ERROR("%s: bar 0 invalid", __func__);
 		goto out_disable;
 	}

 	ndev = alloc_irdadev(sizeof(*idev));
 	if (ndev==NULL) {
 		IRDA_ERROR("%s: Unable to allocate device memory.\n",
-			__FUNCTION__);
+			__func__);
 		goto out_disable;
 	}

@@ -1667,7 +1667,7 @@ vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto out_freedev;

 	if (register_netdev(ndev) < 0) {
-		IRDA_ERROR("%s: register_netdev failed\n", __FUNCTION__);
+		IRDA_ERROR("%s: register_netdev failed\n", __func__);
 		goto out_freedev;
 	}

@@ -1678,7 +1678,7 @@ vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 				vlsi_proc_root, VLSI_PROC_FOPS, ndev);
 		if (!ent) {
 			IRDA_WARNING("%s: failed to create proc entry\n",
-				__FUNCTION__);
+				__func__);
 		} else {
 			ent->size = 0;
 		}
@@ -1745,7 +1745,7 @@ static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state)

 	if (!ndev) {
 		IRDA_ERROR("%s - %s: no netdevice \n",
-			__FUNCTION__, pci_name(pdev));
+			__func__, pci_name(pdev));
 		return 0;
 	}
 	idev = ndev->priv;
@@ -1756,7 +1756,7 @@ static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state)
 		pdev->current_state = state.event;
 	}
 	else
-		IRDA_ERROR("%s - %s: invalid suspend request %u -> %u\n", __FUNCTION__, pci_name(pdev), pdev->current_state, state.event);
+		IRDA_ERROR("%s - %s: invalid suspend request %u -> %u\n", __func__, pci_name(pdev), pdev->current_state, state.event);
 	mutex_unlock(&idev->mtx);
 	return 0;
 }
@@ -1784,7 +1784,7 @@ static int vlsi_irda_resume(struct pci_dev *pdev)

 	if (!ndev) {
 		IRDA_ERROR("%s - %s: no netdevice \n",
-			__FUNCTION__, pci_name(pdev));
+			__func__, pci_name(pdev));
 		return 0;
 	}
 	idev = ndev->priv;
@@ -1792,7 +1792,7 @@ static int vlsi_irda_resume(struct pci_dev *pdev)
 	if (pdev->current_state == 0) {
 		mutex_unlock(&idev->mtx);
 		IRDA_WARNING("%s - %s: already resumed\n",
-			__FUNCTION__, pci_name(pdev));
+			__func__, pci_name(pdev));
 		return 0;
 	}

@@ -1811,7 +1811,7 @@ static int vlsi_irda_resume(struct pci_dev *pdev)
 	 * now we explicitly set pdev->current_state = 0 after enabling the
 	 * device and independently resume_ok should catch any garbage config.
 	 */
-	IRDA_WARNING("%s - hm, nothing to resume?\n", __FUNCTION__);
+	IRDA_WARNING("%s - hm, nothing to resume?\n", __func__);
 	mutex_unlock(&idev->mtx);
 	return 0;
 }