path: root/drivers/net/irda/sir_dev.c
author	Harvey Harrison <harvey.harrison@gmail.com>	2008-07-30 20:20:18 -0400
committer	David S. Miller <davem@davemloft.net>	2008-07-30 20:20:18 -0400
commit	a97a6f10771b90235b33c13a6db9279237a08422 (patch)
tree	60f73c13805d3e9968ac734960505ab081173945 /drivers/net/irda/sir_dev.c
parent	1fa98174ba980b2826edd1e4632a17916dfdb4fa (diff)
irda: replace __FUNCTION__ with __func__
__FUNCTION__ is gcc-specific, use __func__

Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
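For reference, __func__ is the predefined identifier required by C99 (each function behaves as if it contained the declaration static const char __func__[] = "function-name";), while __FUNCTION__ is a gcc-specific spelling of the same value. A minimal standalone sketch of the difference, independent of this patch (show_name and main are illustrative, not kernel code):

#include <stdio.h>

static void show_name(void)
{
	/* Standard C99: __func__ names the enclosing function. */
	printf("standard: %s\n", __func__);

#ifdef __GNUC__
	/* gcc extension: same string, but not guaranteed outside gcc. */
	printf("gcc-only: %s\n", __FUNCTION__);
#endif
}

int main(void)
{
	show_name();	/* prints "standard: show_name" and, under gcc, "gcc-only: show_name" */
	return 0;
}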
Diffstat (limited to 'drivers/net/irda/sir_dev.c')
-rw-r--r--	drivers/net/irda/sir_dev.c | 63
1 file changed, 32 insertions, 31 deletions
diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c
index 6078e03de9a8..3f32909c24c8 100644
--- a/drivers/net/irda/sir_dev.c
+++ b/drivers/net/irda/sir_dev.c
@@ -80,7 +80,7 @@ static int sirdev_tx_complete_fsm(struct sir_dev *dev)
 			return 0;
 
 		default:
-			IRDA_ERROR("%s - undefined state\n", __FUNCTION__);
+			IRDA_ERROR("%s - undefined state\n", __func__);
 			return -EINVAL;
 		}
 		fsm->substate = next_state;
@@ -107,11 +107,11 @@ static void sirdev_config_fsm(struct work_struct *work)
 	int ret = -1;
 	unsigned delay;
 
-	IRDA_DEBUG(2, "%s(), <%ld>\n", __FUNCTION__, jiffies);
+	IRDA_DEBUG(2, "%s(), <%ld>\n", __func__, jiffies);
 
 	do {
 		IRDA_DEBUG(3, "%s - state=0x%04x / substate=0x%04x\n",
-			   __FUNCTION__, fsm->state, fsm->substate);
+			   __func__, fsm->state, fsm->substate);
 
 		next_state = fsm->state;
 		delay = 0;
@@ -249,12 +249,12 @@ static void sirdev_config_fsm(struct work_struct *work)
 			break;
 
 		default:
-			IRDA_ERROR("%s - undefined state\n", __FUNCTION__);
+			IRDA_ERROR("%s - undefined state\n", __func__);
 			fsm->result = -EINVAL;
 			/* fall thru */
 
 		case SIRDEV_STATE_ERROR:
-			IRDA_ERROR("%s - error: %d\n", __FUNCTION__, fsm->result);
+			IRDA_ERROR("%s - error: %d\n", __func__, fsm->result);
 
 #if 0	/* don't enable this before we have netdev->tx_timeout to recover */
 			netif_stop_queue(dev->netdev);
@@ -284,11 +284,12 @@ int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned par
 {
 	struct sir_fsm *fsm = &dev->fsm;
 
-	IRDA_DEBUG(2, "%s - state=0x%04x / param=%u\n", __FUNCTION__, initial_state, param);
+	IRDA_DEBUG(2, "%s - state=0x%04x / param=%u\n", __func__,
+		   initial_state, param);
 
 	if (down_trylock(&fsm->sem)) {
 		if (in_interrupt() || in_atomic() || irqs_disabled()) {
-			IRDA_DEBUG(1, "%s(), state machine busy!\n", __FUNCTION__);
+			IRDA_DEBUG(1, "%s(), state machine busy!\n", __func__);
 			return -EWOULDBLOCK;
 		} else
 			down(&fsm->sem);
@@ -296,7 +297,7 @@ int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned par
 
 	if (fsm->state == SIRDEV_STATE_DEAD) {
 		/* race with sirdev_close should never happen */
-		IRDA_ERROR("%s(), instance staled!\n", __FUNCTION__);
+		IRDA_ERROR("%s(), instance staled!\n", __func__);
 		up(&fsm->sem);
 		return -ESTALE;		/* or better EPIPE? */
 	}
@@ -341,7 +342,7 @@ int sirdev_set_dongle(struct sir_dev *dev, IRDA_DONGLE type)
 {
 	int err;
 
-	IRDA_DEBUG(3, "%s : requesting dongle %d.\n", __FUNCTION__, type);
+	IRDA_DEBUG(3, "%s : requesting dongle %d.\n", __func__, type);
 
 	err = sirdev_schedule_dongle_open(dev, type);
 	if (unlikely(err))
@@ -376,7 +377,7 @@ int sirdev_raw_write(struct sir_dev *dev, const char *buf, int len)
 
 	ret = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);
 	if (ret > 0) {
-		IRDA_DEBUG(3, "%s(), raw-tx started\n", __FUNCTION__);
+		IRDA_DEBUG(3, "%s(), raw-tx started\n", __func__);
 
 		dev->tx_buff.data += ret;
 		dev->tx_buff.len -= ret;
@@ -437,7 +438,7 @@ void sirdev_write_complete(struct sir_dev *dev)
 	spin_lock_irqsave(&dev->tx_lock, flags);
 
 	IRDA_DEBUG(3, "%s() - dev->tx_buff.len = %d\n",
-		   __FUNCTION__, dev->tx_buff.len);
+		   __func__, dev->tx_buff.len);
 
 	if (likely(dev->tx_buff.len > 0)) {
 		/* Write data left in transmit buffer */
@@ -450,7 +451,7 @@ void sirdev_write_complete(struct sir_dev *dev)
 	else if (unlikely(actual<0)) {
 		/* could be dropped later when we have tx_timeout to recover */
 		IRDA_ERROR("%s: drv->do_write failed (%d)\n",
-			   __FUNCTION__, actual);
+			   __func__, actual);
 		if ((skb=dev->tx_skb) != NULL) {
 			dev->tx_skb = NULL;
 			dev_kfree_skb_any(skb);
@@ -471,7 +472,7 @@ void sirdev_write_complete(struct sir_dev *dev)
 		 * restarted when the irda-thread has completed the request.
 		 */
 
-		IRDA_DEBUG(3, "%s(), raw-tx done\n", __FUNCTION__);
+		IRDA_DEBUG(3, "%s(), raw-tx done\n", __func__);
 		dev->raw_tx = 0;
 		goto done;	/* no post-frame handling in raw mode */
 	}
@@ -488,7 +489,7 @@ void sirdev_write_complete(struct sir_dev *dev)
 	 * re-activated.
 	 */
 
-	IRDA_DEBUG(5, "%s(), finished with frame!\n", __FUNCTION__);
+	IRDA_DEBUG(5, "%s(), finished with frame!\n", __func__);
 
 	if ((skb=dev->tx_skb) != NULL) {
 		dev->tx_skb = NULL;
@@ -498,14 +499,14 @@ void sirdev_write_complete(struct sir_dev *dev)
 	}
 
 	if (unlikely(dev->new_speed > 0)) {
-		IRDA_DEBUG(5, "%s(), Changing speed!\n", __FUNCTION__);
+		IRDA_DEBUG(5, "%s(), Changing speed!\n", __func__);
 		err = sirdev_schedule_speed(dev, dev->new_speed);
 		if (unlikely(err)) {
 			/* should never happen
 			 * forget the speed change and hope the stack recovers
 			 */
 			IRDA_ERROR("%s - schedule speed change failed: %d\n",
-				   __FUNCTION__, err);
+				   __func__, err);
 			netif_wake_queue(dev->netdev);
 		}
 		/* else: success
@@ -532,13 +533,13 @@ EXPORT_SYMBOL(sirdev_write_complete);
 int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count)
 {
 	if (!dev || !dev->netdev) {
-		IRDA_WARNING("%s(), not ready yet!\n", __FUNCTION__);
+		IRDA_WARNING("%s(), not ready yet!\n", __func__);
 		return -1;
 	}
 
 	if (!dev->irlap) {
 		IRDA_WARNING("%s - too early: %p / %zd!\n",
-			     __FUNCTION__, cp, count);
+			     __func__, cp, count);
 		return -1;
 	}
@@ -548,7 +549,7 @@ int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count)
 		 */
 		irda_device_set_media_busy(dev->netdev, TRUE);
 		dev->stats.rx_dropped++;
-		IRDA_DEBUG(0, "%s; rx-drop: %zd\n", __FUNCTION__, count);
+		IRDA_DEBUG(0, "%s; rx-drop: %zd\n", __func__, count);
 		return 0;
 	}
 
@@ -600,7 +601,7 @@ static int sirdev_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
 
 	netif_stop_queue(ndev);
 
-	IRDA_DEBUG(3, "%s(), skb->len = %d\n", __FUNCTION__, skb->len);
+	IRDA_DEBUG(3, "%s(), skb->len = %d\n", __func__, skb->len);
 
 	speed = irda_get_next_speed(skb);
 	if ((speed != dev->speed) && (speed != -1)) {
@@ -637,7 +638,7 @@ static int sirdev_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
 
 	/* Check problems */
 	if(spin_is_locked(&dev->tx_lock)) {
-		IRDA_DEBUG(3, "%s(), write not completed\n", __FUNCTION__);
+		IRDA_DEBUG(3, "%s(), write not completed\n", __func__);
 	}
 
 	/* serialize with write completion */
@@ -666,7 +667,7 @@ static int sirdev_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
 	else if (unlikely(actual < 0)) {
 		/* could be dropped later when we have tx_timeout to recover */
 		IRDA_ERROR("%s: drv->do_write failed (%d)\n",
-			   __FUNCTION__, actual);
+			   __func__, actual);
 		dev_kfree_skb_any(skb);
 		dev->stats.tx_errors++;
 		dev->stats.tx_dropped++;
@@ -687,7 +688,7 @@ static int sirdev_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
 
 	IRDA_ASSERT(dev != NULL, return -1;);
 
-	IRDA_DEBUG(3, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__, ndev->name, cmd);
+	IRDA_DEBUG(3, "%s(), %s, (cmd=0x%X)\n", __func__, ndev->name, cmd);
 
 	switch (cmd) {
 	case SIOCSBANDWIDTH: /* Set bandwidth */
@@ -804,7 +805,7 @@ static int sirdev_open(struct net_device *ndev)
 	if (!try_module_get(drv->owner))
 		return -ESTALE;
 
-	IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+	IRDA_DEBUG(2, "%s()\n", __func__);
 
 	if (sirdev_alloc_buffers(dev))
 		goto errout_dec;
@@ -822,7 +823,7 @@ static int sirdev_open(struct net_device *ndev)
 
 	netif_wake_queue(ndev);
 
-	IRDA_DEBUG(2, "%s - done, speed = %d\n", __FUNCTION__, dev->speed);
+	IRDA_DEBUG(2, "%s - done, speed = %d\n", __func__, dev->speed);
 
 	return 0;
 
@@ -842,7 +843,7 @@ static int sirdev_close(struct net_device *ndev)
 	struct sir_dev *dev = ndev->priv;
 	const struct sir_driver *drv;
 
-//	IRDA_DEBUG(0, "%s\n", __FUNCTION__);
+//	IRDA_DEBUG(0, "%s\n", __func__);
 
 	netif_stop_queue(ndev);
 
@@ -878,7 +879,7 @@ struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *n
 	struct net_device *ndev;
 	struct sir_dev *dev;
 
-	IRDA_DEBUG(0, "%s - %s\n", __FUNCTION__, name);
+	IRDA_DEBUG(0, "%s - %s\n", __func__, name);
 
 	/* instead of adding tests to protect against drv->do_write==NULL
 	 * at several places we refuse to create a sir_dev instance for
@@ -892,7 +893,7 @@ struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *n
 	 */
 	ndev = alloc_irdadev(sizeof(*dev));
 	if (ndev == NULL) {
-		IRDA_ERROR("%s - Can't allocate memory for IrDA control block!\n", __FUNCTION__);
+		IRDA_ERROR("%s - Can't allocate memory for IrDA control block!\n", __func__);
 		goto out;
 	}
 	dev = ndev->priv;
@@ -921,7 +922,7 @@ struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *n
 	ndev->do_ioctl = sirdev_ioctl;
 
 	if (register_netdev(ndev)) {
-		IRDA_ERROR("%s(), register_netdev() failed!\n", __FUNCTION__);
+		IRDA_ERROR("%s(), register_netdev() failed!\n", __func__);
 		goto out_freenetdev;
 	}
 
@@ -938,7 +939,7 @@ int sirdev_put_instance(struct sir_dev *dev)
 {
 	int err = 0;
 
-	IRDA_DEBUG(0, "%s\n", __FUNCTION__);
+	IRDA_DEBUG(0, "%s\n", __func__);
 
 	atomic_set(&dev->enable_rx, 0);
 
@@ -948,7 +949,7 @@ int sirdev_put_instance(struct sir_dev *dev)
 	if (dev->dongle_drv)
 		err = sirdev_schedule_dongle_close(dev);
 	if (err)
-		IRDA_ERROR("%s - error %d\n", __FUNCTION__, err);
+		IRDA_ERROR("%s - error %d\n", __func__, err);
 
 	sirdev_close(dev->netdev);
 