author	Sjur Brændeland <sjur.brandeland@stericsson.com>	2012-06-25 03:49:38 -0400
committer	David S. Miller <davem@davemloft.net>	2012-06-25 19:44:12 -0400
commit	90de9bab9123d7fe905cb885c1e3ed32dc516f18 (patch)
tree	33c96f739602d2158e8ba0262a754f22adc1ce15
parent	a5c96b518c373fba35a8dc2e4e5fead24aeefb1c (diff)
caif-hsi: Use netdev_X instead of dev_X for printing
Replace dev_X with the corresponding netdev_X print function when applicable.

Signed-off-by: Sjur Brændeland <sjur.brandeland@stericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	drivers/net/caif/caif_hsi.c	94
1 file changed, 47 insertions(+), 47 deletions(-)
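
For context, a minimal sketch of the conversion pattern applied throughout the patch below. The helper function here is hypothetical and not part of the patch; only dev_err()/netdev_err() and struct net_device are real kernel interfaces. The netdev_X() printers take the net_device directly and tag each message with the interface name, instead of going through the embedded struct device:

#include <linux/device.h>
#include <linux/netdevice.h>

/* Hypothetical illustration only: dev_X(&ndev->dev, ...) becomes
 * netdev_X(ndev, ...). */
static void show_conversion(struct net_device *ndev, int res)
{
	/* Old style: use the struct device embedded in the net_device
	 * with the generic device printk helpers. */
	dev_err(&ndev->dev, "%s: TX error %d.\n", __func__, res);

	/* New style: hand the net_device to the netdev helpers, which
	 * also prefix the message with the interface name. */
	netdev_err(ndev, "%s: TX error %d.\n", __func__, res);
}
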
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index 56cc94120677..f7997a7b5aed 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -85,7 +85,7 @@ static void cfhsi_inactivity_tout(unsigned long arg)
 {
 	struct cfhsi *cfhsi = (struct cfhsi *)arg;
 
-	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
+	netdev_dbg(cfhsi->ndev, "%s.\n",
 		__func__);
 
 	/* Schedule power down work queue. */
@@ -181,14 +181,14 @@ static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
 	size_t fifo_occupancy;
 	int ret;
 
-	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
+	netdev_dbg(cfhsi->ndev, "%s.\n",
 		__func__);
 
 	do {
 		ret = cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
 				&fifo_occupancy);
 		if (ret) {
-			dev_warn(&cfhsi->ndev->dev,
+			netdev_warn(cfhsi->ndev,
 				"%s: can't get FIFO occupancy: %d.\n",
 				__func__, ret);
 			break;
@@ -202,7 +202,7 @@ static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
 						cfhsi->dev);
 		if (ret) {
 			clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
-			dev_warn(&cfhsi->ndev->dev,
+			netdev_warn(cfhsi->ndev,
 				"%s: can't read data: %d.\n",
 				__func__, ret);
 			break;
@@ -213,13 +213,13 @@ static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
 			 !test_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits), ret);
 
 		if (ret < 0) {
-			dev_warn(&cfhsi->ndev->dev,
+			netdev_warn(cfhsi->ndev,
 				"%s: can't wait for flush complete: %d.\n",
 				__func__, ret);
 			break;
 		} else if (!ret) {
 			ret = -ETIMEDOUT;
-			dev_warn(&cfhsi->ndev->dev,
+			netdev_warn(cfhsi->ndev,
 				"%s: timeout waiting for flush complete.\n",
 				__func__);
 			break;
@@ -348,7 +348,7 @@ static void cfhsi_start_tx(struct cfhsi *cfhsi)
 	struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
 	int len, res;
 
-	dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__);
+	netdev_dbg(cfhsi->ndev, "%s.\n", __func__);
 
 	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
 		return;
@@ -374,14 +374,14 @@ static void cfhsi_start_tx(struct cfhsi *cfhsi)
 		/* Set up new transfer. */
 		res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
 		if (WARN_ON(res < 0))
-			dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
+			netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
 				__func__, res);
 	} while (res < 0);
 }
 
 static void cfhsi_tx_done(struct cfhsi *cfhsi)
 {
-	dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__);
+	netdev_dbg(cfhsi->ndev, "%s.\n", __func__);
 
 	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
 		return;
@@ -416,7 +416,7 @@ static void cfhsi_tx_done_cb(struct cfhsi_drv *drv)
 	struct cfhsi *cfhsi;
 
 	cfhsi = container_of(drv, struct cfhsi, drv);
-	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
+	netdev_dbg(cfhsi->ndev, "%s.\n",
 		__func__);
 
 	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
@@ -433,7 +433,7 @@ static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
 
 	if ((desc->header & ~CFHSI_PIGGY_DESC) ||
 	    (desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
-		dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n",
+		netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n",
 			__func__);
 		return -EPROTO;
 	}
@@ -455,7 +455,7 @@ static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
 
 	/* Sanity check length of CAIF frame. */
 	if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
-		dev_err(&cfhsi->ndev->dev, "%s: Invalid length.\n",
+		netdev_err(cfhsi->ndev, "%s: Invalid length.\n",
 			__func__);
 		return -EPROTO;
 	}
@@ -463,7 +463,7 @@ static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
 	/* Allocate SKB (OK even in IRQ context). */
 	skb = alloc_skb(len + 1, GFP_ATOMIC);
 	if (!skb) {
-		dev_err(&cfhsi->ndev->dev, "%s: Out of memory !\n",
+		netdev_err(cfhsi->ndev, "%s: Out of memory !\n",
 			__func__);
 		return -ENOMEM;
 	}
@@ -504,7 +504,7 @@ static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
 		xfer_sz += CFHSI_DESC_SZ;
 
 	if ((xfer_sz % 4) || (xfer_sz > (CFHSI_BUF_SZ_RX - CFHSI_DESC_SZ))) {
-		dev_err(&cfhsi->ndev->dev,
+		netdev_err(cfhsi->ndev,
 			"%s: Invalid payload len: %d, ignored.\n",
 			__func__, xfer_sz);
 		return -EPROTO;
@@ -551,7 +551,7 @@ static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
 	/* Sanity check header and offset. */
 	if (WARN_ON((desc->header & ~CFHSI_PIGGY_DESC) ||
 		    (desc->offset > CFHSI_MAX_EMB_FRM_SZ))) {
-		dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n",
+		netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n",
 			__func__);
 		return -EPROTO;
 	}
@@ -585,7 +585,7 @@ static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
 
 		/* Sanity check length of CAIF frames. */
 		if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
-			dev_err(&cfhsi->ndev->dev, "%s: Invalid length.\n",
+			netdev_err(cfhsi->ndev, "%s: Invalid length.\n",
 				__func__);
 			return -EPROTO;
 		}
@@ -593,7 +593,7 @@ static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
 		/* Allocate SKB (OK even in IRQ context). */
 		skb = alloc_skb(len + 1, GFP_ATOMIC);
 		if (!skb) {
-			dev_err(&cfhsi->ndev->dev, "%s: Out of memory !\n",
+			netdev_err(cfhsi->ndev, "%s: Out of memory !\n",
 				__func__);
 			cfhsi->rx_state.nfrms = nfrms;
 			return -ENOMEM;
@@ -639,7 +639,7 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi)
 
 	desc = (struct cfhsi_desc *)cfhsi->rx_buf;
 
-	dev_dbg(&cfhsi->ndev->dev, "%s\n", __func__);
+	netdev_dbg(cfhsi->ndev, "%s\n", __func__);
 
 	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
 		return;
@@ -709,13 +709,13 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi)
 	/* Initiate next read */
 	if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) {
 		/* Set up new transfer. */
-		dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n",
+		netdev_dbg(cfhsi->ndev, "%s: Start RX.\n",
 			__func__);
 
 		res = cfhsi->dev->cfhsi_rx(rx_ptr, rx_len,
 				cfhsi->dev);
 		if (WARN_ON(res < 0)) {
-			dev_err(&cfhsi->ndev->dev, "%s: RX error %d.\n",
+			netdev_err(cfhsi->ndev, "%s: RX error %d.\n",
 				__func__, res);
 			cfhsi->ndev->stats.rx_errors++;
 			cfhsi->ndev->stats.rx_dropped++;
@@ -750,7 +750,7 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi)
 	return;
 
 out_of_sync:
-	dev_err(&cfhsi->ndev->dev, "%s: Out of sync.\n", __func__);
+	netdev_err(cfhsi->ndev, "%s: Out of sync.\n", __func__);
 	print_hex_dump_bytes("--> ", DUMP_PREFIX_NONE,
 			cfhsi->rx_buf, CFHSI_DESC_SZ);
 	schedule_work(&cfhsi->out_of_sync_work);
@@ -760,7 +760,7 @@ static void cfhsi_rx_slowpath(unsigned long arg)
 {
 	struct cfhsi *cfhsi = (struct cfhsi *)arg;
 
-	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
+	netdev_dbg(cfhsi->ndev, "%s.\n",
 		__func__);
 
 	cfhsi_rx_done(cfhsi);
@@ -771,7 +771,7 @@ static void cfhsi_rx_done_cb(struct cfhsi_drv *drv)
 	struct cfhsi *cfhsi;
 
 	cfhsi = container_of(drv, struct cfhsi, drv);
-	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
+	netdev_dbg(cfhsi->ndev, "%s.\n",
 		__func__);
 
 	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
@@ -806,7 +806,7 @@ static void cfhsi_wake_up(struct work_struct *work)
 	/* Activate wake line. */
 	cfhsi->dev->cfhsi_wake_up(cfhsi->dev);
 
-	dev_dbg(&cfhsi->ndev->dev, "%s: Start waiting.\n",
+	netdev_dbg(cfhsi->ndev, "%s: Start waiting.\n",
 		__func__);
 
 	/* Wait for acknowledge. */
@@ -816,7 +816,7 @@ static void cfhsi_wake_up(struct work_struct *work)
 					&cfhsi->bits), ret);
 	if (unlikely(ret < 0)) {
 		/* Interrupted by signal. */
-		dev_err(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n",
+		netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n",
 			__func__, ret);
 
 		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
@@ -827,14 +827,14 @@ static void cfhsi_wake_up(struct work_struct *work)
 		size_t fifo_occupancy = 0;
 
 		/* Wakeup timeout */
-		dev_dbg(&cfhsi->ndev->dev, "%s: Timeout.\n",
+		netdev_dbg(cfhsi->ndev, "%s: Timeout.\n",
 			__func__);
 
 		/* Check FIFO to check if modem has sent something. */
 		WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
 					&fifo_occupancy));
 
-		dev_dbg(&cfhsi->ndev->dev, "%s: Bytes in FIFO: %u.\n",
+		netdev_dbg(cfhsi->ndev, "%s: Bytes in FIFO: %u.\n",
 			__func__, (unsigned) fifo_occupancy);
 
 		/* Check if we misssed the interrupt. */
@@ -842,7 +842,7 @@ static void cfhsi_wake_up(struct work_struct *work)
 					&ca_wake));
 
 		if (ca_wake) {
-			dev_err(&cfhsi->ndev->dev, "%s: CA Wake missed !.\n",
+			netdev_err(cfhsi->ndev, "%s: CA Wake missed !.\n",
 				__func__);
 
 			/* Clear the CFHSI_WAKE_UP_ACK bit to prevent race. */
@@ -857,7 +857,7 @@ static void cfhsi_wake_up(struct work_struct *work)
 		return;
 	}
 wake_ack:
-	dev_dbg(&cfhsi->ndev->dev, "%s: Woken.\n",
+	netdev_dbg(cfhsi->ndev, "%s: Woken.\n",
 		__func__);
 
 	/* Clear power up bit. */
@@ -865,11 +865,11 @@ wake_ack:
 	clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
 
 	/* Resume read operation. */
-	dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n", __func__);
+	netdev_dbg(cfhsi->ndev, "%s: Start RX.\n", __func__);
 	res = cfhsi->dev->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len, cfhsi->dev);
 
 	if (WARN_ON(res < 0))
-		dev_err(&cfhsi->ndev->dev, "%s: RX err %d.\n", __func__, res);
+		netdev_err(cfhsi->ndev, "%s: RX err %d.\n", __func__, res);
 
 	/* Clear power up acknowledment. */
 	clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
@@ -878,7 +878,7 @@ wake_ack:
 
 	/* Resume transmit if queues are not empty. */
 	if (!cfhsi_tx_queue_len(cfhsi)) {
-		dev_dbg(&cfhsi->ndev->dev, "%s: Peer wake, start timer.\n",
+		netdev_dbg(cfhsi->ndev, "%s: Peer wake, start timer.\n",
 			__func__);
 		/* Start inactivity timer. */
 		mod_timer(&cfhsi->inactivity_timer,
@@ -887,7 +887,7 @@ wake_ack:
 		return;
 	}
 
-	dev_dbg(&cfhsi->ndev->dev, "%s: Host wake.\n",
+	netdev_dbg(cfhsi->ndev, "%s: Host wake.\n",
 		__func__);
 
 	spin_unlock_bh(&cfhsi->lock);
@@ -899,12 +899,12 @@ wake_ack:
 		/* Set up new transfer. */
 		res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
 		if (WARN_ON(res < 0)) {
-			dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
+			netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
 				__func__, res);
 			cfhsi_abort_tx(cfhsi);
 		}
 	} else {
-		dev_err(&cfhsi->ndev->dev,
+		netdev_err(cfhsi->ndev,
 			"%s: Failed to create HSI frame: %d.\n",
 			__func__, len);
 	}
@@ -918,7 +918,7 @@ static void cfhsi_wake_down(struct work_struct *work)
 	int retry = CFHSI_WAKE_TOUT;
 
 	cfhsi = container_of(work, struct cfhsi, wake_down_work);
-	dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__);
+	netdev_dbg(cfhsi->ndev, "%s.\n", __func__);
 
 	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
 		return;
@@ -933,20 +933,20 @@ static void cfhsi_wake_down(struct work_struct *work)
 					&cfhsi->bits), ret);
 	if (ret < 0) {
 		/* Interrupted by signal. */
-		dev_err(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n",
+		netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n",
 			__func__, ret);
 		return;
 	} else if (!ret) {
 		bool ca_wake = true;
 
 		/* Timeout */
-		dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n", __func__);
+		netdev_err(cfhsi->ndev, "%s: Timeout.\n", __func__);
 
 		/* Check if we misssed the interrupt. */
 		WARN_ON(cfhsi->dev->cfhsi_get_peer_wake(cfhsi->dev,
 					&ca_wake));
 		if (!ca_wake)
-			dev_err(&cfhsi->ndev->dev, "%s: CA Wake missed !.\n",
+			netdev_err(cfhsi->ndev, "%s: CA Wake missed !.\n",
 				__func__);
 	}
 
@@ -964,7 +964,7 @@ static void cfhsi_wake_down(struct work_struct *work)
 	}
 
 	if (!retry)
-		dev_err(&cfhsi->ndev->dev, "%s: FIFO Timeout.\n", __func__);
+		netdev_err(cfhsi->ndev, "%s: FIFO Timeout.\n", __func__);
 
 	/* Clear AWAKE condition. */
 	clear_bit(CFHSI_AWAKE, &cfhsi->bits);
@@ -990,7 +990,7 @@ static void cfhsi_wake_up_cb(struct cfhsi_drv *drv)
 	struct cfhsi *cfhsi = NULL;
 
 	cfhsi = container_of(drv, struct cfhsi, drv);
-	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
+	netdev_dbg(cfhsi->ndev, "%s.\n",
 		__func__);
 
 	set_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
@@ -1009,7 +1009,7 @@ static void cfhsi_wake_down_cb(struct cfhsi_drv *drv)
 	struct cfhsi *cfhsi = NULL;
 
 	cfhsi = container_of(drv, struct cfhsi, drv);
-	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
+	netdev_dbg(cfhsi->ndev, "%s.\n",
 		__func__);
 
 	/* Initiating low power is only permitted by the host (us). */
@@ -1021,7 +1021,7 @@ static void cfhsi_aggregation_tout(unsigned long arg)
 {
 	struct cfhsi *cfhsi = (struct cfhsi *)arg;
 
-	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
+	netdev_dbg(cfhsi->ndev, "%s.\n",
 		__func__);
 
 	cfhsi_start_tx(cfhsi);
@@ -1113,7 +1113,7 @@ static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
 		/* Set up new transfer. */
 		res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
 		if (WARN_ON(res < 0)) {
-			dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
+			netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
 				__func__, res);
 			cfhsi_abort_tx(cfhsi);
 		}
@@ -1269,7 +1269,7 @@ static int cfhsi_open(struct net_device *ndev)
 	/* Create work thread. */
 	cfhsi->wq = create_singlethread_workqueue(cfhsi->pdev->name);
 	if (!cfhsi->wq) {
-		dev_err(&cfhsi->ndev->dev, "%s: Failed to create work queue.\n",
+		netdev_err(cfhsi->ndev, "%s: Failed to create work queue.\n",
 			__func__);
 		res = -ENODEV;
 		goto err_create_wq;
@@ -1296,7 +1296,7 @@ static int cfhsi_open(struct net_device *ndev)
 	/* Activate HSI interface. */
 	res = cfhsi->dev->cfhsi_up(cfhsi->dev);
 	if (res) {
-		dev_err(&cfhsi->ndev->dev,
+		netdev_err(cfhsi->ndev,
 			"%s: can't activate HSI interface: %d.\n",
 			__func__, res);
 		goto err_activate;
@@ -1305,7 +1305,7 @@ static int cfhsi_open(struct net_device *ndev)
 	/* Flush FIFO */
 	res = cfhsi_flush_fifo(cfhsi);
 	if (res) {
-		dev_err(&cfhsi->ndev->dev, "%s: Can't flush FIFO: %d.\n",
+		netdev_err(cfhsi->ndev, "%s: Can't flush FIFO: %d.\n",
 			__func__, res);
 		goto err_net_reg;
 	}