diff options
author | David Vrabel <david.vrabel@csr.com> | 2008-12-22 13:22:50 -0500 |
---|---|---|
committer | David Vrabel <david.vrabel@csr.com> | 2008-12-22 13:22:50 -0500 |
commit | bce83697c5fe84a7a5d38c96fbbe43b4bc028c3e (patch) | |
tree | b8e920af66f5b4de509e95a7295cedbe42878dd6 /drivers/uwb/i1480/i1480u-wlp/tx.c | |
parent | 02f11ee181baa562df23e105ba930902f0d0b1bf (diff) |
uwb: use dev_dbg() for debug messages
Instead of the home-grown d_fnstart(), d_fnend() and d_printf() macros,
use dev_dbg() or remove the message entirely.
Signed-off-by: David Vrabel <david.vrabel@csr.com>
Diffstat (limited to 'drivers/uwb/i1480/i1480u-wlp/tx.c')
-rw-r--r-- | drivers/uwb/i1480/i1480u-wlp/tx.c | 66 |
1 file changed, 8 insertions(+), 58 deletions(-)
diff --git a/drivers/uwb/i1480/i1480u-wlp/tx.c b/drivers/uwb/i1480/i1480u-wlp/tx.c index 3426bfb68240..39032cc3503e 100644 --- a/drivers/uwb/i1480/i1480u-wlp/tx.c +++ b/drivers/uwb/i1480/i1480u-wlp/tx.c | |||
@@ -55,8 +55,6 @@ | |||
55 | */ | 55 | */ |
56 | 56 | ||
57 | #include "i1480u-wlp.h" | 57 | #include "i1480u-wlp.h" |
58 | #define D_LOCAL 5 | ||
59 | #include <linux/uwb/debug.h> | ||
60 | 58 | ||
61 | enum { | 59 | enum { |
62 | /* This is only for Next and Last TX packets */ | 60 | /* This is only for Next and Last TX packets */ |
@@ -64,7 +62,7 @@ enum { | |||
64 | - sizeof(struct untd_hdr_rst), | 62 | - sizeof(struct untd_hdr_rst), |
65 | }; | 63 | }; |
66 | 64 | ||
67 | /** Free resources allocated to a i1480u tx context. */ | 65 | /* Free resources allocated to a i1480u tx context. */ |
68 | static | 66 | static |
69 | void i1480u_tx_free(struct i1480u_tx *wtx) | 67 | void i1480u_tx_free(struct i1480u_tx *wtx) |
70 | { | 68 | { |
@@ -99,7 +97,7 @@ void i1480u_tx_unlink_urbs(struct i1480u *i1480u) | |||
99 | } | 97 | } |
100 | 98 | ||
101 | 99 | ||
102 | /** | 100 | /* |
103 | * Callback for a completed tx USB URB. | 101 | * Callback for a completed tx USB URB. |
104 | * | 102 | * |
105 | * TODO: | 103 | * TODO: |
@@ -149,8 +147,6 @@ void i1480u_tx_cb(struct urb *urb) | |||
149 | <= i1480u->tx_inflight.threshold | 147 | <= i1480u->tx_inflight.threshold |
150 | && netif_queue_stopped(net_dev) | 148 | && netif_queue_stopped(net_dev) |
151 | && i1480u->tx_inflight.threshold != 0) { | 149 | && i1480u->tx_inflight.threshold != 0) { |
152 | if (d_test(2) && printk_ratelimit()) | ||
153 | d_printf(2, dev, "Restart queue. \n"); | ||
154 | netif_start_queue(net_dev); | 150 | netif_start_queue(net_dev); |
155 | atomic_inc(&i1480u->tx_inflight.restart_count); | 151 | atomic_inc(&i1480u->tx_inflight.restart_count); |
156 | } | 152 | } |
@@ -158,7 +154,7 @@ void i1480u_tx_cb(struct urb *urb) | |||
158 | } | 154 | } |
159 | 155 | ||
160 | 156 | ||
161 | /** | 157 | /* |
162 | * Given a buffer that doesn't fit in a single fragment, create an | 158 | * Given a buffer that doesn't fit in a single fragment, create an |
163 | * scatter/gather structure for delivery to the USB pipe. | 159 | * scatter/gather structure for delivery to the USB pipe. |
164 | * | 160 | * |
@@ -253,15 +249,11 @@ int i1480u_tx_create_n(struct i1480u_tx *wtx, struct sk_buff *skb, | |||
253 | /* Now do each remaining fragment */ | 249 | /* Now do each remaining fragment */ |
254 | result = -EINVAL; | 250 | result = -EINVAL; |
255 | while (pl_size_left > 0) { | 251 | while (pl_size_left > 0) { |
256 | d_printf(5, NULL, "ITR HDR: pl_size_left %zu buf_itr %zu\n", | ||
257 | pl_size_left, buf_itr - wtx->buf); | ||
258 | if (buf_itr + sizeof(*untd_hdr_rst) - wtx->buf | 252 | if (buf_itr + sizeof(*untd_hdr_rst) - wtx->buf |
259 | > wtx->buf_size) { | 253 | > wtx->buf_size) { |
260 | printk(KERN_ERR "BUG: no space for header\n"); | 254 | printk(KERN_ERR "BUG: no space for header\n"); |
261 | goto error_bug; | 255 | goto error_bug; |
262 | } | 256 | } |
263 | d_printf(5, NULL, "ITR HDR 2: pl_size_left %zu buf_itr %zu\n", | ||
264 | pl_size_left, buf_itr - wtx->buf); | ||
265 | untd_hdr_rst = buf_itr; | 257 | untd_hdr_rst = buf_itr; |
266 | buf_itr += sizeof(*untd_hdr_rst); | 258 | buf_itr += sizeof(*untd_hdr_rst); |
267 | if (pl_size_left > i1480u_MAX_PL_SIZE) { | 259 | if (pl_size_left > i1480u_MAX_PL_SIZE) { |
@@ -271,9 +263,6 @@ int i1480u_tx_create_n(struct i1480u_tx *wtx, struct sk_buff *skb, | |||
271 | frg_pl_size = pl_size_left; | 263 | frg_pl_size = pl_size_left; |
272 | untd_hdr_set_type(&untd_hdr_rst->hdr, i1480u_PKT_FRAG_LST); | 264 | untd_hdr_set_type(&untd_hdr_rst->hdr, i1480u_PKT_FRAG_LST); |
273 | } | 265 | } |
274 | d_printf(5, NULL, | ||
275 | "ITR PL: pl_size_left %zu buf_itr %zu frg_pl_size %zu\n", | ||
276 | pl_size_left, buf_itr - wtx->buf, frg_pl_size); | ||
277 | untd_hdr_set_rx_tx(&untd_hdr_rst->hdr, 0); | 266 | untd_hdr_set_rx_tx(&untd_hdr_rst->hdr, 0); |
278 | untd_hdr_rst->hdr.len = cpu_to_le16(frg_pl_size); | 267 | untd_hdr_rst->hdr.len = cpu_to_le16(frg_pl_size); |
279 | untd_hdr_rst->padding = 0; | 268 | untd_hdr_rst->padding = 0; |
@@ -286,9 +275,6 @@ int i1480u_tx_create_n(struct i1480u_tx *wtx, struct sk_buff *skb, | |||
286 | buf_itr += frg_pl_size; | 275 | buf_itr += frg_pl_size; |
287 | pl_itr += frg_pl_size; | 276 | pl_itr += frg_pl_size; |
288 | pl_size_left -= frg_pl_size; | 277 | pl_size_left -= frg_pl_size; |
289 | d_printf(5, NULL, | ||
290 | "ITR PL 2: pl_size_left %zu buf_itr %zu frg_pl_size %zu\n", | ||
291 | pl_size_left, buf_itr - wtx->buf, frg_pl_size); | ||
292 | } | 278 | } |
293 | dev_kfree_skb_irq(skb); | 279 | dev_kfree_skb_irq(skb); |
294 | return 0; | 280 | return 0; |
@@ -308,7 +294,7 @@ error_buf_alloc: | |||
308 | } | 294 | } |
309 | 295 | ||
310 | 296 | ||
311 | /** | 297 | /* |
312 | * Given a buffer that fits in a single fragment, fill out a @wtx | 298 | * Given a buffer that fits in a single fragment, fill out a @wtx |
313 | * struct for transmitting it down the USB pipe. | 299 | * struct for transmitting it down the USB pipe. |
314 | * | 300 | * |
@@ -346,7 +332,7 @@ int i1480u_tx_create_1(struct i1480u_tx *wtx, struct sk_buff *skb, | |||
346 | } | 332 | } |
347 | 333 | ||
348 | 334 | ||
349 | /** | 335 | /* |
350 | * Given a skb to transmit, massage it to become palatable for the TX pipe | 336 | * Given a skb to transmit, massage it to become palatable for the TX pipe |
351 | * | 337 | * |
352 | * This will break the buffer in chunks smaller than | 338 | * This will break the buffer in chunks smaller than |
@@ -425,7 +411,7 @@ error_wtx_alloc: | |||
425 | return NULL; | 411 | return NULL; |
426 | } | 412 | } |
427 | 413 | ||
428 | /** | 414 | /* |
429 | * Actual fragmentation and transmission of frame | 415 | * Actual fragmentation and transmission of frame |
430 | * | 416 | * |
431 | * @wlp: WLP substack data structure | 417 | * @wlp: WLP substack data structure |
@@ -447,20 +433,12 @@ int i1480u_xmit_frame(struct wlp *wlp, struct sk_buff *skb, | |||
447 | struct i1480u_tx *wtx; | 433 | struct i1480u_tx *wtx; |
448 | struct wlp_tx_hdr *wlp_tx_hdr; | 434 | struct wlp_tx_hdr *wlp_tx_hdr; |
449 | static unsigned char dev_bcast[2] = { 0xff, 0xff }; | 435 | static unsigned char dev_bcast[2] = { 0xff, 0xff }; |
450 | #if 0 | ||
451 | int lockup = 50; | ||
452 | #endif | ||
453 | 436 | ||
454 | d_fnstart(6, dev, "(skb %p (%u), net_dev %p)\n", skb, skb->len, | ||
455 | net_dev); | ||
456 | BUG_ON(i1480u->wlp.rc == NULL); | 437 | BUG_ON(i1480u->wlp.rc == NULL); |
457 | if ((net_dev->flags & IFF_UP) == 0) | 438 | if ((net_dev->flags & IFF_UP) == 0) |
458 | goto out; | 439 | goto out; |
459 | result = -EBUSY; | 440 | result = -EBUSY; |
460 | if (atomic_read(&i1480u->tx_inflight.count) >= i1480u->tx_inflight.max) { | 441 | if (atomic_read(&i1480u->tx_inflight.count) >= i1480u->tx_inflight.max) { |
461 | if (d_test(2) && printk_ratelimit()) | ||
462 | d_printf(2, dev, "Max frames in flight " | ||
463 | "stopping queue.\n"); | ||
464 | netif_stop_queue(net_dev); | 442 | netif_stop_queue(net_dev); |
465 | goto error_max_inflight; | 443 | goto error_max_inflight; |
466 | } | 444 | } |
@@ -489,21 +467,6 @@ int i1480u_xmit_frame(struct wlp *wlp, struct sk_buff *skb, | |||
489 | wlp_tx_hdr_set_delivery_id_type(wlp_tx_hdr, i1480u->options.pca_base_priority); | 467 | wlp_tx_hdr_set_delivery_id_type(wlp_tx_hdr, i1480u->options.pca_base_priority); |
490 | } | 468 | } |
491 | 469 | ||
492 | #if 0 | ||
493 | dev_info(dev, "TX delivering skb -> USB, %zu bytes\n", skb->len); | ||
494 | dump_bytes(dev, skb->data, skb->len > 72 ? 72 : skb->len); | ||
495 | #endif | ||
496 | #if 0 | ||
497 | /* simulates a device lockup after every lockup# packets */ | ||
498 | if (lockup && ((i1480u->stats.tx_packets + 1) % lockup) == 0) { | ||
499 | /* Simulate a dropped transmit interrupt */ | ||
500 | net_dev->trans_start = jiffies; | ||
501 | netif_stop_queue(net_dev); | ||
502 | dev_err(dev, "Simulate lockup at %ld\n", jiffies); | ||
503 | return result; | ||
504 | } | ||
505 | #endif | ||
506 | |||
507 | result = usb_submit_urb(wtx->urb, GFP_ATOMIC); /* Go baby */ | 470 | result = usb_submit_urb(wtx->urb, GFP_ATOMIC); /* Go baby */ |
508 | if (result < 0) { | 471 | if (result < 0) { |
509 | dev_err(dev, "TX: cannot submit URB: %d\n", result); | 472 | dev_err(dev, "TX: cannot submit URB: %d\n", result); |
@@ -513,8 +476,6 @@ int i1480u_xmit_frame(struct wlp *wlp, struct sk_buff *skb, | |||
513 | } | 476 | } |
514 | atomic_inc(&i1480u->tx_inflight.count); | 477 | atomic_inc(&i1480u->tx_inflight.count); |
515 | net_dev->trans_start = jiffies; | 478 | net_dev->trans_start = jiffies; |
516 | d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len, | ||
517 | net_dev, result); | ||
518 | return result; | 479 | return result; |
519 | 480 | ||
520 | error_tx_urb_submit: | 481 | error_tx_urb_submit: |
@@ -522,13 +483,11 @@ error_tx_urb_submit: | |||
522 | error_wtx_alloc: | 483 | error_wtx_alloc: |
523 | error_max_inflight: | 484 | error_max_inflight: |
524 | out: | 485 | out: |
525 | d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len, | ||
526 | net_dev, result); | ||
527 | return result; | 486 | return result; |
528 | } | 487 | } |
529 | 488 | ||
530 | 489 | ||
531 | /** | 490 | /* |
532 | * Transmit an skb Called when an skbuf has to be transmitted | 491 | * Transmit an skb Called when an skbuf has to be transmitted |
533 | * | 492 | * |
534 | * The skb is first passed to WLP substack to ensure this is a valid | 493 | * The skb is first passed to WLP substack to ensure this is a valid |
@@ -551,9 +510,6 @@ int i1480u_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev) | |||
551 | struct device *dev = &i1480u->usb_iface->dev; | 510 | struct device *dev = &i1480u->usb_iface->dev; |
552 | struct uwb_dev_addr dst; | 511 | struct uwb_dev_addr dst; |
553 | 512 | ||
554 | d_fnstart(6, dev, "(skb %p (%u), net_dev %p)\n", skb, skb->len, | ||
555 | net_dev); | ||
556 | BUG_ON(i1480u->wlp.rc == NULL); | ||
557 | if ((net_dev->flags & IFF_UP) == 0) | 513 | if ((net_dev->flags & IFF_UP) == 0) |
558 | goto error; | 514 | goto error; |
559 | result = wlp_prepare_tx_frame(dev, &i1480u->wlp, skb, &dst); | 515 | result = wlp_prepare_tx_frame(dev, &i1480u->wlp, skb, &dst); |
@@ -562,31 +518,25 @@ int i1480u_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev) | |||
562 | "Dropping packet.\n", result); | 518 | "Dropping packet.\n", result); |
563 | goto error; | 519 | goto error; |
564 | } else if (result == 1) { | 520 | } else if (result == 1) { |
565 | d_printf(6, dev, "WLP will transmit frame. \n"); | ||
566 | /* trans_start time will be set when WLP actually transmits | 521 | /* trans_start time will be set when WLP actually transmits |
567 | * the frame */ | 522 | * the frame */ |
568 | goto out; | 523 | goto out; |
569 | } | 524 | } |
570 | d_printf(6, dev, "Transmitting frame. \n"); | ||
571 | result = i1480u_xmit_frame(&i1480u->wlp, skb, &dst); | 525 | result = i1480u_xmit_frame(&i1480u->wlp, skb, &dst); |
572 | if (result < 0) { | 526 | if (result < 0) { |
573 | dev_err(dev, "Frame TX failed (%d).\n", result); | 527 | dev_err(dev, "Frame TX failed (%d).\n", result); |
574 | goto error; | 528 | goto error; |
575 | } | 529 | } |
576 | d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len, | ||
577 | net_dev, result); | ||
578 | return NETDEV_TX_OK; | 530 | return NETDEV_TX_OK; |
579 | error: | 531 | error: |
580 | dev_kfree_skb_any(skb); | 532 | dev_kfree_skb_any(skb); |
581 | i1480u->stats.tx_dropped++; | 533 | i1480u->stats.tx_dropped++; |
582 | out: | 534 | out: |
583 | d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len, | ||
584 | net_dev, result); | ||
585 | return NETDEV_TX_OK; | 535 | return NETDEV_TX_OK; |
586 | } | 536 | } |
587 | 537 | ||
588 | 538 | ||
589 | /** | 539 | /* |
590 | * Called when a pkt transmission doesn't complete in a reasonable period | 540 | * Called when a pkt transmission doesn't complete in a reasonable period |
591 | * Device reset may sleep - do it outside of interrupt context (delayed) | 541 | * Device reset may sleep - do it outside of interrupt context (delayed) |
592 | */ | 542 | */ |