diff options
author | Thomas Pugliese <thomas.pugliese@gmail.com> | 2013-09-27 16:33:36 -0400 |
---|---|---|
committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2013-09-30 21:55:04 -0400 |
commit | b9c84be60c07336e17c4af90e1313666189cbcbd (patch) | |
tree | c0881cf074d9ca1fc6580a3df0c31d0821d3bcd0 /drivers/usb/wusbcore/wa-xfer.c | |
parent | fdd160c3088f7e7de033cd31f4d11f38fc24803d (diff) |
usb: wusbcore: include the xfer_id in debug prints
Include the xfer_id in debug prints for transfers and transfer segments.
This makes it much easier to correlate debug logs to USB analyzer logs.
Signed-off-by: Thomas Pugliese <thomas.pugliese@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/usb/wusbcore/wa-xfer.c')
-rw-r--r-- | drivers/usb/wusbcore/wa-xfer.c | 94 |
1 file changed, 49 insertions, 45 deletions
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c index 6f935d575b07..3860bdf3a6c6 100644 --- a/drivers/usb/wusbcore/wa-xfer.c +++ b/drivers/usb/wusbcore/wa-xfer.c | |||
@@ -238,6 +238,31 @@ static void wa_xfer_completion(struct wa_xfer *xfer) | |||
238 | } | 238 | } |
239 | 239 | ||
240 | /* | 240 | /* |
241 | * Initialize a transfer's ID | ||
242 | * | ||
243 | * We need to use a sequential number; if we use the pointer or the | ||
244 | * hash of the pointer, it can repeat over sequential transfers and | ||
245 | * then it will confuse the HWA....wonder why in hell they put a 32 | ||
246 | * bit handle in there then. | ||
247 | */ | ||
248 | static void wa_xfer_id_init(struct wa_xfer *xfer) | ||
249 | { | ||
250 | xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count); | ||
251 | } | ||
252 | |||
253 | /* Return the xfer's ID. */ | ||
254 | static inline u32 wa_xfer_id(struct wa_xfer *xfer) | ||
255 | { | ||
256 | return xfer->id; | ||
257 | } | ||
258 | |||
259 | /* Return the xfer's ID in transport format (little endian). */ | ||
260 | static inline __le32 wa_xfer_id_le32(struct wa_xfer *xfer) | ||
261 | { | ||
262 | return cpu_to_le32(xfer->id); | ||
263 | } | ||
264 | |||
265 | /* | ||
241 | * If transfer is done, wrap it up and return true | 266 | * If transfer is done, wrap it up and return true |
242 | * | 267 | * |
243 | * xfer->lock has to be locked | 268 | * xfer->lock has to be locked |
@@ -259,8 +284,9 @@ static unsigned __wa_xfer_is_done(struct wa_xfer *xfer) | |||
259 | switch (seg->status) { | 284 | switch (seg->status) { |
260 | case WA_SEG_DONE: | 285 | case WA_SEG_DONE: |
261 | if (found_short && seg->result > 0) { | 286 | if (found_short && seg->result > 0) { |
262 | dev_dbg(dev, "xfer %p#%u: bad short segments (%zu)\n", | 287 | dev_dbg(dev, "xfer %p ID %08X#%u: bad short segments (%zu)\n", |
263 | xfer, cnt, seg->result); | 288 | xfer, wa_xfer_id(xfer), cnt, |
289 | seg->result); | ||
264 | urb->status = -EINVAL; | 290 | urb->status = -EINVAL; |
265 | goto out; | 291 | goto out; |
266 | } | 292 | } |
@@ -268,24 +294,26 @@ static unsigned __wa_xfer_is_done(struct wa_xfer *xfer) | |||
268 | if (seg->result < xfer->seg_size | 294 | if (seg->result < xfer->seg_size |
269 | && cnt != xfer->segs-1) | 295 | && cnt != xfer->segs-1) |
270 | found_short = 1; | 296 | found_short = 1; |
271 | dev_dbg(dev, "xfer %p#%u: DONE short %d " | 297 | dev_dbg(dev, "xfer %p ID %08X#%u: DONE short %d " |
272 | "result %zu urb->actual_length %d\n", | 298 | "result %zu urb->actual_length %d\n", |
273 | xfer, seg->index, found_short, seg->result, | 299 | xfer, wa_xfer_id(xfer), seg->index, found_short, |
274 | urb->actual_length); | 300 | seg->result, urb->actual_length); |
275 | break; | 301 | break; |
276 | case WA_SEG_ERROR: | 302 | case WA_SEG_ERROR: |
277 | xfer->result = seg->result; | 303 | xfer->result = seg->result; |
278 | dev_dbg(dev, "xfer %p#%u: ERROR result %zu\n", | 304 | dev_dbg(dev, "xfer %p ID %08X#%u: ERROR result %zu(0x%08X)\n", |
279 | xfer, seg->index, seg->result); | 305 | xfer, wa_xfer_id(xfer), seg->index, seg->result, |
306 | seg->result); | ||
280 | goto out; | 307 | goto out; |
281 | case WA_SEG_ABORTED: | 308 | case WA_SEG_ABORTED: |
282 | dev_dbg(dev, "xfer %p#%u ABORTED: result %d\n", | 309 | dev_dbg(dev, "xfer %p ID %08X#%u ABORTED: result %d\n", |
283 | xfer, seg->index, urb->status); | 310 | xfer, wa_xfer_id(xfer), seg->index, |
311 | urb->status); | ||
284 | xfer->result = urb->status; | 312 | xfer->result = urb->status; |
285 | goto out; | 313 | goto out; |
286 | default: | 314 | default: |
287 | dev_warn(dev, "xfer %p#%u: is_done bad state %d\n", | 315 | dev_warn(dev, "xfer %p ID %08X#%u: is_done bad state %d\n", |
288 | xfer, cnt, seg->status); | 316 | xfer, wa_xfer_id(xfer), cnt, seg->status); |
289 | xfer->result = -EINVAL; | 317 | xfer->result = -EINVAL; |
290 | goto out; | 318 | goto out; |
291 | } | 319 | } |
@@ -296,31 +324,6 @@ out: | |||
296 | } | 324 | } |
297 | 325 | ||
298 | /* | 326 | /* |
299 | * Initialize a transfer's ID | ||
300 | * | ||
301 | * We need to use a sequential number; if we use the pointer or the | ||
302 | * hash of the pointer, it can repeat over sequential transfers and | ||
303 | * then it will confuse the HWA....wonder why in hell they put a 32 | ||
304 | * bit handle in there then. | ||
305 | */ | ||
306 | static void wa_xfer_id_init(struct wa_xfer *xfer) | ||
307 | { | ||
308 | xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count); | ||
309 | } | ||
310 | |||
311 | /* Return the xfer's ID. */ | ||
312 | static inline u32 wa_xfer_id(struct wa_xfer *xfer) | ||
313 | { | ||
314 | return xfer->id; | ||
315 | } | ||
316 | |||
317 | /* Return the xfer's ID in transport format (little endian). */ | ||
318 | static inline __le32 wa_xfer_id_le32(struct wa_xfer *xfer) | ||
319 | { | ||
320 | return cpu_to_le32(xfer->id); | ||
321 | } | ||
322 | |||
323 | /* | ||
324 | * Search for a transfer list ID on the HCD's URB list | 327 | * Search for a transfer list ID on the HCD's URB list |
325 | * | 328 | * |
326 | * For 32 bit architectures, we use the pointer itself; for 64 bits, a | 329 | * For 32 bit architectures, we use the pointer itself; for 64 bits, a |
@@ -618,8 +621,9 @@ static void wa_seg_tr_cb(struct urb *urb) | |||
618 | dev = &wa->usb_iface->dev; | 621 | dev = &wa->usb_iface->dev; |
619 | rpipe = xfer->ep->hcpriv; | 622 | rpipe = xfer->ep->hcpriv; |
620 | if (printk_ratelimit()) | 623 | if (printk_ratelimit()) |
621 | dev_err(dev, "xfer %p#%u: request error %d\n", | 624 | dev_err(dev, "xfer %p ID 0x%08X#%u: request error %d\n", |
622 | xfer, seg->index, urb->status); | 625 | xfer, wa_xfer_id(xfer), seg->index, |
626 | urb->status); | ||
623 | if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS, | 627 | if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS, |
624 | EDC_ERROR_TIMEFRAME)){ | 628 | EDC_ERROR_TIMEFRAME)){ |
625 | dev_err(dev, "DTO: URB max acceptable errors " | 629 | dev_err(dev, "DTO: URB max acceptable errors " |
@@ -964,8 +968,9 @@ static void wa_xfer_delayed_run(struct wa_rpipe *rpipe) | |||
964 | list_del(&seg->list_node); | 968 | list_del(&seg->list_node); |
965 | xfer = seg->xfer; | 969 | xfer = seg->xfer; |
966 | result = __wa_seg_submit(rpipe, xfer, seg); | 970 | result = __wa_seg_submit(rpipe, xfer, seg); |
967 | dev_dbg(dev, "xfer %p#%u submitted from delayed [%d segments available] %d\n", | 971 | dev_dbg(dev, "xfer %p ID %08X#%u submitted from delayed [%d segments available] %d\n", |
968 | xfer, seg->index, atomic_read(&rpipe->segs_available), result); | 972 | xfer, wa_xfer_id(xfer), seg->index, |
973 | atomic_read(&rpipe->segs_available), result); | ||
969 | if (unlikely(result < 0)) { | 974 | if (unlikely(result < 0)) { |
970 | spin_unlock_irqrestore(&rpipe->seg_lock, flags); | 975 | spin_unlock_irqrestore(&rpipe->seg_lock, flags); |
971 | spin_lock_irqsave(&xfer->lock, flags); | 976 | spin_lock_irqsave(&xfer->lock, flags); |
@@ -1009,11 +1014,10 @@ static int __wa_xfer_submit(struct wa_xfer *xfer) | |||
1009 | available = atomic_read(&rpipe->segs_available); | 1014 | available = atomic_read(&rpipe->segs_available); |
1010 | empty = list_empty(&rpipe->seg_list); | 1015 | empty = list_empty(&rpipe->seg_list); |
1011 | seg = xfer->seg[cnt]; | 1016 | seg = xfer->seg[cnt]; |
1012 | dev_dbg(dev, "xfer %p#%u: available %u empty %u (%s)\n", | 1017 | dev_dbg(dev, "xfer %p ID 0x%08X#%u: available %u empty %u (%s)\n", |
1013 | xfer, cnt, available, empty, | 1018 | xfer, wa_xfer_id(xfer), cnt, available, empty, |
1014 | available == 0 || !empty ? "delayed" : "submitted"); | 1019 | available == 0 || !empty ? "delayed" : "submitted"); |
1015 | if (available == 0 || !empty) { | 1020 | if (available == 0 || !empty) { |
1016 | dev_dbg(dev, "xfer %p#%u: delayed\n", xfer, cnt); | ||
1017 | seg->status = WA_SEG_DELAYED; | 1021 | seg->status = WA_SEG_DELAYED; |
1018 | list_add_tail(&seg->list_node, &rpipe->seg_list); | 1022 | list_add_tail(&seg->list_node, &rpipe->seg_list); |
1019 | } else { | 1023 | } else { |
@@ -1463,8 +1467,8 @@ static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer, | |||
1463 | seg = xfer->seg[seg_idx]; | 1467 | seg = xfer->seg[seg_idx]; |
1464 | rpipe = xfer->ep->hcpriv; | 1468 | rpipe = xfer->ep->hcpriv; |
1465 | usb_status = xfer_result->bTransferStatus; | 1469 | usb_status = xfer_result->bTransferStatus; |
1466 | dev_dbg(dev, "xfer %p#%u: bTransferStatus 0x%02x (seg status %u)\n", | 1470 | dev_dbg(dev, "xfer %p ID 0x%08X#%u: bTransferStatus 0x%02x (seg status %u)\n", |
1467 | xfer, seg_idx, usb_status, seg->status); | 1471 | xfer, wa_xfer_id(xfer), seg_idx, usb_status, seg->status); |
1468 | if (seg->status == WA_SEG_ABORTED | 1472 | if (seg->status == WA_SEG_ABORTED |
1469 | || seg->status == WA_SEG_ERROR) /* already handled */ | 1473 | || seg->status == WA_SEG_ERROR) /* already handled */ |
1470 | goto segment_aborted; | 1474 | goto segment_aborted; |