author    Thomas Pugliese <thomas.pugliese@gmail.com>	2014-02-28 15:31:58 -0500
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2014-02-28 19:13:09 -0500
commit    e500d526f968f184462912334b74b80dc905fca0
tree      fc66037ff090415b02c5074edaa22de002bc2384 /drivers/usb/wusbcore
parent    5da43afc2b73795e82c4bc3e53a4a177a02637d0
usb: wusbcore: add a convenience function for completing a transfer segment
This patch adds a convenience function for the commonly performed task of
marking a transfer segment as done.  It combines three steps: setting the
segment status, incrementing the segs_done field of the transfer, and
checking whether the completed segment also completes the transfer.

Signed-off-by: Thomas Pugliese <thomas.pugliese@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
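As a sketch of the pattern being consolidated, using only the names that
appear in the diff below (the surrounding callback context is elided):

	/* Before: each completion path repeated the same three steps. */
	seg->status = WA_SEG_DONE;	/* 1. set the segment status */
	xfer->segs_done++;		/* 2. count the completed segment */
	done = __wa_xfer_is_done(xfer);	/* 3. check if the whole xfer is done */

	/* After: one call performs all three steps and returns the result
	 * of the final check. */
	done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_DONE);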
Diffstat (limited to 'drivers/usb/wusbcore')
-rw-r--r--  drivers/usb/wusbcore/wa-xfer.c | 48 +++++++++++++++++++++++++++---------------------
 1 file changed, 27 insertions(+), 21 deletions(-)
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
index 5e5343e69915..3d6b30d8520e 100644
--- a/drivers/usb/wusbcore/wa-xfer.c
+++ b/drivers/usb/wusbcore/wa-xfer.c
@@ -392,6 +392,24 @@ out:
 }
 
 /*
+ * Mark the given segment as done.  Return true if this completes the xfer.
+ * This should only be called for segs that have been submitted to an RPIPE.
+ * Delayed segs are not marked as submitted so they do not need to be marked
+ * as done when cleaning up.
+ *
+ * xfer->lock has to be locked
+ */
+static unsigned __wa_xfer_mark_seg_as_done(struct wa_xfer *xfer,
+	struct wa_seg *seg, enum wa_seg_status status)
+{
+	seg->status = status;
+	xfer->segs_done++;
+
+	/* check for done. */
+	return __wa_xfer_is_done(xfer);
+}
+
+/*
  * Search for a transfer list ID on the HCD's URB list
  *
  * For 32 bit architectures, we use the pointer itself; for 64 bits, a
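The helper must be called with xfer->lock held; a rough caller-side sketch,
assembled from the callback hunks that follow (declarations and error
handling elided):

	spin_lock_irqsave(&xfer->lock, flags);
	rpipe_ready = rpipe_avail_inc(rpipe);
	done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_DONE);
	spin_unlock_irqrestore(&xfer->lock, flags);
	if (done)
		wa_xfer_completion(xfer);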
@@ -821,12 +839,10 @@ error_default:
 		wa_reset_all(wa);
 	}
 	if (seg->status != WA_SEG_ERROR) {
-		seg->status = WA_SEG_ERROR;
 		seg->result = urb->status;
-		xfer->segs_done++;
 		__wa_xfer_abort(xfer);
 		rpipe_ready = rpipe_avail_inc(rpipe);
-		done = __wa_xfer_is_done(xfer);
+		done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_ERROR);
 	}
 	spin_unlock_irqrestore(&xfer->lock, flags);
 	if (holding_dto) {
@@ -892,12 +908,11 @@ static void wa_seg_iso_pack_desc_cb(struct urb *urb)
 		}
 		if (seg->status != WA_SEG_ERROR) {
 			usb_unlink_urb(seg->dto_urb);
-			seg->status = WA_SEG_ERROR;
 			seg->result = urb->status;
-			xfer->segs_done++;
 			__wa_xfer_abort(xfer);
 			rpipe_ready = rpipe_avail_inc(rpipe);
-			done = __wa_xfer_is_done(xfer);
+			done = __wa_xfer_mark_seg_as_done(xfer, seg,
+				WA_SEG_ERROR);
 		}
 		spin_unlock_irqrestore(&xfer->lock, flags);
 		if (done)
@@ -971,12 +986,10 @@ static void wa_seg_tr_cb(struct urb *urb)
 		}
 		usb_unlink_urb(seg->isoc_pack_desc_urb);
 		usb_unlink_urb(seg->dto_urb);
-		seg->status = WA_SEG_ERROR;
 		seg->result = urb->status;
-		xfer->segs_done++;
 		__wa_xfer_abort(xfer);
 		rpipe_ready = rpipe_avail_inc(rpipe);
-		done = __wa_xfer_is_done(xfer);
+		done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_ERROR);
 		spin_unlock_irqrestore(&xfer->lock, flags);
 		if (done)
 			wa_xfer_completion(xfer);
@@ -2285,11 +2298,9 @@ static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer,
 			goto error_submit_buf_in;
 		} else {
 			/* OUT data phase or no data, complete it -- */
-			seg->status = WA_SEG_DONE;
 			seg->result = bytes_transferred;
-			xfer->segs_done++;
 			rpipe_ready = rpipe_avail_inc(rpipe);
-			done = __wa_xfer_is_done(xfer);
+			done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_DONE);
 		}
 		spin_unlock_irqrestore(&xfer->lock, flags);
 		if (done)
@@ -2453,10 +2464,8 @@ static int wa_process_iso_packet_status(struct wahc *wa, struct urb *urb)
 		dti_busy = 1;
 	} else {
 		/* OUT transfer or no more IN data, complete it -- */
-		seg->status = WA_SEG_DONE;
-		xfer->segs_done++;
 		rpipe_ready = rpipe_avail_inc(rpipe);
-		done = __wa_xfer_is_done(xfer);
+		done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_DONE);
 	}
 	spin_unlock_irqrestore(&xfer->lock, flags);
 	wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
@@ -2547,12 +2556,11 @@ static void wa_buf_in_cb(struct urb *urb)
 			}
 		} else {
 			rpipe = xfer->ep->hcpriv;
-			seg->status = WA_SEG_DONE;
 			dev_dbg(dev, "xfer %p#%u: data in done (%zu bytes)\n",
 				xfer, seg->index, seg->result);
-			xfer->segs_done++;
 			rpipe_ready = rpipe_avail_inc(rpipe);
-			done = __wa_xfer_is_done(xfer);
+			done = __wa_xfer_mark_seg_as_done(xfer, seg,
+				WA_SEG_DONE);
 		}
 		spin_unlock_irqrestore(&xfer->lock, flags);
 		if (done)
@@ -2575,12 +2583,10 @@ static void wa_buf_in_cb(struct urb *urb)
2575 "exceeded, resetting device\n"); 2583 "exceeded, resetting device\n");
2576 wa_reset_all(wa); 2584 wa_reset_all(wa);
2577 } 2585 }
2578 seg->status = WA_SEG_ERROR;
2579 seg->result = urb->status; 2586 seg->result = urb->status;
2580 xfer->segs_done++;
2581 rpipe_ready = rpipe_avail_inc(rpipe); 2587 rpipe_ready = rpipe_avail_inc(rpipe);
2582 __wa_xfer_abort(xfer); 2588 __wa_xfer_abort(xfer);
2583 done = __wa_xfer_is_done(xfer); 2589 done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_ERROR);
2584 spin_unlock_irqrestore(&xfer->lock, flags); 2590 spin_unlock_irqrestore(&xfer->lock, flags);
2585 if (done) 2591 if (done)
2586 wa_xfer_completion(xfer); 2592 wa_xfer_completion(xfer);