aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorThomas Pugliese <thomas.pugliese@gmail.com>2013-10-04 11:40:45 -0400
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>2013-10-05 20:54:13 -0400
commit7a32d9be33e2409e19fef4434188d49d1fb1959e (patch)
tree73af5d520ad7a933393840e49fc4df874475d4a3 /drivers
parente4a49a6015efa6bd35f107640a497380d5e4ed48 (diff)
usb: wusbcore: add support for isoc out transfers
This patch adds support for isochronous out transfers to the HWA. The primary changes are: 1. Add an isoc_pack_desc_urb field to struct wa_seg. This urb is used to send the isochronous packet info message to the HWA which describes the isoc data segment(s) that will be sent as the payload of the transfer request. 2. Use the URB iso_frame_desc field to populate the isochronous packet info message and data segments sent to the HWA. 3. After the data is sent and transfer result is returned from the HWA, read the isoc packet status message from the HWA. The contents of the isoc packet status message are used to set the iso_frame_desc status and actual_length fields in the original isoc URB. This feature required the addition of some state tracking variables in struct wahc so the dti_urb knows what type of packet it expects to receive next. Signed-off-by: Thomas Pugliese <thomas.pugliese@gmail.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/usb/wusbcore/wa-hc.h15
-rw-r--r--drivers/usb/wusbcore/wa-xfer.c493
2 files changed, 406 insertions, 102 deletions
diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
index ab399343757e..b44aca3f25dd 100644
--- a/drivers/usb/wusbcore/wa-hc.h
+++ b/drivers/usb/wusbcore/wa-hc.h
@@ -122,6 +122,11 @@ struct wa_rpipe {
122}; 122};
123 123
124 124
125enum wa_dti_state {
126 WA_DTI_TRANSFER_RESULT_PENDING,
127 WA_DTI_ISOC_PACKET_STATUS_PENDING
128};
129
125/** 130/**
126 * Instance of a HWA Host Controller 131 * Instance of a HWA Host Controller
127 * 132 *
@@ -181,6 +186,15 @@ struct wahc {
181 spinlock_t rpipe_bm_lock; /* protect rpipe_bm */ 186 spinlock_t rpipe_bm_lock; /* protect rpipe_bm */
182 struct mutex rpipe_mutex; /* assigning resources to endpoints */ 187 struct mutex rpipe_mutex; /* assigning resources to endpoints */
183 188
189 /*
190 * dti_state is used to track the state of the dti_urb. When dti_state
191 * is WA_DTI_ISOC_PACKET_STATUS_PENDING, dti_isoc_xfer_in_progress and
192 * dti_isoc_xfer_seg identify which xfer the incoming isoc packet status
193 * refers to.
194 */
195 enum wa_dti_state dti_state;
196 u32 dti_isoc_xfer_in_progress;
197 u8 dti_isoc_xfer_seg;
184 struct urb *dti_urb; /* URB for reading xfer results */ 198 struct urb *dti_urb; /* URB for reading xfer results */
185 struct urb *buf_in_urb; /* URB for reading data in */ 199 struct urb *buf_in_urb; /* URB for reading data in */
186 struct edc dti_edc; /* DTI error density counter */ 200 struct edc dti_edc; /* DTI error density counter */
@@ -247,6 +261,7 @@ static inline void wa_init(struct wahc *wa)
247{ 261{
248 edc_init(&wa->nep_edc); 262 edc_init(&wa->nep_edc);
249 atomic_set(&wa->notifs_queued, 0); 263 atomic_set(&wa->notifs_queued, 0);
264 wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
250 wa_rpipe_init(wa); 265 wa_rpipe_init(wa);
251 edc_init(&wa->dti_edc); 266 edc_init(&wa->dti_edc);
252 INIT_LIST_HEAD(&wa->xfer_list); 267 INIT_LIST_HEAD(&wa->xfer_list);
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
index 13faac0ea99f..e097da30a26b 100644
--- a/drivers/usb/wusbcore/wa-xfer.c
+++ b/drivers/usb/wusbcore/wa-xfer.c
@@ -115,6 +115,7 @@ static void wa_xfer_delayed_run(struct wa_rpipe *);
115 */ 115 */
116struct wa_seg { 116struct wa_seg {
117 struct urb tr_urb; /* transfer request urb. */ 117 struct urb tr_urb; /* transfer request urb. */
118 struct urb *isoc_pack_desc_urb; /* for isoc packet descriptor. */
118 struct urb *dto_urb; /* for data output. */ 119 struct urb *dto_urb; /* for data output. */
119 struct list_head list_node; /* for rpipe->req_list */ 120 struct list_head list_node; /* for rpipe->req_list */
120 struct wa_xfer *xfer; /* out xfer */ 121 struct wa_xfer *xfer; /* out xfer */
@@ -122,7 +123,6 @@ struct wa_seg {
122 enum wa_seg_status status; 123 enum wa_seg_status status;
123 ssize_t result; /* bytes xfered or error */ 124 ssize_t result; /* bytes xfered or error */
124 struct wa_xfer_hdr xfer_hdr; 125 struct wa_xfer_hdr xfer_hdr;
125 u8 xfer_extra[]; /* xtra space for xfer_hdr_ctl */
126}; 126};
127 127
128static inline void wa_seg_init(struct wa_seg *seg) 128static inline void wa_seg_init(struct wa_seg *seg)
@@ -169,7 +169,7 @@ static inline void wa_xfer_init(struct wa_xfer *xfer)
169/* 169/*
170 * Destroy a transfer structure 170 * Destroy a transfer structure
171 * 171 *
172 * Note that freeing xfer->seg[cnt]->urb will free the containing 172 * Note that freeing xfer->seg[cnt]->tr_urb will free the containing
173 * xfer->seg[cnt] memory that was allocated by __wa_xfer_setup_segs. 173 * xfer->seg[cnt] memory that was allocated by __wa_xfer_setup_segs.
174 */ 174 */
175static void wa_xfer_destroy(struct kref *_xfer) 175static void wa_xfer_destroy(struct kref *_xfer)
@@ -178,12 +178,14 @@ static void wa_xfer_destroy(struct kref *_xfer)
178 if (xfer->seg) { 178 if (xfer->seg) {
179 unsigned cnt; 179 unsigned cnt;
180 for (cnt = 0; cnt < xfer->segs; cnt++) { 180 for (cnt = 0; cnt < xfer->segs; cnt++) {
181 if (xfer->seg[cnt]) { 181 struct wa_seg *seg = xfer->seg[cnt];
182 if (xfer->seg[cnt]->dto_urb) { 182 if (seg) {
183 kfree(xfer->seg[cnt]->dto_urb->sg); 183 usb_free_urb(seg->isoc_pack_desc_urb);
184 usb_free_urb(xfer->seg[cnt]->dto_urb); 184 if (seg->dto_urb) {
185 kfree(seg->dto_urb->sg);
186 usb_free_urb(seg->dto_urb);
185 } 187 }
186 usb_free_urb(&xfer->seg[cnt]->tr_urb); 188 usb_free_urb(&seg->tr_urb);
187 } 189 }
188 } 190 }
189 kfree(xfer->seg); 191 kfree(xfer->seg);
@@ -291,7 +293,8 @@ static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)
291 goto out; 293 goto out;
292 } 294 }
293 urb->actual_length += seg->result; 295 urb->actual_length += seg->result;
294 if (seg->result < xfer->seg_size 296 if (!(usb_pipeisoc(xfer->urb->pipe))
297 && seg->result < xfer->seg_size
295 && cnt != xfer->segs-1) 298 && cnt != xfer->segs-1)
296 found_short = 1; 299 found_short = 1;
297 dev_dbg(dev, "xfer %p ID %08X#%u: DONE short %d " 300 dev_dbg(dev, "xfer %p ID %08X#%u: DONE short %d "
@@ -429,39 +432,53 @@ static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
429 result = sizeof(struct wa_xfer_bi); 432 result = sizeof(struct wa_xfer_bi);
430 break; 433 break;
431 case USB_ENDPOINT_XFER_ISOC: 434 case USB_ENDPOINT_XFER_ISOC:
432 dev_err(dev, "FIXME: ISOC not implemented\n"); 435 if (usb_pipeout(urb->pipe)) {
433 result = -ENOSYS; 436 *pxfer_type = WA_XFER_TYPE_ISO;
434 goto error; 437 result = sizeof(struct wa_xfer_hwaiso);
438 } else {
439 dev_err(dev, "FIXME: ISOC IN not implemented\n");
440 result = -ENOSYS;
441 goto error;
442 }
443 break;
435 default: 444 default:
436 /* never happens */ 445 /* never happens */
437 BUG(); 446 BUG();
438 result = -EINVAL; /* shut gcc up */ 447 result = -EINVAL; /* shut gcc up */
439 }; 448 }
440 xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0; 449 xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0;
441 xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0; 450 xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0;
442 xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks) 451
443 * 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
444 /* Compute the segment size and make sure it is a multiple of
445 * the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of
446 * a check (FIXME) */
447 maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize); 452 maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);
448 if (xfer->seg_size < maxpktsize) { 453 if ((rpipe->descr.bmAttribute & 0x3) == USB_ENDPOINT_XFER_ISOC) {
449 dev_err(dev, "HW BUG? seg_size %zu smaller than maxpktsize " 454 xfer->seg_size = maxpktsize;
450 "%zu\n", xfer->seg_size, maxpktsize); 455 xfer->segs = urb->number_of_packets;
451 result = -EINVAL; 456 } else {
452 goto error; 457 xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
453 } 458 * 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
454 xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize; 459 /* Compute the segment size and make sure it is a multiple of
455 xfer->segs = DIV_ROUND_UP(urb->transfer_buffer_length, xfer->seg_size); 460 * the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of
456 if (xfer->segs >= WA_SEGS_MAX) { 461 * a check (FIXME) */
457 dev_err(dev, "BUG? ops, number of segments %d bigger than %d\n", 462 if (xfer->seg_size < maxpktsize) {
458 (int)(urb->transfer_buffer_length / xfer->seg_size), 463 dev_err(dev,
459 WA_SEGS_MAX); 464 "HW BUG? seg_size %zu smaller than maxpktsize %zu\n",
460 result = -EINVAL; 465 xfer->seg_size, maxpktsize);
461 goto error; 466 result = -EINVAL;
467 goto error;
468 }
469 xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
470 xfer->segs = DIV_ROUND_UP(urb->transfer_buffer_length,
471 xfer->seg_size);
472 if (xfer->segs >= WA_SEGS_MAX) {
473 dev_err(dev, "BUG? oops, number of segments %d bigger than %d\n",
474 (urb->transfer_buffer_length/xfer->seg_size),
475 WA_SEGS_MAX);
476 result = -EINVAL;
477 goto error;
478 }
479 if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
480 xfer->segs = 1;
462 } 481 }
463 if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
464 xfer->segs = 1;
465error: 482error:
466 return result; 483 return result;
467} 484}
@@ -491,8 +508,26 @@ static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
491 } 508 }
492 case WA_XFER_TYPE_BI: 509 case WA_XFER_TYPE_BI:
493 break; 510 break;
494 case WA_XFER_TYPE_ISO: 511 case WA_XFER_TYPE_ISO: {
495 printk(KERN_ERR "FIXME: ISOC not implemented\n"); 512 struct wa_xfer_hwaiso *xfer_iso =
513 container_of(xfer_hdr0, struct wa_xfer_hwaiso, hdr);
514 struct wa_xfer_packet_info_hwaiso *packet_desc =
515 ((void *)xfer_iso) + xfer_hdr_size;
516 struct usb_iso_packet_descriptor *iso_frame_desc =
517 &(xfer->urb->iso_frame_desc[0]);
518 /* populate the isoc section of the transfer request. */
519 xfer_iso->dwNumOfPackets = cpu_to_le32(1);
520 /*
521 * populate isoc packet descriptor. This assumes 1
522 * packet per segment.
523 */
524 packet_desc->wLength = cpu_to_le16(sizeof(*packet_desc) +
525 sizeof(packet_desc->PacketLength[0]));
526 packet_desc->bPacketType = WA_XFER_ISO_PACKET_INFO;
527 packet_desc->PacketLength[0] =
528 cpu_to_le16(iso_frame_desc->length);
529 break;
530 }
496 default: 531 default:
497 BUG(); 532 BUG();
498 }; 533 };
@@ -570,6 +605,72 @@ static void wa_seg_dto_cb(struct urb *urb)
570} 605}
571 606
572/* 607/*
608 * Callback for the isoc packet descriptor phase of the segment request
609 *
610 * Check wa_seg_tr_cb(); most comments also apply here because this
611 * function does almost the same thing and they work closely
612 * together.
613 *
614 * If the seg request has failed but this phase has succeeded,
615 * wa_seg_tr_cb() has already failed the segment and moved the
616 * status to WA_SEG_ERROR, so this will go through 'case 0' and
617 * effectively do nothing.
618 */
619static void wa_seg_iso_pack_desc_cb(struct urb *urb)
620{
621 struct wa_seg *seg = urb->context;
622 struct wa_xfer *xfer = seg->xfer;
623 struct wahc *wa;
624 struct device *dev;
625 struct wa_rpipe *rpipe;
626 unsigned long flags;
627 unsigned rpipe_ready = 0;
628 u8 done = 0;
629
630 switch (urb->status) {
631 case 0:
632 spin_lock_irqsave(&xfer->lock, flags);
633 wa = xfer->wa;
634 dev = &wa->usb_iface->dev;
635 dev_dbg(dev, "iso xfer %p#%u: packet descriptor done\n",
636 xfer, seg->index);
637 if (xfer->is_inbound && seg->status < WA_SEG_PENDING)
638 seg->status = WA_SEG_PENDING;
639 spin_unlock_irqrestore(&xfer->lock, flags);
640 break;
641 case -ECONNRESET: /* URB unlinked; no need to do anything */
642 case -ENOENT: /* as it was done by the who unlinked us */
643 break;
644 default: /* Other errors ... */
645 spin_lock_irqsave(&xfer->lock, flags);
646 wa = xfer->wa;
647 dev = &wa->usb_iface->dev;
648 rpipe = xfer->ep->hcpriv;
649 pr_err_ratelimited("iso xfer %p#%u: packet descriptor error %d\n",
650 xfer, seg->index, urb->status);
651 if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
652 EDC_ERROR_TIMEFRAME)){
653 dev_err(dev, "DTO: URB max acceptable errors exceeded, resetting device\n");
654 wa_reset_all(wa);
655 }
656 if (seg->status != WA_SEG_ERROR) {
657 usb_unlink_urb(seg->dto_urb);
658 seg->status = WA_SEG_ERROR;
659 seg->result = urb->status;
660 xfer->segs_done++;
661 __wa_xfer_abort(xfer);
662 rpipe_ready = rpipe_avail_inc(rpipe);
663 done = __wa_xfer_is_done(xfer);
664 }
665 spin_unlock_irqrestore(&xfer->lock, flags);
666 if (done)
667 wa_xfer_completion(xfer);
668 if (rpipe_ready)
669 wa_xfer_delayed_run(rpipe);
670 }
671}
672
673/*
573 * Callback for the segment request 674 * Callback for the segment request
574 * 675 *
575 * If successful transition state (unless already transitioned or 676 * If successful transition state (unless already transitioned or
@@ -583,7 +684,7 @@ static void wa_seg_dto_cb(struct urb *urb)
583 * We have to check before setting the status to WA_SEG_PENDING 684 * We have to check before setting the status to WA_SEG_PENDING
584 * because sometimes the xfer result callback arrives before this 685 * because sometimes the xfer result callback arrives before this
585 * callback (geeeeeeze), so it might happen that we are already in 686 * callback (geeeeeeze), so it might happen that we are already in
586 * another state. As well, we don't set it if the transfer is inbound, 687 * another state. As well, we don't set it if the transfer is not inbound,
587 * as in that case, wa_seg_dto_cb will do it when the OUT data phase 688 * as in that case, wa_seg_dto_cb will do it when the OUT data phase
588 * finishes. 689 * finishes.
589 */ 690 */
@@ -603,8 +704,11 @@ static void wa_seg_tr_cb(struct urb *urb)
603 spin_lock_irqsave(&xfer->lock, flags); 704 spin_lock_irqsave(&xfer->lock, flags);
604 wa = xfer->wa; 705 wa = xfer->wa;
605 dev = &wa->usb_iface->dev; 706 dev = &wa->usb_iface->dev;
606 dev_dbg(dev, "xfer %p#%u: request done\n", xfer, seg->index); 707 dev_dbg(dev, "xfer %p ID 0x%08X#%u: request done\n",
607 if (xfer->is_inbound && seg->status < WA_SEG_PENDING) 708 xfer, wa_xfer_id(xfer), seg->index);
709 if (xfer->is_inbound &&
710 seg->status < WA_SEG_PENDING &&
711 !(usb_pipeisoc(xfer->urb->pipe)))
608 seg->status = WA_SEG_PENDING; 712 seg->status = WA_SEG_PENDING;
609 spin_unlock_irqrestore(&xfer->lock, flags); 713 spin_unlock_irqrestore(&xfer->lock, flags);
610 break; 714 break;
@@ -626,6 +730,7 @@ static void wa_seg_tr_cb(struct urb *urb)
626 "exceeded, resetting device\n"); 730 "exceeded, resetting device\n");
627 wa_reset_all(wa); 731 wa_reset_all(wa);
628 } 732 }
733 usb_unlink_urb(seg->isoc_pack_desc_urb);
629 usb_unlink_urb(seg->dto_urb); 734 usb_unlink_urb(seg->dto_urb);
630 seg->status = WA_SEG_ERROR; 735 seg->status = WA_SEG_ERROR;
631 seg->result = urb->status; 736 seg->result = urb->status;
@@ -724,6 +829,25 @@ static struct scatterlist *wa_xfer_create_subset_sg(struct scatterlist *in_sg,
724} 829}
725 830
726/* 831/*
832 * Populate DMA buffer info for the isoc dto urb.
833 */
834static void __wa_populate_dto_urb_iso(struct wa_xfer *xfer,
835 struct wa_seg *seg, int curr_iso_frame)
836{
837 /*
838 * dto urb buffer address and size pulled from
839 * iso_frame_desc.
840 */
841 seg->dto_urb->transfer_dma = xfer->urb->transfer_dma +
842 xfer->urb->iso_frame_desc[curr_iso_frame].offset;
843 seg->dto_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
844 seg->dto_urb->sg = NULL;
845 seg->dto_urb->num_sgs = 0;
846 seg->dto_urb->transfer_buffer_length =
847 xfer->urb->iso_frame_desc[curr_iso_frame].length;
848}
849
850/*
727 * Populate buffer ptr and size, DMA buffer or SG list for the dto urb. 851 * Populate buffer ptr and size, DMA buffer or SG list for the dto urb.
728 */ 852 */
729static int __wa_populate_dto_urb(struct wa_xfer *xfer, 853static int __wa_populate_dto_urb(struct wa_xfer *xfer,
@@ -788,7 +912,7 @@ static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
788 struct usb_device *usb_dev = xfer->wa->usb_dev; 912 struct usb_device *usb_dev = xfer->wa->usb_dev;
789 const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd; 913 const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd;
790 struct wa_seg *seg; 914 struct wa_seg *seg;
791 size_t buf_itr, buf_size, buf_itr_size; 915 size_t buf_itr, buf_size, buf_itr_size, iso_pkt_descr_size = 0;
792 916
793 result = -ENOMEM; 917 result = -ENOMEM;
794 xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC); 918 xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC);
@@ -796,6 +920,17 @@ static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
796 goto error_segs_kzalloc; 920 goto error_segs_kzalloc;
797 buf_itr = 0; 921 buf_itr = 0;
798 buf_size = xfer->urb->transfer_buffer_length; 922 buf_size = xfer->urb->transfer_buffer_length;
923
924 if (usb_pipeisoc(xfer->urb->pipe)) {
925 /*
926 * This calculation assumes one isoc packet per xfer segment.
927 * It will need to be updated if this changes.
928 */
929 iso_pkt_descr_size = sizeof(struct wa_xfer_packet_info_hwaiso) +
930 sizeof(__le16);
931 alloc_size += iso_pkt_descr_size;
932 }
933
799 for (cnt = 0; cnt < xfer->segs; cnt++) { 934 for (cnt = 0; cnt < xfer->segs; cnt++) {
800 seg = xfer->seg[cnt] = kmalloc(alloc_size, GFP_ATOMIC); 935 seg = xfer->seg[cnt] = kmalloc(alloc_size, GFP_ATOMIC);
801 if (seg == NULL) 936 if (seg == NULL)
@@ -820,16 +955,40 @@ static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
820 dto_epd->bEndpointAddress), 955 dto_epd->bEndpointAddress),
821 NULL, 0, wa_seg_dto_cb, seg); 956 NULL, 0, wa_seg_dto_cb, seg);
822 957
823 /* fill in the xfer buffer information. */ 958 if (usb_pipeisoc(xfer->urb->pipe)) {
824 result = __wa_populate_dto_urb(xfer, seg, 959 /* iso packet descriptor. */
825 buf_itr, buf_itr_size); 960 seg->isoc_pack_desc_urb =
961 usb_alloc_urb(0, GFP_ATOMIC);
962 if (seg->isoc_pack_desc_urb == NULL)
963 goto error_iso_pack_desc_alloc;
964 /*
965 * The buffer for the isoc packet descriptor
966 * after the transfer request header in the
967 * segment object memory buffer.
968 */
969 usb_fill_bulk_urb(
970 seg->isoc_pack_desc_urb, usb_dev,
971 usb_sndbulkpipe(usb_dev,
972 dto_epd->bEndpointAddress),
973 (void *)(&seg->xfer_hdr) +
974 xfer_hdr_size,
975 iso_pkt_descr_size,
976 wa_seg_iso_pack_desc_cb, seg);
826 977
827 if (result < 0) 978 /* fill in the xfer buffer information. */
828 goto error_seg_outbound_populate; 979 __wa_populate_dto_urb_iso(xfer, seg, cnt);
980 } else {
981 /* fill in the xfer buffer information. */
982 result = __wa_populate_dto_urb(xfer, seg,
983 buf_itr, buf_itr_size);
984 if (result < 0)
985 goto error_seg_outbound_populate;
986
987 buf_itr += buf_itr_size;
988 buf_size -= buf_itr_size;
989 }
829 } 990 }
830 seg->status = WA_SEG_READY; 991 seg->status = WA_SEG_READY;
831 buf_itr += buf_itr_size;
832 buf_size -= buf_itr_size;
833 } 992 }
834 return 0; 993 return 0;
835 994
@@ -838,6 +997,7 @@ static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
838 * Use the fact that cnt is left at were it failed. The remaining 997 * Use the fact that cnt is left at were it failed. The remaining
839 * segments will be cleaned up by wa_xfer_destroy. 998 * segments will be cleaned up by wa_xfer_destroy.
840 */ 999 */
1000error_iso_pack_desc_alloc:
841error_seg_outbound_populate: 1001error_seg_outbound_populate:
842 usb_free_urb(xfer->seg[cnt]->dto_urb); 1002 usb_free_urb(xfer->seg[cnt]->dto_urb);
843error_dto_alloc: 1003error_dto_alloc:
@@ -881,21 +1041,50 @@ static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)
881 wa_xfer_id_init(xfer); 1041 wa_xfer_id_init(xfer);
882 __wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size); 1042 __wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size);
883 1043
884 /* Fill remainig headers */ 1044 /* Fill remaining headers */
885 xfer_hdr = xfer_hdr0; 1045 xfer_hdr = xfer_hdr0;
886 transfer_size = urb->transfer_buffer_length; 1046 if (xfer_type == WA_XFER_TYPE_ISO) {
887 xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ? 1047 xfer_hdr0->dwTransferLength =
888 xfer->seg_size : transfer_size; 1048 cpu_to_le32(xfer->urb->iso_frame_desc[0].length);
889 transfer_size -= xfer->seg_size; 1049 for (cnt = 1; cnt < xfer->segs; cnt++) {
890 for (cnt = 1; cnt < xfer->segs; cnt++) { 1050 struct usb_iso_packet_descriptor *iso_frame_desc =
891 xfer_hdr = &xfer->seg[cnt]->xfer_hdr; 1051 &(xfer->urb->iso_frame_desc[cnt]);
892 memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size); 1052 struct wa_xfer_packet_info_hwaiso *packet_desc;
893 xfer_hdr->bTransferSegment = cnt; 1053
894 xfer_hdr->dwTransferLength = transfer_size > xfer->seg_size ? 1054 xfer_hdr = &xfer->seg[cnt]->xfer_hdr;
895 cpu_to_le32(xfer->seg_size) 1055 packet_desc = ((void *)xfer_hdr) + xfer_hdr_size;
896 : cpu_to_le32(transfer_size); 1056 /*
897 xfer->seg[cnt]->status = WA_SEG_READY; 1057 * Copy values from the 0th header and isoc packet
1058 * descriptor. Segment specific values are set below.
1059 */
1060 memcpy(xfer_hdr, xfer_hdr0,
1061 xfer_hdr_size + sizeof(*packet_desc));
1062 xfer_hdr->bTransferSegment = cnt;
1063 xfer_hdr->dwTransferLength =
1064 cpu_to_le32(iso_frame_desc->length);
1065 /* populate isoc packet descriptor length. */
1066 packet_desc->PacketLength[0] =
1067 cpu_to_le16(iso_frame_desc->length);
1068
1069 xfer->seg[cnt]->status = WA_SEG_READY;
1070 }
1071 } else {
1072 transfer_size = urb->transfer_buffer_length;
1073 xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ?
1074 cpu_to_le32(xfer->seg_size) :
1075 cpu_to_le32(transfer_size);
898 transfer_size -= xfer->seg_size; 1076 transfer_size -= xfer->seg_size;
1077 for (cnt = 1; cnt < xfer->segs; cnt++) {
1078 xfer_hdr = &xfer->seg[cnt]->xfer_hdr;
1079 memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
1080 xfer_hdr->bTransferSegment = cnt;
1081 xfer_hdr->dwTransferLength =
1082 transfer_size > xfer->seg_size ?
1083 cpu_to_le32(xfer->seg_size)
1084 : cpu_to_le32(transfer_size);
1085 xfer->seg[cnt]->status = WA_SEG_READY;
1086 transfer_size -= xfer->seg_size;
1087 }
899 } 1088 }
900 xfer_hdr->bTransferSegment |= 0x80; /* this is the last segment */ 1089 xfer_hdr->bTransferSegment |= 0x80; /* this is the last segment */
901 result = 0; 1090 result = 0;
@@ -916,16 +1105,25 @@ static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
916 /* submit the transfer request. */ 1105 /* submit the transfer request. */
917 result = usb_submit_urb(&seg->tr_urb, GFP_ATOMIC); 1106 result = usb_submit_urb(&seg->tr_urb, GFP_ATOMIC);
918 if (result < 0) { 1107 if (result < 0) {
919 printk(KERN_ERR "xfer %p#%u: REQ submit failed: %d\n", 1108 pr_err("%s: xfer %p#%u: REQ submit failed: %d\n",
920 xfer, seg->index, result); 1109 __func__, xfer, seg->index, result);
921 goto error_seg_submit; 1110 goto error_seg_submit;
922 } 1111 }
1112 /* submit the isoc packet descriptor if present. */
1113 if (seg->isoc_pack_desc_urb) {
1114 result = usb_submit_urb(seg->isoc_pack_desc_urb, GFP_ATOMIC);
1115 if (result < 0) {
1116 pr_err("%s: xfer %p#%u: ISO packet descriptor submit failed: %d\n",
1117 __func__, xfer, seg->index, result);
1118 goto error_iso_pack_desc_submit;
1119 }
1120 }
923 /* submit the out data if this is an out request. */ 1121 /* submit the out data if this is an out request. */
924 if (seg->dto_urb) { 1122 if (seg->dto_urb) {
925 result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC); 1123 result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
926 if (result < 0) { 1124 if (result < 0) {
927 printk(KERN_ERR "xfer %p#%u: DTO submit failed: %d\n", 1125 pr_err("%s: xfer %p#%u: DTO submit failed: %d\n",
928 xfer, seg->index, result); 1126 __func__, xfer, seg->index, result);
929 goto error_dto_submit; 1127 goto error_dto_submit;
930 } 1128 }
931 } 1129 }
@@ -934,6 +1132,8 @@ static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
934 return 0; 1132 return 0;
935 1133
936error_dto_submit: 1134error_dto_submit:
1135 usb_unlink_urb(seg->isoc_pack_desc_urb);
1136error_iso_pack_desc_submit:
937 usb_unlink_urb(&seg->tr_urb); 1137 usb_unlink_urb(&seg->tr_urb);
938error_seg_submit: 1138error_seg_submit:
939 seg->status = WA_SEG_ERROR; 1139 seg->status = WA_SEG_ERROR;
@@ -1565,7 +1765,12 @@ static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer,
1565 /* FIXME: we ignore warnings, tally them for stats */ 1765 /* FIXME: we ignore warnings, tally them for stats */
1566 if (usb_status & 0x40) /* Warning?... */ 1766 if (usb_status & 0x40) /* Warning?... */
1567 usb_status = 0; /* ... pass */ 1767 usb_status = 0; /* ... pass */
1568 if (xfer->is_inbound) { /* IN data phase: read to buffer */ 1768 if (usb_pipeisoc(xfer->urb->pipe)) {
1769 /* set up WA state to read the isoc packet status next. */
1770 wa->dti_isoc_xfer_in_progress = wa_xfer_id(xfer);
1771 wa->dti_isoc_xfer_seg = seg_idx;
1772 wa->dti_state = WA_DTI_ISOC_PACKET_STATUS_PENDING;
1773 } else if (xfer->is_inbound) { /* IN data phase: read to buffer */
1569 seg->status = WA_SEG_DTI_PENDING; 1774 seg->status = WA_SEG_DTI_PENDING;
1570 BUG_ON(wa->buf_in_urb->status == -EINPROGRESS); 1775 BUG_ON(wa->buf_in_urb->status == -EINPROGRESS);
1571 /* this should always be 0 before a resubmit. */ 1776 /* this should always be 0 before a resubmit. */
@@ -1694,6 +1899,85 @@ segment_aborted:
1694} 1899}
1695 1900
1696/* 1901/*
1902 * Process a isochronous packet status message
1903 *
1904 * inbound transfers: need to schedule a buf_in_urb read
1905 */
1906static void wa_process_iso_packet_status(struct wahc *wa, struct urb *urb)
1907{
1908 struct device *dev = &wa->usb_iface->dev;
1909 struct wa_xfer_packet_status_hwaiso *packet_status;
1910 struct wa_xfer *xfer;
1911 unsigned long flags;
1912 struct wa_seg *seg;
1913 struct wa_rpipe *rpipe;
1914 unsigned done = 0;
1915 unsigned rpipe_ready = 0;
1916 const int expected_size = sizeof(*packet_status) +
1917 sizeof(packet_status->PacketStatus[0]);
1918
1919 /* We have a xfer result buffer; check it */
1920 dev_dbg(dev, "DTI: isoc packet status %d bytes at %p\n",
1921 urb->actual_length, urb->transfer_buffer);
1922 if (urb->actual_length != expected_size) {
1923 dev_err(dev, "DTI Error: isoc packet status--bad urb length (%d bytes vs %zu needed)\n",
1924 urb->actual_length, expected_size);
1925 goto error_parse_buffer;
1926 }
1927 packet_status = (struct wa_xfer_packet_status_hwaiso *)(wa->dti_buf);
1928 if (le16_to_cpu(packet_status->wLength) != expected_size) {
1929 dev_err(dev, "DTI Error: isoc packet status--bad length %u\n",
1930 le16_to_cpu(packet_status->wLength));
1931 goto error_parse_buffer;
1932 }
1933 if (packet_status->bPacketType != WA_XFER_ISO_PACKET_STATUS) {
1934 dev_err(dev, "DTI Error: isoc packet status--bad type 0x%02x\n",
1935 packet_status->bPacketType);
1936 goto error_parse_buffer;
1937 }
1938 xfer = wa_xfer_get_by_id(wa, wa->dti_isoc_xfer_in_progress);
1939 if (xfer == NULL) {
1940 dev_err(dev, "DTI Error: isoc packet status--unknown xfer 0x%08x\n",
1941 wa->dti_isoc_xfer_in_progress);
1942 goto error_parse_buffer;
1943 }
1944 spin_lock_irqsave(&xfer->lock, flags);
1945 if (unlikely(wa->dti_isoc_xfer_seg >= xfer->segs))
1946 goto error_bad_seg;
1947 seg = xfer->seg[wa->dti_isoc_xfer_seg];
1948 rpipe = xfer->ep->hcpriv;
1949
1950 /* set urb isoc packet status and length. */
1951 xfer->urb->iso_frame_desc[seg->index].status =
1952 wa_xfer_status_to_errno(
1953 le16_to_cpu(packet_status->PacketStatus[0].PacketStatus));
1954 xfer->urb->iso_frame_desc[seg->index].actual_length =
1955 le16_to_cpu(packet_status->PacketStatus[0].PacketLength);
1956
1957 if (!xfer->is_inbound) {
1958 /* OUT transfer, complete it -- */
1959 seg->status = WA_SEG_DONE;
1960 xfer->segs_done++;
1961 rpipe_ready = rpipe_avail_inc(rpipe);
1962 done = __wa_xfer_is_done(xfer);
1963 }
1964 spin_unlock_irqrestore(&xfer->lock, flags);
1965 wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
1966 if (done)
1967 wa_xfer_completion(xfer);
1968 if (rpipe_ready)
1969 wa_xfer_delayed_run(rpipe);
1970 wa_xfer_put(xfer);
1971 return;
1972
1973error_bad_seg:
1974 spin_unlock_irqrestore(&xfer->lock, flags);
1975 wa_xfer_put(xfer);
1976error_parse_buffer:
1977 return;
1978}
1979
1980/*
1697 * Callback for the IN data phase 1981 * Callback for the IN data phase
1698 * 1982 *
1699 * If successful transition state; otherwise, take a note of the 1983 * If successful transition state; otherwise, take a note of the
@@ -1799,51 +2083,56 @@ static void wa_dti_cb(struct urb *urb)
1799 int result; 2083 int result;
1800 struct wahc *wa = urb->context; 2084 struct wahc *wa = urb->context;
1801 struct device *dev = &wa->usb_iface->dev; 2085 struct device *dev = &wa->usb_iface->dev;
1802 struct wa_xfer_result *xfer_result;
1803 u32 xfer_id; 2086 u32 xfer_id;
1804 struct wa_xfer *xfer;
1805 u8 usb_status; 2087 u8 usb_status;
1806 2088
1807 BUG_ON(wa->dti_urb != urb); 2089 BUG_ON(wa->dti_urb != urb);
1808 switch (wa->dti_urb->status) { 2090 switch (wa->dti_urb->status) {
1809 case 0: 2091 case 0:
1810 /* We have a xfer result buffer; check it */ 2092 if (wa->dti_state == WA_DTI_TRANSFER_RESULT_PENDING) {
1811 dev_dbg(dev, "DTI: xfer result %d bytes at %p\n", 2093 struct wa_xfer_result *xfer_result;
1812 urb->actual_length, urb->transfer_buffer); 2094 struct wa_xfer *xfer;
1813 if (wa->dti_urb->actual_length != sizeof(*xfer_result)) { 2095
1814 dev_err(dev, "DTI Error: xfer result--bad size " 2096 /* We have a xfer result buffer; check it */
1815 "xfer result (%d bytes vs %zu needed)\n", 2097 dev_dbg(dev, "DTI: xfer result %d bytes at %p\n",
1816 urb->actual_length, sizeof(*xfer_result)); 2098 urb->actual_length, urb->transfer_buffer);
1817 break; 2099 if (urb->actual_length != sizeof(*xfer_result)) {
1818 } 2100 dev_err(dev, "DTI Error: xfer result--bad size xfer result (%d bytes vs %zu needed)\n",
1819 xfer_result = (struct wa_xfer_result *)(wa->dti_buf); 2101 urb->actual_length,
1820 if (xfer_result->hdr.bLength != sizeof(*xfer_result)) { 2102 sizeof(*xfer_result));
1821 dev_err(dev, "DTI Error: xfer result--" 2103 break;
1822 "bad header length %u\n", 2104 }
1823 xfer_result->hdr.bLength); 2105 xfer_result = (struct wa_xfer_result *)(wa->dti_buf);
1824 break; 2106 if (xfer_result->hdr.bLength != sizeof(*xfer_result)) {
1825 } 2107 dev_err(dev, "DTI Error: xfer result--bad header length %u\n",
1826 if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) { 2108 xfer_result->hdr.bLength);
1827 dev_err(dev, "DTI Error: xfer result--" 2109 break;
1828 "bad header type 0x%02x\n", 2110 }
1829 xfer_result->hdr.bNotifyType); 2111 if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) {
1830 break; 2112 dev_err(dev, "DTI Error: xfer result--bad header type 0x%02x\n",
1831 } 2113 xfer_result->hdr.bNotifyType);
1832 usb_status = xfer_result->bTransferStatus & 0x3f; 2114 break;
1833 if (usb_status == WA_XFER_STATUS_NOT_FOUND) 2115 }
1834 /* taken care of already */ 2116 usb_status = xfer_result->bTransferStatus & 0x3f;
1835 break; 2117 if (usb_status == WA_XFER_STATUS_NOT_FOUND)
1836 xfer_id = le32_to_cpu(xfer_result->dwTransferID); 2118 /* taken care of already */
1837 xfer = wa_xfer_get_by_id(wa, xfer_id); 2119 break;
1838 if (xfer == NULL) { 2120 xfer_id = le32_to_cpu(xfer_result->dwTransferID);
1839 /* FIXME: transaction might have been cancelled */ 2121 xfer = wa_xfer_get_by_id(wa, xfer_id);
1840 dev_err(dev, "DTI Error: xfer result--" 2122 if (xfer == NULL) {
1841 "unknown xfer 0x%08x (status 0x%02x)\n", 2123 /* FIXME: transaction not found. */
1842 xfer_id, usb_status); 2124 dev_err(dev, "DTI Error: xfer result--unknown xfer 0x%08x (status 0x%02x)\n",
1843 break; 2125 xfer_id, usb_status);
2126 break;
2127 }
2128 wa_xfer_result_chew(wa, xfer, xfer_result);
2129 wa_xfer_put(xfer);
2130 } else if (wa->dti_state == WA_DTI_ISOC_PACKET_STATUS_PENDING) {
2131 wa_process_iso_packet_status(wa, urb);
2132 } else {
2133 dev_err(dev, "DTI Error: unexpected EP state = %d\n",
2134 wa->dti_state);
1844 } 2135 }
1845 wa_xfer_result_chew(wa, xfer, xfer_result);
1846 wa_xfer_put(xfer);
1847 break; 2136 break;
1848 case -ENOENT: /* (we killed the URB)...so, no broadcast */ 2137 case -ENOENT: /* (we killed the URB)...so, no broadcast */
1849 case -ESHUTDOWN: /* going away! */ 2138 case -ESHUTDOWN: /* going away! */