-rw-r--r--   drivers/usb/wusbcore/wa-hc.c   |   2
-rw-r--r--   drivers/usb/wusbcore/wa-hc.h   |  15
-rw-r--r--   drivers/usb/wusbcore/wa-xfer.c | 191
3 files changed, 134 insertions, 74 deletions
diff --git a/drivers/usb/wusbcore/wa-hc.c b/drivers/usb/wusbcore/wa-hc.c
index 368360f9a93a..252c7bd9218a 100644
--- a/drivers/usb/wusbcore/wa-hc.c
+++ b/drivers/usb/wusbcore/wa-hc.c
@@ -75,8 +75,6 @@ void __wa_destroy(struct wahc *wa)
         if (wa->dti_urb) {
                 usb_kill_urb(wa->dti_urb);
                 usb_put_urb(wa->dti_urb);
-                usb_kill_urb(wa->buf_in_urb);
-                usb_put_urb(wa->buf_in_urb);
         }
         kfree(wa->dti_buf);
         wa_nep_destroy(wa);
diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
index 7510960588dc..f2a8d29e17b9 100644
--- a/drivers/usb/wusbcore/wa-hc.h
+++ b/drivers/usb/wusbcore/wa-hc.h
@@ -125,7 +125,8 @@ struct wa_rpipe {
 
 enum wa_dti_state {
         WA_DTI_TRANSFER_RESULT_PENDING,
-        WA_DTI_ISOC_PACKET_STATUS_PENDING
+        WA_DTI_ISOC_PACKET_STATUS_PENDING,
+        WA_DTI_BUF_IN_DATA_PENDING
 };
 
 enum wa_quirks {
@@ -146,6 +147,8 @@ enum wa_vendor_specific_requests {
         WA_REQ_ALEREON_FEATURE_SET = 0x01,
         WA_REQ_ALEREON_FEATURE_CLEAR = 0x00,
 };
+
+#define WA_MAX_BUF_IN_URBS 4
 /**
  * Instance of a HWA Host Controller
  *
@@ -216,7 +219,9 @@ struct wahc {
         u32 dti_isoc_xfer_in_progress;
         u8 dti_isoc_xfer_seg;
         struct urb *dti_urb; /* URB for reading xfer results */
-        struct urb *buf_in_urb; /* URB for reading data in */
+        /* URBs for reading data in */
+        struct urb buf_in_urbs[WA_MAX_BUF_IN_URBS];
+        int active_buf_in_urbs; /* number of buf_in_urbs active. */
         struct edc dti_edc; /* DTI error density counter */
         void *dti_buf;
         size_t dti_buf_size;
@@ -286,6 +291,8 @@ static inline void wa_rpipe_init(struct wahc *wa)
 
 static inline void wa_init(struct wahc *wa)
 {
+        int index;
+
         edc_init(&wa->nep_edc);
         atomic_set(&wa->notifs_queued, 0);
         wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
@@ -299,6 +306,10 @@ static inline void wa_init(struct wahc *wa)
         INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run);
         wa->dto_in_use = 0;
         atomic_set(&wa->xfer_id_count, 1);
+        /* init the buf in URBs */
+        for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index)
+                usb_init_urb(&(wa->buf_in_urbs[index]));
+        wa->active_buf_in_urbs = 0;
 }
 
 /**
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
index f930bbb1a0ff..c8e2a47d62a7 100644
--- a/drivers/usb/wusbcore/wa-xfer.c
+++ b/drivers/usb/wusbcore/wa-xfer.c
@@ -2159,9 +2159,9 @@ static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer,
         }
 }
 
-/* Populate the wa->buf_in_urb based on the current isoc transfer state. */
-static int __wa_populate_buf_in_urb_isoc(struct wahc *wa, struct wa_xfer *xfer,
-        struct wa_seg *seg)
+/* Populate the given urb based on the current isoc transfer state. */
+static int __wa_populate_buf_in_urb_isoc(struct wahc *wa,
+        struct urb *buf_in_urb, struct wa_xfer *xfer, struct wa_seg *seg)
 {
         int urb_start_frame = seg->isoc_frame_index + seg->isoc_frame_offset;
         int seg_index, total_len = 0, urb_frame_index = urb_start_frame;
@@ -2171,7 +2171,7 @@ static int __wa_populate_buf_in_urb_isoc(struct wahc *wa, struct wa_xfer *xfer,
         int next_frame_contiguous;
         struct usb_iso_packet_descriptor *iso_frame;
 
-        BUG_ON(wa->buf_in_urb->status == -EINPROGRESS);
+        BUG_ON(buf_in_urb->status == -EINPROGRESS);
 
         /*
          * If the current frame actual_length is contiguous with the next frame
@@ -2201,68 +2201,68 @@ static int __wa_populate_buf_in_urb_isoc(struct wahc *wa, struct wa_xfer *xfer,
                 && ((iso_frame->actual_length % dti_packet_size) == 0));
 
         /* this should always be 0 before a resubmit. */
-        wa->buf_in_urb->num_mapped_sgs = 0;
-        wa->buf_in_urb->transfer_dma = xfer->urb->transfer_dma +
+        buf_in_urb->num_mapped_sgs = 0;
+        buf_in_urb->transfer_dma = xfer->urb->transfer_dma +
                 iso_frame_desc[urb_start_frame].offset;
-        wa->buf_in_urb->transfer_buffer_length = total_len;
-        wa->buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
-        wa->buf_in_urb->transfer_buffer = NULL;
-        wa->buf_in_urb->sg = NULL;
-        wa->buf_in_urb->num_sgs = 0;
-        wa->buf_in_urb->context = seg;
+        buf_in_urb->transfer_buffer_length = total_len;
+        buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+        buf_in_urb->transfer_buffer = NULL;
+        buf_in_urb->sg = NULL;
+        buf_in_urb->num_sgs = 0;
+        buf_in_urb->context = seg;
 
         /* return the number of frames included in this URB. */
         return seg_index - seg->isoc_frame_index;
 }
 
-/* Populate the wa->buf_in_urb based on the current transfer state. */
-static int wa_populate_buf_in_urb(struct wahc *wa, struct wa_xfer *xfer,
+/* Populate the given urb based on the current transfer state. */
+static int wa_populate_buf_in_urb(struct urb *buf_in_urb, struct wa_xfer *xfer,
         unsigned int seg_idx, unsigned int bytes_transferred)
 {
         int result = 0;
         struct wa_seg *seg = xfer->seg[seg_idx];
 
-        BUG_ON(wa->buf_in_urb->status == -EINPROGRESS);
+        BUG_ON(buf_in_urb->status == -EINPROGRESS);
         /* this should always be 0 before a resubmit. */
-        wa->buf_in_urb->num_mapped_sgs = 0;
+        buf_in_urb->num_mapped_sgs = 0;
 
         if (xfer->is_dma) {
-                wa->buf_in_urb->transfer_dma = xfer->urb->transfer_dma
+                buf_in_urb->transfer_dma = xfer->urb->transfer_dma
                         + (seg_idx * xfer->seg_size);
-                wa->buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
-                wa->buf_in_urb->transfer_buffer = NULL;
-                wa->buf_in_urb->sg = NULL;
-                wa->buf_in_urb->num_sgs = 0;
+                buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+                buf_in_urb->transfer_buffer = NULL;
+                buf_in_urb->sg = NULL;
+                buf_in_urb->num_sgs = 0;
         } else {
                 /* do buffer or SG processing. */
-                wa->buf_in_urb->transfer_flags &= ~URB_NO_TRANSFER_DMA_MAP;
+                buf_in_urb->transfer_flags &= ~URB_NO_TRANSFER_DMA_MAP;
 
                 if (xfer->urb->transfer_buffer) {
-                        wa->buf_in_urb->transfer_buffer =
+                        buf_in_urb->transfer_buffer =
                                 xfer->urb->transfer_buffer
                                 + (seg_idx * xfer->seg_size);
-                        wa->buf_in_urb->sg = NULL;
-                        wa->buf_in_urb->num_sgs = 0;
+                        buf_in_urb->sg = NULL;
+                        buf_in_urb->num_sgs = 0;
                 } else {
                         /* allocate an SG list to store seg_size bytes
                            and copy the subset of the xfer->urb->sg
                            that matches the buffer subset we are
                            about to read. */
-                        wa->buf_in_urb->sg = wa_xfer_create_subset_sg(
+                        buf_in_urb->sg = wa_xfer_create_subset_sg(
                                 xfer->urb->sg,
                                 seg_idx * xfer->seg_size,
                                 bytes_transferred,
-                                &(wa->buf_in_urb->num_sgs));
+                                &(buf_in_urb->num_sgs));
 
-                        if (!(wa->buf_in_urb->sg)) {
-                                wa->buf_in_urb->num_sgs = 0;
+                        if (!(buf_in_urb->sg)) {
+                                buf_in_urb->num_sgs = 0;
                                 result = -ENOMEM;
                         }
-                        wa->buf_in_urb->transfer_buffer = NULL;
+                        buf_in_urb->transfer_buffer = NULL;
                 }
         }
-        wa->buf_in_urb->transfer_buffer_length = bytes_transferred;
-        wa->buf_in_urb->context = seg;
+        buf_in_urb->transfer_buffer_length = bytes_transferred;
+        buf_in_urb->context = seg;
 
         return result;
 }
@@ -2287,6 +2287,7 @@ static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer,
         u8 usb_status;
         unsigned rpipe_ready = 0;
         unsigned bytes_transferred = le32_to_cpu(xfer_result->dwTransferLength);
+        struct urb *buf_in_urb = &(wa->buf_in_urbs[0]);
 
         spin_lock_irqsave(&xfer->lock, flags);
         seg_idx = xfer_result->bTransferSegment & 0x7f;
@@ -2337,13 +2338,16 @@ static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer,
                 && (bytes_transferred > 0)) {
                 /* IN data phase: read to buffer */
                 seg->status = WA_SEG_DTI_PENDING;
-                result = wa_populate_buf_in_urb(wa, xfer, seg_idx,
+                result = wa_populate_buf_in_urb(buf_in_urb, xfer, seg_idx,
                         bytes_transferred);
                 if (result < 0)
                         goto error_buf_in_populate;
-                result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC);
-                if (result < 0)
+                ++(wa->active_buf_in_urbs);
+                result = usb_submit_urb(buf_in_urb, GFP_ATOMIC);
+                if (result < 0) {
+                        --(wa->active_buf_in_urbs);
                         goto error_submit_buf_in;
+                }
         } else {
                 /* OUT data phase or no data, complete it -- */
                 seg->result = bytes_transferred;
@@ -2367,8 +2371,8 @@ error_submit_buf_in:
         dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
                 xfer, seg_idx, result);
         seg->result = result;
-        kfree(wa->buf_in_urb->sg);
-        wa->buf_in_urb->sg = NULL;
+        kfree(buf_in_urb->sg);
+        buf_in_urb->sg = NULL;
 error_buf_in_populate:
         __wa_xfer_abort(xfer);
         seg->status = WA_SEG_ERROR;
@@ -2477,16 +2481,16 @@ static int wa_process_iso_packet_status(struct wahc *wa, struct urb *urb)
         for (seg_index = 0; seg_index < seg->isoc_frame_count; ++seg_index) {
                 struct usb_iso_packet_descriptor *iso_frame_desc =
                         xfer->urb->iso_frame_desc;
-                const int urb_frame_index =
+                const int xfer_frame_index =
                         seg->isoc_frame_offset + seg_index;
 
-                iso_frame_desc[urb_frame_index].status =
+                iso_frame_desc[xfer_frame_index].status =
                         wa_xfer_status_to_errno(
                         le16_to_cpu(status_array[seg_index].PacketStatus));
-                iso_frame_desc[urb_frame_index].actual_length =
+                iso_frame_desc[xfer_frame_index].actual_length =
                         le16_to_cpu(status_array[seg_index].PacketLength);
                 /* track the number of frames successfully transferred. */
-                if (iso_frame_desc[urb_frame_index].actual_length > 0) {
+                if (iso_frame_desc[xfer_frame_index].actual_length > 0) {
                         /* save the starting frame index for buf_in_urb. */
                         if (!data_frame_count)
                                 first_frame_index = seg_index;
@@ -2495,21 +2499,53 @@ static int wa_process_iso_packet_status(struct wahc *wa, struct urb *urb)
         }
 
         if (xfer->is_inbound && data_frame_count) {
-                int result, urb_frame_count;
+                int result, total_frames_read = 0, urb_index = 0;
+                struct urb *buf_in_urb;
 
+                /* IN data phase: read to buffer */
+                seg->status = WA_SEG_DTI_PENDING;
+
+                /* start with the first frame with data. */
                 seg->isoc_frame_index = first_frame_index;
-                /* submit a read URB for the first frame with data. */
-                urb_frame_count = __wa_populate_buf_in_urb_isoc(wa, xfer, seg);
-                /* advance index to start of next read URB. */
-                seg->isoc_frame_index += urb_frame_count;
+                /* submit up to WA_MAX_BUF_IN_URBS read URBs. */
+                do {
+                        int urb_frame_index, urb_frame_count;
+                        struct usb_iso_packet_descriptor *iso_frame_desc;
+
+                        buf_in_urb = &(wa->buf_in_urbs[urb_index]);
+                        urb_frame_count = __wa_populate_buf_in_urb_isoc(wa,
+                                buf_in_urb, xfer, seg);
+                        /* advance frame index to start of next read URB. */
+                        seg->isoc_frame_index += urb_frame_count;
+                        total_frames_read += urb_frame_count;
+
+                        ++(wa->active_buf_in_urbs);
+                        result = usb_submit_urb(buf_in_urb, GFP_ATOMIC);
+
+                        /* skip 0-byte frames. */
+                        urb_frame_index =
+                                seg->isoc_frame_offset + seg->isoc_frame_index;
+                        iso_frame_desc =
+                                &(xfer->urb->iso_frame_desc[urb_frame_index]);
+                        while ((seg->isoc_frame_index <
+                                        seg->isoc_frame_count) &&
+                                (iso_frame_desc->actual_length == 0)) {
+                                ++(seg->isoc_frame_index);
+                                ++iso_frame_desc;
+                        }
+                        ++urb_index;
+
+                } while ((result == 0) && (urb_index < WA_MAX_BUF_IN_URBS)
+                        && (seg->isoc_frame_index <
+                                seg->isoc_frame_count));
 
-                result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC);
                 if (result < 0) {
+                        --(wa->active_buf_in_urbs);
                         dev_err(dev, "DTI Error: Could not submit buf in URB (%d)",
                                 result);
                         wa_reset_all(wa);
-                } else if (data_frame_count > urb_frame_count)
-                        /* If we need to read multiple frames, set DTI busy. */
+                } else if (data_frame_count > total_frames_read)
+                        /* If we need to read more frames, set DTI busy. */
                         dti_busy = 1;
         } else {
                 /* OUT transfer or no more IN data, complete it -- */
@@ -2517,7 +2553,10 @@ static int wa_process_iso_packet_status(struct wahc *wa, struct urb *urb)
                 done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_DONE);
         }
         spin_unlock_irqrestore(&xfer->lock, flags);
-        wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
+        if (dti_busy)
+                wa->dti_state = WA_DTI_BUF_IN_DATA_PENDING;
+        else
+                wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
         if (done)
                 wa_xfer_completion(xfer);
         if (rpipe_ready)
@@ -2551,7 +2590,7 @@ static void wa_buf_in_cb(struct urb *urb)
         struct wa_rpipe *rpipe;
         unsigned rpipe_ready = 0, isoc_data_frame_count = 0;
         unsigned long flags;
-        int resubmit_dti = 0;
+        int resubmit_dti = 0, active_buf_in_urbs;
         u8 done = 0;
 
         /* free the sg if it was used. */
@@ -2561,6 +2600,8 @@ static void wa_buf_in_cb(struct urb *urb)
         spin_lock_irqsave(&xfer->lock, flags);
         wa = xfer->wa;
         dev = &wa->usb_iface->dev;
+        --(wa->active_buf_in_urbs);
+        active_buf_in_urbs = wa->active_buf_in_urbs;
 
         if (usb_pipeisoc(xfer->urb->pipe)) {
                 struct usb_iso_packet_descriptor *iso_frame_desc =
@@ -2596,12 +2637,14 @@ static void wa_buf_in_cb(struct urb *urb)
                         int result, urb_frame_count;
 
                         /* submit a read URB for the next frame with data. */
-                        urb_frame_count = __wa_populate_buf_in_urb_isoc(wa,
+                        urb_frame_count = __wa_populate_buf_in_urb_isoc(wa, urb,
                                 xfer, seg);
                         /* advance index to start of next read URB. */
                         seg->isoc_frame_index += urb_frame_count;
-                        result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC);
+                        ++(wa->active_buf_in_urbs);
+                        result = usb_submit_urb(urb, GFP_ATOMIC);
                         if (result < 0) {
+                                --(wa->active_buf_in_urbs);
                                 dev_err(dev, "DTI Error: Could not submit buf in URB (%d)",
                                         result);
                                 wa_reset_all(wa);
@@ -2615,7 +2658,7 @@ static void wa_buf_in_cb(struct urb *urb)
                          */
                         resubmit_dti = (isoc_data_frame_count ==
                                                         urb_frame_count);
-                } else {
+                } else if (active_buf_in_urbs == 0) {
                         rpipe = xfer->ep->hcpriv;
                         dev_dbg(dev,
                                 "xfer %p 0x%08X#%u: data in done (%zu bytes)\n",
@@ -2635,7 +2678,12 @@ static void wa_buf_in_cb(struct urb *urb)
         case -ENOENT: /* as it was done by the who unlinked us */
                 break;
         default: /* Other errors ... */
-                resubmit_dti = 1;
+                /*
+                 * Error on data buf read. Only resubmit DTI if it hasn't
+                 * already been done by previously hitting this error or by a
+                 * successful completion of the previous buf_in_urb.
+                 */
+                resubmit_dti = wa->dti_state != WA_DTI_TRANSFER_RESULT_PENDING;
                 spin_lock_irqsave(&xfer->lock, flags);
                 rpipe = xfer->ep->hcpriv;
                 if (printk_ratelimit())
@@ -2650,8 +2698,11 @@ static void wa_buf_in_cb(struct urb *urb)
                 }
                 seg->result = urb->status;
                 rpipe_ready = rpipe_avail_inc(rpipe);
-                __wa_xfer_abort(xfer);
-                done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_ERROR);
+                if (active_buf_in_urbs == 0)
+                        done = __wa_xfer_mark_seg_as_done(xfer, seg,
+                                WA_SEG_ERROR);
+                else
+                        __wa_xfer_abort(xfer);
                 spin_unlock_irqrestore(&xfer->lock, flags);
                 if (done)
                         wa_xfer_completion(xfer);
@@ -2660,7 +2711,11 @@ static void wa_buf_in_cb(struct urb *urb)
         }
 
         if (resubmit_dti) {
-                int result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
+                int result;
+
+                wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
+
+                result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
                 if (result < 0) {
                         dev_err(dev, "DTI Error: Could not submit DTI URB (%d)\n",
                                 result);
@@ -2794,7 +2849,7 @@ int wa_dti_start(struct wahc *wa)
 {
         const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;
         struct device *dev = &wa->usb_iface->dev;
-        int result = -ENOMEM;
+        int result = -ENOMEM, index;
 
         if (wa->dti_urb != NULL) /* DTI URB already started */
                 goto out;
@@ -2810,15 +2865,14 @@ int wa_dti_start(struct wahc *wa)
                 wa->dti_buf, wa->dti_buf_size,
                 wa_dti_cb, wa);
 
-        wa->buf_in_urb = usb_alloc_urb(0, GFP_KERNEL);
-        if (wa->buf_in_urb == NULL) {
-                dev_err(dev, "Can't allocate BUF-IN URB\n");
-                goto error_buf_in_urb_alloc;
+        /* init the buf in URBs */
+        for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index) {
+                usb_fill_bulk_urb(
+                        &(wa->buf_in_urbs[index]), wa->usb_dev,
+                        usb_rcvbulkpipe(wa->usb_dev,
+                                0x80 | dti_epd->bEndpointAddress),
+                        NULL, 0, wa_buf_in_cb, wa);
         }
-        usb_fill_bulk_urb(
-                wa->buf_in_urb, wa->usb_dev,
-                usb_rcvbulkpipe(wa->usb_dev, 0x80 | dti_epd->bEndpointAddress),
-                NULL, 0, wa_buf_in_cb, wa);
         result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
         if (result < 0) {
                 dev_err(dev, "DTI Error: Could not submit DTI URB (%d) resetting\n",
@@ -2829,9 +2883,6 @@ out:
         return 0;
 
 error_dti_urb_submit:
-        usb_put_urb(wa->buf_in_urb);
-        wa->buf_in_urb = NULL;
-error_buf_in_urb_alloc:
         usb_put_urb(wa->dti_urb);
         wa->dti_urb = NULL;
 error_dti_urb_alloc: