Diffstat (limited to 'drivers/usb/host/xhci-ring.c')
-rw-r--r--	drivers/usb/host/xhci-ring.c	478
1 file changed, 145 insertions(+), 333 deletions(-)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 99b4ff42f7a0..52deae4b7eac 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -373,7 +373,11 @@ static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
 	}
 }
 
-static struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
+/* Get the right ring for the given slot_id, ep_index and stream_id.
+ * If the endpoint supports streams, boundary check the URB's stream ID.
+ * If the endpoint doesn't support streams, return the singular endpoint ring.
+ */
+struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
 		unsigned int slot_id, unsigned int ep_index,
 		unsigned int stream_id)
 {
@@ -405,17 +409,6 @@ static struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
 	return NULL;
 }
 
-/* Get the right ring for the given URB.
- * If the endpoint supports streams, boundary check the URB's stream ID.
- * If the endpoint doesn't support streams, return the singular endpoint ring.
- */
-static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
-		struct urb *urb)
-{
-	return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,
-		xhci_get_endpoint_index(&urb->ep->desc), urb->stream_id);
-}
-
 /*
  * Move the xHC's endpoint ring dequeue pointer past cur_td.
  * Record the new state of the xHC's endpoint ring dequeue segment,
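Aside: xhci_urb_to_transfer_ring() is removed from this file while xhci_triad_to_transfer_ring() above loses its static qualifier, yet the helper is still called by the bulk path below (new line 3120). Presumably this series moves it out of xhci-ring.c as a static inline (likely into xhci.h); an equivalent, reconstructed from the removed body, would be:

	static inline struct xhci_ring *xhci_urb_to_transfer_ring(
			struct xhci_hcd *xhci, struct urb *urb)
	{
		return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,
				xhci_get_endpoint_index(&urb->ep->desc),
				urb->stream_id);
	}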
@@ -1768,7 +1761,7 @@ static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
 	if (trb_comp_code == COMP_TX_ERR ||
 			trb_comp_code == COMP_BABBLE ||
 			trb_comp_code == COMP_SPLIT_ERR)
-		/* The 0.96 spec says a babbling control endpoint
+		/* The 0.95 spec says a babbling control endpoint
 		 * is not halted. The 0.96 spec says it is. Some HW
 		 * claims to be 0.95 compliant, but it halts the control
 		 * endpoint anyway. Check if a babble halted the
@@ -2938,46 +2931,55 @@ static int prepare_transfer(struct xhci_hcd *xhci,
 	return 0;
 }
 
-static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
+static unsigned int count_trbs(u64 addr, u64 len)
+{
+	unsigned int num_trbs;
+
+	num_trbs = DIV_ROUND_UP(len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
+			TRB_MAX_BUFF_SIZE);
+	if (num_trbs == 0)
+		num_trbs++;
+
+	return num_trbs;
+}
+
+static inline unsigned int count_trbs_needed(struct urb *urb)
+{
+	return count_trbs(urb->transfer_dma, urb->transfer_buffer_length);
+}
+
+static unsigned int count_sg_trbs_needed(struct urb *urb)
 {
-	int num_sgs, num_trbs, running_total, temp, i;
 	struct scatterlist *sg;
+	unsigned int i, len, full_len, num_trbs = 0;
 
-	sg = NULL;
-	num_sgs = urb->num_mapped_sgs;
-	temp = urb->transfer_buffer_length;
+	full_len = urb->transfer_buffer_length;
 
-	num_trbs = 0;
-	for_each_sg(urb->sg, sg, num_sgs, i) {
-		unsigned int len = sg_dma_len(sg);
-
-		/* Scatter gather list entries may cross 64KB boundaries */
-		running_total = TRB_MAX_BUFF_SIZE -
-			(sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1));
-		running_total &= TRB_MAX_BUFF_SIZE - 1;
-		if (running_total != 0)
-			num_trbs++;
-
-		/* How many more 64KB chunks to transfer, how many more TRBs? */
-		while (running_total < sg_dma_len(sg) && running_total < temp) {
-			num_trbs++;
-			running_total += TRB_MAX_BUFF_SIZE;
-		}
-		len = min_t(int, len, temp);
-		temp -= len;
-		if (temp == 0)
+	for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
+		len = sg_dma_len(sg);
+		num_trbs += count_trbs(sg_dma_address(sg), len);
+		len = min_t(unsigned int, len, full_len);
+		full_len -= len;
+		if (full_len == 0)
 			break;
 	}
+
 	return num_trbs;
 }
 
-static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
+static unsigned int count_isoc_trbs_needed(struct urb *urb, int i)
 {
-	if (num_trbs != 0)
-		dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
-				"TRBs, %d left\n", __func__,
-				urb->ep->desc.bEndpointAddress, num_trbs);
-	if (running_total != urb->transfer_buffer_length)
+	u64 addr, len;
+
+	addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
+	len = urb->iso_frame_desc[i].length;
+
+	return count_trbs(addr, len);
+}
+
+static void check_trb_math(struct urb *urb, int running_total)
+{
+	if (unlikely(running_total != urb->transfer_buffer_length))
 		dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
 				"queued %#x (%d), asked for %#x (%d)\n",
 				__func__,
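Aside: count_trbs() works because a single TRB buffer must not cross a 64KB boundary, so the chunk count is the transfer length plus the offset into the current 64KB window, divided (rounding up) by 64KB; a zero-length TD still needs one TRB. A stand-alone sketch of the same arithmetic in user-space C, assuming the usual TRB_MAX_BUFF_SIZE of 1 << 16 from xhci.h:

	#include <stdio.h>
	#include <stdint.h>

	#define TRB_MAX_BUFF_SIZE	(1 << 16)	/* 64KB, assumed from xhci.h */
	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	/* Mirrors count_trbs() above: 64KB-bounded chunks for one buffer */
	static unsigned int count_trbs(uint64_t addr, uint64_t len)
	{
		unsigned int num_trbs;

		num_trbs = DIV_ROUND_UP(len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
				TRB_MAX_BUFF_SIZE);
		if (num_trbs == 0)	/* zero-length TD still takes one TRB */
			num_trbs++;
		return num_trbs;
	}

	int main(void)
	{
		printf("%u\n", count_trbs(0x1000, 0x1000));	/* 1: fits one window */
		printf("%u\n", count_trbs(0xfc00, 0x1000));	/* 2: crosses a boundary */
		printf("%u\n", count_trbs(0x20000, 0));		/* 1: zero-length case */
		return 0;
	}

Folding the offset into the dividend is what lets one DIV_ROUND_UP replace the old running_total loop that stepped through 64KB windows one at a time.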
@@ -3003,26 +3005,20 @@ static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
 	xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
 }
 
-/*
- * xHCI uses normal TRBs for both bulk and interrupt.  When the interrupt
- * endpoint is to be serviced, the xHC will consume (at most) one TD.  A TD
- * (comprised of sg list entries) can take several service intervals to
- * transmit.
- */
-int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
-		struct urb *urb, int slot_id, unsigned int ep_index)
+static void check_interval(struct xhci_hcd *xhci, struct urb *urb,
+		struct xhci_ep_ctx *ep_ctx)
 {
-	struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci,
-			xhci->devs[slot_id]->out_ctx, ep_index);
 	int xhci_interval;
 	int ep_interval;
 
 	xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
 	ep_interval = urb->interval;
+
 	/* Convert to microframes */
 	if (urb->dev->speed == USB_SPEED_LOW ||
 			urb->dev->speed == USB_SPEED_FULL)
 		ep_interval *= 8;
+
 	/* FIXME change this to a warning and a suggestion to use the new API
 	 * to set the polling interval (once the API is added).
 	 */
@@ -3037,6 +3033,22 @@ int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 				urb->dev->speed == USB_SPEED_FULL)
 			urb->interval /= 8;
 	}
+}
+
+/*
+ * xHCI uses normal TRBs for both bulk and interrupt.  When the interrupt
+ * endpoint is to be serviced, the xHC will consume (at most) one TD.  A TD
+ * (comprised of sg list entries) can take several service intervals to
+ * transmit.
+ */
+int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+		struct urb *urb, int slot_id, unsigned int ep_index)
+{
+	struct xhci_ep_ctx *ep_ctx;
+
+	ep_ctx = xhci_get_ep_ctx(xhci, xhci->devs[slot_id]->out_ctx, ep_index);
+	check_interval(xhci, urb, ep_ctx);
+
 	return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
 }
 
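Aside: check_interval() only compares intervals after normalizing both to microframes: LS/FS devices express urb->interval in frames, so it is multiplied by 8, while EP_INTERVAL_TO_UFRAMES() already yields microframes. A minimal sketch of that normalization, using hypothetical plain-int inputs in place of the real urb and endpoint context:

	#include <stdio.h>
	#include <stdbool.h>

	/* Hypothetical stand-in: frames -> microframes for LS/FS devices */
	static int interval_to_uframes(int interval, bool ls_or_fs)
	{
		return ls_or_fs ? interval * 8 : interval;
	}

	int main(void)
	{
		int urb_interval = 10;	/* frames, full-speed device */
		int xhci_uframes = 64;	/* xHC rounds to a power of two: 2^6 */
		int ep_uframes = interval_to_uframes(urb_interval, true);

		if (xhci_uframes != ep_uframes)
			printf("driver wants %d microframes, xHC uses %d\n",
			       ep_uframes, xhci_uframes);
		return 0;
	}

The mismatch is expected: the xHC programs endpoint intervals as powers of two, so a requested 80 microframes lands on 64, and check_interval() rewrites urb->interval to the xHC value (converting back to frames for LS/FS devices).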
@@ -3086,44 +3098,47 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
 	return (total_packet_count - ((transferred + trb_buff_len) / maxp));
 }
 
-
-static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+/* This is very similar to what ehci-q.c qtd_fill() does */
+int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		struct urb *urb, int slot_id, unsigned int ep_index)
 {
 	struct xhci_ring *ep_ring;
-	unsigned int num_trbs;
 	struct urb_priv *urb_priv;
 	struct xhci_td *td;
-	struct scatterlist *sg;
-	int num_sgs;
-	int trb_buff_len, this_sg_len, running_total, ret;
-	unsigned int total_packet_count;
+	struct xhci_generic_trb *start_trb;
+	struct scatterlist *sg = NULL;
+	bool more_trbs_coming;
 	bool zero_length_needed;
-	bool first_trb;
-	int last_trb_num;
+	unsigned int num_trbs, last_trb_num, i;
+	unsigned int start_cycle, num_sgs = 0;
+	unsigned int running_total, block_len, trb_buff_len;
+	unsigned int full_len;
+	int ret;
+	u32 field, length_field, remainder;
 	u64 addr;
-	bool more_trbs_coming;
-
-	struct xhci_generic_trb *start_trb;
-	int start_cycle;
 
 	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
 	if (!ep_ring)
 		return -EINVAL;
 
-	num_trbs = count_sg_trbs_needed(xhci, urb);
-	num_sgs = urb->num_mapped_sgs;
-	total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
-			usb_endpoint_maxp(&urb->ep->desc));
+	/* If we have scatter/gather list, we use it. */
+	if (urb->num_sgs) {
+		num_sgs = urb->num_mapped_sgs;
+		sg = urb->sg;
+		num_trbs = count_sg_trbs_needed(urb);
+	} else
+		num_trbs = count_trbs_needed(urb);
 
 	ret = prepare_transfer(xhci, xhci->devs[slot_id],
 			ep_index, urb->stream_id,
 			num_trbs, urb, 0, mem_flags);
-	if (ret < 0)
+	if (unlikely(ret < 0))
 		return ret;
 
 	urb_priv = urb->hcpriv;
 
+	last_trb_num = num_trbs - 1;
+
 	/* Deal with URB_ZERO_PACKET - need one more td/trb */
 	zero_length_needed = urb->transfer_flags & URB_ZERO_PACKET &&
 		urb_priv->length == 2;
@@ -3133,7 +3148,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		ret = prepare_transfer(xhci, xhci->devs[slot_id],
 				ep_index, urb->stream_id,
 				1, urb, 1, mem_flags);
-		if (ret < 0)
+		if (unlikely(ret < 0))
 			return ret;
 	}
 
@@ -3147,228 +3162,58 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	start_trb = &ep_ring->enqueue->generic;
 	start_cycle = ep_ring->cycle_state;
 
+	full_len = urb->transfer_buffer_length;
 	running_total = 0;
-	/*
-	 * How much data is in the first TRB?
-	 *
-	 * There are three forces at work for TRB buffer pointers and lengths:
-	 * 1. We don't want to walk off the end of this sg-list entry buffer.
-	 * 2. The transfer length that the driver requested may be smaller than
-	 *    the amount of memory allocated for this scatter-gather list.
-	 * 3. TRBs buffers can't cross 64KB boundaries.
-	 */
-	sg = urb->sg;
-	addr = (u64) sg_dma_address(sg);
-	this_sg_len = sg_dma_len(sg);
-	trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
-	trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
-	if (trb_buff_len > urb->transfer_buffer_length)
-		trb_buff_len = urb->transfer_buffer_length;
-
-	first_trb = true;
-	last_trb_num = zero_length_needed ? 2 : 1;
-	/* Queue the first TRB, even if it's zero-length */
-	do {
-		u32 field = 0;
-		u32 length_field = 0;
-		u32 remainder = 0;
+	block_len = 0;
 
-		/* Don't change the cycle bit of the first TRB until later */
-		if (first_trb) {
-			first_trb = false;
-			if (start_cycle == 0)
-				field |= 0x1;
-		} else
-			field |= ep_ring->cycle_state;
+	/* Queue the TRBs, even if they are zero-length */
+	for (i = 0; i < num_trbs; i++) {
+		field = TRB_TYPE(TRB_NORMAL);
 
-		/* Chain all the TRBs together; clear the chain bit in the last
-		 * TRB to indicate it's the last TRB in the chain.
-		 */
-		if (num_trbs > last_trb_num) {
-			field |= TRB_CHAIN;
-		} else if (num_trbs == last_trb_num) {
-			td->last_trb = ep_ring->enqueue;
-			field |= TRB_IOC;
-		} else if (zero_length_needed && num_trbs == 1) {
-			trb_buff_len = 0;
-			urb_priv->td[1]->last_trb = ep_ring->enqueue;
-			field |= TRB_IOC;
-		}
-
-		/* Only set interrupt on short packet for IN endpoints */
-		if (usb_urb_dir_in(urb))
-			field |= TRB_ISP;
-
-		if (TRB_MAX_BUFF_SIZE -
-				(addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) {
-			xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
-			xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
-					(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
-					(unsigned int) addr + trb_buff_len);
-		}
-
-		/* Set the TRB length, TD size, and interrupter fields. */
-		remainder = xhci_td_remainder(xhci, running_total, trb_buff_len,
-					urb->transfer_buffer_length,
-					urb, num_trbs - 1);
-
-		length_field = TRB_LEN(trb_buff_len) |
-			TRB_TD_SIZE(remainder) |
-			TRB_INTR_TARGET(0);
-
-		if (num_trbs > 1)
-			more_trbs_coming = true;
-		else
-			more_trbs_coming = false;
-		queue_trb(xhci, ep_ring, more_trbs_coming,
-				lower_32_bits(addr),
-				upper_32_bits(addr),
-				length_field,
-				field | TRB_TYPE(TRB_NORMAL));
-		--num_trbs;
-		running_total += trb_buff_len;
-
-		/* Calculate length for next transfer --
-		 * Are we done queueing all the TRBs for this sg entry?
-		 */
-		this_sg_len -= trb_buff_len;
-		if (this_sg_len == 0) {
-			--num_sgs;
-			if (num_sgs == 0)
-				break;
-			sg = sg_next(sg);
-			addr = (u64) sg_dma_address(sg);
-			this_sg_len = sg_dma_len(sg);
-		} else {
-			addr += trb_buff_len;
+		if (block_len == 0) {
+			/* A new contiguous block. */
+			if (sg) {
+				addr = (u64) sg_dma_address(sg);
+				block_len = sg_dma_len(sg);
+			} else {
+				addr = (u64) urb->transfer_dma;
+				block_len = full_len;
+			}
+			/* TRB buffer should not cross 64KB boundaries */
+			trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
+			trb_buff_len = min_t(unsigned int,
+					     trb_buff_len,
+					     block_len);
+		} else {
+			/* Further through the contiguous block. */
+			trb_buff_len = block_len;
+			if (trb_buff_len > TRB_MAX_BUFF_SIZE)
+				trb_buff_len = TRB_MAX_BUFF_SIZE;
 		}
 
-		trb_buff_len = TRB_MAX_BUFF_SIZE -
-			(addr & (TRB_MAX_BUFF_SIZE - 1));
-		trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
-		if (running_total + trb_buff_len > urb->transfer_buffer_length)
-			trb_buff_len =
-				urb->transfer_buffer_length - running_total;
-	} while (num_trbs > 0);
-
-	check_trb_math(urb, num_trbs, running_total);
-	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
-			start_cycle, start_trb);
-	return 0;
-}
-
-/* This is very similar to what ehci-q.c qtd_fill() does */
-int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
-		struct urb *urb, int slot_id, unsigned int ep_index)
-{
-	struct xhci_ring *ep_ring;
-	struct urb_priv *urb_priv;
-	struct xhci_td *td;
-	int num_trbs;
-	struct xhci_generic_trb *start_trb;
-	bool first_trb;
-	int last_trb_num;
-	bool more_trbs_coming;
-	bool zero_length_needed;
-	int start_cycle;
-	u32 field, length_field;
-
-	int running_total, trb_buff_len, ret;
-	unsigned int total_packet_count;
-	u64 addr;
-
-	if (urb->num_sgs)
-		return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index);
-
-	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
-	if (!ep_ring)
-		return -EINVAL;
-
-	num_trbs = 0;
-	/* How much data is (potentially) left before the 64KB boundary? */
-	running_total = TRB_MAX_BUFF_SIZE -
-		(urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
-	running_total &= TRB_MAX_BUFF_SIZE - 1;
-
-	/* If there's some data on this 64KB chunk, or we have to send a
-	 * zero-length transfer, we need at least one TRB
-	 */
-	if (running_total != 0 || urb->transfer_buffer_length == 0)
-		num_trbs++;
-	/* How many more 64KB chunks to transfer, how many more TRBs? */
-	while (running_total < urb->transfer_buffer_length) {
-		num_trbs++;
-		running_total += TRB_MAX_BUFF_SIZE;
-	}
-
-	ret = prepare_transfer(xhci, xhci->devs[slot_id],
-			ep_index, urb->stream_id,
-			num_trbs, urb, 0, mem_flags);
-	if (ret < 0)
-		return ret;
-
-	urb_priv = urb->hcpriv;
-
-	/* Deal with URB_ZERO_PACKET - need one more td/trb */
-	zero_length_needed = urb->transfer_flags & URB_ZERO_PACKET &&
-		urb_priv->length == 2;
-	if (zero_length_needed) {
-		num_trbs++;
-		xhci_dbg(xhci, "Creating zero length td.\n");
-		ret = prepare_transfer(xhci, xhci->devs[slot_id],
-				ep_index, urb->stream_id,
-				1, urb, 1, mem_flags);
-		if (ret < 0)
-			return ret;
-	}
-
-	td = urb_priv->td[0];
-
-	/*
-	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
-	 * until we've finished creating all the other TRBs.  The ring's cycle
-	 * state may change as we enqueue the other TRBs, so save it too.
-	 */
-	start_trb = &ep_ring->enqueue->generic;
-	start_cycle = ep_ring->cycle_state;
-
-	running_total = 0;
-	total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
-			usb_endpoint_maxp(&urb->ep->desc));
-	/* How much data is in the first TRB? */
-	addr = (u64) urb->transfer_dma;
-	trb_buff_len = TRB_MAX_BUFF_SIZE -
-		(urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
-	if (trb_buff_len > urb->transfer_buffer_length)
-		trb_buff_len = urb->transfer_buffer_length;
-
-	first_trb = true;
-	last_trb_num = zero_length_needed ? 2 : 1;
-	/* Queue the first TRB, even if it's zero-length */
-	do {
-		u32 remainder = 0;
-		field = 0;
+		if (running_total + trb_buff_len > full_len)
+			trb_buff_len = full_len - running_total;
 
 		/* Don't change the cycle bit of the first TRB until later */
-		if (first_trb) {
-			first_trb = false;
+		if (i == 0) {
 			if (start_cycle == 0)
-				field |= 0x1;
+				field |= TRB_CYCLE;
 		} else
 			field |= ep_ring->cycle_state;
 
 		/* Chain all the TRBs together; clear the chain bit in the last
 		 * TRB to indicate it's the last TRB in the chain.
 		 */
-		if (num_trbs > last_trb_num) {
+		if (i < last_trb_num) {
 			field |= TRB_CHAIN;
-		} else if (num_trbs == last_trb_num) {
-			td->last_trb = ep_ring->enqueue;
-			field |= TRB_IOC;
-		} else if (zero_length_needed && num_trbs == 1) {
-			trb_buff_len = 0;
-			urb_priv->td[1]->last_trb = ep_ring->enqueue;
+		} else {
 			field |= TRB_IOC;
+			if (i == last_trb_num)
+				td->last_trb = ep_ring->enqueue;
+			else if (zero_length_needed) {
+				trb_buff_len = 0;
+				urb_priv->td[1]->last_trb = ep_ring->enqueue;
+			}
 		}
 
 		/* Only set interrupt on short packet for IN endpoints */
@@ -3376,15 +3221,15 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 			field |= TRB_ISP;
 
 		/* Set the TRB length, TD size, and interrupter fields. */
-		remainder = xhci_td_remainder(xhci, running_total, trb_buff_len,
-					urb->transfer_buffer_length,
-					urb, num_trbs - 1);
+		remainder = xhci_td_remainder(xhci, running_total,
+					trb_buff_len, full_len,
+					urb, num_trbs - i - 1);
 
 		length_field = TRB_LEN(trb_buff_len) |
 			TRB_TD_SIZE(remainder) |
 			TRB_INTR_TARGET(0);
 
-		if (num_trbs > 1)
+		if (i < num_trbs - 1)
 			more_trbs_coming = true;
 		else
 			more_trbs_coming = false;
@@ -3392,18 +3237,24 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 				lower_32_bits(addr),
 				upper_32_bits(addr),
 				length_field,
-				field | TRB_TYPE(TRB_NORMAL));
-		--num_trbs;
-		running_total += trb_buff_len;
+				field);
 
-		/* Calculate length for next transfer */
+		running_total += trb_buff_len;
 		addr += trb_buff_len;
-		trb_buff_len = urb->transfer_buffer_length - running_total;
-		if (trb_buff_len > TRB_MAX_BUFF_SIZE)
-			trb_buff_len = TRB_MAX_BUFF_SIZE;
-	} while (num_trbs > 0);
+		block_len -= trb_buff_len;
+
+		if (sg) {
+			if (block_len == 0) {
+				/* New sg entry */
+				--num_sgs;
+				if (num_sgs == 0)
+					break;
+				sg = sg_next(sg);
+			}
+		}
+	}
 
-	check_trb_math(urb, num_trbs, running_total);
+	check_trb_math(urb, running_total);
 	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
 			start_cycle, start_trb);
 	return 0;
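Aside: the rewritten loop replaces two near-duplicate queuing functions with a single walk that treats the no-sg case as one contiguous block. A compact user-space sketch of the splitting rule for a single block (the sg path simply reloads addr/block_len from the next entry); the TRB_BUFF_LEN_UP_TO_BOUNDARY() definition here is inferred from the open-coded expression it replaces, and the real code still queues one zero-length TRB, a case this sketch skips:

	#include <stdio.h>
	#include <stdint.h>

	#define TRB_MAX_BUFF_SIZE	(1 << 16)
	/* assumed definition, inferred from the code this patch replaces */
	#define TRB_BUFF_LEN_UP_TO_BOUNDARY(addr) \
		(TRB_MAX_BUFF_SIZE - ((addr) & (TRB_MAX_BUFF_SIZE - 1)))

	/* Print the (addr, len) pair each TRB would get for one block */
	static void walk_block(uint64_t addr, unsigned int block_len,
			       unsigned int full_len)
	{
		unsigned int running_total = 0, trb_buff_len;

		while (running_total < full_len) {
			/* never cross a 64KB boundary ... */
			trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
			/* ... never run past the block ... */
			if (trb_buff_len > block_len)
				trb_buff_len = block_len;
			/* ... and never queue more than the URB asked for */
			if (running_total + trb_buff_len > full_len)
				trb_buff_len = full_len - running_total;
			printf("TRB: addr=%#llx len=%u\n",
			       (unsigned long long)addr, trb_buff_len);
			running_total += trb_buff_len;
			addr += trb_buff_len;
			block_len -= trb_buff_len;
		}
	}

	int main(void)
	{
		/* 200KB block starting 4KB below a 64KB boundary:
		 * expect 4KB + 64KB + 64KB + 64KB + 4KB */
		walk_block(0xf000, 200 * 1024, 200 * 1024);
		return 0;
	}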
@@ -3532,23 +3383,6 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	return 0;
 }
 
-static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
-		struct urb *urb, int i)
-{
-	int num_trbs = 0;
-	u64 addr, td_len;
-
-	addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
-	td_len = urb->iso_frame_desc[i].length;
-
-	num_trbs = DIV_ROUND_UP(td_len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
-			TRB_MAX_BUFF_SIZE);
-	if (num_trbs == 0)
-		num_trbs++;
-
-	return num_trbs;
-}
-
 /*
  * The transfer burst count field of the isochronous TRB defines the number of
  * bursts that are required to move all packets in this TD.  Only SuperSpeed
@@ -3746,7 +3580,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		last_burst_pkt_count = xhci_get_last_burst_packet_count(xhci,
 							urb, total_pkt_count);
 
-		trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);
+		trbs_per_td = count_isoc_trbs_needed(urb, i);
 
 		ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
 				urb->stream_id, trbs_per_td, urb, i, mem_flags);
@@ -3807,8 +3641,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 			field |= TRB_BEI;
 		}
 		/* Calculate TRB length */
-		trb_buff_len = TRB_MAX_BUFF_SIZE -
-			(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+		trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
 		if (trb_buff_len > td_remain_len)
 			trb_buff_len = td_remain_len;
 
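Aside: the old isoc expression spelled the mask with (1 << TRB_MAX_BUFF_SHIFT) where the bulk path used TRB_MAX_BUFF_SIZE. Assuming the usual #define TRB_MAX_BUFF_SIZE (1 << TRB_MAX_BUFF_SHIFT) with TRB_MAX_BUFF_SHIFT of 16 from xhci.h, the two spellings are identical, which a compile-time check can confirm:

	#define TRB_MAX_BUFF_SHIFT	16	/* assumed, from xhci.h */
	#define TRB_MAX_BUFF_SIZE	(1 << TRB_MAX_BUFF_SHIFT)

	/* both spellings of "bytes left before the next 64KB boundary" */
	#define OLD_LEN(addr) \
		(TRB_MAX_BUFF_SIZE - ((addr) & ((1 << TRB_MAX_BUFF_SHIFT) - 1)))
	#define NEW_LEN(addr) \
		(TRB_MAX_BUFF_SIZE - ((addr) & (TRB_MAX_BUFF_SIZE - 1)))

	_Static_assert(OLD_LEN(0x12345) == NEW_LEN(0x12345),
		       "macro matches the open-coded expression");

Switching both call sites to one macro removes that duplication.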
@@ -3897,8 +3730,6 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
 	struct xhci_ring *ep_ring;
 	struct xhci_ep_ctx *ep_ctx;
 	int start_frame;
-	int xhci_interval;
-	int ep_interval;
 	int num_tds, num_trbs, i;
 	int ret;
 	struct xhci_virt_ep *xep;
@@ -3912,7 +3743,7 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
 	num_trbs = 0;
 	num_tds = urb->number_of_packets;
 	for (i = 0; i < num_tds; i++)
-		num_trbs += count_isoc_trbs_needed(xhci, urb, i);
+		num_trbs += count_isoc_trbs_needed(urb, i);
 
 	/* Check the ring to guarantee there is enough room for the whole urb.
 	 * Do not insert any td of the urb to the ring if the check failed.
@@ -3926,26 +3757,7 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
 	 * Check interval value. This should be done before we start to
 	 * calculate the start frame value.
 	 */
-	xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
-	ep_interval = urb->interval;
-	/* Convert to microframes */
-	if (urb->dev->speed == USB_SPEED_LOW ||
-			urb->dev->speed == USB_SPEED_FULL)
-		ep_interval *= 8;
-	/* FIXME change this to a warning and a suggestion to use the new API
-	 * to set the polling interval (once the API is added).
-	 */
-	if (xhci_interval != ep_interval) {
-		dev_dbg_ratelimited(&urb->dev->dev,
-			"Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n",
-			ep_interval, ep_interval == 1 ? "" : "s",
-			xhci_interval, xhci_interval == 1 ? "" : "s");
-		urb->interval = xhci_interval;
-		/* Convert back to frames for LS/FS devices */
-		if (urb->dev->speed == USB_SPEED_LOW ||
-				urb->dev->speed == USB_SPEED_FULL)
-			urb->interval /= 8;
-	}
+	check_interval(xhci, urb, ep_ctx);
 
 	/* Calculate the start frame and put it in urb->start_frame. */
 	if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) {