author	Andiry Xu <andiry.xu@amd.com>	2011-09-23 17:19:54 -0400
committer	Greg Kroah-Hartman <gregkh@suse.de>	2011-09-26 18:51:11 -0400
commit	7e393a834b41001174a8fb3ae3bc23a749467760 (patch)
tree	621a7e49bb94045c11b8b2efe9070c4bc2339e60 /drivers/usb/host
parent	c1045e87b2cd293d53dff19779ea46b19195d593 (diff)
xHCI: AMD isoc link TRB chain bit quirk
Setting the chain (CH) bit in the link TRB of isochronous transfer rings
is required by the AMD 0.96 xHCI host controller to successfully traverse
multi-TRB TDs that span different memory segments. When a Missed Service
Error event occurs, if the chain bit is not set in the link TRB and the
host skips TDs which just cross a link TRB, the host may falsely recognize
the link TRB as a normal TRB. This can cause serious trouble: the host
does not jump to the correct address pointed to by the link TRB, but
instead continues fetching the memory that follows the link TRB address,
which may not even belong to the host, and the result is unpredictable.

Without the previous patch "xHCI: prevent infinite loop when processing
MSE event", the system may hang. With that patch applied, the system does
not hang, but the host still accesses the wrong memory addresses and
isochronous transfers fail. With this patch, isochronous transfers work
as expected.

This patch should be applied to kernels as old as 2.6.36, which was when
isochronous transfer support was first added for the xHCI host controller.

Signed-off-by: Andiry Xu <andiry.xu@amd.com>
Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Cc: stable@kernel.org
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
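As a reading aid for the diff below: the patch threads a new "isoc" flag
through the ring allocation and enqueue paths, and the chain-bit decision
it implements distills to the predicate sketched here. This helper does
not exist in the driver (the patch open-codes the test at each site); it
only restates the logic.

	/*
	 * Illustrative sketch only -- not a function in the driver.
	 * A link TRB must carry the chain bit on 0.95 hardware
	 * (xhci_link_trb_quirk()) and, with this patch, on AMD 0.96
	 * hosts whenever the ring is isochronous, so that a TD skipped
	 * after a Missed Service Error is not misparsed past the link TRB.
	 */
	static bool link_trb_needs_chain(struct xhci_hcd *xhci, bool isoc)
	{
		if (xhci_link_trb_quirk(xhci))
			return true;
		return isoc && (xhci->quirks & XHCI_AMD_0x96_HOST);
	}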
Diffstat (limited to 'drivers/usb/host')
-rw-r--r--	drivers/usb/host/xhci-mem.c	32
-rw-r--r--	drivers/usb/host/xhci-pci.c	3
-rw-r--r--	drivers/usb/host/xhci-ring.c	53
-rw-r--r--	drivers/usb/host/xhci.h	1
4 files changed, 51 insertions(+), 38 deletions(-)
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 3ec2ac9636fe..a6ff8252699e 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -79,7 +79,7 @@ static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
  * related flags, such as End TRB, Toggle Cycle, and no snoop.
  */
 static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
-		struct xhci_segment *next, bool link_trbs)
+		struct xhci_segment *next, bool link_trbs, bool isoc)
 {
 	u32 val;
 
@@ -95,7 +95,9 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
 		val &= ~TRB_TYPE_BITMASK;
 		val |= TRB_TYPE(TRB_LINK);
 		/* Always set the chain bit with 0.95 hardware */
-		if (xhci_link_trb_quirk(xhci))
+		/* Set chain bit for isoc rings on AMD 0.96 host */
+		if (xhci_link_trb_quirk(xhci) ||
+				(isoc && (xhci->quirks & XHCI_AMD_0x96_HOST)))
 			val |= TRB_CHAIN;
 		prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
 	}
@@ -152,7 +154,7 @@ static void xhci_initialize_ring_info(struct xhci_ring *ring)
  * See section 4.9.1 and figures 15 and 16.
  */
 static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
-		unsigned int num_segs, bool link_trbs, gfp_t flags)
+		unsigned int num_segs, bool link_trbs, bool isoc, gfp_t flags)
 {
 	struct xhci_ring	*ring;
 	struct xhci_segment	*prev;
@@ -178,12 +180,12 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
 		next = xhci_segment_alloc(xhci, flags);
 		if (!next)
 			goto fail;
-		xhci_link_segments(xhci, prev, next, link_trbs);
+		xhci_link_segments(xhci, prev, next, link_trbs, isoc);
 
 		prev = next;
 		num_segs--;
 	}
-	xhci_link_segments(xhci, prev, ring->first_seg, link_trbs);
+	xhci_link_segments(xhci, prev, ring->first_seg, link_trbs, isoc);
 
 	if (link_trbs) {
 		/* See section 4.9.2.1 and 6.4.4.1 */
@@ -229,14 +231,14 @@ void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
  * pointers to the beginning of the ring.
  */
 static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
-		struct xhci_ring *ring)
+		struct xhci_ring *ring, bool isoc)
 {
 	struct xhci_segment	*seg = ring->first_seg;
 	do {
 		memset(seg->trbs, 0,
 				sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
 		/* All endpoint rings have link TRBs */
-		xhci_link_segments(xhci, seg, seg->next, 1);
+		xhci_link_segments(xhci, seg, seg->next, 1, isoc);
 		seg = seg->next;
 	} while (seg != ring->first_seg);
 	xhci_initialize_ring_info(ring);
@@ -540,7 +542,7 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
 	 */
 	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
 		stream_info->stream_rings[cur_stream] =
-			xhci_ring_alloc(xhci, 1, true, mem_flags);
+			xhci_ring_alloc(xhci, 1, true, false, mem_flags);
 		cur_ring = stream_info->stream_rings[cur_stream];
 		if (!cur_ring)
 			goto cleanup_rings;
@@ -874,7 +876,7 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 	}
 
 	/* Allocate endpoint 0 ring */
-	dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, flags);
+	dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, false, flags);
 	if (!dev->eps[0].ring)
 		goto fail;
 
@@ -1315,10 +1317,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 	 */
 	if (usb_endpoint_xfer_isoc(&ep->desc))
 		virt_dev->eps[ep_index].new_ring =
-			xhci_ring_alloc(xhci, 8, true, mem_flags);
+			xhci_ring_alloc(xhci, 8, true, true, mem_flags);
 	else
 		virt_dev->eps[ep_index].new_ring =
-			xhci_ring_alloc(xhci, 1, true, mem_flags);
+			xhci_ring_alloc(xhci, 1, true, false, mem_flags);
 	if (!virt_dev->eps[ep_index].new_ring) {
 		/* Attempt to use the ring cache */
 		if (virt_dev->num_rings_cached == 0)
@@ -1327,7 +1329,8 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 			virt_dev->ring_cache[virt_dev->num_rings_cached];
 		virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
 		virt_dev->num_rings_cached--;
-		xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring);
+		xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
+			usb_endpoint_xfer_isoc(&ep->desc) ? true : false);
 	}
 	virt_dev->eps[ep_index].skip = false;
 	ep_ring = virt_dev->eps[ep_index].new_ring;
@@ -2236,7 +2239,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 		goto fail;
 
 	/* Set up the command ring to have one segments for now. */
-	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, flags);
+	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, false, flags);
 	if (!xhci->cmd_ring)
 		goto fail;
 	xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
@@ -2267,7 +2270,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	 * the event ring segment table (ERST).  Section 4.9.3.
 	 */
 	xhci_dbg(xhci, "// Allocating event ring\n");
-	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags);
+	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, false,
+					flags);
 	if (!xhci->event_ring)
 		goto fail;
 	if (xhci_check_trb_in_td_math(xhci, flags) < 0)
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index e66e2b03fbbe..732837eafabe 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -128,6 +128,9 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
 	if (pdev->vendor == PCI_VENDOR_ID_NEC)
 		xhci->quirks |= XHCI_NEC_HOST;
 
+	if (pdev->vendor == PCI_VENDOR_ID_AMD && xhci->hci_version == 0x96)
+		xhci->quirks |= XHCI_AMD_0x96_HOST;
+
 	/* AMD PLL quirk */
 	if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info())
 		xhci->quirks |= XHCI_AMD_PLL_FIX;
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index a3679635382a..e4b7f003d702 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -185,7 +185,7 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
  *			prepare_transfer()?
  */
 static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
-		bool consumer, bool more_trbs_coming)
+		bool consumer, bool more_trbs_coming, bool isoc)
 {
 	u32 chain;
 	union xhci_trb *next;
@@ -212,11 +212,13 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
 			if (!chain && !more_trbs_coming)
 				break;
 
-			/* If we're not dealing with 0.95 hardware,
+			/* If we're not dealing with 0.95 hardware or
+			 * isoc rings on AMD 0.96 host,
 			 * carry over the chain bit of the previous TRB
 			 * (which may mean the chain bit is cleared).
 			 */
-			if (!xhci_link_trb_quirk(xhci)) {
+			if (!(isoc && (xhci->quirks & XHCI_AMD_0x96_HOST))
+					&& !xhci_link_trb_quirk(xhci)) {
 				next->link.control &=
 					cpu_to_le32(~TRB_CHAIN);
 				next->link.control |=
@@ -2391,7 +2393,7 @@ irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd)
  *			prepare_transfer()?
  */
 static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
-		bool consumer, bool more_trbs_coming,
+		bool consumer, bool more_trbs_coming, bool isoc,
 		u32 field1, u32 field2, u32 field3, u32 field4)
 {
 	struct xhci_generic_trb *trb;
@@ -2401,7 +2403,7 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
 	trb->field[1] = cpu_to_le32(field2);
 	trb->field[2] = cpu_to_le32(field3);
 	trb->field[3] = cpu_to_le32(field4);
-	inc_enq(xhci, ring, consumer, more_trbs_coming);
+	inc_enq(xhci, ring, consumer, more_trbs_coming, isoc);
 }
 
 /*
@@ -2409,7 +2411,7 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
  * FIXME allocate segments if the ring is full.
  */
 static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
-		u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
+		u32 ep_state, unsigned int num_trbs, bool isoc, gfp_t mem_flags)
 {
 	/* Make sure the endpoint has been added to xHC schedule */
 	switch (ep_state) {
@@ -2451,10 +2453,11 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 		next = ring->enqueue;
 
 		while (last_trb(xhci, ring, ring->enq_seg, next)) {
-			/* If we're not dealing with 0.95 hardware,
-			 * clear the chain bit.
+			/* If we're not dealing with 0.95 hardware or isoc rings
+			 * on AMD 0.96 host, clear the chain bit.
 			 */
-			if (!xhci_link_trb_quirk(xhci))
+			if (!xhci_link_trb_quirk(xhci) && !(isoc &&
+					(xhci->quirks & XHCI_AMD_0x96_HOST)))
 				next->link.control &= cpu_to_le32(~TRB_CHAIN);
 			else
 				next->link.control |= cpu_to_le32(TRB_CHAIN);
@@ -2487,6 +2490,7 @@ static int prepare_transfer(struct xhci_hcd *xhci,
 		unsigned int num_trbs,
 		struct urb *urb,
 		unsigned int td_index,
+		bool isoc,
 		gfp_t mem_flags)
 {
 	int ret;
@@ -2504,7 +2508,7 @@ static int prepare_transfer(struct xhci_hcd *xhci,
 
 	ret = prepare_ring(xhci, ep_ring,
 			   le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
-			   num_trbs, mem_flags);
+			   num_trbs, isoc, mem_flags);
 	if (ret)
 		return ret;
 
@@ -2727,7 +2731,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
 	trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
 			ep_index, urb->stream_id,
-			num_trbs, urb, 0, mem_flags);
+			num_trbs, urb, 0, false, mem_flags);
 	if (trb_buff_len < 0)
 		return trb_buff_len;
 
@@ -2822,7 +2826,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 			more_trbs_coming = true;
 		else
 			more_trbs_coming = false;
-		queue_trb(xhci, ep_ring, false, more_trbs_coming,
+		queue_trb(xhci, ep_ring, false, more_trbs_coming, false,
 				lower_32_bits(addr),
 				upper_32_bits(addr),
 				length_field,
@@ -2913,7 +2917,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
 	ret = prepare_transfer(xhci, xhci->devs[slot_id],
 			ep_index, urb->stream_id,
-			num_trbs, urb, 0, mem_flags);
+			num_trbs, urb, 0, false, mem_flags);
 	if (ret < 0)
 		return ret;
 
@@ -2985,7 +2989,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 			more_trbs_coming = true;
 		else
 			more_trbs_coming = false;
-		queue_trb(xhci, ep_ring, false, more_trbs_coming,
+		queue_trb(xhci, ep_ring, false, more_trbs_coming, false,
 				lower_32_bits(addr),
 				upper_32_bits(addr),
 				length_field,
@@ -3045,7 +3049,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	num_trbs++;
 	ret = prepare_transfer(xhci, xhci->devs[slot_id],
 			ep_index, urb->stream_id,
-			num_trbs, urb, 0, mem_flags);
+			num_trbs, urb, 0, false, mem_flags);
 	if (ret < 0)
 		return ret;
 
@@ -3078,7 +3082,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		}
 	}
 
-	queue_trb(xhci, ep_ring, false, true,
+	queue_trb(xhci, ep_ring, false, true, false,
 		  setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
 		  le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
 		  TRB_LEN(8) | TRB_INTR_TARGET(0),
@@ -3098,7 +3102,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	if (urb->transfer_buffer_length > 0) {
 		if (setup->bRequestType & USB_DIR_IN)
 			field |= TRB_DIR_IN;
-		queue_trb(xhci, ep_ring, false, true,
+		queue_trb(xhci, ep_ring, false, true, false,
 				lower_32_bits(urb->transfer_dma),
 				upper_32_bits(urb->transfer_dma),
 				length_field,
@@ -3114,7 +3118,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		field = 0;
 	else
 		field = TRB_DIR_IN;
-	queue_trb(xhci, ep_ring, false, false,
+	queue_trb(xhci, ep_ring, false, false, false,
 			0,
 			0,
 			TRB_INTR_TARGET(0),
@@ -3263,7 +3267,8 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);
 
 		ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
-				urb->stream_id, trbs_per_td, urb, i, mem_flags);
+				urb->stream_id, trbs_per_td, urb, i, true,
+				mem_flags);
 		if (ret < 0) {
 			if (i == 0)
 				return ret;
@@ -3333,7 +3338,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 				remainder |
 				TRB_INTR_TARGET(0);
 
-			queue_trb(xhci, ep_ring, false, more_trbs_coming,
+			queue_trb(xhci, ep_ring, false, more_trbs_coming, true,
 				lower_32_bits(addr),
 				upper_32_bits(addr),
 				length_field,
@@ -3415,7 +3420,7 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
 	 * Do not insert any td of the urb to the ring if the check failed.
 	 */
 	ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
-			   num_trbs, mem_flags);
+			   num_trbs, true, mem_flags);
 	if (ret)
 		return ret;
 
@@ -3474,7 +3479,7 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
 		reserved_trbs++;
 
 	ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
-			reserved_trbs, GFP_ATOMIC);
+			reserved_trbs, false, GFP_ATOMIC);
 	if (ret < 0) {
 		xhci_err(xhci, "ERR: No room for command on command ring\n");
 		if (command_must_succeed)
@@ -3482,8 +3487,8 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
3482 "unfailable commands failed.\n"); 3487 "unfailable commands failed.\n");
3483 return ret; 3488 return ret;
3484 } 3489 }
3485 queue_trb(xhci, xhci->cmd_ring, false, false, field1, field2, field3, 3490 queue_trb(xhci, xhci->cmd_ring, false, false, false, field1, field2,
3486 field4 | xhci->cmd_ring->cycle_state); 3491 field3, field4 | xhci->cmd_ring->cycle_state);
3487 return 0; 3492 return 0;
3488} 3493}
3489 3494
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index e738466703a5..4050656bffed 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1462,6 +1462,7 @@ struct xhci_hcd {
 #define XHCI_BROKEN_MSI		(1 << 6)
 #define XHCI_RESET_ON_RESUME	(1 << 7)
 #define	XHCI_SW_BW_CHECKING	(1 << 8)
+#define XHCI_AMD_0x96_HOST	(1 << 9)
 	unsigned int		num_active_eps;
 	unsigned int		limit_active_eps;
 	/* There are two roothubs to keep track of bus suspend info for */