author     Tim Gardner <timg@tpi.com>                                    2011-10-13 12:42:28 -0400
committer  Herton Ronaldo Krzesinski <herton.krzesinski@canonical.com>  2011-10-17 13:33:28 -0400
commit     0d9e07b3e5e033f6d292e012fe2ac789d6be6962
tree       95d58b97f5639c2e431af916cc41e9a818a4084e /drivers/usb
parent     f347cfac0938cb00d5f8a338429157a6c1d6b26a
UBUNTU: SAUCE: xHCI: AMD isoc link TRB chain bit quirk
BugLink: http://bugs.launchpad.net/bugs/872811

Setting the chain (CH) bit in the link TRB of isochronous transfer rings
is required by the AMD 0.96 xHCI host controller to successfully traverse
multi-TRB TDs that span different memory segments.

When a Missed Service Error event occurs, if the chain bit is not set in
the link TRB and the host skips TDs that span a link TRB, the host may
falsely recognize the link TRB as a normal TRB. In that case the host does
not jump to the address the link TRB points to, but keeps fetching the
memory that follows the link TRB address, memory that may not even belong
to the host, with unpredictable results.

Without the earlier patch "xHCI: prevent infinite loop when processing
MSE event", the system may hang. With that patch applied, the system no
longer hangs, but the host still accesses the wrong memory address and
isochronous transfers fail. With this patch, isochronous transfers work
as expected.

This patch should be applied to kernels as old as 2.6.36, which was when
isochronous support was first added for the xHCI host controller.

Signed-off-by: Andiry Xu <andiry.xu@amd.com>
Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Cc: stable@kernel.org
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>

cherry-picked from git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb.git usb-next

Acked-by: Leann Ogasawara <leann.ogasawara@canonical.com>
Acked-by: Andy Whitcroft <andy.whitcroft@canonical.com>
Signed-off-by: Tim Gardner <tim.gardner@canonical.com>
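For readers skimming the diff below, here is a minimal standalone sketch of the
chain-bit decision the patch threads through the ring code. It is illustrative
only: the two quirk constants mirror their definitions in xhci.h, but
link_trb_chain_bit is a hypothetical helper written for this note, not a
function in the kernel source.

#include <stdbool.h>
#include <stdint.h>

#define TRB_CHAIN               (1 << 4)  /* chain bit in a TRB control word */
#define XHCI_LINK_TRB_QUIRK     (1 << 0)  /* 0.95 hosts: always chain link TRBs */
#define XHCI_AMD_0x96_HOST      (1 << 9)  /* AMD 0.96 host, added by this patch */

/*
 * Decide whether a link TRB's chain bit must be set.  0.95 hardware
 * always wants it; the AMD 0.96 host wants it only on isochronous
 * rings, so that TD-skipping after a Missed Service Error event does
 * not misparse the link TRB as a normal TRB and run off the segment.
 */
static uint32_t link_trb_chain_bit(uint32_t quirks, bool isoc)
{
        if ((quirks & XHCI_LINK_TRB_QUIRK) ||
            (isoc && (quirks & XHCI_AMD_0x96_HOST)))
                return TRB_CHAIN;
        return 0;
}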
Diffstat (limited to 'drivers/usb')
-rw-r--r--  drivers/usb/host/xhci-mem.c   | 32
-rw-r--r--  drivers/usb/host/xhci-pci.c   |  3
-rw-r--r--  drivers/usb/host/xhci-ring.c  | 53
-rw-r--r--  drivers/usb/host/xhci.h       |  1
4 files changed, 51 insertions(+), 38 deletions(-)
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index fcb7f7efc86..2d671af6b7e 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -81,7 +81,7 @@ static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
  * related flags, such as End TRB, Toggle Cycle, and no snoop.
  */
 static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
-                struct xhci_segment *next, bool link_trbs)
+                struct xhci_segment *next, bool link_trbs, bool isoc)
 {
         u32 val;
 
@@ -97,7 +97,9 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
                 val &= ~TRB_TYPE_BITMASK;
                 val |= TRB_TYPE(TRB_LINK);
                 /* Always set the chain bit with 0.95 hardware */
-                if (xhci_link_trb_quirk(xhci))
+                /* Set chain bit for isoc rings on AMD 0.96 host */
+                if (xhci_link_trb_quirk(xhci) ||
+                                (isoc && (xhci->quirks & XHCI_AMD_0x96_HOST)))
                         val |= TRB_CHAIN;
                 prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
         }
@@ -152,7 +154,7 @@ static void xhci_initialize_ring_info(struct xhci_ring *ring)
  * See section 4.9.1 and figures 15 and 16.
  */
 static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
-                unsigned int num_segs, bool link_trbs, gfp_t flags)
+                unsigned int num_segs, bool link_trbs, bool isoc, gfp_t flags)
 {
         struct xhci_ring *ring;
         struct xhci_segment *prev;
@@ -178,12 +180,12 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
                 next = xhci_segment_alloc(xhci, flags);
                 if (!next)
                         goto fail;
-                xhci_link_segments(xhci, prev, next, link_trbs);
+                xhci_link_segments(xhci, prev, next, link_trbs, isoc);
 
                 prev = next;
                 num_segs--;
         }
-        xhci_link_segments(xhci, prev, ring->first_seg, link_trbs);
+        xhci_link_segments(xhci, prev, ring->first_seg, link_trbs, isoc);
 
         if (link_trbs) {
                 /* See section 4.9.2.1 and 6.4.4.1 */
@@ -229,14 +231,14 @@ void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
  * pointers to the beginning of the ring.
  */
 static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
-                struct xhci_ring *ring)
+                struct xhci_ring *ring, bool isoc)
 {
         struct xhci_segment *seg = ring->first_seg;
         do {
                 memset(seg->trbs, 0,
                                 sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
                 /* All endpoint rings have link TRBs */
-                xhci_link_segments(xhci, seg, seg->next, 1);
+                xhci_link_segments(xhci, seg, seg->next, 1, isoc);
                 seg = seg->next;
         } while (seg != ring->first_seg);
         xhci_initialize_ring_info(ring);
@@ -540,7 +542,7 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
          */
         for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
                 stream_info->stream_rings[cur_stream] =
-                        xhci_ring_alloc(xhci, 1, true, mem_flags);
+                        xhci_ring_alloc(xhci, 1, true, false, mem_flags);
                 cur_ring = stream_info->stream_rings[cur_stream];
                 if (!cur_ring)
                         goto cleanup_rings;
@@ -765,7 +767,7 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
         }
 
         /* Allocate endpoint 0 ring */
-        dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, flags);
+        dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, false, flags);
         if (!dev->eps[0].ring)
                 goto fail;
 
@@ -1175,10 +1177,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
          */
         if (usb_endpoint_xfer_isoc(&ep->desc))
                 virt_dev->eps[ep_index].new_ring =
-                        xhci_ring_alloc(xhci, 8, true, mem_flags);
+                        xhci_ring_alloc(xhci, 8, true, true, mem_flags);
         else
                 virt_dev->eps[ep_index].new_ring =
-                        xhci_ring_alloc(xhci, 1, true, mem_flags);
+                        xhci_ring_alloc(xhci, 1, true, false, mem_flags);
         if (!virt_dev->eps[ep_index].new_ring) {
                 /* Attempt to use the ring cache */
                 if (virt_dev->num_rings_cached == 0)
@@ -1187,7 +1189,8 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
                         virt_dev->ring_cache[virt_dev->num_rings_cached];
                 virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
                 virt_dev->num_rings_cached--;
-                xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring);
+                xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
+                        usb_endpoint_xfer_isoc(&ep->desc) ? true : false);
         }
         virt_dev->eps[ep_index].skip = false;
         ep_ring = virt_dev->eps[ep_index].new_ring;
@@ -2001,7 +2004,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
                 goto fail;
 
         /* Set up the command ring to have one segments for now. */
-        xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, flags);
+        xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, false, flags);
         if (!xhci->cmd_ring)
                 goto fail;
         xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
@@ -2032,7 +2035,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
          * the event ring segment table (ERST).  Section 4.9.3.
          */
         xhci_dbg(xhci, "// Allocating event ring\n");
-        xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags);
+        xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, false,
+                                                flags);
         if (!xhci->event_ring)
                 goto fail;
         if (xhci_check_trb_in_td_math(xhci, flags) < 0)
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index cb16de213f6..50e7156a7d8 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -128,6 +128,9 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
         if (pdev->vendor == PCI_VENDOR_ID_NEC)
                 xhci->quirks |= XHCI_NEC_HOST;
 
+        if (pdev->vendor == PCI_VENDOR_ID_AMD && xhci->hci_version == 0x96)
+                xhci->quirks |= XHCI_AMD_0x96_HOST;
+
         /* AMD PLL quirk */
         if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info())
                 xhci->quirks |= XHCI_AMD_PLL_FIX;
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index d0871ea687d..e64bd6f9bfb 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -187,7 +187,7 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
  * prepare_transfer()?
  */
 static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
-                bool consumer, bool more_trbs_coming)
+                bool consumer, bool more_trbs_coming, bool isoc)
 {
         u32 chain;
         union xhci_trb *next;
@@ -214,11 +214,13 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
                                 if (!chain && !more_trbs_coming)
                                         break;
 
-                                /* If we're not dealing with 0.95 hardware,
+                                /* If we're not dealing with 0.95 hardware or
+                                 * isoc rings on AMD 0.96 host,
                                  * carry over the chain bit of the previous TRB
                                  * (which may mean the chain bit is cleared).
                                  */
-                                if (!xhci_link_trb_quirk(xhci)) {
+                                if (!(isoc && (xhci->quirks & XHCI_AMD_0x96_HOST))
+                                                && !xhci_link_trb_quirk(xhci)) {
                                         next->link.control &=
                                                 cpu_to_le32(~TRB_CHAIN);
                                         next->link.control |=
@@ -2398,7 +2400,7 @@ irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd)
  * prepare_transfer()?
  */
 static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
-                bool consumer, bool more_trbs_coming,
+                bool consumer, bool more_trbs_coming, bool isoc,
                 u32 field1, u32 field2, u32 field3, u32 field4)
 {
         struct xhci_generic_trb *trb;
@@ -2408,7 +2410,7 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
         trb->field[1] = cpu_to_le32(field2);
         trb->field[2] = cpu_to_le32(field3);
         trb->field[3] = cpu_to_le32(field4);
-        inc_enq(xhci, ring, consumer, more_trbs_coming);
+        inc_enq(xhci, ring, consumer, more_trbs_coming, isoc);
 }
 
 /*
@@ -2416,7 +2418,7 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
  * FIXME allocate segments if the ring is full.
  */
 static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
-                u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
+                u32 ep_state, unsigned int num_trbs, bool isoc, gfp_t mem_flags)
 {
         /* Make sure the endpoint has been added to xHC schedule */
         switch (ep_state) {
@@ -2458,10 +2460,11 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
                 next = ring->enqueue;
 
                 while (last_trb(xhci, ring, ring->enq_seg, next)) {
-                        /* If we're not dealing with 0.95 hardware,
-                         * clear the chain bit.
+                        /* If we're not dealing with 0.95 hardware or isoc rings
+                         * on AMD 0.96 host, clear the chain bit.
                          */
-                        if (!xhci_link_trb_quirk(xhci))
+                        if (!xhci_link_trb_quirk(xhci) && !(isoc &&
+                                        (xhci->quirks & XHCI_AMD_0x96_HOST)))
                                 next->link.control &= cpu_to_le32(~TRB_CHAIN);
                         else
                                 next->link.control |= cpu_to_le32(TRB_CHAIN);
@@ -2494,6 +2497,7 @@ static int prepare_transfer(struct xhci_hcd *xhci,
                 unsigned int num_trbs,
                 struct urb *urb,
                 unsigned int td_index,
+                bool isoc,
                 gfp_t mem_flags)
 {
         int ret;
@@ -2511,7 +2515,7 @@ static int prepare_transfer(struct xhci_hcd *xhci,
 
         ret = prepare_ring(xhci, ep_ring,
                         le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
-                        num_trbs, mem_flags);
+                        num_trbs, isoc, mem_flags);
         if (ret)
                 return ret;
 
@@ -2734,7 +2738,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
         trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
                         ep_index, urb->stream_id,
-                        num_trbs, urb, 0, mem_flags);
+                        num_trbs, urb, 0, false, mem_flags);
         if (trb_buff_len < 0)
                 return trb_buff_len;
 
@@ -2829,7 +2833,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                         more_trbs_coming = true;
                 else
                         more_trbs_coming = false;
-                queue_trb(xhci, ep_ring, false, more_trbs_coming,
+                queue_trb(xhci, ep_ring, false, more_trbs_coming, false,
                                 lower_32_bits(addr),
                                 upper_32_bits(addr),
                                 length_field,
@@ -2920,7 +2924,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
         ret = prepare_transfer(xhci, xhci->devs[slot_id],
                         ep_index, urb->stream_id,
-                        num_trbs, urb, 0, mem_flags);
+                        num_trbs, urb, 0, false, mem_flags);
         if (ret < 0)
                 return ret;
 
@@ -2992,7 +2996,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                         more_trbs_coming = true;
                 else
                         more_trbs_coming = false;
-                queue_trb(xhci, ep_ring, false, more_trbs_coming,
+                queue_trb(xhci, ep_ring, false, more_trbs_coming, false,
                                 lower_32_bits(addr),
                                 upper_32_bits(addr),
                                 length_field,
@@ -3052,7 +3056,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                 num_trbs++;
         ret = prepare_transfer(xhci, xhci->devs[slot_id],
                         ep_index, urb->stream_id,
-                        num_trbs, urb, 0, mem_flags);
+                        num_trbs, urb, 0, false, mem_flags);
         if (ret < 0)
                 return ret;
 
@@ -3085,7 +3089,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                 }
         }
 
-        queue_trb(xhci, ep_ring, false, true,
+        queue_trb(xhci, ep_ring, false, true, false,
                   setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
                   le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
                   TRB_LEN(8) | TRB_INTR_TARGET(0),
@@ -3105,7 +3109,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
         if (urb->transfer_buffer_length > 0) {
                 if (setup->bRequestType & USB_DIR_IN)
                         field |= TRB_DIR_IN;
-                queue_trb(xhci, ep_ring, false, true,
+                queue_trb(xhci, ep_ring, false, true, false,
                                 lower_32_bits(urb->transfer_dma),
                                 upper_32_bits(urb->transfer_dma),
                                 length_field,
@@ -3121,7 +3125,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                 field = 0;
         else
                 field = TRB_DIR_IN;
-        queue_trb(xhci, ep_ring, false, false,
+        queue_trb(xhci, ep_ring, false, false, false,
                         0,
                         0,
                         TRB_INTR_TARGET(0),
@@ -3270,7 +3274,8 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                 trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);
 
                 ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
-                                urb->stream_id, trbs_per_td, urb, i, mem_flags);
+                                urb->stream_id, trbs_per_td, urb, i, true,
+                                mem_flags);
                 if (ret < 0) {
                         if (i == 0)
                                 return ret;
@@ -3340,7 +3345,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                                 remainder |
                                 TRB_INTR_TARGET(0);
 
-                        queue_trb(xhci, ep_ring, false, more_trbs_coming,
+                        queue_trb(xhci, ep_ring, false, more_trbs_coming, true,
                                 lower_32_bits(addr),
                                 upper_32_bits(addr),
                                 length_field,
@@ -3422,7 +3427,7 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
          * Do not insert any td of the urb to the ring if the check failed.
          */
         ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
-                           num_trbs, mem_flags);
+                           num_trbs, true, mem_flags);
         if (ret)
                 return ret;
 
@@ -3481,7 +3486,7 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
                 reserved_trbs++;
 
         ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
-                        reserved_trbs, GFP_ATOMIC);
+                        reserved_trbs, false, GFP_ATOMIC);
         if (ret < 0) {
                 xhci_err(xhci, "ERR: No room for command on command ring\n");
                 if (command_must_succeed)
@@ -3489,8 +3494,8 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
                                 "unfailable commands failed.\n");
                 return ret;
         }
-        queue_trb(xhci, xhci->cmd_ring, false, false, field1, field2, field3,
-                        field4 | xhci->cmd_ring->cycle_state);
+        queue_trb(xhci, xhci->cmd_ring, false, false, false, field1, field2,
+                        field3, field4 | xhci->cmd_ring->cycle_state);
         return 0;
 }
 
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index d8bbf5ccb10..8a98416e535 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1311,6 +1311,7 @@ struct xhci_hcd {
 #define XHCI_EP_LIMIT_QUIRK     (1 << 5)
 #define XHCI_BROKEN_MSI         (1 << 6)
 #define XHCI_RESET_ON_RESUME    (1 << 7)
+#define XHCI_AMD_0x96_HOST      (1 << 9)
         unsigned int            num_active_eps;
         unsigned int            limit_active_eps;
         /* There are two roothubs to keep track of bus suspend info for */