Diffstat (limited to 'drivers/usb')
 drivers/usb/core/config.c    |   2
 drivers/usb/core/hub.c       | 131
 drivers/usb/host/xhci-dbg.c  |  51
 drivers/usb/host/xhci-hub.c  | 165
 drivers/usb/host/xhci-mem.c  | 124
 drivers/usb/host/xhci-ring.c | 477
 drivers/usb/host/xhci.c      | 124
 drivers/usb/host/xhci.h      | 136
 8 files changed, 762 insertions(+), 448 deletions(-)
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 83126b03e7cf..c962608b4b9a 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -129,7 +129,7 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
 		max_tx = ep->desc.wMaxPacketSize * (desc->bMaxBurst + 1);
 	else
 		max_tx = 999999;
-	if (desc->wBytesPerInterval > max_tx) {
+	if (le16_to_cpu(desc->wBytesPerInterval) > max_tx) {
 		dev_warn(ddev, "%s endpoint with wBytesPerInterval of %d in "
 				"config %d interface %d altsetting %d ep %d: "
 				"setting to %d\n",
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 93720bdc9efd..79a58c3a2e2a 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -379,15 +379,6 @@ static int hub_port_status(struct usb_hub *hub, int port1,
 		*status = le16_to_cpu(hub->status->port.wPortStatus);
 		*change = le16_to_cpu(hub->status->port.wPortChange);
 
-		if ((hub->hdev->parent != NULL) &&
-				hub_is_superspeed(hub->hdev)) {
-			/* Translate the USB 3 port status */
-			u16 tmp = *status & USB_SS_PORT_STAT_MASK;
-			if (*status & USB_SS_PORT_STAT_POWER)
-				tmp |= USB_PORT_STAT_POWER;
-			*status = tmp;
-		}
-
 		ret = 0;
 	}
 	mutex_unlock(&hub->status_mutex);
@@ -2160,11 +2151,76 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
 	return status;
 }
 
+/* Warm reset a USB3 protocol port */
+static int hub_port_warm_reset(struct usb_hub *hub, int port)
+{
+	int ret;
+	u16 portstatus, portchange;
+
+	if (!hub_is_superspeed(hub->hdev)) {
+		dev_err(hub->intfdev, "only USB3 hub support warm reset\n");
+		return -EINVAL;
+	}
+
+	/* Warm reset the port */
+	ret = set_port_feature(hub->hdev,
+			port, USB_PORT_FEAT_BH_PORT_RESET);
+	if (ret) {
+		dev_err(hub->intfdev, "cannot warm reset port %d\n", port);
+		return ret;
+	}
+
+	msleep(20);
+	ret = hub_port_status(hub, port, &portstatus, &portchange);
+
+	if (portchange & USB_PORT_STAT_C_RESET)
+		clear_port_feature(hub->hdev, port, USB_PORT_FEAT_C_RESET);
+
+	if (portchange & USB_PORT_STAT_C_BH_RESET)
+		clear_port_feature(hub->hdev, port,
+				USB_PORT_FEAT_C_BH_PORT_RESET);
+
+	if (portchange & USB_PORT_STAT_C_LINK_STATE)
+		clear_port_feature(hub->hdev, port,
+				USB_PORT_FEAT_C_PORT_LINK_STATE);
+
+	return ret;
+}
+
+/* Check if a port is power on */
+static int port_is_power_on(struct usb_hub *hub, unsigned portstatus)
+{
+	int ret = 0;
+
+	if (hub_is_superspeed(hub->hdev)) {
+		if (portstatus & USB_SS_PORT_STAT_POWER)
+			ret = 1;
+	} else {
+		if (portstatus & USB_PORT_STAT_POWER)
+			ret = 1;
+	}
+
+	return ret;
+}
+
 #ifdef CONFIG_PM
 
-#define MASK_BITS	(USB_PORT_STAT_POWER | USB_PORT_STAT_CONNECTION | \
-				USB_PORT_STAT_SUSPEND)
-#define WANT_BITS	(USB_PORT_STAT_POWER | USB_PORT_STAT_CONNECTION)
+/* Check if a port is suspended(USB2.0 port) or in U3 state(USB3.0 port) */
+static int port_is_suspended(struct usb_hub *hub, unsigned portstatus)
+{
+	int ret = 0;
+
+	if (hub_is_superspeed(hub->hdev)) {
+		if ((portstatus & USB_PORT_STAT_LINK_STATE)
+				== USB_SS_PORT_LS_U3)
+			ret = 1;
+	} else {
+		if (portstatus & USB_PORT_STAT_SUSPEND)
+			ret = 1;
+	}
+
+	return ret;
+}
 
 /* Determine whether the device on a port is ready for a normal resume,
  * is ready for a reset-resume, or should be disconnected.
@@ -2174,7 +2230,9 @@ static int check_port_resume_type(struct usb_device *udev,
 		int status, unsigned portchange, unsigned portstatus)
 {
 	/* Is the device still present? */
-	if (status || (portstatus & MASK_BITS) != WANT_BITS) {
+	if (status || port_is_suspended(hub, portstatus) ||
+			!port_is_power_on(hub, portstatus) ||
+			!(portstatus & USB_PORT_STAT_CONNECTION)) {
 		if (status >= 0)
 			status = -ENODEV;
 	}
@@ -2285,14 +2343,10 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
 	}
 
 	/* see 7.1.7.6 */
-	/* Clear PORT_POWER if it's a USB3.0 device connected to USB 3.0
-	 * external hub.
-	 * FIXME: this is a temporary workaround to make the system able
-	 * to suspend/resume.
-	 */
-	if ((hub->hdev->parent != NULL) && hub_is_superspeed(hub->hdev))
-		status = clear_port_feature(hub->hdev, port1,
-				USB_PORT_FEAT_POWER);
+	if (hub_is_superspeed(hub->hdev))
+		status = set_port_feature(hub->hdev,
+				port1 | (USB_SS_PORT_LS_U3 << 3),
+				USB_PORT_FEAT_LINK_STATE);
 	else
 		status = set_port_feature(hub->hdev, port1,
 				USB_PORT_FEAT_SUSPEND);
@@ -2439,7 +2493,7 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
 
 	/* Skip the initial Clear-Suspend step for a remote wakeup */
 	status = hub_port_status(hub, port1, &portstatus, &portchange);
-	if (status == 0 && !(portstatus & USB_PORT_STAT_SUSPEND))
+	if (status == 0 && !port_is_suspended(hub, portstatus))
 		goto SuspendCleared;
 
 	// dev_dbg(hub->intfdev, "resume port %d\n", port1);
@@ -2447,8 +2501,13 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
 	set_bit(port1, hub->busy_bits);
 
 	/* see 7.1.7.7; affects power usage, but not budgeting */
-	status = clear_port_feature(hub->hdev,
-			port1, USB_PORT_FEAT_SUSPEND);
+	if (hub_is_superspeed(hub->hdev))
+		status = set_port_feature(hub->hdev,
+				port1 | (USB_SS_PORT_LS_U0 << 3),
+				USB_PORT_FEAT_LINK_STATE);
+	else
+		status = clear_port_feature(hub->hdev,
+				port1, USB_PORT_FEAT_SUSPEND);
 	if (status) {
 		dev_dbg(hub->intfdev, "can't resume port %d, status %d\n",
 				port1, status);
@@ -2470,9 +2529,15 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
 
  SuspendCleared:
 	if (status == 0) {
-		if (portchange & USB_PORT_STAT_C_SUSPEND)
-			clear_port_feature(hub->hdev, port1,
-					USB_PORT_FEAT_C_SUSPEND);
+		if (hub_is_superspeed(hub->hdev)) {
+			if (portchange & USB_PORT_STAT_C_LINK_STATE)
+				clear_port_feature(hub->hdev, port1,
+						USB_PORT_FEAT_C_PORT_LINK_STATE);
+		} else {
+			if (portchange & USB_PORT_STAT_C_SUSPEND)
+				clear_port_feature(hub->hdev, port1,
+						USB_PORT_FEAT_C_SUSPEND);
+		}
 	}
 
 	clear_bit(port1, hub->busy_bits);
@@ -3147,7 +3212,7 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
 
 	/* maybe switch power back on (e.g. root hub was reset) */
 	if ((wHubCharacteristics & HUB_CHAR_LPSM) < 2
-			&& !(portstatus & USB_PORT_STAT_POWER))
+			&& !port_is_power_on(hub, portstatus))
 		set_port_feature(hdev, port1, USB_PORT_FEAT_POWER);
 
 	if (portstatus & USB_PORT_STAT_ENABLE)
@@ -3490,6 +3555,16 @@ static void hub_events(void)
 						USB_PORT_FEAT_C_PORT_CONFIG_ERROR);
 			}
 
+			/* Warm reset a USB3 protocol port if it's in
+			 * SS.Inactive state.
+			 */
+			if (hub_is_superspeed(hub->hdev) &&
+				(portstatus & USB_PORT_STAT_LINK_STATE)
+					== USB_SS_PORT_LS_SS_INACTIVE) {
+				dev_dbg(hub_dev, "warm reset port %d\n", i);
+				hub_port_warm_reset(hub, i);
+			}
+
 			if (connect_change)
 				hub_port_connect_change(hub, i,
 						portstatus, portchange);
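
The usb_port_suspend()/usb_port_resume() changes above request U3 or U0 through SetPortFeature(USB_PORT_FEAT_LINK_STATE), packing the desired link state into the upper byte of wIndex; xhci_hub_control() later recovers it with (wIndex & 0xff00) >> 3. A standalone sketch of that round trip; the *_LS_* value mirrors the ch11.h definitions of this era and is repeated here as an assumption:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define USB_SS_PORT_LS_U3	0x0060	/* link state field, wPortStatus bits 5:8 */

int main(void)
{
	int port1 = 4;

	/* encode, as usb_port_suspend() does: 0x0060 << 3 = 0x0300 */
	uint16_t wIndex = port1 | (USB_SS_PORT_LS_U3 << 3);

	/* decode, as xhci_hub_control() does */
	uint16_t link_state = (wIndex & 0xff00) >> 3;
	uint16_t port = wIndex & 0xff;

	assert(link_state == USB_SS_PORT_LS_U3);
	assert(port == port1);
	printf("wIndex=0x%04x -> port %u, link state 0x%04x\n",
	       (unsigned int) wIndex, (unsigned int) port,
	       (unsigned int) link_state);
	return 0;
}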
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index 0231814a97a5..2e0486178dbe 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -147,7 +147,7 @@ static void xhci_print_op_regs(struct xhci_hcd *xhci)
 
 static void xhci_print_ports(struct xhci_hcd *xhci)
 {
-	u32 __iomem *addr;
+	__le32 __iomem *addr;
 	int i, j;
 	int ports;
 	char *names[NUM_PORT_REGS] = {
@@ -253,27 +253,27 @@ void xhci_print_trb_offsets(struct xhci_hcd *xhci, union xhci_trb *trb)
 void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
 {
 	u64 address;
-	u32 type = xhci_readl(xhci, &trb->link.control) & TRB_TYPE_BITMASK;
+	u32 type = le32_to_cpu(trb->link.control) & TRB_TYPE_BITMASK;
 
 	switch (type) {
 	case TRB_TYPE(TRB_LINK):
 		xhci_dbg(xhci, "Link TRB:\n");
 		xhci_print_trb_offsets(xhci, trb);
 
-		address = trb->link.segment_ptr;
+		address = le64_to_cpu(trb->link.segment_ptr);
 		xhci_dbg(xhci, "Next ring segment DMA address = 0x%llx\n", address);
 
 		xhci_dbg(xhci, "Interrupter target = 0x%x\n",
-				GET_INTR_TARGET(trb->link.intr_target));
+				GET_INTR_TARGET(le32_to_cpu(trb->link.intr_target)));
 		xhci_dbg(xhci, "Cycle bit = %u\n",
-				(unsigned int) (trb->link.control & TRB_CYCLE));
+				(unsigned int) (le32_to_cpu(trb->link.control) & TRB_CYCLE));
 		xhci_dbg(xhci, "Toggle cycle bit = %u\n",
-				(unsigned int) (trb->link.control & LINK_TOGGLE));
+				(unsigned int) (le32_to_cpu(trb->link.control) & LINK_TOGGLE));
 		xhci_dbg(xhci, "No Snoop bit = %u\n",
-				(unsigned int) (trb->link.control & TRB_NO_SNOOP));
+				(unsigned int) (le32_to_cpu(trb->link.control) & TRB_NO_SNOOP));
 		break;
 	case TRB_TYPE(TRB_TRANSFER):
-		address = trb->trans_event.buffer;
+		address = le64_to_cpu(trb->trans_event.buffer);
 		/*
 		 * FIXME: look at flags to figure out if it's an address or if
 		 * the data is directly in the buffer field.
@@ -281,11 +281,12 @@ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
 		xhci_dbg(xhci, "DMA address or buffer contents= %llu\n", address);
 		break;
 	case TRB_TYPE(TRB_COMPLETION):
-		address = trb->event_cmd.cmd_trb;
+		address = le64_to_cpu(trb->event_cmd.cmd_trb);
 		xhci_dbg(xhci, "Command TRB pointer = %llu\n", address);
 		xhci_dbg(xhci, "Completion status = %u\n",
-				(unsigned int) GET_COMP_CODE(trb->event_cmd.status));
-		xhci_dbg(xhci, "Flags = 0x%x\n", (unsigned int) trb->event_cmd.flags);
+				(unsigned int) GET_COMP_CODE(le32_to_cpu(trb->event_cmd.status)));
+		xhci_dbg(xhci, "Flags = 0x%x\n",
+				(unsigned int) le32_to_cpu(trb->event_cmd.flags));
 		break;
 	default:
 		xhci_dbg(xhci, "Unknown TRB with TRB type ID %u\n",
@@ -311,16 +312,16 @@ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
 void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg)
 {
 	int i;
-	u32 addr = (u32) seg->dma;
+	u64 addr = seg->dma;
 	union xhci_trb *trb = seg->trbs;
 
 	for (i = 0; i < TRBS_PER_SEGMENT; ++i) {
 		trb = &seg->trbs[i];
-		xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n", addr,
-				lower_32_bits(trb->link.segment_ptr),
-				upper_32_bits(trb->link.segment_ptr),
-				(unsigned int) trb->link.intr_target,
-				(unsigned int) trb->link.control);
+		xhci_dbg(xhci, "@%016llx %08x %08x %08x %08x\n", addr,
+				(u32)lower_32_bits(le64_to_cpu(trb->link.segment_ptr)),
+				(u32)upper_32_bits(le64_to_cpu(trb->link.segment_ptr)),
+				(unsigned int) le32_to_cpu(trb->link.intr_target),
+				(unsigned int) le32_to_cpu(trb->link.control));
 		addr += sizeof(*trb);
 	}
 }
@@ -391,18 +392,18 @@ void xhci_dbg_ep_rings(struct xhci_hcd *xhci,
 
 void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
 {
-	u32 addr = (u32) erst->erst_dma_addr;
+	u64 addr = erst->erst_dma_addr;
 	int i;
 	struct xhci_erst_entry *entry;
 
 	for (i = 0; i < erst->num_entries; ++i) {
 		entry = &erst->entries[i];
-		xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n",
-				(unsigned int) addr,
-				lower_32_bits(entry->seg_addr),
-				upper_32_bits(entry->seg_addr),
-				(unsigned int) entry->seg_size,
-				(unsigned int) entry->rsvd);
+		xhci_dbg(xhci, "@%016llx %08x %08x %08x %08x\n",
+				addr,
+				lower_32_bits(le64_to_cpu(entry->seg_addr)),
+				upper_32_bits(le64_to_cpu(entry->seg_addr)),
+				(unsigned int) le32_to_cpu(entry->seg_size),
+				(unsigned int) le32_to_cpu(entry->rsvd));
 		addr += sizeof(*entry);
 	}
 }
@@ -436,7 +437,7 @@ char *xhci_get_slot_state(struct xhci_hcd *xhci,
 {
 	struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx);
 
-	switch (GET_SLOT_STATE(slot_ctx->dev_state)) {
+	switch (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state))) {
 	case 0:
 		return "enabled/disabled";
 	case 1:
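
The xhci-dbg.c hunks widen the printed addresses from u32 to u64 (with a matching @%016llx format) and byte-swap each TRB or ERST field before splitting it into 32-bit halves. A standalone sketch of that split, with local helpers standing in for the kernel's lower_32_bits()/upper_32_bits() and an arbitrary example address:

#include <endian.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t lower_32_bits(uint64_t v) { return (uint32_t) v; }
static uint32_t upper_32_bits(uint64_t v) { return (uint32_t) (v >> 32); }

int main(void)
{
	uint64_t addr = 0x1fedc0de000ULL;	/* CPU-order DMA address */
	uint64_t segment_ptr = htole64(addr);	/* as stored in the TRB */

	/* truncating addr to u32, as the old code did, loses the top bits */
	printf("@%016" PRIx64 " %08x %08x\n", addr,
	       lower_32_bits(le64toh(segment_ptr)),
	       upper_32_bits(le64toh(segment_ptr)));
	return 0;
}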
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index a78f2ebd11b7..e3ddc6a95afe 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -50,7 +50,7 @@ static void xhci_common_hub_descriptor(struct xhci_hcd *xhci,
 	temp |= 0x0008;
 	/* Bits 6:5 - no TTs in root ports */
 	/* Bit 7 - no port indicators */
-	desc->wHubCharacteristics = (__force __u16) cpu_to_le16(temp);
+	desc->wHubCharacteristics = cpu_to_le16(temp);
 }
 
 /* Fill in the USB 2.0 roothub descriptor */
@@ -314,7 +314,7 @@ void xhci_ring_device(struct xhci_hcd *xhci, int slot_id)
 }
 
 static void xhci_disable_port(struct usb_hcd *hcd, struct xhci_hcd *xhci,
-		u16 wIndex, u32 __iomem *addr, u32 port_status)
+		u16 wIndex, __le32 __iomem *addr, u32 port_status)
 {
 	/* Don't allow the USB core to disable SuperSpeed ports. */
 	if (hcd->speed == HCD_USB3) {
@@ -331,7 +331,7 @@ static void xhci_disable_port(struct usb_hcd *hcd, struct xhci_hcd *xhci,
 }
 
 static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue,
-		u16 wIndex, u32 __iomem *addr, u32 port_status)
+		u16 wIndex, __le32 __iomem *addr, u32 port_status)
 {
 	char *port_change_bit;
 	u32 status;
@@ -341,6 +341,10 @@ static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue,
 		status = PORT_RC;
 		port_change_bit = "reset";
 		break;
+	case USB_PORT_FEAT_C_BH_PORT_RESET:
+		status = PORT_WRC;
+		port_change_bit = "warm(BH) reset";
+		break;
 	case USB_PORT_FEAT_C_CONNECTION:
 		status = PORT_CSC;
 		port_change_bit = "connect";
@@ -357,6 +361,10 @@ static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue,
 		status = PORT_PLC;
 		port_change_bit = "suspend/resume";
 		break;
+	case USB_PORT_FEAT_C_PORT_LINK_STATE:
+		status = PORT_PLC;
+		port_change_bit = "link state";
+		break;
 	default:
 		/* Should never happen */
 		return;
@@ -376,9 +384,10 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 	unsigned long flags;
 	u32 temp, temp1, status;
 	int retval = 0;
-	u32 __iomem **port_array;
+	__le32 __iomem **port_array;
 	int slot_id;
 	struct xhci_bus_state *bus_state;
+	u16 link_state = 0;
 
 	if (hcd->speed == HCD_USB3) {
 		ports = xhci->num_usb3_ports;
@@ -422,9 +431,6 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 		}
 		xhci_dbg(xhci, "get port status, actual port %d status = 0x%x\n", wIndex, temp);
 
-		/* FIXME - should we return a port status value like the USB
-		 * 3.0 external hubs do?
-		 */
 		/* wPortChange bits */
 		if (temp & PORT_CSC)
 			status |= USB_PORT_STAT_C_CONNECTION << 16;
@@ -432,13 +438,21 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 			status |= USB_PORT_STAT_C_ENABLE << 16;
 		if ((temp & PORT_OCC))
 			status |= USB_PORT_STAT_C_OVERCURRENT << 16;
-		/*
-		 * FIXME ignoring reset and USB 2.1/3.0 specific
-		 * changes
-		 */
-		if ((temp & PORT_PLS_MASK) == XDEV_U3
-			&& (temp & PORT_POWER))
-			status |= 1 << USB_PORT_FEAT_SUSPEND;
+		if ((temp & PORT_RC))
+			status |= USB_PORT_STAT_C_RESET << 16;
+		/* USB3.0 only */
+		if (hcd->speed == HCD_USB3) {
+			if ((temp & PORT_PLC))
+				status |= USB_PORT_STAT_C_LINK_STATE << 16;
+			if ((temp & PORT_WRC))
+				status |= USB_PORT_STAT_C_BH_RESET << 16;
+		}
+
+		if (hcd->speed != HCD_USB3) {
+			if ((temp & PORT_PLS_MASK) == XDEV_U3
+				&& (temp & PORT_POWER))
+				status |= USB_PORT_STAT_SUSPEND;
+		}
 		if ((temp & PORT_PLS_MASK) == XDEV_RESUME) {
 			if ((temp & PORT_RESET) || !(temp & PORT_PE))
 				goto error;
@@ -469,7 +483,8 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 				&& (temp & PORT_POWER)
 				&& (bus_state->suspended_ports & (1 << wIndex))) {
 			bus_state->suspended_ports &= ~(1 << wIndex);
-			bus_state->port_c_suspend |= 1 << wIndex;
+			if (hcd->speed != HCD_USB3)
+				bus_state->port_c_suspend |= 1 << wIndex;
 		}
 		if (temp & PORT_CONNECT) {
 			status |= USB_PORT_STAT_CONNECTION;
@@ -481,14 +496,28 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 			status |= USB_PORT_STAT_OVERCURRENT;
 		if (temp & PORT_RESET)
 			status |= USB_PORT_STAT_RESET;
-		if (temp & PORT_POWER)
-			status |= USB_PORT_STAT_POWER;
+		if (temp & PORT_POWER) {
+			if (hcd->speed == HCD_USB3)
+				status |= USB_SS_PORT_STAT_POWER;
+			else
+				status |= USB_PORT_STAT_POWER;
+		}
+		/* Port Link State */
+		if (hcd->speed == HCD_USB3) {
+			/* resume state is a xHCI internal state.
+			 * Do not report it to usb core.
+			 */
+			if ((temp & PORT_PLS_MASK) != XDEV_RESUME)
+				status |= (temp & PORT_PLS_MASK);
+		}
 		if (bus_state->port_c_suspend & (1 << wIndex))
 			status |= 1 << USB_PORT_FEAT_C_SUSPEND;
 		xhci_dbg(xhci, "Get port status returned 0x%x\n", status);
 		put_unaligned(cpu_to_le32(status), (__le32 *) buf);
 		break;
 	case SetPortFeature:
+		if (wValue == USB_PORT_FEAT_LINK_STATE)
+			link_state = (wIndex & 0xff00) >> 3;
 		wIndex &= 0xff;
 		if (!wIndex || wIndex > ports)
 			goto error;
@@ -537,6 +566,44 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 			temp = xhci_readl(xhci, port_array[wIndex]);
 			bus_state->suspended_ports |= 1 << wIndex;
 			break;
+		case USB_PORT_FEAT_LINK_STATE:
+			temp = xhci_readl(xhci, port_array[wIndex]);
+			/* Software should not attempt to set
+			 * port link state above '5' (Rx.Detect) and the port
+			 * must be enabled.
+			 */
+			if ((temp & PORT_PE) == 0 ||
+				(link_state > USB_SS_PORT_LS_RX_DETECT)) {
+				xhci_warn(xhci, "Cannot set link state.\n");
+				goto error;
+			}
+
+			if (link_state == USB_SS_PORT_LS_U3) {
+				slot_id = xhci_find_slot_id_by_port(hcd, xhci,
+						wIndex + 1);
+				if (slot_id) {
+					/* unlock to execute stop endpoint
+					 * commands */
+					spin_unlock_irqrestore(&xhci->lock,
+								flags);
+					xhci_stop_device(xhci, slot_id, 1);
+					spin_lock_irqsave(&xhci->lock, flags);
+				}
+			}
+
+			temp = xhci_port_state_to_neutral(temp);
+			temp &= ~PORT_PLS_MASK;
+			temp |= PORT_LINK_STROBE | link_state;
+			xhci_writel(xhci, temp, port_array[wIndex]);
+
+			spin_unlock_irqrestore(&xhci->lock, flags);
+			msleep(20); /* wait device to enter */
+			spin_lock_irqsave(&xhci->lock, flags);
+
+			temp = xhci_readl(xhci, port_array[wIndex]);
+			if (link_state == USB_SS_PORT_LS_U3)
+				bus_state->suspended_ports |= 1 << wIndex;
+			break;
 		case USB_PORT_FEAT_POWER:
 			/*
 			 * Turn on ports, even if there isn't per-port switching.
@@ -557,6 +624,12 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 			temp = xhci_readl(xhci, port_array[wIndex]);
 			xhci_dbg(xhci, "set port reset, actual port %d status = 0x%x\n", wIndex, temp);
 			break;
+		case USB_PORT_FEAT_BH_PORT_RESET:
+			temp |= PORT_WR;
+			xhci_writel(xhci, temp, port_array[wIndex]);
+
+			temp = xhci_readl(xhci, port_array[wIndex]);
+			break;
 		default:
 			goto error;
 		}
@@ -584,35 +657,27 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 			if (temp & XDEV_U3) {
 				if ((temp & PORT_PE) == 0)
 					goto error;
-				if (DEV_SUPERSPEED(temp)) {
-					temp = xhci_port_state_to_neutral(temp);
-					temp &= ~PORT_PLS_MASK;
-					temp |= PORT_LINK_STROBE | XDEV_U0;
-					xhci_writel(xhci, temp,
-							port_array[wIndex]);
-					xhci_readl(xhci, port_array[wIndex]);
-				} else {
-					temp = xhci_port_state_to_neutral(temp);
-					temp &= ~PORT_PLS_MASK;
-					temp |= PORT_LINK_STROBE | XDEV_RESUME;
-					xhci_writel(xhci, temp,
-							port_array[wIndex]);
 
-					spin_unlock_irqrestore(&xhci->lock,
-							flags);
-					msleep(20);
-					spin_lock_irqsave(&xhci->lock, flags);
+				temp = xhci_port_state_to_neutral(temp);
+				temp &= ~PORT_PLS_MASK;
+				temp |= PORT_LINK_STROBE | XDEV_RESUME;
+				xhci_writel(xhci, temp,
+						port_array[wIndex]);
 
-					temp = xhci_readl(xhci,
-							port_array[wIndex]);
-					temp = xhci_port_state_to_neutral(temp);
-					temp &= ~PORT_PLS_MASK;
-					temp |= PORT_LINK_STROBE | XDEV_U0;
-					xhci_writel(xhci, temp,
-							port_array[wIndex]);
-				}
-				bus_state->port_c_suspend |= 1 << wIndex;
+				spin_unlock_irqrestore(&xhci->lock,
+							flags);
+				msleep(20);
+				spin_lock_irqsave(&xhci->lock, flags);
+
+				temp = xhci_readl(xhci,
+						port_array[wIndex]);
+				temp = xhci_port_state_to_neutral(temp);
+				temp &= ~PORT_PLS_MASK;
+				temp |= PORT_LINK_STROBE | XDEV_U0;
+				xhci_writel(xhci, temp,
+						port_array[wIndex]);
 			}
+			bus_state->port_c_suspend |= 1 << wIndex;
 
 			slot_id = xhci_find_slot_id_by_port(hcd, xhci,
 					wIndex + 1);
@@ -625,9 +690,11 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 		case USB_PORT_FEAT_C_SUSPEND:
 			bus_state->port_c_suspend &= ~(1 << wIndex);
 		case USB_PORT_FEAT_C_RESET:
+		case USB_PORT_FEAT_C_BH_PORT_RESET:
 		case USB_PORT_FEAT_C_CONNECTION:
 		case USB_PORT_FEAT_C_OVER_CURRENT:
 		case USB_PORT_FEAT_C_ENABLE:
+		case USB_PORT_FEAT_C_PORT_LINK_STATE:
 			xhci_clear_port_change_bit(xhci, wValue, wIndex,
 					port_array[wIndex], temp);
 			break;
@@ -664,7 +731,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
 	int i, retval;
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 	int ports;
-	u32 __iomem **port_array;
+	__le32 __iomem **port_array;
 	struct xhci_bus_state *bus_state;
 
 	if (hcd->speed == HCD_USB3) {
@@ -681,7 +748,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
 	memset(buf, 0, retval);
 	status = 0;
 
-	mask = PORT_CSC | PORT_PEC | PORT_OCC;
+	mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC;
 
 	spin_lock_irqsave(&xhci->lock, flags);
 	/* For each port, did anything change? If so, set that bit in buf. */
@@ -709,7 +776,7 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
 {
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 	int max_ports, port_index;
-	u32 __iomem **port_array;
+	__le32 __iomem **port_array;
 	struct xhci_bus_state *bus_state;
 	unsigned long flags;
 
@@ -779,7 +846,7 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
 
 		if (DEV_HIGHSPEED(t1)) {
 			/* enable remote wake up for USB 2.0 */
-			u32 __iomem *addr;
+			__le32 __iomem *addr;
 			u32 tmp;
 
 			/* Add one to the port status register address to get
@@ -801,7 +868,7 @@ int xhci_bus_resume(struct usb_hcd *hcd)
 {
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 	int max_ports, port_index;
-	u32 __iomem **port_array;
+	__le32 __iomem **port_array;
 	struct xhci_bus_state *bus_state;
 	u32 temp;
 	unsigned long flags;
@@ -875,7 +942,7 @@ int xhci_bus_resume(struct usb_hcd *hcd)
 
 		if (DEV_HIGHSPEED(temp)) {
 			/* disable remote wake up for USB 2.0 */
-			u32 __iomem *addr;
+			__le32 __iomem *addr;
 			u32 tmp;
 
 			/* Add one to the port status register address to get
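
Every PORTSC read-modify-write added above goes through xhci_port_state_to_neutral() first: the register mixes ordinary read-write bits with write-1-to-clear change bits, so writing back a raw read would silently acknowledge pending change events. A standalone sketch of the idea; the masks are hypothetical, not the real xHCI bit layout:

#include <stdint.h>
#include <stdio.h>

#define PORT_RW_BITS	0x000000ffu	/* hypothetical read-write bits */
#define PORT_RW1C_BITS	0x00ff0000u	/* hypothetical write-1-to-clear bits */

static uint32_t port_state_to_neutral(uint32_t state)
{
	/* keep RW bits, drop RW1C bits so they are not acknowledged */
	return state & PORT_RW_BITS;
}

int main(void)
{
	uint32_t portsc = 0x00110011u;	/* RW bits set plus one pending change */
	uint32_t neutral = port_state_to_neutral(portsc);

	neutral |= 0x2;			/* the bit we actually want to set */

	printf("read 0x%08x, write back 0x%08x (change bit left pending)\n",
	       (unsigned int) portsc, (unsigned int) neutral);
	return 0;
}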
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 627f3438028c..a4fc4d929385 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -89,16 +89,17 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
 		return;
 	prev->next = next;
 	if (link_trbs) {
-		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = next->dma;
+		prev->trbs[TRBS_PER_SEGMENT-1].link.
+			segment_ptr = cpu_to_le64(next->dma);
 
 		/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
-		val = prev->trbs[TRBS_PER_SEGMENT-1].link.control;
+		val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
 		val &= ~TRB_TYPE_BITMASK;
 		val |= TRB_TYPE(TRB_LINK);
 		/* Always set the chain bit with 0.95 hardware */
 		if (xhci_link_trb_quirk(xhci))
 			val |= TRB_CHAIN;
-		prev->trbs[TRBS_PER_SEGMENT-1].link.control = val;
+		prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
 	}
 	xhci_dbg(xhci, "Linking segment 0x%llx to segment 0x%llx (DMA)\n",
 			(unsigned long long)prev->dma,
@@ -186,7 +187,8 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
 
 	if (link_trbs) {
 		/* See section 4.9.2.1 and 6.4.4.1 */
-		prev->trbs[TRBS_PER_SEGMENT-1].link.control |= (LINK_TOGGLE);
+		prev->trbs[TRBS_PER_SEGMENT-1].link.
+			control |= cpu_to_le32(LINK_TOGGLE);
 		xhci_dbg(xhci, "Wrote link toggle flag to"
 				" segment %p (virtual), 0x%llx (DMA)\n",
 				prev, (unsigned long long)prev->dma);
@@ -548,7 +550,8 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
 		addr = cur_ring->first_seg->dma |
 			SCT_FOR_CTX(SCT_PRI_TR) |
 			cur_ring->cycle_state;
-		stream_info->stream_ctx_array[cur_stream].stream_ring = addr;
+		stream_info->stream_ctx_array[cur_stream].
+			stream_ring = cpu_to_le64(addr);
 		xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n",
 				cur_stream, (unsigned long long) addr);
 
@@ -614,10 +617,10 @@ void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
 	max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
 	xhci_dbg(xhci, "Setting number of stream ctx array entries to %u\n",
 			1 << (max_primary_streams + 1));
-	ep_ctx->ep_info &= ~EP_MAXPSTREAMS_MASK;
-	ep_ctx->ep_info |= EP_MAXPSTREAMS(max_primary_streams);
-	ep_ctx->ep_info |= EP_HAS_LSA;
-	ep_ctx->deq = stream_info->ctx_array_dma;
+	ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
+	ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
+				       | EP_HAS_LSA);
+	ep_ctx->deq = cpu_to_le64(stream_info->ctx_array_dma);
 }
 
 /*
@@ -630,10 +633,9 @@ void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci,
 		struct xhci_virt_ep *ep)
 {
 	dma_addr_t addr;
-	ep_ctx->ep_info &= ~EP_MAXPSTREAMS_MASK;
-	ep_ctx->ep_info &= ~EP_HAS_LSA;
+	ep_ctx->ep_info &= cpu_to_le32(~(EP_MAXPSTREAMS_MASK | EP_HAS_LSA));
 	addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue);
-	ep_ctx->deq = addr | ep->ring->cycle_state;
+	ep_ctx->deq = cpu_to_le64(addr | ep->ring->cycle_state);
 }
 
 /* Frees all stream contexts associated with the endpoint,
@@ -781,11 +783,11 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 	dev->udev = udev;
 
 	/* Point to output device context in dcbaa. */
-	xhci->dcbaa->dev_context_ptrs[slot_id] = dev->out_ctx->dma;
+	xhci->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(dev->out_ctx->dma);
 	xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
 			slot_id,
 			&xhci->dcbaa->dev_context_ptrs[slot_id],
-			(unsigned long long) xhci->dcbaa->dev_context_ptrs[slot_id]);
+			(unsigned long long) le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));
 
 	return 1;
 fail:
@@ -810,8 +812,9 @@ void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
 	 * configured device has reset, so all control transfers should have
 	 * been completed or cancelled before the reset.
 	 */
-	ep0_ctx->deq = xhci_trb_virt_to_dma(ep_ring->enq_seg, ep_ring->enqueue);
-	ep0_ctx->deq |= ep_ring->cycle_state;
+	ep0_ctx->deq = cpu_to_le64(xhci_trb_virt_to_dma(ep_ring->enq_seg,
+							ep_ring->enqueue)
+				   | ep_ring->cycle_state);
 }
 
 /*
@@ -885,24 +888,22 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
 	slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);
 
 	/* 2) New slot context and endpoint 0 context are valid*/
-	ctrl_ctx->add_flags = SLOT_FLAG | EP0_FLAG;
+	ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
 
 	/* 3) Only the control endpoint is valid - one endpoint context */
-	slot_ctx->dev_info |= LAST_CTX(1);
-
-	slot_ctx->dev_info |= (u32) udev->route;
+	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | (u32) udev->route);
 	switch (udev->speed) {
 	case USB_SPEED_SUPER:
-		slot_ctx->dev_info |= (u32) SLOT_SPEED_SS;
+		slot_ctx->dev_info |= cpu_to_le32((u32) SLOT_SPEED_SS);
 		break;
 	case USB_SPEED_HIGH:
-		slot_ctx->dev_info |= (u32) SLOT_SPEED_HS;
+		slot_ctx->dev_info |= cpu_to_le32((u32) SLOT_SPEED_HS);
 		break;
 	case USB_SPEED_FULL:
-		slot_ctx->dev_info |= (u32) SLOT_SPEED_FS;
+		slot_ctx->dev_info |= cpu_to_le32((u32) SLOT_SPEED_FS);
 		break;
 	case USB_SPEED_LOW:
-		slot_ctx->dev_info |= (u32) SLOT_SPEED_LS;
+		slot_ctx->dev_info |= cpu_to_le32((u32) SLOT_SPEED_LS);
 		break;
 	case USB_SPEED_WIRELESS:
 		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
@@ -916,7 +917,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
 	port_num = xhci_find_real_port_number(xhci, udev);
 	if (!port_num)
 		return -EINVAL;
-	slot_ctx->dev_info2 |= (u32) ROOT_HUB_PORT(port_num);
+	slot_ctx->dev_info2 |= cpu_to_le32((u32) ROOT_HUB_PORT(port_num));
 	/* Set the port number in the virtual_device to the faked port number */
 	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
 			top_dev = top_dev->parent)
@@ -927,31 +928,31 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
 
 	/* Is this a LS/FS device under an external HS hub? */
 	if (udev->tt && udev->tt->hub->parent) {
-		slot_ctx->tt_info = udev->tt->hub->slot_id;
-		slot_ctx->tt_info |= udev->ttport << 8;
+		slot_ctx->tt_info = cpu_to_le32(udev->tt->hub->slot_id |
+						(udev->ttport << 8));
 		if (udev->tt->multi)
-			slot_ctx->dev_info |= DEV_MTT;
+			slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
 	}
 	xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
 	xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);
 
 	/* Step 4 - ring already allocated */
 	/* Step 5 */
-	ep0_ctx->ep_info2 = EP_TYPE(CTRL_EP);
+	ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP));
 	/*
 	 * XXX: Not sure about wireless USB devices.
 	 */
 	switch (udev->speed) {
 	case USB_SPEED_SUPER:
-		ep0_ctx->ep_info2 |= MAX_PACKET(512);
+		ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(512));
 		break;
 	case USB_SPEED_HIGH:
 	/* USB core guesses at a 64-byte max packet first for FS devices */
 	case USB_SPEED_FULL:
-		ep0_ctx->ep_info2 |= MAX_PACKET(64);
+		ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(64));
 		break;
 	case USB_SPEED_LOW:
-		ep0_ctx->ep_info2 |= MAX_PACKET(8);
+		ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(8));
 		break;
 	case USB_SPEED_WIRELESS:
 		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
@@ -962,12 +963,10 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
 		BUG();
 	}
 	/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
-	ep0_ctx->ep_info2 |= MAX_BURST(0);
-	ep0_ctx->ep_info2 |= ERROR_COUNT(3);
+	ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3));
 
-	ep0_ctx->deq =
-		dev->eps[0].ring->first_seg->dma;
-	ep0_ctx->deq |= dev->eps[0].ring->cycle_state;
+	ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma |
+				   dev->eps[0].ring->cycle_state);
 
 	/* Steps 7 and 8 were done in xhci_alloc_virt_device() */
 
@@ -1131,10 +1130,10 @@ static u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
 		return 0;
 
 	if (udev->speed == USB_SPEED_SUPER)
-		return ep->ss_ep_comp.wBytesPerInterval;
+		return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);
 
-	max_packet = GET_MAX_PACKET(ep->desc.wMaxPacketSize);
-	max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11;
+	max_packet = GET_MAX_PACKET(le16_to_cpu(ep->desc.wMaxPacketSize));
+	max_burst = (le16_to_cpu(ep->desc.wMaxPacketSize) & 0x1800) >> 11;
 	/* A 0 in max burst means 1 transfer per ESIT */
 	return max_packet * (max_burst + 1);
 }
@@ -1183,10 +1182,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 	}
 	virt_dev->eps[ep_index].skip = false;
 	ep_ring = virt_dev->eps[ep_index].new_ring;
-	ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state;
+	ep_ctx->deq = cpu_to_le64(ep_ring->first_seg->dma | ep_ring->cycle_state);
 
-	ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep);
-	ep_ctx->ep_info |= EP_MULT(xhci_get_endpoint_mult(udev, ep));
+	ep_ctx->ep_info = cpu_to_le32(xhci_get_endpoint_interval(udev, ep)
+				      | EP_MULT(xhci_get_endpoint_mult(udev, ep)));
 
 	/* FIXME dig Mult and streams info out of ep companion desc */
 
@@ -1194,22 +1193,22 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 	 * error count = 0 means infinite retries.
 	 */
 	if (!usb_endpoint_xfer_isoc(&ep->desc))
-		ep_ctx->ep_info2 = ERROR_COUNT(3);
+		ep_ctx->ep_info2 = cpu_to_le32(ERROR_COUNT(3));
 	else
-		ep_ctx->ep_info2 = ERROR_COUNT(1);
+		ep_ctx->ep_info2 = cpu_to_le32(ERROR_COUNT(1));
 
-	ep_ctx->ep_info2 |= xhci_get_endpoint_type(udev, ep);
+	ep_ctx->ep_info2 |= cpu_to_le32(xhci_get_endpoint_type(udev, ep));
 
 	/* Set the max packet size and max burst */
 	switch (udev->speed) {
 	case USB_SPEED_SUPER:
-		max_packet = ep->desc.wMaxPacketSize;
-		ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
+		max_packet = le16_to_cpu(ep->desc.wMaxPacketSize);
+		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet));
 		/* dig out max burst from ep companion desc */
 		max_packet = ep->ss_ep_comp.bMaxBurst;
 		if (!max_packet)
 			xhci_warn(xhci, "WARN no SS endpoint bMaxBurst\n");
-		ep_ctx->ep_info2 |= MAX_BURST(max_packet);
+		ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_packet));
 		break;
 	case USB_SPEED_HIGH:
 		/* bits 11:12 specify the number of additional transaction
@@ -1217,20 +1216,21 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 		 */
 		if (usb_endpoint_xfer_isoc(&ep->desc) ||
 				usb_endpoint_xfer_int(&ep->desc)) {
-			max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11;
-			ep_ctx->ep_info2 |= MAX_BURST(max_burst);
+			max_burst = (le16_to_cpu(ep->desc.wMaxPacketSize)
+				     & 0x1800) >> 11;
+			ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_burst));
 		}
 		/* Fall through */
 	case USB_SPEED_FULL:
 	case USB_SPEED_LOW:
-		max_packet = GET_MAX_PACKET(ep->desc.wMaxPacketSize);
-		ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
+		max_packet = GET_MAX_PACKET(le16_to_cpu(ep->desc.wMaxPacketSize));
+		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet));
 		break;
 	default:
 		BUG();
 	}
 	max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep);
-	ep_ctx->tx_info = MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload);
+	ep_ctx->tx_info = cpu_to_le32(MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload));
 
 	/*
 	 * XXX no idea how to calculate the average TRB buffer length for bulk
@@ -1247,7 +1247,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 	 * use Event Data TRBs, and we don't chain in a link TRB on short
 	 * transfers, we're basically dividing by 1.
	 */
-	ep_ctx->tx_info |= AVG_TRB_LENGTH_FOR_EP(max_esit_payload);
+	ep_ctx->tx_info |= cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(max_esit_payload));
 
 	/* FIXME Debug endpoint context */
 	return 0;
@@ -1347,7 +1347,7 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
 	if (!xhci->scratchpad->sp_dma_buffers)
 		goto fail_sp4;
 
-	xhci->dcbaa->dev_context_ptrs[0] = xhci->scratchpad->sp_dma;
+	xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
 	for (i = 0; i < num_sp; i++) {
 		dma_addr_t dma;
 		void *buf = pci_alloc_consistent(to_pci_dev(dev),
@@ -1724,7 +1724,7 @@ static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
 }
 
 static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
-		u32 __iomem *addr, u8 major_revision)
+		__le32 __iomem *addr, u8 major_revision)
 {
 	u32 temp, port_offset, port_count;
 	int i;
@@ -1789,7 +1789,7 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
  */
 static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
 {
-	u32 __iomem *addr;
+	__le32 __iomem *addr;
 	u32 offset;
 	unsigned int num_ports;
 	int i, port_index;
@@ -2042,8 +2042,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	/* set ring base address and size for each segment table entry */
 	for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
 		struct xhci_erst_entry *entry = &xhci->erst.entries[val];
-		entry->seg_addr = seg->dma;
-		entry->seg_size = TRBS_PER_SEGMENT;
+		entry->seg_addr = cpu_to_le64(seg->dma);
+		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
 		entry->rsvd = 0;
 		seg = seg->next;
 	}
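
xhci_setup_streams_ep_input_ctx() above computes the Max Primary Streams field as fls(stream_info->num_stream_ctxs) - 2, because the xHCI stream context array holds 2^(MaxPStreams + 1) entries. A standalone sketch of that arithmetic, with fls() emulated via a GCC builtin:

#include <stdio.h>

/* find last (most significant) set bit, 1-based; fls(0) == 0 */
static int fls(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	unsigned int n;

	for (n = 4; n <= 1024; n <<= 1) {
		int max_pstreams = fls(n) - 2;
		printf("num_stream_ctxs=%4u -> MaxPStreams=%d (array holds %u entries)\n",
		       n, max_pstreams, 1u << (max_pstreams + 1));
	}
	return 0;
}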
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 7437386a9a50..396f8d2a2e8d 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -100,7 +100,7 @@ static bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
 		return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
 			(seg->next == xhci->event_ring->first_seg);
 	else
-		return trb->link.control & LINK_TOGGLE;
+		return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
 }
 
 /* Is this TRB a link TRB or was the last TRB the last TRB in this event ring
@@ -113,13 +113,15 @@ static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
 	if (ring == xhci->event_ring)
 		return trb == &seg->trbs[TRBS_PER_SEGMENT];
 	else
-		return (trb->link.control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK);
+		return (le32_to_cpu(trb->link.control) & TRB_TYPE_BITMASK)
+			== TRB_TYPE(TRB_LINK);
 }
 
 static int enqueue_is_link_trb(struct xhci_ring *ring)
 {
 	struct xhci_link_trb *link = &ring->enqueue->link;
-	return ((link->control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK));
+	return ((le32_to_cpu(link->control) & TRB_TYPE_BITMASK) ==
+		TRB_TYPE(TRB_LINK));
 }
 
 /* Updates trb to point to the next TRB in the ring, and updates seg if the next
@@ -197,7 +199,7 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
 	union xhci_trb *next;
 	unsigned long long addr;
 
-	chain = ring->enqueue->generic.field[3] & TRB_CHAIN;
+	chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
 	next = ++(ring->enqueue);
 
 	ring->enq_updates++;
@@ -223,12 +225,14 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
 			 * (which may mean the chain bit is cleared).
 			 */
 			if (!xhci_link_trb_quirk(xhci)) {
-				next->link.control &= ~TRB_CHAIN;
-				next->link.control |= chain;
+				next->link.control &=
+					cpu_to_le32(~TRB_CHAIN);
+				next->link.control |=
+					cpu_to_le32(chain);
 			}
 			/* Give this link TRB to the hardware */
 			wmb();
-			next->link.control ^= TRB_CYCLE;
+			next->link.control ^= cpu_to_le32(TRB_CYCLE);
 		}
 		/* Toggle the cycle bit after the last ring segment. */
 		if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
@@ -319,7 +323,7 @@ void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
 		unsigned int ep_index,
 		unsigned int stream_id)
 {
-	__u32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
+	__le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
 	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
 	unsigned int ep_state = ep->ep_state;
 
@@ -380,7 +384,7 @@ static struct xhci_segment *find_trb_seg(
 	while (cur_seg->trbs > trb ||
 			&cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
 		generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
-		if (generic_trb->field[3] & LINK_TOGGLE)
+		if (le32_to_cpu(generic_trb->field[3]) & LINK_TOGGLE)
 			*cycle_state ^= 0x1;
 		cur_seg = cur_seg->next;
 		if (cur_seg == start_seg)
@@ -447,6 +451,10 @@ static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
  * any link TRBs with the toggle cycle bit set.
  * - Finally we move the dequeue state one TRB further, toggling the cycle bit
  * if we've moved it past a link TRB with the toggle cycle bit set.
+ *
+ * Some of the uses of xhci_generic_trb are grotty, but if they're done
+ * with correct __le32 accesses they should work fine. Only users of this are
+ * in here.
  */
 void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
 		unsigned int slot_id, unsigned int ep_index,
@@ -480,7 +488,7 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
 	/* Dig out the cycle state saved by the xHC during the stop ep cmd */
 	xhci_dbg(xhci, "Finding endpoint context\n");
 	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
-	state->new_cycle_state = 0x1 & ep_ctx->deq;
+	state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq);
 
 	state->new_deq_ptr = cur_td->last_trb;
 	xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n");
@@ -493,8 +501,8 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
 	}
 
 	trb = &state->new_deq_ptr->generic;
-	if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) &&
-			(trb->field[3] & LINK_TOGGLE))
+	if ((le32_to_cpu(trb->field[3]) & TRB_TYPE_BITMASK) ==
+	    TRB_TYPE(TRB_LINK) && (le32_to_cpu(trb->field[3]) & LINK_TOGGLE))
 		state->new_cycle_state ^= 0x1;
 	next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
 
@@ -529,12 +537,12 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
529 for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb; 537 for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
530 true; 538 true;
531 next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) { 539 next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
532 if ((cur_trb->generic.field[3] & TRB_TYPE_BITMASK) == 540 if ((le32_to_cpu(cur_trb->generic.field[3]) & TRB_TYPE_BITMASK)
533 TRB_TYPE(TRB_LINK)) { 541 == TRB_TYPE(TRB_LINK)) {
534 /* Unchain any chained Link TRBs, but 542 /* Unchain any chained Link TRBs, but
535 * leave the pointers intact. 543 * leave the pointers intact.
536 */ 544 */
537 cur_trb->generic.field[3] &= ~TRB_CHAIN; 545 cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN);
538 xhci_dbg(xhci, "Cancel (unchain) link TRB\n"); 546 xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
539 xhci_dbg(xhci, "Address = %p (0x%llx dma); " 547 xhci_dbg(xhci, "Address = %p (0x%llx dma); "
540 "in seg %p (0x%llx dma)\n", 548 "in seg %p (0x%llx dma)\n",
@@ -547,8 +555,9 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
547 cur_trb->generic.field[1] = 0; 555 cur_trb->generic.field[1] = 0;
548 cur_trb->generic.field[2] = 0; 556 cur_trb->generic.field[2] = 0;
549 /* Preserve only the cycle bit of this TRB */ 557 /* Preserve only the cycle bit of this TRB */
550 cur_trb->generic.field[3] &= TRB_CYCLE; 558 cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
551 cur_trb->generic.field[3] |= TRB_TYPE(TRB_TR_NOOP); 559 cur_trb->generic.field[3] |= cpu_to_le32(
560 TRB_TYPE(TRB_TR_NOOP));
552 xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) " 561 xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) "
553 "in seg %p (0x%llx dma)\n", 562 "in seg %p (0x%llx dma)\n",
554 cur_trb, 563 cur_trb,
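Condensed, td_to_noop() performs one of two read-modify-write updates per TRB of the cancelled TD, depending on whether the TRB is a link TRB. A sketch of one iteration (the helper is hypothetical; debug output and ring iteration omitted):

/* Hypothetical equivalent of one iteration of td_to_noop() */
static void cancel_one_trb(union xhci_trb *cur_trb)
{
	u32 f3 = le32_to_cpu(cur_trb->generic.field[3]);

	if ((f3 & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK)) {
		/* Link TRB: clear the chain bit, keep pointers and type */
		cur_trb->generic.field[3] = cpu_to_le32(f3 & ~TRB_CHAIN);
	} else {
		/* Transfer TRB: zero it, keep only the cycle bit, and
		 * retype it as a no-op so the xHC skips it harmlessly.
		 */
		cur_trb->generic.field[0] = 0;
		cur_trb->generic.field[1] = 0;
		cur_trb->generic.field[2] = 0;
		cur_trb->generic.field[3] = cpu_to_le32(
			(f3 & TRB_CYCLE) | TRB_TYPE(TRB_TR_NOOP));
	}
}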
@@ -662,9 +671,9 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
662 struct xhci_dequeue_state deq_state; 671 struct xhci_dequeue_state deq_state;
663 672
664 if (unlikely(TRB_TO_SUSPEND_PORT( 673 if (unlikely(TRB_TO_SUSPEND_PORT(
665 xhci->cmd_ring->dequeue->generic.field[3]))) { 674 le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])))) {
666 slot_id = TRB_TO_SLOT_ID( 675 slot_id = TRB_TO_SLOT_ID(
667 xhci->cmd_ring->dequeue->generic.field[3]); 676 le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]));
668 virt_dev = xhci->devs[slot_id]; 677 virt_dev = xhci->devs[slot_id];
669 if (virt_dev) 678 if (virt_dev)
670 handle_cmd_in_cmd_wait_list(xhci, virt_dev, 679 handle_cmd_in_cmd_wait_list(xhci, virt_dev,
@@ -677,8 +686,8 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
677 } 686 }
678 687
679 memset(&deq_state, 0, sizeof(deq_state)); 688 memset(&deq_state, 0, sizeof(deq_state));
680 slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]); 689 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
681 ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]); 690 ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
682 ep = &xhci->devs[slot_id]->eps[ep_index]; 691 ep = &xhci->devs[slot_id]->eps[ep_index];
683 692
684 if (list_empty(&ep->cancelled_td_list)) { 693 if (list_empty(&ep->cancelled_td_list)) {
@@ -910,9 +919,9 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
910 struct xhci_ep_ctx *ep_ctx; 919 struct xhci_ep_ctx *ep_ctx;
911 struct xhci_slot_ctx *slot_ctx; 920 struct xhci_slot_ctx *slot_ctx;
912 921
913 slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]); 922 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
914 ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]); 923 ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
915 stream_id = TRB_TO_STREAM_ID(trb->generic.field[2]); 924 stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
916 dev = xhci->devs[slot_id]; 925 dev = xhci->devs[slot_id];
917 926
918 ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id); 927 ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
@@ -928,11 +937,11 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
928 ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); 937 ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
929 slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx); 938 slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
930 939
931 if (GET_COMP_CODE(event->status) != COMP_SUCCESS) { 940 if (GET_COMP_CODE(le32_to_cpu(event->status)) != COMP_SUCCESS) {
932 unsigned int ep_state; 941 unsigned int ep_state;
933 unsigned int slot_state; 942 unsigned int slot_state;
934 943
935 switch (GET_COMP_CODE(event->status)) { 944 switch (GET_COMP_CODE(le32_to_cpu(event->status))) {
936 case COMP_TRB_ERR: 945 case COMP_TRB_ERR:
937 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because " 946 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because "
938 "of stream ID configuration\n"); 947 "of stream ID configuration\n");
@@ -940,9 +949,9 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
940 case COMP_CTX_STATE: 949 case COMP_CTX_STATE:
941 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due " 950 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due "
942 "to incorrect slot or ep state.\n"); 951 "to incorrect slot or ep state.\n");
943 ep_state = ep_ctx->ep_info; 952 ep_state = le32_to_cpu(ep_ctx->ep_info);
944 ep_state &= EP_STATE_MASK; 953 ep_state &= EP_STATE_MASK;
945 slot_state = slot_ctx->dev_state; 954 slot_state = le32_to_cpu(slot_ctx->dev_state);
946 slot_state = GET_SLOT_STATE(slot_state); 955 slot_state = GET_SLOT_STATE(slot_state);
947 xhci_dbg(xhci, "Slot state = %u, EP state = %u\n", 956 xhci_dbg(xhci, "Slot state = %u, EP state = %u\n",
948 slot_state, ep_state); 957 slot_state, ep_state);
@@ -954,7 +963,7 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
954 default: 963 default:
955 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown " 964 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown "
956 "completion code of %u.\n", 965 "completion code of %u.\n",
957 GET_COMP_CODE(event->status)); 966 GET_COMP_CODE(le32_to_cpu(event->status)));
958 break; 967 break;
959 } 968 }
960 /* OK what do we do now? The endpoint state is hosed, and we 969 /* OK what do we do now? The endpoint state is hosed, and we
@@ -965,10 +974,10 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
965 */ 974 */
966 } else { 975 } else {
967 xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n", 976 xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n",
968 ep_ctx->deq); 977 le64_to_cpu(ep_ctx->deq));
969 if (xhci_trb_virt_to_dma(dev->eps[ep_index].queued_deq_seg, 978 if (xhci_trb_virt_to_dma(dev->eps[ep_index].queued_deq_seg,
970 dev->eps[ep_index].queued_deq_ptr) == 979 dev->eps[ep_index].queued_deq_ptr) ==
971 (ep_ctx->deq & ~(EP_CTX_CYCLE_MASK))) { 980 (le64_to_cpu(ep_ctx->deq) & ~(EP_CTX_CYCLE_MASK))) {
972 /* Update the ring's dequeue segment and dequeue pointer 981 /* Update the ring's dequeue segment and dequeue pointer
973 * to reflect the new position. 982 * to reflect the new position.
974 */ 983 */
@@ -997,13 +1006,13 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci,
997 int slot_id; 1006 int slot_id;
998 unsigned int ep_index; 1007 unsigned int ep_index;
999 1008
1000 slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]); 1009 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
1001 ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]); 1010 ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
1002 /* This command will only fail if the endpoint wasn't halted, 1011 /* This command will only fail if the endpoint wasn't halted,
1003 * but we don't care. 1012 * but we don't care.
1004 */ 1013 */
1005 xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n", 1014 xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n",
1006 (unsigned int) GET_COMP_CODE(event->status)); 1015 (unsigned int) GET_COMP_CODE(le32_to_cpu(event->status)));
1007 1016
1008 /* HW with the reset endpoint quirk needs to have a configure endpoint 1017 /* HW with the reset endpoint quirk needs to have a configure endpoint
1009 * command complete before the endpoint can be used. Queue that here 1018 * command complete before the endpoint can be used. Queue that here
@@ -1040,8 +1049,7 @@ static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
1040 if (xhci->cmd_ring->dequeue != command->command_trb) 1049 if (xhci->cmd_ring->dequeue != command->command_trb)
1041 return 0; 1050 return 0;
1042 1051
1043 command->status = 1052 command->status = GET_COMP_CODE(le32_to_cpu(event->status));
1044 GET_COMP_CODE(event->status);
1045 list_del(&command->cmd_list); 1053 list_del(&command->cmd_list);
1046 if (command->completion) 1054 if (command->completion)
1047 complete(command->completion); 1055 complete(command->completion);
@@ -1053,7 +1061,7 @@ static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
1053static void handle_cmd_completion(struct xhci_hcd *xhci, 1061static void handle_cmd_completion(struct xhci_hcd *xhci,
1054 struct xhci_event_cmd *event) 1062 struct xhci_event_cmd *event)
1055{ 1063{
1056 int slot_id = TRB_TO_SLOT_ID(event->flags); 1064 int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
1057 u64 cmd_dma; 1065 u64 cmd_dma;
1058 dma_addr_t cmd_dequeue_dma; 1066 dma_addr_t cmd_dequeue_dma;
1059 struct xhci_input_control_ctx *ctrl_ctx; 1067 struct xhci_input_control_ctx *ctrl_ctx;
@@ -1062,7 +1070,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
1062 struct xhci_ring *ep_ring; 1070 struct xhci_ring *ep_ring;
1063 unsigned int ep_state; 1071 unsigned int ep_state;
1064 1072
1065 cmd_dma = event->cmd_trb; 1073 cmd_dma = le64_to_cpu(event->cmd_trb);
1066 cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, 1074 cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
1067 xhci->cmd_ring->dequeue); 1075 xhci->cmd_ring->dequeue);
1068 /* Is the command ring deq ptr out of sync with the deq seg ptr? */ 1076 /* Is the command ring deq ptr out of sync with the deq seg ptr? */
@@ -1075,9 +1083,10 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
1075 xhci->error_bitmask |= 1 << 5; 1083 xhci->error_bitmask |= 1 << 5;
1076 return; 1084 return;
1077 } 1085 }
1078 switch (xhci->cmd_ring->dequeue->generic.field[3] & TRB_TYPE_BITMASK) { 1086 switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])
1087 & TRB_TYPE_BITMASK) {
1079 case TRB_TYPE(TRB_ENABLE_SLOT): 1088 case TRB_TYPE(TRB_ENABLE_SLOT):
1080 if (GET_COMP_CODE(event->status) == COMP_SUCCESS) 1089 if (GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_SUCCESS)
1081 xhci->slot_id = slot_id; 1090 xhci->slot_id = slot_id;
1082 else 1091 else
1083 xhci->slot_id = 0; 1092 xhci->slot_id = 0;
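The same decode-once idiom applies to command completion events: convert each __le32/__le64 event field a single time, then work with the CPU-order values. A sketch of the field extraction this handler relies on (the function is hypothetical):

/* Hypothetical sketch: decode a command completion event. The xHC
 * stores every event TRB field little-endian.
 */
static void decode_cmd_event(struct xhci_event_cmd *event)
{
	u32 flags = le32_to_cpu(event->flags);
	int slot_id = TRB_TO_SLOT_ID(flags);
	u32 comp_code = GET_COMP_CODE(le32_to_cpu(event->status));
	u64 cmd_dma = le64_to_cpu(event->cmd_trb); /* command TRB address */

	printk(KERN_DEBUG "slot %d: completion code %u for TRB @%llx\n",
			slot_id, comp_code, cmd_dma);
}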
@@ -1102,7 +1111,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
1102 ctrl_ctx = xhci_get_input_control_ctx(xhci, 1111 ctrl_ctx = xhci_get_input_control_ctx(xhci,
1103 virt_dev->in_ctx); 1112 virt_dev->in_ctx);
1104 /* Input ctx add_flags are the endpoint index plus one */ 1113 /* Input ctx add_flags are the endpoint index plus one */
1105 ep_index = xhci_last_valid_endpoint(ctrl_ctx->add_flags) - 1; 1114 ep_index = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags)) - 1;
1106 /* A usb_set_interface() call directly after clearing a halted 1115 /* A usb_set_interface() call directly after clearing a halted
1107 * condition may race on this quirky hardware. Not worth 1116 * condition may race on this quirky hardware. Not worth
1108 * worrying about, since this is prototype hardware. Not sure 1117 * worrying about, since this is prototype hardware. Not sure
@@ -1111,8 +1120,8 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
1111 */ 1120 */
1112 if (xhci->quirks & XHCI_RESET_EP_QUIRK && 1121 if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
1113 ep_index != (unsigned int) -1 && 1122 ep_index != (unsigned int) -1 &&
1114 ctrl_ctx->add_flags - SLOT_FLAG == 1123 le32_to_cpu(ctrl_ctx->add_flags) - SLOT_FLAG ==
1115 ctrl_ctx->drop_flags) { 1124 le32_to_cpu(ctrl_ctx->drop_flags)) {
1116 ep_ring = xhci->devs[slot_id]->eps[ep_index].ring; 1125 ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
1117 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; 1126 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
1118 if (!(ep_state & EP_HALTED)) 1127 if (!(ep_state & EP_HALTED))
@@ -1129,18 +1138,18 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
1129bandwidth_change: 1138bandwidth_change:
1130 xhci_dbg(xhci, "Completed config ep cmd\n"); 1139 xhci_dbg(xhci, "Completed config ep cmd\n");
1131 xhci->devs[slot_id]->cmd_status = 1140 xhci->devs[slot_id]->cmd_status =
1132 GET_COMP_CODE(event->status); 1141 GET_COMP_CODE(le32_to_cpu(event->status));
1133 complete(&xhci->devs[slot_id]->cmd_completion); 1142 complete(&xhci->devs[slot_id]->cmd_completion);
1134 break; 1143 break;
1135 case TRB_TYPE(TRB_EVAL_CONTEXT): 1144 case TRB_TYPE(TRB_EVAL_CONTEXT):
1136 virt_dev = xhci->devs[slot_id]; 1145 virt_dev = xhci->devs[slot_id];
1137 if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event)) 1146 if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
1138 break; 1147 break;
1139 xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status); 1148 xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(le32_to_cpu(event->status));
1140 complete(&xhci->devs[slot_id]->cmd_completion); 1149 complete(&xhci->devs[slot_id]->cmd_completion);
1141 break; 1150 break;
1142 case TRB_TYPE(TRB_ADDR_DEV): 1151 case TRB_TYPE(TRB_ADDR_DEV):
1143 xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status); 1152 xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(le32_to_cpu(event->status));
1144 complete(&xhci->addr_dev); 1153 complete(&xhci->addr_dev);
1145 break; 1154 break;
1146 case TRB_TYPE(TRB_STOP_RING): 1155 case TRB_TYPE(TRB_STOP_RING):
@@ -1157,7 +1166,7 @@ bandwidth_change:
1157 case TRB_TYPE(TRB_RESET_DEV): 1166 case TRB_TYPE(TRB_RESET_DEV):
1158 xhci_dbg(xhci, "Completed reset device command.\n"); 1167 xhci_dbg(xhci, "Completed reset device command.\n");
1159 slot_id = TRB_TO_SLOT_ID( 1168 slot_id = TRB_TO_SLOT_ID(
1160 xhci->cmd_ring->dequeue->generic.field[3]); 1169 le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]));
1161 virt_dev = xhci->devs[slot_id]; 1170 virt_dev = xhci->devs[slot_id];
1162 if (virt_dev) 1171 if (virt_dev)
1163 handle_cmd_in_cmd_wait_list(xhci, virt_dev, event); 1172 handle_cmd_in_cmd_wait_list(xhci, virt_dev, event);
@@ -1171,8 +1180,8 @@ bandwidth_change:
1171 break; 1180 break;
1172 } 1181 }
1173 xhci_dbg(xhci, "NEC firmware version %2x.%02x\n", 1182 xhci_dbg(xhci, "NEC firmware version %2x.%02x\n",
1174 NEC_FW_MAJOR(event->status), 1183 NEC_FW_MAJOR(le32_to_cpu(event->status)),
1175 NEC_FW_MINOR(event->status)); 1184 NEC_FW_MINOR(le32_to_cpu(event->status)));
1176 break; 1185 break;
1177 default: 1186 default:
1178 /* Skip over unknown commands on the event ring */ 1187 /* Skip over unknown commands on the event ring */
@@ -1187,7 +1196,7 @@ static void handle_vendor_event(struct xhci_hcd *xhci,
1187{ 1196{
1188 u32 trb_type; 1197 u32 trb_type;
1189 1198
1190 trb_type = TRB_FIELD_TO_TYPE(event->generic.field[3]); 1199 trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->generic.field[3]));
1191 xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type); 1200 xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
1192 if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST)) 1201 if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
1193 handle_cmd_completion(xhci, &event->event_cmd); 1202 handle_cmd_completion(xhci, &event->event_cmd);
@@ -1241,15 +1250,15 @@ static void handle_port_status(struct xhci_hcd *xhci,
1241 unsigned int faked_port_index; 1250 unsigned int faked_port_index;
1242 u8 major_revision; 1251 u8 major_revision;
1243 struct xhci_bus_state *bus_state; 1252 struct xhci_bus_state *bus_state;
1244 u32 __iomem **port_array; 1253 __le32 __iomem **port_array;
1245 bool bogus_port_status = false; 1254 bool bogus_port_status = false;
1246 1255
1247 /* Port status change events always have a successful completion code */ 1256 /* Port status change events always have a successful completion code */
1248 if (GET_COMP_CODE(event->generic.field[2]) != COMP_SUCCESS) { 1257 if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) {
1249 xhci_warn(xhci, "WARN: xHC returned failed port status event\n"); 1258 xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
1250 xhci->error_bitmask |= 1 << 8; 1259 xhci->error_bitmask |= 1 << 8;
1251 } 1260 }
1252 port_id = GET_PORT_ID(event->generic.field[0]); 1261 port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
1253 xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id); 1262 xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);
1254 1263
1255 max_ports = HCS_MAX_PORTS(xhci->hcs_params1); 1264 max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
@@ -1456,7 +1465,7 @@ static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
1456 * endpoint anyway. Check if a babble halted the 1465 * endpoint anyway. Check if a babble halted the
1457 * endpoint. 1466 * endpoint.
1458 */ 1467 */
1459 if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_HALTED) 1468 if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) == EP_STATE_HALTED)
1460 return 1; 1469 return 1;
1461 1470
1462 return 0; 1471 return 0;
@@ -1494,12 +1503,12 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
1494 struct urb_priv *urb_priv; 1503 struct urb_priv *urb_priv;
1495 u32 trb_comp_code; 1504 u32 trb_comp_code;
1496 1505
1497 slot_id = TRB_TO_SLOT_ID(event->flags); 1506 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
1498 xdev = xhci->devs[slot_id]; 1507 xdev = xhci->devs[slot_id];
1499 ep_index = TRB_TO_EP_ID(event->flags) - 1; 1508 ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
1500 ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer); 1509 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
1501 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); 1510 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
1502 trb_comp_code = GET_COMP_CODE(event->transfer_len); 1511 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
1503 1512
1504 if (skip) 1513 if (skip)
1505 goto td_cleanup; 1514 goto td_cleanup;
@@ -1602,12 +1611,12 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
1602 struct xhci_ep_ctx *ep_ctx; 1611 struct xhci_ep_ctx *ep_ctx;
1603 u32 trb_comp_code; 1612 u32 trb_comp_code;
1604 1613
1605 slot_id = TRB_TO_SLOT_ID(event->flags); 1614 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
1606 xdev = xhci->devs[slot_id]; 1615 xdev = xhci->devs[slot_id];
1607 ep_index = TRB_TO_EP_ID(event->flags) - 1; 1616 ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
1608 ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer); 1617 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
1609 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); 1618 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
1610 trb_comp_code = GET_COMP_CODE(event->transfer_len); 1619 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
1611 1620
1612 xhci_debug_trb(xhci, xhci->event_ring->dequeue); 1621 xhci_debug_trb(xhci, xhci->event_ring->dequeue);
1613 switch (trb_comp_code) { 1622 switch (trb_comp_code) {
@@ -1646,7 +1655,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
1646 event_trb != td->last_trb) 1655 event_trb != td->last_trb)
1647 td->urb->actual_length = 1656 td->urb->actual_length =
1648 td->urb->transfer_buffer_length 1657 td->urb->transfer_buffer_length
1649 - TRB_LEN(event->transfer_len); 1658 - TRB_LEN(le32_to_cpu(event->transfer_len));
1650 else 1659 else
1651 td->urb->actual_length = 0; 1660 td->urb->actual_length = 0;
1652 1661
@@ -1680,7 +1689,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
1680 /* We didn't stop on a link TRB in the middle */ 1689 /* We didn't stop on a link TRB in the middle */
1681 td->urb->actual_length = 1690 td->urb->actual_length =
1682 td->urb->transfer_buffer_length - 1691 td->urb->transfer_buffer_length -
1683 TRB_LEN(event->transfer_len); 1692 TRB_LEN(le32_to_cpu(event->transfer_len));
1684 xhci_dbg(xhci, "Waiting for status " 1693 xhci_dbg(xhci, "Waiting for status "
1685 "stage event\n"); 1694 "stage event\n");
1686 return 0; 1695 return 0;
@@ -1708,8 +1717,8 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
1708 u32 trb_comp_code; 1717 u32 trb_comp_code;
1709 bool skip_td = false; 1718 bool skip_td = false;
1710 1719
1711 ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer); 1720 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
1712 trb_comp_code = GET_COMP_CODE(event->transfer_len); 1721 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
1713 urb_priv = td->urb->hcpriv; 1722 urb_priv = td->urb->hcpriv;
1714 idx = urb_priv->td_cnt; 1723 idx = urb_priv->td_cnt;
1715 frame = &td->urb->iso_frame_desc[idx]; 1724 frame = &td->urb->iso_frame_desc[idx];
@@ -1752,15 +1761,14 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
1752 for (cur_trb = ep_ring->dequeue, 1761 for (cur_trb = ep_ring->dequeue,
1753 cur_seg = ep_ring->deq_seg; cur_trb != event_trb; 1762 cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
1754 next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) { 1763 next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
1755 if ((cur_trb->generic.field[3] & 1764 if ((le32_to_cpu(cur_trb->generic.field[3]) &
1756 TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) && 1765 TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
1757 (cur_trb->generic.field[3] & 1766 (le32_to_cpu(cur_trb->generic.field[3]) &
1758 TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK)) 1767 TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
1759 len += 1768 len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
1760 TRB_LEN(cur_trb->generic.field[2]);
1761 } 1769 }
1762 len += TRB_LEN(cur_trb->generic.field[2]) - 1770 len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
1763 TRB_LEN(event->transfer_len); 1771 TRB_LEN(le32_to_cpu(event->transfer_len));
1764 1772
1765 if (trb_comp_code != COMP_STOP_INVAL) { 1773 if (trb_comp_code != COMP_STOP_INVAL) {
1766 frame->actual_length = len; 1774 frame->actual_length = len;
@@ -1815,8 +1823,8 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
1815 struct xhci_segment *cur_seg; 1823 struct xhci_segment *cur_seg;
1816 u32 trb_comp_code; 1824 u32 trb_comp_code;
1817 1825
1818 ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer); 1826 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
1819 trb_comp_code = GET_COMP_CODE(event->transfer_len); 1827 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
1820 1828
1821 switch (trb_comp_code) { 1829 switch (trb_comp_code) {
1822 case COMP_SUCCESS: 1830 case COMP_SUCCESS:
@@ -1852,18 +1860,18 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
1852 "%d bytes untransferred\n", 1860 "%d bytes untransferred\n",
1853 td->urb->ep->desc.bEndpointAddress, 1861 td->urb->ep->desc.bEndpointAddress,
1854 td->urb->transfer_buffer_length, 1862 td->urb->transfer_buffer_length,
1855 TRB_LEN(event->transfer_len)); 1863 TRB_LEN(le32_to_cpu(event->transfer_len)));
1856 /* Fast path - was this the last TRB in the TD for this URB? */ 1864 /* Fast path - was this the last TRB in the TD for this URB? */
1857 if (event_trb == td->last_trb) { 1865 if (event_trb == td->last_trb) {
1858 if (TRB_LEN(event->transfer_len) != 0) { 1866 if (TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
1859 td->urb->actual_length = 1867 td->urb->actual_length =
1860 td->urb->transfer_buffer_length - 1868 td->urb->transfer_buffer_length -
1861 TRB_LEN(event->transfer_len); 1869 TRB_LEN(le32_to_cpu(event->transfer_len));
1862 if (td->urb->transfer_buffer_length < 1870 if (td->urb->transfer_buffer_length <
1863 td->urb->actual_length) { 1871 td->urb->actual_length) {
1864 xhci_warn(xhci, "HC gave bad length " 1872 xhci_warn(xhci, "HC gave bad length "
1865 "of %d bytes left\n", 1873 "of %d bytes left\n",
1866 TRB_LEN(event->transfer_len)); 1874 TRB_LEN(le32_to_cpu(event->transfer_len)));
1867 td->urb->actual_length = 0; 1875 td->urb->actual_length = 0;
1868 if (td->urb->transfer_flags & URB_SHORT_NOT_OK) 1876 if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
1869 *status = -EREMOTEIO; 1877 *status = -EREMOTEIO;
@@ -1894,20 +1902,20 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
1894 for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg; 1902 for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
1895 cur_trb != event_trb; 1903 cur_trb != event_trb;
1896 next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) { 1904 next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
1897 if ((cur_trb->generic.field[3] & 1905 if ((le32_to_cpu(cur_trb->generic.field[3]) &
1898 TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) && 1906 TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
1899 (cur_trb->generic.field[3] & 1907 (le32_to_cpu(cur_trb->generic.field[3]) &
1900 TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK)) 1908 TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
1901 td->urb->actual_length += 1909 td->urb->actual_length +=
1902 TRB_LEN(cur_trb->generic.field[2]); 1910 TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
1903 } 1911 }
1904 /* If the ring didn't stop on a Link or No-op TRB, add 1912 /* If the ring didn't stop on a Link or No-op TRB, add
1905 * in the actual bytes transferred from the Normal TRB 1913 * in the actual bytes transferred from the Normal TRB
1906 */ 1914 */
1907 if (trb_comp_code != COMP_STOP_INVAL) 1915 if (trb_comp_code != COMP_STOP_INVAL)
1908 td->urb->actual_length += 1916 td->urb->actual_length +=
1909 TRB_LEN(cur_trb->generic.field[2]) - 1917 TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
1910 TRB_LEN(event->transfer_len); 1918 TRB_LEN(le32_to_cpu(event->transfer_len));
1911 } 1919 }
1912 1920
1913 return finish_td(xhci, td, event_trb, event, ep, status, false); 1921 return finish_td(xhci, td, event_trb, event, ep, status, false);
@@ -1937,7 +1945,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
1937 u32 trb_comp_code; 1945 u32 trb_comp_code;
1938 int ret = 0; 1946 int ret = 0;
1939 1947
1940 slot_id = TRB_TO_SLOT_ID(event->flags); 1948 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
1941 xdev = xhci->devs[slot_id]; 1949 xdev = xhci->devs[slot_id];
1942 if (!xdev) { 1950 if (!xdev) {
1943 xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n"); 1951 xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
@@ -1945,20 +1953,21 @@ static int handle_tx_event(struct xhci_hcd *xhci,
1945 } 1953 }
1946 1954
1947 /* Endpoint ID is 1 based, our index is zero based */ 1955 /* Endpoint ID is 1 based, our index is zero based */
1948 ep_index = TRB_TO_EP_ID(event->flags) - 1; 1956 ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
1949 xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index); 1957 xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index);
1950 ep = &xdev->eps[ep_index]; 1958 ep = &xdev->eps[ep_index];
1951 ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer); 1959 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
1952 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); 1960 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
1953 if (!ep_ring || 1961 if (!ep_ring ||
1954 (ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) { 1962 (le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
1963 EP_STATE_DISABLED) {
1955 xhci_err(xhci, "ERROR Transfer event for disabled endpoint " 1964 xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
1956 "or incorrect stream ring\n"); 1965 "or incorrect stream ring\n");
1957 return -ENODEV; 1966 return -ENODEV;
1958 } 1967 }
1959 1968
1960 event_dma = event->buffer; 1969 event_dma = le64_to_cpu(event->buffer);
1961 trb_comp_code = GET_COMP_CODE(event->transfer_len); 1970 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
1962 /* Look for common error cases */ 1971 /* Look for common error cases */
1963 switch (trb_comp_code) { 1972 switch (trb_comp_code) {
1964 /* Skip codes that require special handling depending on 1973 /* Skip codes that require special handling depending on
@@ -2011,14 +2020,16 @@ static int handle_tx_event(struct xhci_hcd *xhci,
2011 if (!list_empty(&ep_ring->td_list)) 2020 if (!list_empty(&ep_ring->td_list))
2012 xhci_dbg(xhci, "Underrun Event for slot %d ep %d " 2021 xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
2013 "still with TDs queued?\n", 2022 "still with TDs queued?\n",
2014 TRB_TO_SLOT_ID(event->flags), ep_index); 2023 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2024 ep_index);
2015 goto cleanup; 2025 goto cleanup;
2016 case COMP_OVERRUN: 2026 case COMP_OVERRUN:
2017 xhci_dbg(xhci, "overrun event on endpoint\n"); 2027 xhci_dbg(xhci, "overrun event on endpoint\n");
2018 if (!list_empty(&ep_ring->td_list)) 2028 if (!list_empty(&ep_ring->td_list))
2019 xhci_dbg(xhci, "Overrun Event for slot %d ep %d " 2029 xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
2020 "still with TDs queued?\n", 2030 "still with TDs queued?\n",
2021 TRB_TO_SLOT_ID(event->flags), ep_index); 2031 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2032 ep_index);
2022 goto cleanup; 2033 goto cleanup;
2023 case COMP_MISSED_INT: 2034 case COMP_MISSED_INT:
2024 /* 2035 /*
@@ -2047,9 +2058,11 @@ static int handle_tx_event(struct xhci_hcd *xhci,
2047 if (list_empty(&ep_ring->td_list)) { 2058 if (list_empty(&ep_ring->td_list)) {
2048 xhci_warn(xhci, "WARN Event TRB for slot %d ep %d " 2059 xhci_warn(xhci, "WARN Event TRB for slot %d ep %d "
2049 "with no TDs queued?\n", 2060 "with no TDs queued?\n",
2050 TRB_TO_SLOT_ID(event->flags), ep_index); 2061 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2062 ep_index);
2051 xhci_dbg(xhci, "Event TRB with TRB type ID %u\n", 2063 xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
2052 (unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10); 2064 (unsigned int) (le32_to_cpu(event->flags)
2065 & TRB_TYPE_BITMASK)>>10);
2053 xhci_print_trb_offsets(xhci, (union xhci_trb *) event); 2066 xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
2054 if (ep->skip) { 2067 if (ep->skip) {
2055 ep->skip = false; 2068 ep->skip = false;
@@ -2092,7 +2105,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
2092 * corresponding TD has been cancelled. Just ignore 2105 * corresponding TD has been cancelled. Just ignore
2093 * the TD. 2106 * the TD.
2094 */ 2107 */
2095 if ((event_trb->generic.field[3] & TRB_TYPE_BITMASK) 2108 if ((le32_to_cpu(event_trb->generic.field[3])
2109 & TRB_TYPE_BITMASK)
2096 == TRB_TYPE(TRB_TR_NOOP)) { 2110 == TRB_TYPE(TRB_TR_NOOP)) {
2097 xhci_dbg(xhci, 2111 xhci_dbg(xhci,
2098 "event_trb is a no-op TRB. Skip it\n"); 2112 "event_trb is a no-op TRB. Skip it\n");
@@ -2157,8 +2171,10 @@ cleanup:
2157/* 2171/*
2158 * This function handles all OS-owned events on the event ring. It may drop 2172 * This function handles all OS-owned events on the event ring. It may drop
2159 * xhci->lock between event processing (e.g. to pass up port status changes). 2173 * xhci->lock between event processing (e.g. to pass up port status changes).
2174 * Returns >0 for "possibly more events to process" (caller should call again),
 2175 * otherwise 0 if done. In future, <0 returns should indicate an error code.
2160 */ 2176 */
2161static void xhci_handle_event(struct xhci_hcd *xhci) 2177static int xhci_handle_event(struct xhci_hcd *xhci)
2162{ 2178{
2163 union xhci_trb *event; 2179 union xhci_trb *event;
2164 int update_ptrs = 1; 2180 int update_ptrs = 1;
@@ -2167,20 +2183,25 @@ static void xhci_handle_event(struct xhci_hcd *xhci)
2167 xhci_dbg(xhci, "In %s\n", __func__); 2183 xhci_dbg(xhci, "In %s\n", __func__);
2168 if (!xhci->event_ring || !xhci->event_ring->dequeue) { 2184 if (!xhci->event_ring || !xhci->event_ring->dequeue) {
2169 xhci->error_bitmask |= 1 << 1; 2185 xhci->error_bitmask |= 1 << 1;
2170 return; 2186 return 0;
2171 } 2187 }
2172 2188
2173 event = xhci->event_ring->dequeue; 2189 event = xhci->event_ring->dequeue;
2174 /* Does the HC or OS own the TRB? */ 2190 /* Does the HC or OS own the TRB? */
2175 if ((event->event_cmd.flags & TRB_CYCLE) != 2191 if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
2176 xhci->event_ring->cycle_state) { 2192 xhci->event_ring->cycle_state) {
2177 xhci->error_bitmask |= 1 << 2; 2193 xhci->error_bitmask |= 1 << 2;
2178 return; 2194 return 0;
2179 } 2195 }
2180 xhci_dbg(xhci, "%s - OS owns TRB\n", __func__); 2196 xhci_dbg(xhci, "%s - OS owns TRB\n", __func__);
2181 2197
2198 /*
2199 * Barrier between reading the TRB_CYCLE (valid) flag above and any
2200 * speculative reads of the event's flags/data below.
2201 */
2202 rmb();
2182 /* FIXME: Handle more event types. */ 2203 /* FIXME: Handle more event types. */
2183 switch ((event->event_cmd.flags & TRB_TYPE_BITMASK)) { 2204 switch ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK)) {
2184 case TRB_TYPE(TRB_COMPLETION): 2205 case TRB_TYPE(TRB_COMPLETION):
2185 xhci_dbg(xhci, "%s - calling handle_cmd_completion\n", __func__); 2206 xhci_dbg(xhci, "%s - calling handle_cmd_completion\n", __func__);
2186 handle_cmd_completion(xhci, &event->event_cmd); 2207 handle_cmd_completion(xhci, &event->event_cmd);
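The rmb() added above matters because TRB ownership is published through the cycle bit: the xHC writes the event payload first and flips TRB_CYCLE last, so the CPU must not let speculative reads of the payload overtake the ownership check. Reduced to its essentials, the consumer side looks like this (process_event_payload() is a hypothetical stand-in for the type switch above):

static int poll_one_event(struct xhci_hcd *xhci)
{
	union xhci_trb *event = xhci->event_ring->dequeue;

	/* The cycle bit alone says whether the xHC finished this TRB */
	if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
			xhci->event_ring->cycle_state)
		return 0;	/* TRB still owned by the hardware */

	rmb();	/* order the ownership check before the payload reads */

	process_event_payload(event);	/* reads flags/status/buffer */
	return 1;
}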
@@ -2202,7 +2223,8 @@ static void xhci_handle_event(struct xhci_hcd *xhci)
2202 update_ptrs = 0; 2223 update_ptrs = 0;
2203 break; 2224 break;
2204 default: 2225 default:
2205 if ((event->event_cmd.flags & TRB_TYPE_BITMASK) >= TRB_TYPE(48)) 2226 if ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) >=
2227 TRB_TYPE(48))
2206 handle_vendor_event(xhci, event); 2228 handle_vendor_event(xhci, event);
2207 else 2229 else
2208 xhci->error_bitmask |= 1 << 3; 2230 xhci->error_bitmask |= 1 << 3;
@@ -2213,15 +2235,17 @@ static void xhci_handle_event(struct xhci_hcd *xhci)
2213 if (xhci->xhc_state & XHCI_STATE_DYING) { 2235 if (xhci->xhc_state & XHCI_STATE_DYING) {
2214 xhci_dbg(xhci, "xHCI host dying, returning from " 2236 xhci_dbg(xhci, "xHCI host dying, returning from "
2215 "event handler.\n"); 2237 "event handler.\n");
2216 return; 2238 return 0;
2217 } 2239 }
2218 2240
2219 if (update_ptrs) 2241 if (update_ptrs)
2220 /* Update SW event ring dequeue pointer */ 2242 /* Update SW event ring dequeue pointer */
2221 inc_deq(xhci, xhci->event_ring, true); 2243 inc_deq(xhci, xhci->event_ring, true);
2222 2244
2223 /* Are there more items on the event ring? */ 2245 /* Are there more items on the event ring? Caller will call us again to
2224 xhci_handle_event(xhci); 2246 * check.
2247 */
2248 return 1;
2225} 2249}
2226 2250
2227/* 2251/*
@@ -2252,12 +2276,12 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
2252 xhci_dbg(xhci, "op reg status = %08x\n", status); 2276 xhci_dbg(xhci, "op reg status = %08x\n", status);
2253 xhci_dbg(xhci, "Event ring dequeue ptr:\n"); 2277 xhci_dbg(xhci, "Event ring dequeue ptr:\n");
2254 xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n", 2278 xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n",
2255 (unsigned long long) 2279 (unsigned long long)
2256 xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb), 2280 xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb),
2257 lower_32_bits(trb->link.segment_ptr), 2281 lower_32_bits(le64_to_cpu(trb->link.segment_ptr)),
2258 upper_32_bits(trb->link.segment_ptr), 2282 upper_32_bits(le64_to_cpu(trb->link.segment_ptr)),
2259 (unsigned int) trb->link.intr_target, 2283 (unsigned int) le32_to_cpu(trb->link.intr_target),
2260 (unsigned int) trb->link.control); 2284 (unsigned int) le32_to_cpu(trb->link.control));
2261 2285
2262 if (status & STS_FATAL) { 2286 if (status & STS_FATAL) {
2263 xhci_warn(xhci, "WARNING: Host System Error\n"); 2287 xhci_warn(xhci, "WARNING: Host System Error\n");
@@ -2303,7 +2327,7 @@ hw_died:
2303 /* FIXME this should be a delayed service routine 2327 /* FIXME this should be a delayed service routine
2304 * that clears the EHB. 2328 * that clears the EHB.
2305 */ 2329 */
2306 xhci_handle_event(xhci); 2330 while (xhci_handle_event(xhci) > 0) {}
2307 2331
2308 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); 2332 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
2309 /* If necessary, update the HW's version of the event ring deq ptr. */ 2333 /* If necessary, update the HW's version of the event ring deq ptr. */
@@ -2358,10 +2382,10 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
2358 struct xhci_generic_trb *trb; 2382 struct xhci_generic_trb *trb;
2359 2383
2360 trb = &ring->enqueue->generic; 2384 trb = &ring->enqueue->generic;
2361 trb->field[0] = field1; 2385 trb->field[0] = cpu_to_le32(field1);
2362 trb->field[1] = field2; 2386 trb->field[1] = cpu_to_le32(field2);
2363 trb->field[2] = field3; 2387 trb->field[2] = cpu_to_le32(field3);
2364 trb->field[3] = field4; 2388 trb->field[3] = cpu_to_le32(field4);
2365 inc_enq(xhci, ring, consumer, more_trbs_coming); 2389 inc_enq(xhci, ring, consumer, more_trbs_coming);
2366} 2390}
2367 2391
@@ -2414,17 +2438,16 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
2414 next = ring->enqueue; 2438 next = ring->enqueue;
2415 2439
2416 while (last_trb(xhci, ring, ring->enq_seg, next)) { 2440 while (last_trb(xhci, ring, ring->enq_seg, next)) {
2417
2418 /* If we're not dealing with 0.95 hardware, 2441 /* If we're not dealing with 0.95 hardware,
2419 * clear the chain bit. 2442 * clear the chain bit.
2420 */ 2443 */
2421 if (!xhci_link_trb_quirk(xhci)) 2444 if (!xhci_link_trb_quirk(xhci))
2422 next->link.control &= ~TRB_CHAIN; 2445 next->link.control &= cpu_to_le32(~TRB_CHAIN);
2423 else 2446 else
2424 next->link.control |= TRB_CHAIN; 2447 next->link.control |= cpu_to_le32(TRB_CHAIN);
2425 2448
2426 wmb(); 2449 wmb();
2427 next->link.control ^= (u32) TRB_CYCLE; 2450 next->link.control ^= cpu_to_le32((u32) TRB_CYCLE);
2428 2451
2429 /* Toggle the cycle bit after the last ring segment. */ 2452 /* Toggle the cycle bit after the last ring segment. */
2430 if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) { 2453 if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
@@ -2467,8 +2490,8 @@ static int prepare_transfer(struct xhci_hcd *xhci,
2467 } 2490 }
2468 2491
2469 ret = prepare_ring(xhci, ep_ring, 2492 ret = prepare_ring(xhci, ep_ring,
2470 ep_ctx->ep_info & EP_STATE_MASK, 2493 le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
2471 num_trbs, mem_flags); 2494 num_trbs, mem_flags);
2472 if (ret) 2495 if (ret)
2473 return ret; 2496 return ret;
2474 2497
@@ -2570,9 +2593,9 @@ static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
2570 */ 2593 */
2571 wmb(); 2594 wmb();
2572 if (start_cycle) 2595 if (start_cycle)
2573 start_trb->field[3] |= start_cycle; 2596 start_trb->field[3] |= cpu_to_le32(start_cycle);
2574 else 2597 else
2575 start_trb->field[3] &= ~0x1; 2598 start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
2576 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id); 2599 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
2577} 2600}
2578 2601
@@ -2590,7 +2613,7 @@ int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2590 int xhci_interval; 2613 int xhci_interval;
2591 int ep_interval; 2614 int ep_interval;
2592 2615
2593 xhci_interval = EP_INTERVAL_TO_UFRAMES(ep_ctx->ep_info); 2616 xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
2594 ep_interval = urb->interval; 2617 ep_interval = urb->interval;
2595 /* Convert to microframes */ 2618 /* Convert to microframes */
2596 if (urb->dev->speed == USB_SPEED_LOW || 2619 if (urb->dev->speed == USB_SPEED_LOW ||
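As a worked example of that conversion: EP_INTERVAL_TO_UFRAMES() expands to 1 << interval, where interval is bits 23:16 of ep_info. For a hypothetical interrupt endpoint whose context interval field holds 3, xhci_interval = 1 << 3 = 8 microframes (1 ms); a full-speed URB submitted with urb->interval = 1 frame is converted to 8 microframes by the multiply that follows this branch, so the two values agree and no interval warning is printed.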
@@ -2632,6 +2655,35 @@ static u32 xhci_td_remainder(unsigned int remainder)
2632 return (remainder >> 10) << 17; 2655 return (remainder >> 10) << 17;
2633} 2656}
2634 2657
2658/*
2659 * For xHCI 1.0 host controllers, TD size is the number of packets remaining in
2660 * the TD (*not* including this TRB).
2661 *
2662 * Total TD packet count = total_packet_count =
 2663 * DIV_ROUND_UP(TD size in bytes, wMaxPacketSize)
2664 *
2665 * Packets transferred up to and including this TRB = packets_transferred =
2666 * rounddown(total bytes transferred including this TRB / wMaxPacketSize)
2667 *
2668 * TD size = total_packet_count - packets_transferred
2669 *
2670 * It must fit in bits 21:17, so it can't be bigger than 31.
2671 */
2672
2673static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
2674 unsigned int total_packet_count, struct urb *urb)
2675{
2676 int packets_transferred;
2677
2678 /* All the TRB queueing functions don't count the current TRB in
2679 * running_total.
2680 */
2681 packets_transferred = (running_total + trb_buff_len) /
2682 le16_to_cpu(urb->ep->desc.wMaxPacketSize);
2683
 2684 return min_t(u32, total_packet_count - packets_transferred, 31) << 17;
2685}
2686
2635static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, 2687static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2636 struct urb *urb, int slot_id, unsigned int ep_index) 2688 struct urb *urb, int slot_id, unsigned int ep_index)
2637{ 2689{
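To make the new TD size rule concrete, take a hypothetical 3072-byte bulk transfer on an endpoint with wMaxPacketSize = 1024: total_packet_count = DIV_ROUND_UP(3072, 1024) = 3. When queueing the second of three 1024-byte TRBs, running_total = 1024 and trb_buff_len = 1024, so packets_transferred = (1024 + 1024) / 1024 = 2 and xhci_v1_0_td_remainder() reports 3 - 2 = 1 packet still to go, encoded into bits 21:17 of the length field.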
@@ -2642,6 +2694,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2642 struct scatterlist *sg; 2694 struct scatterlist *sg;
2643 int num_sgs; 2695 int num_sgs;
2644 int trb_buff_len, this_sg_len, running_total; 2696 int trb_buff_len, this_sg_len, running_total;
2697 unsigned int total_packet_count;
2645 bool first_trb; 2698 bool first_trb;
2646 u64 addr; 2699 u64 addr;
2647 bool more_trbs_coming; 2700 bool more_trbs_coming;
@@ -2655,6 +2708,8 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2655 2708
2656 num_trbs = count_sg_trbs_needed(xhci, urb); 2709 num_trbs = count_sg_trbs_needed(xhci, urb);
2657 num_sgs = urb->num_sgs; 2710 num_sgs = urb->num_sgs;
 2711 total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
2712 le16_to_cpu(urb->ep->desc.wMaxPacketSize));
2658 2713
2659 trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id], 2714 trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
2660 ep_index, urb->stream_id, 2715 ep_index, urb->stream_id,
@@ -2718,6 +2773,11 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2718 td->last_trb = ep_ring->enqueue; 2773 td->last_trb = ep_ring->enqueue;
2719 field |= TRB_IOC; 2774 field |= TRB_IOC;
2720 } 2775 }
2776
2777 /* Only set interrupt on short packet for IN endpoints */
2778 if (usb_urb_dir_in(urb))
2779 field |= TRB_ISP;
2780
2721 xhci_dbg(xhci, " sg entry: dma = %#x, len = %#x (%d), " 2781 xhci_dbg(xhci, " sg entry: dma = %#x, len = %#x (%d), "
2722 "64KB boundary at %#x, end dma = %#x\n", 2782 "64KB boundary at %#x, end dma = %#x\n",
2723 (unsigned int) addr, trb_buff_len, trb_buff_len, 2783 (unsigned int) addr, trb_buff_len, trb_buff_len,
@@ -2730,11 +2790,20 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2730 (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1), 2790 (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
2731 (unsigned int) addr + trb_buff_len); 2791 (unsigned int) addr + trb_buff_len);
2732 } 2792 }
2733 remainder = xhci_td_remainder(urb->transfer_buffer_length - 2793
2734 running_total) ; 2794 /* Set the TRB length, TD size, and interrupter fields. */
2795 if (xhci->hci_version < 0x100) {
2796 remainder = xhci_td_remainder(
2797 urb->transfer_buffer_length -
2798 running_total);
2799 } else {
2800 remainder = xhci_v1_0_td_remainder(running_total,
2801 trb_buff_len, total_packet_count, urb);
2802 }
2735 length_field = TRB_LEN(trb_buff_len) | 2803 length_field = TRB_LEN(trb_buff_len) |
2736 remainder | 2804 remainder |
2737 TRB_INTR_TARGET(0); 2805 TRB_INTR_TARGET(0);
2806
2738 if (num_trbs > 1) 2807 if (num_trbs > 1)
2739 more_trbs_coming = true; 2808 more_trbs_coming = true;
2740 else 2809 else
@@ -2743,12 +2812,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2743 lower_32_bits(addr), 2812 lower_32_bits(addr),
2744 upper_32_bits(addr), 2813 upper_32_bits(addr),
2745 length_field, 2814 length_field,
2746 /* We always want to know if the TRB was short, 2815 field | TRB_TYPE(TRB_NORMAL));
2747 * or we won't get an event when it completes.
2748 * (Unless we use event data TRBs, which are a
2749 * waste of space and HC resources.)
2750 */
2751 field | TRB_ISP | TRB_TYPE(TRB_NORMAL));
2752 --num_trbs; 2816 --num_trbs;
2753 running_total += trb_buff_len; 2817 running_total += trb_buff_len;
2754 2818
@@ -2796,6 +2860,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2796 u32 field, length_field; 2860 u32 field, length_field;
2797 2861
2798 int running_total, trb_buff_len, ret; 2862 int running_total, trb_buff_len, ret;
2863 unsigned int total_packet_count;
2799 u64 addr; 2864 u64 addr;
2800 2865
2801 if (urb->num_sgs) 2866 if (urb->num_sgs)
@@ -2850,6 +2915,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2850 start_cycle = ep_ring->cycle_state; 2915 start_cycle = ep_ring->cycle_state;
2851 2916
2852 running_total = 0; 2917 running_total = 0;
 2918 total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
2919 le16_to_cpu(urb->ep->desc.wMaxPacketSize));
2853 /* How much data is in the first TRB? */ 2920 /* How much data is in the first TRB? */
2854 addr = (u64) urb->transfer_dma; 2921 addr = (u64) urb->transfer_dma;
2855 trb_buff_len = TRB_MAX_BUFF_SIZE - 2922 trb_buff_len = TRB_MAX_BUFF_SIZE -
@@ -2882,11 +2949,24 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2882 td->last_trb = ep_ring->enqueue; 2949 td->last_trb = ep_ring->enqueue;
2883 field |= TRB_IOC; 2950 field |= TRB_IOC;
2884 } 2951 }
2885 remainder = xhci_td_remainder(urb->transfer_buffer_length - 2952
2886 running_total); 2953 /* Only set interrupt on short packet for IN endpoints */
2954 if (usb_urb_dir_in(urb))
2955 field |= TRB_ISP;
2956
2957 /* Set the TRB length, TD size, and interrupter fields. */
2958 if (xhci->hci_version < 0x100) {
2959 remainder = xhci_td_remainder(
2960 urb->transfer_buffer_length -
2961 running_total);
2962 } else {
2963 remainder = xhci_v1_0_td_remainder(running_total,
2964 trb_buff_len, total_packet_count, urb);
2965 }
2887 length_field = TRB_LEN(trb_buff_len) | 2966 length_field = TRB_LEN(trb_buff_len) |
2888 remainder | 2967 remainder |
2889 TRB_INTR_TARGET(0); 2968 TRB_INTR_TARGET(0);
2969
2890 if (num_trbs > 1) 2970 if (num_trbs > 1)
2891 more_trbs_coming = true; 2971 more_trbs_coming = true;
2892 else 2972 else
@@ -2895,12 +2975,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2895 lower_32_bits(addr), 2975 lower_32_bits(addr),
2896 upper_32_bits(addr), 2976 upper_32_bits(addr),
2897 length_field, 2977 length_field,
2898 /* We always want to know if the TRB was short, 2978 field | TRB_TYPE(TRB_NORMAL));
2899 * or we won't get an event when it completes.
2900 * (Unless we use event data TRBs, which are a
2901 * waste of space and HC resources.)
2902 */
2903 field | TRB_ISP | TRB_TYPE(TRB_NORMAL));
2904 --num_trbs; 2979 --num_trbs;
2905 running_total += trb_buff_len; 2980 running_total += trb_buff_len;
2906 2981
@@ -2979,15 +3054,19 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2979 if (start_cycle == 0) 3054 if (start_cycle == 0)
2980 field |= 0x1; 3055 field |= 0x1;
2981 queue_trb(xhci, ep_ring, false, true, 3056 queue_trb(xhci, ep_ring, false, true,
2982 /* FIXME endianness is probably going to bite my ass here. */ 3057 setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
2983 setup->bRequestType | setup->bRequest << 8 | setup->wValue << 16, 3058 le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
2984 setup->wIndex | setup->wLength << 16, 3059 TRB_LEN(8) | TRB_INTR_TARGET(0),
2985 TRB_LEN(8) | TRB_INTR_TARGET(0), 3060 /* Immediate data in pointer */
2986 /* Immediate data in pointer */ 3061 field);
2987 field);
2988 3062
2989 /* If there's data, queue data TRBs */ 3063 /* If there's data, queue data TRBs */
2990 field = 0; 3064 /* Only set interrupt on short packet for IN endpoints */
3065 if (usb_urb_dir_in(urb))
3066 field = TRB_ISP | TRB_TYPE(TRB_DATA);
3067 else
3068 field = TRB_TYPE(TRB_DATA);
3069
2991 length_field = TRB_LEN(urb->transfer_buffer_length) | 3070 length_field = TRB_LEN(urb->transfer_buffer_length) |
2992 xhci_td_remainder(urb->transfer_buffer_length) | 3071 xhci_td_remainder(urb->transfer_buffer_length) |
2993 TRB_INTR_TARGET(0); 3072 TRB_INTR_TARGET(0);
@@ -2998,8 +3077,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2998 lower_32_bits(urb->transfer_dma), 3077 lower_32_bits(urb->transfer_dma),
2999 upper_32_bits(urb->transfer_dma), 3078 upper_32_bits(urb->transfer_dma),
3000 length_field, 3079 length_field,
3001 /* Event on short tx */ 3080 field | ep_ring->cycle_state);
3002 field | TRB_ISP | TRB_TYPE(TRB_DATA) | ep_ring->cycle_state);
3003 } 3081 }
3004 3082
3005 /* Save the DMA address of the last TRB in the TD */ 3083 /* Save the DMA address of the last TRB in the TD */
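Because queue_trb() now byte-swaps every parameter word itself, the setup packet's multi-byte members must first be brought into CPU order, exactly as the hunk above does. A sketch with a hypothetical GET_DESCRIPTOR(device) request shows how the 8-byte usb_ctrlrequest packs into the two 32-bit parameter words:

/* Hypothetical example; queue_trb() applies cpu_to_le32() to each
 * word, so both are built in CPU byte order here.
 */
static void build_setup_fields(u32 *field0, u32 *field1)
{
	struct usb_ctrlrequest setup = {
		.bRequestType = USB_DIR_IN,
		.bRequest     = USB_REQ_GET_DESCRIPTOR,
		.wValue       = cpu_to_le16(USB_DT_DEVICE << 8),
		.wIndex       = cpu_to_le16(0),
		.wLength      = cpu_to_le16(18),	/* device descriptor */
	};

	*field0 = setup.bRequestType | setup.bRequest << 8 |
		  le16_to_cpu(setup.wValue) << 16;
	*field1 = le16_to_cpu(setup.wIndex) |
		  le16_to_cpu(setup.wLength) << 16;
}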
@@ -3045,6 +3123,63 @@ static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
3045 return num_trbs; 3123 return num_trbs;
3046} 3124}
3047 3125
3126/*
3127 * The transfer burst count field of the isochronous TRB defines the number of
3128 * bursts that are required to move all packets in this TD. Only SuperSpeed
3129 * devices can burst up to bMaxBurst number of packets per service interval.
3130 * This field is zero based, meaning a value of zero in the field means one
3131 * burst. Basically, for everything but SuperSpeed devices, this field will be
3132 * zero. Only xHCI 1.0 host controllers support this field.
3133 */
3134static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
3135 struct usb_device *udev,
3136 struct urb *urb, unsigned int total_packet_count)
3137{
3138 unsigned int max_burst;
3139
3140 if (xhci->hci_version < 0x100 || udev->speed != USB_SPEED_SUPER)
3141 return 0;
3142
3143 max_burst = urb->ep->ss_ep_comp.bMaxBurst;
 3144 return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
3145}
3146
3147/*
3148 * Returns the number of packets in the last "burst" of packets. This field is
3149 * valid for all speeds of devices. USB 2.0 devices can only do one "burst", so
3150 * the last burst packet count is equal to the total number of packets in the
3151 * TD. SuperSpeed endpoints can have up to 3 bursts. All but the last burst
3152 * must contain (bMaxBurst + 1) number of packets, but the last burst can
3153 * contain 1 to (bMaxBurst + 1) packets.
3154 */
3155static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
3156 struct usb_device *udev,
3157 struct urb *urb, unsigned int total_packet_count)
3158{
3159 unsigned int max_burst;
3160 unsigned int residue;
3161
3162 if (xhci->hci_version < 0x100)
3163 return 0;
3164
3165 switch (udev->speed) {
3166 case USB_SPEED_SUPER:
3167 /* bMaxBurst is zero based: 0 means 1 packet per burst */
3168 max_burst = urb->ep->ss_ep_comp.bMaxBurst;
3169 residue = total_packet_count % (max_burst + 1);
3170 /* If residue is zero, the last burst contains (max_burst + 1)
3171 * number of packets, but the TLBPC field is zero-based.
3172 */
3173 if (residue == 0)
3174 return max_burst;
3175 return residue - 1;
3176 default:
3177 if (total_packet_count == 0)
3178 return 0;
3179 return total_packet_count - 1;
3180 }
3181}
3182
3048/* This is for isoc transfer */ 3183/* This is for isoc transfer */
3049static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, 3184static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3050 struct urb *urb, int slot_id, unsigned int ep_index) 3185 struct urb *urb, int slot_id, unsigned int ep_index)
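A worked example for the two helpers above, assuming a hypothetical SuperSpeed isoc endpoint with bMaxBurst = 2 (three packets per burst) and a TD of 7 packets on an xHCI 1.0 host: the transfer burst count is DIV_ROUND_UP(7, 3) - 1 = 2, that is, three bursts encoded zero-based; and since 7 % 3 = 1, the last burst carries a single packet, so the TLBPC field is 1 - 1 = 0. For the same TD on a high-speed device the burst count is 0 and the last-burst packet count is 7 - 1 = 6, because USB 2.0 devices complete the whole TD in one "burst".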
@@ -3085,12 +3220,22 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3085 3220
3086 /* Queue the first TRB, even if it's zero-length */ 3221 /* Queue the first TRB, even if it's zero-length */
3087 for (i = 0; i < num_tds; i++) { 3222 for (i = 0; i < num_tds; i++) {
3088 first_trb = true; 3223 unsigned int total_packet_count;
3224 unsigned int burst_count;
3225 unsigned int residue;
3089 3226
3227 first_trb = true;
3090 running_total = 0; 3228 running_total = 0;
3091 addr = start_addr + urb->iso_frame_desc[i].offset; 3229 addr = start_addr + urb->iso_frame_desc[i].offset;
3092 td_len = urb->iso_frame_desc[i].length; 3230 td_len = urb->iso_frame_desc[i].length;
3093 td_remain_len = td_len; 3231 td_remain_len = td_len;
3232 /* FIXME: Ignoring zero-length packets, can those happen? */
 3233 total_packet_count = DIV_ROUND_UP(td_len,
3234 le16_to_cpu(urb->ep->desc.wMaxPacketSize));
3235 burst_count = xhci_get_burst_count(xhci, urb->dev, urb,
3236 total_packet_count);
3237 residue = xhci_get_last_burst_packet_count(xhci,
3238 urb->dev, urb, total_packet_count);
3094 3239
3095 trbs_per_td = count_isoc_trbs_needed(xhci, urb, i); 3240 trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);
3096 3241
@@ -3104,7 +3249,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3104 3249
3105 for (j = 0; j < trbs_per_td; j++) { 3250 for (j = 0; j < trbs_per_td; j++) {
3106 u32 remainder = 0; 3251 u32 remainder = 0;
3107 field = 0; 3252 field = TRB_TBC(burst_count) | TRB_TLBPC(residue);
3108 3253
3109 if (first_trb) { 3254 if (first_trb) {
3110 /* Queue the isoc TRB */ 3255 /* Queue the isoc TRB */
@@ -3123,6 +3268,10 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3123 field |= ep_ring->cycle_state; 3268 field |= ep_ring->cycle_state;
3124 } 3269 }
3125 3270
3271 /* Only set interrupt on short packet for IN EPs */
3272 if (usb_urb_dir_in(urb))
3273 field |= TRB_ISP;
3274
3126 /* Chain all the TRBs together; clear the chain bit in 3275 /* Chain all the TRBs together; clear the chain bit in
3127 * the last TRB to indicate it's the last TRB in the 3276 * the last TRB to indicate it's the last TRB in the
3128 * chain. 3277 * chain.
@@ -3142,20 +3291,24 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3142 if (trb_buff_len > td_remain_len) 3291 if (trb_buff_len > td_remain_len)
3143 trb_buff_len = td_remain_len; 3292 trb_buff_len = td_remain_len;
3144 3293
3145 remainder = xhci_td_remainder(td_len - running_total); 3294 /* Set the TRB length, TD size, & interrupter fields. */
3295 if (xhci->hci_version < 0x100) {
3296 remainder = xhci_td_remainder(
3297 td_len - running_total);
3298 } else {
3299 remainder = xhci_v1_0_td_remainder(
3300 running_total, trb_buff_len,
3301 total_packet_count, urb);
3302 }
3146 length_field = TRB_LEN(trb_buff_len) | 3303 length_field = TRB_LEN(trb_buff_len) |
3147 remainder | 3304 remainder |
3148 TRB_INTR_TARGET(0); 3305 TRB_INTR_TARGET(0);
3306
3149 queue_trb(xhci, ep_ring, false, more_trbs_coming, 3307 queue_trb(xhci, ep_ring, false, more_trbs_coming,
3150 lower_32_bits(addr), 3308 lower_32_bits(addr),
3151 upper_32_bits(addr), 3309 upper_32_bits(addr),
3152 length_field, 3310 length_field,
3153 /* We always want to know if the TRB was short, 3311 field);
3154 * or we won't get an event when it completes.
3155 * (Unless we use event data TRBs, which are a
3156 * waste of space and HC resources.)
3157 */
3158 field | TRB_ISP);
3159 running_total += trb_buff_len; 3312 running_total += trb_buff_len;
3160 3313
3161 addr += trb_buff_len; 3314 addr += trb_buff_len;
@@ -3211,8 +3364,8 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
3211 /* Check the ring to guarantee there is enough room for the whole urb. 3364 /* Check the ring to guarantee there is enough room for the whole urb.
3212 * Do not insert any td of the urb to the ring if the check failed. 3365 * Do not insert any td of the urb to the ring if the check failed.
3213 */ 3366 */
3214 ret = prepare_ring(xhci, ep_ring, ep_ctx->ep_info & EP_STATE_MASK, 3367 ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
3215 num_trbs, mem_flags); 3368 num_trbs, mem_flags);
3216 if (ret) 3369 if (ret)
3217 return ret; 3370 return ret;
3218 3371
@@ -3224,7 +3377,7 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
3224 urb->dev->speed == USB_SPEED_FULL) 3377 urb->dev->speed == USB_SPEED_FULL)
3225 urb->start_frame >>= 3; 3378 urb->start_frame >>= 3;
3226 3379
3227 xhci_interval = EP_INTERVAL_TO_UFRAMES(ep_ctx->ep_info); 3380 xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
3228 ep_interval = urb->interval; 3381 ep_interval = urb->interval;
3229 /* Convert to microframes */ 3382 /* Convert to microframes */
3230 if (urb->dev->speed == USB_SPEED_LOW || 3383 if (urb->dev->speed == USB_SPEED_LOW ||
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 81b976e45880..6864759c8d1a 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -973,8 +973,8 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
973 973
974 out_ctx = xhci->devs[slot_id]->out_ctx; 974 out_ctx = xhci->devs[slot_id]->out_ctx;
975 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); 975 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
976 hw_max_packet_size = MAX_PACKET_DECODED(ep_ctx->ep_info2); 976 hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
977 max_packet_size = urb->dev->ep0.desc.wMaxPacketSize; 977 max_packet_size = le16_to_cpu(urb->dev->ep0.desc.wMaxPacketSize);
978 if (hw_max_packet_size != max_packet_size) { 978 if (hw_max_packet_size != max_packet_size) {
979 xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n"); 979 xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n");
980 xhci_dbg(xhci, "Max packet size in usb_device = %d\n", 980 xhci_dbg(xhci, "Max packet size in usb_device = %d\n",
@@ -988,15 +988,15 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
988 xhci->devs[slot_id]->out_ctx, ep_index); 988 xhci->devs[slot_id]->out_ctx, ep_index);
989 in_ctx = xhci->devs[slot_id]->in_ctx; 989 in_ctx = xhci->devs[slot_id]->in_ctx;
990 ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index); 990 ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
991 ep_ctx->ep_info2 &= ~MAX_PACKET_MASK; 991 ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
992 ep_ctx->ep_info2 |= MAX_PACKET(max_packet_size); 992 ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));
993 993
994 /* Set up the input context flags for the command */ 994 /* Set up the input context flags for the command */
995 /* FIXME: This won't work if a non-default control endpoint 995 /* FIXME: This won't work if a non-default control endpoint
996 * changes max packet sizes. 996 * changes max packet sizes.
997 */ 997 */
998 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); 998 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
999 ctrl_ctx->add_flags = EP0_FLAG; 999 ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
1000 ctrl_ctx->drop_flags = 0; 1000 ctrl_ctx->drop_flags = 0;
1001 1001
1002 xhci_dbg(xhci, "Slot %d input context\n", slot_id); 1002 xhci_dbg(xhci, "Slot %d input context\n", slot_id);
@@ -1010,7 +1010,7 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
1010 /* Clean up the input context for later use by bandwidth 1010 /* Clean up the input context for later use by bandwidth
1011 * functions. 1011 * functions.
1012 */ 1012 */
1013 ctrl_ctx->add_flags = SLOT_FLAG; 1013 ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
1014 } 1014 }
1015 return ret; 1015 return ret;
1016} 1016}
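
Every context update in this file now follows the same pattern: swap the constant, not the field, so the read-modify-write stays entirely in the register's little-endian order (AND and OR are byte-order agnostic). A portable sketch of the pattern, with glibc's <endian.h> helpers standing in for cpu_to_le32()/le32_to_cpu():

#include <stdint.h>
#include <endian.h>	/* htole32()/le32toh() */

#define MAX_PACKET_MASK		(0xffffu << 16)
#define MAX_PACKET(p)		(((uint32_t)(p) & 0xffff) << 16)

void set_max_packet(uint32_t *ep_info2_le, uint16_t maxp)
{
	*ep_info2_le &= htole32(~MAX_PACKET_MASK);
	*ep_info2_le |= htole32(MAX_PACKET(maxp));
}
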
@@ -1331,27 +1331,30 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1331 /* If the HC already knows the endpoint is disabled, 1331 /* If the HC already knows the endpoint is disabled,
1332 * or the HCD has noted it is disabled, ignore this request 1332 * or the HCD has noted it is disabled, ignore this request
1333 */ 1333 */
1334 if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED || 1334 if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
1335 ctrl_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) { 1335 EP_STATE_DISABLED ||
1336 le32_to_cpu(ctrl_ctx->drop_flags) &
1337 xhci_get_endpoint_flag(&ep->desc)) {
1336 xhci_warn(xhci, "xHCI %s called with disabled ep %p\n", 1338 xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
1337 __func__, ep); 1339 __func__, ep);
1338 return 0; 1340 return 0;
1339 } 1341 }
1340 1342
1341 ctrl_ctx->drop_flags |= drop_flag; 1343 ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
1342 new_drop_flags = ctrl_ctx->drop_flags; 1344 new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
1343 1345
1344 ctrl_ctx->add_flags &= ~drop_flag; 1346 ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
1345 new_add_flags = ctrl_ctx->add_flags; 1347 new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
1346 1348
1347 last_ctx = xhci_last_valid_endpoint(ctrl_ctx->add_flags); 1349 last_ctx = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags));
1348 slot_ctx = xhci_get_slot_ctx(xhci, in_ctx); 1350 slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
1349 /* Update the last valid endpoint context, if we deleted the last one */ 1351 /* Update the last valid endpoint context, if we deleted the last one */
1350 if ((slot_ctx->dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) { 1352 if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) >
1351 slot_ctx->dev_info &= ~LAST_CTX_MASK; 1353 LAST_CTX(last_ctx)) {
1352 slot_ctx->dev_info |= LAST_CTX(last_ctx); 1354 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
1355 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
1353 } 1356 }
1354 new_slot_info = slot_ctx->dev_info; 1357 new_slot_info = le32_to_cpu(slot_ctx->dev_info);
1355 1358
1356 xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep); 1359 xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
1357 1360
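
xhci_last_valid_endpoint(), used above on the swapped add flags, is just the index of the highest set flag bit: the slot context is bit 0, EP0 is bit 1, so flag index equals context index. A stand-alone equivalent, with a GCC/Clang builtin in place of the kernel's fls():

#include <stdint.h>

/* fls(add_flags) - 1; add_flags must be non-zero. */
int last_valid_ep_ctx(uint32_t add_flags)
{
	return 31 - __builtin_clz(add_flags);
}
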
@@ -1419,7 +1422,8 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1419 /* If the HCD has already noted the endpoint is enabled, 1422 /* If the HCD has already noted the endpoint is enabled,
1420 * ignore this request. 1423 * ignore this request.
1421 */ 1424 */
1422 if (ctrl_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) { 1425 if (le32_to_cpu(ctrl_ctx->add_flags) &
1426 xhci_get_endpoint_flag(&ep->desc)) {
1423 xhci_warn(xhci, "xHCI %s called with enabled ep %p\n", 1427 xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
1424 __func__, ep); 1428 __func__, ep);
1425 return 0; 1429 return 0;
@@ -1437,8 +1441,8 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1437 return -ENOMEM; 1441 return -ENOMEM;
1438 } 1442 }
1439 1443
1440 ctrl_ctx->add_flags |= added_ctxs; 1444 ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
1441 new_add_flags = ctrl_ctx->add_flags; 1445 new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
1442 1446
1443 /* If xhci_endpoint_disable() was called for this endpoint, but the 1447 /* If xhci_endpoint_disable() was called for this endpoint, but the
1444 * xHC hasn't been notified yet through the check_bandwidth() call, 1448 * xHC hasn't been notified yet through the check_bandwidth() call,
@@ -1446,15 +1450,16 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1446 * descriptors. We must drop and re-add this endpoint, so we leave the 1450 * descriptors. We must drop and re-add this endpoint, so we leave the
1447 * drop flags alone. 1451 * drop flags alone.
1448 */ 1452 */
1449 new_drop_flags = ctrl_ctx->drop_flags; 1453 new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
1450 1454
1451 slot_ctx = xhci_get_slot_ctx(xhci, in_ctx); 1455 slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
1452 /* Update the last valid endpoint context, if we just added one past the previous last */ 1456
1453 if ((slot_ctx->dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) { 1457 if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) <
1454 slot_ctx->dev_info &= ~LAST_CTX_MASK; 1458 LAST_CTX(last_ctx)) {
1455 slot_ctx->dev_info |= LAST_CTX(last_ctx); 1459 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
1460 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
1456 } 1461 }
1457 new_slot_info = slot_ctx->dev_info; 1462 new_slot_info = le32_to_cpu(slot_ctx->dev_info);
1458 1463
1459 /* Store the usb_device pointer for later use */ 1464 /* Store the usb_device pointer for later use */
1460 ep->hcpriv = udev; 1465 ep->hcpriv = udev;
@@ -1484,9 +1489,9 @@ static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *vir
1484 ctrl_ctx->drop_flags = 0; 1489 ctrl_ctx->drop_flags = 0;
1485 ctrl_ctx->add_flags = 0; 1490 ctrl_ctx->add_flags = 0;
1486 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); 1491 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
1487 slot_ctx->dev_info &= ~LAST_CTX_MASK; 1492 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
1488 /* Endpoint 0 is always valid */ 1493 /* Endpoint 0 is always valid */
1489 slot_ctx->dev_info |= LAST_CTX(1); 1494 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
1490 for (i = 1; i < 31; ++i) { 1495 for (i = 1; i < 31; ++i) {
1491 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i); 1496 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
1492 ep_ctx->ep_info = 0; 1497 ep_ctx->ep_info = 0;
@@ -1497,7 +1502,7 @@ static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *vir
1497} 1502}
1498 1503
1499static int xhci_configure_endpoint_result(struct xhci_hcd *xhci, 1504static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
1500 struct usb_device *udev, int *cmd_status) 1505 struct usb_device *udev, u32 *cmd_status)
1501{ 1506{
1502 int ret; 1507 int ret;
1503 1508
@@ -1535,7 +1540,7 @@ static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
1535} 1540}
1536 1541
1537static int xhci_evaluate_context_result(struct xhci_hcd *xhci, 1542static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
1538 struct usb_device *udev, int *cmd_status) 1543 struct usb_device *udev, u32 *cmd_status)
1539{ 1544{
1540 int ret; 1545 int ret;
1541 struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id]; 1546 struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];
@@ -1581,7 +1586,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
1581 unsigned long flags; 1586 unsigned long flags;
1582 struct xhci_container_ctx *in_ctx; 1587 struct xhci_container_ctx *in_ctx;
1583 struct completion *cmd_completion; 1588 struct completion *cmd_completion;
1584 int *cmd_status; 1589 u32 *cmd_status;
1585 struct xhci_virt_device *virt_dev; 1590 struct xhci_virt_device *virt_dev;
1586 1591
1587 spin_lock_irqsave(&xhci->lock, flags); 1592 spin_lock_irqsave(&xhci->lock, flags);
@@ -1595,8 +1600,8 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
1595 /* Enqueue pointer can be left pointing to the link TRB, 1600 /* Enqueue pointer can be left pointing to the link TRB,
1596 * so we must handle that 1601
1597 */ 1602 */
1598 if ((command->command_trb->link.control & TRB_TYPE_BITMASK) 1603 if ((le32_to_cpu(command->command_trb->link.control)
1599 == TRB_TYPE(TRB_LINK)) 1604 & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK))
1600 command->command_trb = 1605 command->command_trb =
1601 xhci->cmd_ring->enq_seg->next->trbs; 1606 xhci->cmd_ring->enq_seg->next->trbs;
1602 1607
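
The link-TRB test above byte-swaps the control word before masking. The TRB Type field occupies bits 15:10 of control, and a Link TRB is type 6, so the check reduces to the sketch below (constants per the xHCI spec, le32toh() standing in for le32_to_cpu()):

#include <stdint.h>
#include <endian.h>

#define TRB_TYPE_BITMASK	0xfc00u		/* bits 15:10 */
#define TRB_TYPE(t)		((uint32_t)(t) << 10)
#define TRB_LINK		6

int trb_is_link(uint32_t control_le)
{
	return (le32toh(control_le) & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK);
}
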
@@ -1672,14 +1677,13 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
1672 1677
1673 /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */ 1678 /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
1674 ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); 1679 ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
1675 ctrl_ctx->add_flags |= SLOT_FLAG; 1680 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
1676 ctrl_ctx->add_flags &= ~EP0_FLAG; 1681 ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
1677 ctrl_ctx->drop_flags &= ~SLOT_FLAG; 1682 ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
1678 ctrl_ctx->drop_flags &= ~EP0_FLAG;
1679 xhci_dbg(xhci, "New Input Control Context:\n"); 1683 xhci_dbg(xhci, "New Input Control Context:\n");
1680 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); 1684 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
1681 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 1685 xhci_dbg_ctx(xhci, virt_dev->in_ctx,
1682 LAST_CTX_TO_EP_NUM(slot_ctx->dev_info)); 1686 LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));
1683 1687
1684 ret = xhci_configure_endpoint(xhci, udev, NULL, 1688 ret = xhci_configure_endpoint(xhci, udev, NULL,
1685 false, false); 1689 false, false);
@@ -1690,7 +1694,7 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
1690 1694
1691 xhci_dbg(xhci, "Output context after successful config ep cmd:\n"); 1695 xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
1692 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1696 xhci_dbg_ctx(xhci, virt_dev->out_ctx,
1693 LAST_CTX_TO_EP_NUM(slot_ctx->dev_info)); 1697 LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));
1694 1698
1695 xhci_zero_in_ctx(xhci, virt_dev); 1699 xhci_zero_in_ctx(xhci, virt_dev);
1696 /* Install new rings and free or cache any old rings */ 1700 /* Install new rings and free or cache any old rings */
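
Folding the two drop_flags clears above into a single mask is behavior-preserving: x &= ~A; x &= ~B; is exactly x &= ~(A | B). With SLOT_FLAG at bit 0 and EP0_FLAG at bit 1, drop_flags of 0x7 ends up as 0x4 under either form.
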
@@ -1740,10 +1744,10 @@ static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
1740{ 1744{
1741 struct xhci_input_control_ctx *ctrl_ctx; 1745 struct xhci_input_control_ctx *ctrl_ctx;
1742 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); 1746 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
1743 ctrl_ctx->add_flags = add_flags; 1747 ctrl_ctx->add_flags = cpu_to_le32(add_flags);
1744 ctrl_ctx->drop_flags = drop_flags; 1748 ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
1745 xhci_slot_copy(xhci, in_ctx, out_ctx); 1749 xhci_slot_copy(xhci, in_ctx, out_ctx);
1746 ctrl_ctx->add_flags |= SLOT_FLAG; 1750 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
1747 1751
1748 xhci_dbg(xhci, "Input Context:\n"); 1752 xhci_dbg(xhci, "Input Context:\n");
1749 xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags)); 1753 xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
@@ -1772,7 +1776,7 @@ static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
1772 deq_state->new_deq_ptr); 1776 deq_state->new_deq_ptr);
1773 return; 1777 return;
1774 } 1778 }
1775 ep_ctx->deq = addr | deq_state->new_cycle_state; 1779 ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state);
1776 1780
1777 added_ctxs = xhci_get_endpoint_flag_from_index(ep_index); 1781 added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
1778 xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx, 1782 xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
@@ -2327,8 +2331,8 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
2327 /* Enqueue pointer can be left pointing to the link TRB, 2331 /* Enqueue pointer can be left pointing to the link TRB,
2328 * so we must handle that 2332
2329 */ 2333 */
2330 if ((reset_device_cmd->command_trb->link.control & TRB_TYPE_BITMASK) 2334 if ((le32_to_cpu(reset_device_cmd->command_trb->link.control)
2331 == TRB_TYPE(TRB_LINK)) 2335 & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK))
2332 reset_device_cmd->command_trb = 2336 reset_device_cmd->command_trb =
2333 xhci->cmd_ring->enq_seg->next->trbs; 2337 xhci->cmd_ring->enq_seg->next->trbs;
2334 2338
@@ -2542,6 +2546,17 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
2542 2546
2543 virt_dev = xhci->devs[udev->slot_id]; 2547 virt_dev = xhci->devs[udev->slot_id];
2544 2548
2549 if (WARN_ON(!virt_dev)) {
2550 /*
2551 * In plug/unplug torture tests with an NEC controller, a
2552 * NULL-pointer dereference was observed once because virt_dev was NULL.
2553 * Print a useful warning rather than crash if it happens again!
2554 */
2555 xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
2556 udev->slot_id);
2557 return -EINVAL;
2558 }
2559
2545 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); 2560 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
2546 /* 2561 /*
2547 * If this is the first Set Address since device plug-in or 2562 * If this is the first Set Address since device plug-in or
@@ -2609,10 +2624,10 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
2609 temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); 2624 temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
2610 xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64); 2625 xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64);
2611 xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n", 2626 xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n",
2612 udev->slot_id, 2627 udev->slot_id,
2613 &xhci->dcbaa->dev_context_ptrs[udev->slot_id], 2628 &xhci->dcbaa->dev_context_ptrs[udev->slot_id],
2614 (unsigned long long) 2629 (unsigned long long)
2615 xhci->dcbaa->dev_context_ptrs[udev->slot_id]); 2630 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
2616 xhci_dbg(xhci, "Output Context DMA address = %#08llx\n", 2631 xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
2617 (unsigned long long)virt_dev->out_ctx->dma); 2632 (unsigned long long)virt_dev->out_ctx->dma);
2618 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); 2633 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
@@ -2626,7 +2641,8 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
2626 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); 2641 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
2627 /* Use the kernel-assigned address for devices; store the xHC-assigned 2642
2628 * address locally. */ 2643 * address locally. */
2629 virt_dev->address = (slot_ctx->dev_state & DEV_ADDR_MASK) + 1; 2644 virt_dev->address = (le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK)
2645 + 1;
2630 /* Zero the input context control for later use */ 2646 /* Zero the input context control for later use */
2631 ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); 2647 ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
2632 ctrl_ctx->add_flags = 0; 2648 ctrl_ctx->add_flags = 0;
@@ -2670,16 +2686,16 @@ int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
2670 spin_lock_irqsave(&xhci->lock, flags); 2686 spin_lock_irqsave(&xhci->lock, flags);
2671 xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx); 2687 xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
2672 ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx); 2688 ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx);
2673 ctrl_ctx->add_flags |= SLOT_FLAG; 2689 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
2674 slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx); 2690 slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
2675 slot_ctx->dev_info |= DEV_HUB; 2691 slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
2676 if (tt->multi) 2692 if (tt->multi)
2677 slot_ctx->dev_info |= DEV_MTT; 2693 slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
2678 if (xhci->hci_version > 0x95) { 2694 if (xhci->hci_version > 0x95) {
2679 xhci_dbg(xhci, "xHCI version %x needs hub " 2695 xhci_dbg(xhci, "xHCI version %x needs hub "
2680 "TT think time and number of ports\n", 2696 "TT think time and number of ports\n",
2681 (unsigned int) xhci->hci_version); 2697 (unsigned int) xhci->hci_version);
2682 slot_ctx->dev_info2 |= XHCI_MAX_PORTS(hdev->maxchild); 2698 slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild));
2683 /* Set TT think time - convert from ns to FS bit times. 2699 /* Set TT think time - convert from ns to FS bit times.
2684 * 0 = 8 FS bit times, 1 = 16 FS bit times, 2700 * 0 = 8 FS bit times, 1 = 16 FS bit times,
2685 * 2 = 24 FS bit times, 3 = 32 FS bit times. 2701 * 2 = 24 FS bit times, 3 = 32 FS bit times.
@@ -2687,7 +2703,7 @@ int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
2687 think_time = tt->think_time; 2703 think_time = tt->think_time;
2688 if (think_time != 0) 2704 if (think_time != 0)
2689 think_time = (think_time / 666) - 1; 2705 think_time = (think_time / 666) - 1;
2690 slot_ctx->tt_info |= TT_THINK_TIME(think_time); 2706 slot_ctx->tt_info |= cpu_to_le32(TT_THINK_TIME(think_time));
2691 } else { 2707 } else {
2692 xhci_dbg(xhci, "xHCI version %x doesn't need hub " 2708 xhci_dbg(xhci, "xHCI version %x doesn't need hub "
2693 "TT think time or number of ports\n", 2709 "TT think time or number of ports\n",
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index ba1be6b7cc6d..db661543a805 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -57,13 +57,13 @@
57 * @run_regs_off: RTSOFF - Runtime register space offset 57 * @run_regs_off: RTSOFF - Runtime register space offset
58 */ 58 */
59struct xhci_cap_regs { 59struct xhci_cap_regs {
60 u32 hc_capbase; 60 __le32 hc_capbase;
61 u32 hcs_params1; 61 __le32 hcs_params1;
62 u32 hcs_params2; 62 __le32 hcs_params2;
63 u32 hcs_params3; 63 __le32 hcs_params3;
64 u32 hcc_params; 64 __le32 hcc_params;
65 u32 db_off; 65 __le32 db_off;
66 u32 run_regs_off; 66 __le32 run_regs_off;
67 /* Reserved up to (CAPLENGTH - 0x1C) */ 67 /* Reserved up to (CAPLENGTH - 0x1C) */
68}; 68};
69 69
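
The u32 → __le32 churn through the rest of this header is what makes the conversion checkable: with sparse (make C=2 CF=-D__CHECK_ENDIAN__), any access that mixes a __le32 with host-order arithmetic gets flagged. A toy illustration of the annotation mechanism, not the kernel's actual types:

#include <stdint.h>

#ifdef __CHECKER__			/* defined by sparse */
#define __bitwise __attribute__((bitwise))
#else
#define __bitwise
#endif

typedef uint32_t __bitwise le32;	/* stand-in for __le32 */

struct demo_slot_ctx {
	le32 dev_info;			/* on-wire little endian */
};

/* sparse would warn on: ctx->dev_info & LAST_CTX_MASK
 * forcing the explicit le32_to_cpu() conversions seen in this patch. */
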
@@ -155,26 +155,26 @@ struct xhci_cap_regs {
155 * devices. 155 * devices.
156 */ 156 */
157struct xhci_op_regs { 157struct xhci_op_regs {
158 u32 command; 158 __le32 command;
159 u32 status; 159 __le32 status;
160 u32 page_size; 160 __le32 page_size;
161 u32 reserved1; 161 __le32 reserved1;
162 u32 reserved2; 162 __le32 reserved2;
163 u32 dev_notification; 163 __le32 dev_notification;
164 u64 cmd_ring; 164 __le64 cmd_ring;
165 /* rsvd: offset 0x20-2F */ 165 /* rsvd: offset 0x20-2F */
166 u32 reserved3[4]; 166 __le32 reserved3[4];
167 u64 dcbaa_ptr; 167 __le64 dcbaa_ptr;
168 u32 config_reg; 168 __le32 config_reg;
169 /* rsvd: offset 0x3C-3FF */ 169 /* rsvd: offset 0x3C-3FF */
170 u32 reserved4[241]; 170 __le32 reserved4[241];
171 /* port 1 registers, which serve as a base address for other ports */ 171 /* port 1 registers, which serve as a base address for other ports */
172 u32 port_status_base; 172 __le32 port_status_base;
173 u32 port_power_base; 173 __le32 port_power_base;
174 u32 port_link_base; 174 __le32 port_link_base;
175 u32 reserved5; 175 __le32 reserved5;
176 /* registers for ports 2-255 */ 176 /* registers for ports 2-255 */
177 u32 reserved6[NUM_PORT_REGS*254]; 177 __le32 reserved6[NUM_PORT_REGS*254];
178}; 178};
179 179
180/* USBCMD - USB command - command bitmasks */ 180/* USBCMD - USB command - command bitmasks */
@@ -382,12 +382,12 @@ struct xhci_op_regs {
382 * updates the dequeue pointer. 382 * updates the dequeue pointer.
383 */ 383 */
384struct xhci_intr_reg { 384struct xhci_intr_reg {
385 u32 irq_pending; 385 __le32 irq_pending;
386 u32 irq_control; 386 __le32 irq_control;
387 u32 erst_size; 387 __le32 erst_size;
388 u32 rsvd; 388 __le32 rsvd;
389 u64 erst_base; 389 __le64 erst_base;
390 u64 erst_dequeue; 390 __le64 erst_dequeue;
391}; 391};
392 392
393/* irq_pending bitmasks */ 393/* irq_pending bitmasks */
@@ -432,8 +432,8 @@ struct xhci_intr_reg {
432 * or larger accesses" 432 * or larger accesses"
433 */ 433 */
434struct xhci_run_regs { 434struct xhci_run_regs {
435 u32 microframe_index; 435 __le32 microframe_index;
436 u32 rsvd[7]; 436 __le32 rsvd[7];
437 struct xhci_intr_reg ir_set[128]; 437 struct xhci_intr_reg ir_set[128];
438}; 438};
439 439
@@ -447,7 +447,7 @@ struct xhci_run_regs {
447 * Section 5.6 447 * Section 5.6
448 */ 448 */
449struct xhci_doorbell_array { 449struct xhci_doorbell_array {
450 u32 doorbell[256]; 450 __le32 doorbell[256];
451}; 451};
452 452
453#define DB_VALUE(ep, stream) ((((ep) + 1) & 0xff) | ((stream) << 16)) 453#define DB_VALUE(ep, stream) ((((ep) + 1) & 0xff) | ((stream) << 16))
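
DB_VALUE() packs a doorbell write: the target (endpoint index + 1) in bits 7:0 and the stream ID in bits 31:16. For example:

#include <stdint.h>

#define DB_VALUE(ep, stream)	((((ep) + 1) & 0xff) | ((stream) << 16))

/* ep_index 0 (default control endpoint), stream 0 -> target 1, 0x00000001 */
uint32_t db_ep0 = DB_VALUE(0, 0);
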
@@ -504,12 +504,12 @@ struct xhci_container_ctx {
504 * reserved at the end of the slot context for HC internal use. 504 * reserved at the end of the slot context for HC internal use.
505 */ 505 */
506struct xhci_slot_ctx { 506struct xhci_slot_ctx {
507 u32 dev_info; 507 __le32 dev_info;
508 u32 dev_info2; 508 __le32 dev_info2;
509 u32 tt_info; 509 __le32 tt_info;
510 u32 dev_state; 510 __le32 dev_state;
511 /* offset 0x10 to 0x1f reserved for HC internal use */ 511 /* offset 0x10 to 0x1f reserved for HC internal use */
512 u32 reserved[4]; 512 __le32 reserved[4];
513}; 513};
514 514
515/* dev_info bitmasks */ 515/* dev_info bitmasks */
@@ -580,12 +580,12 @@ struct xhci_slot_ctx {
580 * reserved at the end of the endpoint context for HC internal use. 580 * reserved at the end of the endpoint context for HC internal use.
581 */ 581 */
582struct xhci_ep_ctx { 582struct xhci_ep_ctx {
583 u32 ep_info; 583 __le32 ep_info;
584 u32 ep_info2; 584 __le32 ep_info2;
585 u64 deq; 585 __le64 deq;
586 u32 tx_info; 586 __le32 tx_info;
587 /* offset 0x14 - 0x1f reserved for HC internal use */ 587 /* offset 0x14 - 0x1f reserved for HC internal use */
588 u32 reserved[3]; 588 __le32 reserved[3];
589}; 589};
590 590
591/* ep_info bitmasks */ 591/* ep_info bitmasks */
@@ -660,9 +660,9 @@ struct xhci_ep_ctx {
660 * @add_context: set the bit of the endpoint context you want to enable 660 * @add_context: set the bit of the endpoint context you want to enable
661 */ 661 */
662struct xhci_input_control_ctx { 662struct xhci_input_control_ctx {
663 u32 drop_flags; 663 __le32 drop_flags;
664 u32 add_flags; 664 __le32 add_flags;
665 u32 rsvd2[6]; 665 __le32 rsvd2[6];
666}; 666};
667 667
668/* Represents everything that is needed to issue a command on the command ring. 668/* Represents everything that is needed to issue a command on the command ring.
@@ -688,9 +688,9 @@ struct xhci_command {
688 688
689struct xhci_stream_ctx { 689struct xhci_stream_ctx {
690 /* 64-bit stream ring address, cycle state, and stream type */ 690 /* 64-bit stream ring address, cycle state, and stream type */
691 u64 stream_ring; 691 __le64 stream_ring;
692 /* offset 0x14 - 0x1f reserved for HC internal use */ 692 /* offset 0x14 - 0x1f reserved for HC internal use */
693 u32 reserved[2]; 693 __le32 reserved[2];
694}; 694};
695 695
696/* Stream Context Types (section 6.4.1) - bits 3:1 of stream ctx deq ptr */ 696/* Stream Context Types (section 6.4.1) - bits 3:1 of stream ctx deq ptr */
@@ -803,7 +803,7 @@ struct xhci_virt_device {
803 */ 803 */
804struct xhci_device_context_array { 804struct xhci_device_context_array {
805 /* 64-bit device addresses; we only write 32-bit addresses */ 805 /* 64-bit device addresses; we only write 32-bit addresses */
806 u64 dev_context_ptrs[MAX_HC_SLOTS]; 806 __le64 dev_context_ptrs[MAX_HC_SLOTS];
807 /* private xHCD pointers */ 807 /* private xHCD pointers */
808 dma_addr_t dma; 808 dma_addr_t dma;
809}; 809};
@@ -816,10 +816,10 @@ struct xhci_device_context_array {
816 816
817struct xhci_transfer_event { 817struct xhci_transfer_event {
818 /* 64-bit buffer address, or immediate data */ 818 /* 64-bit buffer address, or immediate data */
819 u64 buffer; 819 __le64 buffer;
820 u32 transfer_len; 820 __le32 transfer_len;
821 /* This field is interpreted differently based on the type of TRB */ 821 /* This field is interpreted differently based on the type of TRB */
822 u32 flags; 822 __le32 flags;
823}; 823};
824 824
825/** Transfer Event bit fields **/ 825/** Transfer Event bit fields **/
@@ -898,9 +898,9 @@ struct xhci_transfer_event {
898 898
899struct xhci_link_trb { 899struct xhci_link_trb {
900 /* 64-bit segment pointer*/ 900 /* 64-bit segment pointer*/
901 u64 segment_ptr; 901 __le64 segment_ptr;
902 u32 intr_target; 902 __le32 intr_target;
903 u32 control; 903 __le32 control;
904}; 904};
905 905
906/* control bitfields */ 906/* control bitfields */
@@ -909,9 +909,9 @@ struct xhci_link_trb {
909/* Command completion event TRB */ 909/* Command completion event TRB */
910struct xhci_event_cmd { 910struct xhci_event_cmd {
911 /* Pointer to command TRB, or the value passed by the event data trb */ 911 /* Pointer to command TRB, or the value passed by the event data trb */
912 u64 cmd_trb; 912 __le64 cmd_trb;
913 u32 status; 913 __le32 status;
914 u32 flags; 914 __le32 flags;
915}; 915};
916 916
917/* flags bitmasks */ 917/* flags bitmasks */
@@ -943,6 +943,8 @@ struct xhci_event_cmd {
943/* Interrupter Target - which MSI-X vector to target the completion event at */ 943/* Interrupter Target - which MSI-X vector to target the completion event at */
944#define TRB_INTR_TARGET(p) (((p) & 0x3ff) << 22) 944#define TRB_INTR_TARGET(p) (((p) & 0x3ff) << 22)
945#define GET_INTR_TARGET(p) (((p) >> 22) & 0x3ff) 945#define GET_INTR_TARGET(p) (((p) >> 22) & 0x3ff)
946#define TRB_TBC(p) (((p) & 0x3) << 7)
947#define TRB_TLBPC(p) (((p) & 0xf) << 16)
946 948
947/* Cycle bit - indicates TRB ownership by HC or HCD */ 949/* Cycle bit - indicates TRB ownership by HC or HCD */
948#define TRB_CYCLE (1<<0) 950#define TRB_CYCLE (1<<0)
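
The two new macros feed the xHCI 1.0 isoc TRB fields: TBC is the number of bursts in the TD minus one, TLBPC the number of packets in the final burst minus one. A hedged sketch of the math for a SuperSpeed endpoint, where max_burst is bMaxBurst (extra packets per burst):

unsigned int burst_count(unsigned int total_packets, unsigned int max_burst)
{
	/* ceil(total_packets / (max_burst + 1)) - 1, for TRB_TBC() */
	return (total_packets + max_burst) / (max_burst + 1) - 1;
}

unsigned int last_burst_packet_count(unsigned int total_packets,
				     unsigned int max_burst)
{
	unsigned int rem = total_packets % (max_burst + 1);

	return rem ? rem - 1 : max_burst;	/* for TRB_TLBPC() */
}
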
@@ -970,7 +972,7 @@ struct xhci_event_cmd {
970#define TRB_SIA (1<<31) 972#define TRB_SIA (1<<31)
971 973
972struct xhci_generic_trb { 974struct xhci_generic_trb {
973 u32 field[4]; 975 __le32 field[4];
974}; 976};
975 977
976union xhci_trb { 978union xhci_trb {
@@ -1118,10 +1120,10 @@ struct xhci_ring {
1118 1120
1119struct xhci_erst_entry { 1121struct xhci_erst_entry {
1120 /* 64-bit event ring segment address */ 1122 /* 64-bit event ring segment address */
1121 u64 seg_addr; 1123 __le64 seg_addr;
1122 u32 seg_size; 1124 __le32 seg_size;
1123 /* Set to zero */ 1125 /* Set to zero */
1124 u32 rsvd; 1126 __le32 rsvd;
1125}; 1127};
1126 1128
1127struct xhci_erst { 1129struct xhci_erst {
@@ -1286,10 +1288,10 @@ struct xhci_hcd {
1286 /* Is each xHCI roothub port a USB 3.0, USB 2.0, or USB 1.1 port? */ 1288 /* Is each xHCI roothub port a USB 3.0, USB 2.0, or USB 1.1 port? */
1287 u8 *port_array; 1289 u8 *port_array;
1288 /* Array of pointers to USB 3.0 PORTSC registers */ 1290 /* Array of pointers to USB 3.0 PORTSC registers */
1289 u32 __iomem **usb3_ports; 1291 __le32 __iomem **usb3_ports;
1290 unsigned int num_usb3_ports; 1292 unsigned int num_usb3_ports;
1291 /* Array of pointers to USB 2.0 PORTSC registers */ 1293 /* Array of pointers to USB 2.0 PORTSC registers */
1292 u32 __iomem **usb2_ports; 1294 __le32 __iomem **usb2_ports;
1293 unsigned int num_usb2_ports; 1295 unsigned int num_usb2_ports;
1294}; 1296};
1295 1297
@@ -1322,12 +1324,12 @@ static inline struct usb_hcd *xhci_to_hcd(struct xhci_hcd *xhci)
1322/* TODO: copied from ehci.h - can be refactored? */ 1324/* TODO: copied from ehci.h - can be refactored? */
1323/* xHCI spec says all registers are little endian */ 1325/* xHCI spec says all registers are little endian */
1324static inline unsigned int xhci_readl(const struct xhci_hcd *xhci, 1326static inline unsigned int xhci_readl(const struct xhci_hcd *xhci,
1325 __u32 __iomem *regs) 1327 __le32 __iomem *regs)
1326{ 1328{
1327 return readl(regs); 1329 return readl(regs);
1328} 1330}
1329static inline void xhci_writel(struct xhci_hcd *xhci, 1331static inline void xhci_writel(struct xhci_hcd *xhci,
1330 const unsigned int val, __u32 __iomem *regs) 1332 const unsigned int val, __le32 __iomem *regs)
1331{ 1333{
1332 xhci_dbg(xhci, 1334 xhci_dbg(xhci,
1333 "`MEM_WRITE_DWORD(3'b000, 32'h%p, 32'h%0x, 4'hf);\n", 1335 "`MEM_WRITE_DWORD(3'b000, 32'h%p, 32'h%0x, 4'hf);\n",
@@ -1345,7 +1347,7 @@ static inline void xhci_writel(struct xhci_hcd *xhci,
1345 * the high dword, and write order is irrelevant. 1347 * the high dword, and write order is irrelevant.
1346 */ 1348 */
1347static inline u64 xhci_read_64(const struct xhci_hcd *xhci, 1349static inline u64 xhci_read_64(const struct xhci_hcd *xhci,
1348 __u64 __iomem *regs) 1350 __le64 __iomem *regs)
1349{ 1351{
1350 __u32 __iomem *ptr = (__u32 __iomem *) regs; 1352 __u32 __iomem *ptr = (__u32 __iomem *) regs;
1351 u64 val_lo = readl(ptr); 1353 u64 val_lo = readl(ptr);
@@ -1353,7 +1355,7 @@ static inline u64 xhci_read_64(const struct xhci_hcd *xhci,
1353 return val_lo + (val_hi << 32); 1355 return val_lo + (val_hi << 32);
1354} 1356}
1355static inline void xhci_write_64(struct xhci_hcd *xhci, 1357static inline void xhci_write_64(struct xhci_hcd *xhci,
1356 const u64 val, __u64 __iomem *regs) 1358 const u64 val, __le64 __iomem *regs)
1357{ 1359{
1358 __u32 __iomem *ptr = (__u32 __iomem *) regs; 1360 __u32 __iomem *ptr = (__u32 __iomem *) regs;
1359 u32 val_lo = lower_32_bits(val); 1361 u32 val_lo = lower_32_bits(val);