diff options
author | Greg Kroah-Hartman <gregkh@suse.de> | 2009-04-29 22:14:08 -0400 |
---|---|---|
committer | Greg Kroah-Hartman <gregkh@suse.de> | 2009-06-16 00:44:50 -0400 |
commit | 700e2052c6814b1b1d2714225d568c5c64bc49ae (patch) | |
tree | 67c62f3c3773d2b856e21662435c839a1aab9dd4 /drivers/usb/host/xhci-ring.c | |
parent | b7258a4aba2b24d5c27a0f6674795e83e7771969 (diff) |
USB: xhci: fix lots of compiler warnings.
Turns out someone never built this code on a 64bit platform.
Someone owes me a beer...
Reported-by: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'drivers/usb/host/xhci-ring.c')
-rw-r--r-- | drivers/usb/host/xhci-ring.c | 72 |
1 file changed, 35 insertions, 37 deletions
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 1feca20612d1..9d6874710669 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c | |||
@@ -74,12 +74,12 @@ | |||
74 | dma_addr_t trb_virt_to_dma(struct xhci_segment *seg, | 74 | dma_addr_t trb_virt_to_dma(struct xhci_segment *seg, |
75 | union xhci_trb *trb) | 75 | union xhci_trb *trb) |
76 | { | 76 | { |
77 | unsigned int offset; | 77 | dma_addr_t offset; |
78 | 78 | ||
79 | if (!seg || !trb || (void *) trb < (void *) seg->trbs) | 79 | if (!seg || !trb || (void *) trb < (void *) seg->trbs) |
80 | return 0; | 80 | return 0; |
81 | /* offset in bytes, since these are byte-addressable */ | 81 | /* offset in bytes, since these are byte-addressable */ |
82 | offset = (unsigned int) trb - (unsigned int) seg->trbs; | 82 | offset = trb - seg->trbs; |
83 | /* SEGMENT_SIZE in bytes, trbs are 16-byte aligned */ | 83 | /* SEGMENT_SIZE in bytes, trbs are 16-byte aligned */ |
84 | if (offset > SEGMENT_SIZE || (offset % sizeof(*trb)) != 0) | 84 | if (offset > SEGMENT_SIZE || (offset % sizeof(*trb)) != 0) |
85 | return 0; | 85 | return 0; |
@@ -145,8 +145,8 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer | |||
145 | if (consumer && last_trb_on_last_seg(xhci, ring, ring->deq_seg, next)) { | 145 | if (consumer && last_trb_on_last_seg(xhci, ring, ring->deq_seg, next)) { |
146 | ring->cycle_state = (ring->cycle_state ? 0 : 1); | 146 | ring->cycle_state = (ring->cycle_state ? 0 : 1); |
147 | if (!in_interrupt()) | 147 | if (!in_interrupt()) |
148 | xhci_dbg(xhci, "Toggle cycle state for ring 0x%x = %i\n", | 148 | xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n", |
149 | (unsigned int) ring, | 149 | ring, |
150 | (unsigned int) ring->cycle_state); | 150 | (unsigned int) ring->cycle_state); |
151 | } | 151 | } |
152 | ring->deq_seg = ring->deq_seg->next; | 152 | ring->deq_seg = ring->deq_seg->next; |
@@ -195,8 +195,8 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer | |||
195 | if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) { | 195 | if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) { |
196 | ring->cycle_state = (ring->cycle_state ? 0 : 1); | 196 | ring->cycle_state = (ring->cycle_state ? 0 : 1); |
197 | if (!in_interrupt()) | 197 | if (!in_interrupt()) |
198 | xhci_dbg(xhci, "Toggle cycle state for ring 0x%x = %i\n", | 198 | xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n", |
199 | (unsigned int) ring, | 199 | ring, |
200 | (unsigned int) ring->cycle_state); | 200 | (unsigned int) ring->cycle_state); |
201 | } | 201 | } |
202 | } | 202 | } |
@@ -387,12 +387,12 @@ void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, | |||
387 | */ | 387 | */ |
388 | cur_trb->generic.field[3] &= ~TRB_CHAIN; | 388 | cur_trb->generic.field[3] &= ~TRB_CHAIN; |
389 | xhci_dbg(xhci, "Cancel (unchain) link TRB\n"); | 389 | xhci_dbg(xhci, "Cancel (unchain) link TRB\n"); |
390 | xhci_dbg(xhci, "Address = 0x%x (0x%x dma); " | 390 | xhci_dbg(xhci, "Address = %p (0x%llx dma); " |
391 | "in seg 0x%x (0x%x dma)\n", | 391 | "in seg %p (0x%llx dma)\n", |
392 | (unsigned int) cur_trb, | 392 | cur_trb, |
393 | trb_virt_to_dma(cur_seg, cur_trb), | 393 | (unsigned long long)trb_virt_to_dma(cur_seg, cur_trb), |
394 | (unsigned int) cur_seg, | 394 | cur_seg, |
395 | cur_seg->dma); | 395 | (unsigned long long)cur_seg->dma); |
396 | } else { | 396 | } else { |
397 | cur_trb->generic.field[0] = 0; | 397 | cur_trb->generic.field[0] = 0; |
398 | cur_trb->generic.field[1] = 0; | 398 | cur_trb->generic.field[1] = 0; |
@@ -400,12 +400,12 @@ void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, | |||
400 | /* Preserve only the cycle bit of this TRB */ | 400 | /* Preserve only the cycle bit of this TRB */ |
401 | cur_trb->generic.field[3] &= TRB_CYCLE; | 401 | cur_trb->generic.field[3] &= TRB_CYCLE; |
402 | cur_trb->generic.field[3] |= TRB_TYPE(TRB_TR_NOOP); | 402 | cur_trb->generic.field[3] |= TRB_TYPE(TRB_TR_NOOP); |
403 | xhci_dbg(xhci, "Cancel TRB 0x%x (0x%x dma) " | 403 | xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) " |
404 | "in seg 0x%x (0x%x dma)\n", | 404 | "in seg %p (0x%llx dma)\n", |
405 | (unsigned int) cur_trb, | 405 | cur_trb, |
406 | trb_virt_to_dma(cur_seg, cur_trb), | 406 | (unsigned long long)trb_virt_to_dma(cur_seg, cur_trb), |
407 | (unsigned int) cur_seg, | 407 | cur_seg, |
408 | cur_seg->dma); | 408 | (unsigned long long)cur_seg->dma); |
409 | } | 409 | } |
410 | if (cur_trb == cur_td->last_trb) | 410 | if (cur_trb == cur_td->last_trb) |
411 | break; | 411 | break; |
@@ -456,9 +456,9 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci, | |||
456 | */ | 456 | */ |
457 | list_for_each(entry, &ep_ring->cancelled_td_list) { | 457 | list_for_each(entry, &ep_ring->cancelled_td_list) { |
458 | cur_td = list_entry(entry, struct xhci_td, cancelled_td_list); | 458 | cur_td = list_entry(entry, struct xhci_td, cancelled_td_list); |
459 | xhci_dbg(xhci, "Cancelling TD starting at 0x%x, 0x%x (dma).\n", | 459 | xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n", |
460 | (unsigned int) cur_td->first_trb, | 460 | cur_td->first_trb, |
461 | trb_virt_to_dma(cur_td->start_seg, cur_td->first_trb)); | 461 | (unsigned long long)trb_virt_to_dma(cur_td->start_seg, cur_td->first_trb)); |
462 | /* | 462 | /* |
463 | * If we stopped on the TD we need to cancel, then we have to | 463 | * If we stopped on the TD we need to cancel, then we have to |
464 | * move the xHC endpoint ring dequeue pointer past this TD. | 464 | * move the xHC endpoint ring dequeue pointer past this TD. |
@@ -480,12 +480,12 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci, | |||
480 | 480 | ||
481 | /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */ | 481 | /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */ |
482 | if (deq_state.new_deq_ptr && deq_state.new_deq_seg) { | 482 | if (deq_state.new_deq_ptr && deq_state.new_deq_seg) { |
483 | xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = 0x%x (0x%x dma), " | 483 | xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), " |
484 | "new deq ptr = 0x%x (0x%x dma), new cycle = %u\n", | 484 | "new deq ptr = %p (0x%llx dma), new cycle = %u\n", |
485 | (unsigned int) deq_state.new_deq_seg, | 485 | deq_state.new_deq_seg, |
486 | deq_state.new_deq_seg->dma, | 486 | (unsigned long long)deq_state.new_deq_seg->dma, |
487 | (unsigned int) deq_state.new_deq_ptr, | 487 | deq_state.new_deq_ptr, |
488 | trb_virt_to_dma(deq_state.new_deq_seg, deq_state.new_deq_ptr), | 488 | (unsigned long long)trb_virt_to_dma(deq_state.new_deq_seg, deq_state.new_deq_ptr), |
489 | deq_state.new_cycle_state); | 489 | deq_state.new_cycle_state); |
490 | queue_set_tr_deq(xhci, slot_id, ep_index, | 490 | queue_set_tr_deq(xhci, slot_id, ep_index, |
491 | deq_state.new_deq_seg, | 491 | deq_state.new_deq_seg, |
@@ -522,8 +522,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci, | |||
522 | cur_td->urb->hcpriv = NULL; | 522 | cur_td->urb->hcpriv = NULL; |
523 | usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), cur_td->urb); | 523 | usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), cur_td->urb); |
524 | 524 | ||
525 | xhci_dbg(xhci, "Giveback cancelled URB 0x%x\n", | 525 | xhci_dbg(xhci, "Giveback cancelled URB %p\n", cur_td->urb); |
526 | (unsigned int) cur_td->urb); | ||
527 | spin_unlock(&xhci->lock); | 526 | spin_unlock(&xhci->lock); |
528 | /* Doesn't matter what we pass for status, since the core will | 527 | /* Doesn't matter what we pass for status, since the core will |
529 | * just overwrite it (because the URB has been unlinked). | 528 | * just overwrite it (because the URB has been unlinked). |
@@ -1183,9 +1182,9 @@ unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb) | |||
1183 | num_trbs++; | 1182 | num_trbs++; |
1184 | running_total += TRB_MAX_BUFF_SIZE; | 1183 | running_total += TRB_MAX_BUFF_SIZE; |
1185 | } | 1184 | } |
1186 | xhci_dbg(xhci, " sg #%d: dma = %#x, len = %#x (%d), num_trbs = %d\n", | 1185 | xhci_dbg(xhci, " sg #%d: dma = %#llx, len = %#x (%d), num_trbs = %d\n", |
1187 | i, sg_dma_address(sg), len, len, | 1186 | i, (unsigned long long)sg_dma_address(sg), |
1188 | num_trbs - previous_total_trbs); | 1187 | len, len, num_trbs - previous_total_trbs); |
1189 | 1188 | ||
1190 | len = min_t(int, len, temp); | 1189 | len = min_t(int, len, temp); |
1191 | temp -= len; | 1190 | temp -= len; |
@@ -1394,11 +1393,11 @@ int queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
1394 | /* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */ | 1393 | /* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */ |
1395 | 1394 | ||
1396 | if (!in_interrupt()) | 1395 | if (!in_interrupt()) |
1397 | dev_dbg(&urb->dev->dev, "ep %#x - urb len = %#x (%d), addr = %#x, num_trbs = %d\n", | 1396 | dev_dbg(&urb->dev->dev, "ep %#x - urb len = %#x (%d), addr = %#llx, num_trbs = %d\n", |
1398 | urb->ep->desc.bEndpointAddress, | 1397 | urb->ep->desc.bEndpointAddress, |
1399 | urb->transfer_buffer_length, | 1398 | urb->transfer_buffer_length, |
1400 | urb->transfer_buffer_length, | 1399 | urb->transfer_buffer_length, |
1401 | urb->transfer_dma, | 1400 | (unsigned long long)urb->transfer_dma, |
1402 | num_trbs); | 1401 | num_trbs); |
1403 | 1402 | ||
1404 | ret = xhci_prepare_transfer(xhci, xhci->devs[slot_id], ep_index, | 1403 | ret = xhci_prepare_transfer(xhci, xhci->devs[slot_id], ep_index, |
@@ -1640,9 +1639,8 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id, | |||
1640 | addr = trb_virt_to_dma(deq_seg, deq_ptr); | 1639 | addr = trb_virt_to_dma(deq_seg, deq_ptr); |
1641 | if (addr == 0) | 1640 | if (addr == 0) |
1642 | xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n"); | 1641 | xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n"); |
1643 | xhci_warn(xhci, "WARN deq seg = 0x%x, deq pt = 0x%x\n", | 1642 | xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n", |
1644 | (unsigned int) deq_seg, | 1643 | deq_seg, deq_ptr); |
1645 | (unsigned int) deq_ptr); | ||
1646 | return queue_command(xhci, (u32) addr | cycle_state, 0, 0, | 1644 | return queue_command(xhci, (u32) addr | cycle_state, 0, 0, |
1647 | trb_slot_id | trb_ep_index | type); | 1645 | trb_slot_id | trb_ep_index | type); |
1648 | } | 1646 | } |