author		Paul Mundt <lethal@linux-sh.org>	2009-08-04 02:05:39 -0400
committer	Paul Mundt <lethal@linux-sh.org>	2009-08-04 02:05:39 -0400
commit		00111076f725ec498f5ada19cdd984d111b0e9b3 (patch)
tree		d7d08779bfc836382537c23298613f5948368bdc /drivers/usb/host/xhci-mem.c
parent		133b170f08d6c20578f25b1ae71f80a5e638ccb6 (diff)
parent		2e6713c7662cc5ebc7346b033c404cb2f708fd51 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/suspend-2.6 into sh/hwblk
Diffstat (limited to 'drivers/usb/host/xhci-mem.c')
-rw-r--r--	drivers/usb/host/xhci-mem.c	300
1 file changed, 224 insertions(+), 76 deletions(-)
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index c8a72de1c508..e6b9a1c6002d 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -88,7 +88,7 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
 		return;
 	prev->next = next;
 	if (link_trbs) {
-		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr[0] = next->dma;
+		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = next->dma;
 
 		/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
 		val = prev->trbs[TRBS_PER_SEGMENT-1].link.control;
@@ -189,6 +189,63 @@ fail:
 	return 0;
 }
 
+#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
+
+struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
+		int type, gfp_t flags)
+{
+	struct xhci_container_ctx *ctx = kzalloc(sizeof(*ctx), flags);
+	if (!ctx)
+		return NULL;
+
+	BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT));
+	ctx->type = type;
+	ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
+	if (type == XHCI_CTX_TYPE_INPUT)
+		ctx->size += CTX_SIZE(xhci->hcc_params);
+
+	ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma);
+	memset(ctx->bytes, 0, ctx->size);
+	return ctx;
+}
+
+void xhci_free_container_ctx(struct xhci_hcd *xhci,
+		struct xhci_container_ctx *ctx)
+{
+	dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
+	kfree(ctx);
+}
+
+struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci,
+		struct xhci_container_ctx *ctx)
+{
+	BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT);
+	return (struct xhci_input_control_ctx *)ctx->bytes;
+}
+
+struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
+		struct xhci_container_ctx *ctx)
+{
+	if (ctx->type == XHCI_CTX_TYPE_DEVICE)
+		return (struct xhci_slot_ctx *)ctx->bytes;
+
+	return (struct xhci_slot_ctx *)
+		(ctx->bytes + CTX_SIZE(xhci->hcc_params));
+}
+
+struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
+		struct xhci_container_ctx *ctx,
+		unsigned int ep_index)
+{
+	/* increment ep index by offset of start of ep ctx array */
+	ep_index++;
+	if (ctx->type == XHCI_CTX_TYPE_INPUT)
+		ep_index++;
+
+	return (struct xhci_ep_ctx *)
+		(ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
+}
+
 /* All the xhci_tds in the ring's TD list should be freed at this point */
 void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
 {
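
Note: the helpers above fold the 32- vs 64-byte context-entry choice into one place. A device (output) context is a slot context plus 31 endpoint contexts, hence 1024 or 2048 bytes; an input context prepends one input control context entry, hence the extra CTX_SIZE() — 1056 or 2112 bytes, which is why the device pool later in this patch is sized for the 2112-byte worst case. A minimal caller-side sketch of how the accessors compose (assumes the xhci setup shown elsewhere in this patch; error handling elided):

    struct xhci_container_ctx *in_ctx;
    struct xhci_input_control_ctx *ctrl_ctx;
    struct xhci_slot_ctx *slot_ctx;
    struct xhci_ep_ctx *ep0_ctx;

    /* Entry layout of an input context: [0] input control context,
     * [1] slot context, [2..] endpoint contexts, CTX_SIZE() bytes apart.
     */
    in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, GFP_KERNEL);
    ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
    slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
    ep0_ctx = xhci_get_ep_ctx(xhci, in_ctx, 0);

    ctrl_ctx->add_flags = SLOT_FLAG | EP0_FLAG;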
@@ -200,8 +257,7 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
 		return;
 
 	dev = xhci->devs[slot_id];
-	xhci->dcbaa->dev_context_ptrs[2*slot_id] = 0;
-	xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0;
+	xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
 	if (!dev)
 		return;
 
@@ -210,11 +266,10 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
 			xhci_ring_free(xhci, dev->ep_rings[i]);
 
 	if (dev->in_ctx)
-		dma_pool_free(xhci->device_pool,
-				dev->in_ctx, dev->in_ctx_dma);
+		xhci_free_container_ctx(xhci, dev->in_ctx);
 	if (dev->out_ctx)
-		dma_pool_free(xhci->device_pool,
-				dev->out_ctx, dev->out_ctx_dma);
+		xhci_free_container_ctx(xhci, dev->out_ctx);
+
 	kfree(xhci->devs[slot_id]);
 	xhci->devs[slot_id] = 0;
 }
@@ -222,7 +277,6 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
 int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 		struct usb_device *udev, gfp_t flags)
 {
-	dma_addr_t dma;
 	struct xhci_virt_device *dev;
 
 	/* Slot ID 0 is reserved */
@@ -236,23 +290,21 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 		return 0;
 	dev = xhci->devs[slot_id];
 
-	/* Allocate the (output) device context that will be used in the HC */
-	dev->out_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma);
+	/* Allocate the (output) device context that will be used in the HC. */
+	dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
 	if (!dev->out_ctx)
 		goto fail;
-	dev->out_ctx_dma = dma;
+
 	xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
-			(unsigned long long)dma);
-	memset(dev->out_ctx, 0, sizeof(*dev->out_ctx));
+			(unsigned long long)dev->out_ctx->dma);
 
 	/* Allocate the (input) device context for address device command */
-	dev->in_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma);
+	dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
 	if (!dev->in_ctx)
 		goto fail;
-	dev->in_ctx_dma = dma;
+
 	xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
-			(unsigned long long)dma);
-	memset(dev->in_ctx, 0, sizeof(*dev->in_ctx));
+			(unsigned long long)dev->in_ctx->dma);
 
 	/* Allocate endpoint 0 ring */
 	dev->ep_rings[0] = xhci_ring_alloc(xhci, 1, true, flags);
@@ -261,17 +313,12 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 
 	init_completion(&dev->cmd_completion);
 
-	/*
-	 * Point to output device context in dcbaa; skip the output control
-	 * context, which is eight 32 bit fields (or 32 bytes long)
-	 */
-	xhci->dcbaa->dev_context_ptrs[2*slot_id] =
-		(u32) dev->out_ctx_dma + (32);
+	/* Point to output device context in dcbaa. */
+	xhci->dcbaa->dev_context_ptrs[slot_id] = dev->out_ctx->dma;
 	xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
 			slot_id,
-			&xhci->dcbaa->dev_context_ptrs[2*slot_id],
-			(unsigned long long)dev->out_ctx_dma);
-	xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0;
+			&xhci->dcbaa->dev_context_ptrs[slot_id],
+			(unsigned long long) xhci->dcbaa->dev_context_ptrs[slot_id]);
 
 	return 1;
 fail:
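
Note: with a single 64-bit pointer per slot, the DCBAA is indexed directly by slot_id instead of the old 2*slot_id / 2*slot_id+1 word pairs, and the old "+32" offset disappears because the output context no longer carries an input-style control context in front of it. A sketch of the array layout this implies (the real declaration lives in xhci.h, outside this diff; field names are inferred from the usage above):

    struct xhci_device_context_array {
            /* 64-bit DMA pointers to per-slot output contexts;
             * entry 0 is reserved for the scratchpad array (see below).
             */
            u64             dev_context_ptrs[MAX_HC_SLOTS];
            /* private xHCD bookkeeping: DMA address of this array */
            dma_addr_t      dma;
    };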
@@ -285,6 +332,8 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
 	struct xhci_virt_device *dev;
 	struct xhci_ep_ctx *ep0_ctx;
 	struct usb_device *top_dev;
+	struct xhci_slot_ctx *slot_ctx;
+	struct xhci_input_control_ctx *ctrl_ctx;
 
 	dev = xhci->devs[udev->slot_id];
 	/* Slot ID 0 is reserved */
@@ -293,27 +342,29 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
 				udev->slot_id);
 		return -EINVAL;
 	}
-	ep0_ctx = &dev->in_ctx->ep[0];
+	ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
+	ctrl_ctx = xhci_get_input_control_ctx(xhci, dev->in_ctx);
+	slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);
 
 	/* 2) New slot context and endpoint 0 context are valid*/
-	dev->in_ctx->add_flags = SLOT_FLAG | EP0_FLAG;
+	ctrl_ctx->add_flags = SLOT_FLAG | EP0_FLAG;
 
 	/* 3) Only the control endpoint is valid - one endpoint context */
-	dev->in_ctx->slot.dev_info |= LAST_CTX(1);
+	slot_ctx->dev_info |= LAST_CTX(1);
 
 	switch (udev->speed) {
 	case USB_SPEED_SUPER:
-		dev->in_ctx->slot.dev_info |= (u32) udev->route;
-		dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_SS;
+		slot_ctx->dev_info |= (u32) udev->route;
+		slot_ctx->dev_info |= (u32) SLOT_SPEED_SS;
 		break;
 	case USB_SPEED_HIGH:
-		dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_HS;
+		slot_ctx->dev_info |= (u32) SLOT_SPEED_HS;
 		break;
 	case USB_SPEED_FULL:
-		dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_FS;
+		slot_ctx->dev_info |= (u32) SLOT_SPEED_FS;
 		break;
 	case USB_SPEED_LOW:
-		dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_LS;
+		slot_ctx->dev_info |= (u32) SLOT_SPEED_LS;
 		break;
 	case USB_SPEED_VARIABLE:
 		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
@@ -327,7 +378,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
 	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
 			top_dev = top_dev->parent)
 		/* Found device below root hub */;
-	dev->in_ctx->slot.dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum);
+	slot_ctx->dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum);
 	xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum);
 
333 | /* Is this a LS/FS device under a HS hub? */ | 384 | /* Is this a LS/FS device under a HS hub? */ |
@@ -337,8 +388,8 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud | |||
337 | */ | 388 | */ |
338 | if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) && | 389 | if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) && |
339 | udev->tt) { | 390 | udev->tt) { |
340 | dev->in_ctx->slot.tt_info = udev->tt->hub->slot_id; | 391 | slot_ctx->tt_info = udev->tt->hub->slot_id; |
341 | dev->in_ctx->slot.tt_info |= udev->ttport << 8; | 392 | slot_ctx->tt_info |= udev->ttport << 8; |
342 | } | 393 | } |
343 | xhci_dbg(xhci, "udev->tt = %p\n", udev->tt); | 394 | xhci_dbg(xhci, "udev->tt = %p\n", udev->tt); |
344 | xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport); | 395 | xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport); |
@@ -360,10 +411,9 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
 	ep0_ctx->ep_info2 |= MAX_BURST(0);
 	ep0_ctx->ep_info2 |= ERROR_COUNT(3);
 
-	ep0_ctx->deq[0] =
+	ep0_ctx->deq =
 		dev->ep_rings[0]->first_seg->dma;
-	ep0_ctx->deq[0] |= dev->ep_rings[0]->cycle_state;
-	ep0_ctx->deq[1] = 0;
+	ep0_ctx->deq |= dev->ep_rings[0]->cycle_state;
 
 	/* Steps 7 and 8 were done in xhci_alloc_virt_device() */
 
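
Note: a single 64-bit deq field replaces the old deq[0]/deq[1] pair. OR-ing in cycle_state works because ring segments come from a dma_pool created with 64-byte alignment, so the low bits of first_seg->dma are zero, and bit 0 of the TR Dequeue Pointer field holds the dequeue cycle state (DCS). A worked sketch (the address value is illustrative only):

    u64 deq;

    deq = dev->ep_rings[0]->first_seg->dma;    /* e.g. 0x3f50c000; bits 5:0 are 0 */
    deq |= dev->ep_rings[0]->cycle_state;      /* DCS, 0 or 1, lands in bit 0 */
    ep0_ctx->deq = deq;                        /* e.g. 0x3f50c001 */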
@@ -470,25 +520,26 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 	unsigned int max_burst;
 
 	ep_index = xhci_get_endpoint_index(&ep->desc);
-	ep_ctx = &virt_dev->in_ctx->ep[ep_index];
+	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
 
 	/* Set up the endpoint ring */
 	virt_dev->new_ep_rings[ep_index] = xhci_ring_alloc(xhci, 1, true, mem_flags);
 	if (!virt_dev->new_ep_rings[ep_index])
 		return -ENOMEM;
 	ep_ring = virt_dev->new_ep_rings[ep_index];
-	ep_ctx->deq[0] = ep_ring->first_seg->dma | ep_ring->cycle_state;
-	ep_ctx->deq[1] = 0;
+	ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state;
 
 	ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep);
 
 	/* FIXME dig Mult and streams info out of ep companion desc */
 
-	/* Allow 3 retries for everything but isoc */
+	/* Allow 3 retries for everything but isoc;
+	 * error count = 0 means infinite retries.
+	 */
 	if (!usb_endpoint_xfer_isoc(&ep->desc))
 		ep_ctx->ep_info2 = ERROR_COUNT(3);
 	else
-		ep_ctx->ep_info2 = ERROR_COUNT(0);
+		ep_ctx->ep_info2 = ERROR_COUNT(1);
 
 	ep_ctx->ep_info2 |= xhci_get_endpoint_type(udev, ep);
 
@@ -498,7 +549,12 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 		max_packet = ep->desc.wMaxPacketSize;
 		ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
 		/* dig out max burst from ep companion desc */
-		max_packet = ep->ss_ep_comp->desc.bMaxBurst;
+		if (!ep->ss_ep_comp) {
+			xhci_warn(xhci, "WARN no SS endpoint companion descriptor.\n");
+			max_packet = 0;
+		} else {
+			max_packet = ep->ss_ep_comp->desc.bMaxBurst;
+		}
 		ep_ctx->ep_info2 |= MAX_BURST(max_packet);
 		break;
 	case USB_SPEED_HIGH:
@@ -531,18 +587,114 @@ void xhci_endpoint_zero(struct xhci_hcd *xhci,
 	struct xhci_ep_ctx *ep_ctx;
 
 	ep_index = xhci_get_endpoint_index(&ep->desc);
-	ep_ctx = &virt_dev->in_ctx->ep[ep_index];
+	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
 
 	ep_ctx->ep_info = 0;
 	ep_ctx->ep_info2 = 0;
-	ep_ctx->deq[0] = 0;
-	ep_ctx->deq[1] = 0;
+	ep_ctx->deq = 0;
 	ep_ctx->tx_info = 0;
 	/* Don't free the endpoint ring until the set interface or configuration
 	 * request succeeds.
 	 */
 }
 
+/* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
+static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
+{
+	int i;
+	struct device *dev = xhci_to_hcd(xhci)->self.controller;
+	int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
+
+	xhci_dbg(xhci, "Allocating %d scratchpad buffers\n", num_sp);
+
+	if (!num_sp)
+		return 0;
+
+	xhci->scratchpad = kzalloc(sizeof(*xhci->scratchpad), flags);
+	if (!xhci->scratchpad)
+		goto fail_sp;
+
+	xhci->scratchpad->sp_array =
+		pci_alloc_consistent(to_pci_dev(dev),
+				num_sp * sizeof(u64),
+				&xhci->scratchpad->sp_dma);
+	if (!xhci->scratchpad->sp_array)
+		goto fail_sp2;
+
+	xhci->scratchpad->sp_buffers = kzalloc(sizeof(void *) * num_sp, flags);
+	if (!xhci->scratchpad->sp_buffers)
+		goto fail_sp3;
+
+	xhci->scratchpad->sp_dma_buffers =
+		kzalloc(sizeof(dma_addr_t) * num_sp, flags);
+
+	if (!xhci->scratchpad->sp_dma_buffers)
+		goto fail_sp4;
+
+	xhci->dcbaa->dev_context_ptrs[0] = xhci->scratchpad->sp_dma;
+	for (i = 0; i < num_sp; i++) {
+		dma_addr_t dma;
+		void *buf = pci_alloc_consistent(to_pci_dev(dev),
+				xhci->page_size, &dma);
+		if (!buf)
+			goto fail_sp5;
+
+		xhci->scratchpad->sp_array[i] = dma;
+		xhci->scratchpad->sp_buffers[i] = buf;
+		xhci->scratchpad->sp_dma_buffers[i] = dma;
+	}
+
+	return 0;
+
+ fail_sp5:
+	for (i = i - 1; i >= 0; i--) {
+		pci_free_consistent(to_pci_dev(dev), xhci->page_size,
+				xhci->scratchpad->sp_buffers[i],
+				xhci->scratchpad->sp_dma_buffers[i]);
+	}
+	kfree(xhci->scratchpad->sp_dma_buffers);
+
+ fail_sp4:
+	kfree(xhci->scratchpad->sp_buffers);
+
+ fail_sp3:
+	pci_free_consistent(to_pci_dev(dev), num_sp * sizeof(u64),
+			xhci->scratchpad->sp_array,
+			xhci->scratchpad->sp_dma);
+
+ fail_sp2:
+	kfree(xhci->scratchpad);
+	xhci->scratchpad = NULL;
+
+ fail_sp:
+	return -ENOMEM;
+}
+
+static void scratchpad_free(struct xhci_hcd *xhci)
+{
+	int num_sp;
+	int i;
+	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+
+	if (!xhci->scratchpad)
+		return;
+
+	num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
+
+	for (i = 0; i < num_sp; i++) {
+		pci_free_consistent(pdev, xhci->page_size,
+				xhci->scratchpad->sp_buffers[i],
+				xhci->scratchpad->sp_dma_buffers[i]);
+	}
+	kfree(xhci->scratchpad->sp_dma_buffers);
+	kfree(xhci->scratchpad->sp_buffers);
+	pci_free_consistent(pdev, num_sp * sizeof(u64),
+			xhci->scratchpad->sp_array,
+			xhci->scratchpad->sp_dma);
+	kfree(xhci->scratchpad);
+	xhci->scratchpad = NULL;
+}
+
 void xhci_mem_cleanup(struct xhci_hcd *xhci)
 {
 	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
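
Note: scratchpad_alloc() implements the xHCI scratchpad contract: HCS_MAX_SCRATCHPAD() extracts from HCSPARAMS2 how many page-sized buffers the controller needs for its own bookkeeping, DCBAA entry 0 points at a u64 array of their DMA addresses, and the driver must never touch the buffers' contents. The bookkeeping structure the code assumes looks roughly like this (declared in xhci.h, not part of this hunk; field roles inferred from the alloc/free paths above):

    struct xhci_scratchpad {
            u64 *sp_array;                /* HC-visible array of buffer DMA addresses */
            dma_addr_t sp_dma;            /* DMA address of sp_array itself */
            void **sp_buffers;            /* kernel virtual addresses, kept for freeing */
            dma_addr_t *sp_dma_buffers;   /* per-buffer DMA addresses, kept for freeing */
    };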
@@ -551,10 +703,8 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 
 	/* Free the Event Ring Segment Table and the actual Event Ring */
 	xhci_writel(xhci, 0, &xhci->ir_set->erst_size);
-	xhci_writel(xhci, 0, &xhci->ir_set->erst_base[0]);
-	xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]);
-	xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[0]);
-	xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]);
+	xhci_write_64(xhci, 0, &xhci->ir_set->erst_base);
+	xhci_write_64(xhci, 0, &xhci->ir_set->erst_dequeue);
 	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
 	if (xhci->erst.entries)
 		pci_free_consistent(pdev, size,
@@ -566,8 +716,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 	xhci->event_ring = NULL;
 	xhci_dbg(xhci, "Freed event ring\n");
 
-	xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[0]);
-	xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[1]);
+	xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
 	if (xhci->cmd_ring)
 		xhci_ring_free(xhci, xhci->cmd_ring);
 	xhci->cmd_ring = NULL;
@@ -586,8 +735,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 	xhci->device_pool = NULL;
 	xhci_dbg(xhci, "Freed device context pool\n");
 
-	xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[0]);
-	xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[1]);
+	xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
 	if (xhci->dcbaa)
 		pci_free_consistent(pdev, sizeof(*xhci->dcbaa),
 				xhci->dcbaa, xhci->dcbaa->dma);
@@ -595,6 +743,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 
 	xhci->page_size = 0;
 	xhci->page_shift = 0;
+	scratchpad_free(xhci);
 }
 
 int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
@@ -602,6 +751,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	dma_addr_t dma;
 	struct device *dev = xhci_to_hcd(xhci)->self.controller;
 	unsigned int val, val2;
+	u64 val_64;
 	struct xhci_segment *seg;
 	u32 page_size;
 	int i;
@@ -647,8 +797,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	xhci->dcbaa->dma = dma;
 	xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n",
 			(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
-	xhci_writel(xhci, dma, &xhci->op_regs->dcbaa_ptr[0]);
-	xhci_writel(xhci, (u32) 0, &xhci->op_regs->dcbaa_ptr[1]);
+	xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
 
 	/*
 	 * Initialize the ring segment pool. The ring must be a contiguous
@@ -658,11 +807,10 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	 */
 	xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
 			SEGMENT_SIZE, 64, xhci->page_size);
+
 	/* See Table 46 and Note on Figure 55 */
-	/* FIXME support 64-byte contexts */
 	xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
-			sizeof(struct xhci_device_control),
-			64, xhci->page_size);
+			2112, 64, xhci->page_size);
 	if (!xhci->segment_pool || !xhci->device_pool)
 		goto fail;
 
@@ -675,14 +823,12 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 			(unsigned long long)xhci->cmd_ring->first_seg->dma);
 
 	/* Set the address in the Command Ring Control register */
-	val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[0]);
-	val = (val & ~CMD_RING_ADDR_MASK) |
-		(xhci->cmd_ring->first_seg->dma & CMD_RING_ADDR_MASK) |
+	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
+	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
+		(xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
 		xhci->cmd_ring->cycle_state;
-	xhci_dbg(xhci, "// Setting command ring address low bits to 0x%x\n", val);
-	xhci_writel(xhci, val, &xhci->op_regs->cmd_ring[0]);
-	xhci_dbg(xhci, "// Setting command ring address high bits to 0x0\n");
-	xhci_writel(xhci, (u32) 0, &xhci->op_regs->cmd_ring[1]);
+	xhci_dbg(xhci, "// Setting command ring address to 0x%x\n", val);
+	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
 	xhci_dbg_cmd_ptrs(xhci);
 
 	val = xhci_readl(xhci, &xhci->cap_regs->db_off);
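
Note: xhci_read_64()/xhci_write_64() replace the paired 32-bit accesses to cmd_ring[0]/cmd_ring[1] and friends. The xHCI specification allows software to access 64-bit registers as two 32-bit operations, low dword first, so a portable helper needs no readq/writeq. A sketch of what such a helper looks like (the real one is added to xhci.h by this series and is not shown in this hunk):

    static inline void xhci_write_64(struct xhci_hcd *xhci,
                                     const u64 val, __u64 __iomem *regs)
    {
            __u32 __iomem *ptr = (__u32 __iomem *) regs;

            /* low dword first, then high; the spec-allowed access pattern */
            writel((u32) (val & 0xffffffff), ptr);
            writel((u32) (val >> 32), ptr + 1);
    }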
@@ -722,8 +868,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	/* set ring base address and size for each segment table entry */
 	for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
 		struct xhci_erst_entry *entry = &xhci->erst.entries[val];
-		entry->seg_addr[0] = seg->dma;
-		entry->seg_addr[1] = 0;
+		entry->seg_addr = seg->dma;
 		entry->seg_size = TRBS_PER_SEGMENT;
 		entry->rsvd = 0;
 		seg = seg->next;
@@ -741,11 +886,10 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	/* set the segment table base address */
 	xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n",
 			(unsigned long long)xhci->erst.erst_dma_addr);
-	val = xhci_readl(xhci, &xhci->ir_set->erst_base[0]);
-	val &= ERST_PTR_MASK;
-	val |= (xhci->erst.erst_dma_addr & ~ERST_PTR_MASK);
-	xhci_writel(xhci, val, &xhci->ir_set->erst_base[0]);
-	xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]);
+	val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
+	val_64 &= ERST_PTR_MASK;
+	val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
+	xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);
 
 	/* Set the event ring dequeue address */
 	xhci_set_hc_event_deq(xhci);
@@ -761,7 +905,11 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	for (i = 0; i < MAX_HC_SLOTS; ++i)
 		xhci->devs[i] = 0;
 
+	if (scratchpad_alloc(xhci, flags))
+		goto fail;
+
 	return 0;
+
 fail:
 	xhci_warn(xhci, "Couldn't initialize memory\n");
 	xhci_mem_cleanup(xhci);