path: root/drivers/firewire
author		David Moore <dcm@MIT.EDU>	2008-01-06 17:21:41 -0500
committer	Stefan Richter <stefanr@s5r6.in-berlin.de>	2008-01-30 16:22:24 -0500
commit		fe5ca63430d640c3a922e5d7c6dd411ab6a2e077 (patch)
tree		7124dc3af55c7b66da44449fdab56a93a0e10403 /drivers/firewire
parent		bb9f2206b60ace29e49a057fbd9be86d79d86200 (diff)
firewire: fw-ohci: Dynamically allocate buffers for DMA descriptors
Previously, the fw-ohci driver used fixed-length buffers for storing
descriptors for isochronous receive DMA programs.  If an application (such
as libdc1394) generated a DMA program that was too large, fw-ohci would
reach the limit of its fixed-sized buffer and return an error to userspace.

This patch replaces the fixed-length ring-buffer with a linked-list of
page-sized buffers.  Additional buffers can be dynamically allocated and
appended to the list when necessary.  For a particular context, buffers are
kept around after use and reused as necessary, so there is no allocation
taking place after the DMA program is generated for the first time.

In addition, the buffers it uses are coherent for DMA, so there is no
syncing required before and after writes.  This syncing wasn't properly
done in the previous version of the code.

This is the fourth version of my patch that replaces a fixed-length buffer
for DMA descriptors with a dynamically allocated linked-list of buffers.
As we discovered with the last attempt, new context programs are sometimes
queued from interrupt context, making it unacceptable to call
tasklet_disable() from context_get_descriptors().

This version of the patch uses ohci->lock for all locking needs instead of
tasklet_disable/enable.  There is a new requirement that
context_get_descriptors() be called while holding ohci->lock.  It was
already held for the AT context, so adding the requirement for the iso
context did not seem particularly onerous.  In addition, this has the side
benefit of allowing iso queueing to be safely called from concurrent
user-space threads, which previously was not safe.

Signed-off-by: David Moore <dcm@acm.org>
Signed-off-by: Kristian Høgsberg <krh@redhat.com>
Signed-off-by: Jarod Wilson <jwilson@redhat.com>

Fixes the following issues:
  - Isochronous reception stopped prematurely if an application used a
    larger buffer.  (Reproduced with coriander.)
  - Isochronous reception stopped after one or a few frames on VT630x in
    OHCI 1.0 mode.  (Fixes reception in coriander, but dvgrab still doesn't
    work with these chips.)

Patch update: struct member alignment, whitespace nits.

Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>
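As a reader's aside, the buffer-list scheme described above can be modelled
by the following minimal, self-contained userspace sketch.  This is
illustration only, not code from this patch; the names buf, add_buffer,
get_space and recycle are invented here.  The driver's real implementation
(context_add_buffer(), context_get_descriptors() and the reuse logic in
context_tasklet()) appears in the diff below.

#include <stdlib.h>

/*
 * Minimal userspace model of the buffer scheme -- illustration only.
 * All buffers sit on one circular list: buffers holding the current
 * program come first, drained buffers are kept at the tail for reuse,
 * and a new buffer is allocated only when no free one is left.
 */
struct buf {
	struct buf *next, *prev;	/* circular doubly linked list */
	size_t size, used;
	unsigned char data[];
};

static struct buf head = { &head, &head, 0, 0 };	/* list anchor */
static struct buf *tail_buf;	/* buffer currently being filled */

static void add_to_tail(struct buf *b)
{
	b->prev = head.prev;
	b->next = &head;
	head.prev->next = b;
	head.prev = b;
}

/* Allocate one more page-sized buffer and append it to the list. */
static int add_buffer(size_t page)
{
	struct buf *b = calloc(1, page);

	if (b == NULL)
		return -1;
	b->size = page - sizeof(*b);
	add_to_tail(b);
	if (tail_buf == NULL)
		tail_buf = b;
	return 0;
}

/* Reserve n bytes, advancing to (or allocating) a free buffer if needed. */
static void *get_space(size_t n, size_t page)
{
	if (tail_buf == NULL || n > tail_buf->size)
		return NULL;
	if (n > tail_buf->size - tail_buf->used) {
		if (tail_buf->next == &head && add_buffer(page) < 0)
			return NULL;
		tail_buf = tail_buf->next;
	}
	tail_buf->used += n;
	return tail_buf->data + tail_buf->used - n;
}

/* Once the consumer has drained a buffer, recycle it as a free buffer. */
static void recycle(struct buf *b)
{
	b->used = 0;
	b->prev->next = b->next;	/* unlink ... */
	b->next->prev = b->prev;
	add_to_tail(b);			/* ... and requeue at the tail */
}

Once the first program has been built, later programs of similar size reuse
the recycled buffers and never allocate again, which is what the patch
achieves with context_add_buffer() and the list handling in
context_tasklet().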
Diffstat (limited to 'drivers/firewire')
-rw-r--r--	drivers/firewire/fw-ohci.c	236
1 files changed, 156 insertions, 80 deletions
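One further note before the diff: the new locking rule implies a caller
pattern roughly like the hypothetical sketch below (queue_one_block is an
invented name and the descriptor setup is elided; the other identifiers are
the driver's).  Because queueing can happen from interrupt context, the
lock is taken with spin_lock_irqsave(), and context_add_buffer()
correspondingly allocates with GFP_ATOMIC.

/* Illustrative only -- not part of this patch. */
static int queue_one_block(struct context *ctx, int z)
{
	struct descriptor *d;
	dma_addr_t d_bus;
	unsigned long flags;
	int retval = 0;

	spin_lock_irqsave(&ctx->ohci->lock, flags);

	/* context_get_descriptors() must be called with ohci->lock held. */
	d = context_get_descriptors(ctx, z, &d_bus);
	if (d == NULL) {
		retval = -ENOMEM;
		goto out;
	}

	/* ... fill in the z descriptors at d here ... */

	context_append(ctx, d, z, 0);
 out:
	spin_unlock_irqrestore(&ctx->ohci->lock, flags);
	return retval;
}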
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
index 74d5d945f200..7ebad3c14cb8 100644
--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/fw-ohci.c
@@ -98,17 +98,48 @@ struct context;
 typedef int (*descriptor_callback_t)(struct context *ctx,
				      struct descriptor *d,
				      struct descriptor *last);
+
+/*
+ * A buffer that contains a block of DMA-able coherent memory used for
+ * storing a portion of a DMA descriptor program.
+ */
+struct descriptor_buffer {
+	struct list_head list;
+	dma_addr_t buffer_bus;
+	size_t buffer_size;
+	size_t used;
+	struct descriptor buffer[0];
+};
+
 struct context {
 	struct fw_ohci *ohci;
 	u32 regs;
+	int total_allocation;
 
-	struct descriptor *buffer;
-	dma_addr_t buffer_bus;
-	size_t buffer_size;
-	struct descriptor *head_descriptor;
-	struct descriptor *tail_descriptor;
-	struct descriptor *tail_descriptor_last;
-	struct descriptor *prev_descriptor;
+	/*
+	 * List of page-sized buffers for storing DMA descriptors.
+	 * Head of list contains buffers in use and tail of list contains
+	 * free buffers.
+	 */
+	struct list_head buffer_list;
+
+	/*
+	 * Pointer to a buffer inside buffer_list that contains the tail
+	 * end of the current DMA program.
+	 */
+	struct descriptor_buffer *buffer_tail;
+
+	/*
+	 * The descriptor containing the branch address of the first
+	 * descriptor that has not yet been filled by the device.
+	 */
+	struct descriptor *last;
+
+	/*
+	 * The last descriptor in the DMA program.  It contains the branch
+	 * address that must be updated upon appending a new descriptor.
+	 */
+	struct descriptor *prev;
 
 	descriptor_callback_t callback;
 
@@ -198,8 +229,6 @@ static inline struct fw_ohci *fw_ohci(struct fw_card *card)
 #define SELF_ID_BUF_SIZE		0x800
 #define OHCI_TCODE_PHY_PACKET		0x0e
 #define OHCI_VERSION_1_1		0x010010
-#define ISO_BUFFER_SIZE			(64 * 1024)
-#define AT_BUFFER_SIZE			4096
 
 static char ohci_driver_name[] = KBUILD_MODNAME;
 
@@ -456,71 +485,108 @@ find_branch_descriptor(struct descriptor *d, int z)
 static void context_tasklet(unsigned long data)
 {
 	struct context *ctx = (struct context *) data;
-	struct fw_ohci *ohci = ctx->ohci;
 	struct descriptor *d, *last;
 	u32 address;
 	int z;
+	struct descriptor_buffer *desc;
 
-	dma_sync_single_for_cpu(ohci->card.device, ctx->buffer_bus,
-				ctx->buffer_size, DMA_TO_DEVICE);
-
-	d = ctx->tail_descriptor;
-	last = ctx->tail_descriptor_last;
-
+	desc = list_entry(ctx->buffer_list.next,
+			struct descriptor_buffer, list);
+	last = ctx->last;
 	while (last->branch_address != 0) {
+		struct descriptor_buffer *old_desc = desc;
 		address = le32_to_cpu(last->branch_address);
 		z = address & 0xf;
-		d = ctx->buffer + (address - ctx->buffer_bus) / sizeof(*d);
+		address &= ~0xf;
+
+		/* If the branch address points to a buffer outside of the
+		 * current buffer, advance to the next buffer. */
+		if (address < desc->buffer_bus ||
+				address >= desc->buffer_bus + desc->used)
+			desc = list_entry(desc->list.next,
+					struct descriptor_buffer, list);
+		d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d);
 		last = find_branch_descriptor(d, z);
 
 		if (!ctx->callback(ctx, d, last))
 			break;
 
-		ctx->tail_descriptor = d;
-		ctx->tail_descriptor_last = last;
+		if (old_desc != desc) {
+			/* If we've advanced to the next buffer, move the
+			 * previous buffer to the free list. */
+			unsigned long flags;
+			old_desc->used = 0;
+			spin_lock_irqsave(&ctx->ohci->lock, flags);
+			list_move_tail(&old_desc->list, &ctx->buffer_list);
+			spin_unlock_irqrestore(&ctx->ohci->lock, flags);
+		}
+		ctx->last = last;
 	}
 }
 
+/*
+ * Allocate a new buffer and add it to the list of free buffers for this
+ * context.  Must be called with ohci->lock held.
+ */
+static int
+context_add_buffer(struct context *ctx)
+{
+	struct descriptor_buffer *desc;
+	dma_addr_t bus_addr;
+	int offset;
+
+	/*
+	 * 16MB of descriptors should be far more than enough for any DMA
+	 * program.  This will catch run-away userspace or DoS attacks.
+	 */
+	if (ctx->total_allocation >= 16*1024*1024)
+		return -ENOMEM;
+
+	desc = dma_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE,
+			&bus_addr, GFP_ATOMIC);
+	if (!desc)
+		return -ENOMEM;
+
+	offset = (void *)&desc->buffer - (void *)desc;
+	desc->buffer_size = PAGE_SIZE - offset;
+	desc->buffer_bus = bus_addr + offset;
+	desc->used = 0;
+
+	list_add_tail(&desc->list, &ctx->buffer_list);
+	ctx->total_allocation += PAGE_SIZE;
+
+	return 0;
+}
+
 static int
 context_init(struct context *ctx, struct fw_ohci *ohci,
-	     size_t buffer_size, u32 regs,
-	     descriptor_callback_t callback)
+	     u32 regs, descriptor_callback_t callback)
 {
 	ctx->ohci = ohci;
 	ctx->regs = regs;
-	ctx->buffer_size = buffer_size;
-	ctx->buffer = kmalloc(buffer_size, GFP_KERNEL);
-	if (ctx->buffer == NULL)
+	ctx->total_allocation = 0;
+
+	INIT_LIST_HEAD(&ctx->buffer_list);
+	if (context_add_buffer(ctx) < 0)
 		return -ENOMEM;
 
+	ctx->buffer_tail = list_entry(ctx->buffer_list.next,
+			struct descriptor_buffer, list);
+
 	tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
 	ctx->callback = callback;
 
-	ctx->buffer_bus =
-		dma_map_single(ohci->card.device, ctx->buffer,
-			       buffer_size, DMA_TO_DEVICE);
-	if (dma_mapping_error(ctx->buffer_bus)) {
-		kfree(ctx->buffer);
-		return -ENOMEM;
-	}
-
-	ctx->head_descriptor = ctx->buffer;
-	ctx->prev_descriptor = ctx->buffer;
-	ctx->tail_descriptor = ctx->buffer;
-	ctx->tail_descriptor_last = ctx->buffer;
-
 	/*
 	 * We put a dummy descriptor in the buffer that has a NULL
 	 * branch address and looks like it's been sent.  That way we
-	 * have a descriptor to append DMA programs to.  Also, the
-	 * ring buffer invariant is that it always has at least one
-	 * element so that head == tail means buffer full.
+	 * have a descriptor to append DMA programs to.
 	 */
-
-	memset(ctx->head_descriptor, 0, sizeof(*ctx->head_descriptor));
-	ctx->head_descriptor->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
-	ctx->head_descriptor->transfer_status = cpu_to_le16(0x8011);
-	ctx->head_descriptor++;
+	memset(ctx->buffer_tail->buffer, 0, sizeof(*ctx->buffer_tail->buffer));
+	ctx->buffer_tail->buffer->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
+	ctx->buffer_tail->buffer->transfer_status = cpu_to_le16(0x8011);
+	ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer);
+	ctx->last = ctx->buffer_tail->buffer;
+	ctx->prev = ctx->buffer_tail->buffer;
 
 	return 0;
 }
@@ -529,35 +595,42 @@ static void
 context_release(struct context *ctx)
 {
 	struct fw_card *card = &ctx->ohci->card;
+	struct descriptor_buffer *desc, *tmp;
 
-	dma_unmap_single(card->device, ctx->buffer_bus,
-			 ctx->buffer_size, DMA_TO_DEVICE);
-	kfree(ctx->buffer);
+	list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list)
+		dma_free_coherent(card->device, PAGE_SIZE, desc,
+			desc->buffer_bus -
+			((void *)&desc->buffer - (void *)desc));
 }
 
+/* Must be called with ohci->lock held */
 static struct descriptor *
 context_get_descriptors(struct context *ctx, int z, dma_addr_t *d_bus)
 {
-	struct descriptor *d, *tail, *end;
-
-	d = ctx->head_descriptor;
-	tail = ctx->tail_descriptor;
-	end = ctx->buffer + ctx->buffer_size / sizeof(*d);
-
-	if (d + z <= tail) {
-		goto has_space;
-	} else if (d > tail && d + z <= end) {
-		goto has_space;
-	} else if (d > tail && ctx->buffer + z <= tail) {
-		d = ctx->buffer;
-		goto has_space;
-	}
+	struct descriptor *d = NULL;
+	struct descriptor_buffer *desc = ctx->buffer_tail;
+
+	if (z * sizeof(*d) > desc->buffer_size)
+		return NULL;
+
+	if (z * sizeof(*d) > desc->buffer_size - desc->used) {
+		/* No room for the descriptor in this buffer, so advance to the
+		 * next one. */
 
-	return NULL;
+		if (desc->list.next == &ctx->buffer_list) {
+			/* If there is no free buffer next in the list,
+			 * allocate one. */
+			if (context_add_buffer(ctx) < 0)
+				return NULL;
+		}
+		desc = list_entry(desc->list.next,
+				struct descriptor_buffer, list);
+		ctx->buffer_tail = desc;
+	}
 
- has_space:
+	d = desc->buffer + desc->used / sizeof(*d);
 	memset(d, 0, z * sizeof(*d));
-	*d_bus = ctx->buffer_bus + (d - ctx->buffer) * sizeof(*d);
+	*d_bus = desc->buffer_bus + desc->used;
 
 	return d;
 }
@@ -567,7 +640,7 @@ static void context_run(struct context *ctx, u32 extra)
 	struct fw_ohci *ohci = ctx->ohci;
 
 	reg_write(ohci, COMMAND_PTR(ctx->regs),
-		  le32_to_cpu(ctx->tail_descriptor_last->branch_address));
+		  le32_to_cpu(ctx->last->branch_address));
 	reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
 	reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
 	flush_writes(ohci);
@@ -577,15 +650,13 @@ static void context_append(struct context *ctx,
 			   struct descriptor *d, int z, int extra)
 {
 	dma_addr_t d_bus;
+	struct descriptor_buffer *desc = ctx->buffer_tail;
 
-	d_bus = ctx->buffer_bus + (d - ctx->buffer) * sizeof(*d);
+	d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d);
 
-	ctx->head_descriptor = d + z + extra;
-	ctx->prev_descriptor->branch_address = cpu_to_le32(d_bus | z);
-	ctx->prev_descriptor = find_branch_descriptor(d, z);
-
-	dma_sync_single_for_device(ctx->ohci->card.device, ctx->buffer_bus,
-				   ctx->buffer_size, DMA_TO_DEVICE);
+	desc->used += (z + extra) * sizeof(*d);
+	ctx->prev->branch_address = cpu_to_le32(d_bus | z);
+	ctx->prev = find_branch_descriptor(d, z);
 
 	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
 	flush_writes(ctx->ohci);
@@ -1571,8 +1642,7 @@ ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size)
 	if (ctx->header == NULL)
 		goto out;
 
-	retval = context_init(&ctx->context, ohci, ISO_BUFFER_SIZE,
-			      regs, callback);
+	retval = context_init(&ctx->context, ohci, regs, callback);
 	if (retval < 0)
 		goto out_with_header;
 
@@ -1933,16 +2003,22 @@ ohci_queue_iso(struct fw_iso_context *base,
 	       unsigned long payload)
 {
 	struct iso_context *ctx = container_of(base, struct iso_context, base);
+	unsigned long flags;
+	int retval;
 
+	spin_lock_irqsave(&ctx->context.ohci->lock, flags);
 	if (base->type == FW_ISO_CONTEXT_TRANSMIT)
-		return ohci_queue_iso_transmit(base, packet, buffer, payload);
+		retval = ohci_queue_iso_transmit(base, packet, buffer, payload);
 	else if (ctx->context.ohci->version >= OHCI_VERSION_1_1)
-		return ohci_queue_iso_receive_dualbuffer(base, packet,
-							 buffer, payload);
+		retval = ohci_queue_iso_receive_dualbuffer(base, packet,
+							buffer, payload);
 	else
-		return ohci_queue_iso_receive_packet_per_buffer(base, packet,
-								buffer,
-								payload);
+		retval = ohci_queue_iso_receive_packet_per_buffer(base, packet,
+							buffer,
+							payload);
+	spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);
+
+	return retval;
 }
 
 static const struct fw_card_driver ohci_driver = {
@@ -2014,10 +2090,10 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
 	ar_context_init(&ohci->ar_response_ctx, ohci,
 			OHCI1394_AsRspRcvContextControlSet);
 
-	context_init(&ohci->at_request_ctx, ohci, AT_BUFFER_SIZE,
+	context_init(&ohci->at_request_ctx, ohci,
 		     OHCI1394_AsReqTrContextControlSet, handle_at_packet);
 
-	context_init(&ohci->at_response_ctx, ohci, AT_BUFFER_SIZE,
+	context_init(&ohci->at_response_ctx, ohci,
 		     OHCI1394_AsRspTrContextControlSet, handle_at_packet);
 
 	reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);