author     Linus Torvalds <torvalds@linux-foundation.org>  2008-01-30 19:40:18 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2008-01-30 19:40:18 -0500
commit     94ed294c20ef07fffa40817c68155fba35dd67f1 (patch)
tree       672cb304bd6440e681daeff2d04f753dc43dd2e4  /drivers/firewire/fw-ohci.c
parent     5bdeae46be6dfe9efa44a548bd622af325f4bdb4 (diff)
parent     384170da9384b7bb3650c0c9b9d17ba0f7bde4ff (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6: (26 commits)
  firewire: fw-sbp2: Use sbp2 device-provided mgt orb timeout for logins
  firewire: fw-sbp2: increase login orb reply timeout, fix "failed to login"
  firewire: replace subtraction with bitwise and
  firewire: fw-core: react on bus resets while the config ROM is being fetched
  firewire: enforce access order between generation and node ID, fix "giving up on config rom"
  firewire: fw-cdev: use device generation, not card generation
  firewire: fw-sbp2: use device generation, not card generation
  firewire: fw-sbp2: try to increase reconnect_hold (speed up reconnection)
  firewire: fw-sbp2: skip unnecessary logout
  firewire vs. ieee1394: clarify MAINTAINERS
  firewire: fw-ohci: Dynamically allocate buffers for DMA descriptors
  firewire: fw-ohci: CycleTooLong interrupt management
  firewire: Fix extraction of source node id
  firewire: fw-ohci: Bug fixes for packet-per-buffer support
  firewire: fw-ohci: Fix for dualbuffer three-or-more buffers
  firewire: fw-sbp2: remove unused misleading macro
  firewire: fw-sbp2: prepare for s/g chaining
  firewire: fw-sbp2: refactor workq and kref handling
  ieee1394: ohci1394: don't schedule IT tasklets on IR events
  ieee1394: sbp2: raise default transfer size limit
  ...
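Orientation note (not part of the commit): the fw-ohci descriptor change below replaces the driver's fixed-size descriptor ring with page-sized buffers kept on a list, grown on demand and capped so a run-away DMA program cannot allocate without bound. The standalone C program below is only a simplified, hypothetical model of that idea, using malloc() and a plain singly linked list; the names demo_buffer, demo_context, add_buffer and get_descriptors are invented for illustration. The real driver uses struct list_head, dma_alloc_coherent() and ohci->lock, exactly as the diff shows.

/* Userspace model of a grow-on-demand descriptor buffer list (illustrative only). */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BUF_SIZE  4096   /* stands in for PAGE_SIZE */
#define DESC_SIZE 16     /* stands in for sizeof(struct descriptor) */

struct demo_buffer {
	struct demo_buffer *next;       /* singly linked list of buffers */
	size_t used;                    /* bytes already handed out */
	unsigned char data[BUF_SIZE];
};

struct demo_context {
	struct demo_buffer *head, *tail;
	size_t total_allocation;        /* capped, like the 16 MB limit in the patch */
};

static struct demo_buffer *add_buffer(struct demo_context *ctx)
{
	struct demo_buffer *b;

	if (ctx->total_allocation >= 16 * 1024 * 1024)
		return NULL;            /* refuse run-away growth */
	b = calloc(1, sizeof(*b));
	if (!b)
		return NULL;
	if (ctx->tail)
		ctx->tail->next = b;
	else
		ctx->head = b;
	ctx->tail = b;
	ctx->total_allocation += sizeof(*b);
	return b;
}

/* Hand out space for z descriptors, growing the list when the tail is full. */
static void *get_descriptors(struct demo_context *ctx, int z)
{
	size_t need = (size_t)z * DESC_SIZE;
	struct demo_buffer *b = ctx->tail;
	void *d;

	if (need > BUF_SIZE)
		return NULL;
	if (!b || need > BUF_SIZE - b->used) {
		b = add_buffer(ctx);
		if (!b)
			return NULL;
	}
	d = b->data + b->used;
	b->used += need;
	memset(d, 0, need);
	return d;
}

int main(void)
{
	struct demo_context ctx = { 0 };
	int i;

	for (i = 0; i < 1000; i++)
		if (!get_descriptors(&ctx, 4))
			return 1;
	printf("allocated %zu bytes across buffers\n", ctx.total_allocation);
	return 0;
}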
Diffstat (limited to 'drivers/firewire/fw-ohci.c')
-rw-r--r--  drivers/firewire/fw-ohci.c  |  390
1 file changed, 238 insertions(+), 152 deletions(-)
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
index 436a855a4c60..7ebad3c14cb8 100644
--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/fw-ohci.c
@@ -98,17 +98,48 @@ struct context;
 typedef int (*descriptor_callback_t)(struct context *ctx,
 				      struct descriptor *d,
 				      struct descriptor *last);
+
+/*
+ * A buffer that contains a block of DMA-able coherent memory used for
+ * storing a portion of a DMA descriptor program.
+ */
+struct descriptor_buffer {
+	struct list_head list;
+	dma_addr_t buffer_bus;
+	size_t buffer_size;
+	size_t used;
+	struct descriptor buffer[0];
+};
+
 struct context {
 	struct fw_ohci *ohci;
 	u32 regs;
+	int total_allocation;
 
-	struct descriptor *buffer;
-	dma_addr_t buffer_bus;
-	size_t buffer_size;
-	struct descriptor *head_descriptor;
-	struct descriptor *tail_descriptor;
-	struct descriptor *tail_descriptor_last;
-	struct descriptor *prev_descriptor;
+	/*
+	 * List of page-sized buffers for storing DMA descriptors.
+	 * Head of list contains buffers in use and tail of list contains
+	 * free buffers.
+	 */
+	struct list_head buffer_list;
+
+	/*
+	 * Pointer to a buffer inside buffer_list that contains the tail
+	 * end of the current DMA program.
+	 */
+	struct descriptor_buffer *buffer_tail;
+
+	/*
+	 * The descriptor containing the branch address of the first
+	 * descriptor that has not yet been filled by the device.
+	 */
+	struct descriptor *last;
+
+	/*
+	 * The last descriptor in the DMA program. It contains the branch
+	 * address that must be updated upon appending a new descriptor.
+	 */
+	struct descriptor *prev;
 
 	descriptor_callback_t callback;
 
@@ -125,6 +156,7 @@ struct context {
 struct iso_context {
 	struct fw_iso_context base;
 	struct context context;
+	int excess_bytes;
 	void *header;
 	size_t header_length;
 };
@@ -197,8 +229,6 @@ static inline struct fw_ohci *fw_ohci(struct fw_card *card)
 #define SELF_ID_BUF_SIZE	0x800
 #define OHCI_TCODE_PHY_PACKET	0x0e
 #define OHCI_VERSION_1_1	0x010010
-#define ISO_BUFFER_SIZE		(64 * 1024)
-#define AT_BUFFER_SIZE		4096
 
 static char ohci_driver_name[] = KBUILD_MODNAME;
 
@@ -455,71 +485,108 @@ find_branch_descriptor(struct descriptor *d, int z)
 static void context_tasklet(unsigned long data)
 {
 	struct context *ctx = (struct context *) data;
-	struct fw_ohci *ohci = ctx->ohci;
 	struct descriptor *d, *last;
 	u32 address;
 	int z;
+	struct descriptor_buffer *desc;
 
-	dma_sync_single_for_cpu(ohci->card.device, ctx->buffer_bus,
-				ctx->buffer_size, DMA_TO_DEVICE);
-
-	d = ctx->tail_descriptor;
-	last = ctx->tail_descriptor_last;
-
+	desc = list_entry(ctx->buffer_list.next,
+			struct descriptor_buffer, list);
+	last = ctx->last;
 	while (last->branch_address != 0) {
+		struct descriptor_buffer *old_desc = desc;
 		address = le32_to_cpu(last->branch_address);
 		z = address & 0xf;
-		d = ctx->buffer + (address - ctx->buffer_bus) / sizeof(*d);
+		address &= ~0xf;
+
+		/* If the branch address points to a buffer outside of the
+		 * current buffer, advance to the next buffer. */
+		if (address < desc->buffer_bus ||
+				address >= desc->buffer_bus + desc->used)
+			desc = list_entry(desc->list.next,
+					struct descriptor_buffer, list);
+		d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d);
 		last = find_branch_descriptor(d, z);
 
 		if (!ctx->callback(ctx, d, last))
 			break;
 
-		ctx->tail_descriptor = d;
-		ctx->tail_descriptor_last = last;
+		if (old_desc != desc) {
+			/* If we've advanced to the next buffer, move the
+			 * previous buffer to the free list. */
+			unsigned long flags;
+			old_desc->used = 0;
+			spin_lock_irqsave(&ctx->ohci->lock, flags);
+			list_move_tail(&old_desc->list, &ctx->buffer_list);
+			spin_unlock_irqrestore(&ctx->ohci->lock, flags);
+		}
+		ctx->last = last;
 	}
 }
 
+/*
+ * Allocate a new buffer and add it to the list of free buffers for this
+ * context. Must be called with ohci->lock held.
+ */
+static int
+context_add_buffer(struct context *ctx)
+{
+	struct descriptor_buffer *desc;
+	dma_addr_t bus_addr;
+	int offset;
+
+	/*
+	 * 16MB of descriptors should be far more than enough for any DMA
+	 * program. This will catch run-away userspace or DoS attacks.
+	 */
+	if (ctx->total_allocation >= 16*1024*1024)
+		return -ENOMEM;
+
+	desc = dma_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE,
+			&bus_addr, GFP_ATOMIC);
+	if (!desc)
+		return -ENOMEM;
+
+	offset = (void *)&desc->buffer - (void *)desc;
+	desc->buffer_size = PAGE_SIZE - offset;
+	desc->buffer_bus = bus_addr + offset;
+	desc->used = 0;
+
+	list_add_tail(&desc->list, &ctx->buffer_list);
+	ctx->total_allocation += PAGE_SIZE;
+
+	return 0;
+}
+
 static int
 context_init(struct context *ctx, struct fw_ohci *ohci,
-	     size_t buffer_size, u32 regs,
-	     descriptor_callback_t callback)
+	     u32 regs, descriptor_callback_t callback)
 {
 	ctx->ohci = ohci;
 	ctx->regs = regs;
-	ctx->buffer_size = buffer_size;
-	ctx->buffer = kmalloc(buffer_size, GFP_KERNEL);
-	if (ctx->buffer == NULL)
+	ctx->total_allocation = 0;
+
+	INIT_LIST_HEAD(&ctx->buffer_list);
+	if (context_add_buffer(ctx) < 0)
 		return -ENOMEM;
 
+	ctx->buffer_tail = list_entry(ctx->buffer_list.next,
+			struct descriptor_buffer, list);
+
 	tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
 	ctx->callback = callback;
 
-	ctx->buffer_bus =
-		dma_map_single(ohci->card.device, ctx->buffer,
-			       buffer_size, DMA_TO_DEVICE);
-	if (dma_mapping_error(ctx->buffer_bus)) {
-		kfree(ctx->buffer);
-		return -ENOMEM;
-	}
-
-	ctx->head_descriptor = ctx->buffer;
-	ctx->prev_descriptor = ctx->buffer;
-	ctx->tail_descriptor = ctx->buffer;
-	ctx->tail_descriptor_last = ctx->buffer;
-
 	/*
 	 * We put a dummy descriptor in the buffer that has a NULL
 	 * branch address and looks like it's been sent. That way we
-	 * have a descriptor to append DMA programs to. Also, the
-	 * ring buffer invariant is that it always has at least one
-	 * element so that head == tail means buffer full.
+	 * have a descriptor to append DMA programs to.
 	 */
-
-	memset(ctx->head_descriptor, 0, sizeof(*ctx->head_descriptor));
-	ctx->head_descriptor->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
-	ctx->head_descriptor->transfer_status = cpu_to_le16(0x8011);
-	ctx->head_descriptor++;
+	memset(ctx->buffer_tail->buffer, 0, sizeof(*ctx->buffer_tail->buffer));
+	ctx->buffer_tail->buffer->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
+	ctx->buffer_tail->buffer->transfer_status = cpu_to_le16(0x8011);
+	ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer);
+	ctx->last = ctx->buffer_tail->buffer;
+	ctx->prev = ctx->buffer_tail->buffer;
 
 	return 0;
 }
@@ -528,35 +595,42 @@ static void
 context_release(struct context *ctx)
 {
 	struct fw_card *card = &ctx->ohci->card;
+	struct descriptor_buffer *desc, *tmp;
 
-	dma_unmap_single(card->device, ctx->buffer_bus,
-			 ctx->buffer_size, DMA_TO_DEVICE);
-	kfree(ctx->buffer);
+	list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list)
+		dma_free_coherent(card->device, PAGE_SIZE, desc,
+			desc->buffer_bus -
+			((void *)&desc->buffer - (void *)desc));
 }
 
+/* Must be called with ohci->lock held */
 static struct descriptor *
 context_get_descriptors(struct context *ctx, int z, dma_addr_t *d_bus)
 {
-	struct descriptor *d, *tail, *end;
-
-	d = ctx->head_descriptor;
-	tail = ctx->tail_descriptor;
-	end = ctx->buffer + ctx->buffer_size / sizeof(*d);
-
-	if (d + z <= tail) {
-		goto has_space;
-	} else if (d > tail && d + z <= end) {
-		goto has_space;
-	} else if (d > tail && ctx->buffer + z <= tail) {
-		d = ctx->buffer;
-		goto has_space;
-	}
+	struct descriptor *d = NULL;
+	struct descriptor_buffer *desc = ctx->buffer_tail;
+
+	if (z * sizeof(*d) > desc->buffer_size)
+		return NULL;
 
-	return NULL;
+	if (z * sizeof(*d) > desc->buffer_size - desc->used) {
+		/* No room for the descriptor in this buffer, so advance to the
+		 * next one. */
+
+		if (desc->list.next == &ctx->buffer_list) {
+			/* If there is no free buffer next in the list,
+			 * allocate one. */
+			if (context_add_buffer(ctx) < 0)
+				return NULL;
+		}
+		desc = list_entry(desc->list.next,
+				struct descriptor_buffer, list);
+		ctx->buffer_tail = desc;
+	}
 
- has_space:
+	d = desc->buffer + desc->used / sizeof(*d);
 	memset(d, 0, z * sizeof(*d));
-	*d_bus = ctx->buffer_bus + (d - ctx->buffer) * sizeof(*d);
+	*d_bus = desc->buffer_bus + desc->used;
 
 	return d;
 }
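Aside (not part of the commit): context_get_descriptors() above computes the DMA bus address of a freshly handed-out descriptor as desc->buffer_bus + desc->used, and context_tasklet() earlier inverts that mapping when it follows a branch address back to a CPU pointer. The tiny standalone program below illustrates only that offset arithmetic; the demo_desc type and the addresses are made up for the example.

/* CPU pointer <-> bus address offset math, modeled in userspace (illustrative only). */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for struct descriptor; 16 bytes like a real OHCI descriptor. */
struct demo_desc {
	uint16_t req_count, control;
	uint32_t data_address;
	uint32_t branch_address;
	uint16_t res_count, transfer_status;
};

int main(void)
{
	enum { NDESC = 64 };
	struct demo_desc *buffer = calloc(NDESC, sizeof(*buffer));
	uint64_t buffer_bus = 0x3a000000;      /* pretend bus address of buffer[0] */
	size_t used = 5 * sizeof(*buffer);     /* five descriptors already handed out */

	/* Forward mapping, as in context_get_descriptors(): */
	struct demo_desc *d = buffer + used / sizeof(*d);
	uint64_t d_bus = buffer_bus + used;

	/* Reverse mapping, as in context_tasklet() following a branch address: */
	struct demo_desc *again = buffer + (d_bus - buffer_bus) / sizeof(*again);

	assert(again == d);
	printf("descriptor #%zu: cpu=%p bus=0x%llx\n",
	       used / sizeof(*d), (void *)d, (unsigned long long)d_bus);
	free(buffer);
	return 0;
}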
@@ -566,7 +640,7 @@ static void context_run(struct context *ctx, u32 extra)
 	struct fw_ohci *ohci = ctx->ohci;
 
 	reg_write(ohci, COMMAND_PTR(ctx->regs),
-		  le32_to_cpu(ctx->tail_descriptor_last->branch_address));
+		  le32_to_cpu(ctx->last->branch_address));
 	reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
 	reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
 	flush_writes(ohci);
@@ -576,15 +650,13 @@ static void context_append(struct context *ctx,
 			   struct descriptor *d, int z, int extra)
 {
 	dma_addr_t d_bus;
+	struct descriptor_buffer *desc = ctx->buffer_tail;
 
-	d_bus = ctx->buffer_bus + (d - ctx->buffer) * sizeof(*d);
+	d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d);
 
-	ctx->head_descriptor = d + z + extra;
-	ctx->prev_descriptor->branch_address = cpu_to_le32(d_bus | z);
-	ctx->prev_descriptor = find_branch_descriptor(d, z);
-
-	dma_sync_single_for_device(ctx->ohci->card.device, ctx->buffer_bus,
-				   ctx->buffer_size, DMA_TO_DEVICE);
+	desc->used += (z + extra) * sizeof(*d);
+	ctx->prev->branch_address = cpu_to_le32(d_bus | z);
+	ctx->prev = find_branch_descriptor(d, z);
 
 	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
 	flush_writes(ctx->ohci);
@@ -1078,6 +1150,13 @@ static irqreturn_t irq_handler(int irq, void *data)
 	if (unlikely(event & OHCI1394_postedWriteErr))
 		fw_error("PCI posted write error\n");
 
+	if (unlikely(event & OHCI1394_cycleTooLong)) {
+		if (printk_ratelimit())
+			fw_notify("isochronous cycle too long\n");
+		reg_write(ohci, OHCI1394_LinkControlSet,
+			  OHCI1394_LinkControl_cycleMaster);
+	}
+
 	if (event & OHCI1394_cycle64Seconds) {
 		cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
 		if ((cycle_time & 0x80000000) == 0)
@@ -1151,8 +1230,8 @@ static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
 		  OHCI1394_RQPkt | OHCI1394_RSPkt |
 		  OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
 		  OHCI1394_isochRx | OHCI1394_isochTx |
-		  OHCI1394_postedWriteErr | OHCI1394_cycle64Seconds |
-		  OHCI1394_masterIntEnable);
+		  OHCI1394_postedWriteErr | OHCI1394_cycleTooLong |
+		  OHCI1394_cycle64Seconds | OHCI1394_masterIntEnable);
 
 	/* Activate link_on bit and contender bit in our self ID packets.*/
 	if (ohci_update_phy_reg(card, 4, 0,
@@ -1408,9 +1487,13 @@ static int handle_ir_dualbuffer_packet(struct context *context,
 	void *p, *end;
 	int i;
 
-	if (db->first_res_count > 0 && db->second_res_count > 0)
-		/* This descriptor isn't done yet, stop iteration. */
-		return 0;
+	if (db->first_res_count > 0 && db->second_res_count > 0) {
+		if (ctx->excess_bytes <= le16_to_cpu(db->second_req_count)) {
+			/* This descriptor isn't done yet, stop iteration. */
+			return 0;
+		}
+		ctx->excess_bytes -= le16_to_cpu(db->second_req_count);
+	}
 
 	header_length = le16_to_cpu(db->first_req_count) -
 		le16_to_cpu(db->first_res_count);
@@ -1429,11 +1512,15 @@ static int handle_ir_dualbuffer_packet(struct context *context,
 		*(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
 		memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4);
 		i += ctx->base.header_size;
+		ctx->excess_bytes +=
+			(le32_to_cpu(*(u32 *)(p + 4)) >> 16) & 0xffff;
 		p += ctx->base.header_size + 4;
 	}
-
 	ctx->header_length = i;
 
+	ctx->excess_bytes -= le16_to_cpu(db->second_req_count) -
+		le16_to_cpu(db->second_res_count);
+
 	if (le16_to_cpu(db->control) & DESCRIPTOR_IRQ_ALWAYS) {
 		ir_header = (__le32 *) (db + 1);
 		ctx->base.callback(&ctx->base,
@@ -1452,24 +1539,24 @@ static int handle_ir_packet_per_buffer(struct context *context,
 {
 	struct iso_context *ctx =
 		container_of(context, struct iso_context, context);
-	struct descriptor *pd = d + 1;
+	struct descriptor *pd;
 	__le32 *ir_header;
-	size_t header_length;
-	void *p, *end;
-	int i, z;
+	void *p;
+	int i;
 
-	if (pd->res_count == pd->req_count)
+	for (pd = d; pd <= last; pd++) {
+		if (pd->transfer_status)
+			break;
+	}
+	if (pd > last)
 		/* Descriptor(s) not done yet, stop iteration */
 		return 0;
 
-	header_length = le16_to_cpu(d->req_count);
-
 	i = ctx->header_length;
-	z = le32_to_cpu(pd->branch_address) & 0xf;
-	p = d + z;
-	end = p + header_length;
+	p = last + 1;
 
-	while (p < end && i + ctx->base.header_size <= PAGE_SIZE) {
+	if (ctx->base.header_size > 0 &&
+			i + ctx->base.header_size <= PAGE_SIZE) {
 		/*
 		 * The iso header is byteswapped to little endian by
 		 * the controller, but the remaining header quadlets
@@ -1478,14 +1565,11 @@ static int handle_ir_packet_per_buffer(struct context *context,
 		 */
 		*(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
 		memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4);
-		i += ctx->base.header_size;
-		p += ctx->base.header_size + 4;
+		ctx->header_length += ctx->base.header_size;
 	}
 
-	ctx->header_length = i;
-
-	if (le16_to_cpu(pd->control) & DESCRIPTOR_IRQ_ALWAYS) {
-		ir_header = (__le32 *) (d + z);
+	if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
+		ir_header = (__le32 *) p;
 		ctx->base.callback(&ctx->base,
 				   le32_to_cpu(ir_header[0]) & 0xffff,
 				   ctx->header_length, ctx->header,
@@ -1493,7 +1577,6 @@ static int handle_ir_packet_per_buffer(struct context *context,
 		ctx->header_length = 0;
 	}
 
-
 	return 1;
 }
 
@@ -1559,8 +1642,7 @@ ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size)
 	if (ctx->header == NULL)
 		goto out;
 
-	retval = context_init(&ctx->context, ohci, ISO_BUFFER_SIZE,
-			      regs, callback);
+	retval = context_init(&ctx->context, ohci, regs, callback);
 	if (retval < 0)
 		goto out_with_header;
 
@@ -1775,19 +1857,6 @@ ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
 	 * packet, retransmit or terminate..
 	 */
 
-	if (packet->skip) {
-		d = context_get_descriptors(&ctx->context, 2, &d_bus);
-		if (d == NULL)
-			return -ENOMEM;
-
-		db = (struct db_descriptor *) d;
-		db->control = cpu_to_le16(DESCRIPTOR_STATUS |
-					  DESCRIPTOR_BRANCH_ALWAYS |
-					  DESCRIPTOR_WAIT);
-		db->first_size = cpu_to_le16(ctx->base.header_size + 4);
-		context_append(&ctx->context, d, 2, 0);
-	}
-
 	p = packet;
 	z = 2;
 
@@ -1815,11 +1884,18 @@ ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
 		db->control = cpu_to_le16(DESCRIPTOR_STATUS |
 					  DESCRIPTOR_BRANCH_ALWAYS);
 		db->first_size = cpu_to_le16(ctx->base.header_size + 4);
-		db->first_req_count = cpu_to_le16(header_size);
+		if (p->skip && rest == p->payload_length) {
+			db->control |= cpu_to_le16(DESCRIPTOR_WAIT);
+			db->first_req_count = db->first_size;
+		} else {
+			db->first_req_count = cpu_to_le16(header_size);
+		}
 		db->first_res_count = db->first_req_count;
 		db->first_buffer = cpu_to_le32(d_bus + sizeof(*db));
 
-		if (offset + rest < PAGE_SIZE)
+		if (p->skip && rest == p->payload_length)
+			length = 4;
+		else if (offset + rest < PAGE_SIZE)
 			length = rest;
 		else
 			length = PAGE_SIZE - offset;
@@ -1835,7 +1911,8 @@ ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
 		context_append(&ctx->context, d, z, header_z);
 		offset = (offset + length) & ~PAGE_MASK;
 		rest -= length;
-		page++;
+		if (offset == 0)
+			page++;
 	}
 
 	return 0;
@@ -1849,67 +1926,70 @@ ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
 {
 	struct iso_context *ctx = container_of(base, struct iso_context, base);
 	struct descriptor *d = NULL, *pd = NULL;
-	struct fw_iso_packet *p;
+	struct fw_iso_packet *p = packet;
 	dma_addr_t d_bus, page_bus;
 	u32 z, header_z, rest;
-	int i, page, offset, packet_count, header_size;
-
-	if (packet->skip) {
-		d = context_get_descriptors(&ctx->context, 1, &d_bus);
-		if (d == NULL)
-			return -ENOMEM;
-
-		d->control = cpu_to_le16(DESCRIPTOR_STATUS |
-					 DESCRIPTOR_INPUT_LAST |
-					 DESCRIPTOR_BRANCH_ALWAYS |
-					 DESCRIPTOR_WAIT);
-		context_append(&ctx->context, d, 1, 0);
-	}
-
-	/* one descriptor for header, one for payload */
-	/* FIXME: handle cases where we need multiple desc. for payload */
-	z = 2;
-	p = packet;
+	int i, j, length;
+	int page, offset, packet_count, header_size, payload_per_buffer;
 
 	/*
 	 * The OHCI controller puts the status word in the
 	 * buffer too, so we need 4 extra bytes per packet.
 	 */
 	packet_count = p->header_length / ctx->base.header_size;
-	header_size = packet_count * (ctx->base.header_size + 4);
+	header_size = ctx->base.header_size + 4;
 
 	/* Get header size in number of descriptors. */
 	header_z = DIV_ROUND_UP(header_size, sizeof(*d));
 	page = payload >> PAGE_SHIFT;
 	offset = payload & ~PAGE_MASK;
-	rest = p->payload_length;
+	payload_per_buffer = p->payload_length / packet_count;
 
 	for (i = 0; i < packet_count; i++) {
 		/* d points to the header descriptor */
+		z = DIV_ROUND_UP(payload_per_buffer + offset, PAGE_SIZE) + 1;
 		d = context_get_descriptors(&ctx->context,
 				z + header_z, &d_bus);
 		if (d == NULL)
 			return -ENOMEM;
 
-		d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE);
+		d->control = cpu_to_le16(DESCRIPTOR_STATUS |
+					 DESCRIPTOR_INPUT_MORE);
+		if (p->skip && i == 0)
+			d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
 		d->req_count = cpu_to_le16(header_size);
 		d->res_count = d->req_count;
+		d->transfer_status = 0;
 		d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d)));
 
-		/* pd points to the payload descriptor */
-		pd = d + 1;
+		rest = payload_per_buffer;
+		for (j = 1; j < z; j++) {
+			pd = d + j;
+			pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
+						  DESCRIPTOR_INPUT_MORE);
+
+			if (offset + rest < PAGE_SIZE)
+				length = rest;
+			else
+				length = PAGE_SIZE - offset;
+			pd->req_count = cpu_to_le16(length);
+			pd->res_count = pd->req_count;
+			pd->transfer_status = 0;
+
+			page_bus = page_private(buffer->pages[page]);
+			pd->data_address = cpu_to_le32(page_bus + offset);
+
+			offset = (offset + length) & ~PAGE_MASK;
+			rest -= length;
+			if (offset == 0)
+				page++;
+		}
 		pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
 					  DESCRIPTOR_INPUT_LAST |
 					  DESCRIPTOR_BRANCH_ALWAYS);
-		if (p->interrupt)
+		if (p->interrupt && i == packet_count - 1)
 			pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
 
-		pd->req_count = cpu_to_le16(rest);
-		pd->res_count = pd->req_count;
-
-		page_bus = page_private(buffer->pages[page]);
-		pd->data_address = cpu_to_le32(page_bus + offset);
-
 		context_append(&ctx->context, d, z, header_z);
 	}
 
@@ -1923,16 +2003,22 @@ ohci_queue_iso(struct fw_iso_context *base,
 		   unsigned long payload)
 {
 	struct iso_context *ctx = container_of(base, struct iso_context, base);
+	unsigned long flags;
+	int retval;
 
+	spin_lock_irqsave(&ctx->context.ohci->lock, flags);
 	if (base->type == FW_ISO_CONTEXT_TRANSMIT)
-		return ohci_queue_iso_transmit(base, packet, buffer, payload);
+		retval = ohci_queue_iso_transmit(base, packet, buffer, payload);
 	else if (ctx->context.ohci->version >= OHCI_VERSION_1_1)
-		return ohci_queue_iso_receive_dualbuffer(base, packet,
-							 buffer, payload);
+		retval = ohci_queue_iso_receive_dualbuffer(base, packet,
+							   buffer, payload);
 	else
-		return ohci_queue_iso_receive_packet_per_buffer(base, packet,
-								buffer,
-								payload);
+		retval = ohci_queue_iso_receive_packet_per_buffer(base, packet,
+								  buffer,
+								  payload);
+	spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);
+
+	return retval;
 }
 
 static const struct fw_card_driver ohci_driver = {
@@ -2004,10 +2090,10 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
 	ar_context_init(&ohci->ar_response_ctx, ohci,
 			OHCI1394_AsRspRcvContextControlSet);
 
-	context_init(&ohci->at_request_ctx, ohci, AT_BUFFER_SIZE,
+	context_init(&ohci->at_request_ctx, ohci,
 		     OHCI1394_AsReqTrContextControlSet, handle_at_packet);
 
-	context_init(&ohci->at_response_ctx, ohci, AT_BUFFER_SIZE,
+	context_init(&ohci->at_response_ctx, ohci,
 		     OHCI1394_AsRspTrContextControlSet, handle_at_packet);
 
 	reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);