author     Kristian Høgsberg <krh@redhat.com>            2007-03-14 17:34:53 -0400
committer  Stefan Richter <stefanr@s5r6.in-berlin.de>    2007-03-15 13:21:36 -0400
commit     c70dc788fd8d3870b41231b6a53a64afb98cfd13
tree       60a70261eb3e6c812fd6c07ac48a863de73ba0be
parent     d60d7f1d5ce83d1be8d79256f711d6a645b7a2fa
firewire: Fix dualbuffer iso receive mode and drop buffer fill mode.
The dualbuffer DMA setup did not account for the iso trailer word
and thus didn't work correctly. With this fixed we can drop the
buffer fill fallback mode.
Signed-off-by: Kristian Høgsberg <krh@redhat.com>
Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>
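
For orientation, here is a minimal standalone sketch (hypothetical helper and buffer names, not code from the patch) of the header-copy stride the fix introduces in handle_ir_dualbuffer_packet(): each packet occupies header_size + 4 bytes in the DMA header buffer because the controller stores an extra quadlet per packet, and only header_size bytes of each slot are handed back to the client.

/*
 * Illustrative sketch only -- not part of the patch.  It mirrors the
 * stride used by the fixed handle_ir_dualbuffer_packet() loop: the
 * leading quadlet of each per-packet slot is skipped, and the slots
 * are walked in steps of header_size + 4 bytes.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static size_t copy_iso_headers(uint8_t *dst, size_t dst_size,
                               const uint8_t *src, size_t src_len,
                               size_t header_size)
{
        const uint8_t *p = src, *end = src + src_len;
        size_t i = 0;

        while (p < end && i + header_size <= dst_size) {
                memcpy(dst + i, p + 4, header_size); /* skip the extra quadlet */
                i += header_size;
                p += header_size + 4;                /* slot is header_size + 4 */
        }

        return i;
}

int main(void)
{
        /* two packets, 4-byte headers, each slot led by a 4-byte quadlet */
        uint8_t dma_buf[16] = {
                0xaa, 0xaa, 0xaa, 0xaa,  1, 2, 3, 4,
                0xbb, 0xbb, 0xbb, 0xbb,  5, 6, 7, 8,
        };
        uint8_t client_buf[8];

        size_t n = copy_iso_headers(client_buf, sizeof client_buf,
                                    dma_buf, sizeof dma_buf, 4);
        printf("copied %zu header bytes\n", n);  /* prints: copied 8 header bytes */
        return 0;
}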
-rw-r--r--  drivers/firewire/fw-device-cdev.c |  28
-rw-r--r--  drivers/firewire/fw-ohci.c        | 160
2 files changed, 66 insertions, 122 deletions
diff --git a/drivers/firewire/fw-device-cdev.c b/drivers/firewire/fw-device-cdev.c
index 7b0efccf78f1..be6bfcfb9065 100644
--- a/drivers/firewire/fw-device-cdev.c
+++ b/drivers/firewire/fw-device-cdev.c
@@ -541,20 +541,32 @@ static int ioctl_create_iso_context(struct client *client, void __user *arg)
 	if (copy_from_user(&request, arg, sizeof request))
 		return -EFAULT;
 
-	if (request.type > FW_ISO_CONTEXT_RECEIVE)
-		return -EINVAL;
-
 	if (request.channel > 63)
 		return -EINVAL;
 
-	if (request.sync > 15)
-		return -EINVAL;
+	switch (request.type) {
+	case FW_ISO_CONTEXT_RECEIVE:
+		if (request.sync > 15)
+			return -EINVAL;
 
-	if (request.tags == 0 || request.tags > 15)
-		return -EINVAL;
+		if (request.tags == 0 || request.tags > 15)
+			return -EINVAL;
+
+		if (request.header_size < 4 || (request.header_size & 3))
+			return -EINVAL;
 
-	if (request.speed > SCODE_3200)
+		break;
+
+	case FW_ISO_CONTEXT_TRANSMIT:
+		if (request.speed > SCODE_3200)
+			return -EINVAL;
+
+		break;
+
+	default:
 		return -EINVAL;
+	}
+
 
 	client->iso_context = fw_iso_context_create(client->device->card,
 						    request.type,
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
index 6f9895d201b8..17e13d099294 100644
--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/fw-ohci.c
@@ -1233,24 +1233,6 @@ ohci_get_bus_time(struct fw_card *card)
 	return bus_time;
 }
 
-static int handle_ir_bufferfill_packet(struct context *context,
-				       struct descriptor *d,
-				       struct descriptor *last)
-{
-	struct iso_context *ctx =
-		container_of(context, struct iso_context, context);
-
-	if (d->res_count > 0)
-		return 0;
-
-	if (le16_to_cpu(last->control) & descriptor_irq_always)
-		ctx->base.callback(&ctx->base,
-				   le16_to_cpu(last->res_count),
-				   0, NULL, ctx->base.callback_data);
-
-	return 1;
-}
-
 static int handle_ir_dualbuffer_packet(struct context *context,
 				       struct descriptor *d,
 				       struct descriptor *last)
@@ -1258,19 +1240,33 @@ static int handle_ir_dualbuffer_packet(struct context *context,
 	struct iso_context *ctx =
 		container_of(context, struct iso_context, context);
 	struct db_descriptor *db = (struct db_descriptor *) d;
+	__le32 *ir_header;
 	size_t header_length;
+	void *p, *end;
+	int i;
 
 	if (db->first_res_count > 0 && db->second_res_count > 0)
 		/* This descriptor isn't done yet, stop iteration. */
 		return 0;
 
-	header_length = db->first_req_count - db->first_res_count;
-	if (ctx->header_length + header_length <= PAGE_SIZE)
-		memcpy(ctx->header + ctx->header_length, db + 1, header_length);
-	ctx->header_length += header_length;
+	header_length = le16_to_cpu(db->first_req_count) -
+		le16_to_cpu(db->first_res_count);
+
+	i = ctx->header_length;
+	p = db + 1;
+	end = p + header_length;
+	while (p < end && i + ctx->base.header_size <= PAGE_SIZE) {
+		memcpy(ctx->header + i, p + 4, ctx->base.header_size);
+		i += ctx->base.header_size;
+		p += ctx->base.header_size + 4;
+	}
+
+	ctx->header_length = i;
 
 	if (le16_to_cpu(db->control) & descriptor_irq_always) {
-		ctx->base.callback(&ctx->base, 0,
+		ir_header = (__le32 *) (db + 1);
+		ctx->base.callback(&ctx->base,
+				   le32_to_cpu(ir_header[0]) & 0xffff,
 				   ctx->header_length, ctx->header,
 				   ctx->base.callback_data);
 		ctx->header_length = 0;
@@ -1315,12 +1311,10 @@ ohci_allocate_iso_context(struct fw_card *card, int type,
 	} else {
 		mask = &ohci->ir_context_mask;
 		list = ohci->ir_context_list;
-		if (header_size > 0)
-			callback = handle_ir_dualbuffer_packet;
-		else
-			callback = handle_ir_bufferfill_packet;
+		callback = handle_ir_dualbuffer_packet;
 	}
 
+	/* FIXME: We need a fallback for pre 1.1 OHCI. */
 	if (callback == handle_ir_dualbuffer_packet &&
 	    ohci->version < OHCI_VERSION_1_1)
 		return ERR_PTR(-EINVAL);
@@ -1367,7 +1361,7 @@ static int ohci_start_iso(struct fw_iso_context *base, s32 cycle)
 {
 	struct iso_context *ctx = container_of(base, struct iso_context, base);
 	struct fw_ohci *ohci = ctx->context.ohci;
-	u32 cycle_match = 0, mode;
+	u32 cycle_match = 0;
 	int index;
 
 	if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
@@ -1382,16 +1376,14 @@ static int ohci_start_iso(struct fw_iso_context *base, s32 cycle)
 	} else {
 		index = ctx - ohci->ir_context_list;
 
-		if (ctx->base.header_size > 0)
-			mode = IR_CONTEXT_DUAL_BUFFER_MODE;
-		else
-			mode = IR_CONTEXT_BUFFER_FILL;
 		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index);
 		reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
 		reg_write(ohci, context_match(ctx->context.regs),
 			  (ctx->base.tags << 28) |
 			  (ctx->base.sync << 8) | ctx->base.channel);
-		context_run(&ctx->context, mode);
+		context_run(&ctx->context,
+			    IR_CONTEXT_DUAL_BUFFER_MODE |
+			    IR_CONTEXT_ISOCH_HEADER);
 	}
 
 	return 0;
@@ -1538,26 +1530,6 @@ ohci_queue_iso_transmit(struct fw_iso_context *base,
 }
 
 static int
-setup_wait_descriptor(struct context *ctx)
-{
-	struct descriptor *d;
-	dma_addr_t d_bus;
-
-	d = context_get_descriptors(ctx, 1, &d_bus);
-	if (d == NULL)
-		return -ENOMEM;
-
-	d->control = cpu_to_le16(descriptor_input_more |
-				 descriptor_status |
-				 descriptor_branch_always |
-				 descriptor_wait);
-
-	context_append(ctx, d, 1, 0);
-
-	return 0;
-}
-
-static int
 ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
 				  struct fw_iso_packet *packet,
 				  struct fw_iso_buffer *buffer,
@@ -1569,25 +1541,39 @@ ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
 	struct fw_iso_packet *p;
 	dma_addr_t d_bus, page_bus;
 	u32 z, header_z, length, rest;
-	int page, offset;
+	int page, offset, packet_count, header_size;
 
 	/* FIXME: Cycle lost behavior should be configurable: lose
 	 * packet, retransmit or terminate.. */
 
-	if (packet->skip && setup_wait_descriptor(&ctx->context) < 0)
-		return -ENOMEM;
+	if (packet->skip) {
+		d = context_get_descriptors(&ctx->context, 2, &d_bus);
+		if (d == NULL)
+			return -ENOMEM;
+
+		db = (struct db_descriptor *) d;
+		db->control = cpu_to_le16(descriptor_status |
+					  descriptor_branch_always |
+					  descriptor_wait);
+		db->first_size = cpu_to_le16(ctx->base.header_size + 4);
+		context_append(&ctx->context, d, 2, 0);
+	}
 
 	p = packet;
 	z = 2;
 
+	/* The OHCI controller puts the status word in the header
+	 * buffer too, so we need 4 extra bytes per packet. */
+	packet_count = p->header_length / ctx->base.header_size;
+	header_size = packet_count * (ctx->base.header_size + 4);
+
 	/* Get header size in number of descriptors. */
-	header_z = DIV_ROUND_UP(p->header_length, sizeof *d);
+	header_z = DIV_ROUND_UP(header_size, sizeof *d);
 	page = payload >> PAGE_SHIFT;
 	offset = payload & ~PAGE_MASK;
 	rest = p->payload_length;
 
 	/* FIXME: OHCI 1.0 doesn't support dual buffer receive */
-	/* FIXME: handle descriptor_wait */
 	/* FIXME: make packet-per-buffer/dual-buffer a context option */
 	while (rest > 0) {
 		d = context_get_descriptors(&ctx->context,
@@ -1598,8 +1584,8 @@ ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
 		db = (struct db_descriptor *) d;
 		db->control = cpu_to_le16(descriptor_status |
 					  descriptor_branch_always);
-		db->first_size = cpu_to_le16(ctx->base.header_size);
-		db->first_req_count = cpu_to_le16(p->header_length);
+		db->first_size = cpu_to_le16(ctx->base.header_size + 4);
+		db->first_req_count = cpu_to_le16(header_size);
 		db->first_res_count = db->first_req_count;
 		db->first_buffer = cpu_to_le32(d_bus + sizeof *db);
 
@@ -1626,57 +1612,6 @@ ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
 }
 
 static int
-ohci_queue_iso_receive_bufferfill(struct fw_iso_context *base,
-				  struct fw_iso_packet *packet,
-				  struct fw_iso_buffer *buffer,
-				  unsigned long payload)
-{
-	struct iso_context *ctx = container_of(base, struct iso_context, base);
-	struct descriptor *d = NULL;
-	dma_addr_t d_bus, page_bus;
-	u32 length, rest;
-	int page, offset;
-
-	page = payload >> PAGE_SHIFT;
-	offset = payload & ~PAGE_MASK;
-	rest = packet->payload_length;
-
-	if (packet->skip && setup_wait_descriptor(&ctx->context) < 0)
-		return -ENOMEM;
-
-	while (rest > 0) {
-		d = context_get_descriptors(&ctx->context, 1, &d_bus);
-		if (d == NULL)
-			return -ENOMEM;
-
-		d->control = cpu_to_le16(descriptor_input_more |
-					 descriptor_status |
-					 descriptor_branch_always);
-
-		if (offset + rest < PAGE_SIZE)
-			length = rest;
-		else
-			length = PAGE_SIZE - offset;
-
-		page_bus = page_private(buffer->pages[page]);
-		d->data_address = cpu_to_le32(page_bus + offset);
-		d->req_count = cpu_to_le16(length);
-		d->res_count = cpu_to_le16(length);
-
-		if (packet->interrupt && length == rest)
-			d->control |= cpu_to_le16(descriptor_irq_always);
-
-		context_append(&ctx->context, d, 1, 0);
-
-		offset = (offset + length) & ~PAGE_MASK;
-		rest -= length;
-		page++;
-	}
-
-	return 0;
-}
-
-static int
 ohci_queue_iso(struct fw_iso_context *base,
 	       struct fw_iso_packet *packet,
 	       struct fw_iso_buffer *buffer,
@@ -1686,9 +1621,6 @@ ohci_queue_iso(struct fw_iso_context *base,
 
 	if (base->type == FW_ISO_CONTEXT_TRANSMIT)
 		return ohci_queue_iso_transmit(base, packet, buffer, payload);
-	else if (base->header_size == 0)
-		return ohci_queue_iso_receive_bufferfill(base, packet,
-							 buffer, payload);
 	else if (ctx->context.ohci->version >= OHCI_VERSION_1_1)
 		return ohci_queue_iso_receive_dualbuffer(base, packet,
 							 buffer, payload);