author     David Moore <dcm@MIT.EDU>                    2007-12-19 15:26:38 -0500
committer  Stefan Richter <stefanr@s5r6.in-berlin.de>   2008-01-30 16:22:23 -0500
commit     bcee893c6cba88badd292b636f33a8677c0dd962 (patch)
tree       f315048cf38e30d016d0ea50aa06989327b2f235 /drivers/firewire/fw-ohci.c
parent     0642b6577f1d455ed99e2da4a4d9216a866d2449 (diff)
firewire: fw-ohci: Bug fixes for packet-per-buffer support
This patch corrects a number of bugs in the current OHCI 1.0
packet-per-buffer support:
1. Correctly deal with payloads that cross a page boundary. The
previous version would not split the descriptor at such a boundary,
potentially corrupting unrelated memory.
2. Allow user-space to specify multiple packets per struct
fw_cdev_iso_packet, in the same way that dual-buffer allows. This is
signaled by header_length being a multiple of header_size; that
multiple determines the number of packets. The payload size allocated
per packet is the total payload size divided by the number of packets
(a standalone sketch of this arithmetic follows this list).
3. Make sync support work properly for packet-per-buffer.
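As a standalone illustration of the arithmetic behind points 1 and 2
(this is plain user-space C, not kernel code, and every numeric value
below is made up rather than taken from a real fw_cdev_iso_packet):

#include <stdio.h>

#define PAGE_SIZE 4096u
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int header_size    = 8;    /* per-packet iso header size chosen at context creation */
	unsigned int header_length  = 32;   /* a multiple of header_size, so 4 packets in this buffer */
	unsigned int payload_length = 9000; /* total payload shared by all packets in this buffer */
	unsigned int offset         = 3000; /* offset of the payload within its first page */

	/* Point 2: the multiple picks the packet count; the payload is split evenly. */
	unsigned int packet_count       = header_length / header_size;
	unsigned int payload_per_buffer = payload_length / packet_count;

	/*
	 * Point 1: one payload descriptor for each page that the (possibly
	 * page-crossing) payload chunk touches, plus one descriptor for the
	 * header/status quadlets.
	 */
	unsigned int z = DIV_ROUND_UP(payload_per_buffer + offset, PAGE_SIZE) + 1;

	printf("packets per buffer : %u\n", packet_count);
	printf("payload per packet : %u bytes\n", payload_per_buffer);
	printf("descriptors needed : %u (for the first packet)\n", z);
	return 0;
}

With these example inputs it reports 4 packets of 2250 bytes each and 3
descriptors for the first packet; the driver recomputes the descriptor
count for every packet as the page offset advances.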
I have tested this patch with libdc1394 by forcing my OHCI 1.1
controller to use the packet-per-buffer support instead of dual-buffer.
I would greatly appreciate testing by those who have DV devices and
other types of iso streamers, to make sure I didn't cause any
regressions.
Stefan, with this patch, I'm hoping that libdc1394 will work with all
your OHCI 1.0 controllers now.
The one bit of future work that remains for packet-per-buffer support is
the automatic compaction of short payloads that I discussed with
Kristian.
Signed-off-by: David Moore <dcm@acm.org>
Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>
Diffstat (limited to 'drivers/firewire/fw-ohci.c')
-rw-r--r--  drivers/firewire/fw-ohci.c | 99
1 file changed, 49 insertions(+), 50 deletions(-)
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
index a44d16d0c505..a9f2d07e7c65 100644
--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/fw-ohci.c
@@ -1461,24 +1461,24 @@ static int handle_ir_packet_per_buffer(struct context *context,
 {
 	struct iso_context *ctx =
 		container_of(context, struct iso_context, context);
-	struct descriptor *pd = d + 1;
+	struct descriptor *pd;
 	__le32 *ir_header;
-	size_t header_length;
-	void *p, *end;
-	int i, z;
+	void *p;
+	int i;
 
-	if (pd->res_count == pd->req_count)
+	for (pd = d; pd <= last; pd++) {
+		if (pd->transfer_status)
+			break;
+	}
+	if (pd > last)
 		/* Descriptor(s) not done yet, stop iteration */
 		return 0;
 
-	header_length = le16_to_cpu(d->req_count);
-
 	i = ctx->header_length;
-	z = le32_to_cpu(pd->branch_address) & 0xf;
-	p = d + z;
-	end = p + header_length;
+	p = last + 1;
 
-	while (p < end && i + ctx->base.header_size <= PAGE_SIZE) {
+	if (ctx->base.header_size > 0 &&
+	    i + ctx->base.header_size <= PAGE_SIZE) {
 		/*
 		 * The iso header is byteswapped to little endian by
 		 * the controller, but the remaining header quadlets
@@ -1487,14 +1487,11 @@ static int handle_ir_packet_per_buffer(struct context *context,
 		 */
 		*(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
 		memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4);
-		i += ctx->base.header_size;
-		p += ctx->base.header_size + 4;
+		ctx->header_length += ctx->base.header_size;
 	}
 
-	ctx->header_length = i;
-
-	if (le16_to_cpu(pd->control) & DESCRIPTOR_IRQ_ALWAYS) {
-		ir_header = (__le32 *) (d + z);
+	if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
+		ir_header = (__le32 *) p;
 		ctx->base.callback(&ctx->base,
 				   le32_to_cpu(ir_header[0]) & 0xffff,
 				   ctx->header_length, ctx->header,
@@ -1502,7 +1499,6 @@ static int handle_ir_packet_per_buffer(struct context *context,
 		ctx->header_length = 0;
 	}
 
-
 	return 1;
 }
 
@@ -1853,67 +1849,70 @@ ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
 {
 	struct iso_context *ctx = container_of(base, struct iso_context, base);
 	struct descriptor *d = NULL, *pd = NULL;
-	struct fw_iso_packet *p;
+	struct fw_iso_packet *p = packet;
 	dma_addr_t d_bus, page_bus;
 	u32 z, header_z, rest;
-	int i, page, offset, packet_count, header_size;
-
-	if (packet->skip) {
-		d = context_get_descriptors(&ctx->context, 1, &d_bus);
-		if (d == NULL)
-			return -ENOMEM;
-
-		d->control = cpu_to_le16(DESCRIPTOR_STATUS |
-					 DESCRIPTOR_INPUT_LAST |
-					 DESCRIPTOR_BRANCH_ALWAYS |
-					 DESCRIPTOR_WAIT);
-		context_append(&ctx->context, d, 1, 0);
-	}
-
-	/* one descriptor for header, one for payload */
-	/* FIXME: handle cases where we need multiple desc. for payload */
-	z = 2;
-	p = packet;
+	int i, j, length;
+	int page, offset, packet_count, header_size, payload_per_buffer;
 
 	/*
 	 * The OHCI controller puts the status word in the
 	 * buffer too, so we need 4 extra bytes per packet.
 	 */
 	packet_count = p->header_length / ctx->base.header_size;
-	header_size = packet_count * (ctx->base.header_size + 4);
+	header_size = ctx->base.header_size + 4;
 
 	/* Get header size in number of descriptors. */
 	header_z = DIV_ROUND_UP(header_size, sizeof(*d));
 	page = payload >> PAGE_SHIFT;
 	offset = payload & ~PAGE_MASK;
-	rest = p->payload_length;
+	payload_per_buffer = p->payload_length / packet_count;
 
 	for (i = 0; i < packet_count; i++) {
 		/* d points to the header descriptor */
+		z = DIV_ROUND_UP(payload_per_buffer + offset, PAGE_SIZE) + 1;
 		d = context_get_descriptors(&ctx->context,
 				z + header_z, &d_bus);
 		if (d == NULL)
 			return -ENOMEM;
 
-		d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE);
+		d->control = cpu_to_le16(DESCRIPTOR_STATUS |
+					 DESCRIPTOR_INPUT_MORE);
+		if (p->skip && i == 0)
+			d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
 		d->req_count = cpu_to_le16(header_size);
 		d->res_count = d->req_count;
+		d->transfer_status = 0;
 		d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d)));
 
-		/* pd points to the payload descriptor */
-		pd = d + 1;
+		rest = payload_per_buffer;
+		for (j = 1; j < z; j++) {
+			pd = d + j;
+			pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
+						  DESCRIPTOR_INPUT_MORE);
+
+			if (offset + rest < PAGE_SIZE)
+				length = rest;
+			else
+				length = PAGE_SIZE - offset;
+			pd->req_count = cpu_to_le16(length);
+			pd->res_count = pd->req_count;
+			pd->transfer_status = 0;
+
+			page_bus = page_private(buffer->pages[page]);
+			pd->data_address = cpu_to_le32(page_bus + offset);
+
+			offset = (offset + length) & ~PAGE_MASK;
+			rest -= length;
+			if (offset == 0)
+				page++;
+		}
 		pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
 					  DESCRIPTOR_INPUT_LAST |
 					  DESCRIPTOR_BRANCH_ALWAYS);
-		if (p->interrupt)
+		if (p->interrupt && i == packet_count - 1)
 			pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
 
-		pd->req_count = cpu_to_le16(rest);
-		pd->res_count = pd->req_count;
-
-		page_bus = page_private(buffer->pages[page]);
-		pd->data_address = cpu_to_le32(page_bus + offset);
-
 		context_append(&ctx->context, d, z, header_z);
 	}
 