author     Jarod Wilson <jwilson@redhat.com>            2008-03-12 17:43:26 -0400
committer  Stefan Richter <stefanr@s5r6.in-berlin.de>   2008-03-13 19:57:00 -0400
commit     bde1709aaa98f5004ab1580842c422be18eb4bc3
tree       9c7a3241d88574680911a48e81ce8d6ad252e559
parent     6e45ef4c7aeefbf97df748866cd1b24f73b86160
firewire: fw-ohci: use dma_alloc_coherent for ar_buffer
Currently, we do nothing to guarantee we have a consistent DMA buffer for asynchronous receive packets. Rather than doing several syncs following a dma_map_single() to get consistent buffers, just switch to using dma_alloc_coherent().

This resolves constant buffer failures on my own x86_64 laptop with 4 GB of RAM and will likely fix a number of similar failures seen on other x86_64 systems with 4 GB of RAM or more.

Signed-off-by: Jarod Wilson <jwilson@redhat.com>
Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>
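Below is a minimal sketch, not the driver code itself, of the allocation pattern the patch switches to: dma_alloc_coherent() hands back a CPU pointer and the matching bus address in one call, so the dma_map_single() mapping, its error check, and the dma_sync_single_for_device() call all go away, and the page is later released with dma_free_coherent() using that same bus address. The demo_* helper names are illustrative only and are not part of the patch.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/* Allocate one coherent page for an asynchronous-receive buffer.  The CPU
 * pointer and the bus address come back together, so no streaming mapping
 * or sync step is needed before the controller writes into the page. */
static void *demo_add_page(struct device *dev, dma_addr_t *bus)
{
        return dma_alloc_coherent(dev, PAGE_SIZE, bus, GFP_ATOMIC);
}

/* Release the page; dma_free_coherent() needs the same size and the bus
 * address that the allocation returned. */
static void demo_free_page(struct device *dev, void *cpu_addr, dma_addr_t bus)
{
        dma_free_coherent(dev, PAGE_SIZE, cpu_addr, bus);
}

This is also why the tasklet hunk below introduces buffer_bus and computes it as le32_to_cpu(ab->descriptor.data_address) - offsetof(struct ar_buffer, data): with a coherent page there is no mapping left to unmap, but the free path still has to pass the page's original bus address to dma_free_coherent().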
-rw-r--r--   drivers/firewire/fw-ohci.c | 18
1 file changed, 5 insertions(+), 13 deletions(-)
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
index eaa213e21592..fcf59fcae1bc 100644
--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/fw-ohci.c
@@ -284,16 +284,10 @@ static int ar_context_add_page(struct ar_context *ctx)
         dma_addr_t ab_bus;
         size_t offset;
 
-        ab = (struct ar_buffer *) __get_free_page(GFP_ATOMIC);
+        ab = dma_alloc_coherent(dev, PAGE_SIZE, &ab_bus, GFP_ATOMIC);
         if (ab == NULL)
                 return -ENOMEM;
 
-        ab_bus = dma_map_single(dev, ab, PAGE_SIZE, DMA_BIDIRECTIONAL);
-        if (dma_mapping_error(ab_bus)) {
-                free_page((unsigned long) ab);
-                return -ENOMEM;
-        }
-
         memset(&ab->descriptor, 0, sizeof(ab->descriptor));
         ab->descriptor.control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
                                              DESCRIPTOR_STATUS |
@@ -304,8 +298,6 @@ static int ar_context_add_page(struct ar_context *ctx)
         ab->descriptor.res_count = cpu_to_le16(PAGE_SIZE - offset);
         ab->descriptor.branch_address = 0;
 
-        dma_sync_single_for_device(dev, ab_bus, PAGE_SIZE, DMA_BIDIRECTIONAL);
-
         ctx->last_buffer->descriptor.branch_address = cpu_to_le32(ab_bus | 1);
         ctx->last_buffer->next = ab;
         ctx->last_buffer = ab;
@@ -409,6 +401,7 @@ static void ar_context_tasklet(unsigned long data)
 
         if (d->res_count == 0) {
                 size_t size, rest, offset;
+                dma_addr_t buffer_bus;
 
                 /*
                  * This descriptor is finished and we may have a
@@ -417,9 +410,7 @@ static void ar_context_tasklet(unsigned long data)
                  */
 
                 offset = offsetof(struct ar_buffer, data);
-                dma_unmap_single(ohci->card.device,
-                        le32_to_cpu(ab->descriptor.data_address) - offset,
-                        PAGE_SIZE, DMA_BIDIRECTIONAL);
+                buffer_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
 
                 buffer = ab;
                 ab = ab->next;
@@ -435,7 +426,8 @@ static void ar_context_tasklet(unsigned long data)
                 while (buffer < end)
                         buffer = handle_ar_packet(ctx, buffer);
 
-                free_page((unsigned long)buffer);
+                dma_free_coherent(ohci->card.device, PAGE_SIZE,
+                                  buffer, buffer_bus);
                 ar_context_add_page(ctx);
         } else {
                 buffer = ctx->pointer;