author		Clemens Ladisch <clemens@ladisch.de>	2011-10-15 17:12:23 -0400
committer	Stefan Richter <stefanr@s5r6.in-berlin.de>	2011-10-18 06:32:39 -0400
commit		a572e688cf5d99d2382016c7241ec37b523b0137
tree		e72d4f75ede570b3d9d4898ed8d3a09a95ad6ae4
parent		32eaeae177bf77fbc224c35262add45bd5e6abb3
firewire: ohci: fix isochronous DMA synchronization
Add the dma_sync_single_* calls necessary to ensure proper cache
synchronization for isochronous data buffers on non-coherent
architectures.

Signed-off-by: Clemens Ladisch <clemens@ladisch.de>
Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>
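The general pattern being applied here: a buffer under a streaming DMA
mapping is owned by the device, and on non-coherent architectures the CPU
must claim it with dma_sync_single_*_for_cpu() before reading received
data, then hand it back with dma_sync_single_*_for_device() before the
next transfer. A minimal sketch of that ownership handoff, independent of
this driver (dev, buf, BUF_SIZE, and process() are hypothetical
placeholders; the dma_* calls are the stock kernel DMA API):

	/* illustrative sketch only, not code from this patch */
	dma_addr_t handle = dma_map_single(dev, buf, BUF_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... device DMAs into buf ... */

	/* give ownership to the CPU before reading the received data */
	dma_sync_single_for_cpu(dev, handle, BUF_SIZE, DMA_FROM_DEVICE);
	process(buf);

	/* return ownership to the device before reusing the buffer */
	dma_sync_single_for_device(dev, handle, BUF_SIZE, DMA_FROM_DEVICE);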
Diffstat (limited to 'drivers/firewire/ohci.c')
-rw-r--r--	drivers/firewire/ohci.c | 73
1 file changed, 73 insertions(+), 0 deletions(-)
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index b6977149394e..6628feaa7622 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -126,6 +126,7 @@ struct context {
 	struct fw_ohci *ohci;
 	u32 regs;
 	int total_allocation;
+	u32 current_bus;
 	bool running;
 	bool flushing;
 
@@ -1057,6 +1058,7 @@ static void context_tasklet(unsigned long data)
 	address = le32_to_cpu(last->branch_address);
 	z = address & 0xf;
 	address &= ~0xf;
+	ctx->current_bus = address;
 
 	/* If the branch address points to a buffer outside of the
 	 * current buffer, advance to the next buffer. */
@@ -2697,6 +2699,7 @@ static int handle_ir_packet_per_buffer(struct context *context,
 	struct iso_context *ctx =
 		container_of(context, struct iso_context, context);
 	struct descriptor *pd;
+	u32 buffer_dma;
 	__le32 *ir_header;
 	void *p;
 
@@ -2707,6 +2710,16 @@ static int handle_ir_packet_per_buffer(struct context *context,
 		/* Descriptor(s) not done yet, stop iteration */
 		return 0;
 
+	while (!(d->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))) {
+		d++;
+		buffer_dma = le32_to_cpu(d->data_address);
+		dma_sync_single_range_for_cpu(context->ohci->card.device,
+					      buffer_dma & PAGE_MASK,
+					      buffer_dma & ~PAGE_MASK,
+					      le16_to_cpu(d->req_count),
+					      DMA_FROM_DEVICE);
+	}
+
 	p = last + 1;
 	copy_iso_headers(ctx, p);
 
@@ -2729,11 +2742,19 @@ static int handle_ir_buffer_fill(struct context *context,
 {
 	struct iso_context *ctx =
 		container_of(context, struct iso_context, context);
+	u32 buffer_dma;
 
 	if (!last->transfer_status)
 		/* Descriptor(s) not done yet, stop iteration */
 		return 0;
 
+	buffer_dma = le32_to_cpu(last->data_address);
+	dma_sync_single_range_for_cpu(context->ohci->card.device,
+				      buffer_dma & PAGE_MASK,
+				      buffer_dma & ~PAGE_MASK,
+				      le16_to_cpu(last->req_count),
+				      DMA_FROM_DEVICE);
+
 	if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS)
 		ctx->base.callback.mc(&ctx->base,
 				      le32_to_cpu(last->data_address) +
@@ -2744,6 +2765,43 @@ static int handle_ir_buffer_fill(struct context *context,
 	return 1;
 }
 
+static inline void sync_it_packet_for_cpu(struct context *context,
+					  struct descriptor *pd)
+{
+	__le16 control;
+	u32 buffer_dma;
+
+	/* only packets beginning with OUTPUT_MORE* have data buffers */
+	if (pd->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))
+		return;
+
+	/* skip over the OUTPUT_MORE_IMMEDIATE descriptor */
+	pd += 2;
+
+	/*
+	 * If the packet has a header, the first OUTPUT_MORE/LAST descriptor's
+	 * data buffer is in the context program's coherent page and must not
+	 * be synced.
+	 */
+	if ((le32_to_cpu(pd->data_address) & PAGE_MASK) ==
+	    (context->current_bus & PAGE_MASK)) {
+		if (pd->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))
+			return;
+		pd++;
+	}
+
+	do {
+		buffer_dma = le32_to_cpu(pd->data_address);
+		dma_sync_single_range_for_cpu(context->ohci->card.device,
+					      buffer_dma & PAGE_MASK,
+					      buffer_dma & ~PAGE_MASK,
+					      le16_to_cpu(pd->req_count),
+					      DMA_TO_DEVICE);
+		control = pd->control;
+		pd++;
+	} while (!(control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS)));
+}
+
 static int handle_it_packet(struct context *context,
 			    struct descriptor *d,
 			    struct descriptor *last)
@@ -2760,6 +2818,8 @@ static int handle_it_packet(struct context *context,
 		/* Descriptor(s) not done yet, stop iteration */
 		return 0;
 
+	sync_it_packet_for_cpu(context, d);
+
 	i = ctx->header_length;
 	if (i + 4 < PAGE_SIZE) {
 		/* Present this value as big-endian to match the receive code */
@@ -3129,6 +3189,10 @@ static int queue_iso_transmit(struct iso_context *ctx,
 		page_bus = page_private(buffer->pages[page]);
 		pd[i].data_address = cpu_to_le32(page_bus + offset);
 
+		dma_sync_single_range_for_device(ctx->context.ohci->card.device,
+						 page_bus, offset, length,
+						 DMA_TO_DEVICE);
+
 		payload_index += length;
 	}
 
@@ -3153,6 +3217,7 @@ static int queue_iso_packet_per_buffer(struct iso_context *ctx,
 			struct fw_iso_buffer *buffer,
 			unsigned long payload)
 {
+	struct device *device = ctx->context.ohci->card.device;
 	struct descriptor *d, *pd;
 	dma_addr_t d_bus, page_bus;
 	u32 z, header_z, rest;
@@ -3207,6 +3272,10 @@ static int queue_iso_packet_per_buffer(struct iso_context *ctx,
 			page_bus = page_private(buffer->pages[page]);
 			pd->data_address = cpu_to_le32(page_bus + offset);
 
+			dma_sync_single_range_for_device(device, page_bus,
+							 offset, length,
+							 DMA_FROM_DEVICE);
+
 			offset = (offset + length) & ~PAGE_MASK;
 			rest -= length;
 			if (offset == 0)
@@ -3266,6 +3335,10 @@ static int queue_iso_buffer_fill(struct iso_context *ctx,
 		page_bus = page_private(buffer->pages[page]);
 		d->data_address = cpu_to_le32(page_bus + offset);
 
+		dma_sync_single_range_for_device(ctx->context.ohci->card.device,
+						 page_bus, offset, length,
+						 DMA_FROM_DEVICE);
+
 		rest -= length;
 		offset = 0;
 		page++;
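
A note on the address arithmetic in the new sync calls: each page of an
fw_iso_buffer is a separate DMA mapping (its bus address is stashed via
page_private()), so buffer_dma & PAGE_MASK recovers the mapped page's bus
address and buffer_dma & ~PAGE_MASK the offset within that page, which is
exactly the (addr, offset, size) split that
dma_sync_single_range_for_cpu() expects. As a hedged sketch, the repeated
calls above could be read as this hypothetical helper
(sync_iso_range_for_cpu is not part of the patch):

	static void sync_iso_range_for_cpu(struct device *dev, u32 buffer_dma,
					   size_t len, enum dma_data_direction dir)
	{
		dma_sync_single_range_for_cpu(dev,
					      buffer_dma & PAGE_MASK,  /* page's bus address */
					      buffer_dma & ~PAGE_MASK, /* offset into the page */
					      len, dir);
	}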