author      Kristian Høgsberg <krh@redhat.com>              2007-02-06 14:49:30 -0500
committer   Stefan Richter <stefanr@s5r6.in-berlin.de>      2007-03-09 16:02:50 -0500
commit      32b46093a076986fa3c6e1dd484791624edf4585 (patch)
tree        f4d7a89c836549b4ce57f4b55720096f8840cd15 /drivers
parent      641f8791f031d6133e5c3e9ce036b3e942416e9d (diff)
firewire: Rework async receive DMA.
The old DMA program for receiving async packets stops DMA while
processing received packets and expects only one packet per
interrupt. Stopping DMA can silently drop packets, and we need to
handle multiple received packets per interrupt.

This new version keeps DMA running at all times, simply appends new
pages as buffers fill up, and supports multiple packets per interrupt.
Signed-off-by: Kristian Høgsberg <krh@redhat.com>
Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>
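
To make the buffer-chaining idea above concrete, here is a small, self-contained
user-space sketch in plain C (no DMA, no kernel APIs). Every name in it —
model_buffer, model_context, model_add_page, MODEL_PAGE_SIZE — is an illustrative
stand-in rather than the driver's actual symbols; it only mirrors the shape of the
scheme: each page starts with a descriptor whose branch pointer links it to the
next page, and fresh pages are appended to the tail instead of stopping and
restarting the receive context.

/* Illustrative model only: simplified stand-ins for ar_buffer/ar_context. */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MODEL_PAGE_SIZE 4096

struct model_descriptor {
	unsigned short req_count;	/* bytes the buffer can hold */
	unsigned short res_count;	/* bytes not yet filled      */
	void *branch;			/* next buffer in the chain  */
};

struct model_buffer {
	struct model_descriptor descriptor;
	struct model_buffer *next;
	unsigned char data[];		/* packet bytes start here   */
};

struct model_context {
	struct model_buffer *current_buffer;	/* where parsing continues */
	struct model_buffer *last_buffer;	/* tail of the chain       */
};

/* Append one page-sized buffer to the tail; the context is never
 * stopped, the tail descriptor simply gains a branch to the new page. */
static int model_add_page(struct model_context *ctx)
{
	size_t offset = offsetof(struct model_buffer, data);
	struct model_buffer *b = malloc(MODEL_PAGE_SIZE);

	if (b == NULL)
		return -1;

	memset(&b->descriptor, 0, sizeof b->descriptor);
	b->descriptor.req_count = MODEL_PAGE_SIZE - offset;
	b->descriptor.res_count = MODEL_PAGE_SIZE - offset;
	b->descriptor.branch = NULL;
	b->next = NULL;

	ctx->last_buffer->descriptor.branch = b;	/* link behind the tail */
	ctx->last_buffer->next = b;
	ctx->last_buffer = b;

	return 0;
}

int main(void)
{
	struct model_buffer head = { .next = NULL };
	struct model_context ctx = { .current_buffer = &head, .last_buffer = &head };
	int n = 0;

	/* Prime the chain with two pages, as ar_context_init() does below. */
	model_add_page(&ctx);
	model_add_page(&ctx);

	for (struct model_buffer *b = head.next; b != NULL; b = b->next)
		n++;
	printf("pages linked behind the head buffer: %d\n", n);
	return 0;
}

The real ar_context_add_page() in the patch additionally maps the page for DMA
and pokes CONTEXT_WAKE; the model only shows the list manipulation.
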
Diffstat (limited to 'drivers')
-rw-r--r--   drivers/firewire/fw-ohci.c         | 212
-rw-r--r--   drivers/firewire/fw-transaction.c  |   3
2 files changed, 132 insertions, 83 deletions
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
index 29285f209dcf..ec47ae9a2dd1 100644
--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/fw-ohci.c
@@ -55,17 +55,20 @@ struct descriptor {
 	__le16 transfer_status;
 } __attribute__((aligned(16)));
 
-struct ar_context {
-	struct fw_ohci *ohci;
+struct ar_buffer {
 	struct descriptor descriptor;
-	__le32 buffer[512];
-	dma_addr_t descriptor_bus;
-	dma_addr_t buffer_bus;
+	struct ar_buffer *next;
+	__le32 data[0];
+};
 
+struct ar_context {
+	struct fw_ohci *ohci;
+	struct ar_buffer *current_buffer;
+	struct ar_buffer *last_buffer;
+	void *pointer;
 	u32 command_ptr;
 	u32 control_set;
 	u32 control_clear;
-
 	struct tasklet_struct tasklet;
 };
 
@@ -169,8 +172,7 @@ static inline struct fw_ohci *fw_ohci(struct fw_card *card)
 #define OHCI_LOOP_COUNT			500
 #define OHCI1394_PCI_HCI_Control	0x40
 #define SELF_ID_BUF_SIZE		0x800
-
-#define MAX_STOP_CONTEXT_LOOPS 1000
+#define OHCI_TCODE_PHY_PACKET		0x0e
 
 static char ohci_driver_name[] = KBUILD_MODNAME;
 
@@ -213,66 +215,97 @@ ohci_update_phy_reg(struct fw_card *card, int addr,
 	return 0;
 }
 
-static void ar_context_run(struct ar_context *ctx)
+static int ar_context_add_page(struct ar_context *ctx)
 {
-	reg_write(ctx->ohci, ctx->command_ptr, ctx->descriptor_bus | 1);
-	reg_write(ctx->ohci, ctx->control_set, CONTEXT_RUN);
+	struct device *dev = ctx->ohci->card.device;
+	struct ar_buffer *ab;
+	dma_addr_t ab_bus;
+	size_t offset;
+
+	ab = (struct ar_buffer *) __get_free_page(GFP_ATOMIC);
+	if (ab == NULL)
+		return -ENOMEM;
+
+	ab_bus = dma_map_single(dev, ab, PAGE_SIZE, DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(ab_bus)) {
+		free_page((unsigned long) ab);
+		return -ENOMEM;
+	}
+
+	memset(&ab->descriptor, 0, sizeof ab->descriptor);
+	ab->descriptor.control = cpu_to_le16(descriptor_input_more |
+					     descriptor_status |
+					     descriptor_branch_always);
+	offset = offsetof(struct ar_buffer, data);
+	ab->descriptor.req_count = cpu_to_le16(PAGE_SIZE - offset);
+	ab->descriptor.data_address = cpu_to_le32(ab_bus + offset);
+	ab->descriptor.res_count = cpu_to_le16(PAGE_SIZE - offset);
+	ab->descriptor.branch_address = 0;
+
+	dma_sync_single_for_device(dev, ab_bus, PAGE_SIZE, DMA_BIDIRECTIONAL);
+
+	ctx->last_buffer->descriptor.branch_address = ab_bus | 1;
+	ctx->last_buffer->next = ab;
+	ctx->last_buffer = ab;
+
+	reg_write(ctx->ohci, ctx->control_set, CONTEXT_WAKE);
 	flush_writes(ctx->ohci);
+
+	return 0;
 }
 
-static void ar_context_tasklet(unsigned long data)
+static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
 {
-	struct ar_context *ctx = (struct ar_context *)data;
 	struct fw_ohci *ohci = ctx->ohci;
 	struct fw_packet p;
 	u32 status, length, tcode;
-	int i;
 
-	/* FIXME: We stop and restart the ar context here, what if we
-	 * stop while a receive is in progress? Maybe we could just
-	 * loop the context back to itself and use it in buffer fill
-	 * mode as intended... */
-	reg_write(ctx->ohci, ctx->control_clear, CONTEXT_RUN);
-
-	/* FIXME: What to do about evt_* errors? */
-	length = le16_to_cpu(ctx->descriptor.req_count) -
-		le16_to_cpu(ctx->descriptor.res_count) - 4;
-	status = le32_to_cpu(ctx->buffer[length / 4]);
-
-	p.ack = ((status >> 16) & 0x1f) - 16;
-	p.speed = (status >> 21) & 0x7;
-	p.timestamp = status & 0xffff;
-	p.generation = ohci->request_generation;
-
-	p.header[0] = le32_to_cpu(ctx->buffer[0]);
-	p.header[1] = le32_to_cpu(ctx->buffer[1]);
-	p.header[2] = le32_to_cpu(ctx->buffer[2]);
+	p.header[0] = le32_to_cpu(buffer[0]);
+	p.header[1] = le32_to_cpu(buffer[1]);
+	p.header[2] = le32_to_cpu(buffer[2]);
 
 	tcode = (p.header[0] >> 4) & 0x0f;
 	switch (tcode) {
 	case TCODE_WRITE_QUADLET_REQUEST:
 	case TCODE_READ_QUADLET_RESPONSE:
-		p.header[3] = ctx->buffer[3];
+		p.header[3] = (__force __u32) buffer[3];
 		p.header_length = 16;
+		p.payload_length = 0;
 		break;
 
-	case TCODE_WRITE_BLOCK_REQUEST:
 	case TCODE_READ_BLOCK_REQUEST :
+		p.header[3] = le32_to_cpu(buffer[3]);
+		p.header_length = 16;
+		p.payload_length = 0;
+		break;
+
+	case TCODE_WRITE_BLOCK_REQUEST:
 	case TCODE_READ_BLOCK_RESPONSE:
 	case TCODE_LOCK_REQUEST:
 	case TCODE_LOCK_RESPONSE:
-		p.header[3] = le32_to_cpu(ctx->buffer[3]);
+		p.header[3] = le32_to_cpu(buffer[3]);
 		p.header_length = 16;
+		p.payload_length = p.header[3] >> 16;
 		break;
 
 	case TCODE_WRITE_RESPONSE:
 	case TCODE_READ_QUADLET_REQUEST:
+	case OHCI_TCODE_PHY_PACKET:
 		p.header_length = 12;
+		p.payload_length = 0;
 		break;
 	}
 
-	p.payload = (void *) ctx->buffer + p.header_length;
-	p.payload_length = length - p.header_length;
+	p.payload = (void *) buffer + p.header_length;
+
+	/* FIXME: What to do about evt_* errors? */
+	length = (p.header_length + p.payload_length + 3) / 4;
+	status = le32_to_cpu(buffer[length]);
+
+	p.ack = ((status >> 16) & 0x1f) - 16;
+	p.speed = (status >> 21) & 0x7;
+	p.timestamp = status & 0xffff;
+	p.generation = ohci->request_generation;
 
 	/* The OHCI bus reset handler synthesizes a phy packet with
 	 * the new generation number when a bus reset happens (see
@@ -283,69 +316,84 @@ static void ar_context_tasklet(unsigned long data)
 	 * request. */
 
 	if (p.ack + 16 == 0x09)
-		ohci->request_generation = (ctx->buffer[2] >> 16) & 0xff;
+		ohci->request_generation = (buffer[2] >> 16) & 0xff;
 	else if (ctx == &ohci->ar_request_ctx)
 		fw_core_handle_request(&ohci->card, &p);
 	else
 		fw_core_handle_response(&ohci->card, &p);
 
-	ctx->descriptor.data_address = cpu_to_le32(ctx->buffer_bus);
-	ctx->descriptor.req_count = cpu_to_le16(sizeof ctx->buffer);
-	ctx->descriptor.res_count = cpu_to_le16(sizeof ctx->buffer);
-
-	dma_sync_single_for_device(ohci->card.device, ctx->descriptor_bus,
-				   sizeof ctx->descriptor_bus, DMA_TO_DEVICE);
+	return buffer + length + 1;
+}
 
-	/* Make sure the active bit is 0 before we reprogram the DMA. */
-	for (i = 0; i < MAX_STOP_CONTEXT_LOOPS; i++)
-		if (!(reg_read(ctx->ohci,
-			       ctx->control_clear) & CONTEXT_ACTIVE))
-			break;
-	if (i == MAX_STOP_CONTEXT_LOOPS)
-		fw_error("Failed to stop ar context\n");
+static void ar_context_tasklet(unsigned long data)
+{
+	struct ar_context *ctx = (struct ar_context *)data;
+	struct fw_ohci *ohci = ctx->ohci;
+	struct ar_buffer *ab;
+	struct descriptor *d;
+	void *buffer, *end;
+
+	ab = ctx->current_buffer;
+	d = &ab->descriptor;
+
+	if (d->res_count == 0) {
+		size_t size, rest, offset;
+
+		/* This descriptor is finished and we may have a
+		 * packet split across this and the next buffer. We
+		 * reuse the page for reassembling the split packet. */
+
+		offset = offsetof(struct ar_buffer, data);
+		dma_unmap_single(ohci->card.device,
+				 ab->descriptor.data_address - offset,
+				 PAGE_SIZE, DMA_BIDIRECTIONAL);
+
+		buffer = ab;
+		ab = ab->next;
+		d = &ab->descriptor;
+		size = buffer + PAGE_SIZE - ctx->pointer;
+		rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count);
+		memmove(buffer, ctx->pointer, size);
+		memcpy(buffer + size, ab->data, rest);
+		ctx->current_buffer = ab;
+		ctx->pointer = (void *) ab->data + rest;
+		end = buffer + size + rest;
+
+		while (buffer < end)
+			buffer = handle_ar_packet(ctx, buffer);
+
+		free_page((unsigned long)buffer);
+		ar_context_add_page(ctx);
+	} else {
+		buffer = ctx->pointer;
+		ctx->pointer = end =
+			(void *) ab + PAGE_SIZE - le16_to_cpu(d->res_count);
 
-	ar_context_run(ctx);
+		while (buffer < end)
+			buffer = handle_ar_packet(ctx, buffer);
+	}
 }
 
 static int
 ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci, u32 control_set)
 {
-	ctx->descriptor_bus =
-		dma_map_single(ohci->card.device, &ctx->descriptor,
-			       sizeof ctx->descriptor, DMA_TO_DEVICE);
-	if (ctx->descriptor_bus == 0)
-		return -ENOMEM;
-
-	if (ctx->descriptor_bus & 0xf)
-		fw_notify("descriptor not 16-byte aligned: 0x%08lx\n",
-			  (unsigned long)ctx->descriptor_bus);
-
-	ctx->buffer_bus =
-		dma_map_single(ohci->card.device, ctx->buffer,
-			       sizeof ctx->buffer, DMA_FROM_DEVICE);
-
-	if (ctx->buffer_bus == 0) {
-		dma_unmap_single(ohci->card.device, ctx->descriptor_bus,
-				 sizeof ctx->descriptor, DMA_TO_DEVICE);
-		return -ENOMEM;
-	}
-
-	memset(&ctx->descriptor, 0, sizeof ctx->descriptor);
-	ctx->descriptor.control = cpu_to_le16(descriptor_input_more |
-					      descriptor_status |
-					      descriptor_branch_always);
-	ctx->descriptor.req_count = cpu_to_le16(sizeof ctx->buffer);
-	ctx->descriptor.data_address = cpu_to_le32(ctx->buffer_bus);
-	ctx->descriptor.res_count = cpu_to_le16(sizeof ctx->buffer);
+	struct ar_buffer ab;
 
 	ctx->control_set = control_set;
 	ctx->control_clear = control_set + 4;
 	ctx->command_ptr = control_set + 12;
 	ctx->ohci = ohci;
-
+	ctx->last_buffer = &ab;
 	tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);
 
-	ar_context_run(ctx);
+	ar_context_add_page(ctx);
+	ar_context_add_page(ctx);
+	ctx->current_buffer = ab.next;
+	ctx->pointer = ctx->current_buffer->data;
+
+	reg_write(ctx->ohci, ctx->command_ptr, ab.descriptor.branch_address);
+	reg_write(ctx->ohci, ctx->control_set, CONTEXT_RUN);
+	flush_writes(ctx->ohci);
 
 	return 0;
 }
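
As an aside, the subtlest part of the new tasklet is reassembling a packet that
straddles two pages. The following stand-alone snippet is ordinary user-space C
with made-up buffer sizes and data, not driver code; it only demonstrates the same
memmove()/memcpy() pattern: the finished page is reused as scratch space, its
unhandled tail is slid to the front, and the continuation from the next page is
appended so the packet can be parsed from one contiguous region.

#include <stdio.h>
#include <string.h>

int main(void)
{
	char full_page[16];
	char next_page[16];

	/* 8 bytes already handled, then an 8-byte packet fragment. */
	memcpy(full_page, "xxxxxxxxHELLO, W", 16);
	/* The remaining 5 bytes of that packet start the next page. */
	memcpy(next_page, "ORLD!yyyyyyyyyyy", 16);

	char *pointer = full_page + 8;		/* first unhandled byte        */
	size_t size   = sizeof full_page - 8;	/* tail still in this page     */
	size_t rest   = 5;			/* continuation in the next one */

	/* Reuse the finished page as scratch space, as the tasklet does:
	 * slide the tail to the front, then append the continuation. */
	memmove(full_page, pointer, size);
	memcpy(full_page + size, next_page, rest);

	printf("%.*s\n", (int)(size + rest), full_page);	/* HELLO, WORLD! */
	return 0;
}

In the driver, ctx->pointer plays the role of 'pointer' here, and the reassembled
region is walked by handle_ar_packet() before the page is freed.
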
diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/fw-transaction.c
index 4a48e2d7694e..fb3b77e1bb2d 100644
--- a/drivers/firewire/fw-transaction.c
+++ b/drivers/firewire/fw-transaction.c
@@ -640,7 +640,8 @@ fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
 	spin_unlock_irqrestore(&card->lock, flags);
 
 	if (&t->link == &card->transaction_list) {
-		fw_notify("Unsolicited response\n");
+		fw_notify("Unsolicited response (source %x, tlabel %x)\n",
+			  source, tlabel);
 		return;
 	}
 