 drivers/firewire/fw-ohci.c | 360 ++++++++++++++++++++++++++-----------------
 1 file changed, 218 insertions(+), 142 deletions(-)
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
index 8499d70bf9ee..86fe55cff28a 100644
--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/fw-ohci.c
@@ -75,6 +75,30 @@ struct ar_context {
 	struct tasklet_struct tasklet;
 };
 
+struct context;
+
+typedef int (*descriptor_callback_t)(struct context *ctx,
+				     struct descriptor *d,
+				     struct descriptor *last);
+struct context {
+	struct fw_ohci *ohci;
+	u32 regs;
+
+	struct descriptor *buffer;
+	dma_addr_t buffer_bus;
+	size_t buffer_size;
+	struct descriptor *head_descriptor;
+	struct descriptor *tail_descriptor;
+	struct descriptor *tail_descriptor_last;
+	struct descriptor *prev_descriptor;
+
+	descriptor_callback_t callback;
+
+	struct tasklet_struct tasklet;
+};
+
+
+
 struct at_context {
 	struct fw_ohci *ohci;
 	dma_addr_t descriptor_bus;
@@ -103,15 +127,7 @@ struct at_context {
 
 struct iso_context {
 	struct fw_iso_context base;
-	struct tasklet_struct tasklet;
-	u32 regs;
-
-	struct descriptor *buffer;
-	dma_addr_t buffer_bus;
-	struct descriptor *head_descriptor;
-	struct descriptor *tail_descriptor;
-	struct descriptor *tail_descriptor_last;
-	struct descriptor *prev_descriptor;
+	struct context context;
 };
 
 #define CONFIG_ROM_SIZE 1024
@@ -394,6 +410,154 @@ ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci, u32 regs)
 
 	return 0;
 }
+
+static void context_tasklet(unsigned long data)
+{
+	struct context *ctx = (struct context *) data;
+	struct fw_ohci *ohci = ctx->ohci;
+	struct descriptor *d, *last;
+	u32 address;
+	int z;
+
+	dma_sync_single_for_cpu(ohci->card.device, ctx->buffer_bus,
+				ctx->buffer_size, DMA_TO_DEVICE);
+
+	d = ctx->tail_descriptor;
+	last = ctx->tail_descriptor_last;
+
+	while (last->branch_address != 0) {
+		address = le32_to_cpu(last->branch_address);
+		z = address & 0xf;
+		d = ctx->buffer + (address - ctx->buffer_bus) / sizeof *d;
+		last = (z == 2) ? d : d + z - 1;
+
+		if (!ctx->callback(ctx, d, last))
+			break;
+
+		ctx->tail_descriptor = d;
+		ctx->tail_descriptor_last = last;
+	}
+}
+
+static int
+context_init(struct context *ctx, struct fw_ohci *ohci,
+	     size_t buffer_size, u32 regs,
+	     descriptor_callback_t callback)
+{
+	ctx->ohci = ohci;
+	ctx->regs = regs;
+	ctx->buffer_size = buffer_size;
+	ctx->buffer = kmalloc(buffer_size, GFP_KERNEL);
+	if (ctx->buffer == NULL)
+		return -ENOMEM;
+
+	tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
+	ctx->callback = callback;
+
+	ctx->buffer_bus =
+		dma_map_single(ohci->card.device, ctx->buffer,
+			       buffer_size, DMA_TO_DEVICE);
+	if (dma_mapping_error(ctx->buffer_bus)) {
+		kfree(ctx->buffer);
+		return -ENOMEM;
+	}
+
+	ctx->head_descriptor = ctx->buffer;
+	ctx->prev_descriptor = ctx->buffer;
+	ctx->tail_descriptor = ctx->buffer;
+	ctx->tail_descriptor_last = ctx->buffer;
+
+	/* We put a dummy descriptor in the buffer that has a NULL
+	 * branch address and looks like it's been sent.  That way we
+	 * have a descriptor to append DMA programs to.  Also, the
+	 * ring buffer invariant is that it always has at least one
+	 * element so that head == tail means buffer full. */
+
+	memset(ctx->head_descriptor, 0, sizeof *ctx->head_descriptor);
+	ctx->head_descriptor->control = cpu_to_le16(descriptor_output_last);
+	ctx->head_descriptor->transfer_status = cpu_to_le16(0x8011);
+	ctx->head_descriptor++;
+
+	return 0;
+}
+
+static void
+context_release(struct context *ctx)
+{
+	struct fw_card *card = &ctx->ohci->card;
+
+	dma_unmap_single(card->device, ctx->buffer_bus,
+			 ctx->buffer_size, DMA_TO_DEVICE);
+	kfree(ctx->buffer);
+}
+
+static struct descriptor *
+context_get_descriptors(struct context *ctx, int z, dma_addr_t *d_bus)
+{
+	struct descriptor *d, *tail, *end;
+
+	d = ctx->head_descriptor;
+	tail = ctx->tail_descriptor;
+	end = ctx->buffer + ctx->buffer_size / sizeof(struct descriptor);
+
+	if (d + z <= tail) {
+		goto has_space;
+	} else if (d > tail && d + z <= end) {
+		goto has_space;
+	} else if (d > tail && ctx->buffer + z <= tail) {
+		d = ctx->buffer;
+		goto has_space;
+	}
+
+	return NULL;
+
+ has_space:
+	memset(d, 0, z * sizeof *d);
+	*d_bus = ctx->buffer_bus + (d - ctx->buffer) * sizeof *d;
+
+	return d;
+}
+
+static void context_run(struct context *ctx, u32 cycle_match)
+{
+	struct fw_ohci *ohci = ctx->ohci;
+
+	reg_write(ohci, command_ptr(ctx->regs),
+		  le32_to_cpu(ctx->tail_descriptor_last->branch_address));
+	reg_write(ohci, control_clear(ctx->regs), ~0);
+	reg_write(ohci, control_set(ctx->regs), CONTEXT_RUN | cycle_match);
+	flush_writes(ohci);
+}
+
+static void context_append(struct context *ctx,
+			   struct descriptor *d, int z, int extra)
+{
+	dma_addr_t d_bus;
+
+	d_bus = ctx->buffer_bus + (d - ctx->buffer) * sizeof *d;
+
+	ctx->head_descriptor = d + z + extra;
+	ctx->prev_descriptor->branch_address = cpu_to_le32(d_bus | z);
+	ctx->prev_descriptor = z == 2 ? d : d + z - 1;
+
+	dma_sync_single_for_device(ctx->ohci->card.device, ctx->buffer_bus,
+				   ctx->buffer_size, DMA_TO_DEVICE);
+
+	reg_write(ctx->ohci, control_set(ctx->regs), CONTEXT_WAKE);
+	flush_writes(ctx->ohci);
+}
+
+static void context_stop(struct context *ctx)
+{
+	u32 reg;
+
+	reg_write(ctx->ohci, control_clear(ctx->regs), CONTEXT_RUN);
+
+	reg = reg_read(ctx->ohci, control_set(ctx->regs));
+	if (reg & CONTEXT_ACTIVE)
+		fw_notify("Tried to stop context, but it is still active "
+			  "(0x%08x).\n", reg);
+}
 
 static void
 do_packet_callbacks(struct fw_ohci *ohci, struct list_head *list)
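
Note: the generic walk in context_tasklet() leans on the OHCI 1394 branch_address encoding, where the low four bits of the word hold Z, the number of 16-byte descriptors in the next block, and the remaining bits are that block's 16-byte-aligned bus address. A standalone sketch of the decode step (helper name hypothetical; the logic is copied from context_tasklet above):

	/* Sketch only: split a branch_address word into the next block
	 * and its status-bearing descriptor.  When z == 2 the second
	 * slot carries immediate data rather than a descriptor, so the
	 * first slot holds the status; otherwise the last slot does. */
	static struct descriptor *
	decode_branch(struct context *ctx, __le32 branch,
		      struct descriptor **last)
	{
		u32 address = le32_to_cpu(branch);
		int z = address & 0xf;
		struct descriptor *d;

		d = ctx->buffer + (address - ctx->buffer_bus) / sizeof *d;
		*last = (z == 2) ? d : d + z - 1;

		return d;
	}
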
@@ -852,7 +1016,7 @@ static irqreturn_t irq_handler(int irq, void *data)
 
 	while (iso_event) {
 		i = ffs(iso_event) - 1;
-		tasklet_schedule(&ohci->ir_context_list[i].tasklet);
+		tasklet_schedule(&ohci->ir_context_list[i].context.tasklet);
 		iso_event &= ~(1 << i);
 	}
 
@@ -861,7 +1025,7 @@ static irqreturn_t irq_handler(int irq, void *data)
 
 	while (iso_event) {
 		i = ffs(iso_event) - 1;
-		tasklet_schedule(&ohci->it_context_list[i].tasklet);
+		tasklet_schedule(&ohci->it_context_list[i].context.tasklet);
 		iso_event &= ~(1 << i);
 	}
 
@@ -1085,64 +1249,41 @@ static void ir_context_tasklet(unsigned long data)
 
 #define ISO_BUFFER_SIZE (64 * 1024)
 
-static void flush_iso_context(struct iso_context *ctx)
-{
-	struct fw_ohci *ohci = fw_ohci(ctx->base.card);
-	struct descriptor *d, *last;
-	u32 address;
-	int z;
-
-	dma_sync_single_for_cpu(ohci->card.device, ctx->buffer_bus,
-				ISO_BUFFER_SIZE, DMA_TO_DEVICE);
-
-	d = ctx->tail_descriptor;
-	last = ctx->tail_descriptor_last;
-
-	while (last->branch_address != 0 && last->transfer_status != 0) {
-		address = le32_to_cpu(last->branch_address);
-		z = address & 0xf;
-		d = ctx->buffer + (address - ctx->buffer_bus) / sizeof *d;
-
-		if (z == 2)
-			last = d;
-		else
-			last = d + z - 1;
-
-		if (le16_to_cpu(last->control) & descriptor_irq_always)
-			ctx->base.callback(&ctx->base,
-					   0, le16_to_cpu(last->res_count),
-					   ctx->base.callback_data);
-	}
-
-	ctx->tail_descriptor = d;
-	ctx->tail_descriptor_last = last;
-}
-
-static void it_context_tasklet(unsigned long data)
+static int handle_it_packet(struct context *context,
+			    struct descriptor *d,
+			    struct descriptor *last)
 {
-	struct iso_context *ctx = (struct iso_context *)data;
+	struct iso_context *ctx =
+		container_of(context, struct iso_context, context);
 
-	flush_iso_context(ctx);
+	if (last->transfer_status == 0)
+		/* This descriptor isn't done yet, stop iteration. */
+		return 0;
+
+	if (le16_to_cpu(last->control) & descriptor_irq_always)
+		ctx->base.callback(&ctx->base,
+				   0, le16_to_cpu(last->res_count),
+				   ctx->base.callback_data);
+
+	return 1;
 }
 
-static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
-							int type)
+static struct fw_iso_context *
+ohci_allocate_iso_context(struct fw_card *card, int type)
 {
 	struct fw_ohci *ohci = fw_ohci(card);
 	struct iso_context *ctx, *list;
-	void (*tasklet) (unsigned long data);
+	descriptor_callback_t callback;
 	u32 *mask;
 	unsigned long flags;
-	int index;
+	int index, retval;
 
 	if (type == FW_ISO_CONTEXT_TRANSMIT) {
 		mask = &ohci->it_context_mask;
 		list = ohci->it_context_list;
-		tasklet = it_context_tasklet;
+		callback = handle_it_packet;
 	} else {
-		mask = &ohci->ir_context_mask;
-		list = ohci->ir_context_list;
-		tasklet = ir_context_tasklet;
+		return ERR_PTR(-EINVAL);
 	}
 
 	spin_lock_irqsave(&ohci->lock, flags);
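
Note: the hunk above also fixes the contract for per-block callbacks. context_tasklet() hands the callback the first and last descriptors of the next queued block; the callback returns nonzero to retire the block (the tail advances past it) or zero to stop the walk because the hardware has not completed it yet. handle_it_packet() is the isochronous-transmit instance; a minimal conforming callback would look like this (sketch, names hypothetical):

	static int handle_block(struct context *ctx,
				struct descriptor *d,
				struct descriptor *last)
	{
		if (last->transfer_status == 0)
			return 0;	/* block still in flight; stop walking */

		/* ... consume descriptors d through last ... */

		return 1;		/* retired; tail advances past this block */
	}
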
@@ -1156,50 +1297,22 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
 
 	ctx = &list[index];
 	memset(ctx, 0, sizeof *ctx);
-	tasklet_init(&ctx->tasklet, tasklet, (unsigned long)ctx);
-
-	ctx->buffer = kmalloc(ISO_BUFFER_SIZE, GFP_KERNEL);
-	if (ctx->buffer == NULL)
-		goto buffer_alloc_failed;
-
-	ctx->buffer_bus =
-		dma_map_single(card->device, ctx->buffer,
-			       ISO_BUFFER_SIZE, DMA_TO_DEVICE);
-	if (dma_mapping_error(ctx->buffer_bus))
-		goto buffer_map_failed;
-
-	ctx->head_descriptor = ctx->buffer;
-	ctx->prev_descriptor = ctx->buffer;
-	ctx->tail_descriptor = ctx->buffer;
-	ctx->tail_descriptor_last = ctx->buffer;
-
-	/* We put a dummy descriptor in the buffer that has a NULL
-	 * branch address and looks like it's been sent.  That way we
-	 * have a descriptor to append DMA programs to.  Also, the
-	 * ring buffer invariant is that it always has at least one
-	 * element so that head == tail means buffer full. */
-
-	memset(ctx->head_descriptor, 0, sizeof *ctx->head_descriptor);
-	ctx->head_descriptor->control = cpu_to_le16(descriptor_output_last);
-	ctx->head_descriptor->transfer_status = cpu_to_le16(0x8011);
-	ctx->head_descriptor++;
+	retval = context_init(&ctx->context, ohci, ISO_BUFFER_SIZE,
+			      OHCI1394_IsoXmitContextBase(index), callback);
+	if (retval < 0) {
+		spin_lock_irqsave(&ohci->lock, flags);
+		*mask |= 1 << index;
+		spin_unlock_irqrestore(&ohci->lock, flags);
+		return ERR_PTR(retval);
+	}
 
 	return &ctx->base;
-
- buffer_map_failed:
-	kfree(ctx->buffer);
- buffer_alloc_failed:
-	spin_lock_irqsave(&ohci->lock, flags);
-	*mask |= 1 << index;
-	spin_unlock_irqrestore(&ohci->lock, flags);
-
-	return ERR_PTR(-ENOMEM);
 }
 
 static int ohci_send_iso(struct fw_iso_context *base, s32 cycle)
 {
-	struct iso_context *ctx = (struct iso_context *)base;
-	struct fw_ohci *ohci = fw_ohci(ctx->base.card);
+	struct iso_context *ctx = container_of(base, struct iso_context, base);
+	struct fw_ohci *ohci = ctx->context.ohci;
 	u32 cycle_match = 0;
 	int index;
 
@@ -1209,12 +1322,7 @@ static int ohci_send_iso(struct fw_iso_context *base, s32 cycle)
 		(cycle & 0x7fff) << 16;
 
 	reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
-	reg_write(ohci, OHCI1394_IsoXmitCommandPtr(index),
-		  le32_to_cpu(ctx->tail_descriptor_last->branch_address));
-	reg_write(ohci, OHCI1394_IsoXmitContextControlClear(index), ~0);
-	reg_write(ohci, OHCI1394_IsoXmitContextControlSet(index),
-		  CONTEXT_RUN | cycle_match);
-	flush_writes(ohci);
+	context_run(&ctx->context, cycle_match);
 
 	return 0;
 }
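
Note: context_run() points the context's CommandPtr register at the tail block (bus address ORed with its Z value) and sets CONTEXT_RUN; the cycle_match argument lets isochronous transmit start on a specific bus cycle. A sketch of how the caller builds that value, assuming, per the OHCI 1394 spec, that bit 31 of the IT ContextControl register is cycleMatchEnable and bits 30-16 hold the 15-bit match cycle (the constant name below is hypothetical):

	#define IT_CYCLE_MATCH_ENABLE	0x80000000
	u32 cycle_match = 0;

	if (cycle > 0)
		cycle_match = IT_CYCLE_MATCH_ENABLE | (cycle & 0x7fff) << 16;

	context_run(&ctx->context, cycle_match);
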
@@ -1222,12 +1330,10 @@ static int ohci_send_iso(struct fw_iso_context *base, s32 cycle)
 static void ohci_free_iso_context(struct fw_iso_context *base)
 {
 	struct fw_ohci *ohci = fw_ohci(base->card);
-	struct iso_context *ctx = (struct iso_context *)base;
+	struct iso_context *ctx = container_of(base, struct iso_context, base);
 	unsigned long flags;
 	int index;
 
-	flush_iso_context(ctx);
-
 	spin_lock_irqsave(&ohci->lock, flags);
 
 	if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
@@ -1243,8 +1349,7 @@ static void ohci_free_iso_context(struct fw_iso_context *base)
 	}
 	flush_writes(ohci);
 
-	dma_unmap_single(ohci->card.device, ctx->buffer_bus,
-			 ISO_BUFFER_SIZE, DMA_TO_DEVICE);
+	context_release(&ctx->context);
 
 	spin_unlock_irqrestore(&ohci->lock, flags);
 }
@@ -1255,24 +1360,20 @@ ohci_queue_iso(struct fw_iso_context *base,
 			struct fw_iso_buffer *buffer,
 			unsigned long payload)
 {
-	struct iso_context *ctx = (struct iso_context *)base;
-	struct fw_ohci *ohci = fw_ohci(ctx->base.card);
-	struct descriptor *d, *end, *last, *tail, *pd;
+	struct iso_context *ctx = container_of(base, struct iso_context, base);
+	struct descriptor *d, *last, *pd;
 	struct fw_iso_packet *p;
 	__le32 *header;
 	dma_addr_t d_bus, page_bus;
 	u32 z, header_z, payload_z, irq;
 	u32 payload_index, payload_end_index, next_page_index;
-	int index, page, end_page, i, length, offset;
+	int page, end_page, i, length, offset;
 
 	/* FIXME: Cycle lost behavior should be configurable: lose
 	 * packet, retransmit or terminate.. */
 
 	p = packet;
 	payload_index = payload;
-	d = ctx->head_descriptor;
-	tail = ctx->tail_descriptor;
-	end = ctx->buffer + ISO_BUFFER_SIZE / sizeof(struct descriptor);
 
 	if (p->skip)
 		z = 1;
@@ -1293,21 +1394,9 @@ ohci_queue_iso(struct fw_iso_context *base,
 	/* Get header size in number of descriptors. */
 	header_z = DIV_ROUND_UP(p->header_length, sizeof *d);
 
-	if (d + z + header_z <= tail) {
-		goto has_space;
-	} else if (d > tail && d + z + header_z <= end) {
-		goto has_space;
-	} else if (d > tail && ctx->buffer + z + header_z <= tail) {
-		d = ctx->buffer;
-		goto has_space;
-	}
-
-	/* No space in buffer */
-	return -1;
-
- has_space:
-	memset(d, 0, (z + header_z) * sizeof *d);
-	d_bus = ctx->buffer_bus + (d - ctx->buffer) * sizeof *d;
+	d = context_get_descriptors(&ctx->context, z + header_z, &d_bus);
+	if (d == NULL)
+		return -ENOMEM;
 
 	if (!p->skip) {
 		d[0].control = cpu_to_le16(descriptor_key_immediate);
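
Note: the block size handed to context_get_descriptors() is counted in 16-byte descriptor slots: z covers the live descriptors (a bare skip descriptor, or an immediate-key descriptor for the iso header, plus output descriptors for the payload), while header_z covers the extra slots the copied-in header spills into. A sketch of the accounting, under the assumption that payload_z counts the payload pages spanned:

	/* Sketch: slots = live descriptors (z) + header spill (header_z);
	 * each slot is sizeof(struct descriptor) == 16 bytes. */
	if (p->skip)
		z = 1;		/* lone skip descriptor */
	else
		z = 2;		/* descriptor slot + immediate-data slot */
	z += payload_z;		/* assumed: one per payload page spanned */
	header_z = DIV_ROUND_UP(p->header_length, sizeof(struct descriptor));
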
@@ -1346,31 +1435,18 @@ ohci_queue_iso(struct fw_iso_context *base,
 		payload_index += length;
 	}
 
-	if (z == 2)
-		last = d;
-	else
-		last = d + z - 1;
-
 	if (p->interrupt)
 		irq = descriptor_irq_always;
 	else
 		irq = descriptor_no_irq;
 
+	last = z == 2 ? d : d + z - 1;
 	last->control |= cpu_to_le16(descriptor_output_last |
 				     descriptor_status |
 				     descriptor_branch_always |
 				     irq);
 
-	dma_sync_single_for_device(ohci->card.device, ctx->buffer_bus,
-				   ISO_BUFFER_SIZE, DMA_TO_DEVICE);
-
-	ctx->head_descriptor = d + z + header_z;
-	ctx->prev_descriptor->branch_address = cpu_to_le32(d_bus | z);
-	ctx->prev_descriptor = last;
-
-	index = ctx - ohci->it_context_list;
-	reg_write(ohci, OHCI1394_IsoXmitContextControlSet(index), CONTEXT_WAKE);
-	flush_writes(ohci);
+	context_append(&ctx->context, d, z, header_z);
 
 	return 0;
 }
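
Note: with this refactor every DMA producer follows the same three-step pattern: reserve contiguous descriptor slots, fill them in, append. context_get_descriptors() fails when the ring is full (per the dummy-descriptor invariant above, head == tail means full, so one slot is always kept occupied), and context_append() links the new block off the previous block's branch_address and pokes CONTEXT_WAKE. A condensed sketch of the pattern inside a queueing function, with the fill step elided:

	struct descriptor *d;
	dma_addr_t d_bus;

	d = context_get_descriptors(&ctx->context, z + header_z, &d_bus);
	if (d == NULL)
		return -ENOMEM;	/* ring full */

	/* ... fill d[0..z-1]; set descriptor_output_last |
	 * descriptor_status | descriptor_branch_always on the
	 * status-bearing descriptor ... */

	context_append(&ctx->context, d, z, header_z);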