Diffstat (limited to 'drivers/firewire/fw-ohci.c')
-rw-r--r--	drivers/firewire/fw-ohci.c	148
1 file changed, 74 insertions(+), 74 deletions(-)
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
index beb924403dab..7e1a4e1f7d46 100644
--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/fw-ohci.c
@@ -33,19 +33,19 @@
 #include "fw-transaction.h"
 #include "fw-ohci.h"
 
-#define descriptor_output_more		0
-#define descriptor_output_last		(1 << 12)
-#define descriptor_input_more		(2 << 12)
-#define descriptor_input_last		(3 << 12)
-#define descriptor_status		(1 << 11)
-#define descriptor_key_immediate	(2 << 8)
-#define descriptor_ping			(1 << 7)
-#define descriptor_yy			(1 << 6)
-#define descriptor_no_irq		(0 << 4)
-#define descriptor_irq_error		(1 << 4)
-#define descriptor_irq_always		(3 << 4)
-#define descriptor_branch_always	(3 << 2)
-#define descriptor_wait			(3 << 0)
+#define DESCRIPTOR_OUTPUT_MORE		0
+#define DESCRIPTOR_OUTPUT_LAST		(1 << 12)
+#define DESCRIPTOR_INPUT_MORE		(2 << 12)
+#define DESCRIPTOR_INPUT_LAST		(3 << 12)
+#define DESCRIPTOR_STATUS		(1 << 11)
+#define DESCRIPTOR_KEY_IMMEDIATE	(2 << 8)
+#define DESCRIPTOR_PING			(1 << 7)
+#define DESCRIPTOR_YY			(1 << 6)
+#define DESCRIPTOR_NO_IRQ		(0 << 4)
+#define DESCRIPTOR_IRQ_ERROR		(1 << 4)
+#define DESCRIPTOR_IRQ_ALWAYS		(3 << 4)
+#define DESCRIPTOR_BRANCH_ALWAYS	(3 << 2)
+#define DESCRIPTOR_WAIT			(3 << 0)
 
 struct descriptor {
 	__le16 req_count;
@@ -70,10 +70,10 @@ struct db_descriptor {
 	__le32 reserved1;
 } __attribute__((aligned(16)));
 
-#define control_set(regs)	(regs)
-#define control_clear(regs)	((regs) + 4)
-#define command_ptr(regs)	((regs) + 12)
-#define context_match(regs)	((regs) + 16)
+#define CONTROL_SET(regs)	(regs)
+#define CONTROL_CLEAR(regs)	((regs) + 4)
+#define COMMAND_PTR(regs)	((regs) + 12)
+#define CONTEXT_MATCH(regs)	((regs) + 16)
 
 struct ar_buffer {
 	struct descriptor descriptor;
@@ -112,12 +112,12 @@ struct context {
 	struct tasklet_struct tasklet;
 };
 
-#define it_header_sy(v)          ((v) <<  0)
-#define it_header_tcode(v)       ((v) <<  4)
-#define it_header_channel(v)     ((v) <<  8)
-#define it_header_tag(v)         ((v) << 14)
-#define it_header_speed(v)       ((v) << 16)
-#define it_header_data_length(v) ((v) << 16)
+#define IT_HEADER_SY(v)          ((v) <<  0)
+#define IT_HEADER_TCODE(v)       ((v) <<  4)
+#define IT_HEADER_CHANNEL(v)     ((v) <<  8)
+#define IT_HEADER_TAG(v)         ((v) << 14)
+#define IT_HEADER_SPEED(v)       ((v) << 16)
+#define IT_HEADER_DATA_LENGTH(v) ((v) << 16)
 
 struct iso_context {
 	struct fw_iso_context base;
@@ -256,9 +256,9 @@ static int ar_context_add_page(struct ar_context *ctx)
 	}
 
 	memset(&ab->descriptor, 0, sizeof ab->descriptor);
-	ab->descriptor.control = cpu_to_le16(descriptor_input_more |
-					     descriptor_status |
-					     descriptor_branch_always);
+	ab->descriptor.control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
+					     DESCRIPTOR_STATUS |
+					     DESCRIPTOR_BRANCH_ALWAYS);
 	offset = offsetof(struct ar_buffer, data);
 	ab->descriptor.req_count = cpu_to_le16(PAGE_SIZE - offset);
 	ab->descriptor.data_address = cpu_to_le32(ab_bus + offset);
@@ -271,7 +271,7 @@ static int ar_context_add_page(struct ar_context *ctx)
 	ctx->last_buffer->next = ab;
 	ctx->last_buffer = ab;
 
-	reg_write(ctx->ohci, control_set(ctx->regs), CONTEXT_WAKE);
+	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
 	flush_writes(ctx->ohci);
 
 	return 0;
@@ -416,8 +416,8 @@ ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci, u32 regs)
 	ctx->current_buffer = ab.next;
 	ctx->pointer = ctx->current_buffer->data;
 
-	reg_write(ctx->ohci, command_ptr(ctx->regs), ab.descriptor.branch_address);
-	reg_write(ctx->ohci, control_set(ctx->regs), CONTEXT_RUN);
+	reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ab.descriptor.branch_address);
+	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
 	flush_writes(ctx->ohci);
 
 	return 0;
@@ -488,7 +488,7 @@ context_init(struct context *ctx, struct fw_ohci *ohci,
 	 */
 
 	memset(ctx->head_descriptor, 0, sizeof *ctx->head_descriptor);
-	ctx->head_descriptor->control = cpu_to_le16(descriptor_output_last);
+	ctx->head_descriptor->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
 	ctx->head_descriptor->transfer_status = cpu_to_le16(0x8011);
 	ctx->head_descriptor++;
 
@@ -536,10 +536,10 @@ static void context_run(struct context *ctx, u32 extra)
 {
 	struct fw_ohci *ohci = ctx->ohci;
 
-	reg_write(ohci, command_ptr(ctx->regs),
+	reg_write(ohci, COMMAND_PTR(ctx->regs),
 		  le32_to_cpu(ctx->tail_descriptor_last->branch_address));
-	reg_write(ohci, control_clear(ctx->regs), ~0);
-	reg_write(ohci, control_set(ctx->regs), CONTEXT_RUN | extra);
+	reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
+	reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
 	flush_writes(ohci);
 }
 
@@ -557,7 +557,7 @@ static void context_append(struct context *ctx,
 	dma_sync_single_for_device(ctx->ohci->card.device, ctx->buffer_bus,
 				   ctx->buffer_size, DMA_TO_DEVICE);
 
-	reg_write(ctx->ohci, control_set(ctx->regs), CONTEXT_WAKE);
+	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
 	flush_writes(ctx->ohci);
 }
 
@@ -566,11 +566,11 @@ static void context_stop(struct context *ctx)
 	u32 reg;
 	int i;
 
-	reg_write(ctx->ohci, control_clear(ctx->regs), CONTEXT_RUN);
+	reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
 	flush_writes(ctx->ohci);
 
 	for (i = 0; i < 10; i++) {
-		reg = reg_read(ctx->ohci, control_set(ctx->regs));
+		reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
 		if ((reg & CONTEXT_ACTIVE) == 0)
 			break;
 
@@ -605,7 +605,7 @@ at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
 		return -1;
 	}
 
-	d[0].control = cpu_to_le16(descriptor_key_immediate);
+	d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
 	d[0].res_count = cpu_to_le16(packet->timestamp);
 
 	/*
@@ -660,9 +660,9 @@ at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
 		z = 2;
 	}
 
-	last->control |= cpu_to_le16(descriptor_output_last |
-				     descriptor_irq_always |
-				     descriptor_branch_always);
+	last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
+				     DESCRIPTOR_IRQ_ALWAYS |
+				     DESCRIPTOR_BRANCH_ALWAYS);
 
 	/* FIXME: Document how the locking works. */
 	if (ohci->generation != packet->generation) {
@@ -673,7 +673,7 @@ at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
 	context_append(ctx, d, z, 4 - z);
 
 	/* If the context isn't already running, start it up. */
-	reg = reg_read(ctx->ohci, control_set(ctx->regs));
+	reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
 	if ((reg & CONTEXT_RUN) == 0)
 		context_run(ctx, 0);
 
@@ -750,11 +750,11 @@ static int handle_at_packet(struct context *context,
 	return 1;
 }
 
-#define header_get_destination(q)	(((q) >> 16) & 0xffff)
-#define header_get_tcode(q)		(((q) >> 4) & 0x0f)
-#define header_get_offset_high(q)	(((q) >> 0) & 0xffff)
-#define header_get_data_length(q)	(((q) >> 16) & 0xffff)
-#define header_get_extended_tcode(q)	(((q) >> 0) & 0xffff)
+#define HEADER_GET_DESTINATION(q)	(((q) >> 16) & 0xffff)
+#define HEADER_GET_TCODE(q)		(((q) >> 4) & 0x0f)
+#define HEADER_GET_OFFSET_HIGH(q)	(((q) >> 0) & 0xffff)
+#define HEADER_GET_DATA_LENGTH(q)	(((q) >> 16) & 0xffff)
+#define HEADER_GET_EXTENDED_TCODE(q)	(((q) >> 0) & 0xffff)
 
 static void
 handle_local_rom(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
@@ -762,9 +762,9 @@ handle_local_rom(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
 	struct fw_packet response;
 	int tcode, length, i;
 
-	tcode = header_get_tcode(packet->header[0]);
+	tcode = HEADER_GET_TCODE(packet->header[0]);
 	if (TCODE_IS_BLOCK_PACKET(tcode))
-		length = header_get_data_length(packet->header[3]);
+		length = HEADER_GET_DATA_LENGTH(packet->header[3]);
 	else
 		length = 4;
 
@@ -791,10 +791,10 @@ handle_local_lock(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
 	__be32 *payload, lock_old;
 	u32 lock_arg, lock_data;
 
-	tcode = header_get_tcode(packet->header[0]);
-	length = header_get_data_length(packet->header[3]);
+	tcode = HEADER_GET_TCODE(packet->header[0]);
+	length = HEADER_GET_DATA_LENGTH(packet->header[3]);
 	payload = packet->payload;
-	ext_tcode = header_get_extended_tcode(packet->header[3]);
+	ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]);
 
 	if (tcode == TCODE_LOCK_REQUEST &&
 	    ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) {
@@ -838,7 +838,7 @@ handle_local_request(struct context *ctx, struct fw_packet *packet)
 
 	offset =
 		((unsigned long long)
-		 header_get_offset_high(packet->header[1]) << 32) |
+		 HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) |
 		packet->header[2];
 	csr = offset - CSR_REGISTER_BASE;
 
@@ -874,7 +874,7 @@ at_context_transmit(struct context *ctx, struct fw_packet *packet)
 
 	spin_lock_irqsave(&ctx->ohci->lock, flags);
 
-	if (header_get_destination(packet->header[0]) == ctx->ohci->node_id &&
+	if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id &&
 	    ctx->ohci->generation == packet->generation) {
 		spin_unlock_irqrestore(&ctx->ohci->lock, flags);
 		handle_local_request(ctx, packet);
@@ -1306,7 +1306,7 @@ static int handle_ir_dualbuffer_packet(struct context *context,
 
 	ctx->header_length = i;
 
-	if (le16_to_cpu(db->control) & descriptor_irq_always) {
+	if (le16_to_cpu(db->control) & DESCRIPTOR_IRQ_ALWAYS) {
 		ir_header = (__le32 *) (db + 1);
 		ctx->base.callback(&ctx->base,
 				   le32_to_cpu(ir_header[0]) & 0xffff,
@@ -1329,7 +1329,7 @@ static int handle_it_packet(struct context *context,
 		/* This descriptor isn't done yet, stop iteration. */
 		return 0;
 
-	if (le16_to_cpu(last->control) & descriptor_irq_always)
+	if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS)
 		ctx->base.callback(&ctx->base, le16_to_cpu(last->res_count),
 				   0, NULL, ctx->base.callback_data);
 
@@ -1428,7 +1428,7 @@ static int ohci_start_iso(struct fw_iso_context *base,
 
 		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index);
 		reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
-		reg_write(ohci, context_match(ctx->context.regs), match);
+		reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match);
 		context_run(&ctx->context, control);
 	}
 
@@ -1525,17 +1525,17 @@ ohci_queue_iso_transmit(struct fw_iso_context *base,
 		return -ENOMEM;
 
 	if (!p->skip) {
-		d[0].control = cpu_to_le16(descriptor_key_immediate);
+		d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
 		d[0].req_count = cpu_to_le16(8);
 
 		header = (__le32 *) &d[1];
-		header[0] = cpu_to_le32(it_header_sy(p->sy) |
-					it_header_tag(p->tag) |
-					it_header_tcode(TCODE_STREAM_DATA) |
-					it_header_channel(ctx->base.channel) |
-					it_header_speed(ctx->base.speed));
+		header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
+					IT_HEADER_TAG(p->tag) |
+					IT_HEADER_TCODE(TCODE_STREAM_DATA) |
+					IT_HEADER_CHANNEL(ctx->base.channel) |
+					IT_HEADER_SPEED(ctx->base.speed));
 		header[1] =
-			cpu_to_le32(it_header_data_length(p->header_length +
+			cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length +
 							  p->payload_length));
 	}
 
@@ -1562,14 +1562,14 @@ ohci_queue_iso_transmit(struct fw_iso_context *base,
 	}
 
 	if (p->interrupt)
-		irq = descriptor_irq_always;
+		irq = DESCRIPTOR_IRQ_ALWAYS;
 	else
-		irq = descriptor_no_irq;
+		irq = DESCRIPTOR_NO_IRQ;
 
 	last = z == 2 ? d : d + z - 1;
-	last->control |= cpu_to_le16(descriptor_output_last |
-				     descriptor_status |
-				     descriptor_branch_always |
+	last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
+				     DESCRIPTOR_STATUS |
+				     DESCRIPTOR_BRANCH_ALWAYS |
 				     irq);
 
 	context_append(&ctx->context, d, z, header_z);
@@ -1602,9 +1602,9 @@ ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
 			return -ENOMEM;
 
 		db = (struct db_descriptor *) d;
-		db->control = cpu_to_le16(descriptor_status |
-					  descriptor_branch_always |
-					  descriptor_wait);
+		db->control = cpu_to_le16(DESCRIPTOR_STATUS |
+					  DESCRIPTOR_BRANCH_ALWAYS |
+					  DESCRIPTOR_WAIT);
 		db->first_size = cpu_to_le16(ctx->base.header_size + 4);
 		context_append(&ctx->context, d, 2, 0);
 	}
@@ -1634,8 +1634,8 @@ ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
 			return -ENOMEM;
 
 		db = (struct db_descriptor *) d;
-		db->control = cpu_to_le16(descriptor_status |
-					  descriptor_branch_always);
+		db->control = cpu_to_le16(DESCRIPTOR_STATUS |
+					  DESCRIPTOR_BRANCH_ALWAYS);
 		db->first_size = cpu_to_le16(ctx->base.header_size + 4);
 		db->first_req_count = cpu_to_le16(header_size);
 		db->first_res_count = db->first_req_count;
@@ -1652,7 +1652,7 @@ ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
 		db->second_buffer = cpu_to_le32(page_bus + offset);
 
 		if (p->interrupt && length == rest)
-			db->control |= cpu_to_le16(descriptor_irq_always);
+			db->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
 
 		context_append(&ctx->context, d, z, header_z);
 		offset = (offset + length) & ~PAGE_MASK;