author     Kristian Høgsberg <krh@redhat.com>          2007-05-07 20:33:35 -0400
committer  Stefan Richter <stefanr@s5r6.in-berlin.de>  2007-05-10 12:24:13 -0400
commit     a77754a75d58d534fd34a5add8ac1bb91d4ffc0f (patch)
tree       773885568a19a8ed354acba1bf5c1d5a63a828d1 /drivers
parent     a98e27198771d066934a263177673ebde797e8fb (diff)
firewire: Uppercase most macro names.
Signed-off-by: Kristian Hoegsberg <krh@redhat.com>
Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>
Diffstat (limited to 'drivers')
 drivers/firewire/fw-card.c        |  42
 drivers/firewire/fw-ohci.c        | 148
 drivers/firewire/fw-sbp2.c        | 107
 drivers/firewire/fw-topology.c    |  40
 drivers/firewire/fw-transaction.c | 126
 5 files changed, 231 insertions(+), 232 deletions(-)
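The patch only renames the macros; their definitions are unchanged. As an illustrative aside (not part of the commit), the standalone C sketch below uses the renamed BIB_* helpers the same way generate_config_rom() composes quadlet 2 of the bus info block; the link_speed, generation, and max_receive values are made-up samples, and BIB_IMC uses 1u here to keep the 31-bit shift well defined in plain ISO C.

#include <stdio.h>
#include <stdint.h>

/* Same shift values as the BIB_* macros introduced by this patch. */
#define BIB_LINK_SPEED(v)  ((v) << 0)
#define BIB_GENERATION(v)  ((v) << 4)
#define BIB_MAX_ROM(v)     ((v) << 8)
#define BIB_MAX_RECEIVE(v) ((v) << 12)
#define BIB_CMC            ((1) << 30)
#define BIB_IMC            ((1u) << 31)

int main(void)
{
        /* Hypothetical card parameters, for illustration only. */
        uint32_t link_speed = 3, generation = 2, max_receive = 0xa;

        uint32_t q = BIB_LINK_SPEED(link_speed) |
                     BIB_GENERATION(generation) |
                     BIB_MAX_ROM(2) |
                     BIB_MAX_RECEIVE(max_receive) |
                     BIB_CMC | BIB_IMC;

        printf("config_rom[2] = 0x%08x\n", (unsigned int) q);
        return 0;
}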
diff --git a/drivers/firewire/fw-card.c b/drivers/firewire/fw-card.c
index 74aab9aafd21..b8404ee5314c 100644
--- a/drivers/firewire/fw-card.c
+++ b/drivers/firewire/fw-card.c
@@ -44,20 +44,20 @@ static LIST_HEAD(card_list);
 static LIST_HEAD(descriptor_list);
 static int descriptor_count;
 
-#define bib_crc(v) ((v) << 0)
-#define bib_crc_length(v) ((v) << 16)
-#define bib_info_length(v) ((v) << 24)
+#define BIB_CRC(v) ((v) << 0)
+#define BIB_CRC_LENGTH(v) ((v) << 16)
+#define BIB_INFO_LENGTH(v) ((v) << 24)
 
-#define bib_link_speed(v) ((v) << 0)
-#define bib_generation(v) ((v) << 4)
-#define bib_max_rom(v) ((v) << 8)
-#define bib_max_receive(v) ((v) << 12)
-#define bib_cyc_clk_acc(v) ((v) << 16)
-#define bib_pmc ((1) << 27)
-#define bib_bmc ((1) << 28)
-#define bib_isc ((1) << 29)
-#define bib_cmc ((1) << 30)
-#define bib_imc ((1) << 31)
+#define BIB_LINK_SPEED(v) ((v) << 0)
+#define BIB_GENERATION(v) ((v) << 4)
+#define BIB_MAX_ROM(v) ((v) << 8)
+#define BIB_MAX_RECEIVE(v) ((v) << 12)
+#define BIB_CYC_CLK_ACC(v) ((v) << 16)
+#define BIB_PMC ((1) << 27)
+#define BIB_BMC ((1) << 28)
+#define BIB_ISC ((1) << 29)
+#define BIB_CMC ((1) << 30)
+#define BIB_IMC ((1) << 31)
 
 static u32 *
 generate_config_rom(struct fw_card *card, size_t *config_rom_length)
@@ -76,15 +76,15 @@ generate_config_rom(struct fw_card *card, size_t *config_rom_length)
 	 */
 
 	memset(config_rom, 0, sizeof config_rom);
-	config_rom[0] = bib_crc_length(4) | bib_info_length(4) | bib_crc(0);
+	config_rom[0] = BIB_CRC_LENGTH(4) | BIB_INFO_LENGTH(4) | BIB_CRC(0);
 	config_rom[1] = 0x31333934;
 
 	config_rom[2] =
-		bib_link_speed(card->link_speed) |
-		bib_generation(card->config_rom_generation++ % 14 + 2) |
-		bib_max_rom(2) |
-		bib_max_receive(card->max_receive) |
-		bib_bmc | bib_isc | bib_cmc | bib_imc;
+		BIB_LINK_SPEED(card->link_speed) |
+		BIB_GENERATION(card->config_rom_generation++ % 14 + 2) |
+		BIB_MAX_ROM(2) |
+		BIB_MAX_RECEIVE(card->max_receive) |
+		BIB_BMC | BIB_ISC | BIB_CMC | BIB_IMC;
 	config_rom[3] = card->guid >> 32;
 	config_rom[4] = card->guid;
 
@@ -318,7 +318,7 @@ fw_card_bm_work(struct work_struct *work)
 		 */
 		spin_unlock_irqrestore(&card->lock, flags);
 		return;
-	} else if (root->config_rom[2] & bib_cmc) {
+	} else if (root->config_rom[2] & BIB_CMC) {
 		/*
 		 * FIXME: I suppose we should set the cmstr bit in the
 		 * STATE_CLEAR register of this node, as described in
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
index beb924403dab..7e1a4e1f7d46 100644
--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/fw-ohci.c
@@ -33,19 +33,19 @@
 #include "fw-transaction.h"
 #include "fw-ohci.h"
 
-#define descriptor_output_more 0
-#define descriptor_output_last (1 << 12)
-#define descriptor_input_more (2 << 12)
-#define descriptor_input_last (3 << 12)
-#define descriptor_status (1 << 11)
-#define descriptor_key_immediate (2 << 8)
-#define descriptor_ping (1 << 7)
-#define descriptor_yy (1 << 6)
-#define descriptor_no_irq (0 << 4)
-#define descriptor_irq_error (1 << 4)
-#define descriptor_irq_always (3 << 4)
-#define descriptor_branch_always (3 << 2)
-#define descriptor_wait (3 << 0)
+#define DESCRIPTOR_OUTPUT_MORE 0
+#define DESCRIPTOR_OUTPUT_LAST (1 << 12)
+#define DESCRIPTOR_INPUT_MORE (2 << 12)
+#define DESCRIPTOR_INPUT_LAST (3 << 12)
+#define DESCRIPTOR_STATUS (1 << 11)
+#define DESCRIPTOR_KEY_IMMEDIATE (2 << 8)
+#define DESCRIPTOR_PING (1 << 7)
+#define DESCRIPTOR_YY (1 << 6)
+#define DESCRIPTOR_NO_IRQ (0 << 4)
+#define DESCRIPTOR_IRQ_ERROR (1 << 4)
+#define DESCRIPTOR_IRQ_ALWAYS (3 << 4)
+#define DESCRIPTOR_BRANCH_ALWAYS (3 << 2)
+#define DESCRIPTOR_WAIT (3 << 0)
 
 struct descriptor {
 	__le16 req_count;
@@ -70,10 +70,10 @@ struct db_descriptor {
 	__le32 reserved1;
 } __attribute__((aligned(16)));
 
-#define control_set(regs) (regs)
-#define control_clear(regs) ((regs) + 4)
-#define command_ptr(regs) ((regs) + 12)
-#define context_match(regs) ((regs) + 16)
+#define CONTROL_SET(regs) (regs)
+#define CONTROL_CLEAR(regs) ((regs) + 4)
+#define COMMAND_PTR(regs) ((regs) + 12)
+#define CONTEXT_MATCH(regs) ((regs) + 16)
 
 struct ar_buffer {
 	struct descriptor descriptor;
@@ -112,12 +112,12 @@ struct context {
 	struct tasklet_struct tasklet;
 };
 
-#define it_header_sy(v) ((v) << 0)
-#define it_header_tcode(v) ((v) << 4)
-#define it_header_channel(v) ((v) << 8)
-#define it_header_tag(v) ((v) << 14)
-#define it_header_speed(v) ((v) << 16)
-#define it_header_data_length(v) ((v) << 16)
+#define IT_HEADER_SY(v) ((v) << 0)
+#define IT_HEADER_TCODE(v) ((v) << 4)
+#define IT_HEADER_CHANNEL(v) ((v) << 8)
+#define IT_HEADER_TAG(v) ((v) << 14)
+#define IT_HEADER_SPEED(v) ((v) << 16)
+#define IT_HEADER_DATA_LENGTH(v) ((v) << 16)
 
 struct iso_context {
 	struct fw_iso_context base;
@@ -256,9 +256,9 @@ static int ar_context_add_page(struct ar_context *ctx)
 	}
 
 	memset(&ab->descriptor, 0, sizeof ab->descriptor);
-	ab->descriptor.control = cpu_to_le16(descriptor_input_more |
-					     descriptor_status |
-					     descriptor_branch_always);
+	ab->descriptor.control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
+					     DESCRIPTOR_STATUS |
+					     DESCRIPTOR_BRANCH_ALWAYS);
 	offset = offsetof(struct ar_buffer, data);
 	ab->descriptor.req_count = cpu_to_le16(PAGE_SIZE - offset);
 	ab->descriptor.data_address = cpu_to_le32(ab_bus + offset);
@@ -271,7 +271,7 @@ static int ar_context_add_page(struct ar_context *ctx)
 	ctx->last_buffer->next = ab;
 	ctx->last_buffer = ab;
 
-	reg_write(ctx->ohci, control_set(ctx->regs), CONTEXT_WAKE);
+	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
 	flush_writes(ctx->ohci);
 
 	return 0;
@@ -416,8 +416,8 @@ ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci, u32 regs)
 	ctx->current_buffer = ab.next;
 	ctx->pointer = ctx->current_buffer->data;
 
-	reg_write(ctx->ohci, command_ptr(ctx->regs), ab.descriptor.branch_address);
-	reg_write(ctx->ohci, control_set(ctx->regs), CONTEXT_RUN);
+	reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ab.descriptor.branch_address);
+	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
 	flush_writes(ctx->ohci);
 
 	return 0;
@@ -488,7 +488,7 @@ context_init(struct context *ctx, struct fw_ohci *ohci,
 	 */
 
 	memset(ctx->head_descriptor, 0, sizeof *ctx->head_descriptor);
-	ctx->head_descriptor->control = cpu_to_le16(descriptor_output_last);
+	ctx->head_descriptor->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
 	ctx->head_descriptor->transfer_status = cpu_to_le16(0x8011);
 	ctx->head_descriptor++;
 
@@ -536,10 +536,10 @@ static void context_run(struct context *ctx, u32 extra)
 {
 	struct fw_ohci *ohci = ctx->ohci;
 
-	reg_write(ohci, command_ptr(ctx->regs),
+	reg_write(ohci, COMMAND_PTR(ctx->regs),
 		  le32_to_cpu(ctx->tail_descriptor_last->branch_address));
-	reg_write(ohci, control_clear(ctx->regs), ~0);
-	reg_write(ohci, control_set(ctx->regs), CONTEXT_RUN | extra);
+	reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
+	reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
 	flush_writes(ohci);
 }
 
@@ -557,7 +557,7 @@ static void context_append(struct context *ctx,
 	dma_sync_single_for_device(ctx->ohci->card.device, ctx->buffer_bus,
 				   ctx->buffer_size, DMA_TO_DEVICE);
 
-	reg_write(ctx->ohci, control_set(ctx->regs), CONTEXT_WAKE);
+	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
 	flush_writes(ctx->ohci);
 }
 
@@ -566,11 +566,11 @@ static void context_stop(struct context *ctx)
 	u32 reg;
 	int i;
 
-	reg_write(ctx->ohci, control_clear(ctx->regs), CONTEXT_RUN);
+	reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
 	flush_writes(ctx->ohci);
 
 	for (i = 0; i < 10; i++) {
-		reg = reg_read(ctx->ohci, control_set(ctx->regs));
+		reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
 		if ((reg & CONTEXT_ACTIVE) == 0)
 			break;
 
@@ -605,7 +605,7 @@ at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
 		return -1;
 	}
 
-	d[0].control = cpu_to_le16(descriptor_key_immediate);
+	d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
 	d[0].res_count = cpu_to_le16(packet->timestamp);
 
 	/*
@@ -660,9 +660,9 @@ at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
 		z = 2;
 	}
 
-	last->control |= cpu_to_le16(descriptor_output_last |
-				     descriptor_irq_always |
-				     descriptor_branch_always);
+	last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
+				     DESCRIPTOR_IRQ_ALWAYS |
+				     DESCRIPTOR_BRANCH_ALWAYS);
 
 	/* FIXME: Document how the locking works. */
 	if (ohci->generation != packet->generation) {
@@ -673,7 +673,7 @@ at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
 	context_append(ctx, d, z, 4 - z);
 
 	/* If the context isn't already running, start it up. */
-	reg = reg_read(ctx->ohci, control_set(ctx->regs));
+	reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
 	if ((reg & CONTEXT_RUN) == 0)
 		context_run(ctx, 0);
 
@@ -750,11 +750,11 @@ static int handle_at_packet(struct context *context,
 	return 1;
 }
 
-#define header_get_destination(q) (((q) >> 16) & 0xffff)
-#define header_get_tcode(q) (((q) >> 4) & 0x0f)
-#define header_get_offset_high(q) (((q) >> 0) & 0xffff)
-#define header_get_data_length(q) (((q) >> 16) & 0xffff)
-#define header_get_extended_tcode(q) (((q) >> 0) & 0xffff)
+#define HEADER_GET_DESTINATION(q) (((q) >> 16) & 0xffff)
+#define HEADER_GET_TCODE(q) (((q) >> 4) & 0x0f)
+#define HEADER_GET_OFFSET_HIGH(q) (((q) >> 0) & 0xffff)
+#define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff)
+#define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff)
 
 static void
 handle_local_rom(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
@@ -762,9 +762,9 @@ handle_local_rom(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
 	struct fw_packet response;
 	int tcode, length, i;
 
-	tcode = header_get_tcode(packet->header[0]);
+	tcode = HEADER_GET_TCODE(packet->header[0]);
 	if (TCODE_IS_BLOCK_PACKET(tcode))
-		length = header_get_data_length(packet->header[3]);
+		length = HEADER_GET_DATA_LENGTH(packet->header[3]);
 	else
 		length = 4;
 
@@ -791,10 +791,10 @@ handle_local_lock(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
 	__be32 *payload, lock_old;
 	u32 lock_arg, lock_data;
 
-	tcode = header_get_tcode(packet->header[0]);
-	length = header_get_data_length(packet->header[3]);
+	tcode = HEADER_GET_TCODE(packet->header[0]);
+	length = HEADER_GET_DATA_LENGTH(packet->header[3]);
 	payload = packet->payload;
-	ext_tcode = header_get_extended_tcode(packet->header[3]);
+	ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]);
 
 	if (tcode == TCODE_LOCK_REQUEST &&
 	    ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) {
@@ -838,7 +838,7 @@ handle_local_request(struct context *ctx, struct fw_packet *packet)
 
 	offset =
 		((unsigned long long)
-		 header_get_offset_high(packet->header[1]) << 32) |
+		 HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) |
 		packet->header[2];
 	csr = offset - CSR_REGISTER_BASE;
 
@@ -874,7 +874,7 @@ at_context_transmit(struct context *ctx, struct fw_packet *packet)
 
 	spin_lock_irqsave(&ctx->ohci->lock, flags);
 
-	if (header_get_destination(packet->header[0]) == ctx->ohci->node_id &&
+	if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id &&
 	    ctx->ohci->generation == packet->generation) {
 		spin_unlock_irqrestore(&ctx->ohci->lock, flags);
 		handle_local_request(ctx, packet);
@@ -1306,7 +1306,7 @@ static int handle_ir_dualbuffer_packet(struct context *context,
 
 	ctx->header_length = i;
 
-	if (le16_to_cpu(db->control) & descriptor_irq_always) {
+	if (le16_to_cpu(db->control) & DESCRIPTOR_IRQ_ALWAYS) {
 		ir_header = (__le32 *) (db + 1);
 		ctx->base.callback(&ctx->base,
 				   le32_to_cpu(ir_header[0]) & 0xffff,
@@ -1329,7 +1329,7 @@ static int handle_it_packet(struct context *context,
 		/* This descriptor isn't done yet, stop iteration. */
 		return 0;
 
-	if (le16_to_cpu(last->control) & descriptor_irq_always)
+	if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS)
 		ctx->base.callback(&ctx->base, le16_to_cpu(last->res_count),
 				   0, NULL, ctx->base.callback_data);
 
@@ -1428,7 +1428,7 @@ static int ohci_start_iso(struct fw_iso_context *base,
 
 		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index);
 		reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
-		reg_write(ohci, context_match(ctx->context.regs), match);
+		reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match);
 		context_run(&ctx->context, control);
 	}
 
@@ -1525,17 +1525,17 @@ ohci_queue_iso_transmit(struct fw_iso_context *base,
 		return -ENOMEM;
 
 	if (!p->skip) {
-		d[0].control = cpu_to_le16(descriptor_key_immediate);
+		d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
 		d[0].req_count = cpu_to_le16(8);
 
 		header = (__le32 *) &d[1];
-		header[0] = cpu_to_le32(it_header_sy(p->sy) |
-					it_header_tag(p->tag) |
-					it_header_tcode(TCODE_STREAM_DATA) |
-					it_header_channel(ctx->base.channel) |
-					it_header_speed(ctx->base.speed));
+		header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
+					IT_HEADER_TAG(p->tag) |
+					IT_HEADER_TCODE(TCODE_STREAM_DATA) |
+					IT_HEADER_CHANNEL(ctx->base.channel) |
+					IT_HEADER_SPEED(ctx->base.speed));
 		header[1] =
-			cpu_to_le32(it_header_data_length(p->header_length +
+			cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length +
 							  p->payload_length));
 	}
 
@@ -1562,14 +1562,14 @@ ohci_queue_iso_transmit(struct fw_iso_context *base,
 	}
 
 	if (p->interrupt)
-		irq = descriptor_irq_always;
+		irq = DESCRIPTOR_IRQ_ALWAYS;
 	else
-		irq = descriptor_no_irq;
+		irq = DESCRIPTOR_NO_IRQ;
 
 	last = z == 2 ? d : d + z - 1;
-	last->control |= cpu_to_le16(descriptor_output_last |
-				     descriptor_status |
-				     descriptor_branch_always |
+	last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
+				     DESCRIPTOR_STATUS |
+				     DESCRIPTOR_BRANCH_ALWAYS |
 				     irq);
 
 	context_append(&ctx->context, d, z, header_z);
@@ -1602,9 +1602,9 @@ ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
 			return -ENOMEM;
 
 		db = (struct db_descriptor *) d;
-		db->control = cpu_to_le16(descriptor_status |
-					  descriptor_branch_always |
-					  descriptor_wait);
+		db->control = cpu_to_le16(DESCRIPTOR_STATUS |
+					  DESCRIPTOR_BRANCH_ALWAYS |
+					  DESCRIPTOR_WAIT);
 		db->first_size = cpu_to_le16(ctx->base.header_size + 4);
 		context_append(&ctx->context, d, 2, 0);
 	}
@@ -1634,8 +1634,8 @@ ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
 			return -ENOMEM;
 
 		db = (struct db_descriptor *) d;
-		db->control = cpu_to_le16(descriptor_status |
-					  descriptor_branch_always);
+		db->control = cpu_to_le16(DESCRIPTOR_STATUS |
+					  DESCRIPTOR_BRANCH_ALWAYS);
 		db->first_size = cpu_to_le16(ctx->base.header_size + 4);
 		db->first_req_count = cpu_to_le16(header_size);
 		db->first_res_count = db->first_req_count;
@@ -1652,7 +1652,7 @@ ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
 		db->second_buffer = cpu_to_le32(page_bus + offset);
 
 		if (p->interrupt && length == rest)
-			db->control |= cpu_to_le16(descriptor_irq_always);
+			db->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
 
 		context_append(&ctx->context, d, z, header_z);
 		offset = (offset + length) & ~PAGE_MASK;
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c
index 3308bc089beb..196de46c00b6 100644
--- a/drivers/firewire/fw-sbp2.c
+++ b/drivers/firewire/fw-sbp2.c
@@ -123,14 +123,14 @@ struct sbp2_device {
 #define SBP2_STATUS_ILLEGAL_REQUEST 0x2
 #define SBP2_STATUS_VENDOR_DEPENDENT 0x3
 
-#define status_get_orb_high(v) ((v).status & 0xffff)
-#define status_get_sbp_status(v) (((v).status >> 16) & 0xff)
-#define status_get_len(v) (((v).status >> 24) & 0x07)
-#define status_get_dead(v) (((v).status >> 27) & 0x01)
-#define status_get_response(v) (((v).status >> 28) & 0x03)
-#define status_get_source(v) (((v).status >> 30) & 0x03)
-#define status_get_orb_low(v) ((v).orb_low)
-#define status_get_data(v) ((v).data)
+#define STATUS_GET_ORB_HIGH(v) ((v).status & 0xffff)
+#define STATUS_GET_SBP_STATUS(v) (((v).status >> 16) & 0xff)
+#define STATUS_GET_LEN(v) (((v).status >> 24) & 0x07)
+#define STATUS_GET_DEAD(v) (((v).status >> 27) & 0x01)
+#define STATUS_GET_RESPONSE(v) (((v).status >> 28) & 0x03)
+#define STATUS_GET_SOURCE(v) (((v).status >> 30) & 0x03)
+#define STATUS_GET_ORB_LOW(v) ((v).orb_low)
+#define STATUS_GET_DATA(v) ((v).data)
 
 struct sbp2_status {
 	u32 status;
@@ -152,15 +152,15 @@ struct sbp2_orb {
 	struct list_head link;
 };
 
-#define management_orb_lun(v) ((v))
-#define management_orb_function(v) ((v) << 16)
-#define management_orb_reconnect(v) ((v) << 20)
-#define management_orb_exclusive ((1) << 28)
-#define management_orb_request_format(v) ((v) << 29)
-#define management_orb_notify ((1) << 31)
+#define MANAGEMENT_ORB_LUN(v) ((v))
+#define MANAGEMENT_ORB_FUNCTION(v) ((v) << 16)
+#define MANAGEMENT_ORB_RECONNECT(v) ((v) << 20)
+#define MANAGEMENT_ORB_EXCLUSIVE ((1) << 28)
+#define MANAGEMENT_ORB_REQUEST_FORMAT(v) ((v) << 29)
+#define MANAGEMENT_ORB_NOTIFY ((1) << 31)
 
-#define management_orb_response_length(v) ((v))
-#define management_orb_password_length(v) ((v) << 16)
+#define MANAGEMENT_ORB_RESPONSE_LENGTH(v) ((v))
+#define MANAGEMENT_ORB_PASSWORD_LENGTH(v) ((v) << 16)
 
 struct sbp2_management_orb {
 	struct sbp2_orb base;
@@ -177,23 +177,22 @@ struct sbp2_management_orb {
 	struct sbp2_status status;
 };
 
-#define login_response_get_login_id(v) ((v).misc & 0xffff)
-#define login_response_get_length(v) (((v).misc >> 16) & 0xffff)
+#define LOGIN_RESPONSE_GET_LOGIN_ID(v) ((v).misc & 0xffff)
+#define LOGIN_RESPONSE_GET_LENGTH(v) (((v).misc >> 16) & 0xffff)
 
 struct sbp2_login_response {
 	u32 misc;
 	struct sbp2_pointer command_block_agent;
 	u32 reconnect_hold;
 };
-
-#define command_orb_data_size(v) ((v))
-#define command_orb_page_size(v) ((v) << 16)
-#define command_orb_page_table_present ((1) << 19)
-#define command_orb_max_payload(v) ((v) << 20)
-#define command_orb_speed(v) ((v) << 24)
-#define command_orb_direction(v) ((v) << 27)
-#define command_orb_request_format(v) ((v) << 29)
-#define command_orb_notify ((1) << 31)
+#define COMMAND_ORB_DATA_SIZE(v) ((v))
+#define COMMAND_ORB_PAGE_SIZE(v) ((v) << 16)
+#define COMMAND_ORB_PAGE_TABLE_PRESENT ((1) << 19)
+#define COMMAND_ORB_MAX_PAYLOAD(v) ((v) << 20)
+#define COMMAND_ORB_SPEED(v) ((v) << 24)
+#define COMMAND_ORB_DIRECTION(v) ((v) << 27)
+#define COMMAND_ORB_REQUEST_FORMAT(v) ((v) << 29)
+#define COMMAND_ORB_NOTIFY ((1) << 31)
 
 struct sbp2_command_orb {
 	struct sbp2_orb base;
@@ -290,7 +289,7 @@ sbp2_status_write(struct fw_card *card, struct fw_request *request,
 	fw_memcpy_from_be32(&status, payload, header_size);
 	if (length > header_size)
 		memcpy(status.data, payload + 8, length - header_size);
-	if (status_get_source(status) == 2 || status_get_source(status) == 3) {
+	if (STATUS_GET_SOURCE(status) == 2 || STATUS_GET_SOURCE(status) == 3) {
 		fw_notify("non-orb related status write, not handled\n");
 		fw_send_response(card, request, RCODE_COMPLETE);
 		return;
@@ -299,8 +298,8 @@ sbp2_status_write(struct fw_card *card, struct fw_request *request,
 	/* Lookup the orb corresponding to this status write. */
 	spin_lock_irqsave(&card->lock, flags);
 	list_for_each_entry(orb, &sd->orb_list, link) {
-		if (status_get_orb_high(status) == 0 &&
-		    status_get_orb_low(status) == orb->request_bus &&
+		if (STATUS_GET_ORB_HIGH(status) == 0 &&
+		    STATUS_GET_ORB_LOW(status) == orb->request_bus &&
 		    orb->rcode == RCODE_COMPLETE) {
 			list_del(&orb->link);
 			break;
@@ -425,11 +424,11 @@ sbp2_send_management_orb(struct fw_unit *unit, int node_id, int generation,
 	orb->request.response.low = orb->response_bus;
 
 	orb->request.misc =
-		management_orb_notify |
-		management_orb_function(function) |
-		management_orb_lun(lun);
+		MANAGEMENT_ORB_NOTIFY |
+		MANAGEMENT_ORB_FUNCTION(function) |
+		MANAGEMENT_ORB_LUN(lun);
 	orb->request.length =
-		management_orb_response_length(sizeof orb->response);
+		MANAGEMENT_ORB_RESPONSE_LENGTH(sizeof orb->response);
 
 	orb->request.status_fifo.high = sd->address_handler.offset >> 32;
 	orb->request.status_fifo.low = sd->address_handler.offset;
@@ -441,8 +440,8 @@ sbp2_send_management_orb(struct fw_unit *unit, int node_id, int generation,
 	 */
 	if (function == SBP2_LOGIN_REQUEST) {
 		orb->request.misc |=
-			management_orb_exclusive |
-			management_orb_reconnect(0);
+			MANAGEMENT_ORB_EXCLUSIVE |
+			MANAGEMENT_ORB_RECONNECT(0);
 	}
 
 	fw_memcpy_to_be32(&orb->request, &orb->request, sizeof orb->request);
@@ -469,11 +468,11 @@ sbp2_send_management_orb(struct fw_unit *unit, int node_id, int generation,
 		goto out;
 	}
 
-	if (status_get_response(orb->status) != 0 ||
-	    status_get_sbp_status(orb->status) != 0) {
+	if (STATUS_GET_RESPONSE(orb->status) != 0 ||
+	    STATUS_GET_SBP_STATUS(orb->status) != 0) {
 		fw_error("error status: %d:%d\n",
-			 status_get_response(orb->status),
-			 status_get_sbp_status(orb->status));
+			 STATUS_GET_RESPONSE(orb->status),
+			 STATUS_GET_SBP_STATUS(orb->status));
 		goto out;
 	}
 
@@ -577,7 +576,7 @@ static void sbp2_login(struct work_struct *work)
 	sd->command_block_agent_address =
 		((u64) (response.command_block_agent.high & 0xffff) << 32) |
 		response.command_block_agent.low;
-	sd->login_id = login_response_get_login_id(response);
+	sd->login_id = LOGIN_RESPONSE_GET_LOGIN_ID(response);
 
 	fw_notify("logged in to sbp2 unit %s (%d retries)\n",
 		  unit->device.bus_id, sd->retries);
@@ -828,10 +827,10 @@ complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
 	int result;
 
 	if (status != NULL) {
-		if (status_get_dead(*status))
+		if (STATUS_GET_DEAD(*status))
 			sbp2_agent_reset(unit);
 
-		switch (status_get_response(*status)) {
+		switch (STATUS_GET_RESPONSE(*status)) {
 		case SBP2_STATUS_REQUEST_COMPLETE:
 			result = DID_OK << 16;
 			break;
@@ -845,8 +844,8 @@ complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
 			break;
 		}
 
-		if (result == DID_OK << 16 && status_get_len(*status) > 1)
-			result = sbp2_status_to_sense_data(status_get_data(*status),
+		if (result == DID_OK << 16 && STATUS_GET_LEN(*status) > 1)
+			result = sbp2_status_to_sense_data(STATUS_GET_DATA(*status),
 							   orb->cmd->sense_buffer);
 	} else {
 		/*
@@ -906,7 +905,7 @@ static void sbp2_command_orb_map_scatterlist(struct sbp2_command_orb *orb)
 		orb->request.data_descriptor.high = sd->address_high;
 		orb->request.data_descriptor.low = sg_dma_address(sg);
 		orb->request.misc |=
-			command_orb_data_size(sg_dma_len(sg));
+			COMMAND_ORB_DATA_SIZE(sg_dma_len(sg));
 		return;
 	}
 
@@ -943,8 +942,8 @@ static void sbp2_command_orb_map_scatterlist(struct sbp2_command_orb *orb)
 	orb->request.data_descriptor.high = sd->address_high;
 	orb->request.data_descriptor.low = orb->page_table_bus;
 	orb->request.misc |=
-		command_orb_page_table_present |
-		command_orb_data_size(j);
+		COMMAND_ORB_PAGE_TABLE_PRESENT |
+		COMMAND_ORB_DATA_SIZE(j);
 
 	fw_memcpy_to_be32(orb->page_table, orb->page_table, size);
 }
@@ -969,7 +968,7 @@ static void sbp2_command_orb_map_buffer(struct sbp2_command_orb *orb)
 	orb->request.data_descriptor.high = sd->address_high;
 	orb->request.data_descriptor.low = orb->request_buffer_bus;
 	orb->request.misc |=
-		command_orb_data_size(orb->cmd->request_bufflen);
+		COMMAND_ORB_DATA_SIZE(orb->cmd->request_bufflen);
 }
 
 /* SCSI stack integration */
@@ -1017,16 +1016,16 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
 	 * if we set this to max_speed + 7, we get the right value.
 	 */
 	orb->request.misc =
-		command_orb_max_payload(device->node->max_speed + 7) |
-		command_orb_speed(device->node->max_speed) |
-		command_orb_notify;
+		COMMAND_ORB_MAX_PAYLOAD(device->node->max_speed + 7) |
+		COMMAND_ORB_SPEED(device->node->max_speed) |
+		COMMAND_ORB_NOTIFY;
 
 	if (cmd->sc_data_direction == DMA_FROM_DEVICE)
 		orb->request.misc |=
-			command_orb_direction(SBP2_DIRECTION_FROM_MEDIA);
+			COMMAND_ORB_DIRECTION(SBP2_DIRECTION_FROM_MEDIA);
 	else if (cmd->sc_data_direction == DMA_TO_DEVICE)
 		orb->request.misc |=
-			command_orb_direction(SBP2_DIRECTION_TO_MEDIA);
+			COMMAND_ORB_DIRECTION(SBP2_DIRECTION_TO_MEDIA);
 
 	if (cmd->use_sg) {
 		sbp2_command_orb_map_scatterlist(orb);
diff --git a/drivers/firewire/fw-topology.c b/drivers/firewire/fw-topology.c
index 490490205ea9..b9dce70b3aed 100644
--- a/drivers/firewire/fw-topology.c
+++ b/drivers/firewire/fw-topology.c
@@ -24,16 +24,16 @@
 #include "fw-transaction.h"
 #include "fw-topology.h"
 
-#define self_id_phy_id(q) (((q) >> 24) & 0x3f)
-#define self_id_extended(q) (((q) >> 23) & 0x01)
-#define self_id_link_on(q) (((q) >> 22) & 0x01)
-#define self_id_gap_count(q) (((q) >> 16) & 0x3f)
-#define self_id_phy_speed(q) (((q) >> 14) & 0x03)
-#define self_id_contender(q) (((q) >> 11) & 0x01)
-#define self_id_phy_initiator(q) (((q) >> 1) & 0x01)
-#define self_id_more_packets(q) (((q) >> 0) & 0x01)
+#define SELF_ID_PHY_ID(q) (((q) >> 24) & 0x3f)
+#define SELF_ID_EXTENDED(q) (((q) >> 23) & 0x01)
+#define SELF_ID_LINK_ON(q) (((q) >> 22) & 0x01)
+#define SELF_ID_GAP_COUNT(q) (((q) >> 16) & 0x3f)
+#define SELF_ID_PHY_SPEED(q) (((q) >> 14) & 0x03)
+#define SELF_ID_CONTENDER(q) (((q) >> 11) & 0x01)
+#define SELF_ID_PHY_INITIATOR(q) (((q) >> 1) & 0x01)
+#define SELF_ID_MORE_PACKETS(q) (((q) >> 0) & 0x01)
 
-#define self_id_ext_sequence(q) (((q) >> 20) & 0x07)
+#define SELF_ID_EXT_SEQUENCE(q) (((q) >> 20) & 0x07)
 
 static u32 *count_ports(u32 *sid, int *total_port_count, int *child_port_count)
 {
@@ -61,7 +61,7 @@ static u32 *count_ports(u32 *sid, int *total_port_count, int *child_port_count)
 
 		shift -= 2;
 		if (shift == 0) {
-			if (!self_id_more_packets(q))
+			if (!SELF_ID_MORE_PACKETS(q))
 				return sid + 1;
 
 			shift = 16;
@@ -75,8 +75,8 @@ static u32 *count_ports(u32 *sid, int *total_port_count, int *child_port_count)
 			 * packets increase as expected.
 			 */
 
-			if (!self_id_extended(q) ||
-			    seq != self_id_ext_sequence(q))
+			if (!SELF_ID_EXTENDED(q) ||
+			    seq != SELF_ID_EXT_SEQUENCE(q))
 				return NULL;
 
 			seq++;
@@ -103,9 +103,9 @@ static struct fw_node *fw_node_create(u32 sid, int port_count, int color)
 		return NULL;
 
 	node->color = color;
-	node->node_id = LOCAL_BUS | self_id_phy_id(sid);
-	node->link_on = self_id_link_on(sid);
-	node->phy_speed = self_id_phy_speed(sid);
+	node->node_id = LOCAL_BUS | SELF_ID_PHY_ID(sid);
+	node->link_on = SELF_ID_LINK_ON(sid);
+	node->phy_speed = SELF_ID_PHY_SPEED(sid);
 	node->port_count = port_count;
 
 	atomic_set(&node->ref_count, 1);
@@ -181,7 +181,7 @@ static struct fw_node *build_tree(struct fw_card *card,
 	end = sid + self_id_count;
 	phy_id = 0;
 	irm_node = NULL;
-	gap_count = self_id_gap_count(*sid);
+	gap_count = SELF_ID_GAP_COUNT(*sid);
 	topology_type = 0;
 
 	while (sid < end) {
@@ -193,9 +193,9 @@ static struct fw_node *build_tree(struct fw_card *card,
 		}
 
 		q = *sid;
-		if (phy_id != self_id_phy_id(q)) {
+		if (phy_id != SELF_ID_PHY_ID(q)) {
 			fw_error("PHY ID mismatch in self ID: %d != %d.\n",
-				 phy_id, self_id_phy_id(q));
+				 phy_id, SELF_ID_PHY_ID(q));
 			return NULL;
 		}
 
@@ -221,7 +221,7 @@ static struct fw_node *build_tree(struct fw_card *card,
 		if (phy_id == (card->node_id & 0x3f))
 			local_node = node;
 
-		if (self_id_contender(q))
+		if (SELF_ID_CONTENDER(q))
 			irm_node = node;
 
 		if (node->phy_speed == SCODE_BETA)
@@ -283,7 +283,7 @@ static struct fw_node *build_tree(struct fw_card *card,
 		 * setting, we fall back to 63 which will force a gap
 		 * count reconfiguration and a reset.
 		 */
-		if (self_id_gap_count(q) != gap_count)
+		if (SELF_ID_GAP_COUNT(q) != gap_count)
 			gap_count = 63;
 
 		update_hop_count(node);
diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/fw-transaction.c
index e4355de710fa..01c438f1c670 100644
--- a/drivers/firewire/fw-transaction.c
+++ b/drivers/firewire/fw-transaction.c
@@ -34,29 +34,29 @@
 #include "fw-topology.h"
 #include "fw-device.h"
 
-#define header_pri(pri) ((pri) << 0)
-#define header_tcode(tcode) ((tcode) << 4)
-#define header_retry(retry) ((retry) << 8)
-#define header_tlabel(tlabel) ((tlabel) << 10)
-#define header_destination(destination) ((destination) << 16)
-#define header_source(source) ((source) << 16)
-#define header_rcode(rcode) ((rcode) << 12)
-#define header_offset_high(offset_high) ((offset_high) << 0)
-#define header_data_length(length) ((length) << 16)
-#define header_extended_tcode(tcode) ((tcode) << 0)
+#define HEADER_PRI(pri) ((pri) << 0)
+#define HEADER_TCODE(tcode) ((tcode) << 4)
+#define HEADER_RETRY(retry) ((retry) << 8)
+#define HEADER_TLABEL(tlabel) ((tlabel) << 10)
+#define HEADER_DESTINATION(destination) ((destination) << 16)
+#define HEADER_SOURCE(source) ((source) << 16)
+#define HEADER_RCODE(rcode) ((rcode) << 12)
+#define HEADER_OFFSET_HIGH(offset_high) ((offset_high) << 0)
+#define HEADER_DATA_LENGTH(length) ((length) << 16)
+#define HEADER_EXTENDED_TCODE(tcode) ((tcode) << 0)
 
-#define header_get_tcode(q) (((q) >> 4) & 0x0f)
-#define header_get_tlabel(q) (((q) >> 10) & 0x3f)
-#define header_get_rcode(q) (((q) >> 12) & 0x0f)
-#define header_get_destination(q) (((q) >> 16) & 0xffff)
-#define header_get_source(q) (((q) >> 16) & 0xffff)
-#define header_get_offset_high(q) (((q) >> 0) & 0xffff)
-#define header_get_data_length(q) (((q) >> 16) & 0xffff)
-#define header_get_extended_tcode(q) (((q) >> 0) & 0xffff)
+#define HEADER_GET_TCODE(q) (((q) >> 4) & 0x0f)
+#define HEADER_GET_TLABEL(q) (((q) >> 10) & 0x3f)
+#define HEADER_GET_RCODE(q) (((q) >> 12) & 0x0f)
+#define HEADER_GET_DESTINATION(q) (((q) >> 16) & 0xffff)
+#define HEADER_GET_SOURCE(q) (((q) >> 16) & 0xffff)
+#define HEADER_GET_OFFSET_HIGH(q) (((q) >> 0) & 0xffff)
+#define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff)
+#define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff)
 
-#define phy_config_gap_count(gap_count) (((gap_count) << 16) | (1 << 22))
-#define phy_config_root_id(node_id) ((((node_id) & 0x3f) << 24) | (1 << 23))
-#define phy_identifier(id) ((id) << 30)
+#define PHY_CONFIG_GAP_COUNT(gap_count) (((gap_count) << 16) | (1 << 22))
+#define PHY_CONFIG_ROOT_ID(node_id) ((((node_id) & 0x3f) << 24) | (1 << 23))
+#define PHY_IDENTIFIER(id) ((id) << 30)
 
 static int
 close_transaction(struct fw_transaction *transaction,
@@ -159,12 +159,12 @@ fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
 		ext_tcode = 0;
 
 	packet->header[0] =
-		header_retry(RETRY_X) |
-		header_tlabel(tlabel) |
-		header_tcode(tcode) |
-		header_destination(node_id);
+		HEADER_RETRY(RETRY_X) |
+		HEADER_TLABEL(tlabel) |
+		HEADER_TCODE(tcode) |
+		HEADER_DESTINATION(node_id);
 	packet->header[1] =
-		header_offset_high(offset >> 32) | header_source(source_id);
+		HEADER_OFFSET_HIGH(offset >> 32) | HEADER_SOURCE(source_id);
 	packet->header[2] =
 		offset;
 
@@ -178,8 +178,8 @@ fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
 	case TCODE_LOCK_REQUEST:
 	case TCODE_WRITE_BLOCK_REQUEST:
 		packet->header[3] =
-			header_data_length(length) |
-			header_extended_tcode(ext_tcode);
+			HEADER_DATA_LENGTH(length) |
+			HEADER_EXTENDED_TCODE(ext_tcode);
 		packet->header_length = 16;
 		packet->payload = payload;
 		packet->payload_length = length;
@@ -192,8 +192,8 @@ fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
 
 	case TCODE_READ_BLOCK_REQUEST:
 		packet->header[3] =
-			header_data_length(length) |
-			header_extended_tcode(ext_tcode);
+			HEADER_DATA_LENGTH(length) |
+			HEADER_EXTENDED_TCODE(ext_tcode);
 		packet->header_length = 16;
 		packet->payload_length = 0;
 		break;
@@ -325,9 +325,9 @@ void fw_send_phy_config(struct fw_card *card,
 {
 	u32 q;
 
-	q = phy_identifier(PHY_PACKET_CONFIG) |
-		phy_config_root_id(node_id) |
-		phy_config_gap_count(gap_count);
+	q = PHY_IDENTIFIER(PHY_PACKET_CONFIG) |
+		PHY_CONFIG_ROOT_ID(node_id) |
+		PHY_CONFIG_GAP_COUNT(gap_count);
 
 	send_phy_packet(card, q, generation);
 }
@@ -485,32 +485,32 @@ fw_fill_response(struct fw_packet *response, u32 *request_header,
 {
 	int tcode, tlabel, extended_tcode, source, destination;
 
-	tcode = header_get_tcode(request_header[0]);
-	tlabel = header_get_tlabel(request_header[0]);
-	source = header_get_destination(request_header[0]);
-	destination = header_get_source(request_header[1]);
-	extended_tcode = header_get_extended_tcode(request_header[3]);
+	tcode = HEADER_GET_TCODE(request_header[0]);
+	tlabel = HEADER_GET_TLABEL(request_header[0]);
+	source = HEADER_GET_DESTINATION(request_header[0]);
+	destination = HEADER_GET_SOURCE(request_header[1]);
+	extended_tcode = HEADER_GET_EXTENDED_TCODE(request_header[3]);
 
 	response->header[0] =
-		header_retry(RETRY_1) |
-		header_tlabel(tlabel) |
-		header_destination(destination);
+		HEADER_RETRY(RETRY_1) |
+		HEADER_TLABEL(tlabel) |
+		HEADER_DESTINATION(destination);
 	response->header[1] =
-		header_source(source) |
-		header_rcode(rcode);
+		HEADER_SOURCE(source) |
+		HEADER_RCODE(rcode);
 	response->header[2] = 0;
 
 	switch (tcode) {
 	case TCODE_WRITE_QUADLET_REQUEST:
 	case TCODE_WRITE_BLOCK_REQUEST:
-		response->header[0] |= header_tcode(TCODE_WRITE_RESPONSE);
+		response->header[0] |= HEADER_TCODE(TCODE_WRITE_RESPONSE);
 		response->header_length = 12;
 		response->payload_length = 0;
 		break;
 
 	case TCODE_READ_QUADLET_REQUEST:
 		response->header[0] |=
-			header_tcode(TCODE_READ_QUADLET_RESPONSE);
+			HEADER_TCODE(TCODE_READ_QUADLET_RESPONSE);
 		if (payload != NULL)
 			response->header[3] = *(u32 *)payload;
 		else
@@ -521,10 +521,10 @@ fw_fill_response(struct fw_packet *response, u32 *request_header,
 
 	case TCODE_READ_BLOCK_REQUEST:
 	case TCODE_LOCK_REQUEST:
-		response->header[0] |= header_tcode(tcode + 2);
+		response->header[0] |= HEADER_TCODE(tcode + 2);
 		response->header[3] =
-			header_data_length(length) |
-			header_extended_tcode(extended_tcode);
+			HEADER_DATA_LENGTH(length) |
+			HEADER_EXTENDED_TCODE(extended_tcode);
 		response->header_length = 16;
 		response->payload = payload;
 		response->payload_length = length;
@@ -544,7 +544,7 @@ allocate_request(struct fw_packet *p)
 	u32 *data, length;
 	int request_tcode, t;
 
-	request_tcode = header_get_tcode(p->header[0]);
+	request_tcode = HEADER_GET_TCODE(p->header[0]);
 	switch (request_tcode) {
 	case TCODE_WRITE_QUADLET_REQUEST:
 		data = &p->header[3];
@@ -554,7 +554,7 @@ allocate_request(struct fw_packet *p)
 	case TCODE_WRITE_BLOCK_REQUEST:
 	case TCODE_LOCK_REQUEST:
 		data = p->payload;
-		length = header_get_data_length(p->header[3]);
+		length = HEADER_GET_DATA_LENGTH(p->header[3]);
 		break;
 
 	case TCODE_READ_QUADLET_REQUEST:
@@ -564,7 +564,7 @@ allocate_request(struct fw_packet *p)
 
 	case TCODE_READ_BLOCK_REQUEST:
 		data = NULL;
-		length = header_get_data_length(p->header[3]);
+		length = HEADER_GET_DATA_LENGTH(p->header[3]);
 		break;
 
 	default:
@@ -644,10 +644,10 @@ fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
 
 	offset =
 		((unsigned long long)
-		 header_get_offset_high(p->header[1]) << 32) | p->header[2];
-	tcode = header_get_tcode(p->header[0]);
-	destination = header_get_destination(p->header[0]);
-	source = header_get_source(p->header[0]);
+		 HEADER_GET_OFFSET_HIGH(p->header[1]) << 32) | p->header[2];
+	tcode = HEADER_GET_TCODE(p->header[0]);
+	destination = HEADER_GET_DESTINATION(p->header[0]);
+	source = HEADER_GET_SOURCE(p->header[0]);
 
 	spin_lock_irqsave(&address_handler_lock, flags);
 	handler = lookup_enclosing_address_handler(&address_handler_list,
@@ -682,11 +682,11 @@ fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
 	size_t data_length;
 	int tcode, tlabel, destination, source, rcode;
 
-	tcode = header_get_tcode(p->header[0]);
-	tlabel = header_get_tlabel(p->header[0]);
-	destination = header_get_destination(p->header[0]);
-	source = header_get_source(p->header[1]);
-	rcode = header_get_rcode(p->header[1]);
+	tcode = HEADER_GET_TCODE(p->header[0]);
+	tlabel = HEADER_GET_TLABEL(p->header[0]);
+	destination = HEADER_GET_DESTINATION(p->header[0]);
+	source = HEADER_GET_SOURCE(p->header[1]);
+	rcode = HEADER_GET_RCODE(p->header[1]);
 
 	spin_lock_irqsave(&card->lock, flags);
 	list_for_each_entry(t, &card->transaction_list, link) {
@@ -723,7 +723,7 @@ fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
 	case TCODE_READ_BLOCK_RESPONSE:
 	case TCODE_LOCK_RESPONSE:
 		data = p->payload;
-		data_length = header_get_data_length(p->header[3]);
+		data_length = HEADER_GET_DATA_LENGTH(p->header[3]);
 		break;
 
 	default:
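As a closing illustration (again not part of the commit), the HEADER_* pack macros and the HEADER_GET_* extract macros in fw-transaction.c are bitwise inverses for their respective fields. A minimal standalone check, with arbitrary sample values, might look like this:

#include <assert.h>
#include <stdint.h>

/* Definitions copied from the renamed macros in this patch. */
#define HEADER_TCODE(tcode)             ((tcode) << 4)
#define HEADER_TLABEL(tlabel)           ((tlabel) << 10)
#define HEADER_DESTINATION(destination) ((destination) << 16)

#define HEADER_GET_TCODE(q)             (((q) >> 4) & 0x0f)
#define HEADER_GET_TLABEL(q)            (((q) >> 10) & 0x3f)
#define HEADER_GET_DESTINATION(q)       (((q) >> 16) & 0xffff)

int main(void)
{
        /* Arbitrary sample field values, for illustration only. */
        uint32_t tcode = 0x1, tlabel = 0x2a, dest = 0xffc0;

        uint32_t q = HEADER_TCODE(tcode) |
                     HEADER_TLABEL(tlabel) |
                     HEADER_DESTINATION(dest);

        /* Each getter recovers exactly the field that was packed. */
        assert(HEADER_GET_TCODE(q) == tcode);
        assert(HEADER_GET_TLABEL(q) == tlabel);
        assert(HEADER_GET_DESTINATION(q) == dest);
        return 0;
}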