author     Kristian Høgsberg <krh@redhat.com>          2007-03-07 12:12:49 -0500
committer  Stefan Richter <stefanr@s5r6.in-berlin.de>  2007-03-09 16:03:11 -0500
commit     f319b6a02f12c3712eb64eee6a23584367cb3588 (patch)
tree       8f4fbfac565193a4bf31c9588f495b1429e6ef4b  /drivers/firewire
parent     2603bf219e9bef3396b96b65326de7db27958c95 (diff)
firewire: Move async transmit to use the general context code.
The old async transmit context handling was starting and stopping DMA for
every packet transmission.  This could cause silently failing packet
transmission if the DMA was reprogrammed too close to being stopped.  The
general context code keeps DMA running at all times and fixes this problem.
It's also a nice cleanup.

Signed-off-by: Kristian Høgsberg <krh@redhat.com>
Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>
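The idea behind the fix, in rough outline: instead of programming and starting
a transmit context for every packet and stopping it afterwards, work is
appended to a descriptor ring that stays live, and the hardware is only woken
if it is not already active.  Below is a minimal stand-alone sketch of that
pattern; it is plain user-space C for illustration only, and names such as
at_ring, ring_append() and ring_poll() are invented here, not driver API.

#include <stdio.h>
#include <stdbool.h>

#define RING_SIZE 8

/* Toy model of an always-available transmit ring. */
struct at_ring {
	const char *payload[RING_SIZE];	/* queued packets */
	unsigned int head, tail;	/* producer / consumer indices */
	bool running;			/* models "context is active" */
};

/* Append a packet; wake the ring only if it is not already running. */
static int ring_append(struct at_ring *r, const char *pkt)
{
	if ((r->head + 1) % RING_SIZE == r->tail)
		return -1;		/* ring full, caller reports an error */
	r->payload[r->head] = pkt;
	r->head = (r->head + 1) % RING_SIZE;
	if (!r->running) {		/* wake, never re-program from scratch */
		r->running = true;
		printf("context started\n");
	}
	return 0;
}

/* Consume completed work; the ring stays running between packets. */
static void ring_poll(struct at_ring *r)
{
	while (r->tail != r->head) {
		printf("completed: %s\n", r->payload[r->tail]);
		r->tail = (r->tail + 1) % RING_SIZE;
	}
}

int main(void)
{
	struct at_ring ring = { .head = 0, .tail = 0, .running = false };

	ring_append(&ring, "request A");
	ring_append(&ring, "request B");	/* no stop/start in between */
	ring_poll(&ring);
	return 0;
}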
Diffstat (limited to 'drivers/firewire')
-rw-r--r--  drivers/firewire/fw-ohci.c         | 360
-rw-r--r--  drivers/firewire/fw-transaction.h  |   3
2 files changed, 149 insertions(+), 214 deletions(-)
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
index 37467923e98b..9e8a8f909303 100644
--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/fw-ohci.c
@@ -112,25 +112,6 @@ struct context {
 	struct tasklet_struct tasklet;
 };
 
-struct at_context {
-	struct fw_ohci *ohci;
-	dma_addr_t descriptor_bus;
-	dma_addr_t buffer_bus;
-	struct fw_packet *current_packet;
-
-	struct list_head list;
-
-	struct {
-		struct descriptor more;
-		__le32 header[4];
-		struct descriptor last;
-	} d;
-
-	u32 regs;
-
-	struct tasklet_struct tasklet;
-};
-
 #define it_header_sy(v) ((v) << 0)
 #define it_header_tcode(v) ((v) << 4)
 #define it_header_channel(v) ((v) << 8)
@@ -173,8 +154,8 @@ struct fw_ohci {
 
 	struct ar_context ar_request_ctx;
 	struct ar_context ar_response_ctx;
-	struct at_context at_request_ctx;
-	struct at_context at_response_ctx;
+	struct context at_request_ctx;
+	struct context at_response_ctx;
 
 	u32 it_context_mask;
 	struct iso_context *it_context_list;
@@ -210,6 +191,8 @@ static inline struct fw_ohci *fw_ohci(struct fw_card *card)
 #define SELF_ID_BUF_SIZE 0x800
 #define OHCI_TCODE_PHY_PACKET 0x0e
 #define OHCI_VERSION_1_1 0x010010
+#define ISO_BUFFER_SIZE (64 * 1024)
+#define AT_BUFFER_SIZE 4096
 
 static char ohci_driver_name[] = KBUILD_MODNAME;
 
@@ -587,210 +570,166 @@ static void context_stop(struct context *ctx)
 	}
 }
 
-static void
-do_packet_callbacks(struct fw_ohci *ohci, struct list_head *list)
-{
-	struct fw_packet *p, *next;
-
-	list_for_each_entry_safe(p, next, list, link)
-		p->callback(p, &ohci->card, p->ack);
-}
-
-static void
-complete_transmission(struct fw_packet *packet,
-		      int ack, struct list_head *list)
-{
-	list_move_tail(&packet->link, list);
-	packet->ack = ack;
-}
+struct driver_data {
+	struct fw_packet *packet;
+};
 
-/* This function prepares the first packet in the context queue for
- * transmission. Must always be called with the ochi->lock held to
- * ensure proper generation handling and locking around packet queue
- * manipulation. */
-static void
-at_context_setup_packet(struct at_context *ctx, struct list_head *list)
+/* This function apppends a packet to the DMA queue for transmission.
+ * Must always be called with the ochi->lock held to ensure proper
+ * generation handling and locking around packet queue manipulation. */
+static int
+at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
 {
-	struct fw_packet *packet;
 	struct fw_ohci *ohci = ctx->ohci;
+	dma_addr_t d_bus, payload_bus;
+	struct driver_data *driver_data;
+	struct descriptor *d, *last;
+	__le32 *header;
 	int z, tcode;
+	u32 reg;
 
-	packet = fw_packet(ctx->list.next);
-
-	memset(&ctx->d, 0, sizeof ctx->d);
-	if (packet->payload_length > 0) {
-		packet->payload_bus = dma_map_single(ohci->card.device,
-						     packet->payload,
-						     packet->payload_length,
-						     DMA_TO_DEVICE);
-		if (dma_mapping_error(packet->payload_bus)) {
-			complete_transmission(packet, RCODE_SEND_ERROR, list);
-			return;
-		}
-
-		ctx->d.more.control =
-			cpu_to_le16(descriptor_output_more |
-				    descriptor_key_immediate);
-		ctx->d.more.req_count = cpu_to_le16(packet->header_length);
-		ctx->d.more.res_count = cpu_to_le16(packet->timestamp);
-		ctx->d.last.control =
-			cpu_to_le16(descriptor_output_last |
-				    descriptor_irq_always |
-				    descriptor_branch_always);
-		ctx->d.last.req_count = cpu_to_le16(packet->payload_length);
-		ctx->d.last.data_address = cpu_to_le32(packet->payload_bus);
-		z = 3;
-	} else {
-		ctx->d.more.control =
-			cpu_to_le16(descriptor_output_last |
-				    descriptor_key_immediate |
-				    descriptor_irq_always |
-				    descriptor_branch_always);
-		ctx->d.more.req_count = cpu_to_le16(packet->header_length);
-		ctx->d.more.res_count = cpu_to_le16(packet->timestamp);
-		z = 2;
+	d = context_get_descriptors(ctx, 4, &d_bus);
+	if (d == NULL) {
+		packet->ack = RCODE_SEND_ERROR;
+		return -1;
 	}
 
+	d[0].control = cpu_to_le16(descriptor_key_immediate);
+	d[0].res_count = cpu_to_le16(packet->timestamp);
+
 	/* The DMA format for asyncronous link packets is different
 	 * from the IEEE1394 layout, so shift the fields around
 	 * accordingly. If header_length is 8, it's a PHY packet, to
 	 * which we need to prepend an extra quadlet. */
+
+	header = (__le32 *) &d[1];
 	if (packet->header_length > 8) {
-		ctx->d.header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
+		header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
 					(packet->speed << 16));
-		ctx->d.header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
+		header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
 					(packet->header[0] & 0xffff0000));
-		ctx->d.header[2] = cpu_to_le32(packet->header[2]);
+		header[2] = cpu_to_le32(packet->header[2]);
 
 		tcode = (packet->header[0] >> 4) & 0x0f;
 		if (TCODE_IS_BLOCK_PACKET(tcode))
-			ctx->d.header[3] = cpu_to_le32(packet->header[3]);
+			header[3] = cpu_to_le32(packet->header[3]);
 		else
-			ctx->d.header[3] = packet->header[3];
+			header[3] = (__force __le32) packet->header[3];
+
+		d[0].req_count = cpu_to_le16(packet->header_length);
 	} else {
-		ctx->d.header[0] =
-			cpu_to_le32((OHCI1394_phy_tcode << 4) |
-				    (packet->speed << 16));
-		ctx->d.header[1] = cpu_to_le32(packet->header[0]);
-		ctx->d.header[2] = cpu_to_le32(packet->header[1]);
-		ctx->d.more.req_count = cpu_to_le16(12);
+		header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
+					(packet->speed << 16));
+		header[1] = cpu_to_le32(packet->header[0]);
+		header[2] = cpu_to_le32(packet->header[1]);
+		d[0].req_count = cpu_to_le16(12);
 	}
 
-	/* FIXME: Document how the locking works. */
-	if (ohci->generation == packet->generation) {
-		reg_write(ctx->ohci, command_ptr(ctx->regs),
-			  ctx->descriptor_bus | z);
-		reg_write(ctx->ohci, control_set(ctx->regs),
-			  CONTEXT_RUN | CONTEXT_WAKE);
-		ctx->current_packet = packet;
+	driver_data = (struct driver_data *) &d[3];
+	driver_data->packet = packet;
+
+	if (packet->payload_length > 0) {
+		payload_bus =
+			dma_map_single(ohci->card.device, packet->payload,
+				       packet->payload_length, DMA_TO_DEVICE);
+		if (dma_mapping_error(payload_bus)) {
+			packet->ack = RCODE_SEND_ERROR;
+			return -1;
+		}
+
+		d[2].req_count = cpu_to_le16(packet->payload_length);
+		d[2].data_address = cpu_to_le32(payload_bus);
+		last = &d[2];
+		z = 3;
 	} else {
-		/* We dont return error codes from this function; all
-		 * transmission errors are reported through the
-		 * callback. */
-		complete_transmission(packet, RCODE_GENERATION, list);
+		last = &d[0];
+		z = 2;
 	}
-}
 
-static void at_context_stop(struct at_context *ctx)
-{
-	u32 reg;
+	last->control |= cpu_to_le16(descriptor_output_last |
+				     descriptor_irq_always |
+				     descriptor_branch_always);
 
-	reg_write(ctx->ohci, control_clear(ctx->regs), CONTEXT_RUN);
+	/* FIXME: Document how the locking works. */
+	if (ohci->generation != packet->generation) {
+		packet->ack = RCODE_GENERATION;
+		return -1;
+	}
+
+	context_append(ctx, d, z, 4 - z);
 
+	/* If the context isn't already running, start it up. */
 	reg = reg_read(ctx->ohci, control_set(ctx->regs));
-	if (reg & CONTEXT_ACTIVE)
-		fw_notify("Tried to stop context, but it is still active "
-			  "(0x%08x).\n", reg);
+	if ((reg & CONTEXT_ACTIVE) == 0)
+		context_run(ctx, 0);
+
+	return 0;
 }
 
-static void at_context_tasklet(unsigned long data)
+static int handle_at_packet(struct context *context,
+			    struct descriptor *d,
+			    struct descriptor *last)
 {
-	struct at_context *ctx = (struct at_context *)data;
-	struct fw_ohci *ohci = ctx->ohci;
+	struct driver_data *driver_data;
 	struct fw_packet *packet;
-	LIST_HEAD(list);
-	unsigned long flags;
+	struct fw_ohci *ohci = context->ohci;
+	dma_addr_t payload_bus;
 	int evt;
 
-	spin_lock_irqsave(&ohci->lock, flags);
-
-	packet = fw_packet(ctx->list.next);
-
-	at_context_stop(ctx);
+	if (last->transfer_status == 0)
+		/* This descriptor isn't done yet, stop iteration. */
+		return 0;
 
-	/* If the head of the list isn't the packet that just got
-	 * transmitted, the packet got cancelled before we finished
-	 * transmitting it. */
-	if (ctx->current_packet != packet)
-		goto skip_to_next;
+	driver_data = (struct driver_data *) &d[3];
+	packet = driver_data->packet;
+	if (packet == NULL)
+		/* This packet was cancelled, just continue. */
+		return 1;
 
-	if (packet->payload_length > 0) {
-		dma_unmap_single(ohci->card.device, packet->payload_bus,
+	payload_bus = le32_to_cpu(last->data_address);
+	if (payload_bus != 0)
+		dma_unmap_single(ohci->card.device, payload_bus,
 				 packet->payload_length, DMA_TO_DEVICE);
-		evt = le16_to_cpu(ctx->d.last.transfer_status) & 0x1f;
-		packet->timestamp = le16_to_cpu(ctx->d.last.res_count);
-	}
-	else {
-		evt = le16_to_cpu(ctx->d.more.transfer_status) & 0x1f;
-		packet->timestamp = le16_to_cpu(ctx->d.more.res_count);
-	}
-
-	if (evt < 16) {
-		switch (evt) {
-		case OHCI1394_evt_timeout:
-			/* Async response transmit timed out. */
-			complete_transmission(packet, RCODE_CANCELLED, &list);
-			break;
-
-		case OHCI1394_evt_flushed:
-			/* The packet was flushed should give same
-			 * error as when we try to use a stale
-			 * generation count. */
-			complete_transmission(packet,
-					      RCODE_GENERATION, &list);
-			break;
-
-		case OHCI1394_evt_missing_ack:
-			/* Using a valid (current) generation count,
-			 * but the node is not on the bus or not
-			 * sending acks. */
-			complete_transmission(packet, RCODE_NO_ACK, &list);
-			break;
-
-		default:
-			complete_transmission(packet, RCODE_SEND_ERROR, &list);
-			break;
-		}
-	} else
-		complete_transmission(packet, evt - 16, &list);
 
- skip_to_next:
-	/* If more packets are queued, set up the next one. */
-	if (!list_empty(&ctx->list))
-		at_context_setup_packet(ctx, &list);
+	evt = le16_to_cpu(last->transfer_status) & 0x1f;
+	packet->timestamp = le16_to_cpu(last->res_count);
 
-	spin_unlock_irqrestore(&ohci->lock, flags);
+	switch (evt) {
+	case OHCI1394_evt_timeout:
+		/* Async response transmit timed out. */
+		packet->ack = RCODE_CANCELLED;
+		break;
 
-	do_packet_callbacks(ohci, &list);
-}
+	case OHCI1394_evt_flushed:
+		/* The packet was flushed should give same error as
+		 * when we try to use a stale generation count. */
+		packet->ack = RCODE_GENERATION;
+		break;
 
-static int
-at_context_init(struct at_context *ctx, struct fw_ohci *ohci, u32 regs)
-{
-	INIT_LIST_HEAD(&ctx->list);
+	case OHCI1394_evt_missing_ack:
+		/* Using a valid (current) generation count, but the
+		 * node is not on the bus or not sending acks. */
+		packet->ack = RCODE_NO_ACK;
+		break;
 
-	ctx->descriptor_bus =
-		dma_map_single(ohci->card.device, &ctx->d,
-			       sizeof ctx->d, DMA_TO_DEVICE);
-	if (dma_mapping_error(ctx->descriptor_bus))
-		return -ENOMEM;
+	case ACK_COMPLETE + 0x10:
+	case ACK_PENDING + 0x10:
+	case ACK_BUSY_X + 0x10:
+	case ACK_BUSY_A + 0x10:
+	case ACK_BUSY_B + 0x10:
+	case ACK_DATA_ERROR + 0x10:
+	case ACK_TYPE_ERROR + 0x10:
+		packet->ack = evt - 0x10;
+		break;
 
-	ctx->regs = regs;
-	ctx->ohci = ohci;
+	default:
+		packet->ack = RCODE_SEND_ERROR;
+		break;
+	}
 
-	tasklet_init(&ctx->tasklet, at_context_tasklet, (unsigned long)ctx);
+	packet->callback(packet, &ohci->card, packet->ack);
 
-	return 0;
+	return 1;
 }
 
 #define header_get_destination(q) (((q) >> 16) & 0xffff)
@@ -869,7 +808,7 @@ handle_local_lock(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
 }
 
 static void
-handle_local_request(struct at_context *ctx, struct fw_packet *packet)
+handle_local_request(struct context *ctx, struct fw_packet *packet)
 {
 	u64 offset;
 	u32 csr;
@@ -903,10 +842,10 @@ handle_local_request(struct at_context *ctx, struct fw_packet *packet)
 }
 
 static void
-at_context_transmit(struct at_context *ctx, struct fw_packet *packet)
+at_context_transmit(struct context *ctx, struct fw_packet *packet)
 {
-	LIST_HEAD(list);
 	unsigned long flags;
+	int retval;
 
 	spin_lock_irqsave(&ctx->ohci->lock, flags);
 
@@ -917,13 +856,12 @@ at_context_transmit(struct at_context *ctx, struct fw_packet *packet)
 		return;
 	}
 
-	list_add_tail(&packet->link, &ctx->list);
-	if (ctx->list.next == &packet->link)
-		at_context_setup_packet(ctx, &list);
-
+	retval = at_context_queue_packet(ctx, packet);
 	spin_unlock_irqrestore(&ctx->ohci->lock, flags);
 
-	do_packet_callbacks(ctx->ohci, &list);
+	if (retval < 0)
+		packet->callback(packet, &ctx->ohci->card, packet->ack);
+
 }
 
 static void bus_reset_tasklet(unsigned long data)
@@ -977,8 +915,8 @@ static void bus_reset_tasklet(unsigned long data)
 	spin_lock_irqsave(&ohci->lock, flags);
 
 	ohci->generation = generation;
-	at_context_stop(&ohci->at_request_ctx);
-	at_context_stop(&ohci->at_response_ctx);
+	context_stop(&ohci->at_request_ctx);
+	context_stop(&ohci->at_response_ctx);
 	reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
 
 	/* This next bit is unrelated to the AT context stuff but we
@@ -1216,24 +1154,24 @@ static void ohci_send_response(struct fw_card *card, struct fw_packet *packet)
 static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
 {
 	struct fw_ohci *ohci = fw_ohci(card);
-	LIST_HEAD(list);
-	unsigned long flags;
-
-	spin_lock_irqsave(&ohci->lock, flags);
+	struct context *ctx = &ohci->at_request_ctx;
+	struct driver_data *driver_data = packet->driver_data;
+	int retval = -ENOENT;
 
-	if (packet->ack == 0) {
-		fw_notify("cancelling packet %p (header[0]=%08x)\n",
-			  packet, packet->header[0]);
+	tasklet_disable(&ctx->tasklet);
 
-		complete_transmission(packet, RCODE_CANCELLED, &list);
-	}
+	if (packet->ack != 0)
+		goto out;
 
-	spin_unlock_irqrestore(&ohci->lock, flags);
+	driver_data->packet = NULL;
+	packet->ack = RCODE_CANCELLED;
+	packet->callback(packet, &ohci->card, packet->ack);
+	retval = 0;
 
-	do_packet_callbacks(ohci, &list);
+ out:
+	tasklet_enable(&ctx->tasklet);
 
-	/* Return success if we actually cancelled something. */
-	return list_empty(&list) ? -ENOENT : 0;
+	return retval;
 }
 
 static int
@@ -1314,8 +1252,6 @@ static int handle_ir_dualbuffer_packet(struct context *context,
 	return 1;
 }
 
-#define ISO_BUFFER_SIZE (64 * 1024)
-
 static int handle_it_packet(struct context *context,
 			    struct descriptor *d,
 			    struct descriptor *last)
@@ -1872,11 +1808,11 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
 	ar_context_init(&ohci->ar_response_ctx, ohci,
 			OHCI1394_AsRspRcvContextControlSet);
 
-	at_context_init(&ohci->at_request_ctx, ohci,
-			OHCI1394_AsReqTrContextControlSet);
+	context_init(&ohci->at_request_ctx, ohci, AT_BUFFER_SIZE,
+		     OHCI1394_AsReqTrContextControlSet, handle_at_packet);
 
-	at_context_init(&ohci->at_response_ctx, ohci,
-			OHCI1394_AsRspTrContextControlSet);
+	context_init(&ohci->at_response_ctx, ohci, AT_BUFFER_SIZE,
+		     OHCI1394_AsRspTrContextControlSet, handle_at_packet);
 
 	reg_write(ohci, OHCI1394_ATRetries,
 		  OHCI1394_MAX_AT_REQ_RETRIES |
diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h
index b0d057533fb0..e7301b83f91e 100644
--- a/drivers/firewire/fw-transaction.h
+++ b/drivers/firewire/fw-transaction.h
@@ -203,8 +203,6 @@ struct fw_packet {
 	size_t payload_length;
 	u32 timestamp;
 
-	dma_addr_t payload_bus;
-
 	/* This callback is called when the packet transmission has
 	 * completed; for successful transmission, the status code is
 	 * the ack received from the destination, otherwise it's a
@@ -215,6 +213,7 @@ struct fw_packet {
 	fw_packet_callback_t callback;
 	int ack;
 	struct list_head link;
+	void *driver_data;
 };
 
 struct fw_transaction {