author     Clemens Ladisch <clemens@ladisch.de>        2010-11-26 02:57:31 -0500
committer  Stefan Richter <stefanr@s5r6.in-berlin.de>  2010-12-13 14:39:13 -0500
commit     7a39d8b82165462729d09066bddb395a19025acd
tree       c0fea8d963b7f421e97dccacb059cfca610072de
parent     5878730be4e3d0c9527d6f2f688874e38acacc98
firewire: ohci: Asynchronous Reception rewrite
Move the AR DMA descriptors out of the buffer pages, and map the buffer
pages linearly into the kernel's address space. This allows the driver
to ignore any page boundaries in the DMA data and thus to avoid any
copying around of packet payloads.
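The buffer layout, in condensed form (a sketch of what the new ar_context_init()
further down sets up; error handling and the descriptor ring are omitted here):

    struct page *pages[AR_BUFFERS + AR_WRAPAROUND_PAGES];
    unsigned int i;

    for (i = 0; i < AR_BUFFERS; i++)
            pages[i] = ctx->pages[i];               /* the DMA buffer pages */
    for (i = 0; i < AR_WRAPAROUND_PAGES; i++)
            pages[AR_BUFFERS + i] = ctx->pages[i];  /* first pages, mapped a second time */

    /*
     * One linear, read-only kernel mapping over all of them: a packet whose
     * DMA data wraps from the last buffer page back into the first one is
     * still contiguous in this mapping and can be parsed in place.
     */
    ctx->buffer = vm_map_ram(pages, AR_BUFFERS + AR_WRAPAROUND_PAGES,
                             -1, PAGE_KERNEL_RO);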
This fixes the bug where S800 packets that are so big (> 4080 bytes)
that they can be split over three pages were not handled correctly.
Due to the changed algorithm, we can now use arbitrarily many buffer
pages, which improves performance because the controller can more easily
unload its DMA FIFO.
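(With 4 KiB pages, the constants introduced below work out to 32768 / 4096 = 8
buffer pages, plus DIV_ROUND_UP(16 + 4096 + 4, 4096) = 2 extra pages for the
wraparound mapping.)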
Furthermore, using streaming DMA mappings should improve performance on
architectures where coherent DMA mappings are not cacheable. Even on
other architectures, the caching behaviour should be improved slightly
because the CPU no longer writes to the buffer pages.
v2: Detect the last filled buffer page by searching the descriptor's
residual count value fields in order (like in the old code), instead
of going backwards through the transfer status fields; it looks as
if some controllers do not set the latter correctly.
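In essence, the search works like this (simplified from
ar_search_last_active_buffer() below; the special case of a packet split over
three buffers is omitted):

    i = ar_first_buffer_index(ctx);
    res_count = ACCESS_ONCE(ctx->descriptors[i].res_count);

    /* a buffer that is not yet completely filled must be the last one */
    while (i != ctx->last_buffer_index && res_count == 0) {
            next_i = ar_next_buffer_index(i);
            rmb();          /* read descriptors in order */
            next_res_count = ACCESS_ONCE(ctx->descriptors[next_i].res_count);
            if (next_res_count == cpu_to_le16(PAGE_SIZE))
                    break;  /* next buffer still untouched: stop here */
            i = next_i;
            res_count = next_res_count;
    }
    /* buffer i is the last active one; it holds
       PAGE_SIZE - le16_to_cpu(res_count) bytes of data */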
v3: Fix an old resume bug that would now make the handler run into
a BUG_ON, and replace that check with more useful error handling.
Increase the buffer size for better performance with non-TI chips.
Signed-off-by: Clemens Ladisch <clemens@ladisch.de>
Maxim Levitsky writes:
Works almost perfectly. I can still see RCODE_BUSY errors
sometimes, not very often though. 64K here eliminates these errors
completely. This is most likely due to nouveau drivers and lowest
perf level I use to lower card temperature. That increases
latencies too much I think. Besides that the IO is just perfect.
Tested-by: Maxim Levitsky <maximlevitsky@gmail.com>
Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>
 drivers/firewire/Kconfig |   2
 drivers/firewire/ohci.c  | 411
 2 files changed, 278 insertions(+), 135 deletions(-)
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index 40a222e19b2d..68f942cb30f2 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -19,7 +19,7 @@ config FIREWIRE
 
 config FIREWIRE_OHCI
         tristate "OHCI-1394 controllers"
-        depends on PCI && FIREWIRE
+        depends on PCI && FIREWIRE && MMU
         help
           Enable this driver if you have a FireWire controller based
           on the OHCI specification. For all practical purposes, this
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index e3c8b60bd86b..be0a01d8cef3 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -40,6 +40,7 @@
 #include <linux/spinlock.h>
 #include <linux/string.h>
 #include <linux/time.h>
+#include <linux/vmalloc.h>
 
 #include <asm/byteorder.h>
 #include <asm/page.h>
@@ -80,17 +81,23 @@ struct descriptor {
 #define COMMAND_PTR(regs)      ((regs) + 12)
 #define CONTEXT_MATCH(regs)    ((regs) + 16)
 
-struct ar_buffer {
-        struct descriptor descriptor;
-        struct ar_buffer *next;
-        __le32 data[0];
-};
+#define AR_BUFFER_SIZE         (32*1024)
+#define AR_BUFFERS_MIN         DIV_ROUND_UP(AR_BUFFER_SIZE, PAGE_SIZE)
+/* we need at least two pages for proper list management */
+#define AR_BUFFERS             (AR_BUFFERS_MIN >= 2 ? AR_BUFFERS_MIN : 2)
+
+#define MAX_ASYNC_PAYLOAD      4096
+#define MAX_AR_PACKET_SIZE     (16 + MAX_ASYNC_PAYLOAD + 4)
+#define AR_WRAPAROUND_PAGES    DIV_ROUND_UP(MAX_AR_PACKET_SIZE, PAGE_SIZE)
 
 struct ar_context {
         struct fw_ohci *ohci;
-        struct ar_buffer *current_buffer;
-        struct ar_buffer *last_buffer;
+        struct page *pages[AR_BUFFERS];
+        void *buffer;
+        struct descriptor *descriptors;
+        dma_addr_t descriptors_bus;
         void *pointer;
+        unsigned int last_buffer_index;
         u32 regs;
         struct tasklet_struct tasklet;
 };
@@ -594,59 +601,155 @@ static int ohci_update_phy_reg(struct fw_card *card, int addr,
         return ret;
 }
 
-static void ar_context_link_page(struct ar_context *ctx,
-                                 struct ar_buffer *ab, dma_addr_t ab_bus)
+static inline dma_addr_t ar_buffer_bus(struct ar_context *ctx, unsigned int i)
+{
+        return page_private(ctx->pages[i]);
+}
+
+static void ar_context_link_page(struct ar_context *ctx, unsigned int index)
 {
-        size_t offset;
+        struct descriptor *d;
 
-        ab->next = NULL;
-        memset(&ab->descriptor, 0, sizeof(ab->descriptor));
-        ab->descriptor.control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
-                                             DESCRIPTOR_STATUS |
-                                             DESCRIPTOR_BRANCH_ALWAYS);
-        offset = offsetof(struct ar_buffer, data);
-        ab->descriptor.req_count = cpu_to_le16(PAGE_SIZE - offset);
-        ab->descriptor.data_address = cpu_to_le32(ab_bus + offset);
-        ab->descriptor.res_count = cpu_to_le16(PAGE_SIZE - offset);
-        ab->descriptor.branch_address = 0;
+        d = &ctx->descriptors[index];
+        d->branch_address &= cpu_to_le32(~0xf);
+        d->res_count = cpu_to_le16(PAGE_SIZE);
+        d->transfer_status = 0;
 
         wmb(); /* finish init of new descriptors before branch_address update */
-        ctx->last_buffer->descriptor.branch_address = cpu_to_le32(ab_bus | 1);
-        ctx->last_buffer->next = ab;
-        ctx->last_buffer = ab;
+        d = &ctx->descriptors[ctx->last_buffer_index];
+        d->branch_address |= cpu_to_le32(1);
+
+        ctx->last_buffer_index = index;
 
         reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
         flush_writes(ctx->ohci);
 }
 
-static int ar_context_add_page(struct ar_context *ctx)
+static void ar_context_release(struct ar_context *ctx)
 {
-        struct device *dev = ctx->ohci->card.device;
-        struct ar_buffer *ab;
-        dma_addr_t uninitialized_var(ab_bus);
+        unsigned int i;
 
-        ab = dma_alloc_coherent(dev, PAGE_SIZE, &ab_bus, GFP_ATOMIC);
-        if (ab == NULL)
-                return -ENOMEM;
+        if (ctx->descriptors)
+                dma_free_coherent(ctx->ohci->card.device,
+                                  AR_BUFFERS * sizeof(struct descriptor),
+                                  ctx->descriptors, ctx->descriptors_bus);
 
-        ar_context_link_page(ctx, ab, ab_bus);
+        if (ctx->buffer)
+                vm_unmap_ram(ctx->buffer, AR_BUFFERS + AR_WRAPAROUND_PAGES);
 
-        return 0;
+        for (i = 0; i < AR_BUFFERS; i++)
+                if (ctx->pages[i]) {
+                        dma_unmap_page(ctx->ohci->card.device,
+                                       ar_buffer_bus(ctx, i),
+                                       PAGE_SIZE, DMA_FROM_DEVICE);
+                        __free_page(ctx->pages[i]);
+                }
 }
 
-static void ar_context_release(struct ar_context *ctx)
+static void ar_context_abort(struct ar_context *ctx, const char *error_msg)
 {
-        struct ar_buffer *ab, *ab_next;
-        size_t offset;
-        dma_addr_t ab_bus;
+        if (reg_read(ctx->ohci, CONTROL_CLEAR(ctx->regs)) & CONTEXT_RUN) {
+                reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
+                flush_writes(ctx->ohci);
 
-        for (ab = ctx->current_buffer; ab; ab = ab_next) {
-                ab_next = ab->next;
-                offset = offsetof(struct ar_buffer, data);
-                ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
-                dma_free_coherent(ctx->ohci->card.device, PAGE_SIZE,
-                                  ab, ab_bus);
+                fw_error("AR error: %s; DMA stopped\n", error_msg);
         }
+        /* FIXME: restart? */
+}
+
+static inline unsigned int ar_next_buffer_index(unsigned int index)
+{
+        return (index + 1) % AR_BUFFERS;
+}
+
+static inline unsigned int ar_prev_buffer_index(unsigned int index)
+{
+        return (index - 1 + AR_BUFFERS) % AR_BUFFERS;
+}
+
+static inline unsigned int ar_first_buffer_index(struct ar_context *ctx)
+{
+        return ar_next_buffer_index(ctx->last_buffer_index);
+}
+
+/*
+ * We search for the buffer that contains the last AR packet DMA data written
+ * by the controller.
+ */
+static unsigned int ar_search_last_active_buffer(struct ar_context *ctx,
+                                                 unsigned int *buffer_offset)
+{
+        unsigned int i, next_i, last = ctx->last_buffer_index;
+        __le16 res_count, next_res_count;
+
+        i = ar_first_buffer_index(ctx);
+        res_count = ACCESS_ONCE(ctx->descriptors[i].res_count);
+
+        /* A buffer that is not yet completely filled must be the last one. */
+        while (i != last && res_count == 0) {
+
+                /* Peek at the next descriptor. */
+                next_i = ar_next_buffer_index(i);
+                rmb(); /* read descriptors in order */
+                next_res_count = ACCESS_ONCE(
+                                ctx->descriptors[next_i].res_count);
+                /*
+                 * If the next descriptor is still empty, we must stop at this
+                 * descriptor.
+                 */
+                if (next_res_count == cpu_to_le16(PAGE_SIZE)) {
+                        /*
+                         * The exception is when the DMA data for one packet is
+                         * split over three buffers; in this case, the middle
+                         * buffer's descriptor might be never updated by the
+                         * controller and look still empty, and we have to peek
+                         * at the third one.
+                         */
+                        if (MAX_AR_PACKET_SIZE > PAGE_SIZE && i != last) {
+                                next_i = ar_next_buffer_index(next_i);
+                                rmb();
+                                next_res_count = ACCESS_ONCE(
+                                        ctx->descriptors[next_i].res_count);
+                                if (next_res_count != cpu_to_le16(PAGE_SIZE))
+                                        goto next_buffer_is_active;
+                        }
+
+                        break;
+                }
+
+next_buffer_is_active:
+                i = next_i;
+                res_count = next_res_count;
+        }
+
+        rmb(); /* read res_count before the DMA data */
+
+        *buffer_offset = PAGE_SIZE - le16_to_cpu(res_count);
+        if (*buffer_offset > PAGE_SIZE) {
+                *buffer_offset = 0;
+                ar_context_abort(ctx, "corrupted descriptor");
+        }
+
+        return i;
+}
+
+static void ar_sync_buffers_for_cpu(struct ar_context *ctx,
+                                    unsigned int end_buffer_index,
+                                    unsigned int end_buffer_offset)
+{
+        unsigned int i;
+
+        i = ar_first_buffer_index(ctx);
+        while (i != end_buffer_index) {
+                dma_sync_single_for_cpu(ctx->ohci->card.device,
+                                        ar_buffer_bus(ctx, i),
+                                        PAGE_SIZE, DMA_FROM_DEVICE);
+                i = ar_next_buffer_index(i);
+        }
+        if (end_buffer_offset > 0)
+                dma_sync_single_for_cpu(ctx->ohci->card.device,
+                                        ar_buffer_bus(ctx, i),
+                                        end_buffer_offset, DMA_FROM_DEVICE);
 }
 
 #if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
@@ -689,6 +792,10 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
                 p.header[3] = cond_le32_to_cpu(buffer[3]);
                 p.header_length = 16;
                 p.payload_length = p.header[3] >> 16;
+                if (p.payload_length > MAX_ASYNC_PAYLOAD) {
+                        ar_context_abort(ctx, "invalid packet length");
+                        return NULL;
+                }
                 break;
 
         case TCODE_WRITE_RESPONSE:
@@ -699,9 +806,8 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
                 break;
 
         default:
-                /* FIXME: Stop context, discard everything, and restart? */
-                p.header_length = 0;
-                p.payload_length = 0;
+                ar_context_abort(ctx, "invalid tcode");
+                return NULL;
         }
 
         p.payload = (void *) buffer + p.header_length;
@@ -751,121 +857,152 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
         return buffer + length + 1;
 }
 
+static void *handle_ar_packets(struct ar_context *ctx, void *p, void *end)
+{
+        void *next;
+
+        while (p < end) {
+                next = handle_ar_packet(ctx, p);
+                if (!next)
+                        return p;
+                p = next;
+        }
+
+        return p;
+}
+
+static void ar_recycle_buffers(struct ar_context *ctx, unsigned int end_buffer)
+{
+        unsigned int i;
+
+        i = ar_first_buffer_index(ctx);
+        while (i != end_buffer) {
+                dma_sync_single_for_device(ctx->ohci->card.device,
+                                           ar_buffer_bus(ctx, i),
+                                           PAGE_SIZE, DMA_FROM_DEVICE);
+                ar_context_link_page(ctx, i);
+                i = ar_next_buffer_index(i);
+        }
+}
+
 static void ar_context_tasklet(unsigned long data)
 {
         struct ar_context *ctx = (struct ar_context *)data;
-        struct ar_buffer *ab;
-        struct descriptor *d;
-        void *buffer, *end;
-        __le16 res_count;
+        unsigned int end_buffer_index, end_buffer_offset;
+        void *p, *end;
 
-        ab = ctx->current_buffer;
-        d = &ab->descriptor;
+        p = ctx->pointer;
+        if (!p)
+                return;
 
-        res_count = ACCESS_ONCE(d->res_count);
-        if (res_count == 0) {
-                size_t size, size2, rest, pktsize, size3, offset;
-                dma_addr_t start_bus;
-                void *start;
+        end_buffer_index = ar_search_last_active_buffer(ctx,
+                                                        &end_buffer_offset);
+        ar_sync_buffers_for_cpu(ctx, end_buffer_index, end_buffer_offset);
+        end = ctx->buffer + end_buffer_index * PAGE_SIZE + end_buffer_offset;
 
+        if (end_buffer_index < ar_first_buffer_index(ctx)) {
                 /*
-                 * This descriptor is finished and we may have a
-                 * packet split across this and the next buffer. We
-                 * reuse the page for reassembling the split packet.
+                 * The filled part of the overall buffer wraps around; handle
+                 * all packets up to the buffer end here.  If the last packet
+                 * wraps around, its tail will be visible after the buffer end
+                 * because the buffer start pages are mapped there again.
                  */
+                void *buffer_end = ctx->buffer + AR_BUFFERS * PAGE_SIZE;
+                p = handle_ar_packets(ctx, p, buffer_end);
+                if (p < buffer_end)
+                        goto error;
+                /* adjust p to point back into the actual buffer */
+                p -= AR_BUFFERS * PAGE_SIZE;
+        }
 
-                offset = offsetof(struct ar_buffer, data);
-                start = ab;
-                start_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
-                buffer = ab->data;
-
-                ab = ab->next;
-                d = &ab->descriptor;
-                size = start + PAGE_SIZE - ctx->pointer;
-                /* valid buffer data in the next page */
-                rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count);
-                /* what actually fits in this page */
-                size2 = min(rest, (size_t)PAGE_SIZE - offset - size);
-                memmove(buffer, ctx->pointer, size);
-                memcpy(buffer + size, ab->data, size2);
-
-                while (size > 0) {
-                        void *next = handle_ar_packet(ctx, buffer);
-                        pktsize = next - buffer;
-                        if (pktsize >= size) {
-                                /*
-                                 * We have handled all the data that was
-                                 * originally in this page, so we can now
-                                 * continue in the next page.
-                                 */
-                                buffer = next;
-                                break;
-                        }
-                        /* move the next packet to the start of the buffer */
-                        memmove(buffer, next, size + size2 - pktsize);
-                        size -= pktsize;
-                        /* fill up this page again */
-                        size3 = min(rest - size2,
-                                    (size_t)PAGE_SIZE - offset - size - size2);
-                        memcpy(buffer + size + size2,
-                               (void *) ab->data + size2, size3);
-                        size2 += size3;
-                }
-
-                if (rest > 0) {
-                        /* handle the packets that are fully in the next page */
-                        buffer = (void *) ab->data +
-                                        (buffer - (start + offset + size));
-                        end = (void *) ab->data + rest;
+        p = handle_ar_packets(ctx, p, end);
+        if (p != end) {
+                if (p > end)
+                        ar_context_abort(ctx, "inconsistent descriptor");
+                goto error;
+        }
 
-                        while (buffer < end)
-                                buffer = handle_ar_packet(ctx, buffer);
+        ctx->pointer = p;
+        ar_recycle_buffers(ctx, end_buffer_index);
 
-                        ctx->current_buffer = ab;
-                        ctx->pointer = end;
+        return;
 
-                        ar_context_link_page(ctx, start, start_bus);
-                } else {
-                        ctx->pointer = start + PAGE_SIZE;
-                }
-        } else {
-                buffer = ctx->pointer;
-                ctx->pointer = end =
-                        (void *) ab + PAGE_SIZE - le16_to_cpu(res_count);
-
-                while (buffer < end)
-                        buffer = handle_ar_packet(ctx, buffer);
-        }
+error:
+        ctx->pointer = NULL;
 }
 
 static int ar_context_init(struct ar_context *ctx,
                            struct fw_ohci *ohci, u32 regs)
 {
-        struct ar_buffer ab;
+        unsigned int i;
+        dma_addr_t dma_addr;
+        struct page *pages[AR_BUFFERS + AR_WRAPAROUND_PAGES];
+        struct descriptor *d;
 
         ctx->regs = regs;
         ctx->ohci = ohci;
-        ctx->last_buffer = &ab;
         tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);
 
-        ar_context_add_page(ctx);
-        ar_context_add_page(ctx);
-        ctx->current_buffer = ab.next;
-        ctx->pointer = ctx->current_buffer->data;
+        for (i = 0; i < AR_BUFFERS; i++) {
+                ctx->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32);
+                if (!ctx->pages[i])
+                        goto out_of_memory;
+                dma_addr = dma_map_page(ohci->card.device, ctx->pages[i],
+                                        0, PAGE_SIZE, DMA_FROM_DEVICE);
+                if (dma_mapping_error(ohci->card.device, dma_addr)) {
+                        __free_page(ctx->pages[i]);
+                        ctx->pages[i] = NULL;
+                        goto out_of_memory;
+                }
+                set_page_private(ctx->pages[i], dma_addr);
+        }
+
+        for (i = 0; i < AR_BUFFERS; i++)
+                pages[i] = ctx->pages[i];
+        for (i = 0; i < AR_WRAPAROUND_PAGES; i++)
+                pages[AR_BUFFERS + i] = ctx->pages[i];
+        ctx->buffer = vm_map_ram(pages, AR_BUFFERS + AR_WRAPAROUND_PAGES,
+                                 -1, PAGE_KERNEL_RO);
+        if (!ctx->buffer)
+                goto out_of_memory;
+
+        ctx->descriptors =
+                dma_alloc_coherent(ohci->card.device,
+                                   AR_BUFFERS * sizeof(struct descriptor),
+                                   &ctx->descriptors_bus,
+                                   GFP_KERNEL);
+        if (!ctx->descriptors)
+                goto out_of_memory;
+
+        for (i = 0; i < AR_BUFFERS; i++) {
+                d = &ctx->descriptors[i];
+                d->req_count = cpu_to_le16(PAGE_SIZE);
+                d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
+                                         DESCRIPTOR_STATUS |
+                                         DESCRIPTOR_BRANCH_ALWAYS);
+                d->data_address = cpu_to_le32(ar_buffer_bus(ctx, i));
+                d->branch_address = cpu_to_le32(ctx->descriptors_bus +
+                        ar_next_buffer_index(i) * sizeof(struct descriptor));
+        }
 
         return 0;
+
+out_of_memory:
+        ar_context_release(ctx);
+
+        return -ENOMEM;
 }
 
 static void ar_context_run(struct ar_context *ctx)
 {
-        struct ar_buffer *ab = ctx->current_buffer;
-        dma_addr_t ab_bus;
-        size_t offset;
+        unsigned int i;
+
+        for (i = 0; i < AR_BUFFERS; i++)
+                ar_context_link_page(ctx, i);
 
-        offset = offsetof(struct ar_buffer, data);
-        ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
+        ctx->pointer = ctx->buffer;
 
-        reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ab_bus | 1);
+        reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ctx->descriptors_bus | 1);
         reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
         flush_writes(ctx->ohci);
 }
@@ -2955,11 +3092,15 @@ static int __devinit pci_probe(struct pci_dev *dev,
         if (param_quirks)
                 ohci->quirks = param_quirks;
 
-        ar_context_init(&ohci->ar_request_ctx, ohci,
-                        OHCI1394_AsReqRcvContextControlSet);
+        err = ar_context_init(&ohci->ar_request_ctx, ohci,
+                              OHCI1394_AsReqRcvContextControlSet);
+        if (err < 0)
+                goto fail_iounmap;
 
-        ar_context_init(&ohci->ar_response_ctx, ohci,
-                        OHCI1394_AsRspRcvContextControlSet);
+        err = ar_context_init(&ohci->ar_response_ctx, ohci,
+                              OHCI1394_AsRspRcvContextControlSet);
+        if (err < 0)
+                goto fail_arreq_ctx;
 
         context_init(&ohci->at_request_ctx, ohci,
                      OHCI1394_AsReqTrContextControlSet, handle_at_packet);
@@ -3024,7 +3165,9 @@ static int __devinit pci_probe(struct pci_dev *dev,
         context_release(&ohci->at_response_ctx);
         context_release(&ohci->at_request_ctx);
         ar_context_release(&ohci->ar_response_ctx);
+ fail_arreq_ctx:
         ar_context_release(&ohci->ar_request_ctx);
+ fail_iounmap:
         pci_iounmap(dev, ohci->registers);
  fail_iomem:
         pci_release_region(dev, 0);