Diffstat (limited to 'drivers/firewire/fw-ohci.c')
-rw-r--r--  drivers/firewire/fw-ohci.c | 260
1 file changed, 135 insertions(+), 125 deletions(-)
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
index 6d19828a93a5..1180d0be0bb4 100644
--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/fw-ohci.c
@@ -205,6 +205,7 @@ struct fw_ohci {
 
         u32 it_context_mask;
         struct iso_context *it_context_list;
+        u64 ir_context_channels;
         u32 ir_context_mask;
         struct iso_context *ir_context_list;
 };
@@ -441,9 +442,8 @@ static inline void flush_writes(const struct fw_ohci *ohci)
         reg_read(ohci, OHCI1394_Version);
 }
 
-static int
-ohci_update_phy_reg(struct fw_card *card, int addr,
-                    int clear_bits, int set_bits)
+static int ohci_update_phy_reg(struct fw_card *card, int addr,
+                               int clear_bits, int set_bits)
 {
         struct fw_ohci *ohci = fw_ohci(card);
         u32 val, old;
@@ -658,8 +658,8 @@ static void ar_context_tasklet(unsigned long data)
         }
 }
 
-static int
-ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci, u32 regs)
+static int ar_context_init(struct ar_context *ctx,
+                           struct fw_ohci *ohci, u32 regs)
 {
         struct ar_buffer ab;
 
@@ -690,8 +690,7 @@ static void ar_context_run(struct ar_context *ctx)
         flush_writes(ctx->ohci);
 }
 
-static struct descriptor *
-find_branch_descriptor(struct descriptor *d, int z)
+static struct descriptor *find_branch_descriptor(struct descriptor *d, int z)
 {
         int b, key;
 
@@ -751,8 +750,7 @@ static void context_tasklet(unsigned long data)
  * Allocate a new buffer and add it to the list of free buffers for this
  * context. Must be called with ohci->lock held.
  */
-static int
-context_add_buffer(struct context *ctx)
+static int context_add_buffer(struct context *ctx)
 {
         struct descriptor_buffer *desc;
         dma_addr_t uninitialized_var(bus_addr);
@@ -781,9 +779,8 @@ context_add_buffer(struct context *ctx)
         return 0;
 }
 
-static int
-context_init(struct context *ctx, struct fw_ohci *ohci,
-             u32 regs, descriptor_callback_t callback)
+static int context_init(struct context *ctx, struct fw_ohci *ohci,
+                        u32 regs, descriptor_callback_t callback)
 {
         ctx->ohci = ohci;
         ctx->regs = regs;
@@ -814,8 +811,7 @@ context_init(struct context *ctx, struct fw_ohci *ohci,
         return 0;
 }
 
-static void
-context_release(struct context *ctx)
+static void context_release(struct context *ctx)
 {
         struct fw_card *card = &ctx->ohci->card;
         struct descriptor_buffer *desc, *tmp;
@@ -827,8 +823,8 @@ context_release(struct context *ctx)
 }
 
 /* Must be called with ohci->lock held */
-static struct descriptor *
-context_get_descriptors(struct context *ctx, int z, dma_addr_t *d_bus)
+static struct descriptor *context_get_descriptors(struct context *ctx,
+                                                  int z, dma_addr_t *d_bus)
 {
         struct descriptor *d = NULL;
         struct descriptor_buffer *desc = ctx->buffer_tail;
@@ -912,8 +908,8 @@ struct driver_data {
  * Must always be called with the ochi->lock held to ensure proper
  * generation handling and locking around packet queue manipulation.
  */
-static int
-at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
+static int at_context_queue_packet(struct context *ctx,
+                                   struct fw_packet *packet)
 {
         struct fw_ohci *ohci = ctx->ohci;
         dma_addr_t d_bus, uninitialized_var(payload_bus);
@@ -940,7 +936,9 @@ at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
          */
 
         header = (__le32 *) &d[1];
-        if (packet->header_length > 8) {
+        switch (packet->header_length) {
+        case 16:
+        case 12:
                 header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
                                         (packet->speed << 16));
                 header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
@@ -954,12 +952,27 @@ at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
                 header[3] = (__force __le32) packet->header[3];
 
                 d[0].req_count = cpu_to_le16(packet->header_length);
-        } else {
+                break;
+
+        case 8:
                 header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
                                         (packet->speed << 16));
                 header[1] = cpu_to_le32(packet->header[0]);
                 header[2] = cpu_to_le32(packet->header[1]);
                 d[0].req_count = cpu_to_le16(12);
+                break;
+
+        case 4:
+                header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
+                                        (packet->speed << 16));
+                header[1] = cpu_to_le32(packet->header[0] & 0xffff0000);
+                d[0].req_count = cpu_to_le16(8);
+                break;
+
+        default:
+                /* BUG(); */
+                packet->ack = RCODE_SEND_ERROR;
+                return -1;
         }
 
         driver_data = (struct driver_data *) &d[3];
@@ -1095,8 +1108,8 @@ static int handle_at_packet(struct context *context,
 #define HEADER_GET_DATA_LENGTH(q)       (((q) >> 16) & 0xffff)
 #define HEADER_GET_EXTENDED_TCODE(q)    (((q) >> 0) & 0xffff)
 
-static void
-handle_local_rom(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
+static void handle_local_rom(struct fw_ohci *ohci,
+                             struct fw_packet *packet, u32 csr)
 {
         struct fw_packet response;
         int tcode, length, i;
@@ -1122,8 +1135,8 @@ handle_local_rom(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
         fw_core_handle_response(&ohci->card, &response);
 }
 
-static void
-handle_local_lock(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
+static void handle_local_lock(struct fw_ohci *ohci,
+                              struct fw_packet *packet, u32 csr)
 {
         struct fw_packet response;
         int tcode, length, ext_tcode, sel;
@@ -1164,8 +1177,7 @@ handle_local_lock(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
         fw_core_handle_response(&ohci->card, &response);
 }
 
-static void
-handle_local_request(struct context *ctx, struct fw_packet *packet)
+static void handle_local_request(struct context *ctx, struct fw_packet *packet)
 {
         u64 offset;
         u32 csr;
@@ -1205,11 +1217,10 @@ handle_local_request(struct context *ctx, struct fw_packet *packet)
         }
 }
 
-static void
-at_context_transmit(struct context *ctx, struct fw_packet *packet)
+static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
 {
         unsigned long flags;
-        int retval;
+        int ret;
 
         spin_lock_irqsave(&ctx->ohci->lock, flags);
 
@@ -1220,10 +1231,10 @@ at_context_transmit(struct context *ctx, struct fw_packet *packet)
                 return;
         }
 
-        retval = at_context_queue_packet(ctx, packet);
+        ret = at_context_queue_packet(ctx, packet);
         spin_unlock_irqrestore(&ctx->ohci->lock, flags);
 
-        if (retval < 0)
+        if (ret < 0)
                 packet->callback(packet, &ctx->ohci->card, packet->ack);
 
 }
@@ -1590,12 +1601,12 @@ static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
         return 0;
 }
 
-static int
-ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length)
+static int ohci_set_config_rom(struct fw_card *card,
+                               u32 *config_rom, size_t length)
 {
         struct fw_ohci *ohci;
         unsigned long flags;
-        int retval = -EBUSY;
+        int ret = -EBUSY;
         __be32 *next_config_rom;
         dma_addr_t uninitialized_var(next_config_rom_bus);
 
@@ -1649,7 +1660,7 @@ ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length)
 
                 reg_write(ohci, OHCI1394_ConfigROMmap,
                           ohci->next_config_rom_bus);
-                retval = 0;
+                ret = 0;
         }
 
         spin_unlock_irqrestore(&ohci->lock, flags);
@@ -1661,13 +1672,13 @@ ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length)
          * controller could need to access it before the bus reset
          * takes effect.
          */
-        if (retval == 0)
+        if (ret == 0)
                 fw_core_initiate_bus_reset(&ohci->card, 1);
         else
                 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
                                   next_config_rom, next_config_rom_bus);
 
-        return retval;
+        return ret;
 }
 
 static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
@@ -1689,7 +1700,7 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
         struct fw_ohci *ohci = fw_ohci(card);
         struct context *ctx = &ohci->at_request_ctx;
         struct driver_data *driver_data = packet->driver_data;
-        int retval = -ENOENT;
+        int ret = -ENOENT;
 
         tasklet_disable(&ctx->tasklet);
 
@@ -1704,23 +1715,22 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
         driver_data->packet = NULL;
         packet->ack = RCODE_CANCELLED;
         packet->callback(packet, &ohci->card, packet->ack);
-        retval = 0;
-
+        ret = 0;
  out:
         tasklet_enable(&ctx->tasklet);
 
-        return retval;
+        return ret;
 }
 
-static int
-ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation)
+static int ohci_enable_phys_dma(struct fw_card *card,
+                                int node_id, int generation)
 {
 #ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
         return 0;
 #else
         struct fw_ohci *ohci = fw_ohci(card);
         unsigned long flags;
-        int n, retval = 0;
+        int n, ret = 0;
 
         /*
          * FIXME: Make sure this bitmask is cleared when we clear the busReset
@@ -1730,7 +1740,7 @@ ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation)
         spin_lock_irqsave(&ohci->lock, flags);
 
         if (ohci->generation != generation) {
-                retval = -ESTALE;
+                ret = -ESTALE;
                 goto out;
         }
 
@@ -1748,12 +1758,12 @@ ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation)
         flush_writes(ohci);
  out:
         spin_unlock_irqrestore(&ohci->lock, flags);
-        return retval;
+
+        return ret;
 #endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */
 }
 
-static u64
-ohci_get_bus_time(struct fw_card *card)
+static u64 ohci_get_bus_time(struct fw_card *card)
 {
         struct fw_ohci *ohci = fw_ohci(card);
         u32 cycle_time;
@@ -1765,6 +1775,28 @@ ohci_get_bus_time(struct fw_card *card)
         return bus_time;
 }
 
+static void copy_iso_headers(struct iso_context *ctx, void *p)
+{
+        int i = ctx->header_length;
+
+        if (i + ctx->base.header_size > PAGE_SIZE)
+                return;
+
+        /*
+         * The iso header is byteswapped to little endian by
+         * the controller, but the remaining header quadlets
+         * are big endian. We want to present all the headers
+         * as big endian, so we have to swap the first quadlet.
+         */
+        if (ctx->base.header_size > 0)
+                *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
+        if (ctx->base.header_size > 4)
+                *(u32 *) (ctx->header + i + 4) = __swab32(*(u32 *) p);
+        if (ctx->base.header_size > 8)
+                memcpy(ctx->header + i + 8, p + 8, ctx->base.header_size - 8);
+        ctx->header_length += ctx->base.header_size;
+}
+
 static int handle_ir_dualbuffer_packet(struct context *context,
                                        struct descriptor *d,
                                        struct descriptor *last)
@@ -1775,7 +1807,6 @@ static int handle_ir_dualbuffer_packet(struct context *context,
         __le32 *ir_header;
         size_t header_length;
         void *p, *end;
-        int i;
 
         if (db->first_res_count != 0 && db->second_res_count != 0) {
                 if (ctx->excess_bytes <= le16_to_cpu(db->second_req_count)) {
@@ -1788,25 +1819,14 @@ static int handle_ir_dualbuffer_packet(struct context *context,
         header_length = le16_to_cpu(db->first_req_count) -
                 le16_to_cpu(db->first_res_count);
 
-        i = ctx->header_length;
         p = db + 1;
         end = p + header_length;
-        while (p < end && i + ctx->base.header_size <= PAGE_SIZE) {
-                /*
-                 * The iso header is byteswapped to little endian by
-                 * the controller, but the remaining header quadlets
-                 * are big endian. We want to present all the headers
-                 * as big endian, so we have to swap the first
-                 * quadlet.
-                 */
-                *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
-                memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4);
-                i += ctx->base.header_size;
+        while (p < end) {
+                copy_iso_headers(ctx, p);
                 ctx->excess_bytes +=
                         (le32_to_cpu(*(__le32 *)(p + 4)) >> 16) & 0xffff;
-                p += ctx->base.header_size + 4;
+                p += max(ctx->base.header_size, (size_t)8);
         }
-        ctx->header_length = i;
 
         ctx->excess_bytes -= le16_to_cpu(db->second_req_count) -
                 le16_to_cpu(db->second_res_count);
@@ -1832,7 +1852,6 @@ static int handle_ir_packet_per_buffer(struct context *context,
         struct descriptor *pd;
         __le32 *ir_header;
         void *p;
-        int i;
 
         for (pd = d; pd <= last; pd++) {
                 if (pd->transfer_status)
@@ -1842,21 +1861,8 @@ static int handle_ir_packet_per_buffer(struct context *context,
                 /* Descriptor(s) not done yet, stop iteration */
                 return 0;
 
-        i = ctx->header_length;
-        p = last + 1;
-
-        if (ctx->base.header_size > 0 &&
-                        i + ctx->base.header_size <= PAGE_SIZE) {
-                /*
-                 * The iso header is byteswapped to little endian by
-                 * the controller, but the remaining header quadlets
-                 * are big endian. We want to present all the headers
-                 * as big endian, so we have to swap the first quadlet.
-                 */
-                *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
-                memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4);
-                ctx->header_length += ctx->base.header_size;
-        }
+        p = last + 1;
+        copy_iso_headers(ctx, p);
 
         if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
                 ir_header = (__le32 *) p;
@@ -1888,21 +1894,24 @@ static int handle_it_packet(struct context *context,
         return 1;
 }
 
-static struct fw_iso_context *
-ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size)
+static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
+                                int type, int channel, size_t header_size)
 {
         struct fw_ohci *ohci = fw_ohci(card);
         struct iso_context *ctx, *list;
         descriptor_callback_t callback;
+        u64 *channels, dont_care = ~0ULL;
         u32 *mask, regs;
         unsigned long flags;
-        int index, retval = -ENOMEM;
+        int index, ret = -ENOMEM;
 
         if (type == FW_ISO_CONTEXT_TRANSMIT) {
+                channels = &dont_care;
                 mask = &ohci->it_context_mask;
                 list = ohci->it_context_list;
                 callback = handle_it_packet;
         } else {
+                channels = &ohci->ir_context_channels;
                 mask = &ohci->ir_context_mask;
                 list = ohci->ir_context_list;
                 if (ohci->use_dualbuffer)
@@ -1912,9 +1921,11 @@ ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size)
         }
 
         spin_lock_irqsave(&ohci->lock, flags);
-        index = ffs(*mask) - 1;
-        if (index >= 0)
+        index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
+        if (index >= 0) {
+                *channels &= ~(1ULL << channel);
                 *mask &= ~(1 << index);
+        }
         spin_unlock_irqrestore(&ohci->lock, flags);
 
         if (index < 0)
@@ -1932,8 +1943,8 @@ ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size)
         if (ctx->header == NULL)
                 goto out;
 
-        retval = context_init(&ctx->context, ohci, regs, callback);
-        if (retval < 0)
+        ret = context_init(&ctx->context, ohci, regs, callback);
+        if (ret < 0)
                 goto out_with_header;
 
         return &ctx->base;
@@ -1945,7 +1956,7 @@ ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size)
         *mask |= 1 << index;
         spin_unlock_irqrestore(&ohci->lock, flags);
 
-        return ERR_PTR(retval);
+        return ERR_PTR(ret);
 }
 
 static int ohci_start_iso(struct fw_iso_context *base,
@@ -2024,16 +2035,16 @@ static void ohci_free_iso_context(struct fw_iso_context *base)
         } else {
                 index = ctx - ohci->ir_context_list;
                 ohci->ir_context_mask |= 1 << index;
+                ohci->ir_context_channels |= 1ULL << base->channel;
         }
 
         spin_unlock_irqrestore(&ohci->lock, flags);
 }
 
-static int
-ohci_queue_iso_transmit(struct fw_iso_context *base,
-                        struct fw_iso_packet *packet,
-                        struct fw_iso_buffer *buffer,
-                        unsigned long payload)
+static int ohci_queue_iso_transmit(struct fw_iso_context *base,
+                                   struct fw_iso_packet *packet,
+                                   struct fw_iso_buffer *buffer,
+                                   unsigned long payload)
 {
         struct iso_context *ctx = container_of(base, struct iso_context, base);
         struct descriptor *d, *last, *pd;
@@ -2128,11 +2139,10 @@ ohci_queue_iso_transmit(struct fw_iso_context *base,
         return 0;
 }
 
-static int
-ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
-                                  struct fw_iso_packet *packet,
-                                  struct fw_iso_buffer *buffer,
-                                  unsigned long payload)
+static int ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
+                                             struct fw_iso_packet *packet,
+                                             struct fw_iso_buffer *buffer,
+                                             unsigned long payload)
 {
         struct iso_context *ctx = container_of(base, struct iso_context, base);
         struct db_descriptor *db = NULL;
@@ -2151,11 +2161,11 @@ ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
         z = 2;
 
         /*
-         * The OHCI controller puts the status word in the header
-         * buffer too, so we need 4 extra bytes per packet.
+         * The OHCI controller puts the isochronous header and trailer in the
+         * buffer, so we need at least 8 bytes.
          */
         packet_count = p->header_length / ctx->base.header_size;
-        header_size = packet_count * (ctx->base.header_size + 4);
+        header_size = packet_count * max(ctx->base.header_size, (size_t)8);
 
         /* Get header size in number of descriptors. */
         header_z = DIV_ROUND_UP(header_size, sizeof(*d));
@@ -2173,7 +2183,8 @@ ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
                 db = (struct db_descriptor *) d;
                 db->control = cpu_to_le16(DESCRIPTOR_STATUS |
                                           DESCRIPTOR_BRANCH_ALWAYS);
-                db->first_size = cpu_to_le16(ctx->base.header_size + 4);
+                db->first_size =
+                        cpu_to_le16(max(ctx->base.header_size, (size_t)8));
                 if (p->skip && rest == p->payload_length) {
                         db->control |= cpu_to_le16(DESCRIPTOR_WAIT);
                         db->first_req_count = db->first_size;
@@ -2208,11 +2219,10 @@ ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
         return 0;
 }
 
-static int
-ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
-                                         struct fw_iso_packet *packet,
-                                         struct fw_iso_buffer *buffer,
-                                         unsigned long payload)
+static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
+                                        struct fw_iso_packet *packet,
+                                        struct fw_iso_buffer *buffer,
+                                        unsigned long payload)
 {
         struct iso_context *ctx = container_of(base, struct iso_context, base);
         struct descriptor *d = NULL, *pd = NULL;
@@ -2223,11 +2233,11 @@ ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
         int page, offset, packet_count, header_size, payload_per_buffer;
 
         /*
-         * The OHCI controller puts the status word in the
-         * buffer too, so we need 4 extra bytes per packet.
+         * The OHCI controller puts the isochronous header and trailer in the
+         * buffer, so we need at least 8 bytes.
          */
         packet_count = p->header_length / ctx->base.header_size;
-        header_size = ctx->base.header_size + 4;
+        header_size = max(ctx->base.header_size, (size_t)8);
 
         /* Get header size in number of descriptors. */
         header_z = DIV_ROUND_UP(header_size, sizeof(*d));
@@ -2286,29 +2296,27 @@ ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
         return 0;
 }
 
-static int
-ohci_queue_iso(struct fw_iso_context *base,
-               struct fw_iso_packet *packet,
-               struct fw_iso_buffer *buffer,
-               unsigned long payload)
+static int ohci_queue_iso(struct fw_iso_context *base,
+                          struct fw_iso_packet *packet,
+                          struct fw_iso_buffer *buffer,
+                          unsigned long payload)
 {
         struct iso_context *ctx = container_of(base, struct iso_context, base);
         unsigned long flags;
-        int retval;
+        int ret;
 
         spin_lock_irqsave(&ctx->context.ohci->lock, flags);
         if (base->type == FW_ISO_CONTEXT_TRANSMIT)
-                retval = ohci_queue_iso_transmit(base, packet, buffer, payload);
+                ret = ohci_queue_iso_transmit(base, packet, buffer, payload);
         else if (ctx->context.ohci->use_dualbuffer)
-                retval = ohci_queue_iso_receive_dualbuffer(base, packet,
+                ret = ohci_queue_iso_receive_dualbuffer(base, packet,
                                                 buffer, payload);
         else
-                retval = ohci_queue_iso_receive_packet_per_buffer(base, packet,
-                                                buffer,
-                                                payload);
+                ret = ohci_queue_iso_receive_packet_per_buffer(base, packet,
+                                                buffer, payload);
         spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);
 
-        return retval;
+        return ret;
 }
 
 static const struct fw_card_driver ohci_driver = {
@@ -2357,8 +2365,8 @@ static void ohci_pmac_off(struct pci_dev *dev)
 #define ohci_pmac_off(dev)
 #endif /* CONFIG_PPC_PMAC */
 
-static int __devinit
-pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
+static int __devinit pci_probe(struct pci_dev *dev,
+                               const struct pci_device_id *ent)
 {
         struct fw_ohci *ohci;
         u32 bus_options, max_receive, link_speed, version;
@@ -2440,6 +2448,7 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
         ohci->it_context_list = kzalloc(size, GFP_KERNEL);
 
         reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
+        ohci->ir_context_channels = ~0ULL;
         ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
         reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
         size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask);
@@ -2467,11 +2476,12 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
                 reg_read(ohci, OHCI1394_GUIDLo);
 
         err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
-        if (err < 0)
+        if (err)
                 goto fail_self_id;
 
         fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n",
                   dev_name(&dev->dev), version >> 16, version & 0xff);
+
         return 0;
 
  fail_self_id: