author     Stefan Richter <stefanr@s5r6.in-berlin.de>   2010-07-29 12:19:22 -0400
committer  Stefan Richter <stefanr@s5r6.in-berlin.de>   2010-07-29 17:09:18 -0400
commit     872e330e38806d835bd6c311c93ab998e2fb9058
tree       92497ce79b1157761b1aebdb63b8d74f68d42c15 /drivers/firewire
parent     ae2a97661482c1d0f1aa41b837da95054d0e9a1b
firewire: add isochronous multichannel reception
This adds the DMA context programming and userspace ABI for multichannel
reception, i.e. for listening on multiple channel numbers by means of a
single DMA context.
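
For illustration only, not part of this patch: a minimal sketch of how a
userspace client might drive the new ABI, assuming the <linux/firewire-cdev.h>
names added by this series (FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL,
FW_CDEV_IOC_SET_ISO_CHANNELS).  Device path, channel numbers and error
handling are invented for the example.

/* Sketch: create one multichannel IR context and claim channels 1, 8 and 9. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/firewire-cdev.h>

int multichannel_setup(void)
{
	struct fw_cdev_create_iso_context create = {
		.type    = FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL,
		.closure = 0,	/* echoed back in every interrupt event */
	};
	struct fw_cdev_set_iso_channels channels = {
		.channels = (1ULL << 1) | (1ULL << 8) | (1ULL << 9),
		.handle   = 0,	/* the fd's one and only iso context */
	};
	int fd = open("/dev/fw0", O_RDWR);	/* hypothetical device node */

	if (fd < 0)
		return -1;
	if (ioctl(fd, FW_CDEV_IOC_CREATE_ISO_CONTEXT, &create) < 0 ||
	    ioctl(fd, FW_CDEV_IOC_SET_ISO_CHANNELS, &channels) < 0)
		return -1;	/* e.g. EBUSY if a channel is already taken */
	return fd;
}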
The use case is reception of more streams than there are IR DMA units
offered by the link layer. This is already implemented by the older
ohci1394 + ieee1394 + raw1394 stack, and as discussed recently on
linux1394-devel, the feature is occasionally used in practice.
The big drawbacks of this mode are that buffer layout and interrupt
generation necessarily differ from single-channel reception: headers
and trailers are not stripped from packets, packets are not aligned with
buffer chunks, and interrupts are generated per buffer chunk rather than
per packet.
These drawbacks also cause a rather hefty code footprint to support this
rarely used OHCI-1394 feature. (367 lines added, among them 94 lines of
added userspace ABI documentation.)
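
To make the buffer-chunk semantics concrete, here is an equally hypothetical
continuation of the sketch above (reusing its includes and file descriptor):
the client queues fixed-size fill chunks - payload only, multiples of 4 bytes -
and gets one FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL event per completed
chunk, whose 'completed' field is a byte offset into the mmap()ed buffer.
Chunk size and count are made up for the example.

/* Sketch: queue the mmap()ed buffer as four fill chunks, interrupt per chunk.
 * Packet boundaries inside a chunk must be found by the client itself from
 * the unstripped iso headers in the data. */
#include <stdint.h>

#define CHUNK_SIZE	(64 * 1024)	/* invented for the example */
#define N_CHUNKS	4

int multichannel_queue(int fd, uint64_t buffer_addr /* returned by mmap() */)
{
	struct fw_cdev_iso_packet packets[N_CHUNKS];
	struct fw_cdev_queue_iso queue;
	int i;

	for (i = 0; i < N_CHUNKS; i++)
		packets[i].control = FW_CDEV_ISO_PAYLOAD_LENGTH(CHUNK_SIZE) |
				     FW_CDEV_ISO_INTERRUPT;

	queue.packets = (uintptr_t)packets;
	queue.data    = buffer_addr;
	queue.size    = sizeof(packets);
	queue.handle  = 0;

	return ioctl(fd, FW_CDEV_IOC_QUEUE_ISO, &queue);
}

Completion is then reported through the normal event read path: each read()
yields a struct fw_cdev_event_iso_interrupt_mc with type
FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL.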
This implementation enforces that a multichannel reception context may
only listen to channels to which no single-channel context on the same
link layer is presently listening. OHCI-1394 would allow the multichannel
context to overlay single-channel contexts, but this would be a departure
from the present first-come-first-served policy of IR context creation.
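
The check that enforces this policy is the single mask expression in
ohci_set_iso_channels() in the ohci.c hunk below.  A standalone toy
demonstration of the same arithmetic, with made-up channel numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical state: channel 32 is held by a single-channel context,
	 * everything else is free; the multichannel context owns nothing yet. */
	uint64_t ir_context_channels = ~(UINT64_C(1) << 32); /* unoccupied channels */
	uint64_t mc_channels = 0;	/* owned by the multichannel context */
	uint64_t request = (UINT64_C(1) << 32) | (UINT64_C(1) << 33);

	/* Same test as ohci_set_iso_channels(): a channel may be granted only
	 * if it is still unoccupied or already owned by the multichannel
	 * context; channel 32 is neither, so the request is refused. */
	if (~ir_context_channels & ~mc_channels & request)
		printf("-EBUSY\n");
	return 0;
}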
The implementation is heavily based on an earlier one by Jay Fenlason.
Thanks Jay.
Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>
Diffstat (limited to 'drivers/firewire')
-rw-r--r--  drivers/firewire/core-cdev.c |  93
-rw-r--r--  drivers/firewire/core-iso.c  |  32
-rw-r--r--  drivers/firewire/core.h      |   2
-rw-r--r--  drivers/firewire/ohci.c      | 316
4 files changed, 346 insertions, 97 deletions
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index cf989e1635e1..ba23646bb108 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -193,6 +193,11 @@ struct iso_interrupt_event {
 	struct fw_cdev_event_iso_interrupt interrupt;
 };
 
+struct iso_interrupt_mc_event {
+	struct event event;
+	struct fw_cdev_event_iso_interrupt_mc interrupt;
+};
+
 struct iso_resource_event {
 	struct event event;
 	struct fw_cdev_event_iso_resource iso_resource;
@@ -415,6 +420,7 @@ union ioctl_arg {
 	struct fw_cdev_get_cycle_timer2 get_cycle_timer2;
 	struct fw_cdev_send_phy_packet send_phy_packet;
 	struct fw_cdev_receive_phy_packets receive_phy_packets;
+	struct fw_cdev_set_iso_channels set_iso_channels;
 };
 
 static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
@@ -932,26 +938,54 @@ static void iso_callback(struct fw_iso_context *context, u32 cycle,
 			sizeof(e->interrupt) + header_length, NULL, 0);
 }
 
+static void iso_mc_callback(struct fw_iso_context *context,
+			    dma_addr_t completed, void *data)
+{
+	struct client *client = data;
+	struct iso_interrupt_mc_event *e;
+
+	e = kmalloc(sizeof(*e), GFP_ATOMIC);
+	if (e == NULL) {
+		fw_notify("Out of memory when allocating event\n");
+		return;
+	}
+	e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL;
+	e->interrupt.closure = client->iso_closure;
+	e->interrupt.completed = fw_iso_buffer_lookup(&client->buffer,
+						      completed);
+	queue_event(client, &e->event, &e->interrupt,
+		    sizeof(e->interrupt), NULL, 0);
+}
+
 static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
 {
 	struct fw_cdev_create_iso_context *a = &arg->create_iso_context;
 	struct fw_iso_context *context;
+	fw_iso_callback_t cb;
 
 	BUILD_BUG_ON(FW_CDEV_ISO_CONTEXT_TRANSMIT != FW_ISO_CONTEXT_TRANSMIT ||
-		     FW_CDEV_ISO_CONTEXT_RECEIVE != FW_ISO_CONTEXT_RECEIVE);
-
-	if (a->channel > 63)
-		return -EINVAL;
+		     FW_CDEV_ISO_CONTEXT_RECEIVE != FW_ISO_CONTEXT_RECEIVE ||
+		     FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL !=
+					FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL);
 
 	switch (a->type) {
-	case FW_ISO_CONTEXT_RECEIVE:
-		if (a->header_size < 4 || (a->header_size & 3))
+	case FW_ISO_CONTEXT_TRANSMIT:
+		if (a->speed > SCODE_3200 || a->channel > 63)
 			return -EINVAL;
+
+		cb = iso_callback;
 		break;
 
-	case FW_ISO_CONTEXT_TRANSMIT:
-		if (a->speed > SCODE_3200)
+	case FW_ISO_CONTEXT_RECEIVE:
+		if (a->header_size < 4 || (a->header_size & 3) ||
+		    a->channel > 63)
 			return -EINVAL;
+
+		cb = iso_callback;
+		break;
+
+	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
+		cb = (fw_iso_callback_t)iso_mc_callback;
 		break;
 
 	default:
@@ -959,8 +993,7 @@ static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
 	}
 
 	context = fw_iso_context_create(client->device->card, a->type,
-			a->channel, a->speed, a->header_size,
-			iso_callback, client);
+			a->channel, a->speed, a->header_size, cb, client);
 	if (IS_ERR(context))
 		return PTR_ERR(context);
 
@@ -980,6 +1013,17 @@ static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
 	return 0;
 }
 
+static int ioctl_set_iso_channels(struct client *client, union ioctl_arg *arg)
+{
+	struct fw_cdev_set_iso_channels *a = &arg->set_iso_channels;
+	struct fw_iso_context *ctx = client->iso_context;
+
+	if (ctx == NULL || a->handle != 0)
+		return -EINVAL;
+
+	return fw_iso_context_set_channels(ctx, &a->channels);
+}
+
 /* Macros for decoding the iso packet control header. */
 #define GET_PAYLOAD_LENGTH(v)	((v) & 0xffff)
 #define GET_INTERRUPT(v)	(((v) >> 16) & 0x01)
@@ -993,7 +1037,7 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
 	struct fw_cdev_queue_iso *a = &arg->queue_iso;
 	struct fw_cdev_iso_packet __user *p, *end, *next;
 	struct fw_iso_context *ctx = client->iso_context;
-	unsigned long payload, buffer_end, transmit_header_bytes;
+	unsigned long payload, buffer_end, transmit_header_bytes = 0;
 	u32 control;
 	int count;
 	struct {
@@ -1013,7 +1057,6 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
 	 * use the indirect payload, the iso buffer need not be mapped
 	 * and the a->data pointer is ignored.
 	 */
-
 	payload = (unsigned long)a->data - client->vm_start;
 	buffer_end = client->buffer.page_count << PAGE_SHIFT;
 	if (a->data == 0 || client->buffer.pages == NULL ||
@@ -1022,8 +1065,10 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
 		buffer_end = 0;
 	}
 
-	p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets);
+	if (ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL && payload & 3)
+		return -EINVAL;
 
+	p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets);
 	if (!access_ok(VERIFY_READ, p, a->size))
 		return -EFAULT;
 
@@ -1039,19 +1084,24 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
 		u.packet.sy = GET_SY(control);
 		u.packet.header_length = GET_HEADER_LENGTH(control);
 
-		if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) {
-			if (u.packet.header_length % 4 != 0)
+		switch (ctx->type) {
+		case FW_ISO_CONTEXT_TRANSMIT:
+			if (u.packet.header_length & 3)
 				return -EINVAL;
 			transmit_header_bytes = u.packet.header_length;
-		} else {
-			/*
-			 * We require that header_length is a multiple of
-			 * the fixed header size, ctx->header_size.
-			 */
+			break;
+
+		case FW_ISO_CONTEXT_RECEIVE:
 			if (u.packet.header_length == 0 ||
 			    u.packet.header_length % ctx->header_size != 0)
 				return -EINVAL;
-			transmit_header_bytes = 0;
+			break;
+
+		case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
+			if (u.packet.payload_length == 0 ||
+			    u.packet.payload_length & 3)
+				return -EINVAL;
+			break;
 		}
 
 		next = (struct fw_cdev_iso_packet __user *)
@@ -1534,6 +1584,7 @@ static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = {
 	[0x14] = ioctl_get_cycle_timer2,
 	[0x15] = ioctl_send_phy_packet,
 	[0x16] = ioctl_receive_phy_packets,
+	[0x17] = ioctl_set_iso_channels,
 };
 
 static int dispatch_ioctl(struct client *client,
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index 4fe932e60fb8..0c8e662a5daf 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -117,6 +117,23 @@ void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
 }
 EXPORT_SYMBOL(fw_iso_buffer_destroy);
 
+/* Convert DMA address to offset into virtually contiguous buffer. */
+size_t fw_iso_buffer_lookup(struct fw_iso_buffer *buffer, dma_addr_t completed)
+{
+	int i;
+	dma_addr_t address;
+	ssize_t offset;
+
+	for (i = 0; i < buffer->page_count; i++) {
+		address = page_private(buffer->pages[i]);
+		offset = (ssize_t)completed - (ssize_t)address;
+		if (offset > 0 && offset <= PAGE_SIZE)
+			return (i << PAGE_SHIFT) + offset;
+	}
+
+	return 0;
+}
+
 struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
 		int type, int channel, int speed, size_t header_size,
 		fw_iso_callback_t callback, void *callback_data)
@@ -133,7 +150,7 @@ struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
 	ctx->channel = channel;
 	ctx->speed = speed;
 	ctx->header_size = header_size;
-	ctx->callback = callback;
+	ctx->callback.sc = callback;
 	ctx->callback_data = callback_data;
 
 	return ctx;
@@ -142,9 +159,7 @@ EXPORT_SYMBOL(fw_iso_context_create);
 
 void fw_iso_context_destroy(struct fw_iso_context *ctx)
 {
-	struct fw_card *card = ctx->card;
-
-	card->driver->free_iso_context(ctx);
+	ctx->card->driver->free_iso_context(ctx);
 }
 EXPORT_SYMBOL(fw_iso_context_destroy);
 
@@ -155,14 +170,17 @@ int fw_iso_context_start(struct fw_iso_context *ctx,
 }
 EXPORT_SYMBOL(fw_iso_context_start);
 
+int fw_iso_context_set_channels(struct fw_iso_context *ctx, u64 *channels)
+{
+	return ctx->card->driver->set_iso_channels(ctx, channels);
+}
+
 int fw_iso_context_queue(struct fw_iso_context *ctx,
 			 struct fw_iso_packet *packet,
 			 struct fw_iso_buffer *buffer,
 			 unsigned long payload)
 {
-	struct fw_card *card = ctx->card;
-
-	return card->driver->queue_iso(ctx, packet, buffer, payload);
+	return ctx->card->driver->queue_iso(ctx, packet, buffer, payload);
 }
 EXPORT_SYMBOL(fw_iso_context_queue);
 
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index 28621e44b111..e6239f971be6 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -90,6 +90,8 @@ struct fw_card_driver {
 	int (*start_iso)(struct fw_iso_context *ctx,
 			 s32 cycle, u32 sync, u32 tags);
 
+	int (*set_iso_channels)(struct fw_iso_context *ctx, u64 *channels);
+
 	int (*queue_iso)(struct fw_iso_context *ctx,
 			 struct fw_iso_packet *packet,
 			 struct fw_iso_buffer *buffer,
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 2e4b425847a7..4bda1c1b74ba 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -190,11 +190,13 @@ struct fw_ohci {
 	struct context at_request_ctx;
 	struct context at_response_ctx;
 
-	u32 it_context_mask;
+	u32 it_context_mask; /* unoccupied IT contexts */
 	struct iso_context *it_context_list;
-	u64 ir_context_channels;
-	u32 ir_context_mask;
+	u64 ir_context_channels; /* unoccupied channels */
+	u32 ir_context_mask; /* unoccupied IR contexts */
 	struct iso_context *ir_context_list;
+	u64 mc_channels; /* channels in use by the multichannel IR context */
+	bool mc_allocated;
 
 	__be32 *config_rom;
 	dma_addr_t config_rom_bus;
@@ -2197,10 +2199,9 @@ static int handle_ir_packet_per_buffer(struct context *context,
 	__le32 *ir_header;
 	void *p;
 
-	for (pd = d; pd <= last; pd++) {
+	for (pd = d; pd <= last; pd++)
 		if (pd->transfer_status)
 			break;
-	}
 	if (pd > last)
 		/* Descriptor(s) not done yet, stop iteration */
 		return 0;
@@ -2210,16 +2211,38 @@ static int handle_ir_packet_per_buffer(struct context *context,
 
 	if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
 		ir_header = (__le32 *) p;
-		ctx->base.callback(&ctx->base,
-				   le32_to_cpu(ir_header[0]) & 0xffff,
-				   ctx->header_length, ctx->header,
-				   ctx->base.callback_data);
+		ctx->base.callback.sc(&ctx->base,
+				      le32_to_cpu(ir_header[0]) & 0xffff,
+				      ctx->header_length, ctx->header,
+				      ctx->base.callback_data);
 		ctx->header_length = 0;
 	}
 
 	return 1;
 }
 
+/* d == last because each descriptor block is only a single descriptor. */
+static int handle_ir_buffer_fill(struct context *context,
+				 struct descriptor *d,
+				 struct descriptor *last)
+{
+	struct iso_context *ctx =
+		container_of(context, struct iso_context, context);
+
+	if (!last->transfer_status)
+		/* Descriptor(s) not done yet, stop iteration */
+		return 0;
+
+	if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS)
+		ctx->base.callback.mc(&ctx->base,
+				      le32_to_cpu(last->data_address) +
+				      le16_to_cpu(last->req_count) -
+				      le16_to_cpu(last->res_count),
+				      ctx->base.callback_data);
+
+	return 1;
+}
+
 static int handle_it_packet(struct context *context,
 			    struct descriptor *d,
 			    struct descriptor *last)
@@ -2245,72 +2268,118 @@ static int handle_it_packet(struct context *context,
 		ctx->header_length += 4;
 	}
 	if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
-		ctx->base.callback(&ctx->base, le16_to_cpu(last->res_count),
-				   ctx->header_length, ctx->header,
-				   ctx->base.callback_data);
+		ctx->base.callback.sc(&ctx->base, le16_to_cpu(last->res_count),
+				      ctx->header_length, ctx->header,
+				      ctx->base.callback_data);
 		ctx->header_length = 0;
 	}
 	return 1;
 }
 
+static void set_multichannel_mask(struct fw_ohci *ohci, u64 channels)
+{
+	u32 hi = channels >> 32, lo = channels;
+
+	reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, ~hi);
+	reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, ~lo);
+	reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet, hi);
+	reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet, lo);
+	mmiowb();
+	ohci->mc_channels = channels;
+}
+
 static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
 				int type, int channel, size_t header_size)
 {
 	struct fw_ohci *ohci = fw_ohci(card);
-	struct iso_context *ctx, *list;
-	descriptor_callback_t callback;
-	u64 *channels, dont_care = ~0ULL;
-	u32 *mask, regs;
+	struct iso_context *uninitialized_var(ctx);
+	descriptor_callback_t uninitialized_var(callback);
+	u64 *uninitialized_var(channels);
+	u32 *uninitialized_var(mask), uninitialized_var(regs);
 	unsigned long flags;
-	int index, ret = -ENOMEM;
+	int index, ret = -EBUSY;
 
-	if (type == FW_ISO_CONTEXT_TRANSMIT) {
-		channels = &dont_care;
-		mask = &ohci->it_context_mask;
-		list = ohci->it_context_list;
+	spin_lock_irqsave(&ohci->lock, flags);
+
+	switch (type) {
+	case FW_ISO_CONTEXT_TRANSMIT:
+		mask = &ohci->it_context_mask;
 		callback = handle_it_packet;
-	} else {
+		index = ffs(*mask) - 1;
+		if (index >= 0) {
+			*mask &= ~(1 << index);
+			regs = OHCI1394_IsoXmitContextBase(index);
+			ctx = &ohci->it_context_list[index];
+		}
+		break;
+
+	case FW_ISO_CONTEXT_RECEIVE:
 		channels = &ohci->ir_context_channels;
 		mask = &ohci->ir_context_mask;
-		list = ohci->ir_context_list;
 		callback = handle_ir_packet_per_buffer;
-	}
+		index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
+		if (index >= 0) {
+			*channels &= ~(1ULL << channel);
+			*mask &= ~(1 << index);
+			regs = OHCI1394_IsoRcvContextBase(index);
+			ctx = &ohci->ir_context_list[index];
+		}
+		break;
 
-	spin_lock_irqsave(&ohci->lock, flags);
-	index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
-	if (index >= 0) {
-		*channels &= ~(1ULL << channel);
-		*mask &= ~(1 << index);
+	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
+		mask = &ohci->ir_context_mask;
+		callback = handle_ir_buffer_fill;
+		index = !ohci->mc_allocated ? ffs(*mask) - 1 : -1;
+		if (index >= 0) {
+			ohci->mc_allocated = true;
+			*mask &= ~(1 << index);
+			regs = OHCI1394_IsoRcvContextBase(index);
+			ctx = &ohci->ir_context_list[index];
+		}
+		break;
+
+	default:
+		index = -1;
+		ret = -ENOSYS;
 	}
+
 	spin_unlock_irqrestore(&ohci->lock, flags);
 
 	if (index < 0)
-		return ERR_PTR(-EBUSY);
-
-	if (type == FW_ISO_CONTEXT_TRANSMIT)
-		regs = OHCI1394_IsoXmitContextBase(index);
-	else
-		regs = OHCI1394_IsoRcvContextBase(index);
+		return ERR_PTR(ret);
 
-	ctx = &list[index];
 	memset(ctx, 0, sizeof(*ctx));
 	ctx->header_length = 0;
 	ctx->header = (void *) __get_free_page(GFP_KERNEL);
-	if (ctx->header == NULL)
+	if (ctx->header == NULL) {
+		ret = -ENOMEM;
 		goto out;
-
+	}
 	ret = context_init(&ctx->context, ohci, regs, callback);
 	if (ret < 0)
 		goto out_with_header;
 
+	if (type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL)
+		set_multichannel_mask(ohci, 0);
+
 	return &ctx->base;
 
  out_with_header:
	free_page((unsigned long)ctx->header);
  out:
	spin_lock_irqsave(&ohci->lock, flags);
-	*channels |= 1ULL << channel;
+
+	switch (type) {
+	case FW_ISO_CONTEXT_RECEIVE:
+		*channels |= 1ULL << channel;
+		break;
+
+	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
+		ohci->mc_allocated = false;
+		break;
+	}
 	*mask |= 1 << index;
+
 	spin_unlock_irqrestore(&ohci->lock, flags);
 
 	return ERR_PTR(ret);
@@ -2321,10 +2390,11 @@ static int ohci_start_iso(struct fw_iso_context *base,
 {
 	struct iso_context *ctx = container_of(base, struct iso_context, base);
 	struct fw_ohci *ohci = ctx->context.ohci;
-	u32 control, match;
+	u32 control = IR_CONTEXT_ISOCH_HEADER, match;
 	int index;
 
-	if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
+	switch (ctx->base.type) {
+	case FW_ISO_CONTEXT_TRANSMIT:
 		index = ctx - ohci->it_context_list;
 		match = 0;
 		if (cycle >= 0)
@@ -2334,9 +2404,13 @@ static int ohci_start_iso(struct fw_iso_context *base,
 		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index);
 		reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
 		context_run(&ctx->context, match);
-	} else {
+		break;
+
+	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
+		control |= IR_CONTEXT_BUFFER_FILL|IR_CONTEXT_MULTI_CHANNEL_MODE;
+		/* fall through */
+	case FW_ISO_CONTEXT_RECEIVE:
 		index = ctx - ohci->ir_context_list;
-		control = IR_CONTEXT_ISOCH_HEADER;
 		match = (tags << 28) | (sync << 8) | ctx->base.channel;
 		if (cycle >= 0) {
 			match |= (cycle & 0x07fff) << 12;
@@ -2347,6 +2421,7 @@ static int ohci_start_iso(struct fw_iso_context *base,
 		reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
 		reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match);
 		context_run(&ctx->context, control);
+		break;
 	}
 
 	return 0;
@@ -2358,12 +2433,17 @@ static int ohci_stop_iso(struct fw_iso_context *base)
 	struct iso_context *ctx = container_of(base, struct iso_context, base);
 	int index;
 
-	if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
+	switch (ctx->base.type) {
+	case FW_ISO_CONTEXT_TRANSMIT:
 		index = ctx - ohci->it_context_list;
 		reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index);
-	} else {
+		break;
+
+	case FW_ISO_CONTEXT_RECEIVE:
+	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
 		index = ctx - ohci->ir_context_list;
 		reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index);
+		break;
 	}
 	flush_writes(ohci);
 	context_stop(&ctx->context);
@@ -2384,24 +2464,65 @@ static void ohci_free_iso_context(struct fw_iso_context *base)
 
 	spin_lock_irqsave(&ohci->lock, flags);
 
-	if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
+	switch (base->type) {
+	case FW_ISO_CONTEXT_TRANSMIT:
 		index = ctx - ohci->it_context_list;
 		ohci->it_context_mask |= 1 << index;
-	} else {
+		break;
+
+	case FW_ISO_CONTEXT_RECEIVE:
 		index = ctx - ohci->ir_context_list;
 		ohci->ir_context_mask |= 1 << index;
 		ohci->ir_context_channels |= 1ULL << base->channel;
+		break;
+
+	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
+		index = ctx - ohci->ir_context_list;
+		ohci->ir_context_mask |= 1 << index;
+		ohci->ir_context_channels |= ohci->mc_channels;
+		ohci->mc_channels = 0;
+		ohci->mc_allocated = false;
+		break;
 	}
 
 	spin_unlock_irqrestore(&ohci->lock, flags);
 }
 
-static int ohci_queue_iso_transmit(struct fw_iso_context *base,
-				   struct fw_iso_packet *packet,
-				   struct fw_iso_buffer *buffer,
-				   unsigned long payload)
+static int ohci_set_iso_channels(struct fw_iso_context *base, u64 *channels)
+{
+	struct fw_ohci *ohci = fw_ohci(base->card);
+	unsigned long flags;
+	int ret;
+
+	switch (base->type) {
+	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
+
+		spin_lock_irqsave(&ohci->lock, flags);
+
+		/* Don't allow multichannel to grab other contexts' channels. */
+		if (~ohci->ir_context_channels & ~ohci->mc_channels & *channels) {
+			*channels = ohci->ir_context_channels;
+			ret = -EBUSY;
+		} else {
+			set_multichannel_mask(ohci, *channels);
+			ret = 0;
+		}
+
+		spin_unlock_irqrestore(&ohci->lock, flags);
+
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int queue_iso_transmit(struct iso_context *ctx,
+			      struct fw_iso_packet *packet,
+			      struct fw_iso_buffer *buffer,
+			      unsigned long payload)
 {
-	struct iso_context *ctx = container_of(base, struct iso_context, base);
 	struct descriptor *d, *last, *pd;
 	struct fw_iso_packet *p;
 	__le32 *header;
@@ -2497,14 +2618,12 @@ static int ohci_queue_iso_transmit(struct fw_iso_context *base,
 	return 0;
 }
 
-static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
-					struct fw_iso_packet *packet,
-					struct fw_iso_buffer *buffer,
-					unsigned long payload)
+static int queue_iso_packet_per_buffer(struct iso_context *ctx,
+				       struct fw_iso_packet *packet,
+				       struct fw_iso_buffer *buffer,
+				       unsigned long payload)
 {
-	struct iso_context *ctx = container_of(base, struct iso_context, base);
 	struct descriptor *d, *pd;
-	struct fw_iso_packet *p = packet;
 	dma_addr_t d_bus, page_bus;
 	u32 z, header_z, rest;
 	int i, j, length;
@@ -2514,14 +2633,14 @@ static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
 	 * The OHCI controller puts the isochronous header and trailer in the
 	 * buffer, so we need at least 8 bytes.
 	 */
-	packet_count = p->header_length / ctx->base.header_size;
+	packet_count = packet->header_length / ctx->base.header_size;
 	header_size = max(ctx->base.header_size, (size_t)8);
 
 	/* Get header size in number of descriptors. */
 	header_z = DIV_ROUND_UP(header_size, sizeof(*d));
 	page = payload >> PAGE_SHIFT;
 	offset = payload & ~PAGE_MASK;
-	payload_per_buffer = p->payload_length / packet_count;
+	payload_per_buffer = packet->payload_length / packet_count;
 
 	for (i = 0; i < packet_count; i++) {
 		/* d points to the header descriptor */
@@ -2533,7 +2652,7 @@ static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
 
 		d->control = cpu_to_le16(DESCRIPTOR_STATUS |
 					 DESCRIPTOR_INPUT_MORE);
-		if (p->skip && i == 0)
+		if (packet->skip && i == 0)
 			d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
 		d->req_count = cpu_to_le16(header_size);
 		d->res_count = d->req_count;
@@ -2566,7 +2685,7 @@ static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
 		pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
 					  DESCRIPTOR_INPUT_LAST |
 					  DESCRIPTOR_BRANCH_ALWAYS);
-		if (p->interrupt && i == packet_count - 1)
+		if (packet->interrupt && i == packet_count - 1)
 			pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
 
 		context_append(&ctx->context, d, z, header_z);
@@ -2575,6 +2694,58 @@ static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
 	return 0;
 }
 
+static int queue_iso_buffer_fill(struct iso_context *ctx,
+				 struct fw_iso_packet *packet,
+				 struct fw_iso_buffer *buffer,
+				 unsigned long payload)
+{
+	struct descriptor *d;
+	dma_addr_t d_bus, page_bus;
+	int page, offset, rest, z, i, length;
+
+	page = payload >> PAGE_SHIFT;
+	offset = payload & ~PAGE_MASK;
+	rest = packet->payload_length;
+
+	/* We need one descriptor for each page in the buffer. */
+	z = DIV_ROUND_UP(offset + rest, PAGE_SIZE);
+
+	if (WARN_ON(offset & 3 || rest & 3 || page + z > buffer->page_count))
+		return -EFAULT;
+
+	for (i = 0; i < z; i++) {
+		d = context_get_descriptors(&ctx->context, 1, &d_bus);
+		if (d == NULL)
+			return -ENOMEM;
+
+		d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
+					 DESCRIPTOR_BRANCH_ALWAYS);
+		if (packet->skip && i == 0)
+			d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
+		if (packet->interrupt && i == z - 1)
+			d->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
+
+		if (offset + rest < PAGE_SIZE)
+			length = rest;
+		else
+			length = PAGE_SIZE - offset;
+		d->req_count = cpu_to_le16(length);
+		d->res_count = d->req_count;
+		d->transfer_status = 0;
+
+		page_bus = page_private(buffer->pages[page]);
+		d->data_address = cpu_to_le32(page_bus + offset);
+
+		rest -= length;
+		offset = 0;
+		page++;
+
+		context_append(&ctx->context, d, 1, 0);
+	}
+
+	return 0;
+}
+
 static int ohci_queue_iso(struct fw_iso_context *base,
 			  struct fw_iso_packet *packet,
 			  struct fw_iso_buffer *buffer,
@@ -2582,14 +2753,20 @@ static int ohci_queue_iso(struct fw_iso_context *base,
 {
 	struct iso_context *ctx = container_of(base, struct iso_context, base);
 	unsigned long flags;
-	int ret;
+	int ret = -ENOSYS;
 
 	spin_lock_irqsave(&ctx->context.ohci->lock, flags);
-	if (base->type == FW_ISO_CONTEXT_TRANSMIT)
-		ret = ohci_queue_iso_transmit(base, packet, buffer, payload);
-	else
-		ret = ohci_queue_iso_receive_packet_per_buffer(base, packet,
-							       buffer, payload);
+	switch (base->type) {
+	case FW_ISO_CONTEXT_TRANSMIT:
+		ret = queue_iso_transmit(ctx, packet, buffer, payload);
+		break;
+	case FW_ISO_CONTEXT_RECEIVE:
+		ret = queue_iso_packet_per_buffer(ctx, packet, buffer, payload);
+		break;
+	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
+		ret = queue_iso_buffer_fill(ctx, packet, buffer, payload);
+		break;
+	}
 	spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);
 
 	return ret;
@@ -2609,6 +2786,7 @@ static const struct fw_card_driver ohci_driver = {
 
 	.allocate_iso_context = ohci_allocate_iso_context,
 	.free_iso_context = ohci_free_iso_context,
+	.set_iso_channels = ohci_set_iso_channels,
 	.queue_iso = ohci_queue_iso,
 	.start_iso = ohci_start_iso,
 	.stop_iso = ohci_stop_iso,