author     Jay Fenlason, Stefan Richter <stefanr@s5r6.in-berlin.de>   2009-01-04 10:23:29 -0500
committer  Stefan Richter <stefanr@s5r6.in-berlin.de>                 2009-03-24 15:56:43 -0400
commit     b1bda4cdc2037447bd66753bf5ccab66d91b0b59 (patch)
tree       6aae47fb85125c15150d6d306354de5deb1e316f /drivers
parent     b769bd17656f991c5588c676376e5ec77d25997a (diff)
firewire: cdev: add ioctls for isochronous resource management
Based on
Date: Tue, 18 Nov 2008 11:41:27 -0500
From: Jay Fenlason <fenlason@redhat.com>
Subject: [Patch V4] Add ISO resource management support
with several changes to the ABI and implementation. Only the part of
the ABI which enables auto-reallocation and auto-deallocation is
included here.
This implements ioctls for kernel-assisted allocation of isochronous
channels and isochronous bandwidth. The benefits are:
- The client does not have to have write access to the /dev/fw* device
corresponding to the IRM.
- The client does not have to perform reallocation after bus resets.
- Channel and bandwidth are deallocated by the kernel if the file is
closed before the client has deallocated the resources. Thus, resources
are released even if the client crashes.
It is anticipated that future in-kernel code (firewire-core IRM code;
the firewire port of firedtv) will use the fw-iso.c portions of this
code too.
Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>
Tested-by: David Moore <dcm@acm.org>
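
As a rough illustration of the new ABI (not taken from this patch): the sketch below assumes the FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE ioctl and the event structures that the companion <linux/firewire-cdev.h> change defines; the device path and the bandwidth value are placeholders.

/*
 * Hedged userspace sketch: request any free isochronous channel plus some
 * bandwidth through the new cdev ABI and wait for the
 * FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED event.  The initial
 * FW_CDEV_IOC_GET_INFO handshake and most error handling are omitted.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/firewire-cdev.h>

int main(void)
{
	struct fw_cdev_allocate_iso_resource req;
	union {
		struct fw_cdev_event_common common;
		struct fw_cdev_event_iso_resource iso_resource;
		char buf[256];
	} ev;
	int fd = open("/dev/fw1", O_RDWR);	/* any node, not necessarily the IRM */

	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	req.channels  = ~0ULL;	/* any channel 0..63; bit 63 - c means channel c */
	req.bandwidth = 512;	/* requested IEEE 1394 bandwidth allocation units */
	if (ioctl(fd, FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE, &req) < 0)
		return 1;

	/* The result arrives asynchronously as an event on the same fd. */
	while (read(fd, &ev, sizeof(ev)) > 0) {
		if (ev.common.type != FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED)
			continue;
		printf("channel %d, bandwidth %d, handle %u\n",
		       ev.iso_resource.channel, ev.iso_resource.bandwidth,
		       ev.iso_resource.handle);
		break;
	}

	/* Closing the fd makes the kernel deallocate channel and bandwidth. */
	close(fd);
	return 0;
}

Because the kernel ties the allocation to the file handle, the auto-deallocation described above happens on close() or process exit even without an explicit FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE call.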
Diffstat (limited to 'drivers')
-rw-r--r--   drivers/firewire/fw-cdev.c         | 215
-rw-r--r--   drivers/firewire/fw-iso.c          | 176
-rw-r--r--   drivers/firewire/fw-transaction.h  |   4
3 files changed, 388 insertions, 7 deletions
diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c
index 4c33b51b735a..a227853aa1e2 100644
--- a/drivers/firewire/fw-cdev.c
+++ b/drivers/firewire/fw-cdev.c
@@ -24,6 +24,7 @@
 #include <linux/errno.h>
 #include <linux/firewire-cdev.h>
 #include <linux/idr.h>
+#include <linux/jiffies.h>
 #include <linux/kernel.h>
 #include <linux/kref.h>
 #include <linux/mm.h>
@@ -35,6 +36,7 @@
 #include <linux/time.h>
 #include <linux/vmalloc.h>
 #include <linux/wait.h>
+#include <linux/workqueue.h>
 
 #include <asm/system.h>
 #include <asm/uaccess.h>
@@ -114,6 +116,21 @@ struct descriptor_resource {
 	u32 data[0];
 };
 
+struct iso_resource {
+	struct client_resource resource;
+	struct client *client;
+	/* Schedule work and access todo only with client->lock held. */
+	struct delayed_work work;
+	enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC,} todo;
+	int generation;
+	u64 channels;
+	s32 bandwidth;
+	struct iso_resource_event *e_alloc, *e_dealloc;
+};
+
+static void schedule_iso_resource(struct iso_resource *);
+static void release_iso_resource(struct client *, struct client_resource *);
+
 /*
  * dequeue_event() just kfree()'s the event, so the event has to be
  * the first field in a struct XYZ_event.
@@ -145,6 +162,11 @@ struct iso_interrupt_event {
 	struct fw_cdev_event_iso_interrupt interrupt;
 };
 
+struct iso_resource_event {
+	struct event event;
+	struct fw_cdev_event_iso_resource resource;
+};
+
 static inline void __user *u64_to_uptr(__u64 value)
 {
 	return (void __user *)(unsigned long)value;
@@ -290,6 +312,16 @@ static void for_each_client(struct fw_device *device,
 	mutex_unlock(&device->client_list_mutex);
 }
 
+static int schedule_reallocations(int id, void *p, void *data)
+{
+	struct client_resource *r = p;
+
+	if (r->release == release_iso_resource)
+		schedule_iso_resource(container_of(r,
+					struct iso_resource, resource));
+	return 0;
+}
+
 static void queue_bus_reset_event(struct client *client)
 {
 	struct bus_reset_event *e;
@@ -304,6 +336,10 @@ static void queue_bus_reset_event(struct client *client)
 
 	queue_event(client, &e->event,
 		    &e->reset, sizeof(e->reset), NULL, 0);
+
+	spin_lock_irq(&client->lock);
+	idr_for_each(&client->resource_idr, schedule_reallocations, client);
+	spin_unlock_irq(&client->lock);
 }
 
 void fw_device_cdev_update(struct fw_device *device)
@@ -376,8 +412,12 @@ static int add_client_resource(struct client *client,
 	else
 		ret = idr_get_new(&client->resource_idr, resource,
 				  &resource->handle);
-	if (ret >= 0)
+	if (ret >= 0) {
 		client_get(client);
+		if (resource->release == release_iso_resource)
+			schedule_iso_resource(container_of(resource,
+					struct iso_resource, resource));
+	}
 	spin_unlock_irqrestore(&client->lock, flags);
 
 	if (ret == -EAGAIN)
@@ -970,6 +1010,177 @@ static int ioctl_get_cycle_timer(struct client *client, void *buffer)
 	return 0;
 }
 
+static void iso_resource_work(struct work_struct *work)
+{
+	struct iso_resource_event *e;
+	struct iso_resource *r =
+			container_of(work, struct iso_resource, work.work);
+	struct client *client = r->client;
+	int generation, channel, bandwidth, todo;
+	bool skip, free, success;
+
+	spin_lock_irq(&client->lock);
+	generation = client->device->generation;
+	todo = r->todo;
+	/* Allow 1000ms grace period for other reallocations. */
+	if (todo == ISO_RES_ALLOC &&
+	    time_is_after_jiffies(client->device->card->reset_jiffies + HZ)) {
+		if (schedule_delayed_work(&r->work, DIV_ROUND_UP(HZ, 3)))
+			client_get(client);
+		skip = true;
+	} else {
+		/* We could be called twice within the same generation. */
+		skip = todo == ISO_RES_REALLOC &&
+		       r->generation == generation;
+	}
+	free = todo == ISO_RES_DEALLOC;
+	r->generation = generation;
+	spin_unlock_irq(&client->lock);
+
+	if (skip)
+		goto out;
+
+	bandwidth = r->bandwidth;
+
+	fw_iso_resource_manage(client->device->card, generation,
+			r->channels, &channel, &bandwidth,
+			todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC);
+	/*
+	 * Is this generation outdated already? As long as this resource sticks
+	 * in the idr, it will be scheduled again for a newer generation or at
+	 * shutdown.
+	 */
+	if (channel == -EAGAIN &&
+	    (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC))
+		goto out;
+
+	success = channel >= 0 || bandwidth > 0;
+
+	spin_lock_irq(&client->lock);
+	/*
+	 * Transit from allocation to reallocation, except if the client
+	 * requested deallocation in the meantime.
+	 */
+	if (r->todo == ISO_RES_ALLOC)
+		r->todo = ISO_RES_REALLOC;
+	/*
+	 * Allocation or reallocation failure? Pull this resource out of the
+	 * idr and prepare for deletion, unless the client is shutting down.
+	 */
+	if (r->todo == ISO_RES_REALLOC && !success &&
+	    !client->in_shutdown &&
+	    idr_find(&client->resource_idr, r->resource.handle)) {
+		idr_remove(&client->resource_idr, r->resource.handle);
+		client_put(client);
+		free = true;
+	}
+	spin_unlock_irq(&client->lock);
+
+	if (todo == ISO_RES_ALLOC && channel >= 0)
+		r->channels = 1ULL << (63 - channel);
+
+	if (todo == ISO_RES_REALLOC && success)
+		goto out;
+
+	if (todo == ISO_RES_ALLOC) {
+		e = r->e_alloc;
+		r->e_alloc = NULL;
+	} else {
+		e = r->e_dealloc;
+		r->e_dealloc = NULL;
+	}
+	e->resource.handle = r->resource.handle;
+	e->resource.channel = channel;
+	e->resource.bandwidth = bandwidth;
+
+	queue_event(client, &e->event,
+		    &e->resource, sizeof(e->resource), NULL, 0);
+
+	if (free) {
+		cancel_delayed_work(&r->work);
+		kfree(r->e_alloc);
+		kfree(r->e_dealloc);
+		kfree(r);
+	}
+ out:
+	client_put(client);
+}
+
+static void schedule_iso_resource(struct iso_resource *r)
+{
+	if (schedule_delayed_work(&r->work, 0))
+		client_get(r->client);
+}
+
+static void release_iso_resource(struct client *client,
+				 struct client_resource *resource)
+{
+	struct iso_resource *r =
+		container_of(resource, struct iso_resource, resource);
+
+	spin_lock_irq(&client->lock);
+	r->todo = ISO_RES_DEALLOC;
+	schedule_iso_resource(r);
+	spin_unlock_irq(&client->lock);
+}
+
+static int ioctl_allocate_iso_resource(struct client *client, void *buffer)
+{
+	struct fw_cdev_allocate_iso_resource *request = buffer;
+	struct iso_resource_event *e1, *e2;
+	struct iso_resource *r;
+	int ret;
+
+	if ((request->channels == 0 && request->bandwidth == 0) ||
+	    request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
+	    request->bandwidth < 0)
+		return -EINVAL;
+
+	r = kmalloc(sizeof(*r), GFP_KERNEL);
+	e1 = kmalloc(sizeof(*e1), GFP_KERNEL);
+	e2 = kmalloc(sizeof(*e2), GFP_KERNEL);
+	if (r == NULL || e1 == NULL || e2 == NULL) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	INIT_DELAYED_WORK(&r->work, iso_resource_work);
+	r->client = client;
+	r->todo = ISO_RES_ALLOC;
+	r->generation = -1;
+	r->channels = request->channels;
+	r->bandwidth = request->bandwidth;
+	r->e_alloc = e1;
+	r->e_dealloc = e2;
+
+	e1->resource.closure = request->closure;
+	e1->resource.type = FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED;
+	e2->resource.closure = request->closure;
+	e2->resource.type = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED;
+
+	r->resource.release = release_iso_resource;
+	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
+	if (ret < 0)
+		goto fail;
+	request->handle = r->resource.handle;
+
+	return 0;
+ fail:
+	kfree(r);
+	kfree(e1);
+	kfree(e2);
+
+	return ret;
+}
+
+static int ioctl_deallocate_iso_resource(struct client *client, void *buffer)
+{
+	struct fw_cdev_deallocate *request = buffer;
+
+	return release_client_resource(client, request->handle,
+				       release_iso_resource, NULL);
+}
+
 static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
 	ioctl_get_info,
 	ioctl_send_request,
@@ -984,6 +1195,8 @@ static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
 	ioctl_start_iso,
 	ioctl_stop_iso,
 	ioctl_get_cycle_timer,
+	ioctl_allocate_iso_resource,
+	ioctl_deallocate_iso_resource,
 };
 
 static int dispatch_ioctl(struct client *client,
diff --git a/drivers/firewire/fw-iso.c b/drivers/firewire/fw-iso.c
index 39f3bacee404..a7b57b253b06 100644
--- a/drivers/firewire/fw-iso.c
+++ b/drivers/firewire/fw-iso.c
@@ -1,5 +1,7 @@
 /*
- * Isochronous IO functionality
+ * Isochronous I/O functionality:
+ *  - Isochronous DMA context management
+ *  - Isochronous bus resource management (channels, bandwidth), client side
  *
  * Copyright (C) 2006 Kristian Hoegsberg <krh@bitplanet.net>
  *
@@ -18,15 +20,20 @@
  * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 
-#include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/dma-mapping.h>
-#include <linux/vmalloc.h>
+#include <linux/errno.h>
+#include <linux/firewire-constants.h>
+#include <linux/kernel.h>
 #include <linux/mm.h>
+#include <linux/spinlock.h>
+#include <linux/vmalloc.h>
 
-#include "fw-transaction.h"
 #include "fw-topology.h"
-#include "fw-device.h"
+#include "fw-transaction.h"
+
+/*
+ * Isochronous DMA context management
+ */
 
 int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
 		       int page_count, enum dma_data_direction direction)
@@ -153,3 +160,160 @@ int fw_iso_context_stop(struct fw_iso_context *ctx)
 {
 	return ctx->card->driver->stop_iso(ctx);
 }
+
+/*
+ * Isochronous bus resource management (channels, bandwidth), client side
+ */
+
+static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
+			    int bandwidth, bool allocate)
+{
+	__be32 data[2];
+	int try, new, old = allocate ? BANDWIDTH_AVAILABLE_INITIAL : 0;
+
+	/*
+	 * On a 1394a IRM with low contention, try < 1 is enough.
+	 * On a 1394-1995 IRM, we need at least try < 2.
+	 * Let's just do try < 5.
+	 */
+	for (try = 0; try < 5; try++) {
+		new = allocate ? old - bandwidth : old + bandwidth;
+		if (new < 0 || new > BANDWIDTH_AVAILABLE_INITIAL)
+			break;
+
+		data[0] = cpu_to_be32(old);
+		data[1] = cpu_to_be32(new);
+		switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
+				irm_id, generation, SCODE_100,
+				CSR_REGISTER_BASE + CSR_BANDWIDTH_AVAILABLE,
+				data, sizeof(data))) {
+		case RCODE_GENERATION:
+			/* A generation change frees all bandwidth. */
+			return allocate ? -EAGAIN : bandwidth;
+
+		case RCODE_COMPLETE:
+			if (be32_to_cpup(data) == old)
+				return bandwidth;
+
+			old = be32_to_cpup(data);
+			/* Fall through. */
+		}
+	}
+
+	return -EIO;
+}
+
+static int manage_channel(struct fw_card *card, int irm_id, int generation,
+			  __be32 channels_mask, u64 offset, bool allocate)
+{
+	__be32 data[2], c, old = allocate ? cpu_to_be32(~0) : 0;
+	int i, retry = 5;
+
+	for (i = 0; i < 32; i++) {
+		c = cpu_to_be32(1 << (31 - i));
+		if (!(channels_mask & c))
+			continue;
+
+		if (allocate == !(old & c))
+			continue;
+
+		data[0] = old;
+		data[1] = old ^ c;
+		switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
+					   irm_id, generation, SCODE_100,
+					   offset, data, sizeof(data))) {
+		case RCODE_GENERATION:
+			/* A generation change frees all channels. */
+			return allocate ? -EAGAIN : i;
+
+		case RCODE_COMPLETE:
+			if (data[0] == old)
+				return i;
+
+			old = data[0];
+
+			/* Is the IRM 1394a-2000 compliant? */
+			if ((data[0] & c) != (data[1] & c))
+				continue;
+
+			/* 1394-1995 IRM, fall through to retry. */
+		default:
+			if (retry--)
+				i--;
+		}
+	}
+
+	return -EIO;
+}
+
+static void deallocate_channel(struct fw_card *card, int irm_id,
+			       int generation, int channel)
+{
+	__be32 mask;
+	u64 offset;
+
+	mask = channel < 32 ? cpu_to_be32(1 << (31 - channel)) :
+			      cpu_to_be32(1 << (63 - channel));
+	offset = channel < 32 ? CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI :
+				CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO;
+
+	manage_channel(card, irm_id, generation, mask, offset, false);
+}
+
+/**
+ * fw_iso_resource_manage - Allocate or deallocate a channel and/or bandwidth
+ *
+ * In parameters: card, generation, channels_mask, bandwidth, allocate
+ * Out parameters: channel, bandwidth
+ * This function blocks (sleeps) during communication with the IRM.
+ * Allocates or deallocates at most one channel out of channels_mask.
+ *
+ * Returns channel < 0 if no channel was allocated or deallocated.
+ * Returns bandwidth = 0 if no bandwidth was allocated or deallocated.
+ *
+ * If generation is stale, deallocations succeed but allocations fail with
+ * channel = -EAGAIN.
+ *
+ * If channel (de)allocation fails, bandwidth (de)allocation fails too.
+ * If bandwidth allocation fails, no channel will be allocated either.
+ * If bandwidth deallocation fails, channel deallocation may still have been
+ * successful.
+ */
+void fw_iso_resource_manage(struct fw_card *card, int generation,
+			    u64 channels_mask, int *channel, int *bandwidth,
+			    bool allocate)
+{
+	__be32 channels_hi = cpu_to_be32(channels_mask >> 32);
+	__be32 channels_lo = cpu_to_be32(channels_mask);
+	int irm_id, ret, c = -EINVAL;
+
+	spin_lock_irq(&card->lock);
+	irm_id = card->irm_node->node_id;
+	spin_unlock_irq(&card->lock);
+
+	if (channels_hi)
+		c = manage_channel(card, irm_id, generation, channels_hi,
+		    CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI, allocate);
+	if (channels_lo && c < 0) {
+		c = manage_channel(card, irm_id, generation, channels_lo,
+		    CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO, allocate);
+		if (c >= 0)
+			c += 32;
+	}
+	*channel = c;
+
+	if (channels_mask != 0 && c < 0)
+		*bandwidth = 0;
+
+	if (*bandwidth == 0)
+		return;
+
+	ret = manage_bandwidth(card, irm_id, generation, *bandwidth, allocate);
+	if (ret < 0)
+		*bandwidth = 0;
+
+	if (ret < 0 && c >= 0 && allocate) {
+		deallocate_channel(card, irm_id, generation, c);
+		*channel = ret;
+	}
+}
diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h
index 48e88d53998b..212a10293828 100644
--- a/drivers/firewire/fw-transaction.h
+++ b/drivers/firewire/fw-transaction.h
@@ -82,6 +82,7 @@
 #define CSR_SPEED_MAP 0x2000
 #define CSR_SPEED_MAP_END 0x3000
 
+#define BANDWIDTH_AVAILABLE_INITIAL 4915
 #define BROADCAST_CHANNEL_INITIAL (1 << 31 | 31)
 #define BROADCAST_CHANNEL_VALID (1 << 30)
 
@@ -343,6 +344,9 @@ int fw_iso_context_start(struct fw_iso_context *ctx,
 int fw_iso_context_stop(struct fw_iso_context *ctx);
 void fw_iso_context_destroy(struct fw_iso_context *ctx);
 
+void fw_iso_resource_manage(struct fw_card *card, int generation,
+		u64 channels_mask, int *channel, int *bandwidth, bool allocate);
+
 struct fw_card_driver {
 	/*
 	 * Enable the given card with the given initial config rom.
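
The in-kernel entry point that the last paragraph of the commit message refers to is fw_iso_resource_manage(), declared in fw-transaction.h above. A minimal sketch of how a hypothetical in-kernel user (for instance the anticipated firedtv port) might call it follows; the wrapper function and its policy are invented here for illustration, only fw_iso_resource_manage() itself comes from this patch.

/*
 * Hypothetical in-kernel caller; must run in process context because
 * fw_iso_resource_manage() sleeps while talking to the IRM.
 */
#include <linux/errno.h>

#include "fw-transaction.h"

static int example_claim_iso_resources(struct fw_card *card, int generation,
				       int *channel, int *bandwidth)
{
	/*
	 * *bandwidth carries the requested allocation units on entry and the
	 * allocated amount on return; in the channel mask, bit 63 - c selects
	 * channel c, so ~0ULL accepts any of the 64 channels.
	 */
	fw_iso_resource_manage(card, generation, ~0ULL,
			       channel, bandwidth, true);

	if (*channel == -EAGAIN)
		return -EAGAIN;	/* generation went stale, retry after bus reset */
	if (*channel < 0 || *bandwidth == 0)
		return -EBUSY;	/* IRM denied the channel and/or the bandwidth */

	return 0;		/* both resources are now owned by this caller */
}

This is the same pattern iso_resource_work() in fw-cdev.c uses from its workqueue: on -EAGAIN it simply reschedules itself so the allocation is retried with the next generation.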