author     Jay Fenlason, Stefan Richter <stefanr@s5r6.in-berlin.de>    2009-01-04 10:23:29 -0500
committer  Stefan Richter <stefanr@s5r6.in-berlin.de>                  2009-03-24 15:56:43 -0400
commit     b1bda4cdc2037447bd66753bf5ccab66d91b0b59 (patch)
tree       6aae47fb85125c15150d6d306354de5deb1e316f /drivers/firewire/fw-cdev.c
parent     b769bd17656f991c5588c676376e5ec77d25997a (diff)
firewire: cdev: add ioctls for isochronous resource management
Based on
Date: Tue, 18 Nov 2008 11:41:27 -0500
From: Jay Fenlason <fenlason@redhat.com>
Subject: [Patch V4] Add ISO resource management support
with several changes to the ABI and implementation. Only the part of
the ABI which enables auto-reallocation and auto-deallocation is
included here.
This implements ioctls for kernel-assisted allocation of isochronous
channels and isochronous bandwidth. The benefits are:
- The client does not have to have write access to the /dev/fw* device
corresponding to the IRM.
- The client does not have to perform reallocation after bus resets.
- Channel and bandwidth are deallocated by the kernel if the file is
closed before the client deallocated the resources. Thus resources
are released even if the client crashes.
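For illustration only (not part of this patch), a userspace client might drive the new allocate ioctl roughly as sketched below. The sketch assumes the FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE / FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE numbers and the struct fw_cdev_allocate_iso_resource layout added to <linux/firewire-cdev.h> by the companion ABI patch; the channel mask and bandwidth values are example placeholders.

/*
 * Illustrative sketch -- not part of this patch.  Assumes the ioctl
 * numbers and struct fw_cdev_allocate_iso_resource from the companion
 * <linux/firewire-cdev.h> change; mask/bandwidth values are examples.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/firewire-cdev.h>

static int allocate_iso_resources(const char *dev_path)
{
        struct fw_cdev_allocate_iso_resource req;
        int fd = open(dev_path, O_RDWR);

        if (fd < 0)
                return -1;

        memset(&req, 0, sizeof(req));
        req.closure   = 0;      /* cookie echoed back in the result event */
        req.channels  = ~0ULL;  /* any isochronous channel is acceptable */
        req.bandwidth = 2000;   /* bandwidth allocation units (example value) */

        /* Allocation happens asynchronously in the kernel; the outcome is
         * delivered later as an FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED event. */
        if (ioctl(fd, FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE, &req) < 0) {
                perror("FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE");
                close(fd);
                return -1;
        }

        /* req.handle may later be passed to FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE;
         * simply closing fd also releases channel and bandwidth. */
        printf("iso resource handle: %u\n", req.handle);
        return fd;
}

Because the allocation is performed in kernel context, the same call also arms the automatic reallocation after bus resets and the automatic cleanup on close described above.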
It is anticipated that future in-kernel code (firewire-core IRM code; the firewire port of firedtv) will use the fw-iso.c portions of this code too.
Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>
Tested-by: David Moore <dcm@acm.org>
Diffstat (limited to 'drivers/firewire/fw-cdev.c')
-rw-r--r--  drivers/firewire/fw-cdev.c | 215
1 file changed, 214 insertions(+), 1 deletion(-)
diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c
index 4c33b51b735a..a227853aa1e2 100644
--- a/drivers/firewire/fw-cdev.c
+++ b/drivers/firewire/fw-cdev.c
@@ -24,6 +24,7 @@
 #include <linux/errno.h>
 #include <linux/firewire-cdev.h>
 #include <linux/idr.h>
+#include <linux/jiffies.h>
 #include <linux/kernel.h>
 #include <linux/kref.h>
 #include <linux/mm.h>
@@ -35,6 +36,7 @@
 #include <linux/time.h>
 #include <linux/vmalloc.h>
 #include <linux/wait.h>
+#include <linux/workqueue.h>
 
 #include <asm/system.h>
 #include <asm/uaccess.h>
@@ -114,6 +116,21 @@ struct descriptor_resource {
 	u32 data[0];
 };
 
+struct iso_resource {
+	struct client_resource resource;
+	struct client *client;
+	/* Schedule work and access todo only with client->lock held. */
+	struct delayed_work work;
+	enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC,} todo;
+	int generation;
+	u64 channels;
+	s32 bandwidth;
+	struct iso_resource_event *e_alloc, *e_dealloc;
+};
+
+static void schedule_iso_resource(struct iso_resource *);
+static void release_iso_resource(struct client *, struct client_resource *);
+
 /*
  * dequeue_event() just kfree()'s the event, so the event has to be
  * the first field in a struct XYZ_event.
@@ -145,6 +162,11 @@ struct iso_interrupt_event {
 	struct fw_cdev_event_iso_interrupt interrupt;
 };
 
+struct iso_resource_event {
+	struct event event;
+	struct fw_cdev_event_iso_resource resource;
+};
+
 static inline void __user *u64_to_uptr(__u64 value)
 {
 	return (void __user *)(unsigned long)value;
@@ -290,6 +312,16 @@ static void for_each_client(struct fw_device *device,
 	mutex_unlock(&device->client_list_mutex);
 }
 
+static int schedule_reallocations(int id, void *p, void *data)
+{
+	struct client_resource *r = p;
+
+	if (r->release == release_iso_resource)
+		schedule_iso_resource(container_of(r,
+				struct iso_resource, resource));
+	return 0;
+}
+
 static void queue_bus_reset_event(struct client *client)
 {
 	struct bus_reset_event *e;
@@ -304,6 +336,10 @@ static void queue_bus_reset_event(struct client *client)
 
 	queue_event(client, &e->event,
 		    &e->reset, sizeof(e->reset), NULL, 0);
+
+	spin_lock_irq(&client->lock);
+	idr_for_each(&client->resource_idr, schedule_reallocations, client);
+	spin_unlock_irq(&client->lock);
 }
 
 void fw_device_cdev_update(struct fw_device *device)
@@ -376,8 +412,12 @@ static int add_client_resource(struct client *client,
 	else
 		ret = idr_get_new(&client->resource_idr, resource,
 				  &resource->handle);
-	if (ret >= 0)
+	if (ret >= 0) {
 		client_get(client);
+		if (resource->release == release_iso_resource)
+			schedule_iso_resource(container_of(resource,
+					struct iso_resource, resource));
+	}
 	spin_unlock_irqrestore(&client->lock, flags);
 
 	if (ret == -EAGAIN)
@@ -970,6 +1010,177 @@ static int ioctl_get_cycle_timer(struct client *client, void *buffer)
 	return 0;
 }
 
+static void iso_resource_work(struct work_struct *work)
+{
+	struct iso_resource_event *e;
+	struct iso_resource *r =
+			container_of(work, struct iso_resource, work.work);
+	struct client *client = r->client;
+	int generation, channel, bandwidth, todo;
+	bool skip, free, success;
+
+	spin_lock_irq(&client->lock);
+	generation = client->device->generation;
+	todo = r->todo;
+	/* Allow 1000ms grace period for other reallocations. */
+	if (todo == ISO_RES_ALLOC &&
+	    time_is_after_jiffies(client->device->card->reset_jiffies + HZ)) {
+		if (schedule_delayed_work(&r->work, DIV_ROUND_UP(HZ, 3)))
+			client_get(client);
+		skip = true;
+	} else {
+		/* We could be called twice within the same generation. */
+		skip = todo == ISO_RES_REALLOC &&
+		       r->generation == generation;
+	}
+	free = todo == ISO_RES_DEALLOC;
+	r->generation = generation;
+	spin_unlock_irq(&client->lock);
+
+	if (skip)
+		goto out;
+
+	bandwidth = r->bandwidth;
+
+	fw_iso_resource_manage(client->device->card, generation,
+			r->channels, &channel, &bandwidth,
+			todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC);
+	/*
+	 * Is this generation outdated already?  As long as this resource sticks
+	 * in the idr, it will be scheduled again for a newer generation or at
+	 * shutdown.
+	 */
+	if (channel == -EAGAIN &&
+	    (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC))
+		goto out;
+
+	success = channel >= 0 || bandwidth > 0;
+
+	spin_lock_irq(&client->lock);
+	/*
+	 * Transit from allocation to reallocation, except if the client
+	 * requested deallocation in the meantime.
+	 */
+	if (r->todo == ISO_RES_ALLOC)
+		r->todo = ISO_RES_REALLOC;
+	/*
+	 * Allocation or reallocation failure?  Pull this resource out of the
+	 * idr and prepare for deletion, unless the client is shutting down.
+	 */
+	if (r->todo == ISO_RES_REALLOC && !success &&
+	    !client->in_shutdown &&
+	    idr_find(&client->resource_idr, r->resource.handle)) {
+		idr_remove(&client->resource_idr, r->resource.handle);
+		client_put(client);
+		free = true;
+	}
+	spin_unlock_irq(&client->lock);
+
+	if (todo == ISO_RES_ALLOC && channel >= 0)
+		r->channels = 1ULL << (63 - channel);
+
+	if (todo == ISO_RES_REALLOC && success)
+		goto out;
+
+	if (todo == ISO_RES_ALLOC) {
+		e = r->e_alloc;
+		r->e_alloc = NULL;
+	} else {
+		e = r->e_dealloc;
+		r->e_dealloc = NULL;
+	}
+	e->resource.handle = r->resource.handle;
+	e->resource.channel = channel;
+	e->resource.bandwidth = bandwidth;
+
+	queue_event(client, &e->event,
+		    &e->resource, sizeof(e->resource), NULL, 0);
+
+	if (free) {
+		cancel_delayed_work(&r->work);
+		kfree(r->e_alloc);
+		kfree(r->e_dealloc);
+		kfree(r);
+	}
+ out:
+	client_put(client);
+}
+
+static void schedule_iso_resource(struct iso_resource *r)
+{
+	if (schedule_delayed_work(&r->work, 0))
+		client_get(r->client);
+}
+
+static void release_iso_resource(struct client *client,
+				 struct client_resource *resource)
+{
+	struct iso_resource *r =
+		container_of(resource, struct iso_resource, resource);
+
+	spin_lock_irq(&client->lock);
+	r->todo = ISO_RES_DEALLOC;
+	schedule_iso_resource(r);
+	spin_unlock_irq(&client->lock);
+}
+
+static int ioctl_allocate_iso_resource(struct client *client, void *buffer)
+{
+	struct fw_cdev_allocate_iso_resource *request = buffer;
+	struct iso_resource_event *e1, *e2;
+	struct iso_resource *r;
+	int ret;
+
+	if ((request->channels == 0 && request->bandwidth == 0) ||
+	    request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
+	    request->bandwidth < 0)
+		return -EINVAL;
+
+	r  = kmalloc(sizeof(*r), GFP_KERNEL);
+	e1 = kmalloc(sizeof(*e1), GFP_KERNEL);
+	e2 = kmalloc(sizeof(*e2), GFP_KERNEL);
+	if (r == NULL || e1 == NULL || e2 == NULL) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	INIT_DELAYED_WORK(&r->work, iso_resource_work);
+	r->client = client;
+	r->todo = ISO_RES_ALLOC;
+	r->generation = -1;
+	r->channels = request->channels;
+	r->bandwidth = request->bandwidth;
+	r->e_alloc = e1;
+	r->e_dealloc = e2;
+
+	e1->resource.closure = request->closure;
+	e1->resource.type = FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED;
+	e2->resource.closure = request->closure;
+	e2->resource.type = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED;
+
+	r->resource.release = release_iso_resource;
+	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
+	if (ret < 0)
+		goto fail;
+	request->handle = r->resource.handle;
+
+	return 0;
+ fail:
+	kfree(r);
+	kfree(e1);
+	kfree(e2);
+
+	return ret;
+}
+
+static int ioctl_deallocate_iso_resource(struct client *client, void *buffer)
+{
+	struct fw_cdev_deallocate *request = buffer;
+
+	return release_client_resource(client, request->handle,
+				       release_iso_resource, NULL);
+}
+
 static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
 	ioctl_get_info,
 	ioctl_send_request,
@@ -984,6 +1195,8 @@ static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
 	ioctl_start_iso,
 	ioctl_stop_iso,
 	ioctl_get_cycle_timer,
+	ioctl_allocate_iso_resource,
+	ioctl_deallocate_iso_resource,
 };
 
 static int dispatch_ioctl(struct client *client,
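For completeness, the result events queued by iso_resource_work() above are consumed with plain read()s on the same file descriptor. The following is again only a sketch, assuming the event structures and the union fw_cdev_event member names from the companion <linux/firewire-cdev.h> change.

/* Illustrative event loop -- not part of this patch. */
#include <stdio.h>
#include <unistd.h>
#include <linux/firewire-cdev.h>

static void poll_iso_resource_events(int fd)
{
        union {
                union fw_cdev_event event;
                char buf[256];  /* room for events carrying extra payload */
        } u;

        for (;;) {
                /* read() blocks until the kernel queues the next event. */
                if (read(fd, u.buf, sizeof(u.buf)) <
                    (ssize_t)sizeof(u.event.common))
                        break;

                switch (u.event.common.type) {
                case FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED:
                        /* Initial allocation result; successful reallocations
                         * after bus resets are silent, see iso_resource_work(). */
                        printf("allocated channel %d, bandwidth %d, handle %u\n",
                               u.event.iso_resource.channel,
                               u.event.iso_resource.bandwidth,
                               u.event.iso_resource.handle);
                        break;
                case FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED:
                        printf("deallocated handle %u\n",
                               u.event.iso_resource.handle);
                        break;
                default:
                        /* bus reset, request, iso interrupt, ... */
                        break;
                }
        }
}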