Diffstat (limited to 'drivers/firewire/fw-cdev.c')

 -rw-r--r--  drivers/firewire/fw-cdev.c | 1044
 1 file changed, 729 insertions(+), 315 deletions(-)

diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c
index ed03234cbea8..7eb6594cc3e5 100644
--- a/drivers/firewire/fw-cdev.c
+++ b/drivers/firewire/fw-cdev.c
@@ -18,87 +18,162 @@
  * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/wait.h>
-#include <linux/errno.h>
+#include <linux/compat.h>
+#include <linux/delay.h>
 #include <linux/device.h>
-#include <linux/vmalloc.h>
+#include <linux/errno.h>
+#include <linux/firewire-cdev.h>
+#include <linux/idr.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
 #include <linux/poll.h>
 #include <linux/preempt.h>
+#include <linux/spinlock.h>
 #include <linux/time.h>
-#include <linux/delay.h>
-#include <linux/mm.h>
-#include <linux/idr.h>
-#include <linux/compat.h>
-#include <linux/firewire-cdev.h>
+#include <linux/vmalloc.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
 #include <asm/system.h>
 #include <asm/uaccess.h>
-#include "fw-transaction.h"
-#include "fw-topology.h"
+
 #include "fw-device.h"
+#include "fw-topology.h"
+#include "fw-transaction.h"
+
+struct client {
+	u32 version;
+	struct fw_device *device;
+
+	spinlock_t lock;
+	bool in_shutdown;
+	struct idr resource_idr;
+	struct list_head event_list;
+	wait_queue_head_t wait;
+	u64 bus_reset_closure;
+
+	struct fw_iso_context *iso_context;
+	u64 iso_closure;
+	struct fw_iso_buffer buffer;
+	unsigned long vm_start;
 
-struct client;
-struct client_resource {
 	struct list_head link;
-	void (*release)(struct client *client, struct client_resource *r);
-	u32 handle;
+	struct kref kref;
 };
 
+static inline void client_get(struct client *client)
+{
+	kref_get(&client->kref);
+}
+
+static void client_release(struct kref *kref)
+{
+	struct client *client = container_of(kref, struct client, kref);
+
+	fw_device_put(client->device);
+	kfree(client);
+}
+
+static void client_put(struct client *client)
+{
+	kref_put(&client->kref, client_release);
+}
+
+struct client_resource;
+typedef void (*client_resource_release_fn_t)(struct client *,
+					     struct client_resource *);
+struct client_resource {
+	client_resource_release_fn_t release;
+	int handle;
+};
+
+struct address_handler_resource {
+	struct client_resource resource;
+	struct fw_address_handler handler;
+	__u64 closure;
+	struct client *client;
+};
+
+struct outbound_transaction_resource {
+	struct client_resource resource;
+	struct fw_transaction transaction;
+};
+
+struct inbound_transaction_resource {
+	struct client_resource resource;
+	struct fw_request *request;
+	void *data;
+	size_t length;
+};
+
+struct descriptor_resource {
+	struct client_resource resource;
+	struct fw_descriptor descriptor;
+	u32 data[0];
+};
+
+struct iso_resource {
+	struct client_resource resource;
+	struct client *client;
+	/* Schedule work and access todo only with client->lock held. */
+	struct delayed_work work;
+	enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC,
+	      ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo;
+	int generation;
+	u64 channels;
+	s32 bandwidth;
+	struct iso_resource_event *e_alloc, *e_dealloc;
+};
+
+static void schedule_iso_resource(struct iso_resource *);
+static void release_iso_resource(struct client *, struct client_resource *);
+
 /*
  * dequeue_event() just kfree()'s the event, so the event has to be
- * the first field in the struct.
+ * the first field in a struct XYZ_event.
  */
-
 struct event {
 	struct { void *data; size_t size; } v[2];
 	struct list_head link;
 };
 
-struct bus_reset {
+struct bus_reset_event {
 	struct event event;
 	struct fw_cdev_event_bus_reset reset;
 };
 
-struct response {
+struct outbound_transaction_event {
 	struct event event;
-	struct fw_transaction transaction;
 	struct client *client;
-	struct client_resource resource;
+	struct outbound_transaction_resource r;
 	struct fw_cdev_event_response response;
 };
 
-struct iso_interrupt {
+struct inbound_transaction_event {
 	struct event event;
-	struct fw_cdev_event_iso_interrupt interrupt;
+	struct fw_cdev_event_request request;
 };
 
-struct client {
-	u32 version;
-	struct fw_device *device;
-	spinlock_t lock;
-	u32 resource_handle;
-	struct list_head resource_list;
-	struct list_head event_list;
-	wait_queue_head_t wait;
-	u64 bus_reset_closure;
-
-	struct fw_iso_context *iso_context;
-	u64 iso_closure;
-	struct fw_iso_buffer buffer;
-	unsigned long vm_start;
+struct iso_interrupt_event {
+	struct event event;
+	struct fw_cdev_event_iso_interrupt interrupt;
+};
 
-	struct list_head link;
+struct iso_resource_event {
+	struct event event;
+	struct fw_cdev_event_iso_resource resource;
 };
 
-static inline void __user *
-u64_to_uptr(__u64 value)
+static inline void __user *u64_to_uptr(__u64 value)
 {
 	return (void __user *)(unsigned long)value;
 }
 
-static inline __u64
-uptr_to_u64(void __user *ptr)
+static inline __u64 uptr_to_u64(void __user *ptr)
 {
 	return (__u64)(unsigned long)ptr;
 }
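Annotation on the hunk above, before the diff continues: struct client is now reference-counted. The open file holds one reference, every entry registered in resource_idr holds one, and each in-flight transaction callback holds one more, so client_release() runs only after the last client_put(). Below is a minimal, self-contained sketch of the same kref idiom — hypothetical names (owner, owner_get, owner_put), not code from this patch:

    #include <linux/kref.h>
    #include <linux/slab.h>

    struct owner {
            struct kref kref;
            /* ... payload ... */
    };

    static void owner_release(struct kref *kref)
    {
            /* runs exactly once, when the last reference is dropped */
            kfree(container_of(kref, struct owner, kref));
    }

    /* take a reference before handing 'o' to an asynchronous context */
    static void owner_get(struct owner *o)
    {
            kref_get(&o->kref);
    }

    /* drop a reference; frees 'o' once the count reaches zero */
    static void owner_put(struct owner *o)
    {
            kref_put(&o->kref, owner_release);
    }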
@@ -107,7 +182,6 @@ static int fw_device_op_open(struct inode *inode, struct file *file)
 {
 	struct fw_device *device;
 	struct client *client;
-	unsigned long flags;
 
 	device = fw_device_get_by_devt(inode->i_rdev);
 	if (device == NULL)
@@ -125,16 +199,17 @@ static int fw_device_op_open(struct inode *inode, struct file *file)
 	}
 
 	client->device = device;
-	INIT_LIST_HEAD(&client->event_list);
-	INIT_LIST_HEAD(&client->resource_list);
 	spin_lock_init(&client->lock);
+	idr_init(&client->resource_idr);
+	INIT_LIST_HEAD(&client->event_list);
 	init_waitqueue_head(&client->wait);
+	kref_init(&client->kref);
 
 	file->private_data = client;
 
-	spin_lock_irqsave(&device->card->lock, flags);
+	mutex_lock(&device->client_list_mutex);
 	list_add_tail(&client->link, &device->client_list);
-	spin_unlock_irqrestore(&device->card->lock, flags);
+	mutex_unlock(&device->client_list_mutex);
 
 	return 0;
 }
@@ -150,68 +225,69 @@ static void queue_event(struct client *client, struct event *event,
 	event->v[1].size = size1;
 
 	spin_lock_irqsave(&client->lock, flags);
-	list_add_tail(&event->link, &client->event_list);
+	if (client->in_shutdown)
+		kfree(event);
+	else
+		list_add_tail(&event->link, &client->event_list);
 	spin_unlock_irqrestore(&client->lock, flags);
 
 	wake_up_interruptible(&client->wait);
 }
 
-static int
-dequeue_event(struct client *client, char __user *buffer, size_t count)
+static int dequeue_event(struct client *client,
+			 char __user *buffer, size_t count)
 {
-	unsigned long flags;
 	struct event *event;
 	size_t size, total;
-	int i, retval;
+	int i, ret;
 
-	retval = wait_event_interruptible(client->wait,
+	ret = wait_event_interruptible(client->wait,
 			!list_empty(&client->event_list) ||
 			fw_device_is_shutdown(client->device));
-	if (retval < 0)
-		return retval;
+	if (ret < 0)
+		return ret;
 
 	if (list_empty(&client->event_list) &&
 		       fw_device_is_shutdown(client->device))
 		return -ENODEV;
 
-	spin_lock_irqsave(&client->lock, flags);
-	event = container_of(client->event_list.next, struct event, link);
+	spin_lock_irq(&client->lock);
+	event = list_first_entry(&client->event_list, struct event, link);
 	list_del(&event->link);
-	spin_unlock_irqrestore(&client->lock, flags);
+	spin_unlock_irq(&client->lock);
 
 	total = 0;
 	for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
 		size = min(event->v[i].size, count - total);
 		if (copy_to_user(buffer + total, event->v[i].data, size)) {
-			retval = -EFAULT;
+			ret = -EFAULT;
 			goto out;
 		}
 		total += size;
 	}
-	retval = total;
+	ret = total;
 
  out:
 	kfree(event);
 
-	return retval;
+	return ret;
 }
 
-static ssize_t
-fw_device_op_read(struct file *file,
-		  char __user *buffer, size_t count, loff_t *offset)
+static ssize_t fw_device_op_read(struct file *file, char __user *buffer,
+				 size_t count, loff_t *offset)
 {
 	struct client *client = file->private_data;
 
 	return dequeue_event(client, buffer, count);
 }
 
-/* caller must hold card->lock so that node pointers can be dereferenced here */
-static void
-fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
-		     struct client *client)
+static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
+				 struct client *client)
 {
 	struct fw_card *card = client->device->card;
 
+	spin_lock_irq(&card->lock);
+
 	event->closure = client->bus_reset_closure;
 	event->type = FW_CDEV_EVENT_BUS_RESET;
 	event->generation = client->device->generation;
@@ -220,39 +296,49 @@ fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
 	event->bm_node_id = 0; /* FIXME: We don't track the BM. */
 	event->irm_node_id = card->irm_node->node_id;
 	event->root_node_id = card->root_node->node_id;
+
+	spin_unlock_irq(&card->lock);
 }
 
-static void
-for_each_client(struct fw_device *device,
-		void (*callback)(struct client *client))
+static void for_each_client(struct fw_device *device,
+			    void (*callback)(struct client *client))
 {
-	struct fw_card *card = device->card;
 	struct client *c;
-	unsigned long flags;
-
-	spin_lock_irqsave(&card->lock, flags);
 
+	mutex_lock(&device->client_list_mutex);
 	list_for_each_entry(c, &device->client_list, link)
 		callback(c);
+	mutex_unlock(&device->client_list_mutex);
+}
+
+static int schedule_reallocations(int id, void *p, void *data)
+{
+	struct client_resource *r = p;
 
-	spin_unlock_irqrestore(&card->lock, flags);
+	if (r->release == release_iso_resource)
+		schedule_iso_resource(container_of(r,
+					struct iso_resource, resource));
+	return 0;
 }
 
-static void
-queue_bus_reset_event(struct client *client)
+static void queue_bus_reset_event(struct client *client)
 {
-	struct bus_reset *bus_reset;
+	struct bus_reset_event *e;
 
-	bus_reset = kzalloc(sizeof(*bus_reset), GFP_ATOMIC);
-	if (bus_reset == NULL) {
+	e = kzalloc(sizeof(*e), GFP_KERNEL);
+	if (e == NULL) {
 		fw_notify("Out of memory when allocating bus reset event\n");
 		return;
 	}
 
-	fill_bus_reset_event(&bus_reset->reset, client);
+	fill_bus_reset_event(&e->reset, client);
+
+	queue_event(client, &e->event,
+		    &e->reset, sizeof(e->reset), NULL, 0);
 
-	queue_event(client, &bus_reset->event,
-		    &bus_reset->reset, sizeof(bus_reset->reset), NULL, 0);
+	spin_lock_irq(&client->lock);
+	idr_for_each(&client->resource_idr, schedule_reallocations, client);
+	spin_unlock_irq(&client->lock);
 }
 
 void fw_device_cdev_update(struct fw_device *device)
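Annotation: every event queued by queue_event() — bus resets included — is consumed by userspace with plain read(2) on the device file. A hedged sketch of the consuming side (the buffer size and the dispatch are illustrative; union fw_cdev_event and the FW_CDEV_EVENT_* constants come from <linux/firewire-cdev.h>):

    #include <unistd.h>
    #include <linux/firewire-cdev.h>

    static void handle_one_event(int fd)
    {
            char buf[4096];  /* events are variable-length; read into a roomy buffer */
            union fw_cdev_event *event = (union fw_cdev_event *)buf;

            if (read(fd, buf, sizeof(buf)) < 0)
                    return;  /* e.g. -ENODEV after the device went away */

            switch (event->common.type) {
            case FW_CDEV_EVENT_BUS_RESET:
                    /* event->bus_reset.generation is the new bus generation */
                    break;
            case FW_CDEV_EVENT_RESPONSE:
                    /* event->response.length payload bytes at event->response.data */
                    break;
            }
    }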
@@ -274,11 +360,11 @@ static int ioctl_get_info(struct client *client, void *buffer)
 {
 	struct fw_cdev_get_info *get_info = buffer;
 	struct fw_cdev_event_bus_reset bus_reset;
-	struct fw_card *card = client->device->card;
 	unsigned long ret = 0;
 
 	client->version = get_info->version;
 	get_info->version = FW_CDEV_VERSION;
+	get_info->card = client->device->card->index;
 
 	down_read(&fw_device_rwsem);
 
@@ -300,49 +386,61 @@ static int ioctl_get_info(struct client *client, void *buffer)
 	client->bus_reset_closure = get_info->bus_reset_closure;
 	if (get_info->bus_reset != 0) {
 		void __user *uptr = u64_to_uptr(get_info->bus_reset);
-		unsigned long flags;
 
-		spin_lock_irqsave(&card->lock, flags);
 		fill_bus_reset_event(&bus_reset, client);
-		spin_unlock_irqrestore(&card->lock, flags);
-
 		if (copy_to_user(uptr, &bus_reset, sizeof(bus_reset)))
 			return -EFAULT;
 	}
 
-	get_info->card = card->index;
-
 	return 0;
 }
 
-static void
-add_client_resource(struct client *client, struct client_resource *resource)
+static int add_client_resource(struct client *client,
+			       struct client_resource *resource, gfp_t gfp_mask)
 {
 	unsigned long flags;
+	int ret;
+
+ retry:
+	if (idr_pre_get(&client->resource_idr, gfp_mask) == 0)
+		return -ENOMEM;
 
 	spin_lock_irqsave(&client->lock, flags);
-	list_add_tail(&resource->link, &client->resource_list);
-	resource->handle = client->resource_handle++;
+	if (client->in_shutdown)
+		ret = -ECANCELED;
+	else
+		ret = idr_get_new(&client->resource_idr, resource,
+				  &resource->handle);
+	if (ret >= 0) {
+		client_get(client);
+		if (resource->release == release_iso_resource)
+			schedule_iso_resource(container_of(resource,
+					struct iso_resource, resource));
+	}
 	spin_unlock_irqrestore(&client->lock, flags);
+
+	if (ret == -EAGAIN)
+		goto retry;
+
+	return ret < 0 ? ret : 0;
 }
 
-static int
-release_client_resource(struct client *client, u32 handle,
-			struct client_resource **resource)
+static int release_client_resource(struct client *client, u32 handle,
+				   client_resource_release_fn_t release,
+				   struct client_resource **resource)
 {
 	struct client_resource *r;
-	unsigned long flags;
 
-	spin_lock_irqsave(&client->lock, flags);
-	list_for_each_entry(r, &client->resource_list, link) {
-		if (r->handle == handle) {
-			list_del(&r->link);
-			break;
-		}
-	}
-	spin_unlock_irqrestore(&client->lock, flags);
+	spin_lock_irq(&client->lock);
+	if (client->in_shutdown)
+		r = NULL;
+	else
+		r = idr_find(&client->resource_idr, handle);
+	if (r && r->release == release)
+		idr_remove(&client->resource_idr, handle);
+	spin_unlock_irq(&client->lock);
 
-	if (&r->link == &client->resource_list)
+	if (!(r && r->release == release))
 		return -EINVAL;
 
 	if (resource)
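Annotation: the retry loop in add_client_resource() above is the stock two-step idr allocation idiom of this kernel generation — preload memory outside the lock, allocate the id inside it, and retry when another allocator consumed the preloaded node. A condensed sketch with hypothetical names:

    static int register_object(struct idr *idr, spinlock_t *lock,
                               void *object, int *handle)
    {
            int ret;
     retry:
            if (idr_pre_get(idr, GFP_KERNEL) == 0)
                    return -ENOMEM;         /* no memory to preload a tree node */

            spin_lock_irq(lock);
            ret = idr_get_new(idr, object, handle);
            spin_unlock_irq(lock);

            if (ret == -EAGAIN)             /* preloaded node was used up; try again */
                    goto retry;

            return ret;
    }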
@@ -350,203 +448,239 @@ release_client_resource(struct client *client, u32 handle,
 	else
 		r->release(client, r);
 
+	client_put(client);
+
 	return 0;
 }
 
-static void
-release_transaction(struct client *client, struct client_resource *resource)
+static void release_transaction(struct client *client,
+				struct client_resource *resource)
 {
-	struct response *response =
-		container_of(resource, struct response, resource);
+	struct outbound_transaction_resource *r = container_of(resource,
+			struct outbound_transaction_resource, resource);
 
-	fw_cancel_transaction(client->device->card, &response->transaction);
+	fw_cancel_transaction(client->device->card, &r->transaction);
 }
 
-static void
-complete_transaction(struct fw_card *card, int rcode,
-		     void *payload, size_t length, void *data)
+static void complete_transaction(struct fw_card *card, int rcode,
+				 void *payload, size_t length, void *data)
 {
-	struct response *response = data;
-	struct client *client = response->client;
+	struct outbound_transaction_event *e = data;
+	struct fw_cdev_event_response *rsp = &e->response;
+	struct client *client = e->client;
 	unsigned long flags;
-	struct fw_cdev_event_response *r = &response->response;
 
-	if (length < r->length)
-		r->length = length;
+	if (length < rsp->length)
+		rsp->length = length;
 	if (rcode == RCODE_COMPLETE)
-		memcpy(r->data, payload, r->length);
+		memcpy(rsp->data, payload, rsp->length);
 
 	spin_lock_irqsave(&client->lock, flags);
-	list_del(&response->resource.link);
+	/*
+	 * 1. If called while in shutdown, the idr tree must be left untouched.
+	 *    The idr handle will be removed and the client reference will be
+	 *    dropped later.
+	 * 2. If the call chain was release_client_resource ->
+	 *    release_transaction -> complete_transaction (instead of a normal
+	 *    conclusion of the transaction), i.e. if this resource was already
+	 *    unregistered from the idr, the client reference will be dropped
+	 *    by release_client_resource and we must not drop it here.
+	 */
+	if (!client->in_shutdown &&
+	    idr_find(&client->resource_idr, e->r.resource.handle)) {
+		idr_remove(&client->resource_idr, e->r.resource.handle);
+		/* Drop the idr's reference */
+		client_put(client);
+	}
 	spin_unlock_irqrestore(&client->lock, flags);
 
-	r->type = FW_CDEV_EVENT_RESPONSE;
-	r->rcode = rcode;
+	rsp->type = FW_CDEV_EVENT_RESPONSE;
+	rsp->rcode = rcode;
 
 	/*
-	 * In the case that sizeof(*r) doesn't align with the position of the
+	 * In the case that sizeof(*rsp) doesn't align with the position of the
 	 * data, and the read is short, preserve an extra copy of the data
 	 * to stay compatible with a pre-2.6.27 bug.  Since the bug is harmless
 	 * for short reads and some apps depended on it, this is both safe
 	 * and prudent for compatibility.
 	 */
-	if (r->length <= sizeof(*r) - offsetof(typeof(*r), data))
-		queue_event(client, &response->event, r, sizeof(*r),
-			    r->data, r->length);
+	if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data))
+		queue_event(client, &e->event, rsp, sizeof(*rsp),
+			    rsp->data, rsp->length);
 	else
-		queue_event(client, &response->event, r, sizeof(*r) + r->length,
+		queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length,
 			    NULL, 0);
+
+	/* Drop the transaction callback's reference */
+	client_put(client);
 }
 
-static int ioctl_send_request(struct client *client, void *buffer)
+static int init_request(struct client *client,
+			struct fw_cdev_send_request *request,
+			int destination_id, int speed)
 {
-	struct fw_device *device = client->device;
-	struct fw_cdev_send_request *request = buffer;
-	struct response *response;
+	struct outbound_transaction_event *e;
+	int ret;
 
-	/* What is the biggest size we'll accept, really? */
-	if (request->length > 4096)
-		return -EINVAL;
+	if (request->tcode != TCODE_STREAM_DATA &&
+	    (request->length > 4096 || request->length > 512 << speed))
+		return -EIO;
 
-	response = kmalloc(sizeof(*response) + request->length, GFP_KERNEL);
-	if (response == NULL)
+	e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
+	if (e == NULL)
 		return -ENOMEM;
 
-	response->client = client;
-	response->response.length = request->length;
-	response->response.closure = request->closure;
+	e->client = client;
+	e->response.length = request->length;
+	e->response.closure = request->closure;
 
 	if (request->data &&
-	    copy_from_user(response->response.data,
+	    copy_from_user(e->response.data,
 			   u64_to_uptr(request->data), request->length)) {
-		kfree(response);
-		return -EFAULT;
+		ret = -EFAULT;
+		goto failed;
 	}
 
-	response->resource.release = release_transaction;
-	add_client_resource(client, &response->resource);
+	e->r.resource.release = release_transaction;
+	ret = add_client_resource(client, &e->r.resource, GFP_KERNEL);
+	if (ret < 0)
+		goto failed;
 
-	fw_send_request(device->card, &response->transaction,
-			request->tcode & 0x1f,
-			device->node->node_id,
-			request->generation,
-			device->max_speed,
-			request->offset,
-			response->response.data, request->length,
-			complete_transaction, response);
+	/* Get a reference for the transaction callback */
+	client_get(client);
 
-	if (request->data)
-		return sizeof(request) + request->length;
-	else
-		return sizeof(request);
+	fw_send_request(client->device->card, &e->r.transaction,
+			request->tcode, destination_id, request->generation,
+			speed, request->offset, e->response.data,
+			request->length, complete_transaction, e);
+	return 0;
+
+ failed:
+	kfree(e);
+
+	return ret;
 }
 
-struct address_handler {
-	struct fw_address_handler handler;
-	__u64 closure;
-	struct client *client;
-	struct client_resource resource;
-};
+static int ioctl_send_request(struct client *client, void *buffer)
+{
+	struct fw_cdev_send_request *request = buffer;
 
-struct request {
-	struct fw_request *request;
-	void *data;
-	size_t length;
-	struct client_resource resource;
-};
+	switch (request->tcode) {
+	case TCODE_WRITE_QUADLET_REQUEST:
+	case TCODE_WRITE_BLOCK_REQUEST:
+	case TCODE_READ_QUADLET_REQUEST:
+	case TCODE_READ_BLOCK_REQUEST:
+	case TCODE_LOCK_MASK_SWAP:
+	case TCODE_LOCK_COMPARE_SWAP:
+	case TCODE_LOCK_FETCH_ADD:
+	case TCODE_LOCK_LITTLE_ADD:
+	case TCODE_LOCK_BOUNDED_ADD:
+	case TCODE_LOCK_WRAP_ADD:
+	case TCODE_LOCK_VENDOR_DEPENDENT:
+		break;
+	default:
+		return -EINVAL;
+	}
 
-struct request_event {
-	struct event event;
-	struct fw_cdev_event_request request;
-};
+	return init_request(client, request, client->device->node_id,
+			    client->device->max_speed);
+}
 
-static void
-release_request(struct client *client, struct client_resource *resource)
+static void release_request(struct client *client,
+			    struct client_resource *resource)
 {
-	struct request *request =
-		container_of(resource, struct request, resource);
+	struct inbound_transaction_resource *r = container_of(resource,
+			struct inbound_transaction_resource, resource);
 
-	fw_send_response(client->device->card, request->request,
+	fw_send_response(client->device->card, r->request,
 			 RCODE_CONFLICT_ERROR);
-	kfree(request);
+	kfree(r);
 }
 
-static void
-handle_request(struct fw_card *card, struct fw_request *r,
-	       int tcode, int destination, int source,
-	       int generation, int speed,
-	       unsigned long long offset,
-	       void *payload, size_t length, void *callback_data)
+static void handle_request(struct fw_card *card, struct fw_request *request,
+			   int tcode, int destination, int source,
+			   int generation, int speed,
+			   unsigned long long offset,
+			   void *payload, size_t length, void *callback_data)
 {
-	struct address_handler *handler = callback_data;
-	struct request *request;
-	struct request_event *e;
-	struct client *client = handler->client;
+	struct address_handler_resource *handler = callback_data;
+	struct inbound_transaction_resource *r;
+	struct inbound_transaction_event *e;
+	int ret;
 
-	request = kmalloc(sizeof(*request), GFP_ATOMIC);
+	r = kmalloc(sizeof(*r), GFP_ATOMIC);
 	e = kmalloc(sizeof(*e), GFP_ATOMIC);
-	if (request == NULL || e == NULL) {
-		kfree(request);
-		kfree(e);
-		fw_send_response(card, r, RCODE_CONFLICT_ERROR);
-		return;
-	}
+	if (r == NULL || e == NULL)
+		goto failed;
 
-	request->request = r;
-	request->data = payload;
-	request->length = length;
+	r->request = request;
+	r->data = payload;
+	r->length = length;
 
-	request->resource.release = release_request;
-	add_client_resource(client, &request->resource);
+	r->resource.release = release_request;
+	ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
+	if (ret < 0)
+		goto failed;
 
 	e->request.type = FW_CDEV_EVENT_REQUEST;
 	e->request.tcode = tcode;
 	e->request.offset = offset;
 	e->request.length = length;
-	e->request.handle = request->resource.handle;
+	e->request.handle = r->resource.handle;
 	e->request.closure = handler->closure;
 
-	queue_event(client, &e->event,
+	queue_event(handler->client, &e->event,
 		    &e->request, sizeof(e->request), payload, length);
+	return;
+
+ failed:
+	kfree(r);
+	kfree(e);
+	fw_send_response(card, request, RCODE_CONFLICT_ERROR);
 }
 
-static void
-release_address_handler(struct client *client,
-			struct client_resource *resource)
+static void release_address_handler(struct client *client,
+				    struct client_resource *resource)
 {
-	struct address_handler *handler =
-		container_of(resource, struct address_handler, resource);
+	struct address_handler_resource *r =
+		container_of(resource, struct address_handler_resource, resource);
 
-	fw_core_remove_address_handler(&handler->handler);
-	kfree(handler);
+	fw_core_remove_address_handler(&r->handler);
+	kfree(r);
 }
 
 static int ioctl_allocate(struct client *client, void *buffer)
 {
 	struct fw_cdev_allocate *request = buffer;
-	struct address_handler *handler;
+	struct address_handler_resource *r;
 	struct fw_address_region region;
+	int ret;
 
-	handler = kmalloc(sizeof(*handler), GFP_KERNEL);
-	if (handler == NULL)
+	r = kmalloc(sizeof(*r), GFP_KERNEL);
+	if (r == NULL)
 		return -ENOMEM;
 
 	region.start = request->offset;
 	region.end = request->offset + request->length;
-	handler->handler.length = request->length;
-	handler->handler.address_callback = handle_request;
-	handler->handler.callback_data = handler;
-	handler->closure = request->closure;
-	handler->client = client;
+	r->handler.length = request->length;
+	r->handler.address_callback = handle_request;
+	r->handler.callback_data = r;
+	r->closure = request->closure;
+	r->client = client;
 
-	if (fw_core_add_address_handler(&handler->handler, &region) < 0) {
-		kfree(handler);
-		return -EBUSY;
+	ret = fw_core_add_address_handler(&r->handler, &region);
+	if (ret < 0) {
+		kfree(r);
+		return ret;
 	}
 
-	handler->resource.release = release_address_handler;
-	add_client_resource(client, &handler->resource);
-	request->handle = handler->resource.handle;
+	r->resource.release = release_address_handler;
+	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
+	if (ret < 0) {
+		release_address_handler(client, &r->resource);
+		return ret;
+	}
+	request->handle = r->resource.handle;
 
 	return 0;
 }
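Annotation: from userspace, init_request()/ioctl_send_request() are driven by filling a struct fw_cdev_send_request, issuing FW_CDEV_IOC_SEND_REQUEST, and then read(2)-ing the FW_CDEV_EVENT_RESPONSE event. A hedged sketch of a quadlet read (it assumes the next event read is our response; a real client would dispatch on the event type):

    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/firewire-cdev.h>
    #include <linux/firewire-constants.h>

    static int read_quadlet(int fd, unsigned long long offset, unsigned int *value)
    {
            struct fw_cdev_get_info info = {0};
            struct fw_cdev_event_bus_reset reset;
            struct fw_cdev_send_request rq = {0};
            union {
                    struct fw_cdev_event_response rsp;
                    char buf[256];
            } u;

            /* GET_INFO copies the current bus reset state back synchronously */
            info.version = FW_CDEV_VERSION;
            info.bus_reset = (__u64)(unsigned long)&reset;
            if (ioctl(fd, FW_CDEV_IOC_GET_INFO, &info) < 0)
                    return -1;

            rq.tcode = TCODE_READ_QUADLET_REQUEST;
            rq.offset = offset;
            rq.length = 4;
            rq.generation = reset.generation;
            if (ioctl(fd, FW_CDEV_IOC_SEND_REQUEST, &rq) < 0)
                    return -1;

            if (read(fd, &u, sizeof(u)) < 0)        /* blocks until an event arrives */
                    return -1;
            if (u.rsp.type != FW_CDEV_EVENT_RESPONSE || u.rsp.rcode != RCODE_COMPLETE)
                    return -1;

            *value = u.rsp.data[0];
            return 0;
    }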
@@ -555,18 +689,22 @@ static int ioctl_deallocate(struct client *client, void *buffer)
 {
 	struct fw_cdev_deallocate *request = buffer;
 
-	return release_client_resource(client, request->handle, NULL);
+	return release_client_resource(client, request->handle,
+				       release_address_handler, NULL);
 }
 
 static int ioctl_send_response(struct client *client, void *buffer)
 {
 	struct fw_cdev_send_response *request = buffer;
 	struct client_resource *resource;
-	struct request *r;
+	struct inbound_transaction_resource *r;
 
-	if (release_client_resource(client, request->handle, &resource) < 0)
+	if (release_client_resource(client, request->handle,
+				    release_request, &resource) < 0)
 		return -EINVAL;
-	r = container_of(resource, struct request, resource);
+
+	r = container_of(resource, struct inbound_transaction_resource,
+			 resource);
 	if (request->length < r->length)
 		r->length = request->length;
 	if (copy_from_user(r->data, u64_to_uptr(request->data), r->length))
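Annotation: the inbound side forms a round trip — FW_CDEV_IOC_ALLOCATE claims an address range, handle_request() queues an FW_CDEV_EVENT_REQUEST for each incoming transaction, and the client answers with FW_CDEV_IOC_SEND_RESPONSE, which consumes the inbound transaction resource held under the event's handle. A hedged sketch (the address is hypothetical and the event read is elided):

    static int claim_range(int fd, unsigned long long offset, unsigned int length)
    {
            struct fw_cdev_allocate alloc = {0};

            alloc.offset = offset;  /* e.g. a free spot in units space */
            alloc.length = length;
            return ioctl(fd, FW_CDEV_IOC_ALLOCATE, &alloc); /* alloc.handle on success */
    }

    /* after read(2) delivered a struct fw_cdev_event_request 'rq': */
    static void ack_write_request(int fd, const struct fw_cdev_event_request *rq)
    {
            struct fw_cdev_send_response resp = {0};

            resp.rcode = RCODE_COMPLETE;    /* the payload already arrived with the event */
            resp.handle = rq->handle;       /* releases the inbound transaction resource */
            resp.length = 0;
            ioctl(fd, FW_CDEV_IOC_SEND_RESPONSE, &resp);
    }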
@@ -588,85 +726,92 @@ static int ioctl_initiate_bus_reset(struct client *client, void *buffer)
 	return fw_core_initiate_bus_reset(client->device->card, short_reset);
 }
 
-struct descriptor {
-	struct fw_descriptor d;
-	struct client_resource resource;
-	u32 data[0];
-};
-
 static void release_descriptor(struct client *client,
 			       struct client_resource *resource)
 {
-	struct descriptor *descriptor =
-		container_of(resource, struct descriptor, resource);
+	struct descriptor_resource *r =
+		container_of(resource, struct descriptor_resource, resource);
 
-	fw_core_remove_descriptor(&descriptor->d);
-	kfree(descriptor);
+	fw_core_remove_descriptor(&r->descriptor);
+	kfree(r);
 }
 
 static int ioctl_add_descriptor(struct client *client, void *buffer)
 {
 	struct fw_cdev_add_descriptor *request = buffer;
-	struct descriptor *descriptor;
-	int retval;
+	struct fw_card *card = client->device->card;
+	struct descriptor_resource *r;
+	int ret;
+
+	/* Access policy: Allow this ioctl only on local nodes' device files. */
+	spin_lock_irq(&card->lock);
+	ret = client->device->node_id != card->local_node->node_id;
+	spin_unlock_irq(&card->lock);
+	if (ret)
+		return -ENOSYS;
 
 	if (request->length > 256)
 		return -EINVAL;
 
-	descriptor =
-		kmalloc(sizeof(*descriptor) + request->length * 4, GFP_KERNEL);
-	if (descriptor == NULL)
+	r = kmalloc(sizeof(*r) + request->length * 4, GFP_KERNEL);
+	if (r == NULL)
 		return -ENOMEM;
 
-	if (copy_from_user(descriptor->data,
+	if (copy_from_user(r->data,
 			   u64_to_uptr(request->data), request->length * 4)) {
-		kfree(descriptor);
-		return -EFAULT;
+		ret = -EFAULT;
+		goto failed;
 	}
 
-	descriptor->d.length = request->length;
-	descriptor->d.immediate = request->immediate;
-	descriptor->d.key = request->key;
-	descriptor->d.data = descriptor->data;
+	r->descriptor.length = request->length;
+	r->descriptor.immediate = request->immediate;
+	r->descriptor.key = request->key;
+	r->descriptor.data = r->data;
 
-	retval = fw_core_add_descriptor(&descriptor->d);
-	if (retval < 0) {
-		kfree(descriptor);
-		return retval;
-	}
+	ret = fw_core_add_descriptor(&r->descriptor);
+	if (ret < 0)
+		goto failed;
 
-	descriptor->resource.release = release_descriptor;
-	add_client_resource(client, &descriptor->resource);
-	request->handle = descriptor->resource.handle;
+	r->resource.release = release_descriptor;
+	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
+	if (ret < 0) {
+		fw_core_remove_descriptor(&r->descriptor);
+		goto failed;
+	}
+	request->handle = r->resource.handle;
 
 	return 0;
+ failed:
+	kfree(r);
+
+	return ret;
 }
 
 static int ioctl_remove_descriptor(struct client *client, void *buffer)
 {
 	struct fw_cdev_remove_descriptor *request = buffer;
 
-	return release_client_resource(client, request->handle, NULL);
+	return release_client_resource(client, request->handle,
+				       release_descriptor, NULL);
 }
 
-static void
-iso_callback(struct fw_iso_context *context, u32 cycle,
-	     size_t header_length, void *header, void *data)
+static void iso_callback(struct fw_iso_context *context, u32 cycle,
+			 size_t header_length, void *header, void *data)
 {
 	struct client *client = data;
-	struct iso_interrupt *irq;
+	struct iso_interrupt_event *e;
 
-	irq = kzalloc(sizeof(*irq) + header_length, GFP_ATOMIC);
-	if (irq == NULL)
+	e = kzalloc(sizeof(*e) + header_length, GFP_ATOMIC);
+	if (e == NULL)
 		return;
 
-	irq->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT;
-	irq->interrupt.closure = client->iso_closure;
-	irq->interrupt.cycle = cycle;
-	irq->interrupt.header_length = header_length;
-	memcpy(irq->interrupt.header, header, header_length);
-	queue_event(client, &irq->event, &irq->interrupt,
-		    sizeof(irq->interrupt) + header_length, NULL, 0);
+	e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT;
+	e->interrupt.closure = client->iso_closure;
+	e->interrupt.cycle = cycle;
+	e->interrupt.header_length = header_length;
+	memcpy(e->interrupt.header, header, header_length);
+	queue_event(client, &e->event, &e->interrupt,
+		    sizeof(e->interrupt) + header_length, NULL, 0);
 }
 
 static int ioctl_create_iso_context(struct client *client, void *buffer)
@@ -871,6 +1016,261 @@ static int ioctl_get_cycle_timer(struct client *client, void *buffer)
 	return 0;
 }
 
+static void iso_resource_work(struct work_struct *work)
+{
+	struct iso_resource_event *e;
+	struct iso_resource *r =
+			container_of(work, struct iso_resource, work.work);
+	struct client *client = r->client;
+	int generation, channel, bandwidth, todo;
+	bool skip, free, success;
+
+	spin_lock_irq(&client->lock);
+	generation = client->device->generation;
+	todo = r->todo;
+	/* Allow 1000ms grace period for other reallocations. */
+	if (todo == ISO_RES_ALLOC &&
+	    time_is_after_jiffies(client->device->card->reset_jiffies + HZ)) {
+		if (schedule_delayed_work(&r->work, DIV_ROUND_UP(HZ, 3)))
+			client_get(client);
+		skip = true;
+	} else {
+		/* We could be called twice within the same generation. */
+		skip = todo == ISO_RES_REALLOC &&
+		       r->generation == generation;
+	}
+	free = todo == ISO_RES_DEALLOC ||
+	       todo == ISO_RES_ALLOC_ONCE ||
+	       todo == ISO_RES_DEALLOC_ONCE;
+	r->generation = generation;
+	spin_unlock_irq(&client->lock);
+
+	if (skip)
+		goto out;
+
+	bandwidth = r->bandwidth;
+
+	fw_iso_resource_manage(client->device->card, generation,
+			r->channels, &channel, &bandwidth,
+			todo == ISO_RES_ALLOC ||
+			todo == ISO_RES_REALLOC ||
+			todo == ISO_RES_ALLOC_ONCE);
+	/*
+	 * Is this generation outdated already?  As long as this resource sticks
+	 * in the idr, it will be scheduled again for a newer generation or at
+	 * shutdown.
+	 */
+	if (channel == -EAGAIN &&
+	    (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC))
+		goto out;
+
+	success = channel >= 0 || bandwidth > 0;
+
+	spin_lock_irq(&client->lock);
+	/*
+	 * Transit from allocation to reallocation, except if the client
+	 * requested deallocation in the meantime.
+	 */
+	if (r->todo == ISO_RES_ALLOC)
+		r->todo = ISO_RES_REALLOC;
+	/*
+	 * Allocation or reallocation failure?  Pull this resource out of the
+	 * idr and prepare for deletion, unless the client is shutting down.
+	 */
+	if (r->todo == ISO_RES_REALLOC && !success &&
+	    !client->in_shutdown &&
+	    idr_find(&client->resource_idr, r->resource.handle)) {
+		idr_remove(&client->resource_idr, r->resource.handle);
+		client_put(client);
+		free = true;
+	}
+	spin_unlock_irq(&client->lock);
+
+	if (todo == ISO_RES_ALLOC && channel >= 0)
+		r->channels = 1ULL << channel;
+
+	if (todo == ISO_RES_REALLOC && success)
+		goto out;
+
+	if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) {
+		e = r->e_alloc;
+		r->e_alloc = NULL;
+	} else {
+		e = r->e_dealloc;
+		r->e_dealloc = NULL;
+	}
+	e->resource.handle = r->resource.handle;
+	e->resource.channel = channel;
+	e->resource.bandwidth = bandwidth;
+
+	queue_event(client, &e->event,
+		    &e->resource, sizeof(e->resource), NULL, 0);
+
+	if (free) {
+		cancel_delayed_work(&r->work);
+		kfree(r->e_alloc);
+		kfree(r->e_dealloc);
+		kfree(r);
+	}
+ out:
+	client_put(client);
+}
+
+static void schedule_iso_resource(struct iso_resource *r)
+{
+	client_get(r->client);
+	if (!schedule_delayed_work(&r->work, 0))
+		client_put(r->client);
+}
+
+static void release_iso_resource(struct client *client,
+				 struct client_resource *resource)
+{
+	struct iso_resource *r =
+		container_of(resource, struct iso_resource, resource);
+
+	spin_lock_irq(&client->lock);
+	r->todo = ISO_RES_DEALLOC;
+	schedule_iso_resource(r);
+	spin_unlock_irq(&client->lock);
+}
+
+static int init_iso_resource(struct client *client,
+		struct fw_cdev_allocate_iso_resource *request, int todo)
+{
+	struct iso_resource_event *e1, *e2;
+	struct iso_resource *r;
+	int ret;
+
+	if ((request->channels == 0 && request->bandwidth == 0) ||
+	    request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
+	    request->bandwidth < 0)
+		return -EINVAL;
+
+	r  = kmalloc(sizeof(*r), GFP_KERNEL);
+	e1 = kmalloc(sizeof(*e1), GFP_KERNEL);
+	e2 = kmalloc(sizeof(*e2), GFP_KERNEL);
+	if (r == NULL || e1 == NULL || e2 == NULL) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	INIT_DELAYED_WORK(&r->work, iso_resource_work);
+	r->client = client;
+	r->todo = todo;
+	r->generation = -1;
+	r->channels = request->channels;
+	r->bandwidth = request->bandwidth;
+	r->e_alloc = e1;
+	r->e_dealloc = e2;
+
+	e1->resource.closure = request->closure;
+	e1->resource.type = FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED;
+	e2->resource.closure = request->closure;
+	e2->resource.type = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED;
+
+	if (todo == ISO_RES_ALLOC) {
+		r->resource.release = release_iso_resource;
+		ret = add_client_resource(client, &r->resource, GFP_KERNEL);
+		if (ret < 0)
+			goto fail;
+	} else {
+		r->resource.release = NULL;
+		r->resource.handle = -1;
+		schedule_iso_resource(r);
+	}
+	request->handle = r->resource.handle;
+
+	return 0;
+ fail:
+	kfree(r);
+	kfree(e1);
+	kfree(e2);
+
+	return ret;
+}
+
+static int ioctl_allocate_iso_resource(struct client *client, void *buffer)
+{
+	struct fw_cdev_allocate_iso_resource *request = buffer;
+
+	return init_iso_resource(client, request, ISO_RES_ALLOC);
+}
+
+static int ioctl_deallocate_iso_resource(struct client *client, void *buffer)
+{
+	struct fw_cdev_deallocate *request = buffer;
+
+	return release_client_resource(client, request->handle,
+				       release_iso_resource, NULL);
+}
+
+static int ioctl_allocate_iso_resource_once(struct client *client, void *buffer)
+{
+	struct fw_cdev_allocate_iso_resource *request = buffer;
+
+	return init_iso_resource(client, request, ISO_RES_ALLOC_ONCE);
+}
+
+static int ioctl_deallocate_iso_resource_once(struct client *client, void *buffer)
+{
+	struct fw_cdev_allocate_iso_resource *request = buffer;
+
+	return init_iso_resource(client, request, ISO_RES_DEALLOC_ONCE);
+}
+
+/*
+ * Returns a speed code:  Maximum speed to or from this device,
+ * limited by the device's link speed, the local node's link speed,
+ * and all PHY port speeds between the two links.
+ */
+static int ioctl_get_speed(struct client *client, void *buffer)
+{
+	return client->device->max_speed;
+}
+
+static int ioctl_send_broadcast_request(struct client *client, void *buffer)
+{
+	struct fw_cdev_send_request *request = buffer;
+
+	switch (request->tcode) {
+	case TCODE_WRITE_QUADLET_REQUEST:
+	case TCODE_WRITE_BLOCK_REQUEST:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* Security policy: Only allow accesses to Units Space. */
+	if (request->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
+		return -EACCES;
+
+	return init_request(client, request, LOCAL_BUS | 0x3f, SCODE_100);
+}
+
+static int ioctl_send_stream_packet(struct client *client, void *buffer)
+{
+	struct fw_cdev_send_stream_packet *p = buffer;
+	struct fw_cdev_send_request request;
+	int dest;
+
+	if (p->speed > client->device->card->link_speed ||
+	    p->length > 1024 << p->speed)
+		return -EIO;
+
+	if (p->tag > 3 || p->channel > 63 || p->sy > 15)
+		return -EINVAL;
+
+	dest = fw_stream_packet_destination_id(p->tag, p->channel, p->sy);
+	request.tcode = TCODE_STREAM_DATA;
+	request.length = p->length;
+	request.closure = p->closure;
+	request.data = p->data;
+	request.generation = p->generation;
+
+	return init_request(client, &request, dest, p->speed);
+}
+
 static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
 	ioctl_get_info,
 	ioctl_send_request,
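Annotation: schedule_iso_resource()/iso_resource_work() above rely on a small invariant — exactly one client reference is held per pending execution of the delayed work, taken before queueing and returned immediately if the work was already queued (schedule_delayed_work() returns zero in that case). A generic sketch of the idiom, with hypothetical names:

    #include <linux/kref.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct task_obj {
            struct kref kref;
            struct delayed_work work;
    };

    static void task_release(struct kref *kref)
    {
            kfree(container_of(kref, struct task_obj, kref));
    }

    /* hold one reference per pending execution of the work item */
    static void task_schedule(struct task_obj *t, unsigned long delay)
    {
            kref_get(&t->kref);
            if (!schedule_delayed_work(&t->work, delay))
                    kref_put(&t->kref, task_release);  /* already queued; give it back */
    }

    static void task_work(struct work_struct *work)
    {
            struct task_obj *t = container_of(work, struct task_obj, work.work);

            /* ... do the work, possibly calling task_schedule() again ... */

            kref_put(&t->kref, task_release);  /* drop this execution's reference */
    }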
@@ -885,13 +1285,20 @@ static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
 	ioctl_start_iso,
 	ioctl_stop_iso,
 	ioctl_get_cycle_timer,
+	ioctl_allocate_iso_resource,
+	ioctl_deallocate_iso_resource,
+	ioctl_allocate_iso_resource_once,
+	ioctl_deallocate_iso_resource_once,
+	ioctl_get_speed,
+	ioctl_send_broadcast_request,
+	ioctl_send_stream_packet,
 };
 
-static int
-dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg)
+static int dispatch_ioctl(struct client *client,
+			  unsigned int cmd, void __user *arg)
 {
 	char buffer[256];
-	int retval;
+	int ret;
 
 	if (_IOC_TYPE(cmd) != '#' ||
 	    _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers))
@@ -903,9 +1310,9 @@ dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg)
 			return -EFAULT;
 	}
 
-	retval = ioctl_handlers[_IOC_NR(cmd)](client, buffer);
-	if (retval < 0)
-		return retval;
+	ret = ioctl_handlers[_IOC_NR(cmd)](client, buffer);
+	if (ret < 0)
+		return ret;
 
 	if (_IOC_DIR(cmd) & _IOC_READ) {
 		if (_IOC_SIZE(cmd) > sizeof(buffer) ||
@@ -913,12 +1320,11 @@ dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg)
 			return -EFAULT;
 	}
 
-	return retval;
+	return ret;
 }
 
-static long
-fw_device_op_ioctl(struct file *file,
-		   unsigned int cmd, unsigned long arg)
+static long fw_device_op_ioctl(struct file *file,
+			       unsigned int cmd, unsigned long arg)
 {
 	struct client *client = file->private_data;
 
@@ -929,9 +1335,8 @@ fw_device_op_ioctl(struct file *file,
 }
 
 #ifdef CONFIG_COMPAT
-static long
-fw_device_op_compat_ioctl(struct file *file,
-			  unsigned int cmd, unsigned long arg)
+static long fw_device_op_compat_ioctl(struct file *file,
+				      unsigned int cmd, unsigned long arg)
 {
 	struct client *client = file->private_data;
 
@@ -947,7 +1352,7 @@ static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
 	struct client *client = file->private_data;
 	enum dma_data_direction direction;
 	unsigned long size;
-	int page_count, retval;
+	int page_count, ret;
 
 	if (fw_device_is_shutdown(client->device))
 		return -ENODEV;
@@ -973,48 +1378,57 @@ static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
 	else
 		direction = DMA_FROM_DEVICE;
 
-	retval = fw_iso_buffer_init(&client->buffer, client->device->card,
-				    page_count, direction);
-	if (retval < 0)
-		return retval;
+	ret = fw_iso_buffer_init(&client->buffer, client->device->card,
+				 page_count, direction);
+	if (ret < 0)
+		return ret;
 
-	retval = fw_iso_buffer_map(&client->buffer, vma);
-	if (retval < 0)
+	ret = fw_iso_buffer_map(&client->buffer, vma);
+	if (ret < 0)
 		fw_iso_buffer_destroy(&client->buffer, client->device->card);
 
-	return retval;
+	return ret;
+}
+
+static int shutdown_resource(int id, void *p, void *data)
+{
+	struct client_resource *r = p;
+	struct client *client = data;
+
+	r->release(client, r);
+	client_put(client);
+
+	return 0;
 }
 
 static int fw_device_op_release(struct inode *inode, struct file *file)
 {
 	struct client *client = file->private_data;
 	struct event *e, *next_e;
-	struct client_resource *r, *next_r;
-	unsigned long flags;
 
-	if (client->buffer.pages)
-		fw_iso_buffer_destroy(&client->buffer, client->device->card);
+	mutex_lock(&client->device->client_list_mutex);
+	list_del(&client->link);
+	mutex_unlock(&client->device->client_list_mutex);
 
 	if (client->iso_context)
 		fw_iso_context_destroy(client->iso_context);
 
-	list_for_each_entry_safe(r, next_r, &client->resource_list, link)
-		r->release(client, r);
+	if (client->buffer.pages)
+		fw_iso_buffer_destroy(&client->buffer, client->device->card);
 
-	/*
-	 * FIXME: We should wait for the async tasklets to stop
-	 * running before freeing the memory.
-	 */
+	/* Freeze client->resource_idr and client->event_list */
+	spin_lock_irq(&client->lock);
+	client->in_shutdown = true;
+	spin_unlock_irq(&client->lock);
+
+	idr_for_each(&client->resource_idr, shutdown_resource, client);
+	idr_remove_all(&client->resource_idr);
+	idr_destroy(&client->resource_idr);
 
 	list_for_each_entry_safe(e, next_e, &client->event_list, link)
 		kfree(e);
 
-	spin_lock_irqsave(&client->device->card->lock, flags);
-	list_del(&client->link);
-	spin_unlock_irqrestore(&client->device->card->lock, flags);
-
-	fw_device_put(client->device);
-	kfree(client);
+	client_put(client);
 
 	return 0;
 }
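Closing annotation on the release path: the ordering in the final hunk matters. in_shutdown is set under client->lock before the idr is walked, so a transaction that completes concurrently sees the flag and neither touches resource_idr nor drops a reference that shutdown_resource() is about to drop. Only then does idr_for_each() release every registered resource, idr_remove_all() empty the tree, and idr_destroy() free the idr's internal layers; the final client_put() drops the file's own reference, and client_release() frees the client once all outstanding callback references have drained. This replaces the old FIXME about racing with still-running tasklets.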