Diffstat (limited to 'drivers/ieee1394/raw1394.c')
-rw-r--r-- | drivers/ieee1394/raw1394.c | 3096
1 files changed, 0 insertions, 3096 deletions
diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c
deleted file mode 100644
index f3401427404c..000000000000
--- a/drivers/ieee1394/raw1394.c
+++ /dev/null
@@ -1,3096 +0,0 @@
1 | /* | ||
2 | * IEEE 1394 for Linux | ||
3 | * | ||
4 | * Raw interface to the bus | ||
5 | * | ||
6 | * Copyright (C) 1999, 2000 Andreas E. Bombe | ||
7 | * 2001, 2002 Manfred Weihs <weihs@ict.tuwien.ac.at> | ||
8 | * 2002 Christian Toegel <christian.toegel@gmx.at> | ||
9 | * | ||
10 | * This code is licensed under the GPL. See the file COPYING in the root | ||
11 | * directory of the kernel sources for details. | ||
12 | * | ||
13 | * | ||
14 | * Contributions: | ||
15 | * | ||
16 | * Manfred Weihs <weihs@ict.tuwien.ac.at> | ||
17 | * configuration ROM manipulation | ||
18 | * address range mapping | ||
19 | * adaptation for new (transparent) loopback mechanism | ||
20 | * sending of arbitrary async packets | ||
21 | * Christian Toegel <christian.toegel@gmx.at> | ||
22 | * address range mapping | ||
23 | * lock64 request | ||
24 | * transmit physical packet | ||
25 | * busreset notification control (switch on/off) | ||
26 | * busreset with selection of type (short/long) | ||
27 | * request_reply | ||
28 | */ | ||
29 | |||
30 | #include <linux/kernel.h> | ||
31 | #include <linux/list.h> | ||
32 | #include <linux/sched.h> | ||
33 | #include <linux/string.h> | ||
34 | #include <linux/slab.h> | ||
35 | #include <linux/fs.h> | ||
36 | #include <linux/poll.h> | ||
37 | #include <linux/module.h> | ||
38 | #include <linux/mutex.h> | ||
39 | #include <linux/init.h> | ||
40 | #include <linux/interrupt.h> | ||
41 | #include <linux/vmalloc.h> | ||
42 | #include <linux/cdev.h> | ||
43 | #include <asm/uaccess.h> | ||
44 | #include <asm/atomic.h> | ||
45 | #include <linux/compat.h> | ||
46 | |||
47 | #include "csr1212.h" | ||
48 | #include "highlevel.h" | ||
49 | #include "hosts.h" | ||
50 | #include "ieee1394.h" | ||
51 | #include "ieee1394_core.h" | ||
52 | #include "ieee1394_hotplug.h" | ||
53 | #include "ieee1394_transactions.h" | ||
54 | #include "ieee1394_types.h" | ||
55 | #include "iso.h" | ||
56 | #include "nodemgr.h" | ||
57 | #include "raw1394.h" | ||
58 | #include "raw1394-private.h" | ||
59 | |||
60 | #define int2ptr(x) ((void __user *)(unsigned long)x) | ||
61 | #define ptr2int(x) ((u64)(unsigned long)(void __user *)x) | ||
62 | |||
63 | #ifdef CONFIG_IEEE1394_VERBOSEDEBUG | ||
64 | #define RAW1394_DEBUG | ||
65 | #endif | ||
66 | |||
67 | #ifdef RAW1394_DEBUG | ||
68 | #define DBGMSG(fmt, args...) \ | ||
69 | printk(KERN_INFO "raw1394:" fmt "\n" , ## args) | ||
70 | #else | ||
71 | #define DBGMSG(fmt, args...) do {} while (0) | ||
72 | #endif | ||
73 | |||
74 | static LIST_HEAD(host_info_list); | ||
75 | static int host_count; | ||
76 | static DEFINE_SPINLOCK(host_info_lock); | ||
77 | static atomic_t internal_generation = ATOMIC_INIT(0); | ||
78 | |||
79 | static atomic_t iso_buffer_size; | ||
80 | static const int iso_buffer_max = 4 * 1024 * 1024; /* 4 MB */ | ||
81 | |||
82 | static struct hpsb_highlevel raw1394_highlevel; | ||
83 | |||
84 | static int arm_read(struct hpsb_host *host, int nodeid, quadlet_t * buffer, | ||
85 | u64 addr, size_t length, u16 flags); | ||
86 | static int arm_write(struct hpsb_host *host, int nodeid, int destid, | ||
87 | quadlet_t * data, u64 addr, size_t length, u16 flags); | ||
88 | static int arm_lock(struct hpsb_host *host, int nodeid, quadlet_t * store, | ||
89 | u64 addr, quadlet_t data, quadlet_t arg, int ext_tcode, | ||
90 | u16 flags); | ||
91 | static int arm_lock64(struct hpsb_host *host, int nodeid, octlet_t * store, | ||
92 | u64 addr, octlet_t data, octlet_t arg, int ext_tcode, | ||
93 | u16 flags); | ||
94 | static const struct hpsb_address_ops arm_ops = { | ||
95 | .read = arm_read, | ||
96 | .write = arm_write, | ||
97 | .lock = arm_lock, | ||
98 | .lock64 = arm_lock64, | ||
99 | }; | ||
100 | |||
101 | static void queue_complete_cb(struct pending_request *req); | ||
102 | |||
103 | static struct pending_request *__alloc_pending_request(gfp_t flags) | ||
104 | { | ||
105 | struct pending_request *req; | ||
106 | |||
107 | req = kzalloc(sizeof(*req), flags); | ||
108 | if (req) | ||
109 | INIT_LIST_HEAD(&req->list); | ||
110 | |||
111 | return req; | ||
112 | } | ||
113 | |||
114 | static inline struct pending_request *alloc_pending_request(void) | ||
115 | { | ||
116 | return __alloc_pending_request(GFP_KERNEL); | ||
117 | } | ||
118 | |||
119 | static void free_pending_request(struct pending_request *req) | ||
120 | { | ||
121 | if (req->ibs) { | ||
122 | if (atomic_dec_and_test(&req->ibs->refcount)) { | ||
123 | atomic_sub(req->ibs->data_size, &iso_buffer_size); | ||
124 | kfree(req->ibs); | ||
125 | } | ||
126 | } else if (req->free_data) { | ||
127 | kfree(req->data); | ||
128 | } | ||
129 | hpsb_free_packet(req->packet); | ||
130 | kfree(req); | ||
131 | } | ||
132 | |||
133 | /* fi->reqlists_lock must be taken */ | ||
134 | static void __queue_complete_req(struct pending_request *req) | ||
135 | { | ||
136 | struct file_info *fi = req->file_info; | ||
137 | |||
138 | list_move_tail(&req->list, &fi->req_complete); | ||
139 | wake_up(&fi->wait_complete); | ||
140 | } | ||
141 | |||
142 | static void queue_complete_req(struct pending_request *req) | ||
143 | { | ||
144 | unsigned long flags; | ||
145 | struct file_info *fi = req->file_info; | ||
146 | |||
147 | spin_lock_irqsave(&fi->reqlists_lock, flags); | ||
148 | __queue_complete_req(req); | ||
149 | spin_unlock_irqrestore(&fi->reqlists_lock, flags); | ||
150 | } | ||
151 | |||
152 | static void queue_complete_cb(struct pending_request *req) | ||
153 | { | ||
154 | struct hpsb_packet *packet = req->packet; | ||
155 | int rcode = (packet->header[1] >> 12) & 0xf; | ||
156 | |||
157 | switch (packet->ack_code) { | ||
158 | case ACKX_NONE: | ||
159 | case ACKX_SEND_ERROR: | ||
160 | req->req.error = RAW1394_ERROR_SEND_ERROR; | ||
161 | break; | ||
162 | case ACKX_ABORTED: | ||
163 | req->req.error = RAW1394_ERROR_ABORTED; | ||
164 | break; | ||
165 | case ACKX_TIMEOUT: | ||
166 | req->req.error = RAW1394_ERROR_TIMEOUT; | ||
167 | break; | ||
168 | default: | ||
169 | req->req.error = (packet->ack_code << 16) | rcode; | ||
170 | break; | ||
171 | } | ||
172 | |||
173 | if (!((packet->ack_code == ACK_PENDING) && (rcode == RCODE_COMPLETE))) { | ||
174 | req->req.length = 0; | ||
175 | } | ||
176 | |||
177 | if ((req->req.type == RAW1394_REQ_ASYNC_READ) || | ||
178 | (req->req.type == RAW1394_REQ_ASYNC_WRITE) || | ||
179 | (req->req.type == RAW1394_REQ_ASYNC_STREAM) || | ||
180 | (req->req.type == RAW1394_REQ_LOCK) || | ||
181 | (req->req.type == RAW1394_REQ_LOCK64)) | ||
182 | hpsb_free_tlabel(packet); | ||
183 | |||
184 | queue_complete_req(req); | ||
185 | } | ||
186 | |||
187 | static void add_host(struct hpsb_host *host) | ||
188 | { | ||
189 | struct host_info *hi; | ||
190 | unsigned long flags; | ||
191 | |||
192 | hi = kmalloc(sizeof(*hi), GFP_KERNEL); | ||
193 | |||
194 | if (hi) { | ||
195 | INIT_LIST_HEAD(&hi->list); | ||
196 | hi->host = host; | ||
197 | INIT_LIST_HEAD(&hi->file_info_list); | ||
198 | |||
199 | spin_lock_irqsave(&host_info_lock, flags); | ||
200 | list_add_tail(&hi->list, &host_info_list); | ||
201 | host_count++; | ||
202 | spin_unlock_irqrestore(&host_info_lock, flags); | ||
203 | } | ||
204 | |||
205 | atomic_inc(&internal_generation); | ||
206 | } | ||
207 | |||
208 | static struct host_info *find_host_info(struct hpsb_host *host) | ||
209 | { | ||
210 | struct host_info *hi; | ||
211 | |||
212 | list_for_each_entry(hi, &host_info_list, list) | ||
213 | if (hi->host == host) | ||
214 | return hi; | ||
215 | |||
216 | return NULL; | ||
217 | } | ||
218 | |||
219 | static void remove_host(struct hpsb_host *host) | ||
220 | { | ||
221 | struct host_info *hi; | ||
222 | unsigned long flags; | ||
223 | |||
224 | spin_lock_irqsave(&host_info_lock, flags); | ||
225 | hi = find_host_info(host); | ||
226 | |||
227 | if (hi != NULL) { | ||
228 | list_del(&hi->list); | ||
229 | host_count--; | ||
230 | /* | ||
231 | FIXME: address ranges should be removed | ||
232 | and fileinfo states should be initialized | ||
233 | (including setting generation to | ||
234 | internal-generation ...) | ||
235 | */ | ||
236 | } | ||
237 | spin_unlock_irqrestore(&host_info_lock, flags); | ||
238 | |||
239 | if (hi == NULL) { | ||
240 | printk(KERN_ERR "raw1394: attempt to remove unknown host " | ||
241 | "0x%p\n", host); | ||
242 | return; | ||
243 | } | ||
244 | |||
245 | kfree(hi); | ||
246 | |||
247 | atomic_inc(&internal_generation); | ||
248 | } | ||
249 | |||
250 | static void host_reset(struct hpsb_host *host) | ||
251 | { | ||
252 | unsigned long flags; | ||
253 | struct host_info *hi; | ||
254 | struct file_info *fi; | ||
255 | struct pending_request *req; | ||
256 | |||
257 | spin_lock_irqsave(&host_info_lock, flags); | ||
258 | hi = find_host_info(host); | ||
259 | |||
260 | if (hi != NULL) { | ||
261 | list_for_each_entry(fi, &hi->file_info_list, list) { | ||
262 | if (fi->notification == RAW1394_NOTIFY_ON) { | ||
263 | req = __alloc_pending_request(GFP_ATOMIC); | ||
264 | |||
265 | if (req != NULL) { | ||
266 | req->file_info = fi; | ||
267 | req->req.type = RAW1394_REQ_BUS_RESET; | ||
268 | req->req.generation = | ||
269 | get_hpsb_generation(host); | ||
270 | req->req.misc = (host->node_id << 16) | ||
271 | | host->node_count; | ||
272 | if (fi->protocol_version > 3) { | ||
273 | req->req.misc |= | ||
274 | (NODEID_TO_NODE | ||
275 | (host->irm_id) | ||
276 | << 8); | ||
277 | } | ||
278 | |||
279 | queue_complete_req(req); | ||
280 | } | ||
281 | } | ||
282 | } | ||
283 | } | ||
284 | spin_unlock_irqrestore(&host_info_lock, flags); | ||
285 | } | ||
286 | |||
287 | static void fcp_request(struct hpsb_host *host, int nodeid, int direction, | ||
288 | int cts, u8 * data, size_t length) | ||
289 | { | ||
290 | unsigned long flags; | ||
291 | struct host_info *hi; | ||
292 | struct file_info *fi; | ||
293 | struct pending_request *req, *req_next; | ||
294 | struct iso_block_store *ibs = NULL; | ||
295 | LIST_HEAD(reqs); | ||
296 | |||
297 | if ((atomic_read(&iso_buffer_size) + length) > iso_buffer_max) { | ||
298 | HPSB_INFO("dropped fcp request"); | ||
299 | return; | ||
300 | } | ||
301 | |||
302 | spin_lock_irqsave(&host_info_lock, flags); | ||
303 | hi = find_host_info(host); | ||
304 | |||
305 | if (hi != NULL) { | ||
306 | list_for_each_entry(fi, &hi->file_info_list, list) { | ||
307 | if (!fi->fcp_buffer) | ||
308 | continue; | ||
309 | |||
310 | req = __alloc_pending_request(GFP_ATOMIC); | ||
311 | if (!req) | ||
312 | break; | ||
313 | |||
314 | if (!ibs) { | ||
315 | ibs = kmalloc(sizeof(*ibs) + length, | ||
316 | GFP_ATOMIC); | ||
317 | if (!ibs) { | ||
318 | kfree(req); | ||
319 | break; | ||
320 | } | ||
321 | |||
322 | atomic_add(length, &iso_buffer_size); | ||
323 | atomic_set(&ibs->refcount, 0); | ||
324 | ibs->data_size = length; | ||
325 | memcpy(ibs->data, data, length); | ||
326 | } | ||
327 | |||
328 | atomic_inc(&ibs->refcount); | ||
329 | |||
330 | req->file_info = fi; | ||
331 | req->ibs = ibs; | ||
332 | req->data = ibs->data; | ||
333 | req->req.type = RAW1394_REQ_FCP_REQUEST; | ||
334 | req->req.generation = get_hpsb_generation(host); | ||
335 | req->req.misc = nodeid | (direction << 16); | ||
336 | req->req.recvb = ptr2int(fi->fcp_buffer); | ||
337 | req->req.length = length; | ||
338 | |||
339 | list_add_tail(&req->list, &reqs); | ||
340 | } | ||
341 | } | ||
342 | spin_unlock_irqrestore(&host_info_lock, flags); | ||
343 | |||
344 | list_for_each_entry_safe(req, req_next, &reqs, list) | ||
345 | queue_complete_req(req); | ||
346 | } | ||
347 | |||
348 | #ifdef CONFIG_COMPAT | ||
349 | struct compat_raw1394_req { | ||
350 | __u32 type; | ||
351 | __s32 error; | ||
352 | __u32 misc; | ||
353 | |||
354 | __u32 generation; | ||
355 | __u32 length; | ||
356 | |||
357 | __u64 address; | ||
358 | |||
359 | __u64 tag; | ||
360 | |||
361 | __u64 sendb; | ||
362 | __u64 recvb; | ||
363 | } | ||
364 | #if defined(CONFIG_X86_64) || defined(CONFIG_IA64) | ||
365 | __attribute__((packed)) | ||
366 | #endif | ||
367 | ; | ||
368 | |||
369 | static const char __user *raw1394_compat_write(const char __user *buf) | ||
370 | { | ||
371 | struct compat_raw1394_req __user *cr = (typeof(cr)) buf; | ||
372 | struct raw1394_request __user *r; | ||
373 | |||
374 | r = compat_alloc_user_space(sizeof(struct raw1394_request)); | ||
375 | |||
376 | #define C(x) __copy_in_user(&r->x, &cr->x, sizeof(r->x)) | ||
377 | |||
378 | if (copy_in_user(r, cr, sizeof(struct compat_raw1394_req)) || | ||
379 | C(address) || | ||
380 | C(tag) || | ||
381 | C(sendb) || | ||
382 | C(recvb)) | ||
383 | return (__force const char __user *)ERR_PTR(-EFAULT); | ||
384 | |||
385 | return (const char __user *)r; | ||
386 | } | ||
387 | #undef C | ||
388 | |||
389 | #define P(x) __put_user(r->x, &cr->x) | ||
390 | |||
391 | static int | ||
392 | raw1394_compat_read(const char __user *buf, struct raw1394_request *r) | ||
393 | { | ||
394 | struct compat_raw1394_req __user *cr = (typeof(cr)) buf; | ||
395 | |||
396 | if (!access_ok(VERIFY_WRITE, cr, sizeof(struct compat_raw1394_req)) || | ||
397 | P(type) || | ||
398 | P(error) || | ||
399 | P(misc) || | ||
400 | P(generation) || | ||
401 | P(length) || | ||
402 | P(address) || | ||
403 | P(tag) || | ||
404 | P(sendb) || | ||
405 | P(recvb)) | ||
406 | return -EFAULT; | ||
407 | |||
408 | return sizeof(struct compat_raw1394_req); | ||
409 | } | ||
410 | #undef P | ||
411 | |||
412 | #endif | ||
413 | |||
414 | /* get next completed request (caller must hold fi->reqlists_lock) */ | ||
415 | static inline struct pending_request *__next_complete_req(struct file_info *fi) | ||
416 | { | ||
417 | struct list_head *lh; | ||
418 | struct pending_request *req = NULL; | ||
419 | |||
420 | if (!list_empty(&fi->req_complete)) { | ||
421 | lh = fi->req_complete.next; | ||
422 | list_del(lh); | ||
423 | req = list_entry(lh, struct pending_request, list); | ||
424 | } | ||
425 | return req; | ||
426 | } | ||
427 | |||
428 | /* atomically get next completed request */ | ||
429 | static struct pending_request *next_complete_req(struct file_info *fi) | ||
430 | { | ||
431 | unsigned long flags; | ||
432 | struct pending_request *req; | ||
433 | |||
434 | spin_lock_irqsave(&fi->reqlists_lock, flags); | ||
435 | req = __next_complete_req(fi); | ||
436 | spin_unlock_irqrestore(&fi->reqlists_lock, flags); | ||
437 | return req; | ||
438 | } | ||
439 | |||
440 | static ssize_t raw1394_read(struct file *file, char __user * buffer, | ||
441 | size_t count, loff_t * offset_is_ignored) | ||
442 | { | ||
443 | struct file_info *fi = file->private_data; | ||
444 | struct pending_request *req; | ||
445 | ssize_t ret; | ||
446 | |||
447 | #ifdef CONFIG_COMPAT | ||
448 | if (count == sizeof(struct compat_raw1394_req)) { | ||
449 | /* ok */ | ||
450 | } else | ||
451 | #endif | ||
452 | if (count != sizeof(struct raw1394_request)) { | ||
453 | return -EINVAL; | ||
454 | } | ||
455 | |||
456 | if (!access_ok(VERIFY_WRITE, buffer, count)) { | ||
457 | return -EFAULT; | ||
458 | } | ||
459 | |||
460 | if (file->f_flags & O_NONBLOCK) { | ||
461 | if (!(req = next_complete_req(fi))) | ||
462 | return -EAGAIN; | ||
463 | } else { | ||
464 | /* | ||
465 | * NB: We call the macro wait_event_interruptible() with a | ||
466 | * condition argument with side effect. This is only possible | ||
467 | * because the side effect does not occur until the condition | ||
468 | * became true, and wait_event_interruptible() won't evaluate | ||
469 | * the condition again after that. | ||
470 | */ | ||
471 | if (wait_event_interruptible(fi->wait_complete, | ||
472 | (req = next_complete_req(fi)))) | ||
473 | return -ERESTARTSYS; | ||
474 | } | ||
475 | |||
476 | if (req->req.length) { | ||
477 | if (copy_to_user(int2ptr(req->req.recvb), req->data, | ||
478 | req->req.length)) { | ||
479 | req->req.error = RAW1394_ERROR_MEMFAULT; | ||
480 | } | ||
481 | } | ||
482 | |||
483 | #ifdef CONFIG_COMPAT | ||
484 | if (count == sizeof(struct compat_raw1394_req) && | ||
485 | sizeof(struct compat_raw1394_req) != | ||
486 | sizeof(struct raw1394_request)) { | ||
487 | ret = raw1394_compat_read(buffer, &req->req); | ||
488 | } else | ||
489 | #endif | ||
490 | { | ||
491 | if (copy_to_user(buffer, &req->req, sizeof(req->req))) { | ||
492 | ret = -EFAULT; | ||
493 | goto out; | ||
494 | } | ||
495 | ret = (ssize_t) sizeof(struct raw1394_request); | ||
496 | } | ||
497 | out: | ||
498 | free_pending_request(req); | ||
499 | return ret; | ||
500 | } | ||
501 | |||
502 | static int state_opened(struct file_info *fi, struct pending_request *req) | ||
503 | { | ||
504 | if (req->req.type == RAW1394_REQ_INITIALIZE) { | ||
505 | switch (req->req.misc) { | ||
506 | case RAW1394_KERNELAPI_VERSION: | ||
507 | case 3: | ||
508 | fi->state = initialized; | ||
509 | fi->protocol_version = req->req.misc; | ||
510 | req->req.error = RAW1394_ERROR_NONE; | ||
511 | req->req.generation = atomic_read(&internal_generation); | ||
512 | break; | ||
513 | |||
514 | default: | ||
515 | req->req.error = RAW1394_ERROR_COMPAT; | ||
516 | req->req.misc = RAW1394_KERNELAPI_VERSION; | ||
517 | } | ||
518 | } else { | ||
519 | req->req.error = RAW1394_ERROR_STATE_ORDER; | ||
520 | } | ||
521 | |||
522 | req->req.length = 0; | ||
523 | queue_complete_req(req); | ||
524 | return 0; | ||
525 | } | ||
526 | |||
527 | static int state_initialized(struct file_info *fi, struct pending_request *req) | ||
528 | { | ||
529 | unsigned long flags; | ||
530 | struct host_info *hi; | ||
531 | struct raw1394_khost_list *khl; | ||
532 | |||
533 | if (req->req.generation != atomic_read(&internal_generation)) { | ||
534 | req->req.error = RAW1394_ERROR_GENERATION; | ||
535 | req->req.generation = atomic_read(&internal_generation); | ||
536 | req->req.length = 0; | ||
537 | queue_complete_req(req); | ||
538 | return 0; | ||
539 | } | ||
540 | |||
541 | switch (req->req.type) { | ||
542 | case RAW1394_REQ_LIST_CARDS: | ||
543 | spin_lock_irqsave(&host_info_lock, flags); | ||
544 | khl = kmalloc(sizeof(*khl) * host_count, GFP_ATOMIC); | ||
545 | |||
546 | if (khl) { | ||
547 | req->req.misc = host_count; | ||
548 | req->data = (quadlet_t *) khl; | ||
549 | |||
550 | list_for_each_entry(hi, &host_info_list, list) { | ||
551 | khl->nodes = hi->host->node_count; | ||
552 | strcpy(khl->name, hi->host->driver->name); | ||
553 | khl++; | ||
554 | } | ||
555 | } | ||
556 | spin_unlock_irqrestore(&host_info_lock, flags); | ||
557 | |||
558 | if (khl) { | ||
559 | req->req.error = RAW1394_ERROR_NONE; | ||
560 | req->req.length = min(req->req.length, | ||
561 | (u32) (sizeof | ||
562 | (struct raw1394_khost_list) | ||
563 | * req->req.misc)); | ||
564 | req->free_data = 1; | ||
565 | } else { | ||
566 | return -ENOMEM; | ||
567 | } | ||
568 | break; | ||
569 | |||
570 | case RAW1394_REQ_SET_CARD: | ||
571 | spin_lock_irqsave(&host_info_lock, flags); | ||
572 | if (req->req.misc >= host_count) { | ||
573 | req->req.error = RAW1394_ERROR_INVALID_ARG; | ||
574 | goto out_set_card; | ||
575 | } | ||
576 | list_for_each_entry(hi, &host_info_list, list) | ||
577 | if (!req->req.misc--) | ||
578 | break; | ||
579 | get_device(&hi->host->device); /* FIXME handle failure case */ | ||
580 | list_add_tail(&fi->list, &hi->file_info_list); | ||
581 | |||
582 | /* prevent unloading of the host's low-level driver */ | ||
583 | if (!try_module_get(hi->host->driver->owner)) { | ||
584 | req->req.error = RAW1394_ERROR_ABORTED; | ||
585 | goto out_set_card; | ||
586 | } | ||
587 | WARN_ON(fi->host); | ||
588 | fi->host = hi->host; | ||
589 | fi->state = connected; | ||
590 | |||
591 | req->req.error = RAW1394_ERROR_NONE; | ||
592 | req->req.generation = get_hpsb_generation(fi->host); | ||
593 | req->req.misc = (fi->host->node_id << 16) | ||
594 | | fi->host->node_count; | ||
595 | if (fi->protocol_version > 3) | ||
596 | req->req.misc |= NODEID_TO_NODE(fi->host->irm_id) << 8; | ||
597 | out_set_card: | ||
598 | spin_unlock_irqrestore(&host_info_lock, flags); | ||
599 | |||
600 | req->req.length = 0; | ||
601 | break; | ||
602 | |||
603 | default: | ||
604 | req->req.error = RAW1394_ERROR_STATE_ORDER; | ||
605 | req->req.length = 0; | ||
606 | break; | ||
607 | } | ||
608 | |||
609 | queue_complete_req(req); | ||
610 | return 0; | ||
611 | } | ||
612 | |||
613 | static void handle_fcp_listen(struct file_info *fi, struct pending_request *req) | ||
614 | { | ||
615 | if (req->req.misc) { | ||
616 | if (fi->fcp_buffer) { | ||
617 | req->req.error = RAW1394_ERROR_ALREADY; | ||
618 | } else { | ||
619 | fi->fcp_buffer = int2ptr(req->req.recvb); | ||
620 | } | ||
621 | } else { | ||
622 | if (!fi->fcp_buffer) { | ||
623 | req->req.error = RAW1394_ERROR_ALREADY; | ||
624 | } else { | ||
625 | fi->fcp_buffer = NULL; | ||
626 | } | ||
627 | } | ||
628 | |||
629 | req->req.length = 0; | ||
630 | queue_complete_req(req); | ||
631 | } | ||
632 | |||
633 | static int handle_async_request(struct file_info *fi, | ||
634 | struct pending_request *req, int node) | ||
635 | { | ||
636 | unsigned long flags; | ||
637 | struct hpsb_packet *packet = NULL; | ||
638 | u64 addr = req->req.address & 0xffffffffffffULL; | ||
639 | |||
640 | switch (req->req.type) { | ||
641 | case RAW1394_REQ_ASYNC_READ: | ||
642 | DBGMSG("read_request called"); | ||
643 | packet = | ||
644 | hpsb_make_readpacket(fi->host, node, addr, req->req.length); | ||
645 | |||
646 | if (!packet) | ||
647 | return -ENOMEM; | ||
648 | |||
649 | if (req->req.length == 4) | ||
650 | req->data = &packet->header[3]; | ||
651 | else | ||
652 | req->data = packet->data; | ||
653 | |||
654 | break; | ||
655 | |||
656 | case RAW1394_REQ_ASYNC_WRITE: | ||
657 | DBGMSG("write_request called"); | ||
658 | |||
659 | packet = hpsb_make_writepacket(fi->host, node, addr, NULL, | ||
660 | req->req.length); | ||
661 | if (!packet) | ||
662 | return -ENOMEM; | ||
663 | |||
664 | if (req->req.length == 4) { | ||
665 | if (copy_from_user | ||
666 | (&packet->header[3], int2ptr(req->req.sendb), | ||
667 | req->req.length)) | ||
668 | req->req.error = RAW1394_ERROR_MEMFAULT; | ||
669 | } else { | ||
670 | if (copy_from_user | ||
671 | (packet->data, int2ptr(req->req.sendb), | ||
672 | req->req.length)) | ||
673 | req->req.error = RAW1394_ERROR_MEMFAULT; | ||
674 | } | ||
675 | |||
676 | req->req.length = 0; | ||
677 | break; | ||
678 | |||
679 | case RAW1394_REQ_ASYNC_STREAM: | ||
680 | DBGMSG("stream_request called"); | ||
681 | |||
682 | packet = | ||
683 | hpsb_make_streampacket(fi->host, NULL, req->req.length, | ||
684 | node & 0x3f /*channel */ , | ||
685 | (req->req.misc >> 16) & 0x3, | ||
686 | req->req.misc & 0xf); | ||
687 | if (!packet) | ||
688 | return -ENOMEM; | ||
689 | |||
690 | if (copy_from_user(packet->data, int2ptr(req->req.sendb), | ||
691 | req->req.length)) | ||
692 | req->req.error = RAW1394_ERROR_MEMFAULT; | ||
693 | |||
694 | req->req.length = 0; | ||
695 | break; | ||
696 | |||
697 | case RAW1394_REQ_LOCK: | ||
698 | DBGMSG("lock_request called"); | ||
699 | if ((req->req.misc == EXTCODE_FETCH_ADD) | ||
700 | || (req->req.misc == EXTCODE_LITTLE_ADD)) { | ||
701 | if (req->req.length != 4) { | ||
702 | req->req.error = RAW1394_ERROR_INVALID_ARG; | ||
703 | break; | ||
704 | } | ||
705 | } else { | ||
706 | if (req->req.length != 8) { | ||
707 | req->req.error = RAW1394_ERROR_INVALID_ARG; | ||
708 | break; | ||
709 | } | ||
710 | } | ||
711 | |||
712 | packet = hpsb_make_lockpacket(fi->host, node, addr, | ||
713 | req->req.misc, NULL, 0); | ||
714 | if (!packet) | ||
715 | return -ENOMEM; | ||
716 | |||
717 | if (copy_from_user(packet->data, int2ptr(req->req.sendb), | ||
718 | req->req.length)) { | ||
719 | req->req.error = RAW1394_ERROR_MEMFAULT; | ||
720 | break; | ||
721 | } | ||
722 | |||
723 | req->data = packet->data; | ||
724 | req->req.length = 4; | ||
725 | break; | ||
726 | |||
727 | case RAW1394_REQ_LOCK64: | ||
728 | DBGMSG("lock64_request called"); | ||
729 | if ((req->req.misc == EXTCODE_FETCH_ADD) | ||
730 | || (req->req.misc == EXTCODE_LITTLE_ADD)) { | ||
731 | if (req->req.length != 8) { | ||
732 | req->req.error = RAW1394_ERROR_INVALID_ARG; | ||
733 | break; | ||
734 | } | ||
735 | } else { | ||
736 | if (req->req.length != 16) { | ||
737 | req->req.error = RAW1394_ERROR_INVALID_ARG; | ||
738 | break; | ||
739 | } | ||
740 | } | ||
741 | packet = hpsb_make_lock64packet(fi->host, node, addr, | ||
742 | req->req.misc, NULL, 0); | ||
743 | if (!packet) | ||
744 | return -ENOMEM; | ||
745 | |||
746 | if (copy_from_user(packet->data, int2ptr(req->req.sendb), | ||
747 | req->req.length)) { | ||
748 | req->req.error = RAW1394_ERROR_MEMFAULT; | ||
749 | break; | ||
750 | } | ||
751 | |||
752 | req->data = packet->data; | ||
753 | req->req.length = 8; | ||
754 | break; | ||
755 | |||
756 | default: | ||
757 | req->req.error = RAW1394_ERROR_STATE_ORDER; | ||
758 | } | ||
759 | |||
760 | req->packet = packet; | ||
761 | |||
762 | if (req->req.error) { | ||
763 | req->req.length = 0; | ||
764 | queue_complete_req(req); | ||
765 | return 0; | ||
766 | } | ||
767 | |||
768 | hpsb_set_packet_complete_task(packet, | ||
769 | (void (*)(void *))queue_complete_cb, req); | ||
770 | |||
771 | spin_lock_irqsave(&fi->reqlists_lock, flags); | ||
772 | list_add_tail(&req->list, &fi->req_pending); | ||
773 | spin_unlock_irqrestore(&fi->reqlists_lock, flags); | ||
774 | |||
775 | packet->generation = req->req.generation; | ||
776 | |||
777 | if (hpsb_send_packet(packet) < 0) { | ||
778 | req->req.error = RAW1394_ERROR_SEND_ERROR; | ||
779 | req->req.length = 0; | ||
780 | hpsb_free_tlabel(packet); | ||
781 | queue_complete_req(req); | ||
782 | } | ||
783 | return 0; | ||
784 | } | ||
785 | |||
786 | static int handle_async_send(struct file_info *fi, struct pending_request *req) | ||
787 | { | ||
788 | unsigned long flags; | ||
789 | struct hpsb_packet *packet; | ||
790 | int header_length = req->req.misc & 0xffff; | ||
791 | int expect_response = req->req.misc >> 16; | ||
792 | size_t data_size; | ||
793 | |||
794 | if (header_length > req->req.length || header_length < 12 || | ||
795 | header_length > FIELD_SIZEOF(struct hpsb_packet, header)) { | ||
796 | req->req.error = RAW1394_ERROR_INVALID_ARG; | ||
797 | req->req.length = 0; | ||
798 | queue_complete_req(req); | ||
799 | return 0; | ||
800 | } | ||
801 | |||
802 | data_size = req->req.length - header_length; | ||
803 | packet = hpsb_alloc_packet(data_size); | ||
804 | req->packet = packet; | ||
805 | if (!packet) | ||
806 | return -ENOMEM; | ||
807 | |||
808 | if (copy_from_user(packet->header, int2ptr(req->req.sendb), | ||
809 | header_length)) { | ||
810 | req->req.error = RAW1394_ERROR_MEMFAULT; | ||
811 | req->req.length = 0; | ||
812 | queue_complete_req(req); | ||
813 | return 0; | ||
814 | } | ||
815 | |||
816 | if (copy_from_user | ||
817 | (packet->data, int2ptr(req->req.sendb) + header_length, | ||
818 | data_size)) { | ||
819 | req->req.error = RAW1394_ERROR_MEMFAULT; | ||
820 | req->req.length = 0; | ||
821 | queue_complete_req(req); | ||
822 | return 0; | ||
823 | } | ||
824 | |||
825 | packet->type = hpsb_async; | ||
826 | packet->node_id = packet->header[0] >> 16; | ||
827 | packet->tcode = (packet->header[0] >> 4) & 0xf; | ||
828 | packet->tlabel = (packet->header[0] >> 10) & 0x3f; | ||
829 | packet->host = fi->host; | ||
830 | packet->expect_response = expect_response; | ||
831 | packet->header_size = header_length; | ||
832 | packet->data_size = data_size; | ||
833 | |||
834 | req->req.length = 0; | ||
835 | hpsb_set_packet_complete_task(packet, | ||
836 | (void (*)(void *))queue_complete_cb, req); | ||
837 | |||
838 | spin_lock_irqsave(&fi->reqlists_lock, flags); | ||
839 | list_add_tail(&req->list, &fi->req_pending); | ||
840 | spin_unlock_irqrestore(&fi->reqlists_lock, flags); | ||
841 | |||
842 | /* Update the generation of the packet just before sending. */ | ||
843 | packet->generation = req->req.generation; | ||
844 | |||
845 | if (hpsb_send_packet(packet) < 0) { | ||
846 | req->req.error = RAW1394_ERROR_SEND_ERROR; | ||
847 | queue_complete_req(req); | ||
848 | } | ||
849 | |||
850 | return 0; | ||
851 | } | ||
852 | |||
853 | static int arm_read(struct hpsb_host *host, int nodeid, quadlet_t * buffer, | ||
854 | u64 addr, size_t length, u16 flags) | ||
855 | { | ||
856 | unsigned long irqflags; | ||
857 | struct pending_request *req; | ||
858 | struct host_info *hi; | ||
859 | struct file_info *fi = NULL; | ||
860 | struct list_head *entry; | ||
861 | struct arm_addr *arm_addr = NULL; | ||
862 | struct arm_request *arm_req = NULL; | ||
863 | struct arm_response *arm_resp = NULL; | ||
864 | int found = 0, size = 0, rcode = -1; | ||
865 | struct arm_request_response *arm_req_resp = NULL; | ||
866 | |||
867 | DBGMSG("arm_read called by node: %X " | ||
868 | "addr: %4.4x %8.8x length: %Zu", nodeid, | ||
869 | (u16) ((addr >> 32) & 0xFFFF), (u32) (addr & 0xFFFFFFFF), | ||
870 | length); | ||
871 | spin_lock_irqsave(&host_info_lock, irqflags); | ||
872 | hi = find_host_info(host); /* search address-entry */ | ||
873 | if (hi != NULL) { | ||
874 | list_for_each_entry(fi, &hi->file_info_list, list) { | ||
875 | entry = fi->addr_list.next; | ||
876 | while (entry != &(fi->addr_list)) { | ||
877 | arm_addr = | ||
878 | list_entry(entry, struct arm_addr, | ||
879 | addr_list); | ||
880 | if (((arm_addr->start) <= (addr)) | ||
881 | && ((arm_addr->end) >= (addr + length))) { | ||
882 | found = 1; | ||
883 | break; | ||
884 | } | ||
885 | entry = entry->next; | ||
886 | } | ||
887 | if (found) { | ||
888 | break; | ||
889 | } | ||
890 | } | ||
891 | } | ||
892 | rcode = -1; | ||
893 | if (!found) { | ||
894 | printk(KERN_ERR "raw1394: arm_read FAILED addr_entry not found" | ||
895 | " -> rcode_address_error\n"); | ||
896 | spin_unlock_irqrestore(&host_info_lock, irqflags); | ||
897 | return (RCODE_ADDRESS_ERROR); | ||
898 | } else { | ||
899 | DBGMSG("arm_read addr_entry FOUND"); | ||
900 | } | ||
901 | if (arm_addr->rec_length < length) { | ||
902 | DBGMSG("arm_read blocklength too big -> rcode_data_error"); | ||
903 | rcode = RCODE_DATA_ERROR; /* hardware error, data is unavailable */ | ||
904 | } | ||
905 | if (rcode == -1) { | ||
906 | if (arm_addr->access_rights & ARM_READ) { | ||
907 | if (!(arm_addr->client_transactions & ARM_READ)) { | ||
908 | memcpy(buffer, | ||
909 | (arm_addr->addr_space_buffer) + (addr - | ||
910 | (arm_addr-> | ||
911 | start)), | ||
912 | length); | ||
913 | DBGMSG("arm_read -> (rcode_complete)"); | ||
914 | rcode = RCODE_COMPLETE; | ||
915 | } | ||
916 | } else { | ||
917 | rcode = RCODE_TYPE_ERROR; /* function not allowed */ | ||
918 | DBGMSG("arm_read -> rcode_type_error (access denied)"); | ||
919 | } | ||
920 | } | ||
921 | if (arm_addr->notification_options & ARM_READ) { | ||
922 | DBGMSG("arm_read -> entering notification-section"); | ||
923 | req = __alloc_pending_request(GFP_ATOMIC); | ||
924 | if (!req) { | ||
925 | DBGMSG("arm_read -> rcode_conflict_error"); | ||
926 | spin_unlock_irqrestore(&host_info_lock, irqflags); | ||
927 | return (RCODE_CONFLICT_ERROR); /* A resource conflict was detected. | ||
928 | The request may be retried */ | ||
929 | } | ||
930 | if (rcode == RCODE_COMPLETE) { | ||
931 | size = | ||
932 | sizeof(struct arm_request) + | ||
933 | sizeof(struct arm_response) + | ||
934 | length * sizeof(byte_t) + | ||
935 | sizeof(struct arm_request_response); | ||
936 | } else { | ||
937 | size = | ||
938 | sizeof(struct arm_request) + | ||
939 | sizeof(struct arm_response) + | ||
940 | sizeof(struct arm_request_response); | ||
941 | } | ||
942 | req->data = kmalloc(size, GFP_ATOMIC); | ||
943 | if (!(req->data)) { | ||
944 | free_pending_request(req); | ||
945 | DBGMSG("arm_read -> rcode_conflict_error"); | ||
946 | spin_unlock_irqrestore(&host_info_lock, irqflags); | ||
947 | return (RCODE_CONFLICT_ERROR); /* A resource conflict was detected. | ||
948 | The request may be retried */ | ||
949 | } | ||
950 | req->free_data = 1; | ||
951 | req->file_info = fi; | ||
952 | req->req.type = RAW1394_REQ_ARM; | ||
953 | req->req.generation = get_hpsb_generation(host); | ||
954 | req->req.misc = | ||
955 | (((length << 16) & (0xFFFF0000)) | (ARM_READ & 0xFF)); | ||
956 | req->req.tag = arm_addr->arm_tag; | ||
957 | req->req.recvb = arm_addr->recvb; | ||
958 | req->req.length = size; | ||
959 | arm_req_resp = (struct arm_request_response *)(req->data); | ||
960 | arm_req = (struct arm_request *)((byte_t *) (req->data) + | ||
961 | (sizeof | ||
962 | (struct | ||
963 | arm_request_response))); | ||
964 | arm_resp = | ||
965 | (struct arm_response *)((byte_t *) (arm_req) + | ||
966 | (sizeof(struct arm_request))); | ||
967 | arm_req->buffer = NULL; | ||
968 | arm_resp->buffer = NULL; | ||
969 | if (rcode == RCODE_COMPLETE) { | ||
970 | byte_t *buf = | ||
971 | (byte_t *) arm_resp + sizeof(struct arm_response); | ||
972 | memcpy(buf, | ||
973 | (arm_addr->addr_space_buffer) + (addr - | ||
974 | (arm_addr-> | ||
975 | start)), | ||
976 | length); | ||
977 | arm_resp->buffer = | ||
978 | int2ptr((arm_addr->recvb) + | ||
979 | sizeof(struct arm_request_response) + | ||
980 | sizeof(struct arm_request) + | ||
981 | sizeof(struct arm_response)); | ||
982 | } | ||
983 | arm_resp->buffer_length = | ||
984 | (rcode == RCODE_COMPLETE) ? length : 0; | ||
985 | arm_resp->response_code = rcode; | ||
986 | arm_req->buffer_length = 0; | ||
987 | arm_req->generation = req->req.generation; | ||
988 | arm_req->extended_transaction_code = 0; | ||
989 | arm_req->destination_offset = addr; | ||
990 | arm_req->source_nodeid = nodeid; | ||
991 | arm_req->destination_nodeid = host->node_id; | ||
992 | arm_req->tlabel = (flags >> 10) & 0x3f; | ||
993 | arm_req->tcode = (flags >> 4) & 0x0f; | ||
994 | arm_req_resp->request = int2ptr((arm_addr->recvb) + | ||
995 | sizeof(struct | ||
996 | arm_request_response)); | ||
997 | arm_req_resp->response = | ||
998 | int2ptr((arm_addr->recvb) + | ||
999 | sizeof(struct arm_request_response) + | ||
1000 | sizeof(struct arm_request)); | ||
1001 | queue_complete_req(req); | ||
1002 | } | ||
1003 | spin_unlock_irqrestore(&host_info_lock, irqflags); | ||
1004 | return (rcode); | ||
1005 | } | ||
1006 | |||
1007 | static int arm_write(struct hpsb_host *host, int nodeid, int destid, | ||
1008 | quadlet_t * data, u64 addr, size_t length, u16 flags) | ||
1009 | { | ||
1010 | unsigned long irqflags; | ||
1011 | struct pending_request *req; | ||
1012 | struct host_info *hi; | ||
1013 | struct file_info *fi = NULL; | ||
1014 | struct list_head *entry; | ||
1015 | struct arm_addr *arm_addr = NULL; | ||
1016 | struct arm_request *arm_req = NULL; | ||
1017 | struct arm_response *arm_resp = NULL; | ||
1018 | int found = 0, size = 0, rcode = -1; | ||
1019 | struct arm_request_response *arm_req_resp = NULL; | ||
1020 | |||
1021 | DBGMSG("arm_write called by node: %X " | ||
1022 | "addr: %4.4x %8.8x length: %Zu", nodeid, | ||
1023 | (u16) ((addr >> 32) & 0xFFFF), (u32) (addr & 0xFFFFFFFF), | ||
1024 | length); | ||
1025 | spin_lock_irqsave(&host_info_lock, irqflags); | ||
1026 | hi = find_host_info(host); /* search address-entry */ | ||
1027 | if (hi != NULL) { | ||
1028 | list_for_each_entry(fi, &hi->file_info_list, list) { | ||
1029 | entry = fi->addr_list.next; | ||
1030 | while (entry != &(fi->addr_list)) { | ||
1031 | arm_addr = | ||
1032 | list_entry(entry, struct arm_addr, | ||
1033 | addr_list); | ||
1034 | if (((arm_addr->start) <= (addr)) | ||
1035 | && ((arm_addr->end) >= (addr + length))) { | ||
1036 | found = 1; | ||
1037 | break; | ||
1038 | } | ||
1039 | entry = entry->next; | ||
1040 | } | ||
1041 | if (found) { | ||
1042 | break; | ||
1043 | } | ||
1044 | } | ||
1045 | } | ||
1046 | rcode = -1; | ||
1047 | if (!found) { | ||
1048 | printk(KERN_ERR "raw1394: arm_write FAILED addr_entry not found" | ||
1049 | " -> rcode_address_error\n"); | ||
1050 | spin_unlock_irqrestore(&host_info_lock, irqflags); | ||
1051 | return (RCODE_ADDRESS_ERROR); | ||
1052 | } else { | ||
1053 | DBGMSG("arm_write addr_entry FOUND"); | ||
1054 | } | ||
1055 | if (arm_addr->rec_length < length) { | ||
1056 | DBGMSG("arm_write blocklength too big -> rcode_data_error"); | ||
1057 | rcode = RCODE_DATA_ERROR; /* hardware error, data is unavailable */ | ||
1058 | } | ||
1059 | if (rcode == -1) { | ||
1060 | if (arm_addr->access_rights & ARM_WRITE) { | ||
1061 | if (!(arm_addr->client_transactions & ARM_WRITE)) { | ||
1062 | memcpy((arm_addr->addr_space_buffer) + | ||
1063 | (addr - (arm_addr->start)), data, | ||
1064 | length); | ||
1065 | DBGMSG("arm_write -> (rcode_complete)"); | ||
1066 | rcode = RCODE_COMPLETE; | ||
1067 | } | ||
1068 | } else { | ||
1069 | rcode = RCODE_TYPE_ERROR; /* function not allowed */ | ||
1070 | DBGMSG("arm_write -> rcode_type_error (access denied)"); | ||
1071 | } | ||
1072 | } | ||
1073 | if (arm_addr->notification_options & ARM_WRITE) { | ||
1074 | DBGMSG("arm_write -> entering notification-section"); | ||
1075 | req = __alloc_pending_request(GFP_ATOMIC); | ||
1076 | if (!req) { | ||
1077 | DBGMSG("arm_write -> rcode_conflict_error"); | ||
1078 | spin_unlock_irqrestore(&host_info_lock, irqflags); | ||
1079 | return (RCODE_CONFLICT_ERROR); /* A resource conflict was detected. | ||
1080 | The request my be retried */ | ||
1081 | } | ||
1082 | size = | ||
1083 | sizeof(struct arm_request) + sizeof(struct arm_response) + | ||
1084 | (length) * sizeof(byte_t) + | ||
1085 | sizeof(struct arm_request_response); | ||
1086 | req->data = kmalloc(size, GFP_ATOMIC); | ||
1087 | if (!(req->data)) { | ||
1088 | free_pending_request(req); | ||
1089 | DBGMSG("arm_write -> rcode_conflict_error"); | ||
1090 | spin_unlock_irqrestore(&host_info_lock, irqflags); | ||
1091 | return (RCODE_CONFLICT_ERROR); /* A resource conflict was detected. | ||
1092 | The request may be retried */ | ||
1093 | } | ||
1094 | req->free_data = 1; | ||
1095 | req->file_info = fi; | ||
1096 | req->req.type = RAW1394_REQ_ARM; | ||
1097 | req->req.generation = get_hpsb_generation(host); | ||
1098 | req->req.misc = | ||
1099 | (((length << 16) & (0xFFFF0000)) | (ARM_WRITE & 0xFF)); | ||
1100 | req->req.tag = arm_addr->arm_tag; | ||
1101 | req->req.recvb = arm_addr->recvb; | ||
1102 | req->req.length = size; | ||
1103 | arm_req_resp = (struct arm_request_response *)(req->data); | ||
1104 | arm_req = (struct arm_request *)((byte_t *) (req->data) + | ||
1105 | (sizeof | ||
1106 | (struct | ||
1107 | arm_request_response))); | ||
1108 | arm_resp = | ||
1109 | (struct arm_response *)((byte_t *) (arm_req) + | ||
1110 | (sizeof(struct arm_request))); | ||
1111 | arm_resp->buffer = NULL; | ||
1112 | memcpy((byte_t *) arm_resp + sizeof(struct arm_response), | ||
1113 | data, length); | ||
1114 | arm_req->buffer = int2ptr((arm_addr->recvb) + | ||
1115 | sizeof(struct arm_request_response) + | ||
1116 | sizeof(struct arm_request) + | ||
1117 | sizeof(struct arm_response)); | ||
1118 | arm_req->buffer_length = length; | ||
1119 | arm_req->generation = req->req.generation; | ||
1120 | arm_req->extended_transaction_code = 0; | ||
1121 | arm_req->destination_offset = addr; | ||
1122 | arm_req->source_nodeid = nodeid; | ||
1123 | arm_req->destination_nodeid = destid; | ||
1124 | arm_req->tlabel = (flags >> 10) & 0x3f; | ||
1125 | arm_req->tcode = (flags >> 4) & 0x0f; | ||
1126 | arm_resp->buffer_length = 0; | ||
1127 | arm_resp->response_code = rcode; | ||
1128 | arm_req_resp->request = int2ptr((arm_addr->recvb) + | ||
1129 | sizeof(struct | ||
1130 | arm_request_response)); | ||
1131 | arm_req_resp->response = | ||
1132 | int2ptr((arm_addr->recvb) + | ||
1133 | sizeof(struct arm_request_response) + | ||
1134 | sizeof(struct arm_request)); | ||
1135 | queue_complete_req(req); | ||
1136 | } | ||
1137 | spin_unlock_irqrestore(&host_info_lock, irqflags); | ||
1138 | return (rcode); | ||
1139 | } | ||
1140 | |||
1141 | static int arm_lock(struct hpsb_host *host, int nodeid, quadlet_t * store, | ||
1142 | u64 addr, quadlet_t data, quadlet_t arg, int ext_tcode, | ||
1143 | u16 flags) | ||
1144 | { | ||
1145 | unsigned long irqflags; | ||
1146 | struct pending_request *req; | ||
1147 | struct host_info *hi; | ||
1148 | struct file_info *fi = NULL; | ||
1149 | struct list_head *entry; | ||
1150 | struct arm_addr *arm_addr = NULL; | ||
1151 | struct arm_request *arm_req = NULL; | ||
1152 | struct arm_response *arm_resp = NULL; | ||
1153 | int found = 0, size = 0, rcode = -1; | ||
1154 | quadlet_t old, new; | ||
1155 | struct arm_request_response *arm_req_resp = NULL; | ||
1156 | |||
1157 | if (((ext_tcode & 0xFF) == EXTCODE_FETCH_ADD) || | ||
1158 | ((ext_tcode & 0xFF) == EXTCODE_LITTLE_ADD)) { | ||
1159 | DBGMSG("arm_lock called by node: %X " | ||
1160 | "addr: %4.4x %8.8x extcode: %2.2X data: %8.8X", | ||
1161 | nodeid, (u16) ((addr >> 32) & 0xFFFF), | ||
1162 | (u32) (addr & 0xFFFFFFFF), ext_tcode & 0xFF, | ||
1163 | be32_to_cpu(data)); | ||
1164 | } else { | ||
1165 | DBGMSG("arm_lock called by node: %X " | ||
1166 | "addr: %4.4x %8.8x extcode: %2.2X data: %8.8X arg: %8.8X", | ||
1167 | nodeid, (u16) ((addr >> 32) & 0xFFFF), | ||
1168 | (u32) (addr & 0xFFFFFFFF), ext_tcode & 0xFF, | ||
1169 | be32_to_cpu(data), be32_to_cpu(arg)); | ||
1170 | } | ||
1171 | spin_lock_irqsave(&host_info_lock, irqflags); | ||
1172 | hi = find_host_info(host); /* search address-entry */ | ||
1173 | if (hi != NULL) { | ||
1174 | list_for_each_entry(fi, &hi->file_info_list, list) { | ||
1175 | entry = fi->addr_list.next; | ||
1176 | while (entry != &(fi->addr_list)) { | ||
1177 | arm_addr = | ||
1178 | list_entry(entry, struct arm_addr, | ||
1179 | addr_list); | ||
1180 | if (((arm_addr->start) <= (addr)) | ||
1181 | && ((arm_addr->end) >= | ||
1182 | (addr + sizeof(*store)))) { | ||
1183 | found = 1; | ||
1184 | break; | ||
1185 | } | ||
1186 | entry = entry->next; | ||
1187 | } | ||
1188 | if (found) { | ||
1189 | break; | ||
1190 | } | ||
1191 | } | ||
1192 | } | ||
1193 | rcode = -1; | ||
1194 | if (!found) { | ||
1195 | printk(KERN_ERR "raw1394: arm_lock FAILED addr_entry not found" | ||
1196 | " -> rcode_address_error\n"); | ||
1197 | spin_unlock_irqrestore(&host_info_lock, irqflags); | ||
1198 | return (RCODE_ADDRESS_ERROR); | ||
1199 | } else { | ||
1200 | DBGMSG("arm_lock addr_entry FOUND"); | ||
1201 | } | ||
1202 | if (rcode == -1) { | ||
1203 | if (arm_addr->access_rights & ARM_LOCK) { | ||
1204 | if (!(arm_addr->client_transactions & ARM_LOCK)) { | ||
1205 | memcpy(&old, | ||
1206 | (arm_addr->addr_space_buffer) + (addr - | ||
1207 | (arm_addr-> | ||
1208 | start)), | ||
1209 | sizeof(old)); | ||
1210 | switch (ext_tcode) { | ||
1211 | case (EXTCODE_MASK_SWAP): | ||
1212 | new = data | (old & ~arg); | ||
1213 | break; | ||
1214 | case (EXTCODE_COMPARE_SWAP): | ||
1215 | if (old == arg) { | ||
1216 | new = data; | ||
1217 | } else { | ||
1218 | new = old; | ||
1219 | } | ||
1220 | break; | ||
1221 | case (EXTCODE_FETCH_ADD): | ||
1222 | new = | ||
1223 | cpu_to_be32(be32_to_cpu(data) + | ||
1224 | be32_to_cpu(old)); | ||
1225 | break; | ||
1226 | case (EXTCODE_LITTLE_ADD): | ||
1227 | new = | ||
1228 | cpu_to_le32(le32_to_cpu(data) + | ||
1229 | le32_to_cpu(old)); | ||
1230 | break; | ||
1231 | case (EXTCODE_BOUNDED_ADD): | ||
1232 | if (old != arg) { | ||
1233 | new = | ||
1234 | cpu_to_be32(be32_to_cpu | ||
1235 | (data) + | ||
1236 | be32_to_cpu | ||
1237 | (old)); | ||
1238 | } else { | ||
1239 | new = old; | ||
1240 | } | ||
1241 | break; | ||
1242 | case (EXTCODE_WRAP_ADD): | ||
1243 | if (old != arg) { | ||
1244 | new = | ||
1245 | cpu_to_be32(be32_to_cpu | ||
1246 | (data) + | ||
1247 | be32_to_cpu | ||
1248 | (old)); | ||
1249 | } else { | ||
1250 | new = data; | ||
1251 | } | ||
1252 | break; | ||
1253 | default: | ||
1254 | rcode = RCODE_TYPE_ERROR; /* function not allowed */ | ||
1255 | printk(KERN_ERR | ||
1256 | "raw1394: arm_lock FAILED " | ||
1257 | "ext_tcode not allowed -> rcode_type_error\n"); | ||
1258 | break; | ||
1259 | } /*switch */ | ||
1260 | if (rcode == -1) { | ||
1261 | DBGMSG("arm_lock -> (rcode_complete)"); | ||
1262 | rcode = RCODE_COMPLETE; | ||
1263 | memcpy(store, &old, sizeof(*store)); | ||
1264 | memcpy((arm_addr->addr_space_buffer) + | ||
1265 | (addr - (arm_addr->start)), | ||
1266 | &new, sizeof(*store)); | ||
1267 | } | ||
1268 | } | ||
1269 | } else { | ||
1270 | rcode = RCODE_TYPE_ERROR; /* function not allowed */ | ||
1271 | DBGMSG("arm_lock -> rcode_type_error (access denied)"); | ||
1272 | } | ||
1273 | } | ||
1274 | if (arm_addr->notification_options & ARM_LOCK) { | ||
1275 | byte_t *buf1, *buf2; | ||
1276 | DBGMSG("arm_lock -> entering notification-section"); | ||
1277 | req = __alloc_pending_request(GFP_ATOMIC); | ||
1278 | if (!req) { | ||
1279 | DBGMSG("arm_lock -> rcode_conflict_error"); | ||
1280 | spin_unlock_irqrestore(&host_info_lock, irqflags); | ||
1281 | return (RCODE_CONFLICT_ERROR); /* A resource conflict was detected. | ||
1282 | The request may be retried */ | ||
1283 | } | ||
1284 | size = sizeof(struct arm_request) + sizeof(struct arm_response) + 3 * sizeof(*store) + sizeof(struct arm_request_response); /* maximum */ | ||
1285 | req->data = kmalloc(size, GFP_ATOMIC); | ||
1286 | if (!(req->data)) { | ||
1287 | free_pending_request(req); | ||
1288 | DBGMSG("arm_lock -> rcode_conflict_error"); | ||
1289 | spin_unlock_irqrestore(&host_info_lock, irqflags); | ||
1290 | return (RCODE_CONFLICT_ERROR); /* A resource conflict was detected. | ||
1291 | The request may be retried */ | ||
1292 | } | ||
1293 | req->free_data = 1; | ||
1294 | arm_req_resp = (struct arm_request_response *)(req->data); | ||
1295 | arm_req = (struct arm_request *)((byte_t *) (req->data) + | ||
1296 | (sizeof | ||
1297 | (struct | ||
1298 | arm_request_response))); | ||
1299 | arm_resp = | ||
1300 | (struct arm_response *)((byte_t *) (arm_req) + | ||
1301 | (sizeof(struct arm_request))); | ||
1302 | buf1 = (byte_t *) arm_resp + sizeof(struct arm_response); | ||
1303 | buf2 = buf1 + 2 * sizeof(*store); | ||
1304 | if ((ext_tcode == EXTCODE_FETCH_ADD) || | ||
1305 | (ext_tcode == EXTCODE_LITTLE_ADD)) { | ||
1306 | arm_req->buffer_length = sizeof(*store); | ||
1307 | memcpy(buf1, &data, sizeof(*store)); | ||
1308 | |||
1309 | } else { | ||
1310 | arm_req->buffer_length = 2 * sizeof(*store); | ||
1311 | memcpy(buf1, &arg, sizeof(*store)); | ||
1312 | memcpy(buf1 + sizeof(*store), &data, sizeof(*store)); | ||
1313 | } | ||
1314 | if (rcode == RCODE_COMPLETE) { | ||
1315 | arm_resp->buffer_length = sizeof(*store); | ||
1316 | memcpy(buf2, &old, sizeof(*store)); | ||
1317 | } else { | ||
1318 | arm_resp->buffer_length = 0; | ||
1319 | } | ||
1320 | req->file_info = fi; | ||
1321 | req->req.type = RAW1394_REQ_ARM; | ||
1322 | req->req.generation = get_hpsb_generation(host); | ||
1323 | req->req.misc = ((((sizeof(*store)) << 16) & (0xFFFF0000)) | | ||
1324 | (ARM_LOCK & 0xFF)); | ||
1325 | req->req.tag = arm_addr->arm_tag; | ||
1326 | req->req.recvb = arm_addr->recvb; | ||
1327 | req->req.length = size; | ||
1328 | arm_req->generation = req->req.generation; | ||
1329 | arm_req->extended_transaction_code = ext_tcode; | ||
1330 | arm_req->destination_offset = addr; | ||
1331 | arm_req->source_nodeid = nodeid; | ||
1332 | arm_req->destination_nodeid = host->node_id; | ||
1333 | arm_req->tlabel = (flags >> 10) & 0x3f; | ||
1334 | arm_req->tcode = (flags >> 4) & 0x0f; | ||
1335 | arm_resp->response_code = rcode; | ||
1336 | arm_req_resp->request = int2ptr((arm_addr->recvb) + | ||
1337 | sizeof(struct | ||
1338 | arm_request_response)); | ||
1339 | arm_req_resp->response = | ||
1340 | int2ptr((arm_addr->recvb) + | ||
1341 | sizeof(struct arm_request_response) + | ||
1342 | sizeof(struct arm_request)); | ||
1343 | arm_req->buffer = | ||
1344 | int2ptr((arm_addr->recvb) + | ||
1345 | sizeof(struct arm_request_response) + | ||
1346 | sizeof(struct arm_request) + | ||
1347 | sizeof(struct arm_response)); | ||
1348 | arm_resp->buffer = | ||
1349 | int2ptr((arm_addr->recvb) + | ||
1350 | sizeof(struct arm_request_response) + | ||
1351 | sizeof(struct arm_request) + | ||
1352 | sizeof(struct arm_response) + 2 * sizeof(*store)); | ||
1353 | queue_complete_req(req); | ||
1354 | } | ||
1355 | spin_unlock_irqrestore(&host_info_lock, irqflags); | ||
1356 | return (rcode); | ||
1357 | } | ||
1358 | |||
1359 | static int arm_lock64(struct hpsb_host *host, int nodeid, octlet_t * store, | ||
1360 | u64 addr, octlet_t data, octlet_t arg, int ext_tcode, | ||
1361 | u16 flags) | ||
1362 | { | ||
1363 | unsigned long irqflags; | ||
1364 | struct pending_request *req; | ||
1365 | struct host_info *hi; | ||
1366 | struct file_info *fi = NULL; | ||
1367 | struct list_head *entry; | ||
1368 | struct arm_addr *arm_addr = NULL; | ||
1369 | struct arm_request *arm_req = NULL; | ||
1370 | struct arm_response *arm_resp = NULL; | ||
1371 | int found = 0, size = 0, rcode = -1; | ||
1372 | octlet_t old, new; | ||
1373 | struct arm_request_response *arm_req_resp = NULL; | ||
1374 | |||
1375 | if (((ext_tcode & 0xFF) == EXTCODE_FETCH_ADD) || | ||
1376 | ((ext_tcode & 0xFF) == EXTCODE_LITTLE_ADD)) { | ||
1377 | DBGMSG("arm_lock64 called by node: %X " | ||
1378 | "addr: %4.4x %8.8x extcode: %2.2X data: %8.8X %8.8X ", | ||
1379 | nodeid, (u16) ((addr >> 32) & 0xFFFF), | ||
1380 | (u32) (addr & 0xFFFFFFFF), | ||
1381 | ext_tcode & 0xFF, | ||
1382 | (u32) ((be64_to_cpu(data) >> 32) & 0xFFFFFFFF), | ||
1383 | (u32) (be64_to_cpu(data) & 0xFFFFFFFF)); | ||
1384 | } else { | ||
1385 | DBGMSG("arm_lock64 called by node: %X " | ||
1386 | "addr: %4.4x %8.8x extcode: %2.2X data: %8.8X %8.8X arg: " | ||
1387 | "%8.8X %8.8X ", | ||
1388 | nodeid, (u16) ((addr >> 32) & 0xFFFF), | ||
1389 | (u32) (addr & 0xFFFFFFFF), | ||
1390 | ext_tcode & 0xFF, | ||
1391 | (u32) ((be64_to_cpu(data) >> 32) & 0xFFFFFFFF), | ||
1392 | (u32) (be64_to_cpu(data) & 0xFFFFFFFF), | ||
1393 | (u32) ((be64_to_cpu(arg) >> 32) & 0xFFFFFFFF), | ||
1394 | (u32) (be64_to_cpu(arg) & 0xFFFFFFFF)); | ||
1395 | } | ||
1396 | spin_lock_irqsave(&host_info_lock, irqflags); | ||
1397 | hi = find_host_info(host); /* search addressentry in file_info's for host */ | ||
1398 | if (hi != NULL) { | ||
1399 | list_for_each_entry(fi, &hi->file_info_list, list) { | ||
1400 | entry = fi->addr_list.next; | ||
1401 | while (entry != &(fi->addr_list)) { | ||
1402 | arm_addr = | ||
1403 | list_entry(entry, struct arm_addr, | ||
1404 | addr_list); | ||
1405 | if (((arm_addr->start) <= (addr)) | ||
1406 | && ((arm_addr->end) >= | ||
1407 | (addr + sizeof(*store)))) { | ||
1408 | found = 1; | ||
1409 | break; | ||
1410 | } | ||
1411 | entry = entry->next; | ||
1412 | } | ||
1413 | if (found) { | ||
1414 | break; | ||
1415 | } | ||
1416 | } | ||
1417 | } | ||
1418 | rcode = -1; | ||
1419 | if (!found) { | ||
1420 | printk(KERN_ERR | ||
1421 | "raw1394: arm_lock64 FAILED addr_entry not found" | ||
1422 | " -> rcode_address_error\n"); | ||
1423 | spin_unlock_irqrestore(&host_info_lock, irqflags); | ||
1424 | return (RCODE_ADDRESS_ERROR); | ||
1425 | } else { | ||
1426 | DBGMSG("arm_lock64 addr_entry FOUND"); | ||
1427 | } | ||
1428 | if (rcode == -1) { | ||
1429 | if (arm_addr->access_rights & ARM_LOCK) { | ||
1430 | if (!(arm_addr->client_transactions & ARM_LOCK)) { | ||
1431 | memcpy(&old, | ||
1432 | (arm_addr->addr_space_buffer) + (addr - | ||
1433 | (arm_addr-> | ||
1434 | start)), | ||
1435 | sizeof(old)); | ||
1436 | switch (ext_tcode) { | ||
1437 | case (EXTCODE_MASK_SWAP): | ||
1438 | new = data | (old & ~arg); | ||
1439 | break; | ||
1440 | case (EXTCODE_COMPARE_SWAP): | ||
1441 | if (old == arg) { | ||
1442 | new = data; | ||
1443 | } else { | ||
1444 | new = old; | ||
1445 | } | ||
1446 | break; | ||
1447 | case (EXTCODE_FETCH_ADD): | ||
1448 | new = | ||
1449 | cpu_to_be64(be64_to_cpu(data) + | ||
1450 | be64_to_cpu(old)); | ||
1451 | break; | ||
1452 | case (EXTCODE_LITTLE_ADD): | ||
1453 | new = | ||
1454 | cpu_to_le64(le64_to_cpu(data) + | ||
1455 | le64_to_cpu(old)); | ||
1456 | break; | ||
1457 | case (EXTCODE_BOUNDED_ADD): | ||
1458 | if (old != arg) { | ||
1459 | new = | ||
1460 | cpu_to_be64(be64_to_cpu | ||
1461 | (data) + | ||
1462 | be64_to_cpu | ||
1463 | (old)); | ||
1464 | } else { | ||
1465 | new = old; | ||
1466 | } | ||
1467 | break; | ||
1468 | case (EXTCODE_WRAP_ADD): | ||
1469 | if (old != arg) { | ||
1470 | new = | ||
1471 | cpu_to_be64(be64_to_cpu | ||
1472 | (data) + | ||
1473 | be64_to_cpu | ||
1474 | (old)); | ||
1475 | } else { | ||
1476 | new = data; | ||
1477 | } | ||
1478 | break; | ||
1479 | default: | ||
1480 | printk(KERN_ERR | ||
1481 | "raw1394: arm_lock64 FAILED " | ||
1482 | "ext_tcode not allowed -> rcode_type_error\n"); | ||
1483 | rcode = RCODE_TYPE_ERROR; /* function not allowed */ | ||
1484 | break; | ||
1485 | } /*switch */ | ||
1486 | if (rcode == -1) { | ||
1487 | DBGMSG | ||
1488 | ("arm_lock64 -> (rcode_complete)"); | ||
1489 | rcode = RCODE_COMPLETE; | ||
1490 | memcpy(store, &old, sizeof(*store)); | ||
1491 | memcpy((arm_addr->addr_space_buffer) + | ||
1492 | (addr - (arm_addr->start)), | ||
1493 | &new, sizeof(*store)); | ||
1494 | } | ||
1495 | } | ||
1496 | } else { | ||
1497 | rcode = RCODE_TYPE_ERROR; /* function not allowed */ | ||
1498 | DBGMSG | ||
1499 | ("arm_lock64 -> rcode_type_error (access denied)"); | ||
1500 | } | ||
1501 | } | ||
1502 | if (arm_addr->notification_options & ARM_LOCK) { | ||
1503 | byte_t *buf1, *buf2; | ||
1504 | DBGMSG("arm_lock64 -> entering notification-section"); | ||
1505 | req = __alloc_pending_request(GFP_ATOMIC); | ||
1506 | if (!req) { | ||
1507 | spin_unlock_irqrestore(&host_info_lock, irqflags); | ||
1508 | DBGMSG("arm_lock64 -> rcode_conflict_error"); | ||
1509 | return (RCODE_CONFLICT_ERROR); /* A resource conflict was detected. | ||
1510 | The request may be retried */ | ||
1511 | } | ||
1512 | size = sizeof(struct arm_request) + sizeof(struct arm_response) + 3 * sizeof(*store) + sizeof(struct arm_request_response); /* maximum */ | ||
1513 | req->data = kmalloc(size, GFP_ATOMIC); | ||
1514 | if (!(req->data)) { | ||
1515 | free_pending_request(req); | ||
1516 | spin_unlock_irqrestore(&host_info_lock, irqflags); | ||
1517 | DBGMSG("arm_lock64 -> rcode_conflict_error"); | ||
1518 | return (RCODE_CONFLICT_ERROR); /* A resource conflict was detected. | ||
1519 | The request may be retried */ | ||
1520 | } | ||
1521 | req->free_data = 1; | ||
1522 | arm_req_resp = (struct arm_request_response *)(req->data); | ||
1523 | arm_req = (struct arm_request *)((byte_t *) (req->data) + | ||
1524 | (sizeof | ||
1525 | (struct | ||
1526 | arm_request_response))); | ||
1527 | arm_resp = | ||
1528 | (struct arm_response *)((byte_t *) (arm_req) + | ||
1529 | (sizeof(struct arm_request))); | ||
1530 | buf1 = (byte_t *) arm_resp + sizeof(struct arm_response); | ||
1531 | buf2 = buf1 + 2 * sizeof(*store); | ||
1532 | if ((ext_tcode == EXTCODE_FETCH_ADD) || | ||
1533 | (ext_tcode == EXTCODE_LITTLE_ADD)) { | ||
1534 | arm_req->buffer_length = sizeof(*store); | ||
1535 | memcpy(buf1, &data, sizeof(*store)); | ||
1536 | |||
1537 | } else { | ||
1538 | arm_req->buffer_length = 2 * sizeof(*store); | ||
1539 | memcpy(buf1, &arg, sizeof(*store)); | ||
1540 | memcpy(buf1 + sizeof(*store), &data, sizeof(*store)); | ||
1541 | } | ||
1542 | if (rcode == RCODE_COMPLETE) { | ||
1543 | arm_resp->buffer_length = sizeof(*store); | ||
1544 | memcpy(buf2, &old, sizeof(*store)); | ||
1545 | } else { | ||
1546 | arm_resp->buffer_length = 0; | ||
1547 | } | ||
1548 | req->file_info = fi; | ||
1549 | req->req.type = RAW1394_REQ_ARM; | ||
1550 | req->req.generation = get_hpsb_generation(host); | ||
1551 | req->req.misc = ((((sizeof(*store)) << 16) & (0xFFFF0000)) | | ||
1552 | (ARM_LOCK & 0xFF)); | ||
1553 | req->req.tag = arm_addr->arm_tag; | ||
1554 | req->req.recvb = arm_addr->recvb; | ||
1555 | req->req.length = size; | ||
1556 | arm_req->generation = req->req.generation; | ||
1557 | arm_req->extended_transaction_code = ext_tcode; | ||
1558 | arm_req->destination_offset = addr; | ||
1559 | arm_req->source_nodeid = nodeid; | ||
1560 | arm_req->destination_nodeid = host->node_id; | ||
1561 | arm_req->tlabel = (flags >> 10) & 0x3f; | ||
1562 | arm_req->tcode = (flags >> 4) & 0x0f; | ||
1563 | arm_resp->response_code = rcode; | ||
1564 | arm_req_resp->request = int2ptr((arm_addr->recvb) + | ||
1565 | sizeof(struct | ||
1566 | arm_request_response)); | ||
1567 | arm_req_resp->response = | ||
1568 | int2ptr((arm_addr->recvb) + | ||
1569 | sizeof(struct arm_request_response) + | ||
1570 | sizeof(struct arm_request)); | ||
1571 | arm_req->buffer = | ||
1572 | int2ptr((arm_addr->recvb) + | ||
1573 | sizeof(struct arm_request_response) + | ||
1574 | sizeof(struct arm_request) + | ||
1575 | sizeof(struct arm_response)); | ||
1576 | arm_resp->buffer = | ||
1577 | int2ptr((arm_addr->recvb) + | ||
1578 | sizeof(struct arm_request_response) + | ||
1579 | sizeof(struct arm_request) + | ||
1580 | sizeof(struct arm_response) + 2 * sizeof(*store)); | ||
1581 | queue_complete_req(req); | ||
1582 | } | ||
1583 | spin_unlock_irqrestore(&host_info_lock, irqflags); | ||
1584 | return (rcode); | ||
1585 | } | ||
1586 | |||
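| /* Register an address range mapping (ARM) for this file handle: validate the 48-bit range, allocate and initialize its backing buffer, and register it with the ieee1394 core unless another host already serves the same range. */ | ||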
1587 | static int arm_register(struct file_info *fi, struct pending_request *req) | ||
1588 | { | ||
1589 | int retval; | ||
1590 | struct arm_addr *addr; | ||
1591 | struct host_info *hi; | ||
1592 | struct file_info *fi_hlp = NULL; | ||
1593 | struct list_head *entry; | ||
1594 | struct arm_addr *arm_addr = NULL; | ||
1595 | int same_host, another_host; | ||
1596 | unsigned long flags; | ||
1597 | |||
1598 | DBGMSG("arm_register called " | ||
1599 | "addr(Offset): %8.8x %8.8x length: %u " | ||
1600 | "rights: %2.2X notify: %2.2X " | ||
1601 | "max_blk_len: %4.4X", | ||
1602 | (u32) ((req->req.address >> 32) & 0xFFFF), | ||
1603 | (u32) (req->req.address & 0xFFFFFFFF), | ||
1604 | req->req.length, ((req->req.misc >> 8) & 0xFF), | ||
1605 | (req->req.misc & 0xFF), ((req->req.misc >> 16) & 0xFFFF)); | ||
1606 | /* check that the address range fits into the 48-bit address space */ | ||
1607 | if ((((req->req.address) & ~(0xFFFFFFFFFFFFULL)) != 0) || | ||
1608 | (((req->req.address + req->req.length) & ~(0xFFFFFFFFFFFFULL)) != | ||
1609 | 0)) { | ||
1610 | req->req.length = 0; | ||
1611 | return (-EINVAL); | ||
1612 | } | ||
1613 | /* addr-list-entry for fileinfo */ | ||
1614 | addr = kmalloc(sizeof(*addr), GFP_KERNEL); | ||
1615 | if (!addr) { | ||
1616 | req->req.length = 0; | ||
1617 | return (-ENOMEM); | ||
1618 | } | ||
1619 | /* allocation of addr_space_buffer */ | ||
1620 | addr->addr_space_buffer = vmalloc(req->req.length); | ||
1621 | if (!(addr->addr_space_buffer)) { | ||
1622 | kfree(addr); | ||
1623 | req->req.length = 0; | ||
1624 | return (-ENOMEM); | ||
1625 | } | ||
1626 | /* initialization of addr_space_buffer */ | ||
1627 | if ((req->req.sendb) == (unsigned long)NULL) { | ||
1628 | /* init: set 0 */ | ||
1629 | memset(addr->addr_space_buffer, 0, req->req.length); | ||
1630 | } else { | ||
1631 | /* init: user -> kernel */ | ||
1632 | if (copy_from_user | ||
1633 | (addr->addr_space_buffer, int2ptr(req->req.sendb), | ||
1634 | req->req.length)) { | ||
1635 | vfree(addr->addr_space_buffer); | ||
1636 | kfree(addr); | ||
1637 | return (-EFAULT); | ||
1638 | } | ||
1639 | } | ||
1640 | INIT_LIST_HEAD(&addr->addr_list); | ||
1641 | addr->arm_tag = req->req.tag; | ||
1642 | addr->start = req->req.address; | ||
1643 | addr->end = req->req.address + req->req.length; | ||
1644 | addr->access_rights = (u8) (req->req.misc & 0x0F); | ||
1645 | addr->notification_options = (u8) ((req->req.misc >> 4) & 0x0F); | ||
1646 | addr->client_transactions = (u8) ((req->req.misc >> 8) & 0x0F); | ||
1647 | addr->access_rights |= addr->client_transactions; | ||
1648 | addr->notification_options |= addr->client_transactions; | ||
1649 | addr->recvb = req->req.recvb; | ||
1650 | addr->rec_length = (u16) ((req->req.misc >> 16) & 0xFFFF); | ||
1651 | |||
1652 | spin_lock_irqsave(&host_info_lock, flags); | ||
1653 | hi = find_host_info(fi->host); | ||
1654 | same_host = 0; | ||
1655 | another_host = 0; | ||
1656 | /* does this host already have an address entry covering the same address range? */ | ||
1657 | list_for_each_entry(fi_hlp, &hi->file_info_list, list) { | ||
1658 | entry = fi_hlp->addr_list.next; | ||
1659 | while (entry != &(fi_hlp->addr_list)) { | ||
1660 | arm_addr = | ||
1661 | list_entry(entry, struct arm_addr, addr_list); | ||
1662 | if ((arm_addr->start == addr->start) | ||
1663 | && (arm_addr->end == addr->end)) { | ||
1664 | DBGMSG("same host ownes same " | ||
1665 | "addressrange -> EALREADY"); | ||
1666 | same_host = 1; | ||
1667 | break; | ||
1668 | } | ||
1669 | entry = entry->next; | ||
1670 | } | ||
1671 | if (same_host) { | ||
1672 | break; | ||
1673 | } | ||
1674 | } | ||
1675 | if (same_host) { | ||
1676 | /* address range already occupied by the same host */ | ||
1677 | spin_unlock_irqrestore(&host_info_lock, flags); | ||
1678 | vfree(addr->addr_space_buffer); | ||
1679 | kfree(addr); | ||
1680 | return (-EALREADY); | ||
1681 | } | ||
1682 | /* does another host have a valid address entry covering the same address range? */ | ||
1683 | list_for_each_entry(hi, &host_info_list, list) { | ||
1684 | if (hi->host != fi->host) { | ||
1685 | list_for_each_entry(fi_hlp, &hi->file_info_list, list) { | ||
1686 | entry = fi_hlp->addr_list.next; | ||
1687 | while (entry != &(fi_hlp->addr_list)) { | ||
1688 | arm_addr = | ||
1689 | list_entry(entry, struct arm_addr, | ||
1690 | addr_list); | ||
1691 | if ((arm_addr->start == addr->start) | ||
1692 | && (arm_addr->end == addr->end)) { | ||
1693 | DBGMSG("another host owns same " | ||
1694 | "address range"); | ||
1696 | another_host = 1; | ||
1697 | break; | ||
1698 | } | ||
1699 | entry = entry->next; | ||
1700 | } | ||
1701 | if (another_host) { | ||
1702 | break; | ||
1703 | } | ||
1704 | } | ||
1705 | } | ||
1706 | } | ||
1707 | spin_unlock_irqrestore(&host_info_lock, flags); | ||
1708 | |||
1709 | if (another_host) { | ||
1710 | DBGMSG("another hosts entry is valid -> SUCCESS"); | ||
1711 | if (copy_to_user(int2ptr(req->req.recvb), | ||
1712 | &addr->start, sizeof(u64))) { | ||
1713 | printk(KERN_ERR "raw1394: arm_register failed, " | ||
1714 | "address-range entry is invalid -> EFAULT\n"); | ||
1715 | vfree(addr->addr_space_buffer); | ||
1716 | kfree(addr); | ||
1717 | return (-EFAULT); | ||
1718 | } | ||
1719 | free_pending_request(req); /* immediate success or fail */ | ||
1720 | /* INSERT ENTRY */ | ||
1721 | spin_lock_irqsave(&host_info_lock, flags); | ||
1722 | list_add_tail(&addr->addr_list, &fi->addr_list); | ||
1723 | spin_unlock_irqrestore(&host_info_lock, flags); | ||
1724 | return 0; | ||
1725 | } | ||
1726 | retval = | ||
1727 | hpsb_register_addrspace(&raw1394_highlevel, fi->host, &arm_ops, | ||
1728 | req->req.address, | ||
1729 | req->req.address + req->req.length); | ||
1730 | if (retval) { | ||
1731 | /* INSERT ENTRY */ | ||
1732 | spin_lock_irqsave(&host_info_lock, flags); | ||
1733 | list_add_tail(&addr->addr_list, &fi->addr_list); | ||
1734 | spin_unlock_irqrestore(&host_info_lock, flags); | ||
1735 | } else { | ||
1736 | DBGMSG("arm_register failed errno: %d \n", retval); | ||
1737 | vfree(addr->addr_space_buffer); | ||
1738 | kfree(addr); | ||
1739 | return (-EALREADY); | ||
1740 | } | ||
1741 | free_pending_request(req); /* immediate success or fail */ | ||
1742 | return 0; | ||
1743 | } | ||
1744 | |||
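| /* Remove an address range mapping created by arm_register() and unregister it from the ieee1394 core unless another host still holds an entry for the same range. */ | ||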
1745 | static int arm_unregister(struct file_info *fi, struct pending_request *req) | ||
1746 | { | ||
1747 | int found = 0; | ||
1748 | int retval = 0; | ||
1749 | struct list_head *entry; | ||
1750 | struct arm_addr *addr = NULL; | ||
1751 | struct host_info *hi; | ||
1752 | struct file_info *fi_hlp = NULL; | ||
1753 | struct arm_addr *arm_addr = NULL; | ||
1754 | int another_host; | ||
1755 | unsigned long flags; | ||
1756 | |||
1757 | DBGMSG("arm_Unregister called addr(Offset): " | ||
1758 | "%8.8x %8.8x", | ||
1759 | (u32) ((req->req.address >> 32) & 0xFFFF), | ||
1760 | (u32) (req->req.address & 0xFFFFFFFF)); | ||
1761 | spin_lock_irqsave(&host_info_lock, flags); | ||
1762 | /* get addr */ | ||
1763 | entry = fi->addr_list.next; | ||
1764 | while (entry != &(fi->addr_list)) { | ||
1765 | addr = list_entry(entry, struct arm_addr, addr_list); | ||
1766 | if (addr->start == req->req.address) { | ||
1767 | found = 1; | ||
1768 | break; | ||
1769 | } | ||
1770 | entry = entry->next; | ||
1771 | } | ||
1772 | if (!found) { | ||
1773 | DBGMSG("arm_Unregister addr not found"); | ||
1774 | spin_unlock_irqrestore(&host_info_lock, flags); | ||
1775 | return (-EINVAL); | ||
1776 | } | ||
1777 | DBGMSG("arm_Unregister addr found"); | ||
1778 | another_host = 0; | ||
1779 | /* does another host have a valid address entry covering | ||
1780 | the same address range? */ | ||
1781 | list_for_each_entry(hi, &host_info_list, list) { | ||
1782 | if (hi->host != fi->host) { | ||
1783 | list_for_each_entry(fi_hlp, &hi->file_info_list, list) { | ||
1784 | entry = fi_hlp->addr_list.next; | ||
1785 | while (entry != &(fi_hlp->addr_list)) { | ||
1786 | arm_addr = list_entry(entry, | ||
1787 | struct arm_addr, | ||
1788 | addr_list); | ||
1789 | if (arm_addr->start == addr->start) { | ||
1790 | DBGMSG("another host ownes " | ||
1791 | "same addressrange"); | ||
1792 | another_host = 1; | ||
1793 | break; | ||
1794 | } | ||
1795 | entry = entry->next; | ||
1796 | } | ||
1797 | if (another_host) { | ||
1798 | break; | ||
1799 | } | ||
1800 | } | ||
1801 | } | ||
1802 | } | ||
1803 | if (another_host) { | ||
1804 | DBGMSG("delete entry from list -> success"); | ||
1805 | list_del(&addr->addr_list); | ||
1806 | spin_unlock_irqrestore(&host_info_lock, flags); | ||
1807 | vfree(addr->addr_space_buffer); | ||
1808 | kfree(addr); | ||
1809 | free_pending_request(req); /* immediate success or fail */ | ||
1810 | return 0; | ||
1811 | } | ||
1812 | retval = | ||
1813 | hpsb_unregister_addrspace(&raw1394_highlevel, fi->host, | ||
1814 | addr->start); | ||
1815 | if (!retval) { | ||
1816 | printk(KERN_ERR "raw1394: arm_Unregister failed -> EINVAL\n"); | ||
1817 | spin_unlock_irqrestore(&host_info_lock, flags); | ||
1818 | return (-EINVAL); | ||
1819 | } | ||
1820 | DBGMSG("delete entry from list -> success"); | ||
1821 | list_del(&addr->addr_list); | ||
1822 | spin_unlock_irqrestore(&host_info_lock, flags); | ||
1823 | vfree(addr->addr_space_buffer); | ||
1824 | kfree(addr); | ||
1825 | free_pending_request(req); /* immediate success or fail */ | ||
1826 | return 0; | ||
1827 | } | ||
1828 | |||
1829 | /* Copy data from ARM buffer(s) to user buffer. */ | ||
1830 | static int arm_get_buf(struct file_info *fi, struct pending_request *req) | ||
1831 | { | ||
1832 | struct arm_addr *arm_addr = NULL; | ||
1833 | unsigned long flags; | ||
1834 | unsigned long offset; | ||
1835 | |||
1836 | struct list_head *entry; | ||
1837 | |||
1838 | DBGMSG("arm_get_buf " | ||
1839 | "addr(Offset): %04X %08X length: %u", | ||
1840 | (u32) ((req->req.address >> 32) & 0xFFFF), | ||
1841 | (u32) (req->req.address & 0xFFFFFFFF), (u32) req->req.length); | ||
1842 | |||
1843 | spin_lock_irqsave(&host_info_lock, flags); | ||
1844 | entry = fi->addr_list.next; | ||
1845 | while (entry != &(fi->addr_list)) { | ||
1846 | arm_addr = list_entry(entry, struct arm_addr, addr_list); | ||
1847 | if ((arm_addr->start <= req->req.address) && | ||
1848 | (arm_addr->end > req->req.address)) { | ||
1849 | if (req->req.address + req->req.length <= arm_addr->end) { | ||
1850 | offset = req->req.address - arm_addr->start; | ||
1851 | spin_unlock_irqrestore(&host_info_lock, flags); | ||
1852 | |||
1853 | DBGMSG("arm_get_buf copy_to_user( %08X, %p, %u )", | ||
1855 | (u32) req->req.recvb, | ||
1856 | arm_addr->addr_space_buffer + offset, | ||
1857 | (u32) req->req.length); | ||
1858 | if (copy_to_user | ||
1859 | (int2ptr(req->req.recvb), | ||
1860 | arm_addr->addr_space_buffer + offset, | ||
1861 | req->req.length)) | ||
1862 | return (-EFAULT); | ||
1863 | |||
1864 | /* We have to free the request, because we | ||
1865 | * queue no response, and therefore nobody | ||
1866 | * will free it. */ | ||
1867 | free_pending_request(req); | ||
1868 | return 0; | ||
1869 | } else { | ||
1870 | DBGMSG("arm_get_buf request exceeded mapping"); | ||
1871 | spin_unlock_irqrestore(&host_info_lock, flags); | ||
1872 | return (-EINVAL); | ||
1873 | } | ||
1874 | } | ||
1875 | entry = entry->next; | ||
1876 | } | ||
1877 | spin_unlock_irqrestore(&host_info_lock, flags); | ||
1878 | return (-EINVAL); | ||
1879 | } | ||
1880 | |||
1881 | /* Copy data from user buffer to ARM buffer(s). */ | ||
1882 | static int arm_set_buf(struct file_info *fi, struct pending_request *req) | ||
1883 | { | ||
1884 | struct arm_addr *arm_addr = NULL; | ||
1885 | unsigned long flags; | ||
1886 | unsigned long offset; | ||
1887 | |||
1888 | struct list_head *entry; | ||
1889 | |||
1890 | DBGMSG("arm_set_buf " | ||
1891 | "addr(Offset): %04X %08X length: %u", | ||
1892 | (u32) ((req->req.address >> 32) & 0xFFFF), | ||
1893 | (u32) (req->req.address & 0xFFFFFFFF), (u32) req->req.length); | ||
1894 | |||
1895 | spin_lock_irqsave(&host_info_lock, flags); | ||
1896 | entry = fi->addr_list.next; | ||
1897 | while (entry != &(fi->addr_list)) { | ||
1898 | arm_addr = list_entry(entry, struct arm_addr, addr_list); | ||
1899 | if ((arm_addr->start <= req->req.address) && | ||
1900 | (arm_addr->end > req->req.address)) { | ||
1901 | if (req->req.address + req->req.length <= arm_addr->end) { | ||
1902 | offset = req->req.address - arm_addr->start; | ||
1903 | spin_unlock_irqrestore(&host_info_lock, flags); | ||
1904 | |||
1905 | DBGMSG("arm_set_buf copy_from_user( %p, %08X, %u )", | ||
1907 | arm_addr->addr_space_buffer + offset, | ||
1908 | (u32) req->req.sendb, | ||
1909 | (u32) req->req.length); | ||
1910 | if (copy_from_user | ||
1911 | (arm_addr->addr_space_buffer + offset, | ||
1912 | int2ptr(req->req.sendb), | ||
1913 | req->req.length)) | ||
1914 | return (-EFAULT); | ||
1915 | |||
1916 | /* We have to free the request, because we | ||
1917 | * queue no response, and therefore nobody | ||
1918 | * will free it. */ | ||
1919 | free_pending_request(req); | ||
1920 | return 0; | ||
1921 | } else { | ||
1922 | DBGMSG("arm_set_buf request exceeded mapping"); | ||
1923 | spin_unlock_irqrestore(&host_info_lock, flags); | ||
1924 | return (-EINVAL); | ||
1925 | } | ||
1926 | } | ||
1927 | entry = entry->next; | ||
1928 | } | ||
1929 | spin_unlock_irqrestore(&host_info_lock, flags); | ||
1930 | return (-EINVAL); | ||
1931 | } | ||
1932 | |||
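| /* Switch bus reset notification for this file handle on or off. */ | ||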
1933 | static int reset_notification(struct file_info *fi, struct pending_request *req) | ||
1934 | { | ||
1935 | DBGMSG("reset_notification called - switch %s ", | ||
1936 | (req->req.misc == RAW1394_NOTIFY_OFF) ? "OFF" : "ON"); | ||
1937 | if ((req->req.misc == RAW1394_NOTIFY_OFF) || | ||
1938 | (req->req.misc == RAW1394_NOTIFY_ON)) { | ||
1939 | fi->notification = (u8) req->req.misc; | ||
1940 | free_pending_request(req); /* we have to free the request, because we queue no response, and therefore nobody will free it */ | ||
1941 | return 0; | ||
1942 | } | ||
1943 | /* error EINVAL (22) invalid argument */ | ||
1944 | return (-EINVAL); | ||
1945 | } | ||
1946 | |||
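| /* Build a PHY packet from the quadlet passed in req.sendb and queue it for transmission; completion is reported through the request queue. */ | ||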
1947 | static int write_phypacket(struct file_info *fi, struct pending_request *req) | ||
1948 | { | ||
1949 | struct hpsb_packet *packet = NULL; | ||
1950 | int retval = 0; | ||
1951 | quadlet_t data; | ||
1952 | unsigned long flags; | ||
1953 | |||
1954 | data = be32_to_cpu((u32) req->req.sendb); | ||
1955 | DBGMSG("write_phypacket called - quadlet 0x%8.8x ", data); | ||
1956 | packet = hpsb_make_phypacket(fi->host, data); | ||
1957 | if (!packet) | ||
1958 | return -ENOMEM; | ||
1959 | req->req.length = 0; | ||
1960 | req->packet = packet; | ||
1961 | hpsb_set_packet_complete_task(packet, | ||
1962 | (void (*)(void *))queue_complete_cb, req); | ||
1963 | spin_lock_irqsave(&fi->reqlists_lock, flags); | ||
1964 | list_add_tail(&req->list, &fi->req_pending); | ||
1965 | spin_unlock_irqrestore(&fi->reqlists_lock, flags); | ||
1966 | packet->generation = req->req.generation; | ||
1967 | retval = hpsb_send_packet(packet); | ||
1968 | DBGMSG("write_phypacket send_packet called => retval: %d ", retval); | ||
1969 | if (retval < 0) { | ||
1970 | req->req.error = RAW1394_ERROR_SEND_ERROR; | ||
1971 | req->req.length = 0; | ||
1972 | queue_complete_req(req); | ||
1973 | } | ||
1974 | return 0; | ||
1975 | } | ||
1976 | |||
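| /* Copy the local configuration ROM image to user space together with its cached length, the CSR generation, and the csr1212_read() status. */ | ||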
1977 | static int get_config_rom(struct file_info *fi, struct pending_request *req) | ||
1978 | { | ||
1979 | int ret = 0; | ||
1980 | quadlet_t *data = kmalloc(req->req.length, GFP_KERNEL); | ||
1981 | int status; | ||
1982 | |||
1983 | if (!data) | ||
1984 | return -ENOMEM; | ||
1985 | |||
1986 | status = | ||
1987 | csr1212_read(fi->host->csr.rom, CSR1212_CONFIG_ROM_SPACE_OFFSET, | ||
1988 | data, req->req.length); | ||
1989 | if (copy_to_user(int2ptr(req->req.recvb), data, req->req.length)) | ||
1990 | ret = -EFAULT; | ||
1991 | if (copy_to_user | ||
1992 | (int2ptr(req->req.tag), &fi->host->csr.rom->cache_head->len, | ||
1993 | sizeof(fi->host->csr.rom->cache_head->len))) | ||
1994 | ret = -EFAULT; | ||
1995 | if (copy_to_user(int2ptr(req->req.address), &fi->host->csr.generation, | ||
1996 | sizeof(fi->host->csr.generation))) | ||
1997 | ret = -EFAULT; | ||
1998 | if (copy_to_user(int2ptr(req->req.sendb), &status, sizeof(status))) | ||
1999 | ret = -EFAULT; | ||
2000 | kfree(data); | ||
2001 | if (ret >= 0) { | ||
2002 | free_pending_request(req); /* we have to free the request, because we queue no response, and therefore nobody will free it */ | ||
2003 | } | ||
2004 | return ret; | ||
2005 | } | ||
2006 | |||
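| /* Install a configuration ROM image supplied by user space on the local host and return the update status to user space. */ | ||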
2007 | static int update_config_rom(struct file_info *fi, struct pending_request *req) | ||
2008 | { | ||
2009 | int ret = 0; | ||
2010 | quadlet_t *data = kmalloc(req->req.length, GFP_KERNEL); | ||
2011 | if (!data) | ||
2012 | return -ENOMEM; | ||
2013 | if (copy_from_user(data, int2ptr(req->req.sendb), req->req.length)) { | ||
2014 | ret = -EFAULT; | ||
2015 | } else { | ||
2016 | int status = hpsb_update_config_rom(fi->host, | ||
2017 | data, req->req.length, | ||
2018 | (unsigned char)req->req.misc); | ||
2020 | if (copy_to_user | ||
2021 | (int2ptr(req->req.recvb), &status, sizeof(status))) | ||
2022 | ret = -EFAULT; | ||
2023 | } | ||
2024 | kfree(data); | ||
2025 | if (ret >= 0) { | ||
2026 | free_pending_request(req); /* we have to free the request, because we queue no response, and therefore nobody will free it */ | ||
2027 | fi->cfgrom_upd = 1; | ||
2028 | } | ||
2029 | return ret; | ||
2030 | } | ||
2031 | |||
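| /* Create, replace, or delete one user-owned CSR1212 vendor directory in the local configuration ROM and rebuild the ROM image. */ | ||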
2032 | static int modify_config_rom(struct file_info *fi, struct pending_request *req) | ||
2033 | { | ||
2034 | struct csr1212_keyval *kv; | ||
2035 | struct csr1212_csr_rom_cache *cache; | ||
2036 | struct csr1212_dentry *dentry; | ||
2037 | u32 dr; | ||
2038 | int ret = 0; | ||
2039 | |||
2040 | if (req->req.misc == ~0) { | ||
2041 | if (req->req.length == 0) | ||
2042 | return -EINVAL; | ||
2043 | |||
2044 | /* Find an unused slot */ | ||
2045 | for (dr = 0; | ||
2046 | dr < RAW1394_MAX_USER_CSR_DIRS && fi->csr1212_dirs[dr]; | ||
2047 | dr++) ; | ||
2048 | |||
2049 | if (dr == RAW1394_MAX_USER_CSR_DIRS) | ||
2050 | return -ENOMEM; | ||
2051 | |||
2052 | fi->csr1212_dirs[dr] = | ||
2053 | csr1212_new_directory(CSR1212_KV_ID_VENDOR); | ||
2054 | if (!fi->csr1212_dirs[dr]) | ||
2055 | return -ENOMEM; | ||
2056 | } else { | ||
2057 | dr = req->req.misc; | ||
2058 | if (!fi->csr1212_dirs[dr]) | ||
2059 | return -EINVAL; | ||
2060 | |||
2061 | /* Delete old stuff */ | ||
2062 | for (dentry = | ||
2063 | fi->csr1212_dirs[dr]->value.directory.dentries_head; | ||
2064 | dentry; dentry = dentry->next) { | ||
2065 | csr1212_detach_keyval_from_directory(fi->host->csr.rom-> | ||
2066 | root_kv, | ||
2067 | dentry->kv); | ||
2068 | } | ||
2069 | |||
2070 | if (req->req.length == 0) { | ||
2071 | csr1212_release_keyval(fi->csr1212_dirs[dr]); | ||
2072 | fi->csr1212_dirs[dr] = NULL; | ||
2073 | |||
2074 | hpsb_update_config_rom_image(fi->host); | ||
2075 | free_pending_request(req); | ||
2076 | return 0; | ||
2077 | } | ||
2078 | } | ||
2079 | |||
2080 | cache = csr1212_rom_cache_malloc(0, req->req.length); | ||
2081 | if (!cache) { | ||
2082 | csr1212_release_keyval(fi->csr1212_dirs[dr]); | ||
2083 | fi->csr1212_dirs[dr] = NULL; | ||
2084 | return -ENOMEM; | ||
2085 | } | ||
2086 | |||
2087 | cache->filled_head = kmalloc(sizeof(*cache->filled_head), GFP_KERNEL); | ||
2088 | if (!cache->filled_head) { | ||
2089 | csr1212_release_keyval(fi->csr1212_dirs[dr]); | ||
2090 | fi->csr1212_dirs[dr] = NULL; | ||
2091 | CSR1212_FREE(cache); | ||
2092 | return -ENOMEM; | ||
2093 | } | ||
2094 | cache->filled_tail = cache->filled_head; | ||
2095 | |||
2096 | if (copy_from_user(cache->data, int2ptr(req->req.sendb), | ||
2097 | req->req.length)) { | ||
2098 | csr1212_release_keyval(fi->csr1212_dirs[dr]); | ||
2099 | fi->csr1212_dirs[dr] = NULL; | ||
2100 | ret = -EFAULT; | ||
2101 | } else { | ||
2102 | cache->len = req->req.length; | ||
2103 | cache->filled_head->offset_start = 0; | ||
2104 | cache->filled_head->offset_end = cache->size - 1; | ||
2105 | |||
2106 | cache->layout_head = cache->layout_tail = fi->csr1212_dirs[dr]; | ||
2107 | |||
2108 | ret = CSR1212_SUCCESS; | ||
2109 | /* parse all the items */ | ||
2110 | for (kv = cache->layout_head; ret == CSR1212_SUCCESS && kv; | ||
2111 | kv = kv->next) { | ||
2112 | ret = csr1212_parse_keyval(kv, cache); | ||
2113 | } | ||
2114 | |||
2115 | /* attach top level items to the root directory */ | ||
2116 | for (dentry = | ||
2117 | fi->csr1212_dirs[dr]->value.directory.dentries_head; | ||
2118 | ret == CSR1212_SUCCESS && dentry; dentry = dentry->next) { | ||
2119 | ret = | ||
2120 | csr1212_attach_keyval_to_directory(fi->host->csr. | ||
2121 | rom->root_kv, | ||
2122 | dentry->kv); | ||
2123 | } | ||
2124 | |||
2125 | if (ret == CSR1212_SUCCESS) { | ||
2126 | ret = hpsb_update_config_rom_image(fi->host); | ||
2127 | |||
2128 | if (ret >= 0 && copy_to_user(int2ptr(req->req.recvb), | ||
2129 | &dr, sizeof(dr))) { | ||
2130 | ret = -EFAULT; | ||
2131 | } | ||
2132 | } | ||
2133 | } | ||
2134 | kfree(cache->filled_head); | ||
2135 | CSR1212_FREE(cache); | ||
2136 | |||
2137 | if (ret >= 0) { | ||
2138 | /* we have to free the request, because we queue no response, | ||
2139 | * and therefore nobody will free it */ | ||
2140 | free_pending_request(req); | ||
2141 | return 0; | ||
2142 | } else { | ||
2143 | for (dentry = | ||
2144 | fi->csr1212_dirs[dr]->value.directory.dentries_head; | ||
2145 | dentry; dentry = dentry->next) { | ||
2146 | csr1212_detach_keyval_from_directory(fi->host->csr.rom-> | ||
2147 | root_kv, | ||
2148 | dentry->kv); | ||
2149 | } | ||
2150 | csr1212_release_keyval(fi->csr1212_dirs[dr]); | ||
2151 | fi->csr1212_dirs[dr] = NULL; | ||
2152 | return ret; | ||
2153 | } | ||
2154 | } | ||
2155 | |||
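| /* Dispatch a request submitted while the file handle is in the 'connected' state; generation-independent requests are handled first, everything else is checked against the current bus generation. */ | ||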
2156 | static int state_connected(struct file_info *fi, struct pending_request *req) | ||
2157 | { | ||
2158 | int node = req->req.address >> 48; | ||
2159 | |||
2160 | req->req.error = RAW1394_ERROR_NONE; | ||
2161 | |||
2162 | switch (req->req.type) { | ||
2163 | |||
2164 | case RAW1394_REQ_ECHO: | ||
2165 | queue_complete_req(req); | ||
2166 | return 0; | ||
2167 | |||
2168 | case RAW1394_REQ_ARM_REGISTER: | ||
2169 | return arm_register(fi, req); | ||
2170 | |||
2171 | case RAW1394_REQ_ARM_UNREGISTER: | ||
2172 | return arm_unregister(fi, req); | ||
2173 | |||
2174 | case RAW1394_REQ_ARM_SET_BUF: | ||
2175 | return arm_set_buf(fi, req); | ||
2176 | |||
2177 | case RAW1394_REQ_ARM_GET_BUF: | ||
2178 | return arm_get_buf(fi, req); | ||
2179 | |||
2180 | case RAW1394_REQ_RESET_NOTIFY: | ||
2181 | return reset_notification(fi, req); | ||
2182 | |||
2183 | case RAW1394_REQ_ISO_SEND: | ||
2184 | case RAW1394_REQ_ISO_LISTEN: | ||
2185 | printk(KERN_DEBUG "raw1394: old iso ABI has been removed\n"); | ||
2186 | req->req.error = RAW1394_ERROR_COMPAT; | ||
2187 | req->req.misc = RAW1394_KERNELAPI_VERSION; | ||
2188 | queue_complete_req(req); | ||
2189 | return 0; | ||
2190 | |||
2191 | case RAW1394_REQ_FCP_LISTEN: | ||
2192 | handle_fcp_listen(fi, req); | ||
2193 | return 0; | ||
2194 | |||
2195 | case RAW1394_REQ_RESET_BUS: | ||
2196 | if (req->req.misc == RAW1394_LONG_RESET) { | ||
2197 | DBGMSG("busreset called (type: LONG)"); | ||
2198 | hpsb_reset_bus(fi->host, LONG_RESET); | ||
2199 | free_pending_request(req); /* we have to free the request, because we queue no response, and therefore nobody will free it */ | ||
2200 | return 0; | ||
2201 | } | ||
2202 | if (req->req.misc == RAW1394_SHORT_RESET) { | ||
2203 | DBGMSG("busreset called (type: SHORT)"); | ||
2204 | hpsb_reset_bus(fi->host, SHORT_RESET); | ||
2205 | free_pending_request(req); /* we have to free the request, because we queue no response, and therefore nobody will free it */ | ||
2206 | return 0; | ||
2207 | } | ||
2208 | /* error EINVAL (22) invalid argument */ | ||
2209 | return (-EINVAL); | ||
2210 | case RAW1394_REQ_GET_ROM: | ||
2211 | return get_config_rom(fi, req); | ||
2212 | |||
2213 | case RAW1394_REQ_UPDATE_ROM: | ||
2214 | return update_config_rom(fi, req); | ||
2215 | |||
2216 | case RAW1394_REQ_MODIFY_ROM: | ||
2217 | return modify_config_rom(fi, req); | ||
2218 | } | ||
2219 | |||
2220 | if (req->req.generation != get_hpsb_generation(fi->host)) { | ||
2221 | req->req.error = RAW1394_ERROR_GENERATION; | ||
2222 | req->req.generation = get_hpsb_generation(fi->host); | ||
2223 | req->req.length = 0; | ||
2224 | queue_complete_req(req); | ||
2225 | return 0; | ||
2226 | } | ||
2227 | |||
2228 | switch (req->req.type) { | ||
2229 | case RAW1394_REQ_PHYPACKET: | ||
2230 | return write_phypacket(fi, req); | ||
2231 | case RAW1394_REQ_ASYNC_SEND: | ||
2232 | return handle_async_send(fi, req); | ||
2233 | } | ||
2234 | |||
2235 | if (req->req.length == 0) { | ||
2236 | req->req.error = RAW1394_ERROR_INVALID_ARG; | ||
2237 | queue_complete_req(req); | ||
2238 | return 0; | ||
2239 | } | ||
2240 | |||
2241 | return handle_async_request(fi, req, node); | ||
2242 | } | ||
2243 | |||
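| /* write() entry point: each write passes exactly one struct raw1394_request (or its 32-bit compat layout), which is dispatched according to the handle's current state. */ | ||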
2244 | static ssize_t raw1394_write(struct file *file, const char __user * buffer, | ||
2245 | size_t count, loff_t * offset_is_ignored) | ||
2246 | { | ||
2247 | struct file_info *fi = file->private_data; | ||
2248 | struct pending_request *req; | ||
2249 | ssize_t retval = -EBADFD; | ||
2250 | |||
2251 | #ifdef CONFIG_COMPAT | ||
2252 | if (count == sizeof(struct compat_raw1394_req) && | ||
2253 | sizeof(struct compat_raw1394_req) != | ||
2254 | sizeof(struct raw1394_request)) { | ||
2255 | buffer = raw1394_compat_write(buffer); | ||
2256 | if (IS_ERR((__force void *)buffer)) | ||
2257 | return PTR_ERR((__force void *)buffer); | ||
2258 | } else | ||
2259 | #endif | ||
2260 | if (count != sizeof(struct raw1394_request)) { | ||
2261 | return -EINVAL; | ||
2262 | } | ||
2263 | |||
2264 | req = alloc_pending_request(); | ||
2265 | if (req == NULL) { | ||
2266 | return -ENOMEM; | ||
2267 | } | ||
2268 | req->file_info = fi; | ||
2269 | |||
2270 | if (copy_from_user(&req->req, buffer, sizeof(struct raw1394_request))) { | ||
2271 | free_pending_request(req); | ||
2272 | return -EFAULT; | ||
2273 | } | ||
2274 | |||
2275 | if (!mutex_trylock(&fi->state_mutex)) { | ||
2276 | free_pending_request(req); | ||
2277 | return -EAGAIN; | ||
2278 | } | ||
2279 | |||
2280 | switch (fi->state) { | ||
2281 | case opened: | ||
2282 | retval = state_opened(fi, req); | ||
2283 | break; | ||
2284 | |||
2285 | case initialized: | ||
2286 | retval = state_initialized(fi, req); | ||
2287 | break; | ||
2288 | |||
2289 | case connected: | ||
2290 | retval = state_connected(fi, req); | ||
2291 | break; | ||
2292 | } | ||
2293 | |||
2294 | mutex_unlock(&fi->state_mutex); | ||
2295 | |||
2296 | if (retval < 0) { | ||
2297 | free_pending_request(req); | ||
2298 | } else { | ||
2299 | BUG_ON(retval); | ||
2300 | retval = count; | ||
2301 | } | ||
2302 | |||
2303 | return retval; | ||
2304 | } | ||
2305 | |||
2306 | /* rawiso operations */ | ||
2307 | |||
2308 | /* check if any RAW1394_REQ_RAWISO_ACTIVITY event is already in the | ||
2309 | * completion queue (reqlists_lock must be taken) */ | ||
2310 | static inline int __rawiso_event_in_queue(struct file_info *fi) | ||
2311 | { | ||
2312 | struct pending_request *req; | ||
2313 | |||
2314 | list_for_each_entry(req, &fi->req_complete, list) | ||
2315 | if (req->req.type == RAW1394_REQ_RAWISO_ACTIVITY) | ||
2316 | return 1; | ||
2317 | |||
2318 | return 0; | ||
2319 | } | ||
2320 | |||
2321 | /* put a RAWISO_ACTIVITY event in the queue, if one isn't there already */ | ||
2322 | static void queue_rawiso_event(struct file_info *fi) | ||
2323 | { | ||
2324 | unsigned long flags; | ||
2325 | |||
2326 | spin_lock_irqsave(&fi->reqlists_lock, flags); | ||
2327 | |||
2328 | /* only one ISO activity event may be in the queue */ | ||
2329 | if (!__rawiso_event_in_queue(fi)) { | ||
2330 | struct pending_request *req = | ||
2331 | __alloc_pending_request(GFP_ATOMIC); | ||
2332 | |||
2333 | if (req) { | ||
2334 | req->file_info = fi; | ||
2335 | req->req.type = RAW1394_REQ_RAWISO_ACTIVITY; | ||
2336 | req->req.generation = get_hpsb_generation(fi->host); | ||
2337 | __queue_complete_req(req); | ||
2338 | } else { | ||
2339 | /* on allocation failure, signal an overflow */ | ||
2340 | if (fi->iso_handle) { | ||
2341 | atomic_inc(&fi->iso_handle->overflows); | ||
2342 | } | ||
2343 | } | ||
2344 | } | ||
2345 | spin_unlock_irqrestore(&fi->reqlists_lock, flags); | ||
2346 | } | ||
2347 | |||
2348 | static void rawiso_activity_cb(struct hpsb_iso *iso) | ||
2349 | { | ||
2350 | unsigned long flags; | ||
2351 | struct host_info *hi; | ||
2352 | struct file_info *fi; | ||
2353 | |||
2354 | spin_lock_irqsave(&host_info_lock, flags); | ||
2355 | hi = find_host_info(iso->host); | ||
2356 | |||
2357 | if (hi != NULL) { | ||
2358 | list_for_each_entry(fi, &hi->file_info_list, list) { | ||
2359 | if (fi->iso_handle == iso) | ||
2360 | queue_rawiso_event(fi); | ||
2361 | } | ||
2362 | } | ||
2363 | |||
2364 | spin_unlock_irqrestore(&host_info_lock, flags); | ||
2365 | } | ||
2366 | |||
2367 | /* helper function - gather all the kernel iso status bits for returning to user-space */ | ||
2368 | static void raw1394_iso_fill_status(struct hpsb_iso *iso, | ||
2369 | struct raw1394_iso_status *stat) | ||
2370 | { | ||
2371 | int overflows = atomic_read(&iso->overflows); | ||
2372 | int skips = atomic_read(&iso->skips); | ||
2373 | |||
2374 | stat->config.data_buf_size = iso->buf_size; | ||
2375 | stat->config.buf_packets = iso->buf_packets; | ||
2376 | stat->config.channel = iso->channel; | ||
2377 | stat->config.speed = iso->speed; | ||
2378 | stat->config.irq_interval = iso->irq_interval; | ||
2379 | stat->n_packets = hpsb_iso_n_ready(iso); | ||
2380 | stat->overflows = ((skips & 0xFFFF) << 16) | ((overflows & 0xFFFF)); | ||
2381 | stat->xmit_cycle = iso->xmit_cycle; | ||
2382 | } | ||
2383 | |||
2384 | static int raw1394_iso_xmit_init(struct file_info *fi, void __user * uaddr) | ||
2385 | { | ||
2386 | struct raw1394_iso_status stat; | ||
2387 | |||
2388 | if (!fi->host) | ||
2389 | return -EINVAL; | ||
2390 | |||
2391 | if (copy_from_user(&stat, uaddr, sizeof(stat))) | ||
2392 | return -EFAULT; | ||
2393 | |||
2394 | fi->iso_handle = hpsb_iso_xmit_init(fi->host, | ||
2395 | stat.config.data_buf_size, | ||
2396 | stat.config.buf_packets, | ||
2397 | stat.config.channel, | ||
2398 | stat.config.speed, | ||
2399 | stat.config.irq_interval, | ||
2400 | rawiso_activity_cb); | ||
2401 | if (!fi->iso_handle) | ||
2402 | return -ENOMEM; | ||
2403 | |||
2404 | fi->iso_state = RAW1394_ISO_XMIT; | ||
2405 | |||
2406 | raw1394_iso_fill_status(fi->iso_handle, &stat); | ||
2407 | if (copy_to_user(uaddr, &stat, sizeof(stat))) | ||
2408 | return -EFAULT; | ||
2409 | |||
2410 | /* queue an event to get things started */ | ||
2411 | rawiso_activity_cb(fi->iso_handle); | ||
2412 | |||
2413 | return 0; | ||
2414 | } | ||
2415 | |||
2416 | static int raw1394_iso_recv_init(struct file_info *fi, void __user * uaddr) | ||
2417 | { | ||
2418 | struct raw1394_iso_status stat; | ||
2419 | |||
2420 | if (!fi->host) | ||
2421 | return -EINVAL; | ||
2422 | |||
2423 | if (copy_from_user(&stat, uaddr, sizeof(stat))) | ||
2424 | return -EFAULT; | ||
2425 | |||
2426 | fi->iso_handle = hpsb_iso_recv_init(fi->host, | ||
2427 | stat.config.data_buf_size, | ||
2428 | stat.config.buf_packets, | ||
2429 | stat.config.channel, | ||
2430 | stat.config.dma_mode, | ||
2431 | stat.config.irq_interval, | ||
2432 | rawiso_activity_cb); | ||
2433 | if (!fi->iso_handle) | ||
2434 | return -ENOMEM; | ||
2435 | |||
2436 | fi->iso_state = RAW1394_ISO_RECV; | ||
2437 | |||
2438 | raw1394_iso_fill_status(fi->iso_handle, &stat); | ||
2439 | if (copy_to_user(uaddr, &stat, sizeof(stat))) | ||
2440 | return -EFAULT; | ||
2441 | return 0; | ||
2442 | } | ||
2443 | |||
2444 | static int raw1394_iso_get_status(struct file_info *fi, void __user * uaddr) | ||
2445 | { | ||
2446 | struct raw1394_iso_status stat; | ||
2447 | struct hpsb_iso *iso = fi->iso_handle; | ||
2448 | |||
2449 | raw1394_iso_fill_status(fi->iso_handle, &stat); | ||
2450 | if (copy_to_user(uaddr, &stat, sizeof(stat))) | ||
2451 | return -EFAULT; | ||
2452 | |||
2453 | /* reset overflow counter */ | ||
2454 | atomic_set(&iso->overflows, 0); | ||
2455 | /* reset skip counter */ | ||
2456 | atomic_set(&iso->skips, 0); | ||
2457 | |||
2458 | return 0; | ||
2459 | } | ||
2460 | |||
2461 | /* copy N packet_infos out of the ringbuffer into user-supplied array */ | ||
2462 | static int raw1394_iso_recv_packets(struct file_info *fi, void __user * uaddr) | ||
2463 | { | ||
2464 | struct raw1394_iso_packets upackets; | ||
2465 | unsigned int packet = fi->iso_handle->first_packet; | ||
2466 | int i; | ||
2467 | |||
2468 | if (copy_from_user(&upackets, uaddr, sizeof(upackets))) | ||
2469 | return -EFAULT; | ||
2470 | |||
2471 | if (upackets.n_packets > hpsb_iso_n_ready(fi->iso_handle)) | ||
2472 | return -EINVAL; | ||
2473 | |||
2474 | /* ensure user-supplied buffer is accessible and big enough */ | ||
2475 | if (!access_ok(VERIFY_WRITE, upackets.infos, | ||
2476 | upackets.n_packets * | ||
2477 | sizeof(struct raw1394_iso_packet_info))) | ||
2478 | return -EFAULT; | ||
2479 | |||
2480 | /* copy the packet_infos out */ | ||
2481 | for (i = 0; i < upackets.n_packets; i++) { | ||
2482 | if (__copy_to_user(&upackets.infos[i], | ||
2483 | &fi->iso_handle->infos[packet], | ||
2484 | sizeof(struct raw1394_iso_packet_info))) | ||
2485 | return -EFAULT; | ||
2486 | |||
2487 | packet = (packet + 1) % fi->iso_handle->buf_packets; | ||
2488 | } | ||
2489 | |||
2490 | return 0; | ||
2491 | } | ||
2492 | |||
2493 | /* copy N packet_infos from user to ringbuffer, and queue them for transmission */ | ||
2494 | static int raw1394_iso_send_packets(struct file_info *fi, void __user * uaddr) | ||
2495 | { | ||
2496 | struct raw1394_iso_packets upackets; | ||
2497 | int i, rv; | ||
2498 | |||
2499 | if (copy_from_user(&upackets, uaddr, sizeof(upackets))) | ||
2500 | return -EFAULT; | ||
2501 | |||
2502 | if (upackets.n_packets >= fi->iso_handle->buf_packets) | ||
2503 | return -EINVAL; | ||
2504 | |||
2505 | if (upackets.n_packets >= hpsb_iso_n_ready(fi->iso_handle)) | ||
2506 | return -EAGAIN; | ||
2507 | |||
2508 | /* ensure user-supplied buffer is accessible and big enough */ | ||
2509 | if (!access_ok(VERIFY_READ, upackets.infos, | ||
2510 | upackets.n_packets * | ||
2511 | sizeof(struct raw1394_iso_packet_info))) | ||
2512 | return -EFAULT; | ||
2513 | |||
2514 | /* copy the infos structs in and queue the packets */ | ||
2515 | for (i = 0; i < upackets.n_packets; i++) { | ||
2516 | struct raw1394_iso_packet_info info; | ||
2517 | |||
2518 | if (__copy_from_user(&info, &upackets.infos[i], | ||
2519 | sizeof(struct raw1394_iso_packet_info))) | ||
2520 | return -EFAULT; | ||
2521 | |||
2522 | rv = hpsb_iso_xmit_queue_packet(fi->iso_handle, info.offset, | ||
2523 | info.len, info.tag, info.sy); | ||
2524 | if (rv) | ||
2525 | return rv; | ||
2526 | } | ||
2527 | |||
2528 | return 0; | ||
2529 | } | ||
2530 | |||
2531 | static void raw1394_iso_shutdown(struct file_info *fi) | ||
2532 | { | ||
2533 | if (fi->iso_handle) | ||
2534 | hpsb_iso_shutdown(fi->iso_handle); | ||
2535 | |||
2536 | fi->iso_handle = NULL; | ||
2537 | fi->iso_state = RAW1394_ISO_INACTIVE; | ||
2538 | } | ||
2539 | |||
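| /* Read the host's isochronous cycle timer together with a local timestamp and copy both to user space. */ | ||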
2540 | static int raw1394_read_cycle_timer(struct file_info *fi, void __user * uaddr) | ||
2541 | { | ||
2542 | struct raw1394_cycle_timer ct; | ||
2543 | int err; | ||
2544 | |||
2545 | err = hpsb_read_cycle_timer(fi->host, &ct.cycle_timer, &ct.local_time); | ||
2546 | if (!err) | ||
2547 | if (copy_to_user(uaddr, &ct, sizeof(ct))) | ||
2548 | err = -EFAULT; | ||
2549 | return err; | ||
2550 | } | ||
2551 | |||
2552 | /* mmap the rawiso xmit/recv buffer */ | ||
2553 | static int raw1394_mmap(struct file *file, struct vm_area_struct *vma) | ||
2554 | { | ||
2555 | struct file_info *fi = file->private_data; | ||
2556 | int ret; | ||
2557 | |||
2558 | if (!mutex_trylock(&fi->state_mutex)) | ||
2559 | return -EAGAIN; | ||
2560 | |||
2561 | if (fi->iso_state == RAW1394_ISO_INACTIVE) | ||
2562 | ret = -EINVAL; | ||
2563 | else | ||
2564 | ret = dma_region_mmap(&fi->iso_handle->data_buf, file, vma); | ||
2565 | |||
2566 | mutex_unlock(&fi->state_mutex); | ||
2567 | |||
2568 | return ret; | ||
2569 | } | ||
2570 | |||
2571 | static long raw1394_ioctl_inactive(struct file_info *fi, unsigned int cmd, | ||
2572 | void __user *argp) | ||
2573 | { | ||
2574 | switch (cmd) { | ||
2575 | case RAW1394_IOC_ISO_XMIT_INIT: | ||
2576 | return raw1394_iso_xmit_init(fi, argp); | ||
2577 | case RAW1394_IOC_ISO_RECV_INIT: | ||
2578 | return raw1394_iso_recv_init(fi, argp); | ||
2579 | default: | ||
2580 | return -EINVAL; | ||
2581 | } | ||
2582 | } | ||
2583 | |||
2584 | static long raw1394_ioctl_recv(struct file_info *fi, unsigned int cmd, | ||
2585 | unsigned long arg) | ||
2586 | { | ||
2587 | void __user *argp = (void __user *)arg; | ||
2588 | |||
2589 | switch (cmd) { | ||
2590 | case RAW1394_IOC_ISO_RECV_START:{ | ||
2591 | int args[3]; | ||
2592 | |||
2593 | if (copy_from_user(&args[0], argp, sizeof(args))) | ||
2594 | return -EFAULT; | ||
2595 | return hpsb_iso_recv_start(fi->iso_handle, | ||
2596 | args[0], args[1], args[2]); | ||
2597 | } | ||
2598 | case RAW1394_IOC_ISO_XMIT_RECV_STOP: | ||
2599 | hpsb_iso_stop(fi->iso_handle); | ||
2600 | return 0; | ||
2601 | case RAW1394_IOC_ISO_RECV_LISTEN_CHANNEL: | ||
2602 | return hpsb_iso_recv_listen_channel(fi->iso_handle, arg); | ||
2603 | case RAW1394_IOC_ISO_RECV_UNLISTEN_CHANNEL: | ||
2604 | return hpsb_iso_recv_unlisten_channel(fi->iso_handle, arg); | ||
2605 | case RAW1394_IOC_ISO_RECV_SET_CHANNEL_MASK:{ | ||
2606 | u64 mask; | ||
2607 | |||
2608 | if (copy_from_user(&mask, argp, sizeof(mask))) | ||
2609 | return -EFAULT; | ||
2610 | return hpsb_iso_recv_set_channel_mask(fi->iso_handle, | ||
2611 | mask); | ||
2612 | } | ||
2613 | case RAW1394_IOC_ISO_GET_STATUS: | ||
2614 | return raw1394_iso_get_status(fi, argp); | ||
2615 | case RAW1394_IOC_ISO_RECV_PACKETS: | ||
2616 | return raw1394_iso_recv_packets(fi, argp); | ||
2617 | case RAW1394_IOC_ISO_RECV_RELEASE_PACKETS: | ||
2618 | return hpsb_iso_recv_release_packets(fi->iso_handle, arg); | ||
2619 | case RAW1394_IOC_ISO_RECV_FLUSH: | ||
2620 | return hpsb_iso_recv_flush(fi->iso_handle); | ||
2621 | case RAW1394_IOC_ISO_SHUTDOWN: | ||
2622 | raw1394_iso_shutdown(fi); | ||
2623 | return 0; | ||
2624 | case RAW1394_IOC_ISO_QUEUE_ACTIVITY: | ||
2625 | queue_rawiso_event(fi); | ||
2626 | return 0; | ||
2627 | default: | ||
2628 | return -EINVAL; | ||
2629 | } | ||
2630 | } | ||
2631 | |||
2632 | static long raw1394_ioctl_xmit(struct file_info *fi, unsigned int cmd, | ||
2633 | void __user *argp) | ||
2634 | { | ||
2635 | switch (cmd) { | ||
2636 | case RAW1394_IOC_ISO_XMIT_START:{ | ||
2637 | int args[2]; | ||
2638 | |||
2639 | if (copy_from_user(&args[0], argp, sizeof(args))) | ||
2640 | return -EFAULT; | ||
2641 | return hpsb_iso_xmit_start(fi->iso_handle, | ||
2642 | args[0], args[1]); | ||
2643 | } | ||
2644 | case RAW1394_IOC_ISO_XMIT_SYNC: | ||
2645 | return hpsb_iso_xmit_sync(fi->iso_handle); | ||
2646 | case RAW1394_IOC_ISO_XMIT_RECV_STOP: | ||
2647 | hpsb_iso_stop(fi->iso_handle); | ||
2648 | return 0; | ||
2649 | case RAW1394_IOC_ISO_GET_STATUS: | ||
2650 | return raw1394_iso_get_status(fi, argp); | ||
2651 | case RAW1394_IOC_ISO_XMIT_PACKETS: | ||
2652 | return raw1394_iso_send_packets(fi, argp); | ||
2653 | case RAW1394_IOC_ISO_SHUTDOWN: | ||
2654 | raw1394_iso_shutdown(fi); | ||
2655 | return 0; | ||
2656 | case RAW1394_IOC_ISO_QUEUE_ACTIVITY: | ||
2657 | queue_rawiso_event(fi); | ||
2658 | return 0; | ||
2659 | default: | ||
2660 | return -EINVAL; | ||
2661 | } | ||
2662 | } | ||
2663 | |||
2664 | /* ioctl is only used for rawiso operations */ | ||
2665 | static long raw1394_ioctl(struct file *file, unsigned int cmd, | ||
2666 | unsigned long arg) | ||
2667 | { | ||
2668 | struct file_info *fi = file->private_data; | ||
2669 | void __user *argp = (void __user *)arg; | ||
2670 | long ret; | ||
2671 | |||
2672 | /* state-independent commands */ | ||
2673 | switch(cmd) { | ||
2674 | case RAW1394_IOC_GET_CYCLE_TIMER: | ||
2675 | return raw1394_read_cycle_timer(fi, argp); | ||
2676 | default: | ||
2677 | break; | ||
2678 | } | ||
2679 | |||
2680 | if (!mutex_trylock(&fi->state_mutex)) | ||
2681 | return -EAGAIN; | ||
2682 | |||
2683 | switch (fi->iso_state) { | ||
2684 | case RAW1394_ISO_INACTIVE: | ||
2685 | ret = raw1394_ioctl_inactive(fi, cmd, argp); | ||
2686 | break; | ||
2687 | case RAW1394_ISO_RECV: | ||
2688 | ret = raw1394_ioctl_recv(fi, cmd, arg); | ||
2689 | break; | ||
2690 | case RAW1394_ISO_XMIT: | ||
2691 | ret = raw1394_ioctl_xmit(fi, cmd, argp); | ||
2692 | break; | ||
2693 | default: | ||
2694 | ret = -EINVAL; | ||
2695 | break; | ||
2696 | } | ||
2697 | |||
2698 | mutex_unlock(&fi->state_mutex); | ||
2699 | |||
2700 | return ret; | ||
2701 | } | ||
2702 | |||
2703 | #ifdef CONFIG_COMPAT | ||
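| /* 32-bit compat layouts and ioctl numbers for the structures whose size or padding differs between 32-bit user space and a 64-bit kernel. */ | ||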
2704 | struct raw1394_iso_packets32 { | ||
2705 | __u32 n_packets; | ||
2706 | compat_uptr_t infos; | ||
2707 | } __attribute__((packed)); | ||
2708 | |||
2709 | struct raw1394_cycle_timer32 { | ||
2710 | __u32 cycle_timer; | ||
2711 | __u64 local_time; | ||
2712 | } | ||
2713 | #if defined(CONFIG_X86_64) || defined(CONFIG_IA64) | ||
2714 | __attribute__((packed)) | ||
2715 | #endif | ||
2716 | ; | ||
2717 | |||
2718 | #define RAW1394_IOC_ISO_RECV_PACKETS32 \ | ||
2719 | _IOW ('#', 0x25, struct raw1394_iso_packets32) | ||
2720 | #define RAW1394_IOC_ISO_XMIT_PACKETS32 \ | ||
2721 | _IOW ('#', 0x27, struct raw1394_iso_packets32) | ||
2722 | #define RAW1394_IOC_GET_CYCLE_TIMER32 \ | ||
2723 | _IOR ('#', 0x30, struct raw1394_cycle_timer32) | ||
2724 | |||
2725 | static long raw1394_iso_xmit_recv_packets32(struct file *file, unsigned int cmd, | ||
2726 | struct raw1394_iso_packets32 __user *arg) | ||
2727 | { | ||
2728 | compat_uptr_t infos32; | ||
2729 | void __user *infos; | ||
2730 | long err = -EFAULT; | ||
2731 | struct raw1394_iso_packets __user *dst = compat_alloc_user_space(sizeof(struct raw1394_iso_packets)); | ||
2732 | |||
2733 | if (!copy_in_user(&dst->n_packets, &arg->n_packets, sizeof arg->n_packets) && | ||
2734 | !copy_from_user(&infos32, &arg->infos, sizeof infos32)) { | ||
2735 | infos = compat_ptr(infos32); | ||
2736 | if (!copy_to_user(&dst->infos, &infos, sizeof infos)) | ||
2737 | err = raw1394_ioctl(file, cmd, (unsigned long)dst); | ||
2738 | } | ||
2739 | return err; | ||
2740 | } | ||
2741 | |||
2742 | static long raw1394_read_cycle_timer32(struct file_info *fi, void __user * uaddr) | ||
2743 | { | ||
2744 | struct raw1394_cycle_timer32 ct; | ||
2745 | int err; | ||
2746 | |||
2747 | err = hpsb_read_cycle_timer(fi->host, &ct.cycle_timer, &ct.local_time); | ||
2748 | if (!err) | ||
2749 | if (copy_to_user(uaddr, &ct, sizeof(ct))) | ||
2750 | err = -EFAULT; | ||
2751 | return err; | ||
2752 | } | ||
2753 | |||
2754 | static long raw1394_compat_ioctl(struct file *file, | ||
2755 | unsigned int cmd, unsigned long arg) | ||
2756 | { | ||
2757 | struct file_info *fi = file->private_data; | ||
2758 | void __user *argp = (void __user *)arg; | ||
2759 | long err; | ||
2760 | |||
2761 | switch (cmd) { | ||
2762 | /* These requests have the same format as long as 'int' has the same size. */ | ||
2763 | case RAW1394_IOC_ISO_RECV_INIT: | ||
2764 | case RAW1394_IOC_ISO_RECV_START: | ||
2765 | case RAW1394_IOC_ISO_RECV_LISTEN_CHANNEL: | ||
2766 | case RAW1394_IOC_ISO_RECV_UNLISTEN_CHANNEL: | ||
2767 | case RAW1394_IOC_ISO_RECV_SET_CHANNEL_MASK: | ||
2768 | case RAW1394_IOC_ISO_RECV_RELEASE_PACKETS: | ||
2769 | case RAW1394_IOC_ISO_RECV_FLUSH: | ||
2770 | case RAW1394_IOC_ISO_XMIT_RECV_STOP: | ||
2771 | case RAW1394_IOC_ISO_XMIT_INIT: | ||
2772 | case RAW1394_IOC_ISO_XMIT_START: | ||
2773 | case RAW1394_IOC_ISO_XMIT_SYNC: | ||
2774 | case RAW1394_IOC_ISO_GET_STATUS: | ||
2775 | case RAW1394_IOC_ISO_SHUTDOWN: | ||
2776 | case RAW1394_IOC_ISO_QUEUE_ACTIVITY: | ||
2777 | err = raw1394_ioctl(file, cmd, arg); | ||
2778 | break; | ||
2779 | /* These requests have a different format. */ | ||
2780 | case RAW1394_IOC_ISO_RECV_PACKETS32: | ||
2781 | err = raw1394_iso_xmit_recv_packets32(file, RAW1394_IOC_ISO_RECV_PACKETS, argp); | ||
2782 | break; | ||
2783 | case RAW1394_IOC_ISO_XMIT_PACKETS32: | ||
2784 | err = raw1394_iso_xmit_recv_packets32(file, RAW1394_IOC_ISO_XMIT_PACKETS, argp); | ||
2785 | break; | ||
2786 | case RAW1394_IOC_GET_CYCLE_TIMER32: | ||
2787 | err = raw1394_read_cycle_timer32(fi, argp); | ||
2788 | break; | ||
2789 | default: | ||
2790 | err = -EINVAL; | ||
2791 | break; | ||
2792 | } | ||
2793 | |||
2794 | return err; | ||
2795 | } | ||
2796 | #endif | ||
2797 | |||
2798 | static unsigned int raw1394_poll(struct file *file, poll_table * pt) | ||
2799 | { | ||
2800 | struct file_info *fi = file->private_data; | ||
2801 | unsigned int mask = POLLOUT | POLLWRNORM; | ||
2802 | unsigned long flags; | ||
2803 | |||
2804 | poll_wait(file, &fi->wait_complete, pt); | ||
2805 | |||
2806 | spin_lock_irqsave(&fi->reqlists_lock, flags); | ||
2807 | if (!list_empty(&fi->req_complete)) { | ||
2808 | mask |= POLLIN | POLLRDNORM; | ||
2809 | } | ||
2810 | spin_unlock_irqrestore(&fi->reqlists_lock, flags); | ||
2811 | |||
2812 | return mask; | ||
2813 | } | ||
2814 | |||
2815 | static int raw1394_open(struct inode *inode, struct file *file) | ||
2816 | { | ||
2817 | struct file_info *fi; | ||
2818 | |||
2819 | fi = kzalloc(sizeof(*fi), GFP_KERNEL); | ||
2820 | if (!fi) | ||
2821 | return -ENOMEM; | ||
2822 | |||
2823 | fi->notification = (u8) RAW1394_NOTIFY_ON; /* busreset notification */ | ||
2824 | |||
2825 | INIT_LIST_HEAD(&fi->list); | ||
2826 | mutex_init(&fi->state_mutex); | ||
2827 | fi->state = opened; | ||
2828 | INIT_LIST_HEAD(&fi->req_pending); | ||
2829 | INIT_LIST_HEAD(&fi->req_complete); | ||
2830 | spin_lock_init(&fi->reqlists_lock); | ||
2831 | init_waitqueue_head(&fi->wait_complete); | ||
2832 | INIT_LIST_HEAD(&fi->addr_list); | ||
2833 | |||
2834 | file->private_data = fi; | ||
2835 | |||
2836 | return nonseekable_open(inode, file); | ||
2837 | } | ||
2838 | |||
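| /* release() entry point: shut down any active iso context, release all ARM address ranges and pending requests, remove user-owned config ROM directories, and drop the host reference. */ | ||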
2839 | static int raw1394_release(struct inode *inode, struct file *file) | ||
2840 | { | ||
2841 | struct file_info *fi = file->private_data; | ||
2842 | struct list_head *lh; | ||
2843 | struct pending_request *req; | ||
2844 | int i, fail; | ||
2845 | int retval = 0; | ||
2846 | struct list_head *entry; | ||
2847 | struct arm_addr *addr = NULL; | ||
2848 | struct host_info *hi; | ||
2849 | struct file_info *fi_hlp = NULL; | ||
2850 | struct arm_addr *arm_addr = NULL; | ||
2851 | int another_host; | ||
2852 | int csr_mod = 0; | ||
2853 | unsigned long flags; | ||
2854 | |||
2855 | if (fi->iso_state != RAW1394_ISO_INACTIVE) | ||
2856 | raw1394_iso_shutdown(fi); | ||
2857 | |||
2858 | spin_lock_irqsave(&host_info_lock, flags); | ||
2859 | |||
2860 | fail = 0; | ||
2861 | /* tear down all address entries registered through this file handle */ | ||
2862 | |||
2863 | while (!list_empty(&fi->addr_list)) { | ||
2864 | another_host = 0; | ||
2865 | lh = fi->addr_list.next; | ||
2866 | addr = list_entry(lh, struct arm_addr, addr_list); | ||
2867 | /* does another host have a valid address entry covering | ||
2868 | the same address range? */ | ||
2869 | list_for_each_entry(hi, &host_info_list, list) { | ||
2870 | if (hi->host != fi->host) { | ||
2871 | list_for_each_entry(fi_hlp, &hi->file_info_list, | ||
2872 | list) { | ||
2873 | entry = fi_hlp->addr_list.next; | ||
2874 | while (entry != &(fi_hlp->addr_list)) { | ||
2875 | arm_addr = list_entry(entry, struct | ||
2876 | arm_addr, | ||
2877 | addr_list); | ||
2878 | if (arm_addr->start == | ||
2879 | addr->start) { | ||
2880 | DBGMSG("raw1394_release: " | ||
2881 | "another host owns " | ||
2882 | "same address range"); | ||
2884 | another_host = 1; | ||
2885 | break; | ||
2886 | } | ||
2887 | entry = entry->next; | ||
2888 | } | ||
2889 | if (another_host) { | ||
2890 | break; | ||
2891 | } | ||
2892 | } | ||
2893 | } | ||
2894 | } | ||
2895 | if (!another_host) { | ||
2896 | DBGMSG("raw1394_release: call hpsb_arm_unregister"); | ||
2897 | retval = | ||
2898 | hpsb_unregister_addrspace(&raw1394_highlevel, | ||
2899 | fi->host, addr->start); | ||
2900 | if (!retval) { | ||
2901 | ++fail; | ||
2902 | printk(KERN_ERR | ||
2903 | "raw1394_release arm_Unregister failed\n"); | ||
2904 | } | ||
2905 | } | ||
2906 | DBGMSG("raw1394_release: delete addr_entry from list"); | ||
2907 | list_del(&addr->addr_list); | ||
2908 | vfree(addr->addr_space_buffer); | ||
2909 | kfree(addr); | ||
2910 | } /* while */ | ||
2911 | spin_unlock_irqrestore(&host_info_lock, flags); | ||
2912 | if (fail > 0) { | ||
2913 | printk(KERN_ERR "raw1394: error(s) occurred during " | ||
2914 | "addr_list release\n"); | ||
2915 | } | ||
2916 | |||
2917 | for (;;) { | ||
2918 | /* This locked section guarantees that neither | ||
2919 | * complete nor pending requests exist once i!=0 */ | ||
2920 | spin_lock_irqsave(&fi->reqlists_lock, flags); | ||
2921 | while ((req = __next_complete_req(fi))) | ||
2922 | free_pending_request(req); | ||
2923 | |||
2924 | i = list_empty(&fi->req_pending); | ||
2925 | spin_unlock_irqrestore(&fi->reqlists_lock, flags); | ||
2926 | |||
2927 | if (i) | ||
2928 | break; | ||
2929 | /* | ||
2930 | * Sleep until more requests can be freed. | ||
2931 | * | ||
2932 | * NB: We call the macro wait_event() with a condition argument | ||
2933 | * with side effect. This is only possible because the side | ||
2934 | * effect does not occur until the condition becomes true, and | ||
2935 | * wait_event() won't evaluate the condition again after that. | ||
2936 | */ | ||
2937 | wait_event(fi->wait_complete, (req = next_complete_req(fi))); | ||
2938 | free_pending_request(req); | ||
2939 | } | ||
2940 | |||
2941 | /* Remove any sub-trees left by user space programs */ | ||
2942 | for (i = 0; i < RAW1394_MAX_USER_CSR_DIRS; i++) { | ||
2943 | struct csr1212_dentry *dentry; | ||
2944 | if (!fi->csr1212_dirs[i]) | ||
2945 | continue; | ||
2946 | for (dentry = | ||
2947 | fi->csr1212_dirs[i]->value.directory.dentries_head; dentry; | ||
2948 | dentry = dentry->next) { | ||
2949 | csr1212_detach_keyval_from_directory(fi->host->csr.rom-> | ||
2950 | root_kv, | ||
2951 | dentry->kv); | ||
2952 | } | ||
2953 | csr1212_release_keyval(fi->csr1212_dirs[i]); | ||
2954 | fi->csr1212_dirs[i] = NULL; | ||
2955 | csr_mod = 1; | ||
2956 | } | ||
2957 | |||
2958 | if ((csr_mod || fi->cfgrom_upd) | ||
2959 | && hpsb_update_config_rom_image(fi->host) < 0) | ||
2960 | HPSB_ERR("Failed to generate Configuration ROM image for host %d", | ||
2961 | fi->host->id); | ||
2963 | |||
2964 | if (fi->state == connected) { | ||
2965 | spin_lock_irqsave(&host_info_lock, flags); | ||
2966 | list_del(&fi->list); | ||
2967 | spin_unlock_irqrestore(&host_info_lock, flags); | ||
2968 | |||
2969 | put_device(&fi->host->device); | ||
2970 | } | ||
2971 | |||
2972 | spin_lock_irqsave(&host_info_lock, flags); | ||
2973 | if (fi->host) | ||
2974 | module_put(fi->host->driver->owner); | ||
2975 | spin_unlock_irqrestore(&host_info_lock, flags); | ||
2976 | |||
2977 | kfree(fi); | ||
2978 | |||
2979 | return 0; | ||
2980 | } | ||
2981 | |||
2982 | /*** HOTPLUG STUFF **********************************************************/ | ||
2983 | /* | ||
2984 | * Export information about protocols/devices supported by this driver. | ||
2985 | */ | ||
2986 | #ifdef MODULE | ||
2987 | static const struct ieee1394_device_id raw1394_id_table[] = { | ||
2988 | { | ||
2989 | .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION, | ||
2990 | .specifier_id = AVC_UNIT_SPEC_ID_ENTRY & 0xffffff, | ||
2991 | .version = AVC_SW_VERSION_ENTRY & 0xffffff}, | ||
2992 | { | ||
2993 | .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION, | ||
2994 | .specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff, | ||
2995 | .version = CAMERA_SW_VERSION_ENTRY & 0xffffff}, | ||
2996 | { | ||
2997 | .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION, | ||
2998 | .specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff, | ||
2999 | .version = (CAMERA_SW_VERSION_ENTRY + 1) & 0xffffff}, | ||
3000 | { | ||
3001 | .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION, | ||
3002 | .specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff, | ||
3003 | .version = (CAMERA_SW_VERSION_ENTRY + 2) & 0xffffff}, | ||
3004 | {} | ||
3005 | }; | ||
3006 | |||
3007 | MODULE_DEVICE_TABLE(ieee1394, raw1394_id_table); | ||
3008 | #endif /* MODULE */ | ||
3009 | |||
3010 | static struct hpsb_protocol_driver raw1394_driver = { | ||
3011 | .name = "raw1394", | ||
3012 | }; | ||
3013 | |||
3014 | /******************************************************************************/ | ||
3015 | |||
3016 | static struct hpsb_highlevel raw1394_highlevel = { | ||
3017 | .name = RAW1394_DEVICE_NAME, | ||
3018 | .add_host = add_host, | ||
3019 | .remove_host = remove_host, | ||
3020 | .host_reset = host_reset, | ||
3021 | .fcp_request = fcp_request, | ||
3022 | }; | ||
3023 | |||
3024 | static struct cdev raw1394_cdev; | ||
3025 | static const struct file_operations raw1394_fops = { | ||
3026 | .owner = THIS_MODULE, | ||
3027 | .read = raw1394_read, | ||
3028 | .write = raw1394_write, | ||
3029 | .mmap = raw1394_mmap, | ||
3030 | .unlocked_ioctl = raw1394_ioctl, | ||
3031 | #ifdef CONFIG_COMPAT | ||
3032 | .compat_ioctl = raw1394_compat_ioctl, | ||
3033 | #endif | ||
3034 | .poll = raw1394_poll, | ||
3035 | .open = raw1394_open, | ||
3036 | .release = raw1394_release, | ||
3037 | .llseek = no_llseek, | ||
3038 | }; | ||
3039 | |||
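| /* Module init: register the highlevel driver, create the device node, add the character device, and register the protocol driver. */ | ||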
3040 | static int __init init_raw1394(void) | ||
3041 | { | ||
3042 | int ret = 0; | ||
3043 | |||
3044 | hpsb_register_highlevel(&raw1394_highlevel); | ||
3045 | |||
3046 | if (IS_ERR(device_create(hpsb_protocol_class, NULL, | ||
3047 | MKDEV(IEEE1394_MAJOR, | ||
3048 | IEEE1394_MINOR_BLOCK_RAW1394 * 16), | ||
3049 | NULL, RAW1394_DEVICE_NAME))) { | ||
3051 | ret = -EFAULT; | ||
3052 | goto out_unreg; | ||
3053 | } | ||
3054 | |||
3055 | cdev_init(&raw1394_cdev, &raw1394_fops); | ||
3056 | raw1394_cdev.owner = THIS_MODULE; | ||
3057 | ret = cdev_add(&raw1394_cdev, IEEE1394_RAW1394_DEV, 1); | ||
3058 | if (ret) { | ||
3059 | HPSB_ERR("raw1394 failed to register minor device block"); | ||
3060 | goto out_dev; | ||
3061 | } | ||
3062 | |||
3063 | HPSB_INFO("raw1394: /dev/%s device initialized", RAW1394_DEVICE_NAME); | ||
3064 | |||
3065 | ret = hpsb_register_protocol(&raw1394_driver); | ||
3066 | if (ret) { | ||
3067 | HPSB_ERR("raw1394: failed to register protocol"); | ||
3068 | cdev_del(&raw1394_cdev); | ||
3069 | goto out_dev; | ||
3070 | } | ||
3071 | |||
3072 | goto out; | ||
3073 | |||
3074 | out_dev: | ||
3075 | device_destroy(hpsb_protocol_class, | ||
3076 | MKDEV(IEEE1394_MAJOR, | ||
3077 | IEEE1394_MINOR_BLOCK_RAW1394 * 16)); | ||
3078 | out_unreg: | ||
3079 | hpsb_unregister_highlevel(&raw1394_highlevel); | ||
3080 | out: | ||
3081 | return ret; | ||
3082 | } | ||
3083 | |||
3084 | static void __exit cleanup_raw1394(void) | ||
3085 | { | ||
3086 | device_destroy(hpsb_protocol_class, | ||
3087 | MKDEV(IEEE1394_MAJOR, | ||
3088 | IEEE1394_MINOR_BLOCK_RAW1394 * 16)); | ||
3089 | cdev_del(&raw1394_cdev); | ||
3090 | hpsb_unregister_highlevel(&raw1394_highlevel); | ||
3091 | hpsb_unregister_protocol(&raw1394_driver); | ||
3092 | } | ||
3093 | |||
3094 | module_init(init_raw1394); | ||
3095 | module_exit(cleanup_raw1394); | ||
3096 | MODULE_LICENSE("GPL"); | ||