Diffstat (limited to 'drivers/firewire/core-transaction.c')
-rw-r--r-- | drivers/firewire/core-transaction.c | 978 |
1 file changed, 978 insertions, 0 deletions
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
new file mode 100644
index 00000000000..479b22f5a1e
--- /dev/null
+++ b/drivers/firewire/core-transaction.c
@@ -0,0 +1,978 @@ | |||
1 | /* | ||
2 | * Core IEEE1394 transaction logic | ||
3 | * | ||
4 | * Copyright (C) 2004-2006 Kristian Hoegsberg <krh@bitplanet.net> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software Foundation, | ||
18 | * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
19 | */ | ||
20 | |||
21 | #include <linux/bug.h> | ||
22 | #include <linux/completion.h> | ||
23 | #include <linux/device.h> | ||
24 | #include <linux/errno.h> | ||
25 | #include <linux/firewire.h> | ||
26 | #include <linux/firewire-constants.h> | ||
27 | #include <linux/fs.h> | ||
28 | #include <linux/init.h> | ||
29 | #include <linux/idr.h> | ||
30 | #include <linux/jiffies.h> | ||
31 | #include <linux/kernel.h> | ||
32 | #include <linux/list.h> | ||
33 | #include <linux/module.h> | ||
34 | #include <linux/slab.h> | ||
35 | #include <linux/spinlock.h> | ||
36 | #include <linux/string.h> | ||
37 | #include <linux/timer.h> | ||
38 | #include <linux/types.h> | ||
39 | |||
40 | #include <asm/byteorder.h> | ||
41 | |||
42 | #include "core.h" | ||
43 | |||
44 | #define HEADER_PRI(pri) ((pri) << 0) | ||
45 | #define HEADER_TCODE(tcode) ((tcode) << 4) | ||
46 | #define HEADER_RETRY(retry) ((retry) << 8) | ||
47 | #define HEADER_TLABEL(tlabel) ((tlabel) << 10) | ||
48 | #define HEADER_DESTINATION(destination) ((destination) << 16) | ||
49 | #define HEADER_SOURCE(source) ((source) << 16) | ||
50 | #define HEADER_RCODE(rcode) ((rcode) << 12) | ||
51 | #define HEADER_OFFSET_HIGH(offset_high) ((offset_high) << 0) | ||
52 | #define HEADER_DATA_LENGTH(length) ((length) << 16) | ||
53 | #define HEADER_EXTENDED_TCODE(tcode) ((tcode) << 0) | ||
54 | |||
55 | #define HEADER_GET_TCODE(q) (((q) >> 4) & 0x0f) | ||
56 | #define HEADER_GET_TLABEL(q) (((q) >> 10) & 0x3f) | ||
57 | #define HEADER_GET_RCODE(q) (((q) >> 12) & 0x0f) | ||
58 | #define HEADER_GET_DESTINATION(q) (((q) >> 16) & 0xffff) | ||
59 | #define HEADER_GET_SOURCE(q) (((q) >> 16) & 0xffff) | ||
60 | #define HEADER_GET_OFFSET_HIGH(q) (((q) >> 0) & 0xffff) | ||
61 | #define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff) | ||
62 | #define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff) | ||
63 | |||
64 | #define HEADER_DESTINATION_IS_BROADCAST(q) \ | ||
65 | (((q) & HEADER_DESTINATION(0x3f)) == HEADER_DESTINATION(0x3f)) | ||
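
As an illustration of the field layout these macros encode (a sketch, not part of the patch; RETRY_X and the TCODE_* constants come from the headers included above):

static inline void header_macro_example(void)
{
	/* First header quadlet of a read-quadlet request, tlabel 5, to node 0xffc1. */
	u32 q = HEADER_RETRY(RETRY_X) |
		HEADER_TLABEL(5) |
		HEADER_TCODE(TCODE_READ_QUADLET_REQUEST) |
		HEADER_DESTINATION(0xffc1);

	/* The GET macros recover the same fields from the composed quadlet. */
	WARN_ON(HEADER_GET_TCODE(q) != TCODE_READ_QUADLET_REQUEST);
	WARN_ON(HEADER_GET_TLABEL(q) != 5);
	WARN_ON(HEADER_GET_DESTINATION(q) != 0xffc1);
}
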
66 | |||
67 | #define PHY_PACKET_CONFIG 0x0 | ||
68 | #define PHY_PACKET_LINK_ON 0x1 | ||
69 | #define PHY_PACKET_SELF_ID 0x2 | ||
70 | |||
71 | #define PHY_CONFIG_GAP_COUNT(gap_count) (((gap_count) << 16) | (1 << 22)) | ||
72 | #define PHY_CONFIG_ROOT_ID(node_id) ((((node_id) & 0x3f) << 24) | (1 << 23)) | ||
73 | #define PHY_IDENTIFIER(id) ((id) << 30) | ||
74 | |||
75 | static int close_transaction(struct fw_transaction *transaction, | ||
76 | struct fw_card *card, int rcode) | ||
77 | { | ||
78 | struct fw_transaction *t; | ||
79 | unsigned long flags; | ||
80 | |||
81 | spin_lock_irqsave(&card->lock, flags); | ||
82 | list_for_each_entry(t, &card->transaction_list, link) { | ||
83 | if (t == transaction) { | ||
84 | list_del(&t->link); | ||
85 | card->tlabel_mask &= ~(1ULL << t->tlabel); | ||
86 | break; | ||
87 | } | ||
88 | } | ||
89 | spin_unlock_irqrestore(&card->lock, flags); | ||
90 | |||
91 | if (&t->link != &card->transaction_list) { | ||
92 | t->callback(card, rcode, NULL, 0, t->callback_data); | ||
93 | return 0; | ||
94 | } | ||
95 | |||
96 | return -ENOENT; | ||
97 | } | ||
98 | |||
99 | /* | ||
100 | * Only valid for transactions that are potentially pending (i.e. have | ||
101 | * been sent). | ||
102 | */ | ||
103 | int fw_cancel_transaction(struct fw_card *card, | ||
104 | struct fw_transaction *transaction) | ||
105 | { | ||
106 | /* | ||
107 | * Cancel the packet transmission if it's still queued. That | ||
108 | * will call the packet transmission callback which cancels | ||
109 | * the transaction. | ||
110 | */ | ||
111 | |||
112 | if (card->driver->cancel_packet(card, &transaction->packet) == 0) | ||
113 | return 0; | ||
114 | |||
115 | /* | ||
116 | * If the request packet has already been sent, we need to see | ||
117 | * if the transaction is still pending and remove it in that case. | ||
118 | */ | ||
119 | |||
120 | return close_transaction(transaction, card, RCODE_CANCELLED); | ||
121 | } | ||
122 | EXPORT_SYMBOL(fw_cancel_transaction); | ||
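
As a usage sketch (not part of the patch; the my_ name is hypothetical), a caller that still owns the struct fw_transaction it passed to fw_send_request() can abort it, e.g. on device removal:

static void my_abort_pending_request(struct fw_card *card,
				     struct fw_transaction *t)
{
	/*
	 * If the request packet is still queued the driver drops it and the
	 * transaction callback reports the cancellation; if it was already
	 * sent but is still pending, the transaction is closed with
	 * RCODE_CANCELLED.  -ENOENT means it had already completed and the
	 * callback has seen (or will see) its real rcode.
	 */
	if (fw_cancel_transaction(card, t) == -ENOENT)
		pr_debug("transaction already completed\n");
}
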
123 | |||
124 | static void transmit_complete_callback(struct fw_packet *packet, | ||
125 | struct fw_card *card, int status) | ||
126 | { | ||
127 | struct fw_transaction *t = | ||
128 | container_of(packet, struct fw_transaction, packet); | ||
129 | |||
130 | switch (status) { | ||
131 | case ACK_COMPLETE: | ||
132 | close_transaction(t, card, RCODE_COMPLETE); | ||
133 | break; | ||
134 | case ACK_PENDING: | ||
135 | t->timestamp = packet->timestamp; | ||
136 | break; | ||
137 | case ACK_BUSY_X: | ||
138 | case ACK_BUSY_A: | ||
139 | case ACK_BUSY_B: | ||
140 | close_transaction(t, card, RCODE_BUSY); | ||
141 | break; | ||
142 | case ACK_DATA_ERROR: | ||
143 | close_transaction(t, card, RCODE_DATA_ERROR); | ||
144 | break; | ||
145 | case ACK_TYPE_ERROR: | ||
146 | close_transaction(t, card, RCODE_TYPE_ERROR); | ||
147 | break; | ||
148 | default: | ||
149 | /* | ||
150 | * In this case the ack is really a juju specific | ||
151 | * rcode, so just forward that to the callback. | ||
152 | */ | ||
153 | close_transaction(t, card, status); | ||
154 | break; | ||
155 | } | ||
156 | } | ||
157 | |||
158 | static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel, | ||
159 | int destination_id, int source_id, int generation, int speed, | ||
160 | unsigned long long offset, void *payload, size_t length) | ||
161 | { | ||
162 | int ext_tcode; | ||
163 | |||
164 | if (tcode == TCODE_STREAM_DATA) { | ||
165 | packet->header[0] = | ||
166 | HEADER_DATA_LENGTH(length) | | ||
167 | destination_id | | ||
168 | HEADER_TCODE(TCODE_STREAM_DATA); | ||
169 | packet->header_length = 4; | ||
170 | packet->payload = payload; | ||
171 | packet->payload_length = length; | ||
172 | |||
173 | goto common; | ||
174 | } | ||
175 | |||
176 | if (tcode > 0x10) { | ||
177 | ext_tcode = tcode & ~0x10; | ||
178 | tcode = TCODE_LOCK_REQUEST; | ||
179 | } else | ||
180 | ext_tcode = 0; | ||
181 | |||
182 | packet->header[0] = | ||
183 | HEADER_RETRY(RETRY_X) | | ||
184 | HEADER_TLABEL(tlabel) | | ||
185 | HEADER_TCODE(tcode) | | ||
186 | HEADER_DESTINATION(destination_id); | ||
187 | packet->header[1] = | ||
188 | HEADER_OFFSET_HIGH(offset >> 32) | HEADER_SOURCE(source_id); | ||
189 | packet->header[2] = | ||
190 | offset; | ||
191 | |||
192 | switch (tcode) { | ||
193 | case TCODE_WRITE_QUADLET_REQUEST: | ||
194 | packet->header[3] = *(u32 *)payload; | ||
195 | packet->header_length = 16; | ||
196 | packet->payload_length = 0; | ||
197 | break; | ||
198 | |||
199 | case TCODE_LOCK_REQUEST: | ||
200 | case TCODE_WRITE_BLOCK_REQUEST: | ||
201 | packet->header[3] = | ||
202 | HEADER_DATA_LENGTH(length) | | ||
203 | HEADER_EXTENDED_TCODE(ext_tcode); | ||
204 | packet->header_length = 16; | ||
205 | packet->payload = payload; | ||
206 | packet->payload_length = length; | ||
207 | break; | ||
208 | |||
209 | case TCODE_READ_QUADLET_REQUEST: | ||
210 | packet->header_length = 12; | ||
211 | packet->payload_length = 0; | ||
212 | break; | ||
213 | |||
214 | case TCODE_READ_BLOCK_REQUEST: | ||
215 | packet->header[3] = | ||
216 | HEADER_DATA_LENGTH(length) | | ||
217 | HEADER_EXTENDED_TCODE(ext_tcode); | ||
218 | packet->header_length = 16; | ||
219 | packet->payload_length = 0; | ||
220 | break; | ||
221 | } | ||
222 | common: | ||
223 | packet->speed = speed; | ||
224 | packet->generation = generation; | ||
225 | packet->ack = 0; | ||
226 | packet->payload_bus = 0; | ||
227 | } | ||
228 | |||
229 | /** | ||
230 | * This function provides low-level access to the IEEE1394 transaction | ||
231 | * logic. Most C programs would use either fw_read(), fw_write() or | ||
232 | * fw_lock() instead - those functions are convenience wrappers for | ||
233 | * this function. The fw_send_request() function is primarily | ||
234 | * provided as a flexible, one-stop entry point for language bindings | ||
235 | * and protocol bindings. | ||
236 | * | ||
237 | * FIXME: Document this function further, in particular the possible | ||
238 | * values for rcode in the callback. In short, we map ACK_COMPLETE to | ||
239 | * RCODE_COMPLETE, internal errors set errno and set rcode to | ||
240 | * RCODE_SEND_ERROR (which is out of range for standard ieee1394 | ||
241 | * rcodes). All other rcodes are forwarded unchanged. For all | ||
242 | * errors, payload is NULL, length is 0. | ||
243 | * | ||
244 | * The callback is not guaranteed to have run by the time this | ||
245 | * function returns, although in some cases (ACK_COMPLETE and | ||
246 | * errors) it is invoked before the return. | ||
247 | * | ||
248 | * The payload is only used for write requests and must not be freed | ||
249 | * until the callback has been called. | ||
250 | * | ||
251 | * @param card the card from which to send the request | ||
252 | * @param tcode the tcode for this transaction. Do not use | ||
253 | * TCODE_LOCK_REQUEST directly, instead use TCODE_LOCK_MASK_SWAP | ||
254 | * etc. to specify tcode and ext_tcode. | ||
255 | * @param destination_id the destination node ID (bus ID and PHY ID concatenated) | ||
256 | * @param generation the generation for which node_id is valid | ||
257 | * @param speed the speed to use for sending the request | ||
258 | * @param offset the 48 bit offset on the destination node | ||
259 | * @param payload the data payload for the request subaction | ||
260 | * @param length the length in bytes of the data to be read or written | ||
261 | * @param callback function to be called when the transaction is completed | ||
262 | * @param callback_data pointer to arbitrary data, which will be | ||
263 | * passed to the callback | ||
264 | * | ||
265 | * In case of asynchronous stream packets i.e. TCODE_STREAM_DATA, the caller | ||
266 | * needs to synthesize @destination_id with fw_stream_packet_destination_id(). | ||
267 | */ | ||
268 | void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode, | ||
269 | int destination_id, int generation, int speed, | ||
270 | unsigned long long offset, void *payload, size_t length, | ||
271 | fw_transaction_callback_t callback, void *callback_data) | ||
272 | { | ||
273 | unsigned long flags; | ||
274 | int tlabel; | ||
275 | |||
276 | /* | ||
277 | * Bump the flush timer up 100ms first of all so we | ||
278 | * don't race with a flush timer callback. | ||
279 | */ | ||
280 | |||
281 | mod_timer(&card->flush_timer, jiffies + DIV_ROUND_UP(HZ, 10)); | ||
282 | |||
283 | /* | ||
284 | * Allocate tlabel from the bitmap and put the transaction on | ||
285 | * the list while holding the card spinlock. | ||
286 | */ | ||
287 | |||
288 | spin_lock_irqsave(&card->lock, flags); | ||
289 | |||
290 | tlabel = card->current_tlabel; | ||
291 | if (card->tlabel_mask & (1ULL << tlabel)) { | ||
292 | spin_unlock_irqrestore(&card->lock, flags); | ||
293 | callback(card, RCODE_SEND_ERROR, NULL, 0, callback_data); | ||
294 | return; | ||
295 | } | ||
296 | |||
297 | card->current_tlabel = (card->current_tlabel + 1) & 0x3f; | ||
298 | card->tlabel_mask |= (1ULL << tlabel); | ||
299 | |||
300 | t->node_id = destination_id; | ||
301 | t->tlabel = tlabel; | ||
302 | t->callback = callback; | ||
303 | t->callback_data = callback_data; | ||
304 | |||
305 | fw_fill_request(&t->packet, tcode, t->tlabel, | ||
306 | destination_id, card->node_id, generation, | ||
307 | speed, offset, payload, length); | ||
308 | t->packet.callback = transmit_complete_callback; | ||
309 | |||
310 | list_add_tail(&t->link, &card->transaction_list); | ||
311 | |||
312 | spin_unlock_irqrestore(&card->lock, flags); | ||
313 | |||
314 | card->driver->send_request(card, &t->packet); | ||
315 | } | ||
316 | EXPORT_SYMBOL(fw_send_request); | ||
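
A sketch of asynchronous use (not part of the patch; the my_* names are hypothetical, and the big-endian handling of the returned quadlet is an assumption based on how firewire-core's own helpers treat quadlet read responses):

#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <asm/byteorder.h>

struct my_read_ctx {
	struct fw_transaction t;	/* must stay valid until my_read_done runs */
	u32 value;
};

static void my_read_done(struct fw_card *card, int rcode,
			 void *payload, size_t length, void *data)
{
	struct my_read_ctx *ctx = data;

	if (rcode == RCODE_COMPLETE)
		/* assumed: quadlet read responses carry the data in wire (big-endian) order */
		ctx->value = be32_to_cpu(*(__be32 *)payload);
	else
		pr_warn("read failed, rcode %d\n", rcode);
}

static void my_start_read(struct fw_card *card, struct my_read_ctx *ctx,
			  int destination_id, int generation, int speed)
{
	fw_send_request(card, &ctx->t, TCODE_READ_QUADLET_REQUEST,
			destination_id, generation, speed,
			CSR_REGISTER_BASE + CSR_CONFIG_ROM,	/* first config ROM quadlet */
			NULL, 4, my_read_done, ctx);
}
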
317 | |||
318 | struct transaction_callback_data { | ||
319 | struct completion done; | ||
320 | void *payload; | ||
321 | int rcode; | ||
322 | }; | ||
323 | |||
324 | static void transaction_callback(struct fw_card *card, int rcode, | ||
325 | void *payload, size_t length, void *data) | ||
326 | { | ||
327 | struct transaction_callback_data *d = data; | ||
328 | |||
329 | if (rcode == RCODE_COMPLETE) | ||
330 | memcpy(d->payload, payload, length); | ||
331 | d->rcode = rcode; | ||
332 | complete(&d->done); | ||
333 | } | ||
334 | |||
335 | /** | ||
336 | * fw_run_transaction - send request and sleep until transaction is completed | ||
337 | * | ||
338 | * Returns the RCODE. | ||
339 | */ | ||
340 | int fw_run_transaction(struct fw_card *card, int tcode, int destination_id, | ||
341 | int generation, int speed, unsigned long long offset, | ||
342 | void *payload, size_t length) | ||
343 | { | ||
344 | struct transaction_callback_data d; | ||
345 | struct fw_transaction t; | ||
346 | |||
347 | init_completion(&d.done); | ||
348 | d.payload = payload; | ||
349 | fw_send_request(card, &t, tcode, destination_id, generation, speed, | ||
350 | offset, payload, length, transaction_callback, &d); | ||
351 | wait_for_completion(&d.done); | ||
352 | |||
353 | return d.rcode; | ||
354 | } | ||
355 | EXPORT_SYMBOL(fw_run_transaction); | ||
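
For example, a synchronous config ROM quadlet read could look like the sketch below (not part of the patch). The struct fw_device field names and the big-endian conversion follow conventions used elsewhere in firewire-core, but treat them as assumptions; the call sleeps, so it must not be made from atomic context.

#include <linux/firewire.h>
#include <linux/firewire-constants.h>

static int my_read_rom_quadlet(struct fw_device *device, int index, u32 *value)
{
	__be32 data;
	int rcode;

	rcode = fw_run_transaction(device->card, TCODE_READ_QUADLET_REQUEST,
				   device->node_id, device->generation,
				   device->max_speed,
				   CSR_REGISTER_BASE + CSR_CONFIG_ROM + index * 4,
				   &data, 4);
	if (rcode != RCODE_COMPLETE)
		return -EIO;	/* any non-complete rcode is treated as an I/O error here */

	*value = be32_to_cpu(data);
	return 0;
}
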
356 | |||
357 | static DEFINE_MUTEX(phy_config_mutex); | ||
358 | static DECLARE_COMPLETION(phy_config_done); | ||
359 | |||
360 | static void transmit_phy_packet_callback(struct fw_packet *packet, | ||
361 | struct fw_card *card, int status) | ||
362 | { | ||
363 | complete(&phy_config_done); | ||
364 | } | ||
365 | |||
366 | static struct fw_packet phy_config_packet = { | ||
367 | .header_length = 8, | ||
368 | .payload_length = 0, | ||
369 | .speed = SCODE_100, | ||
370 | .callback = transmit_phy_packet_callback, | ||
371 | }; | ||
372 | |||
373 | void fw_send_phy_config(struct fw_card *card, | ||
374 | int node_id, int generation, int gap_count) | ||
375 | { | ||
376 | long timeout = DIV_ROUND_UP(HZ, 10); | ||
377 | u32 data = PHY_IDENTIFIER(PHY_PACKET_CONFIG) | | ||
378 | PHY_CONFIG_ROOT_ID(node_id) | | ||
379 | PHY_CONFIG_GAP_COUNT(gap_count); | ||
380 | |||
381 | mutex_lock(&phy_config_mutex); | ||
382 | |||
383 | phy_config_packet.header[0] = data; | ||
384 | phy_config_packet.header[1] = ~data; | ||
385 | phy_config_packet.generation = generation; | ||
386 | INIT_COMPLETION(phy_config_done); | ||
387 | |||
388 | card->driver->send_request(card, &phy_config_packet); | ||
389 | wait_for_completion_timeout(&phy_config_done, timeout); | ||
390 | |||
391 | mutex_unlock(&phy_config_mutex); | ||
392 | } | ||
393 | |||
394 | void fw_flush_transactions(struct fw_card *card) | ||
395 | { | ||
396 | struct fw_transaction *t, *next; | ||
397 | struct list_head list; | ||
398 | unsigned long flags; | ||
399 | |||
400 | INIT_LIST_HEAD(&list); | ||
401 | spin_lock_irqsave(&card->lock, flags); | ||
402 | list_splice_init(&card->transaction_list, &list); | ||
403 | card->tlabel_mask = 0; | ||
404 | spin_unlock_irqrestore(&card->lock, flags); | ||
405 | |||
406 | list_for_each_entry_safe(t, next, &list, link) { | ||
407 | card->driver->cancel_packet(card, &t->packet); | ||
408 | |||
409 | /* | ||
410 | * At this point cancel_packet will never call the | ||
411 | * transaction callback, since we just took all the | ||
412 | * transactions out of the list. So do it here. | ||
413 | */ | ||
414 | t->callback(card, RCODE_CANCELLED, NULL, 0, t->callback_data); | ||
415 | } | ||
416 | } | ||
417 | |||
418 | static struct fw_address_handler *lookup_overlapping_address_handler( | ||
419 | struct list_head *list, unsigned long long offset, size_t length) | ||
420 | { | ||
421 | struct fw_address_handler *handler; | ||
422 | |||
423 | list_for_each_entry(handler, list, link) { | ||
424 | if (handler->offset < offset + length && | ||
425 | offset < handler->offset + handler->length) | ||
426 | return handler; | ||
427 | } | ||
428 | |||
429 | return NULL; | ||
430 | } | ||
431 | |||
432 | static struct fw_address_handler *lookup_enclosing_address_handler( | ||
433 | struct list_head *list, unsigned long long offset, size_t length) | ||
434 | { | ||
435 | struct fw_address_handler *handler; | ||
436 | |||
437 | list_for_each_entry(handler, list, link) { | ||
438 | if (handler->offset <= offset && | ||
439 | offset + length <= handler->offset + handler->length) | ||
440 | return handler; | ||
441 | } | ||
442 | |||
443 | return NULL; | ||
444 | } | ||
445 | |||
446 | static DEFINE_SPINLOCK(address_handler_lock); | ||
447 | static LIST_HEAD(address_handler_list); | ||
448 | |||
449 | const struct fw_address_region fw_high_memory_region = | ||
450 | { .start = 0x000100000000ULL, .end = 0xffffe0000000ULL, }; | ||
451 | EXPORT_SYMBOL(fw_high_memory_region); | ||
452 | |||
453 | #if 0 | ||
454 | const struct fw_address_region fw_low_memory_region = | ||
455 | { .start = 0x000000000000ULL, .end = 0x000100000000ULL, }; | ||
456 | const struct fw_address_region fw_private_region = | ||
457 | { .start = 0xffffe0000000ULL, .end = 0xfffff0000000ULL, }; | ||
458 | const struct fw_address_region fw_csr_region = | ||
459 | { .start = CSR_REGISTER_BASE, | ||
460 | .end = CSR_REGISTER_BASE | CSR_CONFIG_ROM_END, }; | ||
461 | const struct fw_address_region fw_unit_space_region = | ||
462 | { .start = 0xfffff0000900ULL, .end = 0x1000000000000ULL, }; | ||
463 | #endif /* 0 */ | ||
464 | |||
465 | /** | ||
466 | * fw_core_add_address_handler - register for incoming requests | ||
467 | * @handler: callback | ||
468 | * @region: region in the IEEE 1212 node space address range | ||
469 | * | ||
470 | * region->start, ->end, and handler->length have to be quadlet-aligned. | ||
471 | * | ||
472 | * When a request is received that falls within the specified address range, | ||
473 | * the specified callback is invoked. The parameters passed to the callback | ||
474 | * give the details of the particular request. | ||
475 | * | ||
476 | * Return value: 0 on success, non-zero otherwise. | ||
477 | * The start offset of the handler's address region is determined by | ||
478 | * fw_core_add_address_handler() and is returned in handler->offset. | ||
479 | */ | ||
480 | int fw_core_add_address_handler(struct fw_address_handler *handler, | ||
481 | const struct fw_address_region *region) | ||
482 | { | ||
483 | struct fw_address_handler *other; | ||
484 | unsigned long flags; | ||
485 | int ret = -EBUSY; | ||
486 | |||
487 | if (region->start & 0xffff000000000003ULL || | ||
488 | region->end & 0xffff000000000003ULL || | ||
489 | region->start >= region->end || | ||
490 | handler->length & 3 || | ||
491 | handler->length == 0) | ||
492 | return -EINVAL; | ||
493 | |||
494 | spin_lock_irqsave(&address_handler_lock, flags); | ||
495 | |||
496 | handler->offset = region->start; | ||
497 | while (handler->offset + handler->length <= region->end) { | ||
498 | other = | ||
499 | lookup_overlapping_address_handler(&address_handler_list, | ||
500 | handler->offset, | ||
501 | handler->length); | ||
502 | if (other != NULL) { | ||
503 | handler->offset += other->length; | ||
504 | } else { | ||
505 | list_add_tail(&handler->link, &address_handler_list); | ||
506 | ret = 0; | ||
507 | break; | ||
508 | } | ||
509 | } | ||
510 | |||
511 | spin_unlock_irqrestore(&address_handler_lock, flags); | ||
512 | |||
513 | return ret; | ||
514 | } | ||
515 | EXPORT_SYMBOL(fw_core_add_address_handler); | ||
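
As a usage sketch (not part of the patch; the my_* names are hypothetical), a driver could claim space in fw_high_memory_region and answer incoming write requests as follows. Each request handed to the callback is expected to be answered with fw_send_response():

#include <linux/firewire.h>
#include <linux/firewire-constants.h>

static void my_address_callback(struct fw_card *card, struct fw_request *request,
				int tcode, int destination, int source,
				int generation, int speed,
				unsigned long long offset,
				void *payload, size_t length, void *callback_data)
{
	/* Accept quadlet/block writes into the region, reject everything else. */
	if (tcode == TCODE_WRITE_QUADLET_REQUEST ||
	    tcode == TCODE_WRITE_BLOCK_REQUEST) {
		/* ... consume 'length' bytes at 'payload' ... */
		fw_send_response(card, request, RCODE_COMPLETE);
	} else {
		fw_send_response(card, request, RCODE_TYPE_ERROR);
	}
}

static struct fw_address_handler my_handler = {
	.length           = 0x1000,		/* quadlet-aligned, non-zero */
	.address_callback = my_address_callback,
};

static int my_register_handler(void)
{
	int ret;

	ret = fw_core_add_address_handler(&my_handler, &fw_high_memory_region);
	if (ret == 0)
		pr_info("handler registered at offset 0x%012llx\n",
			(unsigned long long)my_handler.offset);
	return ret;
}
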
516 | |||
517 | /** | ||
518 | * fw_core_remove_address_handler - unregister an address handler | ||
519 | */ | ||
520 | void fw_core_remove_address_handler(struct fw_address_handler *handler) | ||
521 | { | ||
522 | unsigned long flags; | ||
523 | |||
524 | spin_lock_irqsave(&address_handler_lock, flags); | ||
525 | list_del(&handler->link); | ||
526 | spin_unlock_irqrestore(&address_handler_lock, flags); | ||
527 | } | ||
528 | EXPORT_SYMBOL(fw_core_remove_address_handler); | ||
529 | |||
530 | struct fw_request { | ||
531 | struct fw_packet response; | ||
532 | u32 request_header[4]; | ||
533 | int ack; | ||
534 | u32 length; | ||
535 | u32 data[0]; | ||
536 | }; | ||
537 | |||
538 | static void free_response_callback(struct fw_packet *packet, | ||
539 | struct fw_card *card, int status) | ||
540 | { | ||
541 | struct fw_request *request; | ||
542 | |||
543 | request = container_of(packet, struct fw_request, response); | ||
544 | kfree(request); | ||
545 | } | ||
546 | |||
547 | void fw_fill_response(struct fw_packet *response, u32 *request_header, | ||
548 | int rcode, void *payload, size_t length) | ||
549 | { | ||
550 | int tcode, tlabel, extended_tcode, source, destination; | ||
551 | |||
552 | tcode = HEADER_GET_TCODE(request_header[0]); | ||
553 | tlabel = HEADER_GET_TLABEL(request_header[0]); | ||
554 | source = HEADER_GET_DESTINATION(request_header[0]); | ||
555 | destination = HEADER_GET_SOURCE(request_header[1]); | ||
556 | extended_tcode = HEADER_GET_EXTENDED_TCODE(request_header[3]); | ||
557 | |||
558 | response->header[0] = | ||
559 | HEADER_RETRY(RETRY_1) | | ||
560 | HEADER_TLABEL(tlabel) | | ||
561 | HEADER_DESTINATION(destination); | ||
562 | response->header[1] = | ||
563 | HEADER_SOURCE(source) | | ||
564 | HEADER_RCODE(rcode); | ||
565 | response->header[2] = 0; | ||
566 | |||
567 | switch (tcode) { | ||
568 | case TCODE_WRITE_QUADLET_REQUEST: | ||
569 | case TCODE_WRITE_BLOCK_REQUEST: | ||
570 | response->header[0] |= HEADER_TCODE(TCODE_WRITE_RESPONSE); | ||
571 | response->header_length = 12; | ||
572 | response->payload_length = 0; | ||
573 | break; | ||
574 | |||
575 | case TCODE_READ_QUADLET_REQUEST: | ||
576 | response->header[0] |= | ||
577 | HEADER_TCODE(TCODE_READ_QUADLET_RESPONSE); | ||
578 | if (payload != NULL) | ||
579 | response->header[3] = *(u32 *)payload; | ||
580 | else | ||
581 | response->header[3] = 0; | ||
582 | response->header_length = 16; | ||
583 | response->payload_length = 0; | ||
584 | break; | ||
585 | |||
586 | case TCODE_READ_BLOCK_REQUEST: | ||
587 | case TCODE_LOCK_REQUEST: | ||
588 | response->header[0] |= HEADER_TCODE(tcode + 2); | ||
589 | response->header[3] = | ||
590 | HEADER_DATA_LENGTH(length) | | ||
591 | HEADER_EXTENDED_TCODE(extended_tcode); | ||
592 | response->header_length = 16; | ||
593 | response->payload = payload; | ||
594 | response->payload_length = length; | ||
595 | break; | ||
596 | |||
597 | default: | ||
598 | BUG(); | ||
599 | return; | ||
600 | } | ||
601 | |||
602 | response->payload_bus = 0; | ||
603 | } | ||
604 | EXPORT_SYMBOL(fw_fill_response); | ||
605 | |||
606 | static struct fw_request *allocate_request(struct fw_packet *p) | ||
607 | { | ||
608 | struct fw_request *request; | ||
609 | u32 *data, length; | ||
610 | int request_tcode, t; | ||
611 | |||
612 | request_tcode = HEADER_GET_TCODE(p->header[0]); | ||
613 | switch (request_tcode) { | ||
614 | case TCODE_WRITE_QUADLET_REQUEST: | ||
615 | data = &p->header[3]; | ||
616 | length = 4; | ||
617 | break; | ||
618 | |||
619 | case TCODE_WRITE_BLOCK_REQUEST: | ||
620 | case TCODE_LOCK_REQUEST: | ||
621 | data = p->payload; | ||
622 | length = HEADER_GET_DATA_LENGTH(p->header[3]); | ||
623 | break; | ||
624 | |||
625 | case TCODE_READ_QUADLET_REQUEST: | ||
626 | data = NULL; | ||
627 | length = 4; | ||
628 | break; | ||
629 | |||
630 | case TCODE_READ_BLOCK_REQUEST: | ||
631 | data = NULL; | ||
632 | length = HEADER_GET_DATA_LENGTH(p->header[3]); | ||
633 | break; | ||
634 | |||
635 | default: | ||
636 | fw_error("ERROR - corrupt request received - %08x %08x %08x\n", | ||
637 | p->header[0], p->header[1], p->header[2]); | ||
638 | return NULL; | ||
639 | } | ||
640 | |||
641 | request = kmalloc(sizeof(*request) + length, GFP_ATOMIC); | ||
642 | if (request == NULL) | ||
643 | return NULL; | ||
644 | |||
645 | t = (p->timestamp & 0x1fff) + 4000; | ||
646 | if (t >= 8000) | ||
647 | t = (p->timestamp & ~0x1fff) + 0x2000 + t - 8000; | ||
648 | else | ||
649 | t = (p->timestamp & ~0x1fff) + t; | ||
650 | |||
651 | request->response.speed = p->speed; | ||
652 | request->response.timestamp = t; | ||
653 | request->response.generation = p->generation; | ||
654 | request->response.ack = 0; | ||
655 | request->response.callback = free_response_callback; | ||
656 | request->ack = p->ack; | ||
657 | request->length = length; | ||
658 | if (data) | ||
659 | memcpy(request->data, data, length); | ||
660 | |||
661 | memcpy(request->request_header, p->header, sizeof(p->header)); | ||
662 | |||
663 | return request; | ||
664 | } | ||
665 | |||
666 | void fw_send_response(struct fw_card *card, | ||
667 | struct fw_request *request, int rcode) | ||
668 | { | ||
669 | /* unified transaction or broadcast transaction: don't respond */ | ||
670 | if (request->ack != ACK_PENDING || | ||
671 | HEADER_DESTINATION_IS_BROADCAST(request->request_header[0])) { | ||
672 | kfree(request); | ||
673 | return; | ||
674 | } | ||
675 | |||
676 | if (rcode == RCODE_COMPLETE) | ||
677 | fw_fill_response(&request->response, request->request_header, | ||
678 | rcode, request->data, request->length); | ||
679 | else | ||
680 | fw_fill_response(&request->response, request->request_header, | ||
681 | rcode, NULL, 0); | ||
682 | |||
683 | card->driver->send_response(card, &request->response); | ||
684 | } | ||
685 | EXPORT_SYMBOL(fw_send_response); | ||
686 | |||
687 | void fw_core_handle_request(struct fw_card *card, struct fw_packet *p) | ||
688 | { | ||
689 | struct fw_address_handler *handler; | ||
690 | struct fw_request *request; | ||
691 | unsigned long long offset; | ||
692 | unsigned long flags; | ||
693 | int tcode, destination, source; | ||
694 | |||
695 | if (p->ack != ACK_PENDING && p->ack != ACK_COMPLETE) | ||
696 | return; | ||
697 | |||
698 | request = allocate_request(p); | ||
699 | if (request == NULL) { | ||
700 | /* FIXME: send statically allocated busy packet. */ | ||
701 | return; | ||
702 | } | ||
703 | |||
704 | offset = | ||
705 | ((unsigned long long) | ||
706 | HEADER_GET_OFFSET_HIGH(p->header[1]) << 32) | p->header[2]; | ||
707 | tcode = HEADER_GET_TCODE(p->header[0]); | ||
708 | destination = HEADER_GET_DESTINATION(p->header[0]); | ||
709 | source = HEADER_GET_SOURCE(p->header[1]); | ||
710 | |||
711 | spin_lock_irqsave(&address_handler_lock, flags); | ||
712 | handler = lookup_enclosing_address_handler(&address_handler_list, | ||
713 | offset, request->length); | ||
714 | spin_unlock_irqrestore(&address_handler_lock, flags); | ||
715 | |||
716 | /* | ||
717 | * FIXME: lookup the fw_node corresponding to the sender of | ||
718 | * this request and pass that to the address handler instead | ||
719 | * of the node ID. We may also want to move the address | ||
720 | * allocations to fw_node so we only do this callback if the | ||
721 | * upper layers registered it for this node. | ||
722 | */ | ||
723 | |||
724 | if (handler == NULL) | ||
725 | fw_send_response(card, request, RCODE_ADDRESS_ERROR); | ||
726 | else | ||
727 | handler->address_callback(card, request, | ||
728 | tcode, destination, source, | ||
729 | p->generation, p->speed, offset, | ||
730 | request->data, request->length, | ||
731 | handler->callback_data); | ||
732 | } | ||
733 | EXPORT_SYMBOL(fw_core_handle_request); | ||
734 | |||
735 | void fw_core_handle_response(struct fw_card *card, struct fw_packet *p) | ||
736 | { | ||
737 | struct fw_transaction *t; | ||
738 | unsigned long flags; | ||
739 | u32 *data; | ||
740 | size_t data_length; | ||
741 | int tcode, tlabel, destination, source, rcode; | ||
742 | |||
743 | tcode = HEADER_GET_TCODE(p->header[0]); | ||
744 | tlabel = HEADER_GET_TLABEL(p->header[0]); | ||
745 | destination = HEADER_GET_DESTINATION(p->header[0]); | ||
746 | source = HEADER_GET_SOURCE(p->header[1]); | ||
747 | rcode = HEADER_GET_RCODE(p->header[1]); | ||
748 | |||
749 | spin_lock_irqsave(&card->lock, flags); | ||
750 | list_for_each_entry(t, &card->transaction_list, link) { | ||
751 | if (t->node_id == source && t->tlabel == tlabel) { | ||
752 | list_del(&t->link); | ||
753 | card->tlabel_mask &= ~(1ULL << t->tlabel); | ||
754 | break; | ||
755 | } | ||
756 | } | ||
757 | spin_unlock_irqrestore(&card->lock, flags); | ||
758 | |||
759 | if (&t->link == &card->transaction_list) { | ||
760 | fw_notify("Unsolicited response (source %x, tlabel %x)\n", | ||
761 | source, tlabel); | ||
762 | return; | ||
763 | } | ||
764 | |||
765 | /* | ||
766 | * FIXME: sanity-check the packet: is the length correct, do the | ||
767 | * tcode and addresses match? | ||
768 | */ | ||
769 | |||
770 | switch (tcode) { | ||
771 | case TCODE_READ_QUADLET_RESPONSE: | ||
772 | data = (u32 *) &p->header[3]; | ||
773 | data_length = 4; | ||
774 | break; | ||
775 | |||
776 | case TCODE_WRITE_RESPONSE: | ||
777 | data = NULL; | ||
778 | data_length = 0; | ||
779 | break; | ||
780 | |||
781 | case TCODE_READ_BLOCK_RESPONSE: | ||
782 | case TCODE_LOCK_RESPONSE: | ||
783 | data = p->payload; | ||
784 | data_length = HEADER_GET_DATA_LENGTH(p->header[3]); | ||
785 | break; | ||
786 | |||
787 | default: | ||
788 | /* Should never happen, this is just to shut up gcc. */ | ||
789 | data = NULL; | ||
790 | data_length = 0; | ||
791 | break; | ||
792 | } | ||
793 | |||
794 | /* | ||
795 | * The response handler may be executed while the request handler | ||
796 | * is still pending. Cancel the request handler. | ||
797 | */ | ||
798 | card->driver->cancel_packet(card, &t->packet); | ||
799 | |||
800 | t->callback(card, rcode, data, data_length, t->callback_data); | ||
801 | } | ||
802 | EXPORT_SYMBOL(fw_core_handle_response); | ||
803 | |||
804 | static const struct fw_address_region topology_map_region = | ||
805 | { .start = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP, | ||
806 | .end = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP_END, }; | ||
807 | |||
808 | static void handle_topology_map(struct fw_card *card, struct fw_request *request, | ||
809 | int tcode, int destination, int source, int generation, | ||
810 | int speed, unsigned long long offset, | ||
811 | void *payload, size_t length, void *callback_data) | ||
812 | { | ||
813 | int i, start, end; | ||
814 | __be32 *map; | ||
815 | |||
816 | if (!TCODE_IS_READ_REQUEST(tcode)) { | ||
817 | fw_send_response(card, request, RCODE_TYPE_ERROR); | ||
818 | return; | ||
819 | } | ||
820 | |||
821 | if ((offset & 3) > 0 || (length & 3) > 0) { | ||
822 | fw_send_response(card, request, RCODE_ADDRESS_ERROR); | ||
823 | return; | ||
824 | } | ||
825 | |||
826 | start = (offset - topology_map_region.start) / 4; | ||
827 | end = start + length / 4; | ||
828 | map = payload; | ||
829 | |||
830 | for (i = 0; i < length / 4; i++) | ||
831 | map[i] = cpu_to_be32(card->topology_map[start + i]); | ||
832 | |||
833 | fw_send_response(card, request, RCODE_COMPLETE); | ||
834 | } | ||
835 | |||
836 | static struct fw_address_handler topology_map = { | ||
837 | .length = 0x200, | ||
838 | .address_callback = handle_topology_map, | ||
839 | }; | ||
840 | |||
841 | static const struct fw_address_region registers_region = | ||
842 | { .start = CSR_REGISTER_BASE, | ||
843 | .end = CSR_REGISTER_BASE | CSR_CONFIG_ROM, }; | ||
844 | |||
845 | static void handle_registers(struct fw_card *card, struct fw_request *request, | ||
846 | int tcode, int destination, int source, int generation, | ||
847 | int speed, unsigned long long offset, | ||
848 | void *payload, size_t length, void *callback_data) | ||
849 | { | ||
850 | int reg = offset & ~CSR_REGISTER_BASE; | ||
851 | unsigned long long bus_time; | ||
852 | __be32 *data = payload; | ||
853 | int rcode = RCODE_COMPLETE; | ||
854 | |||
855 | switch (reg) { | ||
856 | case CSR_CYCLE_TIME: | ||
857 | case CSR_BUS_TIME: | ||
858 | if (!TCODE_IS_READ_REQUEST(tcode) || length != 4) { | ||
859 | rcode = RCODE_TYPE_ERROR; | ||
860 | break; | ||
861 | } | ||
862 | |||
863 | bus_time = card->driver->get_bus_time(card); | ||
864 | if (reg == CSR_CYCLE_TIME) | ||
865 | *data = cpu_to_be32(bus_time); | ||
866 | else | ||
867 | *data = cpu_to_be32(bus_time >> 25); | ||
868 | break; | ||
869 | |||
870 | case CSR_BROADCAST_CHANNEL: | ||
871 | if (tcode == TCODE_READ_QUADLET_REQUEST) | ||
872 | *data = cpu_to_be32(card->broadcast_channel); | ||
873 | else if (tcode == TCODE_WRITE_QUADLET_REQUEST) | ||
874 | card->broadcast_channel = | ||
875 | (be32_to_cpu(*data) & BROADCAST_CHANNEL_VALID) | | ||
876 | BROADCAST_CHANNEL_INITIAL; | ||
877 | else | ||
878 | rcode = RCODE_TYPE_ERROR; | ||
879 | break; | ||
880 | |||
881 | case CSR_BUS_MANAGER_ID: | ||
882 | case CSR_BANDWIDTH_AVAILABLE: | ||
883 | case CSR_CHANNELS_AVAILABLE_HI: | ||
884 | case CSR_CHANNELS_AVAILABLE_LO: | ||
885 | /* | ||
886 | * FIXME: these are handled by the OHCI hardware and | ||
887 | * the stack never sees these requests. If we add | ||
888 | * support for a new type of controller that doesn't | ||
889 | * handle this in hardware we need to deal with these | ||
890 | * transactions. | ||
891 | */ | ||
892 | BUG(); | ||
893 | break; | ||
894 | |||
895 | case CSR_BUSY_TIMEOUT: | ||
896 | /* FIXME: Implement this. */ | ||
897 | |||
898 | default: | ||
899 | rcode = RCODE_ADDRESS_ERROR; | ||
900 | break; | ||
901 | } | ||
902 | |||
903 | fw_send_response(card, request, rcode); | ||
904 | } | ||
905 | |||
906 | static struct fw_address_handler registers = { | ||
907 | .length = 0x400, | ||
908 | .address_callback = handle_registers, | ||
909 | }; | ||
910 | |||
911 | MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>"); | ||
912 | MODULE_DESCRIPTION("Core IEEE1394 transaction logic"); | ||
913 | MODULE_LICENSE("GPL"); | ||
914 | |||
915 | static const u32 vendor_textual_descriptor[] = { | ||
916 | /* textual descriptor leaf () */ | ||
917 | 0x00060000, | ||
918 | 0x00000000, | ||
919 | 0x00000000, | ||
920 | 0x4c696e75, /* L i n u */ | ||
921 | 0x78204669, /* x F i */ | ||
922 | 0x72657769, /* r e w i */ | ||
923 | 0x72650000, /* r e */ | ||
924 | }; | ||
925 | |||
926 | static const u32 model_textual_descriptor[] = { | ||
927 | /* model descriptor leaf () */ | ||
928 | 0x00030000, | ||
929 | 0x00000000, | ||
930 | 0x00000000, | ||
931 | 0x4a756a75, /* J u j u */ | ||
932 | }; | ||
933 | |||
934 | static struct fw_descriptor vendor_id_descriptor = { | ||
935 | .length = ARRAY_SIZE(vendor_textual_descriptor), | ||
936 | .immediate = 0x03d00d1e, | ||
937 | .key = 0x81000000, | ||
938 | .data = vendor_textual_descriptor, | ||
939 | }; | ||
940 | |||
941 | static struct fw_descriptor model_id_descriptor = { | ||
942 | .length = ARRAY_SIZE(model_textual_descriptor), | ||
943 | .immediate = 0x17000001, | ||
944 | .key = 0x81000000, | ||
945 | .data = model_textual_descriptor, | ||
946 | }; | ||
947 | |||
948 | static int __init fw_core_init(void) | ||
949 | { | ||
950 | int ret; | ||
951 | |||
952 | ret = bus_register(&fw_bus_type); | ||
953 | if (ret < 0) | ||
954 | return ret; | ||
955 | |||
956 | fw_cdev_major = register_chrdev(0, "firewire", &fw_device_ops); | ||
957 | if (fw_cdev_major < 0) { | ||
958 | bus_unregister(&fw_bus_type); | ||
959 | return fw_cdev_major; | ||
960 | } | ||
961 | |||
962 | fw_core_add_address_handler(&topology_map, &topology_map_region); | ||
963 | fw_core_add_address_handler(®isters, ®isters_region); | ||
964 | fw_core_add_descriptor(&vendor_id_descriptor); | ||
965 | fw_core_add_descriptor(&model_id_descriptor); | ||
966 | |||
967 | return 0; | ||
968 | } | ||
969 | |||
970 | static void __exit fw_core_cleanup(void) | ||
971 | { | ||
972 | unregister_chrdev(fw_cdev_major, "firewire"); | ||
973 | bus_unregister(&fw_bus_type); | ||
974 | idr_destroy(&fw_device_idr); | ||
975 | } | ||
976 | |||
977 | module_init(fw_core_init); | ||
978 | module_exit(fw_core_cleanup); | ||