diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2010-08-07 20:09:24 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-08-07 20:09:24 -0400 |
commit | 2d53056973079e6c2ffc0d7ae3afbdd3d4f18ae3 (patch) | |
tree | e921596d80cd0a6434629dbd8d22c0ca3ec14b88 /drivers/firewire | |
parent | 9e50ab91d025afc17ca14a1764be2e1d0c24245d (diff) | |
parent | e78483c5aeb0d7fbb0e365802145f1045e62957e (diff) |
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6: (82 commits)
firewire: core: add forgotten dummy driver methods, remove unused ones
firewire: add isochronous multichannel reception
firewire: core: small clarifications in core-cdev
firewire: core: remove unused code
firewire: ohci: release channel in error path
firewire: ohci: use memory barriers to order descriptor updates
tools/firewire: nosy-dump: increment program version
tools/firewire: nosy-dump: remove unused code
tools/firewire: nosy-dump: use linux/firewire-constants.h
tools/firewire: nosy-dump: break up a deeply nested function
tools/firewire: nosy-dump: make some symbols static or const
tools/firewire: nosy-dump: change to kernel coding style
tools/firewire: nosy-dump: work around segfault in decode_fcp
tools/firewire: nosy-dump: fix it on x86-64
tools/firewire: add userspace front-end of nosy
firewire: nosy: note ioctls in ioctl-number.txt
firewire: nosy: use generic printk macros
firewire: nosy: endianess fixes and annotations
firewire: nosy: annotate __user pointers and __iomem pointers
firewire: nosy: fix device shutdown with active client
...
Diffstat (limited to 'drivers/firewire')
-rw-r--r-- | drivers/firewire/Kconfig | 24 | ||||
-rw-r--r-- | drivers/firewire/Makefile | 1 | ||||
-rw-r--r-- | drivers/firewire/core-card.c | 218 | ||||
-rw-r--r-- | drivers/firewire/core-cdev.c | 409 | ||||
-rw-r--r-- | drivers/firewire/core-device.c | 11 | ||||
-rw-r--r-- | drivers/firewire/core-iso.c | 34 | ||||
-rw-r--r-- | drivers/firewire/core-topology.c | 22 | ||||
-rw-r--r-- | drivers/firewire/core-transaction.c | 306 | ||||
-rw-r--r-- | drivers/firewire/core.h | 24 | ||||
-rw-r--r-- | drivers/firewire/net.c | 4 | ||||
-rw-r--r-- | drivers/firewire/nosy-user.h | 25 | ||||
-rw-r--r-- | drivers/firewire/nosy.c | 721 | ||||
-rw-r--r-- | drivers/firewire/nosy.h | 237 | ||||
-rw-r--r-- | drivers/firewire/ohci.c | 701 | ||||
-rw-r--r-- | drivers/firewire/ohci.h | 1 | ||||
-rw-r--r-- | drivers/firewire/sbp2.c | 13 |
16 files changed, 2333 insertions, 418 deletions
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig index a9371b36a9b9..fcf3ea28340b 100644 --- a/drivers/firewire/Kconfig +++ b/drivers/firewire/Kconfig | |||
@@ -66,4 +66,28 @@ config FIREWIRE_NET | |||
66 | 66 | ||
67 | source "drivers/ieee1394/Kconfig" | 67 | source "drivers/ieee1394/Kconfig" |
68 | 68 | ||
69 | config FIREWIRE_NOSY | ||
70 | tristate "Nosy - a FireWire traffic sniffer for PCILynx cards" | ||
71 | depends on PCI | ||
72 | help | ||
73 | Nosy is an IEEE 1394 packet sniffer that is used for protocol | ||
74 | analysis and in development of IEEE 1394 drivers, applications, | ||
75 | or firmwares. | ||
76 | |||
77 | This driver lets you use a Texas Instruments PCILynx 1394 to PCI | ||
78 | link layer controller TSB12LV21/A/B as a low-budget bus analyzer. | ||
79 | PCILynx is a nowadays very rare IEEE 1394 controller which is | ||
80 | not OHCI 1394 compliant. | ||
81 | |||
82 | The following cards are known to be based on PCILynx or PCILynx-2: | ||
83 | IOI IOI-1394TT (PCI card), Unibrain Fireboard 400 PCI Lynx-2 | ||
84 | (PCI card), Newer Technology FireWire 2 Go (CardBus card), | ||
85 | Apple Power Mac G3 blue & white (onboard controller). | ||
86 | |||
87 | To compile this driver as a module, say M here: The module will be | ||
88 | called nosy. Source code of a userspace interface to nosy, called | ||
89 | nosy-dump, can be found in tools/firewire/ of the kernel sources. | ||
90 | |||
91 | If unsure, say N. | ||
92 | |||
69 | endmenu | 93 | endmenu |
diff --git a/drivers/firewire/Makefile b/drivers/firewire/Makefile index a8f9bb6d9fdf..3c6a7fb20aa7 100644 --- a/drivers/firewire/Makefile +++ b/drivers/firewire/Makefile | |||
@@ -12,3 +12,4 @@ obj-$(CONFIG_FIREWIRE) += firewire-core.o | |||
12 | obj-$(CONFIG_FIREWIRE_OHCI) += firewire-ohci.o | 12 | obj-$(CONFIG_FIREWIRE_OHCI) += firewire-ohci.o |
13 | obj-$(CONFIG_FIREWIRE_SBP2) += firewire-sbp2.o | 13 | obj-$(CONFIG_FIREWIRE_SBP2) += firewire-sbp2.o |
14 | obj-$(CONFIG_FIREWIRE_NET) += firewire-net.o | 14 | obj-$(CONFIG_FIREWIRE_NET) += firewire-net.o |
15 | obj-$(CONFIG_FIREWIRE_NOSY) += nosy.o | ||
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c index 371713ff0266..be0492398ef9 100644 --- a/drivers/firewire/core-card.c +++ b/drivers/firewire/core-card.c | |||
@@ -204,17 +204,62 @@ void fw_core_remove_descriptor(struct fw_descriptor *desc) | |||
204 | } | 204 | } |
205 | EXPORT_SYMBOL(fw_core_remove_descriptor); | 205 | EXPORT_SYMBOL(fw_core_remove_descriptor); |
206 | 206 | ||
207 | static int reset_bus(struct fw_card *card, bool short_reset) | ||
208 | { | ||
209 | int reg = short_reset ? 5 : 1; | ||
210 | int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET; | ||
211 | |||
212 | return card->driver->update_phy_reg(card, reg, 0, bit); | ||
213 | } | ||
214 | |||
215 | void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset) | ||
216 | { | ||
217 | /* We don't try hard to sort out requests of long vs. short resets. */ | ||
218 | card->br_short = short_reset; | ||
219 | |||
220 | /* Use an arbitrary short delay to combine multiple reset requests. */ | ||
221 | fw_card_get(card); | ||
222 | if (!schedule_delayed_work(&card->br_work, | ||
223 | delayed ? DIV_ROUND_UP(HZ, 100) : 0)) | ||
224 | fw_card_put(card); | ||
225 | } | ||
226 | EXPORT_SYMBOL(fw_schedule_bus_reset); | ||
227 | |||
228 | static void br_work(struct work_struct *work) | ||
229 | { | ||
230 | struct fw_card *card = container_of(work, struct fw_card, br_work.work); | ||
231 | |||
232 | /* Delay for 2s after last reset per IEEE 1394 clause 8.2.1. */ | ||
233 | if (card->reset_jiffies != 0 && | ||
234 | time_is_after_jiffies(card->reset_jiffies + 2 * HZ)) { | ||
235 | if (!schedule_delayed_work(&card->br_work, 2 * HZ)) | ||
236 | fw_card_put(card); | ||
237 | return; | ||
238 | } | ||
239 | |||
240 | fw_send_phy_config(card, FW_PHY_CONFIG_NO_NODE_ID, card->generation, | ||
241 | FW_PHY_CONFIG_CURRENT_GAP_COUNT); | ||
242 | reset_bus(card, card->br_short); | ||
243 | fw_card_put(card); | ||
244 | } | ||
245 | |||
207 | static void allocate_broadcast_channel(struct fw_card *card, int generation) | 246 | static void allocate_broadcast_channel(struct fw_card *card, int generation) |
208 | { | 247 | { |
209 | int channel, bandwidth = 0; | 248 | int channel, bandwidth = 0; |
210 | 249 | ||
211 | fw_iso_resource_manage(card, generation, 1ULL << 31, &channel, | 250 | if (!card->broadcast_channel_allocated) { |
212 | &bandwidth, true, card->bm_transaction_data); | 251 | fw_iso_resource_manage(card, generation, 1ULL << 31, |
213 | if (channel == 31) { | 252 | &channel, &bandwidth, true, |
253 | card->bm_transaction_data); | ||
254 | if (channel != 31) { | ||
255 | fw_notify("failed to allocate broadcast channel\n"); | ||
256 | return; | ||
257 | } | ||
214 | card->broadcast_channel_allocated = true; | 258 | card->broadcast_channel_allocated = true; |
215 | device_for_each_child(card->device, (void *)(long)generation, | ||
216 | fw_device_set_broadcast_channel); | ||
217 | } | 259 | } |
260 | |||
261 | device_for_each_child(card->device, (void *)(long)generation, | ||
262 | fw_device_set_broadcast_channel); | ||
218 | } | 263 | } |
219 | 264 | ||
220 | static const char gap_count_table[] = { | 265 | static const char gap_count_table[] = { |
@@ -224,27 +269,26 @@ static const char gap_count_table[] = { | |||
224 | void fw_schedule_bm_work(struct fw_card *card, unsigned long delay) | 269 | void fw_schedule_bm_work(struct fw_card *card, unsigned long delay) |
225 | { | 270 | { |
226 | fw_card_get(card); | 271 | fw_card_get(card); |
227 | if (!schedule_delayed_work(&card->work, delay)) | 272 | if (!schedule_delayed_work(&card->bm_work, delay)) |
228 | fw_card_put(card); | 273 | fw_card_put(card); |
229 | } | 274 | } |
230 | 275 | ||
231 | static void fw_card_bm_work(struct work_struct *work) | 276 | static void bm_work(struct work_struct *work) |
232 | { | 277 | { |
233 | struct fw_card *card = container_of(work, struct fw_card, work.work); | 278 | struct fw_card *card = container_of(work, struct fw_card, bm_work.work); |
234 | struct fw_device *root_device, *irm_device; | 279 | struct fw_device *root_device, *irm_device; |
235 | struct fw_node *root_node; | 280 | struct fw_node *root_node; |
236 | unsigned long flags; | 281 | int root_id, new_root_id, irm_id, bm_id, local_id; |
237 | int root_id, new_root_id, irm_id, local_id; | ||
238 | int gap_count, generation, grace, rcode; | 282 | int gap_count, generation, grace, rcode; |
239 | bool do_reset = false; | 283 | bool do_reset = false; |
240 | bool root_device_is_running; | 284 | bool root_device_is_running; |
241 | bool root_device_is_cmc; | 285 | bool root_device_is_cmc; |
242 | bool irm_is_1394_1995_only; | 286 | bool irm_is_1394_1995_only; |
243 | 287 | ||
244 | spin_lock_irqsave(&card->lock, flags); | 288 | spin_lock_irq(&card->lock); |
245 | 289 | ||
246 | if (card->local_node == NULL) { | 290 | if (card->local_node == NULL) { |
247 | spin_unlock_irqrestore(&card->lock, flags); | 291 | spin_unlock_irq(&card->lock); |
248 | goto out_put_card; | 292 | goto out_put_card; |
249 | } | 293 | } |
250 | 294 | ||
@@ -267,7 +311,8 @@ static void fw_card_bm_work(struct work_struct *work) | |||
267 | 311 | ||
268 | grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 8)); | 312 | grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 8)); |
269 | 313 | ||
270 | if (is_next_generation(generation, card->bm_generation) || | 314 | if ((is_next_generation(generation, card->bm_generation) && |
315 | !card->bm_abdicate) || | ||
271 | (card->bm_generation != generation && grace)) { | 316 | (card->bm_generation != generation && grace)) { |
272 | /* | 317 | /* |
273 | * This first step is to figure out who is IRM and | 318 | * This first step is to figure out who is IRM and |
@@ -298,21 +343,26 @@ static void fw_card_bm_work(struct work_struct *work) | |||
298 | card->bm_transaction_data[0] = cpu_to_be32(0x3f); | 343 | card->bm_transaction_data[0] = cpu_to_be32(0x3f); |
299 | card->bm_transaction_data[1] = cpu_to_be32(local_id); | 344 | card->bm_transaction_data[1] = cpu_to_be32(local_id); |
300 | 345 | ||
301 | spin_unlock_irqrestore(&card->lock, flags); | 346 | spin_unlock_irq(&card->lock); |
302 | 347 | ||
303 | rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP, | 348 | rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP, |
304 | irm_id, generation, SCODE_100, | 349 | irm_id, generation, SCODE_100, |
305 | CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID, | 350 | CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID, |
306 | card->bm_transaction_data, | 351 | card->bm_transaction_data, 8); |
307 | sizeof(card->bm_transaction_data)); | ||
308 | 352 | ||
309 | if (rcode == RCODE_GENERATION) | 353 | if (rcode == RCODE_GENERATION) |
310 | /* Another bus reset, BM work has been rescheduled. */ | 354 | /* Another bus reset, BM work has been rescheduled. */ |
311 | goto out; | 355 | goto out; |
312 | 356 | ||
313 | if (rcode == RCODE_COMPLETE && | 357 | bm_id = be32_to_cpu(card->bm_transaction_data[0]); |
314 | card->bm_transaction_data[0] != cpu_to_be32(0x3f)) { | ||
315 | 358 | ||
359 | spin_lock_irq(&card->lock); | ||
360 | if (rcode == RCODE_COMPLETE && generation == card->generation) | ||
361 | card->bm_node_id = | ||
362 | bm_id == 0x3f ? local_id : 0xffc0 | bm_id; | ||
363 | spin_unlock_irq(&card->lock); | ||
364 | |||
365 | if (rcode == RCODE_COMPLETE && bm_id != 0x3f) { | ||
316 | /* Somebody else is BM. Only act as IRM. */ | 366 | /* Somebody else is BM. Only act as IRM. */ |
317 | if (local_id == irm_id) | 367 | if (local_id == irm_id) |
318 | allocate_broadcast_channel(card, generation); | 368 | allocate_broadcast_channel(card, generation); |
@@ -320,7 +370,17 @@ static void fw_card_bm_work(struct work_struct *work) | |||
320 | goto out; | 370 | goto out; |
321 | } | 371 | } |
322 | 372 | ||
323 | spin_lock_irqsave(&card->lock, flags); | 373 | if (rcode == RCODE_SEND_ERROR) { |
374 | /* | ||
375 | * We have been unable to send the lock request due to | ||
376 | * some local problem. Let's try again later and hope | ||
377 | * that the problem has gone away by then. | ||
378 | */ | ||
379 | fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8)); | ||
380 | goto out; | ||
381 | } | ||
382 | |||
383 | spin_lock_irq(&card->lock); | ||
324 | 384 | ||
325 | if (rcode != RCODE_COMPLETE) { | 385 | if (rcode != RCODE_COMPLETE) { |
326 | /* | 386 | /* |
@@ -339,7 +399,7 @@ static void fw_card_bm_work(struct work_struct *work) | |||
339 | * We weren't BM in the last generation, and the last | 399 | * We weren't BM in the last generation, and the last |
340 | * bus reset is less than 125ms ago. Reschedule this job. | 400 | * bus reset is less than 125ms ago. Reschedule this job. |
341 | */ | 401 | */ |
342 | spin_unlock_irqrestore(&card->lock, flags); | 402 | spin_unlock_irq(&card->lock); |
343 | fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8)); | 403 | fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8)); |
344 | goto out; | 404 | goto out; |
345 | } | 405 | } |
@@ -362,14 +422,12 @@ static void fw_card_bm_work(struct work_struct *work) | |||
362 | * If we haven't probed this device yet, bail out now | 422 | * If we haven't probed this device yet, bail out now |
363 | * and let's try again once that's done. | 423 | * and let's try again once that's done. |
364 | */ | 424 | */ |
365 | spin_unlock_irqrestore(&card->lock, flags); | 425 | spin_unlock_irq(&card->lock); |
366 | goto out; | 426 | goto out; |
367 | } else if (root_device_is_cmc) { | 427 | } else if (root_device_is_cmc) { |
368 | /* | 428 | /* |
369 | * FIXME: I suppose we should set the cmstr bit in the | 429 | * We will send out a force root packet for this |
370 | * STATE_CLEAR register of this node, as described in | 430 | * node as part of the gap count optimization. |
371 | * 1394-1995, 8.4.2.6. Also, send out a force root | ||
372 | * packet for this node. | ||
373 | */ | 431 | */ |
374 | new_root_id = root_id; | 432 | new_root_id = root_id; |
375 | } else { | 433 | } else { |
@@ -402,19 +460,33 @@ static void fw_card_bm_work(struct work_struct *work) | |||
402 | (card->gap_count != gap_count || new_root_id != root_id)) | 460 | (card->gap_count != gap_count || new_root_id != root_id)) |
403 | do_reset = true; | 461 | do_reset = true; |
404 | 462 | ||
405 | spin_unlock_irqrestore(&card->lock, flags); | 463 | spin_unlock_irq(&card->lock); |
406 | 464 | ||
407 | if (do_reset) { | 465 | if (do_reset) { |
408 | fw_notify("phy config: card %d, new root=%x, gap_count=%d\n", | 466 | fw_notify("phy config: card %d, new root=%x, gap_count=%d\n", |
409 | card->index, new_root_id, gap_count); | 467 | card->index, new_root_id, gap_count); |
410 | fw_send_phy_config(card, new_root_id, generation, gap_count); | 468 | fw_send_phy_config(card, new_root_id, generation, gap_count); |
411 | fw_core_initiate_bus_reset(card, 1); | 469 | reset_bus(card, true); |
412 | /* Will allocate broadcast channel after the reset. */ | 470 | /* Will allocate broadcast channel after the reset. */ |
413 | } else { | 471 | goto out; |
414 | if (local_id == irm_id) | 472 | } |
415 | allocate_broadcast_channel(card, generation); | 473 | |
474 | if (root_device_is_cmc) { | ||
475 | /* | ||
476 | * Make sure that the cycle master sends cycle start packets. | ||
477 | */ | ||
478 | card->bm_transaction_data[0] = cpu_to_be32(CSR_STATE_BIT_CMSTR); | ||
479 | rcode = fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST, | ||
480 | root_id, generation, SCODE_100, | ||
481 | CSR_REGISTER_BASE + CSR_STATE_SET, | ||
482 | card->bm_transaction_data, 4); | ||
483 | if (rcode == RCODE_GENERATION) | ||
484 | goto out; | ||
416 | } | 485 | } |
417 | 486 | ||
487 | if (local_id == irm_id) | ||
488 | allocate_broadcast_channel(card, generation); | ||
489 | |||
418 | out: | 490 | out: |
419 | fw_node_put(root_node); | 491 | fw_node_put(root_node); |
420 | out_put_card: | 492 | out_put_card: |
@@ -432,17 +504,23 @@ void fw_card_initialize(struct fw_card *card, | |||
432 | card->device = device; | 504 | card->device = device; |
433 | card->current_tlabel = 0; | 505 | card->current_tlabel = 0; |
434 | card->tlabel_mask = 0; | 506 | card->tlabel_mask = 0; |
507 | card->split_timeout_hi = 0; | ||
508 | card->split_timeout_lo = 800 << 19; | ||
509 | card->split_timeout_cycles = 800; | ||
510 | card->split_timeout_jiffies = DIV_ROUND_UP(HZ, 10); | ||
435 | card->color = 0; | 511 | card->color = 0; |
436 | card->broadcast_channel = BROADCAST_CHANNEL_INITIAL; | 512 | card->broadcast_channel = BROADCAST_CHANNEL_INITIAL; |
437 | 513 | ||
438 | kref_init(&card->kref); | 514 | kref_init(&card->kref); |
439 | init_completion(&card->done); | 515 | init_completion(&card->done); |
440 | INIT_LIST_HEAD(&card->transaction_list); | 516 | INIT_LIST_HEAD(&card->transaction_list); |
517 | INIT_LIST_HEAD(&card->phy_receiver_list); | ||
441 | spin_lock_init(&card->lock); | 518 | spin_lock_init(&card->lock); |
442 | 519 | ||
443 | card->local_node = NULL; | 520 | card->local_node = NULL; |
444 | 521 | ||
445 | INIT_DELAYED_WORK(&card->work, fw_card_bm_work); | 522 | INIT_DELAYED_WORK(&card->br_work, br_work); |
523 | INIT_DELAYED_WORK(&card->bm_work, bm_work); | ||
446 | } | 524 | } |
447 | EXPORT_SYMBOL(fw_card_initialize); | 525 | EXPORT_SYMBOL(fw_card_initialize); |
448 | 526 | ||
@@ -468,20 +546,22 @@ int fw_card_add(struct fw_card *card, | |||
468 | } | 546 | } |
469 | EXPORT_SYMBOL(fw_card_add); | 547 | EXPORT_SYMBOL(fw_card_add); |
470 | 548 | ||
471 | |||
472 | /* | 549 | /* |
473 | * The next few functions implement a dummy driver that is used once a card | 550 | * The next few functions implement a dummy driver that is used once a card |
474 | * driver shuts down an fw_card. This allows the driver to cleanly unload, | 551 | * driver shuts down an fw_card. This allows the driver to cleanly unload, |
475 | * as all IO to the card will be handled (and failed) by the dummy driver | 552 | * as all IO to the card will be handled (and failed) by the dummy driver |
476 | * instead of calling into the module. Only functions for iso context | 553 | * instead of calling into the module. Only functions for iso context |
477 | * shutdown still need to be provided by the card driver. | 554 | * shutdown still need to be provided by the card driver. |
555 | * | ||
556 | * .read/write_csr() should never be called anymore after the dummy driver | ||
557 | * was bound since they are only used within request handler context. | ||
558 | * .set_config_rom() is never called since the card is taken out of card_list | ||
559 | * before switching to the dummy driver. | ||
478 | */ | 560 | */ |
479 | 561 | ||
480 | static int dummy_enable(struct fw_card *card, | 562 | static int dummy_read_phy_reg(struct fw_card *card, int address) |
481 | const __be32 *config_rom, size_t length) | ||
482 | { | 563 | { |
483 | BUG(); | 564 | return -ENODEV; |
484 | return -1; | ||
485 | } | 565 | } |
486 | 566 | ||
487 | static int dummy_update_phy_reg(struct fw_card *card, int address, | 567 | static int dummy_update_phy_reg(struct fw_card *card, int address, |
@@ -490,25 +570,14 @@ static int dummy_update_phy_reg(struct fw_card *card, int address, | |||
490 | return -ENODEV; | 570 | return -ENODEV; |
491 | } | 571 | } |
492 | 572 | ||
493 | static int dummy_set_config_rom(struct fw_card *card, | ||
494 | const __be32 *config_rom, size_t length) | ||
495 | { | ||
496 | /* | ||
497 | * We take the card out of card_list before setting the dummy | ||
498 | * driver, so this should never get called. | ||
499 | */ | ||
500 | BUG(); | ||
501 | return -1; | ||
502 | } | ||
503 | |||
504 | static void dummy_send_request(struct fw_card *card, struct fw_packet *packet) | 573 | static void dummy_send_request(struct fw_card *card, struct fw_packet *packet) |
505 | { | 574 | { |
506 | packet->callback(packet, card, -ENODEV); | 575 | packet->callback(packet, card, RCODE_CANCELLED); |
507 | } | 576 | } |
508 | 577 | ||
509 | static void dummy_send_response(struct fw_card *card, struct fw_packet *packet) | 578 | static void dummy_send_response(struct fw_card *card, struct fw_packet *packet) |
510 | { | 579 | { |
511 | packet->callback(packet, card, -ENODEV); | 580 | packet->callback(packet, card, RCODE_CANCELLED); |
512 | } | 581 | } |
513 | 582 | ||
514 | static int dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet) | 583 | static int dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet) |
@@ -522,14 +591,40 @@ static int dummy_enable_phys_dma(struct fw_card *card, | |||
522 | return -ENODEV; | 591 | return -ENODEV; |
523 | } | 592 | } |
524 | 593 | ||
594 | static struct fw_iso_context *dummy_allocate_iso_context(struct fw_card *card, | ||
595 | int type, int channel, size_t header_size) | ||
596 | { | ||
597 | return ERR_PTR(-ENODEV); | ||
598 | } | ||
599 | |||
600 | static int dummy_start_iso(struct fw_iso_context *ctx, | ||
601 | s32 cycle, u32 sync, u32 tags) | ||
602 | { | ||
603 | return -ENODEV; | ||
604 | } | ||
605 | |||
606 | static int dummy_set_iso_channels(struct fw_iso_context *ctx, u64 *channels) | ||
607 | { | ||
608 | return -ENODEV; | ||
609 | } | ||
610 | |||
611 | static int dummy_queue_iso(struct fw_iso_context *ctx, struct fw_iso_packet *p, | ||
612 | struct fw_iso_buffer *buffer, unsigned long payload) | ||
613 | { | ||
614 | return -ENODEV; | ||
615 | } | ||
616 | |||
525 | static const struct fw_card_driver dummy_driver_template = { | 617 | static const struct fw_card_driver dummy_driver_template = { |
526 | .enable = dummy_enable, | 618 | .read_phy_reg = dummy_read_phy_reg, |
527 | .update_phy_reg = dummy_update_phy_reg, | 619 | .update_phy_reg = dummy_update_phy_reg, |
528 | .set_config_rom = dummy_set_config_rom, | 620 | .send_request = dummy_send_request, |
529 | .send_request = dummy_send_request, | 621 | .send_response = dummy_send_response, |
530 | .cancel_packet = dummy_cancel_packet, | 622 | .cancel_packet = dummy_cancel_packet, |
531 | .send_response = dummy_send_response, | 623 | .enable_phys_dma = dummy_enable_phys_dma, |
532 | .enable_phys_dma = dummy_enable_phys_dma, | 624 | .allocate_iso_context = dummy_allocate_iso_context, |
625 | .start_iso = dummy_start_iso, | ||
626 | .set_iso_channels = dummy_set_iso_channels, | ||
627 | .queue_iso = dummy_queue_iso, | ||
533 | }; | 628 | }; |
534 | 629 | ||
535 | void fw_card_release(struct kref *kref) | 630 | void fw_card_release(struct kref *kref) |
@@ -545,7 +640,7 @@ void fw_core_remove_card(struct fw_card *card) | |||
545 | 640 | ||
546 | card->driver->update_phy_reg(card, 4, | 641 | card->driver->update_phy_reg(card, 4, |
547 | PHY_LINK_ACTIVE | PHY_CONTENDER, 0); | 642 | PHY_LINK_ACTIVE | PHY_CONTENDER, 0); |
548 | fw_core_initiate_bus_reset(card, 1); | 643 | fw_schedule_bus_reset(card, false, true); |
549 | 644 | ||
550 | mutex_lock(&card_mutex); | 645 | mutex_lock(&card_mutex); |
551 | list_del_init(&card->link); | 646 | list_del_init(&card->link); |
@@ -565,12 +660,3 @@ void fw_core_remove_card(struct fw_card *card) | |||
565 | WARN_ON(!list_empty(&card->transaction_list)); | 660 | WARN_ON(!list_empty(&card->transaction_list)); |
566 | } | 661 | } |
567 | EXPORT_SYMBOL(fw_core_remove_card); | 662 | EXPORT_SYMBOL(fw_core_remove_card); |
568 | |||
569 | int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset) | ||
570 | { | ||
571 | int reg = short_reset ? 5 : 1; | ||
572 | int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET; | ||
573 | |||
574 | return card->driver->update_phy_reg(card, reg, 0, bit); | ||
575 | } | ||
576 | EXPORT_SYMBOL(fw_core_initiate_bus_reset); | ||
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c index 5bf106b9d791..14bb7b7b5dd7 100644 --- a/drivers/firewire/core-cdev.c +++ b/drivers/firewire/core-cdev.c | |||
@@ -18,6 +18,7 @@ | |||
18 | * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | 18 | * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. |
19 | */ | 19 | */ |
20 | 20 | ||
21 | #include <linux/bug.h> | ||
21 | #include <linux/compat.h> | 22 | #include <linux/compat.h> |
22 | #include <linux/delay.h> | 23 | #include <linux/delay.h> |
23 | #include <linux/device.h> | 24 | #include <linux/device.h> |
@@ -33,7 +34,7 @@ | |||
33 | #include <linux/module.h> | 34 | #include <linux/module.h> |
34 | #include <linux/mutex.h> | 35 | #include <linux/mutex.h> |
35 | #include <linux/poll.h> | 36 | #include <linux/poll.h> |
36 | #include <linux/sched.h> | 37 | #include <linux/sched.h> /* required for linux/wait.h */ |
37 | #include <linux/slab.h> | 38 | #include <linux/slab.h> |
38 | #include <linux/spinlock.h> | 39 | #include <linux/spinlock.h> |
39 | #include <linux/string.h> | 40 | #include <linux/string.h> |
@@ -47,6 +48,13 @@ | |||
47 | 48 | ||
48 | #include "core.h" | 49 | #include "core.h" |
49 | 50 | ||
51 | /* | ||
52 | * ABI version history is documented in linux/firewire-cdev.h. | ||
53 | */ | ||
54 | #define FW_CDEV_KERNEL_VERSION 4 | ||
55 | #define FW_CDEV_VERSION_EVENT_REQUEST2 4 | ||
56 | #define FW_CDEV_VERSION_ALLOCATE_REGION_END 4 | ||
57 | |||
50 | struct client { | 58 | struct client { |
51 | u32 version; | 59 | u32 version; |
52 | struct fw_device *device; | 60 | struct fw_device *device; |
@@ -63,6 +71,9 @@ struct client { | |||
63 | struct fw_iso_buffer buffer; | 71 | struct fw_iso_buffer buffer; |
64 | unsigned long vm_start; | 72 | unsigned long vm_start; |
65 | 73 | ||
74 | struct list_head phy_receiver_link; | ||
75 | u64 phy_receiver_closure; | ||
76 | |||
66 | struct list_head link; | 77 | struct list_head link; |
67 | struct kref kref; | 78 | struct kref kref; |
68 | }; | 79 | }; |
@@ -107,6 +118,7 @@ struct outbound_transaction_resource { | |||
107 | 118 | ||
108 | struct inbound_transaction_resource { | 119 | struct inbound_transaction_resource { |
109 | struct client_resource resource; | 120 | struct client_resource resource; |
121 | struct fw_card *card; | ||
110 | struct fw_request *request; | 122 | struct fw_request *request; |
111 | void *data; | 123 | void *data; |
112 | size_t length; | 124 | size_t length; |
@@ -171,7 +183,10 @@ struct outbound_transaction_event { | |||
171 | 183 | ||
172 | struct inbound_transaction_event { | 184 | struct inbound_transaction_event { |
173 | struct event event; | 185 | struct event event; |
174 | struct fw_cdev_event_request request; | 186 | union { |
187 | struct fw_cdev_event_request request; | ||
188 | struct fw_cdev_event_request2 request2; | ||
189 | } req; | ||
175 | }; | 190 | }; |
176 | 191 | ||
177 | struct iso_interrupt_event { | 192 | struct iso_interrupt_event { |
@@ -179,11 +194,28 @@ struct iso_interrupt_event { | |||
179 | struct fw_cdev_event_iso_interrupt interrupt; | 194 | struct fw_cdev_event_iso_interrupt interrupt; |
180 | }; | 195 | }; |
181 | 196 | ||
197 | struct iso_interrupt_mc_event { | ||
198 | struct event event; | ||
199 | struct fw_cdev_event_iso_interrupt_mc interrupt; | ||
200 | }; | ||
201 | |||
182 | struct iso_resource_event { | 202 | struct iso_resource_event { |
183 | struct event event; | 203 | struct event event; |
184 | struct fw_cdev_event_iso_resource iso_resource; | 204 | struct fw_cdev_event_iso_resource iso_resource; |
185 | }; | 205 | }; |
186 | 206 | ||
207 | struct outbound_phy_packet_event { | ||
208 | struct event event; | ||
209 | struct client *client; | ||
210 | struct fw_packet p; | ||
211 | struct fw_cdev_event_phy_packet phy_packet; | ||
212 | }; | ||
213 | |||
214 | struct inbound_phy_packet_event { | ||
215 | struct event event; | ||
216 | struct fw_cdev_event_phy_packet phy_packet; | ||
217 | }; | ||
218 | |||
187 | static inline void __user *u64_to_uptr(__u64 value) | 219 | static inline void __user *u64_to_uptr(__u64 value) |
188 | { | 220 | { |
189 | return (void __user *)(unsigned long)value; | 221 | return (void __user *)(unsigned long)value; |
@@ -219,6 +251,7 @@ static int fw_device_op_open(struct inode *inode, struct file *file) | |||
219 | idr_init(&client->resource_idr); | 251 | idr_init(&client->resource_idr); |
220 | INIT_LIST_HEAD(&client->event_list); | 252 | INIT_LIST_HEAD(&client->event_list); |
221 | init_waitqueue_head(&client->wait); | 253 | init_waitqueue_head(&client->wait); |
254 | INIT_LIST_HEAD(&client->phy_receiver_link); | ||
222 | kref_init(&client->kref); | 255 | kref_init(&client->kref); |
223 | 256 | ||
224 | file->private_data = client; | 257 | file->private_data = client; |
@@ -309,7 +342,7 @@ static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event, | |||
309 | event->generation = client->device->generation; | 342 | event->generation = client->device->generation; |
310 | event->node_id = client->device->node_id; | 343 | event->node_id = client->device->node_id; |
311 | event->local_node_id = card->local_node->node_id; | 344 | event->local_node_id = card->local_node->node_id; |
312 | event->bm_node_id = 0; /* FIXME: We don't track the BM. */ | 345 | event->bm_node_id = card->bm_node_id; |
313 | event->irm_node_id = card->irm_node->node_id; | 346 | event->irm_node_id = card->irm_node->node_id; |
314 | event->root_node_id = card->root_node->node_id; | 347 | event->root_node_id = card->root_node->node_id; |
315 | 348 | ||
@@ -340,7 +373,7 @@ static void queue_bus_reset_event(struct client *client) | |||
340 | 373 | ||
341 | e = kzalloc(sizeof(*e), GFP_KERNEL); | 374 | e = kzalloc(sizeof(*e), GFP_KERNEL); |
342 | if (e == NULL) { | 375 | if (e == NULL) { |
343 | fw_notify("Out of memory when allocating bus reset event\n"); | 376 | fw_notify("Out of memory when allocating event\n"); |
344 | return; | 377 | return; |
345 | } | 378 | } |
346 | 379 | ||
@@ -386,6 +419,9 @@ union ioctl_arg { | |||
386 | struct fw_cdev_allocate_iso_resource allocate_iso_resource; | 419 | struct fw_cdev_allocate_iso_resource allocate_iso_resource; |
387 | struct fw_cdev_send_stream_packet send_stream_packet; | 420 | struct fw_cdev_send_stream_packet send_stream_packet; |
388 | struct fw_cdev_get_cycle_timer2 get_cycle_timer2; | 421 | struct fw_cdev_get_cycle_timer2 get_cycle_timer2; |
422 | struct fw_cdev_send_phy_packet send_phy_packet; | ||
423 | struct fw_cdev_receive_phy_packets receive_phy_packets; | ||
424 | struct fw_cdev_set_iso_channels set_iso_channels; | ||
389 | }; | 425 | }; |
390 | 426 | ||
391 | static int ioctl_get_info(struct client *client, union ioctl_arg *arg) | 427 | static int ioctl_get_info(struct client *client, union ioctl_arg *arg) |
@@ -395,7 +431,7 @@ static int ioctl_get_info(struct client *client, union ioctl_arg *arg) | |||
395 | unsigned long ret = 0; | 431 | unsigned long ret = 0; |
396 | 432 | ||
397 | client->version = a->version; | 433 | client->version = a->version; |
398 | a->version = FW_CDEV_VERSION; | 434 | a->version = FW_CDEV_KERNEL_VERSION; |
399 | a->card = client->device->card->index; | 435 | a->card = client->device->card->index; |
400 | 436 | ||
401 | down_read(&fw_device_rwsem); | 437 | down_read(&fw_device_rwsem); |
@@ -554,6 +590,10 @@ static int init_request(struct client *client, | |||
554 | (request->length > 4096 || request->length > 512 << speed)) | 590 | (request->length > 4096 || request->length > 512 << speed)) |
555 | return -EIO; | 591 | return -EIO; |
556 | 592 | ||
593 | if (request->tcode == TCODE_WRITE_QUADLET_REQUEST && | ||
594 | request->length < 4) | ||
595 | return -EINVAL; | ||
596 | |||
557 | e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL); | 597 | e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL); |
558 | if (e == NULL) | 598 | if (e == NULL) |
559 | return -ENOMEM; | 599 | return -ENOMEM; |
@@ -626,28 +666,34 @@ static void release_request(struct client *client, | |||
626 | if (is_fcp_request(r->request)) | 666 | if (is_fcp_request(r->request)) |
627 | kfree(r->data); | 667 | kfree(r->data); |
628 | else | 668 | else |
629 | fw_send_response(client->device->card, r->request, | 669 | fw_send_response(r->card, r->request, RCODE_CONFLICT_ERROR); |
630 | RCODE_CONFLICT_ERROR); | 670 | |
671 | fw_card_put(r->card); | ||
631 | kfree(r); | 672 | kfree(r); |
632 | } | 673 | } |
633 | 674 | ||
634 | static void handle_request(struct fw_card *card, struct fw_request *request, | 675 | static void handle_request(struct fw_card *card, struct fw_request *request, |
635 | int tcode, int destination, int source, | 676 | int tcode, int destination, int source, |
636 | int generation, int speed, | 677 | int generation, unsigned long long offset, |
637 | unsigned long long offset, | ||
638 | void *payload, size_t length, void *callback_data) | 678 | void *payload, size_t length, void *callback_data) |
639 | { | 679 | { |
640 | struct address_handler_resource *handler = callback_data; | 680 | struct address_handler_resource *handler = callback_data; |
641 | struct inbound_transaction_resource *r; | 681 | struct inbound_transaction_resource *r; |
642 | struct inbound_transaction_event *e; | 682 | struct inbound_transaction_event *e; |
683 | size_t event_size0; | ||
643 | void *fcp_frame = NULL; | 684 | void *fcp_frame = NULL; |
644 | int ret; | 685 | int ret; |
645 | 686 | ||
687 | /* card may be different from handler->client->device->card */ | ||
688 | fw_card_get(card); | ||
689 | |||
646 | r = kmalloc(sizeof(*r), GFP_ATOMIC); | 690 | r = kmalloc(sizeof(*r), GFP_ATOMIC); |
647 | e = kmalloc(sizeof(*e), GFP_ATOMIC); | 691 | e = kmalloc(sizeof(*e), GFP_ATOMIC); |
648 | if (r == NULL || e == NULL) | 692 | if (r == NULL || e == NULL) { |
693 | fw_notify("Out of memory when allocating event\n"); | ||
649 | goto failed; | 694 | goto failed; |
650 | 695 | } | |
696 | r->card = card; | ||
651 | r->request = request; | 697 | r->request = request; |
652 | r->data = payload; | 698 | r->data = payload; |
653 | r->length = length; | 699 | r->length = length; |
@@ -669,15 +715,37 @@ static void handle_request(struct fw_card *card, struct fw_request *request, | |||
669 | if (ret < 0) | 715 | if (ret < 0) |
670 | goto failed; | 716 | goto failed; |
671 | 717 | ||
672 | e->request.type = FW_CDEV_EVENT_REQUEST; | 718 | if (handler->client->version < FW_CDEV_VERSION_EVENT_REQUEST2) { |
673 | e->request.tcode = tcode; | 719 | struct fw_cdev_event_request *req = &e->req.request; |
674 | e->request.offset = offset; | 720 | |
675 | e->request.length = length; | 721 | if (tcode & 0x10) |
676 | e->request.handle = r->resource.handle; | 722 | tcode = TCODE_LOCK_REQUEST; |
677 | e->request.closure = handler->closure; | 723 | |
724 | req->type = FW_CDEV_EVENT_REQUEST; | ||
725 | req->tcode = tcode; | ||
726 | req->offset = offset; | ||
727 | req->length = length; | ||
728 | req->handle = r->resource.handle; | ||
729 | req->closure = handler->closure; | ||
730 | event_size0 = sizeof(*req); | ||
731 | } else { | ||
732 | struct fw_cdev_event_request2 *req = &e->req.request2; | ||
733 | |||
734 | req->type = FW_CDEV_EVENT_REQUEST2; | ||
735 | req->tcode = tcode; | ||
736 | req->offset = offset; | ||
737 | req->source_node_id = source; | ||
738 | req->destination_node_id = destination; | ||
739 | req->card = card->index; | ||
740 | req->generation = generation; | ||
741 | req->length = length; | ||
742 | req->handle = r->resource.handle; | ||
743 | req->closure = handler->closure; | ||
744 | event_size0 = sizeof(*req); | ||
745 | } | ||
678 | 746 | ||
679 | queue_event(handler->client, &e->event, | 747 | queue_event(handler->client, &e->event, |
680 | &e->request, sizeof(e->request), r->data, length); | 748 | &e->req, event_size0, r->data, length); |
681 | return; | 749 | return; |
682 | 750 | ||
683 | failed: | 751 | failed: |
@@ -687,6 +755,8 @@ static void handle_request(struct fw_card *card, struct fw_request *request, | |||
687 | 755 | ||
688 | if (!is_fcp_request(request)) | 756 | if (!is_fcp_request(request)) |
689 | fw_send_response(card, request, RCODE_CONFLICT_ERROR); | 757 | fw_send_response(card, request, RCODE_CONFLICT_ERROR); |
758 | |||
759 | fw_card_put(card); | ||
690 | } | 760 | } |
691 | 761 | ||
692 | static void release_address_handler(struct client *client, | 762 | static void release_address_handler(struct client *client, |
@@ -711,7 +781,11 @@ static int ioctl_allocate(struct client *client, union ioctl_arg *arg) | |||
711 | return -ENOMEM; | 781 | return -ENOMEM; |
712 | 782 | ||
713 | region.start = a->offset; | 783 | region.start = a->offset; |
714 | region.end = a->offset + a->length; | 784 | if (client->version < FW_CDEV_VERSION_ALLOCATE_REGION_END) |
785 | region.end = a->offset + a->length; | ||
786 | else | ||
787 | region.end = a->region_end; | ||
788 | |||
715 | r->handler.length = a->length; | 789 | r->handler.length = a->length; |
716 | r->handler.address_callback = handle_request; | 790 | r->handler.address_callback = handle_request; |
717 | r->handler.callback_data = r; | 791 | r->handler.callback_data = r; |
@@ -723,6 +797,7 @@ static int ioctl_allocate(struct client *client, union ioctl_arg *arg) | |||
723 | kfree(r); | 797 | kfree(r); |
724 | return ret; | 798 | return ret; |
725 | } | 799 | } |
800 | a->offset = r->handler.offset; | ||
726 | 801 | ||
727 | r->resource.release = release_address_handler; | 802 | r->resource.release = release_address_handler; |
728 | ret = add_client_resource(client, &r->resource, GFP_KERNEL); | 803 | ret = add_client_resource(client, &r->resource, GFP_KERNEL); |
@@ -757,15 +832,19 @@ static int ioctl_send_response(struct client *client, union ioctl_arg *arg) | |||
757 | if (is_fcp_request(r->request)) | 832 | if (is_fcp_request(r->request)) |
758 | goto out; | 833 | goto out; |
759 | 834 | ||
760 | if (a->length < r->length) | 835 | if (a->length != fw_get_response_length(r->request)) { |
761 | r->length = a->length; | 836 | ret = -EINVAL; |
762 | if (copy_from_user(r->data, u64_to_uptr(a->data), r->length)) { | 837 | kfree(r->request); |
838 | goto out; | ||
839 | } | ||
840 | if (copy_from_user(r->data, u64_to_uptr(a->data), a->length)) { | ||
763 | ret = -EFAULT; | 841 | ret = -EFAULT; |
764 | kfree(r->request); | 842 | kfree(r->request); |
765 | goto out; | 843 | goto out; |
766 | } | 844 | } |
767 | fw_send_response(client->device->card, r->request, a->rcode); | 845 | fw_send_response(r->card, r->request, a->rcode); |
768 | out: | 846 | out: |
847 | fw_card_put(r->card); | ||
769 | kfree(r); | 848 | kfree(r); |
770 | 849 | ||
771 | return ret; | 850 | return ret; |
@@ -773,8 +852,9 @@ static int ioctl_send_response(struct client *client, union ioctl_arg *arg) | |||
773 | 852 | ||
774 | static int ioctl_initiate_bus_reset(struct client *client, union ioctl_arg *arg) | 853 | static int ioctl_initiate_bus_reset(struct client *client, union ioctl_arg *arg) |
775 | { | 854 | { |
776 | return fw_core_initiate_bus_reset(client->device->card, | 855 | fw_schedule_bus_reset(client->device->card, true, |
777 | arg->initiate_bus_reset.type == FW_CDEV_SHORT_RESET); | 856 | arg->initiate_bus_reset.type == FW_CDEV_SHORT_RESET); |
857 | return 0; | ||
778 | } | 858 | } |
779 | 859 | ||
780 | static void release_descriptor(struct client *client, | 860 | static void release_descriptor(struct client *client, |
@@ -845,10 +925,11 @@ static void iso_callback(struct fw_iso_context *context, u32 cycle, | |||
845 | struct client *client = data; | 925 | struct client *client = data; |
846 | struct iso_interrupt_event *e; | 926 | struct iso_interrupt_event *e; |
847 | 927 | ||
848 | e = kzalloc(sizeof(*e) + header_length, GFP_ATOMIC); | 928 | e = kmalloc(sizeof(*e) + header_length, GFP_ATOMIC); |
849 | if (e == NULL) | 929 | if (e == NULL) { |
930 | fw_notify("Out of memory when allocating event\n"); | ||
850 | return; | 931 | return; |
851 | 932 | } | |
852 | e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT; | 933 | e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT; |
853 | e->interrupt.closure = client->iso_closure; | 934 | e->interrupt.closure = client->iso_closure; |
854 | e->interrupt.cycle = cycle; | 935 | e->interrupt.cycle = cycle; |
@@ -858,27 +939,54 @@ static void iso_callback(struct fw_iso_context *context, u32 cycle, | |||
858 | sizeof(e->interrupt) + header_length, NULL, 0); | 939 | sizeof(e->interrupt) + header_length, NULL, 0); |
859 | } | 940 | } |
860 | 941 | ||
942 | static void iso_mc_callback(struct fw_iso_context *context, | ||
943 | dma_addr_t completed, void *data) | ||
944 | { | ||
945 | struct client *client = data; | ||
946 | struct iso_interrupt_mc_event *e; | ||
947 | |||
948 | e = kmalloc(sizeof(*e), GFP_ATOMIC); | ||
949 | if (e == NULL) { | ||
950 | fw_notify("Out of memory when allocating event\n"); | ||
951 | return; | ||
952 | } | ||
953 | e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL; | ||
954 | e->interrupt.closure = client->iso_closure; | ||
955 | e->interrupt.completed = fw_iso_buffer_lookup(&client->buffer, | ||
956 | completed); | ||
957 | queue_event(client, &e->event, &e->interrupt, | ||
958 | sizeof(e->interrupt), NULL, 0); | ||
959 | } | ||
960 | |||
861 | static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg) | 961 | static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg) |
862 | { | 962 | { |
863 | struct fw_cdev_create_iso_context *a = &arg->create_iso_context; | 963 | struct fw_cdev_create_iso_context *a = &arg->create_iso_context; |
864 | struct fw_iso_context *context; | 964 | struct fw_iso_context *context; |
965 | fw_iso_callback_t cb; | ||
865 | 966 | ||
866 | /* We only support one context at this time. */ | 967 | BUILD_BUG_ON(FW_CDEV_ISO_CONTEXT_TRANSMIT != FW_ISO_CONTEXT_TRANSMIT || |
867 | if (client->iso_context != NULL) | 968 | FW_CDEV_ISO_CONTEXT_RECEIVE != FW_ISO_CONTEXT_RECEIVE || |
868 | return -EBUSY; | 969 | FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL != |
869 | 970 | FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL); | |
870 | if (a->channel > 63) | ||
871 | return -EINVAL; | ||
872 | 971 | ||
873 | switch (a->type) { | 972 | switch (a->type) { |
874 | case FW_ISO_CONTEXT_RECEIVE: | 973 | case FW_ISO_CONTEXT_TRANSMIT: |
875 | if (a->header_size < 4 || (a->header_size & 3)) | 974 | if (a->speed > SCODE_3200 || a->channel > 63) |
876 | return -EINVAL; | 975 | return -EINVAL; |
976 | |||
977 | cb = iso_callback; | ||
877 | break; | 978 | break; |
878 | 979 | ||
879 | case FW_ISO_CONTEXT_TRANSMIT: | 980 | case FW_ISO_CONTEXT_RECEIVE: |
880 | if (a->speed > SCODE_3200) | 981 | if (a->header_size < 4 || (a->header_size & 3) || |
982 | a->channel > 63) | ||
881 | return -EINVAL; | 983 | return -EINVAL; |
984 | |||
985 | cb = iso_callback; | ||
986 | break; | ||
987 | |||
988 | case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: | ||
989 | cb = (fw_iso_callback_t)iso_mc_callback; | ||
882 | break; | 990 | break; |
883 | 991 | ||
884 | default: | 992 | default: |
@@ -886,20 +994,37 @@ static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg) | |||
886 | } | 994 | } |
887 | 995 | ||
888 | context = fw_iso_context_create(client->device->card, a->type, | 996 | context = fw_iso_context_create(client->device->card, a->type, |
889 | a->channel, a->speed, a->header_size, | 997 | a->channel, a->speed, a->header_size, cb, client); |
890 | iso_callback, client); | ||
891 | if (IS_ERR(context)) | 998 | if (IS_ERR(context)) |
892 | return PTR_ERR(context); | 999 | return PTR_ERR(context); |
893 | 1000 | ||
1001 | /* We only support one context at this time. */ | ||
1002 | spin_lock_irq(&client->lock); | ||
1003 | if (client->iso_context != NULL) { | ||
1004 | spin_unlock_irq(&client->lock); | ||
1005 | fw_iso_context_destroy(context); | ||
1006 | return -EBUSY; | ||
1007 | } | ||
894 | client->iso_closure = a->closure; | 1008 | client->iso_closure = a->closure; |
895 | client->iso_context = context; | 1009 | client->iso_context = context; |
1010 | spin_unlock_irq(&client->lock); | ||
896 | 1011 | ||
897 | /* We only support one context at this time. */ | ||
898 | a->handle = 0; | 1012 | a->handle = 0; |
899 | 1013 | ||
900 | return 0; | 1014 | return 0; |
901 | } | 1015 | } |
902 | 1016 | ||
1017 | static int ioctl_set_iso_channels(struct client *client, union ioctl_arg *arg) | ||
1018 | { | ||
1019 | struct fw_cdev_set_iso_channels *a = &arg->set_iso_channels; | ||
1020 | struct fw_iso_context *ctx = client->iso_context; | ||
1021 | |||
1022 | if (ctx == NULL || a->handle != 0) | ||
1023 | return -EINVAL; | ||
1024 | |||
1025 | return fw_iso_context_set_channels(ctx, &a->channels); | ||
1026 | } | ||
1027 | |||
903 | /* Macros for decoding the iso packet control header. */ | 1028 | /* Macros for decoding the iso packet control header. */ |
904 | #define GET_PAYLOAD_LENGTH(v) ((v) & 0xffff) | 1029 | #define GET_PAYLOAD_LENGTH(v) ((v) & 0xffff) |
905 | #define GET_INTERRUPT(v) (((v) >> 16) & 0x01) | 1030 | #define GET_INTERRUPT(v) (((v) >> 16) & 0x01) |
@@ -913,7 +1038,7 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg) | |||
913 | struct fw_cdev_queue_iso *a = &arg->queue_iso; | 1038 | struct fw_cdev_queue_iso *a = &arg->queue_iso; |
914 | struct fw_cdev_iso_packet __user *p, *end, *next; | 1039 | struct fw_cdev_iso_packet __user *p, *end, *next; |
915 | struct fw_iso_context *ctx = client->iso_context; | 1040 | struct fw_iso_context *ctx = client->iso_context; |
916 | unsigned long payload, buffer_end, header_length; | 1041 | unsigned long payload, buffer_end, transmit_header_bytes = 0; |
917 | u32 control; | 1042 | u32 control; |
918 | int count; | 1043 | int count; |
919 | struct { | 1044 | struct { |
@@ -933,7 +1058,6 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg) | |||
933 | * use the indirect payload, the iso buffer need not be mapped | 1058 | * use the indirect payload, the iso buffer need not be mapped |
934 | * and the a->data pointer is ignored. | 1059 | * and the a->data pointer is ignored. |
935 | */ | 1060 | */ |
936 | |||
937 | payload = (unsigned long)a->data - client->vm_start; | 1061 | payload = (unsigned long)a->data - client->vm_start; |
938 | buffer_end = client->buffer.page_count << PAGE_SHIFT; | 1062 | buffer_end = client->buffer.page_count << PAGE_SHIFT; |
939 | if (a->data == 0 || client->buffer.pages == NULL || | 1063 | if (a->data == 0 || client->buffer.pages == NULL || |
@@ -942,8 +1066,10 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg) | |||
942 | buffer_end = 0; | 1066 | buffer_end = 0; |
943 | } | 1067 | } |
944 | 1068 | ||
945 | p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets); | 1069 | if (ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL && payload & 3) |
1070 | return -EINVAL; | ||
946 | 1071 | ||
1072 | p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets); | ||
947 | if (!access_ok(VERIFY_READ, p, a->size)) | 1073 | if (!access_ok(VERIFY_READ, p, a->size)) |
948 | return -EFAULT; | 1074 | return -EFAULT; |
949 | 1075 | ||
@@ -959,31 +1085,32 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg) | |||
959 | u.packet.sy = GET_SY(control); | 1085 | u.packet.sy = GET_SY(control); |
960 | u.packet.header_length = GET_HEADER_LENGTH(control); | 1086 | u.packet.header_length = GET_HEADER_LENGTH(control); |
961 | 1087 | ||
962 | if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) { | 1088 | switch (ctx->type) { |
963 | if (u.packet.header_length % 4 != 0) | 1089 | case FW_ISO_CONTEXT_TRANSMIT: |
1090 | if (u.packet.header_length & 3) | ||
1091 | return -EINVAL; | ||
1092 | transmit_header_bytes = u.packet.header_length; | ||
1093 | break; | ||
1094 | |||
1095 | case FW_ISO_CONTEXT_RECEIVE: | ||
1096 | if (u.packet.header_length == 0 || | ||
1097 | u.packet.header_length % ctx->header_size != 0) | ||
964 | return -EINVAL; | 1098 | return -EINVAL; |
965 | header_length = u.packet.header_length; | 1099 | break; |
966 | } else { | 1100 | |
967 | /* | 1101 | case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: |
968 | * We require that header_length is a multiple of | 1102 | if (u.packet.payload_length == 0 || |
969 | * the fixed header size, ctx->header_size. | 1103 | u.packet.payload_length & 3) |
970 | */ | ||
971 | if (ctx->header_size == 0) { | ||
972 | if (u.packet.header_length > 0) | ||
973 | return -EINVAL; | ||
974 | } else if (u.packet.header_length == 0 || | ||
975 | u.packet.header_length % ctx->header_size != 0) { | ||
976 | return -EINVAL; | 1104 | return -EINVAL; |
977 | } | 1105 | break; |
978 | header_length = 0; | ||
979 | } | 1106 | } |
980 | 1107 | ||
981 | next = (struct fw_cdev_iso_packet __user *) | 1108 | next = (struct fw_cdev_iso_packet __user *) |
982 | &p->header[header_length / 4]; | 1109 | &p->header[transmit_header_bytes / 4]; |
983 | if (next > end) | 1110 | if (next > end) |
984 | return -EINVAL; | 1111 | return -EINVAL; |
985 | if (__copy_from_user | 1112 | if (__copy_from_user |
986 | (u.packet.header, p->header, header_length)) | 1113 | (u.packet.header, p->header, transmit_header_bytes)) |
987 | return -EFAULT; | 1114 | return -EFAULT; |
988 | if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT && | 1115 | if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT && |
989 | u.packet.header_length + u.packet.payload_length > 0) | 1116 | u.packet.header_length + u.packet.payload_length > 0) |
@@ -1011,6 +1138,13 @@ static int ioctl_start_iso(struct client *client, union ioctl_arg *arg) | |||
1011 | { | 1138 | { |
1012 | struct fw_cdev_start_iso *a = &arg->start_iso; | 1139 | struct fw_cdev_start_iso *a = &arg->start_iso; |
1013 | 1140 | ||
1141 | BUILD_BUG_ON( | ||
1142 | FW_CDEV_ISO_CONTEXT_MATCH_TAG0 != FW_ISO_CONTEXT_MATCH_TAG0 || | ||
1143 | FW_CDEV_ISO_CONTEXT_MATCH_TAG1 != FW_ISO_CONTEXT_MATCH_TAG1 || | ||
1144 | FW_CDEV_ISO_CONTEXT_MATCH_TAG2 != FW_ISO_CONTEXT_MATCH_TAG2 || | ||
1145 | FW_CDEV_ISO_CONTEXT_MATCH_TAG3 != FW_ISO_CONTEXT_MATCH_TAG3 || | ||
1146 | FW_CDEV_ISO_CONTEXT_MATCH_ALL_TAGS != FW_ISO_CONTEXT_MATCH_ALL_TAGS); | ||
1147 | |||
1014 | if (client->iso_context == NULL || a->handle != 0) | 1148 | if (client->iso_context == NULL || a->handle != 0) |
1015 | return -EINVAL; | 1149 | return -EINVAL; |
1016 | 1150 | ||
@@ -1042,7 +1176,7 @@ static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg) | |||
1042 | 1176 | ||
1043 | local_irq_disable(); | 1177 | local_irq_disable(); |
1044 | 1178 | ||
1045 | cycle_time = card->driver->get_cycle_time(card); | 1179 | cycle_time = card->driver->read_csr(card, CSR_CYCLE_TIME); |
1046 | 1180 | ||
1047 | switch (a->clk_id) { | 1181 | switch (a->clk_id) { |
1048 | case CLOCK_REALTIME: getnstimeofday(&ts); break; | 1182 | case CLOCK_REALTIME: getnstimeofday(&ts); break; |
@@ -1323,28 +1457,135 @@ static int ioctl_send_stream_packet(struct client *client, union ioctl_arg *arg) | |||
1323 | return init_request(client, &request, dest, a->speed); | 1457 | return init_request(client, &request, dest, a->speed); |
1324 | } | 1458 | } |
1325 | 1459 | ||
1460 | static void outbound_phy_packet_callback(struct fw_packet *packet, | ||
1461 | struct fw_card *card, int status) | ||
1462 | { | ||
1463 | struct outbound_phy_packet_event *e = | ||
1464 | container_of(packet, struct outbound_phy_packet_event, p); | ||
1465 | |||
1466 | switch (status) { | ||
1467 | /* expected: */ | ||
1468 | case ACK_COMPLETE: e->phy_packet.rcode = RCODE_COMPLETE; break; | ||
1469 | /* should never happen with PHY packets: */ | ||
1470 | case ACK_PENDING: e->phy_packet.rcode = RCODE_COMPLETE; break; | ||
1471 | case ACK_BUSY_X: | ||
1472 | case ACK_BUSY_A: | ||
1473 | case ACK_BUSY_B: e->phy_packet.rcode = RCODE_BUSY; break; | ||
1474 | case ACK_DATA_ERROR: e->phy_packet.rcode = RCODE_DATA_ERROR; break; | ||
1475 | case ACK_TYPE_ERROR: e->phy_packet.rcode = RCODE_TYPE_ERROR; break; | ||
1476 | /* stale generation; cancelled; on certain controllers: no ack */ | ||
1477 | default: e->phy_packet.rcode = status; break; | ||
1478 | } | ||
1479 | e->phy_packet.data[0] = packet->timestamp; | ||
1480 | |||
1481 | queue_event(e->client, &e->event, &e->phy_packet, | ||
1482 | sizeof(e->phy_packet) + e->phy_packet.length, NULL, 0); | ||
1483 | client_put(e->client); | ||
1484 | } | ||
1485 | |||
1486 | static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg) | ||
1487 | { | ||
1488 | struct fw_cdev_send_phy_packet *a = &arg->send_phy_packet; | ||
1489 | struct fw_card *card = client->device->card; | ||
1490 | struct outbound_phy_packet_event *e; | ||
1491 | |||
1492 | /* Access policy: Allow this ioctl only on local nodes' device files. */ | ||
1493 | if (!client->device->is_local) | ||
1494 | return -ENOSYS; | ||
1495 | |||
1496 | e = kzalloc(sizeof(*e) + 4, GFP_KERNEL); | ||
1497 | if (e == NULL) | ||
1498 | return -ENOMEM; | ||
1499 | |||
1500 | client_get(client); | ||
1501 | e->client = client; | ||
1502 | e->p.speed = SCODE_100; | ||
1503 | e->p.generation = a->generation; | ||
1504 | e->p.header[0] = a->data[0]; | ||
1505 | e->p.header[1] = a->data[1]; | ||
1506 | e->p.header_length = 8; | ||
1507 | e->p.callback = outbound_phy_packet_callback; | ||
1508 | e->phy_packet.closure = a->closure; | ||
1509 | e->phy_packet.type = FW_CDEV_EVENT_PHY_PACKET_SENT; | ||
1510 | if (is_ping_packet(a->data)) | ||
1511 | e->phy_packet.length = 4; | ||
1512 | |||
1513 | card->driver->send_request(card, &e->p); | ||
1514 | |||
1515 | return 0; | ||
1516 | } | ||
1517 | |||
1518 | static int ioctl_receive_phy_packets(struct client *client, union ioctl_arg *arg) | ||
1519 | { | ||
1520 | struct fw_cdev_receive_phy_packets *a = &arg->receive_phy_packets; | ||
1521 | struct fw_card *card = client->device->card; | ||
1522 | |||
1523 | /* Access policy: Allow this ioctl only on local nodes' device files. */ | ||
1524 | if (!client->device->is_local) | ||
1525 | return -ENOSYS; | ||
1526 | |||
1527 | spin_lock_irq(&card->lock); | ||
1528 | |||
1529 | list_move_tail(&client->phy_receiver_link, &card->phy_receiver_list); | ||
1530 | client->phy_receiver_closure = a->closure; | ||
1531 | |||
1532 | spin_unlock_irq(&card->lock); | ||
1533 | |||
1534 | return 0; | ||
1535 | } | ||
1536 | |||
1537 | void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p) | ||
1538 | { | ||
1539 | struct client *client; | ||
1540 | struct inbound_phy_packet_event *e; | ||
1541 | unsigned long flags; | ||
1542 | |||
1543 | spin_lock_irqsave(&card->lock, flags); | ||
1544 | |||
1545 | list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) { | ||
1546 | e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC); | ||
1547 | if (e == NULL) { | ||
1548 | fw_notify("Out of memory when allocating event\n"); | ||
1549 | break; | ||
1550 | } | ||
1551 | e->phy_packet.closure = client->phy_receiver_closure; | ||
1552 | e->phy_packet.type = FW_CDEV_EVENT_PHY_PACKET_RECEIVED; | ||
1553 | e->phy_packet.rcode = RCODE_COMPLETE; | ||
1554 | e->phy_packet.length = 8; | ||
1555 | e->phy_packet.data[0] = p->header[1]; | ||
1556 | e->phy_packet.data[1] = p->header[2]; | ||
1557 | queue_event(client, &e->event, | ||
1558 | &e->phy_packet, sizeof(e->phy_packet) + 8, NULL, 0); | ||
1559 | } | ||
1560 | |||
1561 | spin_unlock_irqrestore(&card->lock, flags); | ||
1562 | } | ||
1563 | |||
1326 | static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = { | 1564 | static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = { |
1327 | ioctl_get_info, | 1565 | [0x00] = ioctl_get_info, |
1328 | ioctl_send_request, | 1566 | [0x01] = ioctl_send_request, |
1329 | ioctl_allocate, | 1567 | [0x02] = ioctl_allocate, |
1330 | ioctl_deallocate, | 1568 | [0x03] = ioctl_deallocate, |
1331 | ioctl_send_response, | 1569 | [0x04] = ioctl_send_response, |
1332 | ioctl_initiate_bus_reset, | 1570 | [0x05] = ioctl_initiate_bus_reset, |
1333 | ioctl_add_descriptor, | 1571 | [0x06] = ioctl_add_descriptor, |
1334 | ioctl_remove_descriptor, | 1572 | [0x07] = ioctl_remove_descriptor, |
1335 | ioctl_create_iso_context, | 1573 | [0x08] = ioctl_create_iso_context, |
1336 | ioctl_queue_iso, | 1574 | [0x09] = ioctl_queue_iso, |
1337 | ioctl_start_iso, | 1575 | [0x0a] = ioctl_start_iso, |
1338 | ioctl_stop_iso, | 1576 | [0x0b] = ioctl_stop_iso, |
1339 | ioctl_get_cycle_timer, | 1577 | [0x0c] = ioctl_get_cycle_timer, |
1340 | ioctl_allocate_iso_resource, | 1578 | [0x0d] = ioctl_allocate_iso_resource, |
1341 | ioctl_deallocate_iso_resource, | 1579 | [0x0e] = ioctl_deallocate_iso_resource, |
1342 | ioctl_allocate_iso_resource_once, | 1580 | [0x0f] = ioctl_allocate_iso_resource_once, |
1343 | ioctl_deallocate_iso_resource_once, | 1581 | [0x10] = ioctl_deallocate_iso_resource_once, |
1344 | ioctl_get_speed, | 1582 | [0x11] = ioctl_get_speed, |
1345 | ioctl_send_broadcast_request, | 1583 | [0x12] = ioctl_send_broadcast_request, |
1346 | ioctl_send_stream_packet, | 1584 | [0x13] = ioctl_send_stream_packet, |
1347 | ioctl_get_cycle_timer2, | 1585 | [0x14] = ioctl_get_cycle_timer2, |
1586 | [0x15] = ioctl_send_phy_packet, | ||
1587 | [0x16] = ioctl_receive_phy_packets, | ||
1588 | [0x17] = ioctl_set_iso_channels, | ||
1348 | }; | 1589 | }; |
1349 | 1590 | ||
1350 | static int dispatch_ioctl(struct client *client, | 1591 | static int dispatch_ioctl(struct client *client, |
@@ -1452,6 +1693,10 @@ static int fw_device_op_release(struct inode *inode, struct file *file) | |||
1452 | struct client *client = file->private_data; | 1693 | struct client *client = file->private_data; |
1453 | struct event *event, *next_event; | 1694 | struct event *event, *next_event; |
1454 | 1695 | ||
1696 | spin_lock_irq(&client->device->card->lock); | ||
1697 | list_del(&client->phy_receiver_link); | ||
1698 | spin_unlock_irq(&client->device->card->lock); | ||
1699 | |||
1455 | mutex_lock(&client->device->client_list_mutex); | 1700 | mutex_lock(&client->device->client_list_mutex); |
1456 | list_del(&client->link); | 1701 | list_del(&client->link); |
1457 | mutex_unlock(&client->device->client_list_mutex); | 1702 | mutex_unlock(&client->device->client_list_mutex); |
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c index 4b8523f00dce..6113b896e790 100644 --- a/drivers/firewire/core-device.c +++ b/drivers/firewire/core-device.c | |||
@@ -107,11 +107,11 @@ static int textual_leaf_to_string(const u32 *block, char *buf, size_t size) | |||
107 | } | 107 | } |
108 | 108 | ||
109 | /** | 109 | /** |
110 | * fw_csr_string - reads a string from the configuration ROM | 110 | * fw_csr_string() - reads a string from the configuration ROM |
111 | * @directory: e.g. root directory or unit directory | 111 | * @directory: e.g. root directory or unit directory |
112 | * @key: the key of the preceding directory entry | 112 | * @key: the key of the preceding directory entry |
113 | * @buf: where to put the string | 113 | * @buf: where to put the string |
114 | * @size: size of @buf, in bytes | 114 | * @size: size of @buf, in bytes |
115 | * | 115 | * |
116 | * The string is taken from a minimal ASCII text descriptor leaf after | 116 | * The string is taken from a minimal ASCII text descriptor leaf after |
117 | * the immediate entry with @key. The string is zero-terminated. | 117 | * the immediate entry with @key. The string is zero-terminated. |
@@ -1136,6 +1136,7 @@ static void fw_device_refresh(struct work_struct *work) | |||
1136 | goto give_up; | 1136 | goto give_up; |
1137 | } | 1137 | } |
1138 | 1138 | ||
1139 | fw_device_cdev_update(device); | ||
1139 | create_units(device); | 1140 | create_units(device); |
1140 | 1141 | ||
1141 | /* Userspace may want to re-read attributes. */ | 1142 | /* Userspace may want to re-read attributes. */ |
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c index 8f5aebfb29df..c003fa4e2db1 100644 --- a/drivers/firewire/core-iso.c +++ b/drivers/firewire/core-iso.c | |||
@@ -118,6 +118,23 @@ void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, | |||
118 | } | 118 | } |
119 | EXPORT_SYMBOL(fw_iso_buffer_destroy); | 119 | EXPORT_SYMBOL(fw_iso_buffer_destroy); |
120 | 120 | ||
121 | /* Convert DMA address to offset into virtually contiguous buffer. */ | ||
122 | size_t fw_iso_buffer_lookup(struct fw_iso_buffer *buffer, dma_addr_t completed) | ||
123 | { | ||
124 | int i; | ||
125 | dma_addr_t address; | ||
126 | ssize_t offset; | ||
127 | |||
128 | for (i = 0; i < buffer->page_count; i++) { | ||
129 | address = page_private(buffer->pages[i]); | ||
130 | offset = (ssize_t)completed - (ssize_t)address; | ||
131 | if (offset > 0 && offset <= PAGE_SIZE) | ||
132 | return (i << PAGE_SHIFT) + offset; | ||
133 | } | ||
134 | |||
135 | return 0; | ||
136 | } | ||
137 | |||
121 | struct fw_iso_context *fw_iso_context_create(struct fw_card *card, | 138 | struct fw_iso_context *fw_iso_context_create(struct fw_card *card, |
122 | int type, int channel, int speed, size_t header_size, | 139 | int type, int channel, int speed, size_t header_size, |
123 | fw_iso_callback_t callback, void *callback_data) | 140 | fw_iso_callback_t callback, void *callback_data) |
@@ -134,7 +151,7 @@ struct fw_iso_context *fw_iso_context_create(struct fw_card *card, | |||
134 | ctx->channel = channel; | 151 | ctx->channel = channel; |
135 | ctx->speed = speed; | 152 | ctx->speed = speed; |
136 | ctx->header_size = header_size; | 153 | ctx->header_size = header_size; |
137 | ctx->callback = callback; | 154 | ctx->callback.sc = callback; |
138 | ctx->callback_data = callback_data; | 155 | ctx->callback_data = callback_data; |
139 | 156 | ||
140 | return ctx; | 157 | return ctx; |
@@ -143,9 +160,7 @@ EXPORT_SYMBOL(fw_iso_context_create); | |||
143 | 160 | ||
144 | void fw_iso_context_destroy(struct fw_iso_context *ctx) | 161 | void fw_iso_context_destroy(struct fw_iso_context *ctx) |
145 | { | 162 | { |
146 | struct fw_card *card = ctx->card; | 163 | ctx->card->driver->free_iso_context(ctx); |
147 | |||
148 | card->driver->free_iso_context(ctx); | ||
149 | } | 164 | } |
150 | EXPORT_SYMBOL(fw_iso_context_destroy); | 165 | EXPORT_SYMBOL(fw_iso_context_destroy); |
151 | 166 | ||
@@ -156,14 +171,17 @@ int fw_iso_context_start(struct fw_iso_context *ctx, | |||
156 | } | 171 | } |
157 | EXPORT_SYMBOL(fw_iso_context_start); | 172 | EXPORT_SYMBOL(fw_iso_context_start); |
158 | 173 | ||
174 | int fw_iso_context_set_channels(struct fw_iso_context *ctx, u64 *channels) | ||
175 | { | ||
176 | return ctx->card->driver->set_iso_channels(ctx, channels); | ||
177 | } | ||
178 | |||
159 | int fw_iso_context_queue(struct fw_iso_context *ctx, | 179 | int fw_iso_context_queue(struct fw_iso_context *ctx, |
160 | struct fw_iso_packet *packet, | 180 | struct fw_iso_packet *packet, |
161 | struct fw_iso_buffer *buffer, | 181 | struct fw_iso_buffer *buffer, |
162 | unsigned long payload) | 182 | unsigned long payload) |
163 | { | 183 | { |
164 | struct fw_card *card = ctx->card; | 184 | return ctx->card->driver->queue_iso(ctx, packet, buffer, payload); |
165 | |||
166 | return card->driver->queue_iso(ctx, packet, buffer, payload); | ||
167 | } | 185 | } |
168 | EXPORT_SYMBOL(fw_iso_context_queue); | 186 | EXPORT_SYMBOL(fw_iso_context_queue); |
169 | 187 | ||
@@ -279,7 +297,7 @@ static void deallocate_channel(struct fw_card *card, int irm_id, | |||
279 | } | 297 | } |
280 | 298 | ||
281 | /** | 299 | /** |
282 | * fw_iso_resource_manage - Allocate or deallocate a channel and/or bandwidth | 300 | * fw_iso_resource_manage() - Allocate or deallocate a channel and/or bandwidth |
283 | * | 301 | * |
284 | * In parameters: card, generation, channels_mask, bandwidth, allocate | 302 | * In parameters: card, generation, channels_mask, bandwidth, allocate |
285 | * Out parameters: channel, bandwidth | 303 | * Out parameters: channel, bandwidth |
diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c index 93ec64cdeef7..09be1a635505 100644 --- a/drivers/firewire/core-topology.c +++ b/drivers/firewire/core-topology.c | |||
@@ -174,12 +174,7 @@ static inline struct fw_node *fw_node(struct list_head *l) | |||
174 | return list_entry(l, struct fw_node, link); | 174 | return list_entry(l, struct fw_node, link); |
175 | } | 175 | } |
176 | 176 | ||
177 | /** | 177 | /* |
178 | * build_tree - Build the tree representation of the topology | ||
179 | * @self_ids: array of self IDs to create the tree from | ||
180 | * @self_id_count: the length of the self_ids array | ||
181 | * @local_id: the node ID of the local node | ||
182 | * | ||
183 | * This function builds the tree representation of the topology given | 178 | * This function builds the tree representation of the topology given |
184 | * by the self IDs from the latest bus reset. During the construction | 179 | * by the self IDs from the latest bus reset. During the construction |
185 | * of the tree, the function checks that the self IDs are valid and | 180 | * of the tree, the function checks that the self IDs are valid and |
@@ -420,11 +415,10 @@ static void move_tree(struct fw_node *node0, struct fw_node *node1, int port) | |||
420 | } | 415 | } |
421 | } | 416 | } |
422 | 417 | ||
423 | /** | 418 | /* |
424 | * update_tree - compare the old topology tree for card with the new | 419 | * Compare the old topology tree for card with the new one specified by root. |
425 | * one specified by root. Queue the nodes and mark them as either | 420 | * Queue the nodes and mark them as either found, lost or updated. |
426 | * found, lost or updated. Update the nodes in the card topology tree | 421 | * Update the nodes in the card topology tree as we go. |
427 | * as we go. | ||
428 | */ | 422 | */ |
429 | static void update_tree(struct fw_card *card, struct fw_node *root) | 423 | static void update_tree(struct fw_card *card, struct fw_node *root) |
430 | { | 424 | { |
@@ -524,7 +518,7 @@ static void update_topology_map(struct fw_card *card, | |||
524 | } | 518 | } |
525 | 519 | ||
526 | void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation, | 520 | void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation, |
527 | int self_id_count, u32 *self_ids) | 521 | int self_id_count, u32 *self_ids, bool bm_abdicate) |
528 | { | 522 | { |
529 | struct fw_node *local_node; | 523 | struct fw_node *local_node; |
530 | unsigned long flags; | 524 | unsigned long flags; |
@@ -543,7 +537,7 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation, | |||
543 | 537 | ||
544 | spin_lock_irqsave(&card->lock, flags); | 538 | spin_lock_irqsave(&card->lock, flags); |
545 | 539 | ||
546 | card->broadcast_channel_allocated = false; | 540 | card->broadcast_channel_allocated = card->broadcast_channel_auto_allocated; |
547 | card->node_id = node_id; | 541 | card->node_id = node_id; |
548 | /* | 542 | /* |
549 | * Update node_id before generation to prevent anybody from using | 543 | * Update node_id before generation to prevent anybody from using |
@@ -552,6 +546,8 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation, | |||
552 | smp_wmb(); | 546 | smp_wmb(); |
553 | card->generation = generation; | 547 | card->generation = generation; |
554 | card->reset_jiffies = jiffies; | 548 | card->reset_jiffies = jiffies; |
549 | card->bm_node_id = 0xffff; | ||
550 | card->bm_abdicate = bm_abdicate; | ||
555 | fw_schedule_bm_work(card, 0); | 551 | fw_schedule_bm_work(card, 0); |
556 | 552 | ||
557 | local_node = build_tree(card, self_ids, self_id_count); | 553 | local_node = build_tree(card, self_ids, self_id_count); |
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c index fdc33ff06dc1..ca7ca56661e0 100644 --- a/drivers/firewire/core-transaction.c +++ b/drivers/firewire/core-transaction.c | |||
@@ -246,7 +246,7 @@ static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel, | |||
246 | break; | 246 | break; |
247 | 247 | ||
248 | default: | 248 | default: |
249 | WARN(1, KERN_ERR "wrong tcode %d", tcode); | 249 | WARN(1, "wrong tcode %d", tcode); |
250 | } | 250 | } |
251 | common: | 251 | common: |
252 | packet->speed = speed; | 252 | packet->speed = speed; |
@@ -273,43 +273,52 @@ static int allocate_tlabel(struct fw_card *card) | |||
273 | } | 273 | } |
274 | 274 | ||
275 | /** | 275 | /** |
276 | * This function provides low-level access to the IEEE1394 transaction | 276 | * fw_send_request() - submit a request packet for transmission |
277 | * logic. Most C programs would use either fw_read(), fw_write() or | 277 | * @card: interface to send the request at |
278 | * fw_lock() instead - those function are convenience wrappers for | 278 | * @t: transaction instance to which the request belongs |
279 | * this function. The fw_send_request() function is primarily | 279 | * @tcode: transaction code |
280 | * provided as a flexible, one-stop entry point for languages bindings | 280 | * @destination_id: destination node ID, consisting of bus_ID and phy_ID |
281 | * and protocol bindings. | 281 | * @generation: bus generation in which request and response are valid |
282 | * @speed: transmission speed | ||
283 | * @offset: 48bit wide offset into destination's address space | ||
284 | * @payload: data payload for the request subaction | ||
285 | * @length: length of the payload, in bytes | ||
286 | * @callback: function to be called when the transaction is completed | ||
287 | * @callback_data: data to be passed to the transaction completion callback | ||
282 | * | 288 | * |
283 | * FIXME: Document this function further, in particular the possible | 289 | * Submit a request packet into the asynchronous request transmission queue. |
284 | * values for rcode in the callback. In short, we map ACK_COMPLETE to | 290 | * Can be called from atomic context. If you prefer a blocking API, use |
285 | * RCODE_COMPLETE, internal errors set errno and set rcode to | 291 | * fw_run_transaction() in a context that can sleep. |
286 | * RCODE_SEND_ERROR (which is out of range for standard ieee1394 | ||
287 | * rcodes). All other rcodes are forwarded unchanged. For all | ||
288 | * errors, payload is NULL, length is 0. | ||
289 | * | 292 | * |
290 | * Can not expect the callback to be called before the function | 293 | * In case of lock requests, specify one of the firewire-core specific %TCODE_ |
291 | * returns, though this does happen in some cases (ACK_COMPLETE and | 294 | * constants instead of %TCODE_LOCK_REQUEST in @tcode. |
292 | * errors). | ||
293 | * | 295 | * |
294 | * The payload is only used for write requests and must not be freed | 296 | * Make sure that the value in @destination_id is not older than the one in |
295 | * until the callback has been called. | 297 | * @generation. Otherwise the request is in danger to be sent to a wrong node. |
296 | * | 298 | * |
297 | * @param card the card from which to send the request | 299 | * In case of asynchronous stream packets i.e. %TCODE_STREAM_DATA, the caller |
298 | * @param tcode the tcode for this transaction. Do not use | ||
299 | * TCODE_LOCK_REQUEST directly, instead use TCODE_LOCK_MASK_SWAP | ||
300 | * etc. to specify tcode and ext_tcode. | ||
301 | * @param node_id the destination node ID (bus ID and PHY ID concatenated) | ||
302 | * @param generation the generation for which node_id is valid | ||
303 | * @param speed the speed to use for sending the request | ||
304 | * @param offset the 48 bit offset on the destination node | ||
305 | * @param payload the data payload for the request subaction | ||
306 | * @param length the length in bytes of the data to read | ||
307 | * @param callback function to be called when the transaction is completed | ||
308 | * @param callback_data pointer to arbitrary data, which will be | ||
309 | * passed to the callback | ||
310 | * | ||
311 | * In case of asynchronous stream packets i.e. TCODE_STREAM_DATA, the caller | ||
312 | * needs to synthesize @destination_id with fw_stream_packet_destination_id(). | 300 | * needs to synthesize @destination_id with fw_stream_packet_destination_id(). |
301 | * It will contain tag, channel, and sy data instead of a node ID then. | ||
302 | * | ||
303 | * The payload buffer at @data is going to be DMA-mapped except in case of | ||
304 | * quadlet-sized payload or of local (loopback) requests. Hence make sure that | ||
305 | * the buffer complies with the restrictions for DMA-mapped memory. The | ||
306 | * @payload must not be freed before the @callback is called. | ||
307 | * | ||
308 | * In case of request types without payload, @data is NULL and @length is 0. | ||
309 | * | ||
310 | * After the transaction is completed successfully or unsuccessfully, the | ||
311 | * @callback will be called. Among its parameters is the response code which | ||
312 | * is either one of the rcodes per IEEE 1394 or, in case of internal errors, | ||
313 | * the firewire-core specific %RCODE_SEND_ERROR. The other firewire-core | ||
314 | * specific rcodes (%RCODE_CANCELLED, %RCODE_BUSY, %RCODE_GENERATION, | ||
315 | * %RCODE_NO_ACK) denote transaction timeout, busy responder, stale request | ||
316 | * generation, or missing ACK respectively. | ||
317 | * | ||
318 | * Note some timing corner cases: fw_send_request() may complete much earlier | ||
319 | * than when the request packet actually hits the wire. On the other hand, | ||
320 | * transaction completion and hence execution of @callback may happen even | ||
321 | * before fw_send_request() returns. | ||
313 | */ | 322 | */ |
314 | void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode, | 323 | void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode, |
315 | int destination_id, int generation, int speed, | 324 | int destination_id, int generation, int speed, |
@@ -339,7 +348,8 @@ void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode, | |||
339 | setup_timer(&t->split_timeout_timer, | 348 | setup_timer(&t->split_timeout_timer, |
340 | split_transaction_timeout_callback, (unsigned long)t); | 349 | split_transaction_timeout_callback, (unsigned long)t); |
341 | /* FIXME: start this timer later, relative to t->timestamp */ | 350 | /* FIXME: start this timer later, relative to t->timestamp */ |
342 | mod_timer(&t->split_timeout_timer, jiffies + DIV_ROUND_UP(HZ, 10)); | 351 | mod_timer(&t->split_timeout_timer, |
352 | jiffies + card->split_timeout_jiffies); | ||
343 | t->callback = callback; | 353 | t->callback = callback; |
344 | t->callback_data = callback_data; | 354 | t->callback_data = callback_data; |
345 | 355 | ||
@@ -374,9 +384,11 @@ static void transaction_callback(struct fw_card *card, int rcode, | |||
374 | } | 384 | } |
375 | 385 | ||
376 | /** | 386 | /** |
377 | * fw_run_transaction - send request and sleep until transaction is completed | 387 | * fw_run_transaction() - send request and sleep until transaction is completed |
378 | * | 388 | * |
379 | * Returns the RCODE. | 389 | * Returns the RCODE. See fw_send_request() for parameter documentation. |
390 | * Unlike fw_send_request(), @data points to the payload of the request or/and | ||
391 | * to the payload of the response. | ||
380 | */ | 392 | */ |
381 | int fw_run_transaction(struct fw_card *card, int tcode, int destination_id, | 393 | int fw_run_transaction(struct fw_card *card, int tcode, int destination_id, |
382 | int generation, int speed, unsigned long long offset, | 394 | int generation, int speed, unsigned long long offset, |
@@ -417,9 +429,21 @@ void fw_send_phy_config(struct fw_card *card, | |||
417 | int node_id, int generation, int gap_count) | 429 | int node_id, int generation, int gap_count) |
418 | { | 430 | { |
419 | long timeout = DIV_ROUND_UP(HZ, 10); | 431 | long timeout = DIV_ROUND_UP(HZ, 10); |
420 | u32 data = PHY_IDENTIFIER(PHY_PACKET_CONFIG) | | 432 | u32 data = PHY_IDENTIFIER(PHY_PACKET_CONFIG); |
421 | PHY_CONFIG_ROOT_ID(node_id) | | 433 | |
422 | PHY_CONFIG_GAP_COUNT(gap_count); | 434 | if (node_id != FW_PHY_CONFIG_NO_NODE_ID) |
435 | data |= PHY_CONFIG_ROOT_ID(node_id); | ||
436 | |||
437 | if (gap_count == FW_PHY_CONFIG_CURRENT_GAP_COUNT) { | ||
438 | gap_count = card->driver->read_phy_reg(card, 1); | ||
439 | if (gap_count < 0) | ||
440 | return; | ||
441 | |||
442 | gap_count &= 63; | ||
443 | if (gap_count == 63) | ||
444 | return; | ||
445 | } | ||
446 | data |= PHY_CONFIG_GAP_COUNT(gap_count); | ||
423 | 447 | ||
424 | mutex_lock(&phy_config_mutex); | 448 | mutex_lock(&phy_config_mutex); |
425 | 449 | ||
@@ -494,9 +518,9 @@ static bool is_in_fcp_region(u64 offset, size_t length) | |||
494 | } | 518 | } |
495 | 519 | ||
496 | /** | 520 | /** |
497 | * fw_core_add_address_handler - register for incoming requests | 521 | * fw_core_add_address_handler() - register for incoming requests |
498 | * @handler: callback | 522 | * @handler: callback |
499 | * @region: region in the IEEE 1212 node space address range | 523 | * @region: region in the IEEE 1212 node space address range |
500 | * | 524 | * |
501 | * region->start, ->end, and handler->length have to be quadlet-aligned. | 525 | * region->start, ->end, and handler->length have to be quadlet-aligned. |
502 | * | 526 | * |
@@ -519,8 +543,8 @@ int fw_core_add_address_handler(struct fw_address_handler *handler, | |||
519 | int ret = -EBUSY; | 543 | int ret = -EBUSY; |
520 | 544 | ||
521 | if (region->start & 0xffff000000000003ULL || | 545 | if (region->start & 0xffff000000000003ULL || |
522 | region->end & 0xffff000000000003ULL || | ||
523 | region->start >= region->end || | 546 | region->start >= region->end || |
547 | region->end > 0x0001000000000000ULL || | ||
524 | handler->length & 3 || | 548 | handler->length & 3 || |
525 | handler->length == 0) | 549 | handler->length == 0) |
526 | return -EINVAL; | 550 | return -EINVAL; |
@@ -551,7 +575,7 @@ int fw_core_add_address_handler(struct fw_address_handler *handler, | |||
551 | EXPORT_SYMBOL(fw_core_add_address_handler); | 575 | EXPORT_SYMBOL(fw_core_add_address_handler); |
552 | 576 | ||
553 | /** | 577 | /** |
554 | * fw_core_remove_address_handler - unregister an address handler | 578 | * fw_core_remove_address_handler() - unregister an address handler |
555 | */ | 579 | */ |
556 | void fw_core_remove_address_handler(struct fw_address_handler *handler) | 580 | void fw_core_remove_address_handler(struct fw_address_handler *handler) |
557 | { | 581 | { |
@@ -580,6 +604,41 @@ static void free_response_callback(struct fw_packet *packet, | |||
580 | kfree(request); | 604 | kfree(request); |
581 | } | 605 | } |
582 | 606 | ||
607 | int fw_get_response_length(struct fw_request *r) | ||
608 | { | ||
609 | int tcode, ext_tcode, data_length; | ||
610 | |||
611 | tcode = HEADER_GET_TCODE(r->request_header[0]); | ||
612 | |||
613 | switch (tcode) { | ||
614 | case TCODE_WRITE_QUADLET_REQUEST: | ||
615 | case TCODE_WRITE_BLOCK_REQUEST: | ||
616 | return 0; | ||
617 | |||
618 | case TCODE_READ_QUADLET_REQUEST: | ||
619 | return 4; | ||
620 | |||
621 | case TCODE_READ_BLOCK_REQUEST: | ||
622 | data_length = HEADER_GET_DATA_LENGTH(r->request_header[3]); | ||
623 | return data_length; | ||
624 | |||
625 | case TCODE_LOCK_REQUEST: | ||
626 | ext_tcode = HEADER_GET_EXTENDED_TCODE(r->request_header[3]); | ||
627 | data_length = HEADER_GET_DATA_LENGTH(r->request_header[3]); | ||
628 | switch (ext_tcode) { | ||
629 | case EXTCODE_FETCH_ADD: | ||
630 | case EXTCODE_LITTLE_ADD: | ||
631 | return data_length; | ||
632 | default: | ||
633 | return data_length / 2; | ||
634 | } | ||
635 | |||
636 | default: | ||
637 | WARN(1, "wrong tcode %d", tcode); | ||
638 | return 0; | ||
639 | } | ||
640 | } | ||
641 | |||
583 | void fw_fill_response(struct fw_packet *response, u32 *request_header, | 642 | void fw_fill_response(struct fw_packet *response, u32 *request_header, |
584 | int rcode, void *payload, size_t length) | 643 | int rcode, void *payload, size_t length) |
585 | { | 644 | { |
@@ -631,18 +690,35 @@ void fw_fill_response(struct fw_packet *response, u32 *request_header, | |||
631 | break; | 690 | break; |
632 | 691 | ||
633 | default: | 692 | default: |
634 | WARN(1, KERN_ERR "wrong tcode %d", tcode); | 693 | WARN(1, "wrong tcode %d", tcode); |
635 | } | 694 | } |
636 | 695 | ||
637 | response->payload_mapped = false; | 696 | response->payload_mapped = false; |
638 | } | 697 | } |
639 | EXPORT_SYMBOL(fw_fill_response); | 698 | EXPORT_SYMBOL(fw_fill_response); |
640 | 699 | ||
641 | static struct fw_request *allocate_request(struct fw_packet *p) | 700 | static u32 compute_split_timeout_timestamp(struct fw_card *card, |
701 | u32 request_timestamp) | ||
702 | { | ||
703 | unsigned int cycles; | ||
704 | u32 timestamp; | ||
705 | |||
706 | cycles = card->split_timeout_cycles; | ||
707 | cycles += request_timestamp & 0x1fff; | ||
708 | |||
709 | timestamp = request_timestamp & ~0x1fff; | ||
710 | timestamp += (cycles / 8000) << 13; | ||
711 | timestamp |= cycles % 8000; | ||
712 | |||
713 | return timestamp; | ||
714 | } | ||
715 | |||
716 | static struct fw_request *allocate_request(struct fw_card *card, | ||
717 | struct fw_packet *p) | ||
642 | { | 718 | { |
643 | struct fw_request *request; | 719 | struct fw_request *request; |
644 | u32 *data, length; | 720 | u32 *data, length; |
645 | int request_tcode, t; | 721 | int request_tcode; |
646 | 722 | ||
647 | request_tcode = HEADER_GET_TCODE(p->header[0]); | 723 | request_tcode = HEADER_GET_TCODE(p->header[0]); |
648 | switch (request_tcode) { | 724 | switch (request_tcode) { |
@@ -677,14 +753,9 @@ static struct fw_request *allocate_request(struct fw_packet *p) | |||
677 | if (request == NULL) | 753 | if (request == NULL) |
678 | return NULL; | 754 | return NULL; |
679 | 755 | ||
680 | t = (p->timestamp & 0x1fff) + 4000; | ||
681 | if (t >= 8000) | ||
682 | t = (p->timestamp & ~0x1fff) + 0x2000 + t - 8000; | ||
683 | else | ||
684 | t = (p->timestamp & ~0x1fff) + t; | ||
685 | |||
686 | request->response.speed = p->speed; | 756 | request->response.speed = p->speed; |
687 | request->response.timestamp = t; | 757 | request->response.timestamp = |
758 | compute_split_timeout_timestamp(card, p->timestamp); | ||
688 | request->response.generation = p->generation; | 759 | request->response.generation = p->generation; |
689 | request->response.ack = 0; | 760 | request->response.ack = 0; |
690 | request->response.callback = free_response_callback; | 761 | request->response.callback = free_response_callback; |
@@ -713,7 +784,8 @@ void fw_send_response(struct fw_card *card, | |||
713 | 784 | ||
714 | if (rcode == RCODE_COMPLETE) | 785 | if (rcode == RCODE_COMPLETE) |
715 | fw_fill_response(&request->response, request->request_header, | 786 | fw_fill_response(&request->response, request->request_header, |
716 | rcode, request->data, request->length); | 787 | rcode, request->data, |
788 | fw_get_response_length(request)); | ||
717 | else | 789 | else |
718 | fw_fill_response(&request->response, request->request_header, | 790 | fw_fill_response(&request->response, request->request_header, |
719 | rcode, NULL, 0); | 791 | rcode, NULL, 0); |
@@ -731,9 +803,11 @@ static void handle_exclusive_region_request(struct fw_card *card, | |||
731 | unsigned long flags; | 803 | unsigned long flags; |
732 | int tcode, destination, source; | 804 | int tcode, destination, source; |
733 | 805 | ||
734 | tcode = HEADER_GET_TCODE(p->header[0]); | ||
735 | destination = HEADER_GET_DESTINATION(p->header[0]); | 806 | destination = HEADER_GET_DESTINATION(p->header[0]); |
736 | source = HEADER_GET_SOURCE(p->header[1]); | 807 | source = HEADER_GET_SOURCE(p->header[1]); |
808 | tcode = HEADER_GET_TCODE(p->header[0]); | ||
809 | if (tcode == TCODE_LOCK_REQUEST) | ||
810 | tcode = 0x10 + HEADER_GET_EXTENDED_TCODE(p->header[3]); | ||
737 | 811 | ||
738 | spin_lock_irqsave(&address_handler_lock, flags); | 812 | spin_lock_irqsave(&address_handler_lock, flags); |
739 | handler = lookup_enclosing_address_handler(&address_handler_list, | 813 | handler = lookup_enclosing_address_handler(&address_handler_list, |
@@ -753,7 +827,7 @@ static void handle_exclusive_region_request(struct fw_card *card, | |||
753 | else | 827 | else |
754 | handler->address_callback(card, request, | 828 | handler->address_callback(card, request, |
755 | tcode, destination, source, | 829 | tcode, destination, source, |
756 | p->generation, p->speed, offset, | 830 | p->generation, offset, |
757 | request->data, request->length, | 831 | request->data, request->length, |
758 | handler->callback_data); | 832 | handler->callback_data); |
759 | } | 833 | } |
@@ -791,8 +865,8 @@ static void handle_fcp_region_request(struct fw_card *card, | |||
791 | if (is_enclosing_handler(handler, offset, request->length)) | 865 | if (is_enclosing_handler(handler, offset, request->length)) |
792 | handler->address_callback(card, NULL, tcode, | 866 | handler->address_callback(card, NULL, tcode, |
793 | destination, source, | 867 | destination, source, |
794 | p->generation, p->speed, | 868 | p->generation, offset, |
795 | offset, request->data, | 869 | request->data, |
796 | request->length, | 870 | request->length, |
797 | handler->callback_data); | 871 | handler->callback_data); |
798 | } | 872 | } |
@@ -809,7 +883,12 @@ void fw_core_handle_request(struct fw_card *card, struct fw_packet *p) | |||
809 | if (p->ack != ACK_PENDING && p->ack != ACK_COMPLETE) | 883 | if (p->ack != ACK_PENDING && p->ack != ACK_COMPLETE) |
810 | return; | 884 | return; |
811 | 885 | ||
812 | request = allocate_request(p); | 886 | if (TCODE_IS_LINK_INTERNAL(HEADER_GET_TCODE(p->header[0]))) { |
887 | fw_cdev_handle_phy_packet(card, p); | ||
888 | return; | ||
889 | } | ||
890 | |||
891 | request = allocate_request(card, p); | ||
813 | if (request == NULL) { | 892 | if (request == NULL) { |
814 | /* FIXME: send statically allocated busy packet. */ | 893 | /* FIXME: send statically allocated busy packet. */ |
815 | return; | 894 | return; |
@@ -832,13 +911,12 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p) | |||
832 | unsigned long flags; | 911 | unsigned long flags; |
833 | u32 *data; | 912 | u32 *data; |
834 | size_t data_length; | 913 | size_t data_length; |
835 | int tcode, tlabel, destination, source, rcode; | 914 | int tcode, tlabel, source, rcode; |
836 | 915 | ||
837 | tcode = HEADER_GET_TCODE(p->header[0]); | 916 | tcode = HEADER_GET_TCODE(p->header[0]); |
838 | tlabel = HEADER_GET_TLABEL(p->header[0]); | 917 | tlabel = HEADER_GET_TLABEL(p->header[0]); |
839 | destination = HEADER_GET_DESTINATION(p->header[0]); | 918 | source = HEADER_GET_SOURCE(p->header[1]); |
840 | source = HEADER_GET_SOURCE(p->header[1]); | 919 | rcode = HEADER_GET_RCODE(p->header[1]); |
841 | rcode = HEADER_GET_RCODE(p->header[1]); | ||
842 | 920 | ||
843 | spin_lock_irqsave(&card->lock, flags); | 921 | spin_lock_irqsave(&card->lock, flags); |
844 | list_for_each_entry(t, &card->transaction_list, link) { | 922 | list_for_each_entry(t, &card->transaction_list, link) { |
@@ -903,8 +981,8 @@ static const struct fw_address_region topology_map_region = | |||
903 | 981 | ||
904 | static void handle_topology_map(struct fw_card *card, struct fw_request *request, | 982 | static void handle_topology_map(struct fw_card *card, struct fw_request *request, |
905 | int tcode, int destination, int source, int generation, | 983 | int tcode, int destination, int source, int generation, |
906 | int speed, unsigned long long offset, | 984 | unsigned long long offset, void *payload, size_t length, |
907 | void *payload, size_t length, void *callback_data) | 985 | void *callback_data) |
908 | { | 986 | { |
909 | int start; | 987 | int start; |
910 | 988 | ||
@@ -933,19 +1011,97 @@ static const struct fw_address_region registers_region = | |||
933 | { .start = CSR_REGISTER_BASE, | 1011 | { .start = CSR_REGISTER_BASE, |
934 | .end = CSR_REGISTER_BASE | CSR_CONFIG_ROM, }; | 1012 | .end = CSR_REGISTER_BASE | CSR_CONFIG_ROM, }; |
935 | 1013 | ||
1014 | static void update_split_timeout(struct fw_card *card) | ||
1015 | { | ||
1016 | unsigned int cycles; | ||
1017 | |||
1018 | cycles = card->split_timeout_hi * 8000 + (card->split_timeout_lo >> 19); | ||
1019 | |||
1020 | cycles = max(cycles, 800u); /* minimum as per the spec */ | ||
1021 | cycles = min(cycles, 3u * 8000u); /* maximum OHCI timeout */ | ||
1022 | |||
1023 | card->split_timeout_cycles = cycles; | ||
1024 | card->split_timeout_jiffies = DIV_ROUND_UP(cycles * HZ, 8000); | ||
1025 | } | ||
1026 | |||
936 | static void handle_registers(struct fw_card *card, struct fw_request *request, | 1027 | static void handle_registers(struct fw_card *card, struct fw_request *request, |
937 | int tcode, int destination, int source, int generation, | 1028 | int tcode, int destination, int source, int generation, |
938 | int speed, unsigned long long offset, | 1029 | unsigned long long offset, void *payload, size_t length, |
939 | void *payload, size_t length, void *callback_data) | 1030 | void *callback_data) |
940 | { | 1031 | { |
941 | int reg = offset & ~CSR_REGISTER_BASE; | 1032 | int reg = offset & ~CSR_REGISTER_BASE; |
942 | __be32 *data = payload; | 1033 | __be32 *data = payload; |
943 | int rcode = RCODE_COMPLETE; | 1034 | int rcode = RCODE_COMPLETE; |
1035 | unsigned long flags; | ||
944 | 1036 | ||
945 | switch (reg) { | 1037 | switch (reg) { |
1038 | case CSR_PRIORITY_BUDGET: | ||
1039 | if (!card->priority_budget_implemented) { | ||
1040 | rcode = RCODE_ADDRESS_ERROR; | ||
1041 | break; | ||
1042 | } | ||
1043 | /* else fall through */ | ||
1044 | |||
1045 | case CSR_NODE_IDS: | ||
1046 | /* | ||
1047 | * per IEEE 1394-2008 8.3.22.3, not IEEE 1394.1-2004 3.2.8 | ||
1048 | * and 9.6, but interoperable with IEEE 1394.1-2004 bridges | ||
1049 | */ | ||
1050 | /* fall through */ | ||
1051 | |||
1052 | case CSR_STATE_CLEAR: | ||
1053 | case CSR_STATE_SET: | ||
946 | case CSR_CYCLE_TIME: | 1054 | case CSR_CYCLE_TIME: |
947 | if (TCODE_IS_READ_REQUEST(tcode) && length == 4) | 1055 | case CSR_BUS_TIME: |
948 | *data = cpu_to_be32(card->driver->get_cycle_time(card)); | 1056 | case CSR_BUSY_TIMEOUT: |
1057 | if (tcode == TCODE_READ_QUADLET_REQUEST) | ||
1058 | *data = cpu_to_be32(card->driver->read_csr(card, reg)); | ||
1059 | else if (tcode == TCODE_WRITE_QUADLET_REQUEST) | ||
1060 | card->driver->write_csr(card, reg, be32_to_cpu(*data)); | ||
1061 | else | ||
1062 | rcode = RCODE_TYPE_ERROR; | ||
1063 | break; | ||
1064 | |||
1065 | case CSR_RESET_START: | ||
1066 | if (tcode == TCODE_WRITE_QUADLET_REQUEST) | ||
1067 | card->driver->write_csr(card, CSR_STATE_CLEAR, | ||
1068 | CSR_STATE_BIT_ABDICATE); | ||
1069 | else | ||
1070 | rcode = RCODE_TYPE_ERROR; | ||
1071 | break; | ||
1072 | |||
1073 | case CSR_SPLIT_TIMEOUT_HI: | ||
1074 | if (tcode == TCODE_READ_QUADLET_REQUEST) { | ||
1075 | *data = cpu_to_be32(card->split_timeout_hi); | ||
1076 | } else if (tcode == TCODE_WRITE_QUADLET_REQUEST) { | ||
1077 | spin_lock_irqsave(&card->lock, flags); | ||
1078 | card->split_timeout_hi = be32_to_cpu(*data) & 7; | ||
1079 | update_split_timeout(card); | ||
1080 | spin_unlock_irqrestore(&card->lock, flags); | ||
1081 | } else { | ||
1082 | rcode = RCODE_TYPE_ERROR; | ||
1083 | } | ||
1084 | break; | ||
1085 | |||
1086 | case CSR_SPLIT_TIMEOUT_LO: | ||
1087 | if (tcode == TCODE_READ_QUADLET_REQUEST) { | ||
1088 | *data = cpu_to_be32(card->split_timeout_lo); | ||
1089 | } else if (tcode == TCODE_WRITE_QUADLET_REQUEST) { | ||
1090 | spin_lock_irqsave(&card->lock, flags); | ||
1091 | card->split_timeout_lo = | ||
1092 | be32_to_cpu(*data) & 0xfff80000; | ||
1093 | update_split_timeout(card); | ||
1094 | spin_unlock_irqrestore(&card->lock, flags); | ||
1095 | } else { | ||
1096 | rcode = RCODE_TYPE_ERROR; | ||
1097 | } | ||
1098 | break; | ||
1099 | |||
1100 | case CSR_MAINT_UTILITY: | ||
1101 | if (tcode == TCODE_READ_QUADLET_REQUEST) | ||
1102 | *data = card->maint_utility_register; | ||
1103 | else if (tcode == TCODE_WRITE_QUADLET_REQUEST) | ||
1104 | card->maint_utility_register = *data; | ||
949 | else | 1105 | else |
950 | rcode = RCODE_TYPE_ERROR; | 1106 | rcode = RCODE_TYPE_ERROR; |
951 | break; | 1107 | break; |
@@ -975,12 +1131,6 @@ static void handle_registers(struct fw_card *card, struct fw_request *request, | |||
975 | BUG(); | 1131 | BUG(); |
976 | break; | 1132 | break; |
977 | 1133 | ||
978 | case CSR_BUSY_TIMEOUT: | ||
979 | /* FIXME: Implement this. */ | ||
980 | |||
981 | case CSR_BUS_TIME: | ||
982 | /* Useless without initialization by the bus manager. */ | ||
983 | |||
984 | default: | 1134 | default: |
985 | rcode = RCODE_ADDRESS_ERROR; | 1135 | rcode = RCODE_ADDRESS_ERROR; |
986 | break; | 1136 | break; |
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h index 0ecfcd95f4c5..e6239f971be6 100644 --- a/drivers/firewire/core.h +++ b/drivers/firewire/core.h | |||
@@ -38,6 +38,9 @@ struct fw_packet; | |||
38 | #define BROADCAST_CHANNEL_INITIAL (1 << 31 | 31) | 38 | #define BROADCAST_CHANNEL_INITIAL (1 << 31 | 31) |
39 | #define BROADCAST_CHANNEL_VALID (1 << 30) | 39 | #define BROADCAST_CHANNEL_VALID (1 << 30) |
40 | 40 | ||
41 | #define CSR_STATE_BIT_CMSTR (1 << 8) | ||
42 | #define CSR_STATE_BIT_ABDICATE (1 << 10) | ||
43 | |||
41 | struct fw_card_driver { | 44 | struct fw_card_driver { |
42 | /* | 45 | /* |
43 | * Enable the given card with the given initial config rom. | 46 | * Enable the given card with the given initial config rom. |
@@ -48,6 +51,7 @@ struct fw_card_driver { | |||
48 | int (*enable)(struct fw_card *card, | 51 | int (*enable)(struct fw_card *card, |
49 | const __be32 *config_rom, size_t length); | 52 | const __be32 *config_rom, size_t length); |
50 | 53 | ||
54 | int (*read_phy_reg)(struct fw_card *card, int address); | ||
51 | int (*update_phy_reg)(struct fw_card *card, int address, | 55 | int (*update_phy_reg)(struct fw_card *card, int address, |
52 | int clear_bits, int set_bits); | 56 | int clear_bits, int set_bits); |
53 | 57 | ||
@@ -75,7 +79,8 @@ struct fw_card_driver { | |||
75 | int (*enable_phys_dma)(struct fw_card *card, | 79 | int (*enable_phys_dma)(struct fw_card *card, |
76 | int node_id, int generation); | 80 | int node_id, int generation); |
77 | 81 | ||
78 | u32 (*get_cycle_time)(struct fw_card *card); | 82 | u32 (*read_csr)(struct fw_card *card, int csr_offset); |
83 | void (*write_csr)(struct fw_card *card, int csr_offset, u32 value); | ||
79 | 84 | ||
80 | struct fw_iso_context * | 85 | struct fw_iso_context * |
81 | (*allocate_iso_context)(struct fw_card *card, | 86 | (*allocate_iso_context)(struct fw_card *card, |
@@ -85,6 +90,8 @@ struct fw_card_driver { | |||
85 | int (*start_iso)(struct fw_iso_context *ctx, | 90 | int (*start_iso)(struct fw_iso_context *ctx, |
86 | s32 cycle, u32 sync, u32 tags); | 91 | s32 cycle, u32 sync, u32 tags); |
87 | 92 | ||
93 | int (*set_iso_channels)(struct fw_iso_context *ctx, u64 *channels); | ||
94 | |||
88 | int (*queue_iso)(struct fw_iso_context *ctx, | 95 | int (*queue_iso)(struct fw_iso_context *ctx, |
89 | struct fw_iso_packet *packet, | 96 | struct fw_iso_packet *packet, |
90 | struct fw_iso_buffer *buffer, | 97 | struct fw_iso_buffer *buffer, |
@@ -98,8 +105,8 @@ void fw_card_initialize(struct fw_card *card, | |||
98 | int fw_card_add(struct fw_card *card, | 105 | int fw_card_add(struct fw_card *card, |
99 | u32 max_receive, u32 link_speed, u64 guid); | 106 | u32 max_receive, u32 link_speed, u64 guid); |
100 | void fw_core_remove_card(struct fw_card *card); | 107 | void fw_core_remove_card(struct fw_card *card); |
101 | int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset); | ||
102 | int fw_compute_block_crc(__be32 *block); | 108 | int fw_compute_block_crc(__be32 *block); |
109 | void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset); | ||
103 | void fw_schedule_bm_work(struct fw_card *card, unsigned long delay); | 110 | void fw_schedule_bm_work(struct fw_card *card, unsigned long delay); |
104 | 111 | ||
105 | static inline struct fw_card *fw_card_get(struct fw_card *card) | 112 | static inline struct fw_card *fw_card_get(struct fw_card *card) |
@@ -123,6 +130,7 @@ extern const struct file_operations fw_device_ops; | |||
123 | 130 | ||
124 | void fw_device_cdev_update(struct fw_device *device); | 131 | void fw_device_cdev_update(struct fw_device *device); |
125 | void fw_device_cdev_remove(struct fw_device *device); | 132 | void fw_device_cdev_remove(struct fw_device *device); |
133 | void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p); | ||
126 | 134 | ||
127 | 135 | ||
128 | /* -device */ | 136 | /* -device */ |
@@ -192,7 +200,7 @@ static inline void fw_node_put(struct fw_node *node) | |||
192 | } | 200 | } |
193 | 201 | ||
194 | void fw_core_handle_bus_reset(struct fw_card *card, int node_id, | 202 | void fw_core_handle_bus_reset(struct fw_card *card, int node_id, |
195 | int generation, int self_id_count, u32 *self_ids); | 203 | int generation, int self_id_count, u32 *self_ids, bool bm_abdicate); |
196 | void fw_destroy_nodes(struct fw_card *card); | 204 | void fw_destroy_nodes(struct fw_card *card); |
197 | 205 | ||
198 | /* | 206 | /* |
@@ -209,6 +217,7 @@ static inline bool is_next_generation(int new_generation, int old_generation) | |||
209 | 217 | ||
210 | #define TCODE_IS_READ_REQUEST(tcode) (((tcode) & ~1) == 4) | 218 | #define TCODE_IS_READ_REQUEST(tcode) (((tcode) & ~1) == 4) |
211 | #define TCODE_IS_BLOCK_PACKET(tcode) (((tcode) & 1) != 0) | 219 | #define TCODE_IS_BLOCK_PACKET(tcode) (((tcode) & 1) != 0) |
220 | #define TCODE_IS_LINK_INTERNAL(tcode) ((tcode) == 0xe) | ||
212 | #define TCODE_IS_REQUEST(tcode) (((tcode) & 2) == 0) | 221 | #define TCODE_IS_REQUEST(tcode) (((tcode) & 2) == 0) |
213 | #define TCODE_IS_RESPONSE(tcode) (((tcode) & 2) != 0) | 222 | #define TCODE_IS_RESPONSE(tcode) (((tcode) & 2) != 0) |
214 | #define TCODE_HAS_REQUEST_DATA(tcode) (((tcode) & 12) != 4) | 223 | #define TCODE_HAS_REQUEST_DATA(tcode) (((tcode) & 12) != 4) |
@@ -218,9 +227,18 @@ static inline bool is_next_generation(int new_generation, int old_generation) | |||
218 | 227 | ||
219 | void fw_core_handle_request(struct fw_card *card, struct fw_packet *request); | 228 | void fw_core_handle_request(struct fw_card *card, struct fw_packet *request); |
220 | void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet); | 229 | void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet); |
230 | int fw_get_response_length(struct fw_request *request); | ||
221 | void fw_fill_response(struct fw_packet *response, u32 *request_header, | 231 | void fw_fill_response(struct fw_packet *response, u32 *request_header, |
222 | int rcode, void *payload, size_t length); | 232 | int rcode, void *payload, size_t length); |
233 | |||
234 | #define FW_PHY_CONFIG_NO_NODE_ID -1 | ||
235 | #define FW_PHY_CONFIG_CURRENT_GAP_COUNT -1 | ||
223 | void fw_send_phy_config(struct fw_card *card, | 236 | void fw_send_phy_config(struct fw_card *card, |
224 | int node_id, int generation, int gap_count); | 237 | int node_id, int generation, int gap_count); |
225 | 238 | ||
/*
 * True for a "ping": a PHY configuration packet whose R, T and
 * gap-count fields are all zero (only the phy_id bits may be set),
 * followed by the mandatory bitwise-inverse check quadlet.
 */
static inline bool is_ping_packet(u32 *data)
{
	return (data[0] & 0xc0ffffff) == 0 && ~data[0] == data[1];
}
243 | |||
226 | #endif /* _FIREWIRE_CORE_H */ | 244 | #endif /* _FIREWIRE_CORE_H */ |
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c index 7142eeec8074..da17d409a244 100644 --- a/drivers/firewire/net.c +++ b/drivers/firewire/net.c | |||
@@ -806,8 +806,8 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len, | |||
806 | 806 | ||
807 | static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r, | 807 | static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r, |
808 | int tcode, int destination, int source, int generation, | 808 | int tcode, int destination, int source, int generation, |
809 | int speed, unsigned long long offset, void *payload, | 809 | unsigned long long offset, void *payload, size_t length, |
810 | size_t length, void *callback_data) | 810 | void *callback_data) |
811 | { | 811 | { |
812 | struct fwnet_device *dev = callback_data; | 812 | struct fwnet_device *dev = callback_data; |
813 | int rcode; | 813 | int rcode; |
diff --git a/drivers/firewire/nosy-user.h b/drivers/firewire/nosy-user.h new file mode 100644 index 000000000000..e48aa6200c72 --- /dev/null +++ b/drivers/firewire/nosy-user.h | |||
@@ -0,0 +1,25 @@ | |||
1 | #ifndef __nosy_user_h | ||
2 | #define __nosy_user_h | ||
3 | |||
4 | #include <linux/ioctl.h> | ||
5 | #include <linux/types.h> | ||
6 | |||
7 | #define NOSY_IOC_GET_STATS _IOR('&', 0, struct nosy_stats) | ||
8 | #define NOSY_IOC_START _IO('&', 1) | ||
9 | #define NOSY_IOC_STOP _IO('&', 2) | ||
10 | #define NOSY_IOC_FILTER _IOW('&', 2, __u32) | ||
11 | |||
/* Counters returned by NOSY_IOC_GET_STATS; maintained per open client. */
struct nosy_stats {
	__u32 total_packet_count;	/* packets offered to this client's buffer */
	__u32 lost_packet_count;	/* packets dropped because the buffer was full */
};
16 | |||
17 | /* | ||
18 | * Format of packets returned from the kernel driver: | ||
19 | * | ||
20 | * quadlet with timestamp (microseconds, CPU endian) | ||
21 | * quadlet-padded packet data... (little endian) | ||
22 | * quadlet with ack (little endian) | ||
23 | */ | ||
24 | |||
25 | #endif /* __nosy_user_h */ | ||
diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c new file mode 100644 index 000000000000..8528b10763ed --- /dev/null +++ b/drivers/firewire/nosy.c | |||
@@ -0,0 +1,721 @@ | |||
1 | /* | ||
2 | * nosy - Snoop mode driver for TI PCILynx 1394 controllers | ||
3 | * Copyright (C) 2002-2007 Kristian Høgsberg | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; either version 2 of the License, or | ||
8 | * (at your option) any later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software Foundation, | ||
17 | * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
18 | */ | ||
19 | |||
20 | #include <linux/device.h> | ||
21 | #include <linux/errno.h> | ||
22 | #include <linux/fs.h> | ||
23 | #include <linux/init.h> | ||
24 | #include <linux/interrupt.h> | ||
25 | #include <linux/io.h> | ||
26 | #include <linux/kernel.h> | ||
27 | #include <linux/kref.h> | ||
28 | #include <linux/miscdevice.h> | ||
29 | #include <linux/module.h> | ||
30 | #include <linux/mutex.h> | ||
31 | #include <linux/pci.h> | ||
32 | #include <linux/poll.h> | ||
33 | #include <linux/sched.h> /* required for linux/wait.h */ | ||
34 | #include <linux/slab.h> | ||
35 | #include <linux/spinlock.h> | ||
36 | #include <linux/timex.h> | ||
37 | #include <linux/uaccess.h> | ||
38 | #include <linux/wait.h> | ||
39 | |||
40 | #include <asm/atomic.h> | ||
41 | #include <asm/byteorder.h> | ||
42 | |||
43 | #include "nosy.h" | ||
44 | #include "nosy-user.h" | ||
45 | |||
46 | #define TCODE_PHY_PACKET 0x10 | ||
47 | #define PCI_DEVICE_ID_TI_PCILYNX 0x8000 | ||
48 | |||
49 | static char driver_name[] = KBUILD_MODNAME; | ||
50 | |||
/*
 * Physical layout of a PCL (packet control list) as consumed by the
 * PCILynx DMA engine; its size is 128 bytes.  All fields except
 * user_data are read/written by the chip and kept little-endian.
 */
struct pcl {
	__le32 next;			/* bus address of next PCL, or PCL_NEXT_INVALID */
	__le32 async_error_next;
	u32 user_data;			/* driver-private; untouched by hardware */
	__le32 pcl_status;		/* completion status written back by the chip */
	__le32 remaining_transfer_count;
	__le32 next_data_buffer;
	struct {
		__le32 control;		/* PCL_CMD_* | flags | byte count */
		__le32 pointer;		/* bus address of the data buffer */
	} buffer[13];
};
64 | |||
/*
 * One snooped packet as stored in a client's ring buffer: a length
 * header immediately followed by the payload bytes.  data[0] is the
 * pre-C99 spelling of a flexible array member.
 */
struct packet {
	unsigned int length;	/* number of bytes in data[] */
	char data[0];
};
69 | |||
/*
 * Per-client ring buffer of snooped packets.  The interrupt handler
 * produces at 'tail', the reader consumes at 'head'; 'size' is the
 * only field shared between the two and is therefore atomic.
 */
struct packet_buffer {
	char *data;			/* backing storage, 'capacity' bytes */
	size_t capacity;
	long total_packet_count, lost_packet_count;	/* NOSY_IOC_GET_STATS */
	atomic_t size;			/* bytes currently queued in the ring */
	struct packet *head, *tail;	/* consumer / producer positions */
	wait_queue_head_t wait;		/* readers sleep here until size > 0 */
};
78 | |||
/*
 * Per-card state, one per PCILynx PCI device.  Reference-counted
 * because open file handles (struct client) can outlive the device.
 */
struct pcilynx {
	struct pci_dev *pci_device;
	__iomem char *registers;	/* mapped MMIO register window */

	/* receive DMA program: a start PCL chained to the real PCL */
	struct pcl *rcv_start_pcl, *rcv_pcl;
	__le32 *rcv_buffer;

	/* bus addresses of the three coherent allocations above */
	dma_addr_t rcv_start_pcl_bus, rcv_pcl_bus, rcv_buffer_bus;

	/* protects client_list; also taken from the interrupt handler */
	spinlock_t client_list_lock;
	struct list_head client_list;	/* clients currently capturing */

	struct miscdevice misc;		/* the /dev "nosy" char device */
	struct list_head link;		/* entry in the global card_list */
	struct kref kref;
};
95 | |||
/* Take a reference on @lynx; paired with lynx_put(). */
static inline struct pcilynx *
lynx_get(struct pcilynx *lynx)
{
	kref_get(&lynx->kref);

	return lynx;
}
103 | |||
/* kref release callback: frees the card state once the last reference drops. */
static void
lynx_release(struct kref *kref)
{
	kfree(container_of(kref, struct pcilynx, kref));
}
109 | |||
/* Drop a reference on @lynx; frees it via lynx_release() on the last put. */
static inline void
lynx_put(struct pcilynx *lynx)
{
	kref_put(&lynx->kref, lynx_release);
}
115 | |||
/*
 * Per-open-file state.  A client sits on its card's client_list only
 * while capturing (between NOSY_IOC_START and NOSY_IOC_STOP).
 */
struct client {
	struct pcilynx *lynx;		/* referenced card this client snoops */
	u32 tcode_mask;			/* bitmask of tcodes the client wants */
	struct packet_buffer buffer;	/* ring buffer drained by nosy_read() */
	struct list_head link;		/* entry in lynx->client_list */
};
122 | |||
123 | static DEFINE_MUTEX(card_mutex); | ||
124 | static LIST_HEAD(card_list); | ||
125 | |||
126 | static int | ||
127 | packet_buffer_init(struct packet_buffer *buffer, size_t capacity) | ||
128 | { | ||
129 | buffer->data = kmalloc(capacity, GFP_KERNEL); | ||
130 | if (buffer->data == NULL) | ||
131 | return -ENOMEM; | ||
132 | buffer->head = (struct packet *) buffer->data; | ||
133 | buffer->tail = (struct packet *) buffer->data; | ||
134 | buffer->capacity = capacity; | ||
135 | buffer->lost_packet_count = 0; | ||
136 | atomic_set(&buffer->size, 0); | ||
137 | init_waitqueue_head(&buffer->wait); | ||
138 | |||
139 | return 0; | ||
140 | } | ||
141 | |||
/* Free the ring's backing store; the ring must no longer be in use. */
static void
packet_buffer_destroy(struct packet_buffer *buffer)
{
	kfree(buffer->data);
}
147 | |||
148 | static int | ||
149 | packet_buffer_get(struct client *client, char __user *data, size_t user_length) | ||
150 | { | ||
151 | struct packet_buffer *buffer = &client->buffer; | ||
152 | size_t length; | ||
153 | char *end; | ||
154 | |||
155 | if (wait_event_interruptible(buffer->wait, | ||
156 | atomic_read(&buffer->size) > 0) || | ||
157 | list_empty(&client->lynx->link)) | ||
158 | return -ERESTARTSYS; | ||
159 | |||
160 | if (atomic_read(&buffer->size) == 0) | ||
161 | return -ENODEV; | ||
162 | |||
163 | /* FIXME: Check length <= user_length. */ | ||
164 | |||
165 | end = buffer->data + buffer->capacity; | ||
166 | length = buffer->head->length; | ||
167 | |||
168 | if (&buffer->head->data[length] < end) { | ||
169 | if (copy_to_user(data, buffer->head->data, length)) | ||
170 | return -EFAULT; | ||
171 | buffer->head = (struct packet *) &buffer->head->data[length]; | ||
172 | } else { | ||
173 | size_t split = end - buffer->head->data; | ||
174 | |||
175 | if (copy_to_user(data, buffer->head->data, split)) | ||
176 | return -EFAULT; | ||
177 | if (copy_to_user(data + split, buffer->data, length - split)) | ||
178 | return -EFAULT; | ||
179 | buffer->head = (struct packet *) &buffer->data[length - split]; | ||
180 | } | ||
181 | |||
182 | /* | ||
183 | * Decrease buffer->size as the last thing, since this is what | ||
184 | * keeps the interrupt from overwriting the packet we are | ||
185 | * retrieving from the buffer. | ||
186 | */ | ||
187 | atomic_sub(sizeof(struct packet) + length, &buffer->size); | ||
188 | |||
189 | return length; | ||
190 | } | ||
191 | |||
/*
 * Append one snooped packet to a client's ring.  Called from the
 * interrupt handlers under client_list_lock; the reader runs
 * concurrently, synchronized only through the atomic 'size'.
 * If the packet does not fit it is dropped and counted as lost.
 */
static void
packet_buffer_put(struct packet_buffer *buffer, void *data, size_t length)
{
	char *end;

	buffer->total_packet_count++;

	/* Not enough free space: drop the packet, count the loss. */
	if (buffer->capacity <
	    atomic_read(&buffer->size) + sizeof(struct packet) + length) {
		buffer->lost_packet_count++;
		return;
	}

	end = buffer->data + buffer->capacity;
	buffer->tail->length = length;

	if (&buffer->tail->data[length] < end) {
		memcpy(buffer->tail->data, data, length);
		buffer->tail = (struct packet *) &buffer->tail->data[length];
	} else {
		/* The packet wraps around the end of the ring. */
		size_t split = end - buffer->tail->data;

		memcpy(buffer->tail->data, data, split);
		memcpy(buffer->data, data + split, length - split);
		buffer->tail = (struct packet *) &buffer->data[length - split];
	}

	/* Finally, adjust buffer size and wake up userspace reader. */

	atomic_add(sizeof(struct packet) + length, &buffer->size);
	wake_up_interruptible(&buffer->wait);
}
224 | |||
/* Write a 32-bit value to a PCILynx MMIO register at @offset. */
static inline void
reg_write(struct pcilynx *lynx, int offset, u32 data)
{
	writel(data, lynx->registers + offset);
}
230 | |||
/* Read a 32-bit PCILynx MMIO register at @offset. */
static inline u32
reg_read(struct pcilynx *lynx, int offset)
{
	return readl(lynx->registers + offset);
}
236 | |||
/* Read-modify-write: set @mask bits in the register at @offset. */
static inline void
reg_set_bits(struct pcilynx *lynx, int offset, u32 mask)
{
	reg_write(lynx, offset, (reg_read(lynx, offset) | mask));
}
242 | |||
/*
 * Maybe the pcl programs could be set up to just append data instead
 * of using a whole packet.
 */

/*
 * Point DMA channel @dmachan at the PCL at bus address @pcl_bus and
 * start it.  Each channel's register block is 0x20 bytes apart.
 */
static inline void
run_pcl(struct pcilynx *lynx, dma_addr_t pcl_bus,
	int dmachan)
{
	reg_write(lynx, DMA0_CURRENT_PCL + dmachan * 0x20, pcl_bus);
	reg_write(lynx, DMA0_CHAN_CTRL + dmachan * 0x20,
		  DMA_CHAN_CTRL_ENABLE | DMA_CHAN_CTRL_LINK);
}
255 | |||
256 | static int | ||
257 | set_phy_reg(struct pcilynx *lynx, int addr, int val) | ||
258 | { | ||
259 | if (addr > 15) { | ||
260 | dev_err(&lynx->pci_device->dev, | ||
261 | "PHY register address %d out of range\n", addr); | ||
262 | return -1; | ||
263 | } | ||
264 | if (val > 0xff) { | ||
265 | dev_err(&lynx->pci_device->dev, | ||
266 | "PHY register value %d out of range\n", val); | ||
267 | return -1; | ||
268 | } | ||
269 | reg_write(lynx, LINK_PHY, LINK_PHY_WRITE | | ||
270 | LINK_PHY_ADDR(addr) | LINK_PHY_WDATA(val)); | ||
271 | |||
272 | return 0; | ||
273 | } | ||
274 | |||
275 | static int | ||
276 | nosy_open(struct inode *inode, struct file *file) | ||
277 | { | ||
278 | int minor = iminor(inode); | ||
279 | struct client *client; | ||
280 | struct pcilynx *tmp, *lynx = NULL; | ||
281 | |||
282 | mutex_lock(&card_mutex); | ||
283 | list_for_each_entry(tmp, &card_list, link) | ||
284 | if (tmp->misc.minor == minor) { | ||
285 | lynx = lynx_get(tmp); | ||
286 | break; | ||
287 | } | ||
288 | mutex_unlock(&card_mutex); | ||
289 | if (lynx == NULL) | ||
290 | return -ENODEV; | ||
291 | |||
292 | client = kmalloc(sizeof *client, GFP_KERNEL); | ||
293 | if (client == NULL) | ||
294 | goto fail; | ||
295 | |||
296 | client->tcode_mask = ~0; | ||
297 | client->lynx = lynx; | ||
298 | INIT_LIST_HEAD(&client->link); | ||
299 | |||
300 | if (packet_buffer_init(&client->buffer, 128 * 1024) < 0) | ||
301 | goto fail; | ||
302 | |||
303 | file->private_data = client; | ||
304 | |||
305 | return 0; | ||
306 | fail: | ||
307 | kfree(client); | ||
308 | lynx_put(lynx); | ||
309 | |||
310 | return -ENOMEM; | ||
311 | } | ||
312 | |||
/*
 * release(): unhook the client from the card's capture list (a no-op
 * if it was never started or already stopped, since the list entry is
 * self-initialized), free its ring buffer, and drop the card
 * reference taken in nosy_open().
 */
static int
nosy_release(struct inode *inode, struct file *file)
{
	struct client *client = file->private_data;
	struct pcilynx *lynx = client->lynx;

	spin_lock_irq(&lynx->client_list_lock);
	list_del_init(&client->link);
	spin_unlock_irq(&lynx->client_list_lock);

	packet_buffer_destroy(&client->buffer);
	kfree(client);
	lynx_put(lynx);

	return 0;
}
329 | |||
330 | static unsigned int | ||
331 | nosy_poll(struct file *file, poll_table *pt) | ||
332 | { | ||
333 | struct client *client = file->private_data; | ||
334 | unsigned int ret = 0; | ||
335 | |||
336 | poll_wait(file, &client->buffer.wait, pt); | ||
337 | |||
338 | if (atomic_read(&client->buffer.size) > 0) | ||
339 | ret = POLLIN | POLLRDNORM; | ||
340 | |||
341 | if (list_empty(&client->lynx->link)) | ||
342 | ret |= POLLHUP; | ||
343 | |||
344 | return ret; | ||
345 | } | ||
346 | |||
/*
 * read(2): blocks until a snooped packet is available, then delivers
 * exactly one packet per call; see packet_buffer_get() for the return
 * value semantics.
 */
static ssize_t
nosy_read(struct file *file, char __user *buffer, size_t count, loff_t *offset)
{
	struct client *client = file->private_data;

	return packet_buffer_get(client, buffer, count);
}
354 | |||
355 | static long | ||
356 | nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | ||
357 | { | ||
358 | struct client *client = file->private_data; | ||
359 | spinlock_t *client_list_lock = &client->lynx->client_list_lock; | ||
360 | struct nosy_stats stats; | ||
361 | |||
362 | switch (cmd) { | ||
363 | case NOSY_IOC_GET_STATS: | ||
364 | spin_lock_irq(client_list_lock); | ||
365 | stats.total_packet_count = client->buffer.total_packet_count; | ||
366 | stats.lost_packet_count = client->buffer.lost_packet_count; | ||
367 | spin_unlock_irq(client_list_lock); | ||
368 | |||
369 | if (copy_to_user((void __user *) arg, &stats, sizeof stats)) | ||
370 | return -EFAULT; | ||
371 | else | ||
372 | return 0; | ||
373 | |||
374 | case NOSY_IOC_START: | ||
375 | spin_lock_irq(client_list_lock); | ||
376 | list_add_tail(&client->link, &client->lynx->client_list); | ||
377 | spin_unlock_irq(client_list_lock); | ||
378 | |||
379 | return 0; | ||
380 | |||
381 | case NOSY_IOC_STOP: | ||
382 | spin_lock_irq(client_list_lock); | ||
383 | list_del_init(&client->link); | ||
384 | spin_unlock_irq(client_list_lock); | ||
385 | |||
386 | return 0; | ||
387 | |||
388 | case NOSY_IOC_FILTER: | ||
389 | spin_lock_irq(client_list_lock); | ||
390 | client->tcode_mask = arg; | ||
391 | spin_unlock_irq(client_list_lock); | ||
392 | |||
393 | return 0; | ||
394 | |||
395 | default: | ||
396 | return -EINVAL; | ||
397 | /* Flush buffer, configure filter. */ | ||
398 | } | ||
399 | } | ||
400 | |||
401 | static const struct file_operations nosy_ops = { | ||
402 | .owner = THIS_MODULE, | ||
403 | .read = nosy_read, | ||
404 | .unlocked_ioctl = nosy_ioctl, | ||
405 | .poll = nosy_poll, | ||
406 | .open = nosy_open, | ||
407 | .release = nosy_release, | ||
408 | }; | ||
409 | |||
410 | #define PHY_PACKET_SIZE 12 /* 1 payload, 1 inverse, 1 ack = 3 quadlets */ | ||
411 | |||
/*
 * Handle a completed receive DMA: timestamp the packet, classify it by
 * transaction code (a 12-byte packet is treated as a PHY packet and
 * gets the pseudo-tcode TCODE_PHY_PACKET), and hand it to every client
 * whose tcode_mask matches.  Runs in hard-irq context.
 */
static void
packet_irq_handler(struct pcilynx *lynx)
{
	struct client *client;
	u32 tcode_mask, tcode;
	size_t length;
	struct timeval tv;

	/* FIXME: Also report rcv_speed. */

	/* Received byte count is in the low 13 bits of the PCL status. */
	length = __le32_to_cpu(lynx->rcv_pcl->pcl_status) & 0x00001fff;
	tcode = __le32_to_cpu(lynx->rcv_buffer[1]) >> 4 & 0xf;

	/* Overwrite the first quadlet with a microsecond timestamp. */
	do_gettimeofday(&tv);
	lynx->rcv_buffer[0] = (__force __le32)tv.tv_usec;

	if (length == PHY_PACKET_SIZE)
		tcode_mask = 1 << TCODE_PHY_PACKET;
	else
		tcode_mask = 1 << tcode;

	spin_lock(&lynx->client_list_lock);

	/* length + 4 so the timestamp quadlet is delivered with the data. */
	list_for_each_entry(client, &lynx->client_list, link)
		if (client->tcode_mask & tcode_mask)
			packet_buffer_put(&client->buffer,
					  lynx->rcv_buffer, length + 4);

	spin_unlock(&lynx->client_list_lock);
}
442 | |||
/*
 * Report a bus reset to every capturing client as a bare 4-byte record
 * containing only the timestamp (tv_usec), with no packet payload.
 * Runs in hard-irq context.
 */
static void
bus_reset_irq_handler(struct pcilynx *lynx)
{
	struct client *client;
	struct timeval tv;

	do_gettimeofday(&tv);

	spin_lock(&lynx->client_list_lock);

	list_for_each_entry(client, &lynx->client_list, link)
		packet_buffer_put(&client->buffer, &tv.tv_usec, 4);

	spin_unlock(&lynx->client_list_lock);
}
458 | |||
/*
 * Shared-line interrupt handler.  Distinguishes: card ejection
 * (all-ones register read), foreign interrupts, 1394 link events
 * (bus reset), and DMA channel 0 halting, which signals a received
 * packet; in the latter case the receive program is re-armed.
 */
static irqreturn_t
irq_handler(int irq, void *device)
{
	struct pcilynx *lynx = device;
	u32 pci_int_status;

	pci_int_status = reg_read(lynx, PCI_INT_STATUS);

	if (pci_int_status == ~0)
		/* Card was ejected. */
		return IRQ_NONE;

	if ((pci_int_status & PCI_INT_INT_PEND) == 0)
		/* Not our interrupt, bail out quickly. */
		return IRQ_NONE;

	if ((pci_int_status & PCI_INT_P1394_INT) != 0) {
		u32 link_int_status;

		link_int_status = reg_read(lynx, LINK_INT_STATUS);
		reg_write(lynx, LINK_INT_STATUS, link_int_status);

		if ((link_int_status & LINK_INT_PHY_BUSRESET) > 0)
			bus_reset_irq_handler(lynx);
	}

	/*
	 * Clear the PCI_INT_STATUS register only after clearing the
	 * LINK_INT_STATUS register; otherwise the PCI_INT_P1394 will
	 * be set again immediately.
	 */
	reg_write(lynx, PCI_INT_STATUS, pci_int_status);

	if ((pci_int_status & PCI_INT_DMA0_HLT) > 0) {
		packet_irq_handler(lynx);
		/* Re-arm the receive DMA program for the next packet. */
		run_pcl(lynx, lynx->rcv_start_pcl_bus, 0);
	}

	return IRQ_HANDLED;
}
498 | |||
499 | static void | ||
500 | remove_card(struct pci_dev *dev) | ||
501 | { | ||
502 | struct pcilynx *lynx = pci_get_drvdata(dev); | ||
503 | struct client *client; | ||
504 | |||
505 | mutex_lock(&card_mutex); | ||
506 | list_del_init(&lynx->link); | ||
507 | misc_deregister(&lynx->misc); | ||
508 | mutex_unlock(&card_mutex); | ||
509 | |||
510 | reg_write(lynx, PCI_INT_ENABLE, 0); | ||
511 | free_irq(lynx->pci_device->irq, lynx); | ||
512 | |||
513 | spin_lock_irq(&lynx->client_list_lock); | ||
514 | list_for_each_entry(client, &lynx->client_list, link) | ||
515 | wake_up_interruptible(&client->buffer.wait); | ||
516 | spin_unlock_irq(&lynx->client_list_lock); | ||
517 | |||
518 | pci_free_consistent(lynx->pci_device, sizeof(struct pcl), | ||
519 | lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus); | ||
520 | pci_free_consistent(lynx->pci_device, sizeof(struct pcl), | ||
521 | lynx->rcv_pcl, lynx->rcv_pcl_bus); | ||
522 | pci_free_consistent(lynx->pci_device, PAGE_SIZE, | ||
523 | lynx->rcv_buffer, lynx->rcv_buffer_bus); | ||
524 | |||
525 | iounmap(lynx->registers); | ||
526 | pci_disable_device(dev); | ||
527 | lynx_put(lynx); | ||
528 | } | ||
529 | |||
530 | #define RCV_BUFFER_SIZE (16 * 1024) | ||
531 | |||
532 | static int __devinit | ||
533 | add_card(struct pci_dev *dev, const struct pci_device_id *unused) | ||
534 | { | ||
535 | struct pcilynx *lynx; | ||
536 | u32 p, end; | ||
537 | int ret, i; | ||
538 | |||
539 | if (pci_set_dma_mask(dev, 0xffffffff)) { | ||
540 | dev_err(&dev->dev, | ||
541 | "DMA address limits not supported for PCILynx hardware\n"); | ||
542 | return -ENXIO; | ||
543 | } | ||
544 | if (pci_enable_device(dev)) { | ||
545 | dev_err(&dev->dev, "Failed to enable PCILynx hardware\n"); | ||
546 | return -ENXIO; | ||
547 | } | ||
548 | pci_set_master(dev); | ||
549 | |||
550 | lynx = kzalloc(sizeof *lynx, GFP_KERNEL); | ||
551 | if (lynx == NULL) { | ||
552 | dev_err(&dev->dev, "Failed to allocate control structure\n"); | ||
553 | ret = -ENOMEM; | ||
554 | goto fail_disable; | ||
555 | } | ||
556 | lynx->pci_device = dev; | ||
557 | pci_set_drvdata(dev, lynx); | ||
558 | |||
559 | spin_lock_init(&lynx->client_list_lock); | ||
560 | INIT_LIST_HEAD(&lynx->client_list); | ||
561 | kref_init(&lynx->kref); | ||
562 | |||
563 | lynx->registers = ioremap_nocache(pci_resource_start(dev, 0), | ||
564 | PCILYNX_MAX_REGISTER); | ||
565 | |||
566 | lynx->rcv_start_pcl = pci_alloc_consistent(lynx->pci_device, | ||
567 | sizeof(struct pcl), &lynx->rcv_start_pcl_bus); | ||
568 | lynx->rcv_pcl = pci_alloc_consistent(lynx->pci_device, | ||
569 | sizeof(struct pcl), &lynx->rcv_pcl_bus); | ||
570 | lynx->rcv_buffer = pci_alloc_consistent(lynx->pci_device, | ||
571 | RCV_BUFFER_SIZE, &lynx->rcv_buffer_bus); | ||
572 | if (lynx->rcv_start_pcl == NULL || | ||
573 | lynx->rcv_pcl == NULL || | ||
574 | lynx->rcv_buffer == NULL) { | ||
575 | dev_err(&dev->dev, "Failed to allocate receive buffer\n"); | ||
576 | ret = -ENOMEM; | ||
577 | goto fail_deallocate; | ||
578 | } | ||
579 | lynx->rcv_start_pcl->next = cpu_to_le32(lynx->rcv_pcl_bus); | ||
580 | lynx->rcv_pcl->next = cpu_to_le32(PCL_NEXT_INVALID); | ||
581 | lynx->rcv_pcl->async_error_next = cpu_to_le32(PCL_NEXT_INVALID); | ||
582 | |||
583 | lynx->rcv_pcl->buffer[0].control = | ||
584 | cpu_to_le32(PCL_CMD_RCV | PCL_BIGENDIAN | 2044); | ||
585 | lynx->rcv_pcl->buffer[0].pointer = | ||
586 | cpu_to_le32(lynx->rcv_buffer_bus + 4); | ||
587 | p = lynx->rcv_buffer_bus + 2048; | ||
588 | end = lynx->rcv_buffer_bus + RCV_BUFFER_SIZE; | ||
589 | for (i = 1; p < end; i++, p += 2048) { | ||
590 | lynx->rcv_pcl->buffer[i].control = | ||
591 | cpu_to_le32(PCL_CMD_RCV | PCL_BIGENDIAN | 2048); | ||
592 | lynx->rcv_pcl->buffer[i].pointer = cpu_to_le32(p); | ||
593 | } | ||
594 | lynx->rcv_pcl->buffer[i - 1].control |= cpu_to_le32(PCL_LAST_BUFF); | ||
595 | |||
596 | reg_set_bits(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET); | ||
597 | /* Fix buggy cards with autoboot pin not tied low: */ | ||
598 | reg_write(lynx, DMA0_CHAN_CTRL, 0); | ||
599 | reg_write(lynx, DMA_GLOBAL_REGISTER, 0x00 << 24); | ||
600 | |||
601 | #if 0 | ||
602 | /* now, looking for PHY register set */ | ||
603 | if ((get_phy_reg(lynx, 2) & 0xe0) == 0xe0) { | ||
604 | lynx->phyic.reg_1394a = 1; | ||
605 | PRINT(KERN_INFO, lynx->id, | ||
606 | "found 1394a conform PHY (using extended register set)"); | ||
607 | lynx->phyic.vendor = get_phy_vendorid(lynx); | ||
608 | lynx->phyic.product = get_phy_productid(lynx); | ||
609 | } else { | ||
610 | lynx->phyic.reg_1394a = 0; | ||
611 | PRINT(KERN_INFO, lynx->id, "found old 1394 PHY"); | ||
612 | } | ||
613 | #endif | ||
614 | |||
615 | /* Setup the general receive FIFO max size. */ | ||
616 | reg_write(lynx, FIFO_SIZES, 255); | ||
617 | |||
618 | reg_set_bits(lynx, PCI_INT_ENABLE, PCI_INT_DMA_ALL); | ||
619 | |||
620 | reg_write(lynx, LINK_INT_ENABLE, | ||
621 | LINK_INT_PHY_TIME_OUT | LINK_INT_PHY_REG_RCVD | | ||
622 | LINK_INT_PHY_BUSRESET | LINK_INT_IT_STUCK | | ||
623 | LINK_INT_AT_STUCK | LINK_INT_SNTRJ | | ||
624 | LINK_INT_TC_ERR | LINK_INT_GRF_OVER_FLOW | | ||
625 | LINK_INT_ITF_UNDER_FLOW | LINK_INT_ATF_UNDER_FLOW); | ||
626 | |||
627 | /* Disable the L flag in self ID packets. */ | ||
628 | set_phy_reg(lynx, 4, 0); | ||
629 | |||
630 | /* Put this baby into snoop mode */ | ||
631 | reg_set_bits(lynx, LINK_CONTROL, LINK_CONTROL_SNOOP_ENABLE); | ||
632 | |||
633 | run_pcl(lynx, lynx->rcv_start_pcl_bus, 0); | ||
634 | |||
635 | if (request_irq(dev->irq, irq_handler, IRQF_SHARED, | ||
636 | driver_name, lynx)) { | ||
637 | dev_err(&dev->dev, | ||
638 | "Failed to allocate shared interrupt %d\n", dev->irq); | ||
639 | ret = -EIO; | ||
640 | goto fail_deallocate; | ||
641 | } | ||
642 | |||
643 | lynx->misc.parent = &dev->dev; | ||
644 | lynx->misc.minor = MISC_DYNAMIC_MINOR; | ||
645 | lynx->misc.name = "nosy"; | ||
646 | lynx->misc.fops = &nosy_ops; | ||
647 | |||
648 | mutex_lock(&card_mutex); | ||
649 | ret = misc_register(&lynx->misc); | ||
650 | if (ret) { | ||
651 | dev_err(&dev->dev, "Failed to register misc char device\n"); | ||
652 | mutex_unlock(&card_mutex); | ||
653 | goto fail_free_irq; | ||
654 | } | ||
655 | list_add_tail(&lynx->link, &card_list); | ||
656 | mutex_unlock(&card_mutex); | ||
657 | |||
658 | dev_info(&dev->dev, | ||
659 | "Initialized PCILynx IEEE1394 card, irq=%d\n", dev->irq); | ||
660 | |||
661 | return 0; | ||
662 | |||
663 | fail_free_irq: | ||
664 | reg_write(lynx, PCI_INT_ENABLE, 0); | ||
665 | free_irq(lynx->pci_device->irq, lynx); | ||
666 | |||
667 | fail_deallocate: | ||
668 | if (lynx->rcv_start_pcl) | ||
669 | pci_free_consistent(lynx->pci_device, sizeof(struct pcl), | ||
670 | lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus); | ||
671 | if (lynx->rcv_pcl) | ||
672 | pci_free_consistent(lynx->pci_device, sizeof(struct pcl), | ||
673 | lynx->rcv_pcl, lynx->rcv_pcl_bus); | ||
674 | if (lynx->rcv_buffer) | ||
675 | pci_free_consistent(lynx->pci_device, PAGE_SIZE, | ||
676 | lynx->rcv_buffer, lynx->rcv_buffer_bus); | ||
677 | iounmap(lynx->registers); | ||
678 | kfree(lynx); | ||
679 | |||
680 | fail_disable: | ||
681 | pci_disable_device(dev); | ||
682 | |||
683 | return ret; | ||
684 | } | ||
685 | |||
/* Match TI PCILynx controllers, any subsystem vendor/device. */
static struct pci_device_id pci_table[] __devinitdata = {
	{
		.vendor = PCI_VENDOR_ID_TI,
		.device = PCI_DEVICE_ID_TI_PCILYNX,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	},
	{ }	/* Terminating entry */
};
695 | |||
/* PCI driver glue; probe/remove callbacks are defined above. */
static struct pci_driver lynx_pci_driver = {
	.name = driver_name,
	.id_table = pci_table,
	.probe = add_card,
	.remove = remove_card,
};
702 | |||
703 | MODULE_AUTHOR("Kristian Hoegsberg"); | ||
704 | MODULE_DESCRIPTION("Snoop mode driver for TI pcilynx 1394 controllers"); | ||
705 | MODULE_LICENSE("GPL"); | ||
706 | MODULE_DEVICE_TABLE(pci, pci_table); | ||
707 | |||
/* Module init: register the PCI driver; cards are probed per device. */
static int __init nosy_init(void)
{
	return pci_register_driver(&lynx_pci_driver);
}
712 | |||
/* Module exit: unregister the PCI driver (remove_card runs per card). */
static void __exit nosy_cleanup(void)
{
	pci_unregister_driver(&lynx_pci_driver);

	pr_info("Unloaded %s\n", driver_name);
}
719 | |||
720 | module_init(nosy_init); | ||
721 | module_exit(nosy_cleanup); | ||
diff --git a/drivers/firewire/nosy.h b/drivers/firewire/nosy.h new file mode 100644 index 000000000000..078ff27f4756 --- /dev/null +++ b/drivers/firewire/nosy.h | |||
@@ -0,0 +1,237 @@ | |||
1 | /* | ||
2 | * Chip register definitions for PCILynx chipset. Based on pcilynx.h | ||
3 | * from the Linux 1394 drivers, but modified a bit so the names here | ||
4 | * match the specification exactly (even though they have weird names, | ||
5 | * like xxx_OVER_FLOW, or arbitrary abbreviations like SNTRJ for "sent | ||
6 | * reject" etc.) | ||
7 | */ | ||
8 | |||
9 | #define PCILYNX_MAX_REGISTER 0xfff | ||
10 | #define PCILYNX_MAX_MEMORY 0xffff | ||
11 | |||
12 | #define PCI_LATENCY_CACHELINE 0x0c | ||
13 | |||
14 | #define MISC_CONTROL 0x40 | ||
15 | #define MISC_CONTROL_SWRESET (1<<0) | ||
16 | |||
17 | #define SERIAL_EEPROM_CONTROL 0x44 | ||
18 | |||
#define PCI_INT_STATUS			0x48
#define PCI_INT_ENABLE			0x4c
/* status and enable have identical bit numbers */
/* bit 31: unsigned constant — 1<<31 shifts into the sign bit of int (UB) */
#define PCI_INT_INT_PEND		(1u<<31)
#define PCI_INT_FRC_INT			(1<<30)
#define PCI_INT_SLV_ADR_PERR		(1<<28)
#define PCI_INT_SLV_DAT_PERR		(1<<27)
#define PCI_INT_MST_DAT_PERR		(1<<26)
#define PCI_INT_MST_DEV_TO		(1<<25)
#define PCI_INT_INT_SLV_TO		(1<<23)
#define PCI_INT_AUX_TO			(1<<18)
#define PCI_INT_AUX_INT			(1<<17)
#define PCI_INT_P1394_INT		(1<<16)
#define PCI_INT_DMA4_PCL		(1<<9)
#define PCI_INT_DMA4_HLT		(1<<8)
#define PCI_INT_DMA3_PCL		(1<<7)
#define PCI_INT_DMA3_HLT		(1<<6)
#define PCI_INT_DMA2_PCL		(1<<5)
#define PCI_INT_DMA2_HLT		(1<<4)
#define PCI_INT_DMA1_PCL		(1<<3)
#define PCI_INT_DMA1_HLT		(1<<2)
#define PCI_INT_DMA0_PCL		(1<<1)
#define PCI_INT_DMA0_HLT		(1<<0)
/* all DMA interrupts combined: */
#define PCI_INT_DMA_ALL			0x3ff

/* argument parenthesized so expressions like PCI_INT_DMA_HLT(i + 1)
 * expand correctly */
#define PCI_INT_DMA_HLT(chan)		(1 << ((chan) * 2))
#define PCI_INT_DMA_PCL(chan)		(1 << ((chan) * 2 + 1))
47 | |||
48 | #define LBUS_ADDR 0xb4 | ||
49 | #define LBUS_ADDR_SEL_RAM (0x0<<16) | ||
50 | #define LBUS_ADDR_SEL_ROM (0x1<<16) | ||
51 | #define LBUS_ADDR_SEL_AUX (0x2<<16) | ||
52 | #define LBUS_ADDR_SEL_ZV (0x3<<16) | ||
53 | |||
54 | #define GPIO_CTRL_A 0xb8 | ||
55 | #define GPIO_CTRL_B 0xbc | ||
56 | #define GPIO_DATA_BASE 0xc0 | ||
57 | |||
/*
 * Per-channel register addresses: "big" bank spaced 0x20 apart, "small"
 * bank spaced 0x10 apart. Arguments are parenthesized so expression
 * arguments (e.g. DMA_BREG(DMA0_READY, i + 1)) expand correctly.
 */
#define DMA_BREG(base, chan)	((base) + (chan) * 0x20)
#define DMA_SREG(base, chan)	((base) + (chan) * 0x10)
60 | |||
61 | #define PCL_NEXT_INVALID (1<<0) | ||
62 | |||
63 | /* transfer commands */ | ||
64 | #define PCL_CMD_RCV (0x1<<24) | ||
65 | #define PCL_CMD_RCV_AND_UPDATE (0xa<<24) | ||
66 | #define PCL_CMD_XMT (0x2<<24) | ||
67 | #define PCL_CMD_UNFXMT (0xc<<24) | ||
68 | #define PCL_CMD_PCI_TO_LBUS (0x8<<24) | ||
69 | #define PCL_CMD_LBUS_TO_PCI (0x9<<24) | ||
70 | |||
71 | /* aux commands */ | ||
72 | #define PCL_CMD_NOP (0x0<<24) | ||
73 | #define PCL_CMD_LOAD (0x3<<24) | ||
74 | #define PCL_CMD_STOREQ (0x4<<24) | ||
75 | #define PCL_CMD_STORED (0xb<<24) | ||
76 | #define PCL_CMD_STORE0 (0x5<<24) | ||
77 | #define PCL_CMD_STORE1 (0x6<<24) | ||
78 | #define PCL_CMD_COMPARE (0xe<<24) | ||
79 | #define PCL_CMD_SWAP_COMPARE (0xf<<24) | ||
80 | #define PCL_CMD_ADD (0xd<<24) | ||
81 | #define PCL_CMD_BRANCH (0x7<<24) | ||
82 | |||
83 | /* BRANCH condition codes */ | ||
84 | #define PCL_COND_DMARDY_SET (0x1<<20) | ||
85 | #define PCL_COND_DMARDY_CLEAR (0x2<<20) | ||
86 | |||
87 | #define PCL_GEN_INTR (1<<19) | ||
88 | #define PCL_LAST_BUFF (1<<18) | ||
89 | #define PCL_LAST_CMD (PCL_LAST_BUFF) | ||
90 | #define PCL_WAITSTAT (1<<17) | ||
91 | #define PCL_BIGENDIAN (1<<16) | ||
92 | #define PCL_ISOMODE (1<<12) | ||
93 | |||
94 | #define DMA0_PREV_PCL 0x100 | ||
95 | #define DMA1_PREV_PCL 0x120 | ||
96 | #define DMA2_PREV_PCL 0x140 | ||
97 | #define DMA3_PREV_PCL 0x160 | ||
98 | #define DMA4_PREV_PCL 0x180 | ||
99 | #define DMA_PREV_PCL(chan) (DMA_BREG(DMA0_PREV_PCL, chan)) | ||
100 | |||
101 | #define DMA0_CURRENT_PCL 0x104 | ||
102 | #define DMA1_CURRENT_PCL 0x124 | ||
103 | #define DMA2_CURRENT_PCL 0x144 | ||
104 | #define DMA3_CURRENT_PCL 0x164 | ||
105 | #define DMA4_CURRENT_PCL 0x184 | ||
106 | #define DMA_CURRENT_PCL(chan) (DMA_BREG(DMA0_CURRENT_PCL, chan)) | ||
107 | |||
#define DMA0_CHAN_STAT		0x10c
#define DMA1_CHAN_STAT		0x12c
#define DMA2_CHAN_STAT		0x14c
#define DMA3_CHAN_STAT		0x16c
#define DMA4_CHAN_STAT		0x18c
#define DMA_CHAN_STAT(chan)	(DMA_BREG(DMA0_CHAN_STAT, chan))
/* CHAN_STATUS registers share bits */
/* bit 31: unsigned constant — 1<<31 shifts into the sign bit of int (UB) */
#define DMA_CHAN_STAT_SELFID	(1u<<31)
#define DMA_CHAN_STAT_ISOPKT	(1<<30)
#define DMA_CHAN_STAT_PCIERR	(1<<29)
#define DMA_CHAN_STAT_PKTERR	(1<<28)
#define DMA_CHAN_STAT_PKTCMPL	(1<<27)
#define DMA_CHAN_STAT_SPECIALACK	(1<<14)
121 | |||
#define DMA0_CHAN_CTRL		0x110
#define DMA1_CHAN_CTRL		0x130
#define DMA2_CHAN_CTRL		0x150
#define DMA3_CHAN_CTRL		0x170
#define DMA4_CHAN_CTRL		0x190
#define DMA_CHAN_CTRL(chan)	(DMA_BREG(DMA0_CHAN_CTRL, chan))
/* CHAN_CTRL registers share bits */
/* bit 31: unsigned constant — 1<<31 shifts into the sign bit of int (UB) */
#define DMA_CHAN_CTRL_ENABLE	(1u<<31)
#define DMA_CHAN_CTRL_BUSY	(1<<30)
#define DMA_CHAN_CTRL_LINK	(1<<29)
132 | |||
133 | #define DMA0_READY 0x114 | ||
134 | #define DMA1_READY 0x134 | ||
135 | #define DMA2_READY 0x154 | ||
136 | #define DMA3_READY 0x174 | ||
137 | #define DMA4_READY 0x194 | ||
138 | #define DMA_READY(chan) (DMA_BREG(DMA0_READY, chan)) | ||
139 | |||
140 | #define DMA_GLOBAL_REGISTER 0x908 | ||
141 | |||
142 | #define FIFO_SIZES 0xa00 | ||
143 | |||
144 | #define FIFO_CONTROL 0xa10 | ||
145 | #define FIFO_CONTROL_GRF_FLUSH (1<<4) | ||
146 | #define FIFO_CONTROL_ITF_FLUSH (1<<3) | ||
147 | #define FIFO_CONTROL_ATF_FLUSH (1<<2) | ||
148 | |||
149 | #define FIFO_XMIT_THRESHOLD 0xa14 | ||
150 | |||
151 | #define DMA0_WORD0_CMP_VALUE 0xb00 | ||
152 | #define DMA1_WORD0_CMP_VALUE 0xb10 | ||
153 | #define DMA2_WORD0_CMP_VALUE 0xb20 | ||
154 | #define DMA3_WORD0_CMP_VALUE 0xb30 | ||
155 | #define DMA4_WORD0_CMP_VALUE 0xb40 | ||
156 | #define DMA_WORD0_CMP_VALUE(chan) (DMA_SREG(DMA0_WORD0_CMP_VALUE, chan)) | ||
157 | |||
158 | #define DMA0_WORD0_CMP_ENABLE 0xb04 | ||
159 | #define DMA1_WORD0_CMP_ENABLE 0xb14 | ||
160 | #define DMA2_WORD0_CMP_ENABLE 0xb24 | ||
161 | #define DMA3_WORD0_CMP_ENABLE 0xb34 | ||
162 | #define DMA4_WORD0_CMP_ENABLE 0xb44 | ||
163 | #define DMA_WORD0_CMP_ENABLE(chan) (DMA_SREG(DMA0_WORD0_CMP_ENABLE, chan)) | ||
164 | |||
165 | #define DMA0_WORD1_CMP_VALUE 0xb08 | ||
166 | #define DMA1_WORD1_CMP_VALUE 0xb18 | ||
167 | #define DMA2_WORD1_CMP_VALUE 0xb28 | ||
168 | #define DMA3_WORD1_CMP_VALUE 0xb38 | ||
169 | #define DMA4_WORD1_CMP_VALUE 0xb48 | ||
170 | #define DMA_WORD1_CMP_VALUE(chan) (DMA_SREG(DMA0_WORD1_CMP_VALUE, chan)) | ||
171 | |||
172 | #define DMA0_WORD1_CMP_ENABLE 0xb0c | ||
173 | #define DMA1_WORD1_CMP_ENABLE 0xb1c | ||
174 | #define DMA2_WORD1_CMP_ENABLE 0xb2c | ||
175 | #define DMA3_WORD1_CMP_ENABLE 0xb3c | ||
176 | #define DMA4_WORD1_CMP_ENABLE 0xb4c | ||
177 | #define DMA_WORD1_CMP_ENABLE(chan) (DMA_SREG(DMA0_WORD1_CMP_ENABLE, chan)) | ||
178 | /* word 1 compare enable flags */ | ||
179 | #define DMA_WORD1_CMP_MATCH_OTHERBUS (1<<15) | ||
180 | #define DMA_WORD1_CMP_MATCH_BROADCAST (1<<14) | ||
181 | #define DMA_WORD1_CMP_MATCH_BUS_BCAST (1<<13) | ||
182 | #define DMA_WORD1_CMP_MATCH_LOCAL_NODE (1<<12) | ||
183 | #define DMA_WORD1_CMP_MATCH_EXACT (1<<11) | ||
184 | #define DMA_WORD1_CMP_ENABLE_SELF_ID (1<<10) | ||
185 | #define DMA_WORD1_CMP_ENABLE_MASTER (1<<8) | ||
186 | |||
#define LINK_ID			0xf00
/* arguments parenthesized so expression arguments expand correctly */
#define LINK_ID_BUS(id)		((id)<<22)
#define LINK_ID_NODE(id)	((id)<<16)
190 | |||
191 | #define LINK_CONTROL 0xf04 | ||
192 | #define LINK_CONTROL_BUSY (1<<29) | ||
193 | #define LINK_CONTROL_TX_ISO_EN (1<<26) | ||
194 | #define LINK_CONTROL_RX_ISO_EN (1<<25) | ||
195 | #define LINK_CONTROL_TX_ASYNC_EN (1<<24) | ||
196 | #define LINK_CONTROL_RX_ASYNC_EN (1<<23) | ||
197 | #define LINK_CONTROL_RESET_TX (1<<21) | ||
198 | #define LINK_CONTROL_RESET_RX (1<<20) | ||
199 | #define LINK_CONTROL_CYCMASTER (1<<11) | ||
200 | #define LINK_CONTROL_CYCSOURCE (1<<10) | ||
201 | #define LINK_CONTROL_CYCTIMEREN (1<<9) | ||
202 | #define LINK_CONTROL_RCV_CMP_VALID (1<<7) | ||
203 | #define LINK_CONTROL_SNOOP_ENABLE (1<<6) | ||
204 | |||
205 | #define CYCLE_TIMER 0xf08 | ||
206 | |||
#define LINK_PHY		0xf0c
/* bit 31: unsigned constant — 1<<31 shifts into the sign bit of int (UB) */
#define LINK_PHY_READ		(1u<<31)
#define LINK_PHY_WRITE		(1<<30)
/* arguments parenthesized so expression arguments expand correctly */
#define LINK_PHY_ADDR(addr)	((addr)<<24)
#define LINK_PHY_WDATA(data)	((data)<<16)
#define LINK_PHY_RADDR(addr)	((addr)<<8)
213 | |||
#define LINK_INT_STATUS		0xf14
#define LINK_INT_ENABLE		0xf18
/* status and enable have identical bit numbers */
/* bit 31: unsigned constant — 1<<31 shifts into the sign bit of int (UB) */
#define LINK_INT_LINK_INT	(1u<<31)
#define LINK_INT_PHY_TIME_OUT	(1<<30)
#define LINK_INT_PHY_REG_RCVD	(1<<29)
#define LINK_INT_PHY_BUSRESET	(1<<28)
#define LINK_INT_TX_RDY		(1<<26)
#define LINK_INT_RX_DATA_RDY	(1<<25)
#define LINK_INT_IT_STUCK	(1<<20)
#define LINK_INT_AT_STUCK	(1<<19)
#define LINK_INT_SNTRJ		(1<<17)
#define LINK_INT_HDR_ERR	(1<<16)
#define LINK_INT_TC_ERR		(1<<15)
#define LINK_INT_CYC_SEC	(1<<11)
#define LINK_INT_CYC_STRT	(1<<10)
#define LINK_INT_CYC_DONE	(1<<9)
#define LINK_INT_CYC_PEND	(1<<8)
#define LINK_INT_CYC_LOST	(1<<7)
#define LINK_INT_CYC_ARB_FAILED	(1<<6)
#define LINK_INT_GRF_OVER_FLOW	(1<<5)
#define LINK_INT_ITF_UNDER_FLOW	(1<<4)
#define LINK_INT_ATF_UNDER_FLOW	(1<<3)
#define LINK_INT_IARB_FAILED	(1<<0)
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index 9f627e758cfc..7f03540cabe8 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c | |||
@@ -18,6 +18,7 @@ | |||
18 | * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | 18 | * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. |
19 | */ | 19 | */ |
20 | 20 | ||
21 | #include <linux/bug.h> | ||
21 | #include <linux/compiler.h> | 22 | #include <linux/compiler.h> |
22 | #include <linux/delay.h> | 23 | #include <linux/delay.h> |
23 | #include <linux/device.h> | 24 | #include <linux/device.h> |
@@ -32,11 +33,13 @@ | |||
32 | #include <linux/mm.h> | 33 | #include <linux/mm.h> |
33 | #include <linux/module.h> | 34 | #include <linux/module.h> |
34 | #include <linux/moduleparam.h> | 35 | #include <linux/moduleparam.h> |
36 | #include <linux/mutex.h> | ||
35 | #include <linux/pci.h> | 37 | #include <linux/pci.h> |
36 | #include <linux/pci_ids.h> | 38 | #include <linux/pci_ids.h> |
37 | #include <linux/slab.h> | 39 | #include <linux/slab.h> |
38 | #include <linux/spinlock.h> | 40 | #include <linux/spinlock.h> |
39 | #include <linux/string.h> | 41 | #include <linux/string.h> |
42 | #include <linux/time.h> | ||
40 | 43 | ||
41 | #include <asm/byteorder.h> | 44 | #include <asm/byteorder.h> |
42 | #include <asm/page.h> | 45 | #include <asm/page.h> |
@@ -170,6 +173,10 @@ struct fw_ohci { | |||
170 | int generation; | 173 | int generation; |
171 | int request_generation; /* for timestamping incoming requests */ | 174 | int request_generation; /* for timestamping incoming requests */ |
172 | unsigned quirks; | 175 | unsigned quirks; |
176 | unsigned int pri_req_max; | ||
177 | u32 bus_time; | ||
178 | bool is_root; | ||
179 | bool csr_state_setclear_abdicate; | ||
173 | 180 | ||
174 | /* | 181 | /* |
175 | * Spinlock for accessing fw_ohci data. Never call out of | 182 | * Spinlock for accessing fw_ohci data. Never call out of |
@@ -177,16 +184,20 @@ struct fw_ohci { | |||
177 | */ | 184 | */ |
178 | spinlock_t lock; | 185 | spinlock_t lock; |
179 | 186 | ||
187 | struct mutex phy_reg_mutex; | ||
188 | |||
180 | struct ar_context ar_request_ctx; | 189 | struct ar_context ar_request_ctx; |
181 | struct ar_context ar_response_ctx; | 190 | struct ar_context ar_response_ctx; |
182 | struct context at_request_ctx; | 191 | struct context at_request_ctx; |
183 | struct context at_response_ctx; | 192 | struct context at_response_ctx; |
184 | 193 | ||
185 | u32 it_context_mask; | 194 | u32 it_context_mask; /* unoccupied IT contexts */ |
186 | struct iso_context *it_context_list; | 195 | struct iso_context *it_context_list; |
187 | u64 ir_context_channels; | 196 | u64 ir_context_channels; /* unoccupied channels */ |
188 | u32 ir_context_mask; | 197 | u32 ir_context_mask; /* unoccupied IR contexts */ |
189 | struct iso_context *ir_context_list; | 198 | struct iso_context *ir_context_list; |
199 | u64 mc_channels; /* channels in use by the multichannel IR context */ | ||
200 | bool mc_allocated; | ||
190 | 201 | ||
191 | __be32 *config_rom; | 202 | __be32 *config_rom; |
192 | dma_addr_t config_rom_bus; | 203 | dma_addr_t config_rom_bus; |
@@ -231,12 +242,14 @@ static inline struct fw_ohci *fw_ohci(struct fw_card *card) | |||
231 | 242 | ||
232 | static char ohci_driver_name[] = KBUILD_MODNAME; | 243 | static char ohci_driver_name[] = KBUILD_MODNAME; |
233 | 244 | ||
245 | #define PCI_DEVICE_ID_JMICRON_JMB38X_FW 0x2380 | ||
234 | #define PCI_DEVICE_ID_TI_TSB12LV22 0x8009 | 246 | #define PCI_DEVICE_ID_TI_TSB12LV22 0x8009 |
235 | 247 | ||
236 | #define QUIRK_CYCLE_TIMER 1 | 248 | #define QUIRK_CYCLE_TIMER 1 |
237 | #define QUIRK_RESET_PACKET 2 | 249 | #define QUIRK_RESET_PACKET 2 |
238 | #define QUIRK_BE_HEADERS 4 | 250 | #define QUIRK_BE_HEADERS 4 |
239 | #define QUIRK_NO_1394A 8 | 251 | #define QUIRK_NO_1394A 8 |
252 | #define QUIRK_NO_MSI 16 | ||
240 | 253 | ||
241 | /* In case of multiple matches in ohci_quirks[], only the first one is used. */ | 254 | /* In case of multiple matches in ohci_quirks[], only the first one is used. */ |
242 | static const struct { | 255 | static const struct { |
@@ -247,6 +260,7 @@ static const struct { | |||
247 | QUIRK_NO_1394A}, | 260 | QUIRK_NO_1394A}, |
248 | {PCI_VENDOR_ID_TI, PCI_ANY_ID, QUIRK_RESET_PACKET}, | 261 | {PCI_VENDOR_ID_TI, PCI_ANY_ID, QUIRK_RESET_PACKET}, |
249 | {PCI_VENDOR_ID_AL, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, | 262 | {PCI_VENDOR_ID_AL, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, |
263 | {PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, QUIRK_NO_MSI}, | ||
250 | {PCI_VENDOR_ID_NEC, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, | 264 | {PCI_VENDOR_ID_NEC, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, |
251 | {PCI_VENDOR_ID_VIA, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, | 265 | {PCI_VENDOR_ID_VIA, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, |
252 | {PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, QUIRK_BE_HEADERS}, | 266 | {PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, QUIRK_BE_HEADERS}, |
@@ -260,6 +274,7 @@ MODULE_PARM_DESC(quirks, "Chip quirks (default = 0" | |||
260 | ", reset packet generation = " __stringify(QUIRK_RESET_PACKET) | 274 | ", reset packet generation = " __stringify(QUIRK_RESET_PACKET) |
261 | ", AR/selfID endianess = " __stringify(QUIRK_BE_HEADERS) | 275 | ", AR/selfID endianess = " __stringify(QUIRK_BE_HEADERS) |
262 | ", no 1394a enhancements = " __stringify(QUIRK_NO_1394A) | 276 | ", no 1394a enhancements = " __stringify(QUIRK_NO_1394A) |
277 | ", disable MSI = " __stringify(QUIRK_NO_MSI) | ||
263 | ")"); | 278 | ")"); |
264 | 279 | ||
265 | #define OHCI_PARAM_DEBUG_AT_AR 1 | 280 | #define OHCI_PARAM_DEBUG_AT_AR 1 |
@@ -288,7 +303,7 @@ static void log_irqs(u32 evt) | |||
288 | !(evt & OHCI1394_busReset)) | 303 | !(evt & OHCI1394_busReset)) |
289 | return; | 304 | return; |
290 | 305 | ||
291 | fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt, | 306 | fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt, |
292 | evt & OHCI1394_selfIDComplete ? " selfID" : "", | 307 | evt & OHCI1394_selfIDComplete ? " selfID" : "", |
293 | evt & OHCI1394_RQPkt ? " AR_req" : "", | 308 | evt & OHCI1394_RQPkt ? " AR_req" : "", |
294 | evt & OHCI1394_RSPkt ? " AR_resp" : "", | 309 | evt & OHCI1394_RSPkt ? " AR_resp" : "", |
@@ -298,6 +313,7 @@ static void log_irqs(u32 evt) | |||
298 | evt & OHCI1394_isochTx ? " IT" : "", | 313 | evt & OHCI1394_isochTx ? " IT" : "", |
299 | evt & OHCI1394_postedWriteErr ? " postedWriteErr" : "", | 314 | evt & OHCI1394_postedWriteErr ? " postedWriteErr" : "", |
300 | evt & OHCI1394_cycleTooLong ? " cycleTooLong" : "", | 315 | evt & OHCI1394_cycleTooLong ? " cycleTooLong" : "", |
316 | evt & OHCI1394_cycle64Seconds ? " cycle64Seconds" : "", | ||
301 | evt & OHCI1394_cycleInconsistent ? " cycleInconsistent" : "", | 317 | evt & OHCI1394_cycleInconsistent ? " cycleInconsistent" : "", |
302 | evt & OHCI1394_regAccessFail ? " regAccessFail" : "", | 318 | evt & OHCI1394_regAccessFail ? " regAccessFail" : "", |
303 | evt & OHCI1394_busReset ? " busReset" : "", | 319 | evt & OHCI1394_busReset ? " busReset" : "", |
@@ -305,7 +321,8 @@ static void log_irqs(u32 evt) | |||
305 | OHCI1394_RSPkt | OHCI1394_reqTxComplete | | 321 | OHCI1394_RSPkt | OHCI1394_reqTxComplete | |
306 | OHCI1394_respTxComplete | OHCI1394_isochRx | | 322 | OHCI1394_respTxComplete | OHCI1394_isochRx | |
307 | OHCI1394_isochTx | OHCI1394_postedWriteErr | | 323 | OHCI1394_isochTx | OHCI1394_postedWriteErr | |
308 | OHCI1394_cycleTooLong | OHCI1394_cycleInconsistent | | 324 | OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds | |
325 | OHCI1394_cycleInconsistent | | ||
309 | OHCI1394_regAccessFail | OHCI1394_busReset) | 326 | OHCI1394_regAccessFail | OHCI1394_busReset) |
310 | ? " ?" : ""); | 327 | ? " ?" : ""); |
311 | } | 328 | } |
@@ -470,12 +487,17 @@ static int read_phy_reg(struct fw_ohci *ohci, int addr) | |||
470 | int i; | 487 | int i; |
471 | 488 | ||
472 | reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr)); | 489 | reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr)); |
473 | for (i = 0; i < 10; i++) { | 490 | for (i = 0; i < 3 + 100; i++) { |
474 | val = reg_read(ohci, OHCI1394_PhyControl); | 491 | val = reg_read(ohci, OHCI1394_PhyControl); |
475 | if (val & OHCI1394_PhyControl_ReadDone) | 492 | if (val & OHCI1394_PhyControl_ReadDone) |
476 | return OHCI1394_PhyControl_ReadData(val); | 493 | return OHCI1394_PhyControl_ReadData(val); |
477 | 494 | ||
478 | msleep(1); | 495 | /* |
496 | * Try a few times without waiting. Sleeping is necessary | ||
497 | * only when the link/PHY interface is busy. | ||
498 | */ | ||
499 | if (i >= 3) | ||
500 | msleep(1); | ||
479 | } | 501 | } |
480 | fw_error("failed to read phy reg\n"); | 502 | fw_error("failed to read phy reg\n"); |
481 | 503 | ||
@@ -488,25 +510,23 @@ static int write_phy_reg(const struct fw_ohci *ohci, int addr, u32 val) | |||
488 | 510 | ||
489 | reg_write(ohci, OHCI1394_PhyControl, | 511 | reg_write(ohci, OHCI1394_PhyControl, |
490 | OHCI1394_PhyControl_Write(addr, val)); | 512 | OHCI1394_PhyControl_Write(addr, val)); |
491 | for (i = 0; i < 100; i++) { | 513 | for (i = 0; i < 3 + 100; i++) { |
492 | val = reg_read(ohci, OHCI1394_PhyControl); | 514 | val = reg_read(ohci, OHCI1394_PhyControl); |
493 | if (!(val & OHCI1394_PhyControl_WritePending)) | 515 | if (!(val & OHCI1394_PhyControl_WritePending)) |
494 | return 0; | 516 | return 0; |
495 | 517 | ||
496 | msleep(1); | 518 | if (i >= 3) |
519 | msleep(1); | ||
497 | } | 520 | } |
498 | fw_error("failed to write phy reg\n"); | 521 | fw_error("failed to write phy reg\n"); |
499 | 522 | ||
500 | return -EBUSY; | 523 | return -EBUSY; |
501 | } | 524 | } |
502 | 525 | ||
503 | static int ohci_update_phy_reg(struct fw_card *card, int addr, | 526 | static int update_phy_reg(struct fw_ohci *ohci, int addr, |
504 | int clear_bits, int set_bits) | 527 | int clear_bits, int set_bits) |
505 | { | 528 | { |
506 | struct fw_ohci *ohci = fw_ohci(card); | 529 | int ret = read_phy_reg(ohci, addr); |
507 | int ret; | ||
508 | |||
509 | ret = read_phy_reg(ohci, addr); | ||
510 | if (ret < 0) | 530 | if (ret < 0) |
511 | return ret; | 531 | return ret; |
512 | 532 | ||
@@ -524,13 +544,38 @@ static int read_paged_phy_reg(struct fw_ohci *ohci, int page, int addr) | |||
524 | { | 544 | { |
525 | int ret; | 545 | int ret; |
526 | 546 | ||
527 | ret = ohci_update_phy_reg(&ohci->card, 7, PHY_PAGE_SELECT, page << 5); | 547 | ret = update_phy_reg(ohci, 7, PHY_PAGE_SELECT, page << 5); |
528 | if (ret < 0) | 548 | if (ret < 0) |
529 | return ret; | 549 | return ret; |
530 | 550 | ||
531 | return read_phy_reg(ohci, addr); | 551 | return read_phy_reg(ohci, addr); |
532 | } | 552 | } |
533 | 553 | ||
554 | static int ohci_read_phy_reg(struct fw_card *card, int addr) | ||
555 | { | ||
556 | struct fw_ohci *ohci = fw_ohci(card); | ||
557 | int ret; | ||
558 | |||
559 | mutex_lock(&ohci->phy_reg_mutex); | ||
560 | ret = read_phy_reg(ohci, addr); | ||
561 | mutex_unlock(&ohci->phy_reg_mutex); | ||
562 | |||
563 | return ret; | ||
564 | } | ||
565 | |||
566 | static int ohci_update_phy_reg(struct fw_card *card, int addr, | ||
567 | int clear_bits, int set_bits) | ||
568 | { | ||
569 | struct fw_ohci *ohci = fw_ohci(card); | ||
570 | int ret; | ||
571 | |||
572 | mutex_lock(&ohci->phy_reg_mutex); | ||
573 | ret = update_phy_reg(ohci, addr, clear_bits, set_bits); | ||
574 | mutex_unlock(&ohci->phy_reg_mutex); | ||
575 | |||
576 | return ret; | ||
577 | } | ||
578 | |||
534 | static int ar_context_add_page(struct ar_context *ctx) | 579 | static int ar_context_add_page(struct ar_context *ctx) |
535 | { | 580 | { |
536 | struct device *dev = ctx->ohci->card.device; | 581 | struct device *dev = ctx->ohci->card.device; |
@@ -553,6 +598,7 @@ static int ar_context_add_page(struct ar_context *ctx) | |||
553 | ab->descriptor.res_count = cpu_to_le16(PAGE_SIZE - offset); | 598 | ab->descriptor.res_count = cpu_to_le16(PAGE_SIZE - offset); |
554 | ab->descriptor.branch_address = 0; | 599 | ab->descriptor.branch_address = 0; |
555 | 600 | ||
601 | wmb(); /* finish init of new descriptors before branch_address update */ | ||
556 | ctx->last_buffer->descriptor.branch_address = cpu_to_le32(ab_bus | 1); | 602 | ctx->last_buffer->descriptor.branch_address = cpu_to_le32(ab_bus | 1); |
557 | ctx->last_buffer->next = ab; | 603 | ctx->last_buffer->next = ab; |
558 | ctx->last_buffer = ab; | 604 | ctx->last_buffer = ab; |
@@ -940,6 +986,8 @@ static void context_append(struct context *ctx, | |||
940 | d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d); | 986 | d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d); |
941 | 987 | ||
942 | desc->used += (z + extra) * sizeof(*d); | 988 | desc->used += (z + extra) * sizeof(*d); |
989 | |||
990 | wmb(); /* finish init of new descriptors before branch_address update */ | ||
943 | ctx->prev->branch_address = cpu_to_le32(d_bus | z); | 991 | ctx->prev->branch_address = cpu_to_le32(d_bus | z); |
944 | ctx->prev = find_branch_descriptor(d, z); | 992 | ctx->prev = find_branch_descriptor(d, z); |
945 | 993 | ||
@@ -1026,6 +1074,9 @@ static int at_context_queue_packet(struct context *ctx, | |||
1026 | header[1] = cpu_to_le32(packet->header[0]); | 1074 | header[1] = cpu_to_le32(packet->header[0]); |
1027 | header[2] = cpu_to_le32(packet->header[1]); | 1075 | header[2] = cpu_to_le32(packet->header[1]); |
1028 | d[0].req_count = cpu_to_le16(12); | 1076 | d[0].req_count = cpu_to_le16(12); |
1077 | |||
1078 | if (is_ping_packet(packet->header)) | ||
1079 | d[0].control |= cpu_to_le16(DESCRIPTOR_PING); | ||
1029 | break; | 1080 | break; |
1030 | 1081 | ||
1031 | case 4: | 1082 | case 4: |
@@ -1311,6 +1362,78 @@ static void at_context_transmit(struct context *ctx, struct fw_packet *packet) | |||
1311 | 1362 | ||
1312 | } | 1363 | } |
1313 | 1364 | ||
1365 | static u32 cycle_timer_ticks(u32 cycle_timer) | ||
1366 | { | ||
1367 | u32 ticks; | ||
1368 | |||
1369 | ticks = cycle_timer & 0xfff; | ||
1370 | ticks += 3072 * ((cycle_timer >> 12) & 0x1fff); | ||
1371 | ticks += (3072 * 8000) * (cycle_timer >> 25); | ||
1372 | |||
1373 | return ticks; | ||
1374 | } | ||
1375 | |||
1376 | /* | ||
1377 | * Some controllers exhibit one or more of the following bugs when updating the | ||
1378 | * iso cycle timer register: | ||
1379 | * - When the lowest six bits are wrapping around to zero, a read that happens | ||
1380 | * at the same time will return garbage in the lowest ten bits. | ||
1381 | * - When the cycleOffset field wraps around to zero, the cycleCount field is | ||
1382 | * not incremented for about 60 ns. | ||
1383 | * - Occasionally, the entire register reads zero. | ||
1384 | * | ||
1385 | * To catch these, we read the register three times and ensure that the | ||
1386 | * difference between each two consecutive reads is approximately the same, i.e. | ||
1387 | * less than twice the other. Furthermore, any negative difference indicates an | ||
1388 | * error. (A PCI read should take at least 20 ticks of the 24.576 MHz timer to | ||
1389 | * execute, so we have enough precision to compute the ratio of the differences.) | ||
1390 | */ | ||
1391 | static u32 get_cycle_time(struct fw_ohci *ohci) | ||
1392 | { | ||
1393 | u32 c0, c1, c2; | ||
1394 | u32 t0, t1, t2; | ||
1395 | s32 diff01, diff12; | ||
1396 | int i; | ||
1397 | |||
1398 | c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer); | ||
1399 | |||
1400 | if (ohci->quirks & QUIRK_CYCLE_TIMER) { | ||
1401 | i = 0; | ||
1402 | c1 = c2; | ||
1403 | c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer); | ||
1404 | do { | ||
1405 | c0 = c1; | ||
1406 | c1 = c2; | ||
1407 | c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer); | ||
1408 | t0 = cycle_timer_ticks(c0); | ||
1409 | t1 = cycle_timer_ticks(c1); | ||
1410 | t2 = cycle_timer_ticks(c2); | ||
1411 | diff01 = t1 - t0; | ||
1412 | diff12 = t2 - t1; | ||
1413 | } while ((diff01 <= 0 || diff12 <= 0 || | ||
1414 | diff01 / diff12 >= 2 || diff12 / diff01 >= 2) | ||
1415 | && i++ < 20); | ||
1416 | } | ||
1417 | |||
1418 | return c2; | ||
1419 | } | ||
1420 | |||
1421 | /* | ||
1422 | * This function has to be called at least every 64 seconds. The bus_time | ||
1423 | * field stores not only the upper 25 bits of the BUS_TIME register but also | ||
1424 | * the most significant bit of the cycle timer in bit 6 so that we can detect | ||
1425 | * changes in this bit. | ||
1426 | */ | ||
1427 | static u32 update_bus_time(struct fw_ohci *ohci) | ||
1428 | { | ||
1429 | u32 cycle_time_seconds = get_cycle_time(ohci) >> 25; | ||
1430 | |||
1431 | if ((ohci->bus_time & 0x40) != (cycle_time_seconds & 0x40)) | ||
1432 | ohci->bus_time += 0x40; | ||
1433 | |||
1434 | return ohci->bus_time | cycle_time_seconds; | ||
1435 | } | ||
1436 | |||
1314 | static void bus_reset_tasklet(unsigned long data) | 1437 | static void bus_reset_tasklet(unsigned long data) |
1315 | { | 1438 | { |
1316 | struct fw_ohci *ohci = (struct fw_ohci *)data; | 1439 | struct fw_ohci *ohci = (struct fw_ohci *)data; |
@@ -1319,6 +1442,7 @@ static void bus_reset_tasklet(unsigned long data) | |||
1319 | unsigned long flags; | 1442 | unsigned long flags; |
1320 | void *free_rom = NULL; | 1443 | void *free_rom = NULL; |
1321 | dma_addr_t free_rom_bus = 0; | 1444 | dma_addr_t free_rom_bus = 0; |
1445 | bool is_new_root; | ||
1322 | 1446 | ||
1323 | reg = reg_read(ohci, OHCI1394_NodeID); | 1447 | reg = reg_read(ohci, OHCI1394_NodeID); |
1324 | if (!(reg & OHCI1394_NodeID_idValid)) { | 1448 | if (!(reg & OHCI1394_NodeID_idValid)) { |
@@ -1332,6 +1456,12 @@ static void bus_reset_tasklet(unsigned long data) | |||
1332 | ohci->node_id = reg & (OHCI1394_NodeID_busNumber | | 1456 | ohci->node_id = reg & (OHCI1394_NodeID_busNumber | |
1333 | OHCI1394_NodeID_nodeNumber); | 1457 | OHCI1394_NodeID_nodeNumber); |
1334 | 1458 | ||
1459 | is_new_root = (reg & OHCI1394_NodeID_root) != 0; | ||
1460 | if (!(ohci->is_root && is_new_root)) | ||
1461 | reg_write(ohci, OHCI1394_LinkControlSet, | ||
1462 | OHCI1394_LinkControl_cycleMaster); | ||
1463 | ohci->is_root = is_new_root; | ||
1464 | |||
1335 | reg = reg_read(ohci, OHCI1394_SelfIDCount); | 1465 | reg = reg_read(ohci, OHCI1394_SelfIDCount); |
1336 | if (reg & OHCI1394_SelfIDCount_selfIDError) { | 1466 | if (reg & OHCI1394_SelfIDCount_selfIDError) { |
1337 | fw_notify("inconsistent self IDs\n"); | 1467 | fw_notify("inconsistent self IDs\n"); |
@@ -1439,7 +1569,9 @@ static void bus_reset_tasklet(unsigned long data) | |||
1439 | self_id_count, ohci->self_id_buffer); | 1569 | self_id_count, ohci->self_id_buffer); |
1440 | 1570 | ||
1441 | fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation, | 1571 | fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation, |
1442 | self_id_count, ohci->self_id_buffer); | 1572 | self_id_count, ohci->self_id_buffer, |
1573 | ohci->csr_state_setclear_abdicate); | ||
1574 | ohci->csr_state_setclear_abdicate = false; | ||
1443 | } | 1575 | } |
1444 | 1576 | ||
1445 | static irqreturn_t irq_handler(int irq, void *data) | 1577 | static irqreturn_t irq_handler(int irq, void *data) |
@@ -1515,6 +1647,12 @@ static irqreturn_t irq_handler(int irq, void *data) | |||
1515 | fw_notify("isochronous cycle inconsistent\n"); | 1647 | fw_notify("isochronous cycle inconsistent\n"); |
1516 | } | 1648 | } |
1517 | 1649 | ||
1650 | if (event & OHCI1394_cycle64Seconds) { | ||
1651 | spin_lock(&ohci->lock); | ||
1652 | update_bus_time(ohci); | ||
1653 | spin_unlock(&ohci->lock); | ||
1654 | } | ||
1655 | |||
1518 | return IRQ_HANDLED; | 1656 | return IRQ_HANDLED; |
1519 | } | 1657 | } |
1520 | 1658 | ||
@@ -1577,7 +1715,7 @@ static int configure_1394a_enhancements(struct fw_ohci *ohci) | |||
1577 | clear = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI; | 1715 | clear = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI; |
1578 | set = 0; | 1716 | set = 0; |
1579 | } | 1717 | } |
1580 | ret = ohci_update_phy_reg(&ohci->card, 5, clear, set); | 1718 | ret = update_phy_reg(ohci, 5, clear, set); |
1581 | if (ret < 0) | 1719 | if (ret < 0) |
1582 | return ret; | 1720 | return ret; |
1583 | 1721 | ||
@@ -1599,7 +1737,7 @@ static int ohci_enable(struct fw_card *card, | |||
1599 | { | 1737 | { |
1600 | struct fw_ohci *ohci = fw_ohci(card); | 1738 | struct fw_ohci *ohci = fw_ohci(card); |
1601 | struct pci_dev *dev = to_pci_dev(card->device); | 1739 | struct pci_dev *dev = to_pci_dev(card->device); |
1602 | u32 lps; | 1740 | u32 lps, seconds, version, irqs; |
1603 | int i, ret; | 1741 | int i, ret; |
1604 | 1742 | ||
1605 | if (software_reset(ohci)) { | 1743 | if (software_reset(ohci)) { |
@@ -1635,17 +1773,34 @@ static int ohci_enable(struct fw_card *card, | |||
1635 | OHCI1394_HCControl_noByteSwapData); | 1773 | OHCI1394_HCControl_noByteSwapData); |
1636 | 1774 | ||
1637 | reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus); | 1775 | reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus); |
1638 | reg_write(ohci, OHCI1394_LinkControlClear, | ||
1639 | OHCI1394_LinkControl_rcvPhyPkt); | ||
1640 | reg_write(ohci, OHCI1394_LinkControlSet, | 1776 | reg_write(ohci, OHCI1394_LinkControlSet, |
1641 | OHCI1394_LinkControl_rcvSelfID | | 1777 | OHCI1394_LinkControl_rcvSelfID | |
1778 | OHCI1394_LinkControl_rcvPhyPkt | | ||
1642 | OHCI1394_LinkControl_cycleTimerEnable | | 1779 | OHCI1394_LinkControl_cycleTimerEnable | |
1643 | OHCI1394_LinkControl_cycleMaster); | 1780 | OHCI1394_LinkControl_cycleMaster); |
1644 | 1781 | ||
1645 | reg_write(ohci, OHCI1394_ATRetries, | 1782 | reg_write(ohci, OHCI1394_ATRetries, |
1646 | OHCI1394_MAX_AT_REQ_RETRIES | | 1783 | OHCI1394_MAX_AT_REQ_RETRIES | |
1647 | (OHCI1394_MAX_AT_RESP_RETRIES << 4) | | 1784 | (OHCI1394_MAX_AT_RESP_RETRIES << 4) | |
1648 | (OHCI1394_MAX_PHYS_RESP_RETRIES << 8)); | 1785 | (OHCI1394_MAX_PHYS_RESP_RETRIES << 8) | |
1786 | (200 << 16)); | ||
1787 | |||
1788 | seconds = lower_32_bits(get_seconds()); | ||
1789 | reg_write(ohci, OHCI1394_IsochronousCycleTimer, seconds << 25); | ||
1790 | ohci->bus_time = seconds & ~0x3f; | ||
1791 | |||
1792 | version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff; | ||
1793 | if (version >= OHCI_VERSION_1_1) { | ||
1794 | reg_write(ohci, OHCI1394_InitialChannelsAvailableHi, | ||
1795 | 0xfffffffe); | ||
1796 | card->broadcast_channel_auto_allocated = true; | ||
1797 | } | ||
1798 | |||
1799 | /* Get implemented bits of the priority arbitration request counter. */ | ||
1800 | reg_write(ohci, OHCI1394_FairnessControl, 0x3f); | ||
1801 | ohci->pri_req_max = reg_read(ohci, OHCI1394_FairnessControl) & 0x3f; | ||
1802 | reg_write(ohci, OHCI1394_FairnessControl, 0); | ||
1803 | card->priority_budget_implemented = ohci->pri_req_max != 0; | ||
1649 | 1804 | ||
1650 | ar_context_run(&ohci->ar_request_ctx); | 1805 | ar_context_run(&ohci->ar_request_ctx); |
1651 | ar_context_run(&ohci->ar_response_ctx); | 1806 | ar_context_run(&ohci->ar_response_ctx); |
@@ -1653,16 +1808,6 @@ static int ohci_enable(struct fw_card *card, | |||
1653 | reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000); | 1808 | reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000); |
1654 | reg_write(ohci, OHCI1394_IntEventClear, ~0); | 1809 | reg_write(ohci, OHCI1394_IntEventClear, ~0); |
1655 | reg_write(ohci, OHCI1394_IntMaskClear, ~0); | 1810 | reg_write(ohci, OHCI1394_IntMaskClear, ~0); |
1656 | reg_write(ohci, OHCI1394_IntMaskSet, | ||
1657 | OHCI1394_selfIDComplete | | ||
1658 | OHCI1394_RQPkt | OHCI1394_RSPkt | | ||
1659 | OHCI1394_reqTxComplete | OHCI1394_respTxComplete | | ||
1660 | OHCI1394_isochRx | OHCI1394_isochTx | | ||
1661 | OHCI1394_postedWriteErr | OHCI1394_cycleTooLong | | ||
1662 | OHCI1394_cycleInconsistent | OHCI1394_regAccessFail | | ||
1663 | OHCI1394_masterIntEnable); | ||
1664 | if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS) | ||
1665 | reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset); | ||
1666 | 1811 | ||
1667 | ret = configure_1394a_enhancements(ohci); | 1812 | ret = configure_1394a_enhancements(ohci); |
1668 | if (ret < 0) | 1813 | if (ret < 0) |
@@ -1719,26 +1864,38 @@ static int ohci_enable(struct fw_card *card, | |||
1719 | 1864 | ||
1720 | reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000); | 1865 | reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000); |
1721 | 1866 | ||
1867 | if (!(ohci->quirks & QUIRK_NO_MSI)) | ||
1868 | pci_enable_msi(dev); | ||
1722 | if (request_irq(dev->irq, irq_handler, | 1869 | if (request_irq(dev->irq, irq_handler, |
1723 | IRQF_SHARED, ohci_driver_name, ohci)) { | 1870 | pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED, |
1724 | fw_error("Failed to allocate shared interrupt %d.\n", | 1871 | ohci_driver_name, ohci)) { |
1725 | dev->irq); | 1872 | fw_error("Failed to allocate interrupt %d.\n", dev->irq); |
1873 | pci_disable_msi(dev); | ||
1726 | dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, | 1874 | dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, |
1727 | ohci->config_rom, ohci->config_rom_bus); | 1875 | ohci->config_rom, ohci->config_rom_bus); |
1728 | return -EIO; | 1876 | return -EIO; |
1729 | } | 1877 | } |
1730 | 1878 | ||
1879 | irqs = OHCI1394_reqTxComplete | OHCI1394_respTxComplete | | ||
1880 | OHCI1394_RQPkt | OHCI1394_RSPkt | | ||
1881 | OHCI1394_isochTx | OHCI1394_isochRx | | ||
1882 | OHCI1394_postedWriteErr | | ||
1883 | OHCI1394_selfIDComplete | | ||
1884 | OHCI1394_regAccessFail | | ||
1885 | OHCI1394_cycle64Seconds | | ||
1886 | OHCI1394_cycleInconsistent | OHCI1394_cycleTooLong | | ||
1887 | OHCI1394_masterIntEnable; | ||
1888 | if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS) | ||
1889 | irqs |= OHCI1394_busReset; | ||
1890 | reg_write(ohci, OHCI1394_IntMaskSet, irqs); | ||
1891 | |||
1731 | reg_write(ohci, OHCI1394_HCControlSet, | 1892 | reg_write(ohci, OHCI1394_HCControlSet, |
1732 | OHCI1394_HCControl_linkEnable | | 1893 | OHCI1394_HCControl_linkEnable | |
1733 | OHCI1394_HCControl_BIBimageValid); | 1894 | OHCI1394_HCControl_BIBimageValid); |
1734 | flush_writes(ohci); | 1895 | flush_writes(ohci); |
1735 | 1896 | ||
1736 | /* | 1897 | /* We are ready to go, reset bus to finish initialization. */ |
1737 | * We are ready to go, initiate bus reset to finish the | 1898 | fw_schedule_bus_reset(&ohci->card, false, true); |
1738 | * initialization. | ||
1739 | */ | ||
1740 | |||
1741 | fw_core_initiate_bus_reset(&ohci->card, 1); | ||
1742 | 1899 | ||
1743 | return 0; | 1900 | return 0; |
1744 | } | 1901 | } |
@@ -1813,7 +1970,7 @@ static int ohci_set_config_rom(struct fw_card *card, | |||
1813 | * takes effect. | 1970 | * takes effect. |
1814 | */ | 1971 | */ |
1815 | if (ret == 0) | 1972 | if (ret == 0) |
1816 | fw_core_initiate_bus_reset(&ohci->card, 1); | 1973 | fw_schedule_bus_reset(&ohci->card, true, true); |
1817 | else | 1974 | else |
1818 | dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, | 1975 | dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, |
1819 | next_config_rom, next_config_rom_bus); | 1976 | next_config_rom, next_config_rom_bus); |
@@ -1903,61 +2060,117 @@ static int ohci_enable_phys_dma(struct fw_card *card, | |||
1903 | #endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */ | 2060 | #endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */ |
1904 | } | 2061 | } |
1905 | 2062 | ||
1906 | static u32 cycle_timer_ticks(u32 cycle_timer) | 2063 | static u32 ohci_read_csr(struct fw_card *card, int csr_offset) |
1907 | { | 2064 | { |
1908 | u32 ticks; | 2065 | struct fw_ohci *ohci = fw_ohci(card); |
2066 | unsigned long flags; | ||
2067 | u32 value; | ||
2068 | |||
2069 | switch (csr_offset) { | ||
2070 | case CSR_STATE_CLEAR: | ||
2071 | case CSR_STATE_SET: | ||
2072 | if (ohci->is_root && | ||
2073 | (reg_read(ohci, OHCI1394_LinkControlSet) & | ||
2074 | OHCI1394_LinkControl_cycleMaster)) | ||
2075 | value = CSR_STATE_BIT_CMSTR; | ||
2076 | else | ||
2077 | value = 0; | ||
2078 | if (ohci->csr_state_setclear_abdicate) | ||
2079 | value |= CSR_STATE_BIT_ABDICATE; | ||
1909 | 2080 | ||
1910 | ticks = cycle_timer & 0xfff; | 2081 | return value; |
1911 | ticks += 3072 * ((cycle_timer >> 12) & 0x1fff); | ||
1912 | ticks += (3072 * 8000) * (cycle_timer >> 25); | ||
1913 | 2082 | ||
1914 | return ticks; | 2083 | case CSR_NODE_IDS: |
2084 | return reg_read(ohci, OHCI1394_NodeID) << 16; | ||
2085 | |||
2086 | case CSR_CYCLE_TIME: | ||
2087 | return get_cycle_time(ohci); | ||
2088 | |||
2089 | case CSR_BUS_TIME: | ||
2090 | /* | ||
2091 | * We might be called just after the cycle timer has wrapped | ||
2092 | * around but just before the cycle64Seconds handler, so we | ||
2093 | * better check here, too, if the bus time needs to be updated. | ||
2094 | */ | ||
2095 | spin_lock_irqsave(&ohci->lock, flags); | ||
2096 | value = update_bus_time(ohci); | ||
2097 | spin_unlock_irqrestore(&ohci->lock, flags); | ||
2098 | return value; | ||
2099 | |||
2100 | case CSR_BUSY_TIMEOUT: | ||
2101 | value = reg_read(ohci, OHCI1394_ATRetries); | ||
2102 | return (value >> 4) & 0x0ffff00f; | ||
2103 | |||
2104 | case CSR_PRIORITY_BUDGET: | ||
2105 | return (reg_read(ohci, OHCI1394_FairnessControl) & 0x3f) | | ||
2106 | (ohci->pri_req_max << 8); | ||
2107 | |||
2108 | default: | ||
2109 | WARN_ON(1); | ||
2110 | return 0; | ||
2111 | } | ||
1915 | } | 2112 | } |
1916 | 2113 | ||
1917 | /* | 2114 | static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value) |
1918 | * Some controllers exhibit one or more of the following bugs when updating the | ||
1919 | * iso cycle timer register: | ||
1920 | * - When the lowest six bits are wrapping around to zero, a read that happens | ||
1921 | * at the same time will return garbage in the lowest ten bits. | ||
1922 | * - When the cycleOffset field wraps around to zero, the cycleCount field is | ||
1923 | * not incremented for about 60 ns. | ||
1924 | * - Occasionally, the entire register reads zero. | ||
1925 | * | ||
1926 | * To catch these, we read the register three times and ensure that the | ||
1927 | * difference between each two consecutive reads is approximately the same, i.e. | ||
1928 | * less than twice the other. Furthermore, any negative difference indicates an | ||
1929 | * error. (A PCI read should take at least 20 ticks of the 24.576 MHz timer to | ||
1930 | * execute, so we have enough precision to compute the ratio of the differences.) | ||
1931 | */ | ||
1932 | static u32 ohci_get_cycle_time(struct fw_card *card) | ||
1933 | { | 2115 | { |
1934 | struct fw_ohci *ohci = fw_ohci(card); | 2116 | struct fw_ohci *ohci = fw_ohci(card); |
1935 | u32 c0, c1, c2; | 2117 | unsigned long flags; |
1936 | u32 t0, t1, t2; | ||
1937 | s32 diff01, diff12; | ||
1938 | int i; | ||
1939 | 2118 | ||
1940 | c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer); | 2119 | switch (csr_offset) { |
2120 | case CSR_STATE_CLEAR: | ||
2121 | if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) { | ||
2122 | reg_write(ohci, OHCI1394_LinkControlClear, | ||
2123 | OHCI1394_LinkControl_cycleMaster); | ||
2124 | flush_writes(ohci); | ||
2125 | } | ||
2126 | if (value & CSR_STATE_BIT_ABDICATE) | ||
2127 | ohci->csr_state_setclear_abdicate = false; | ||
2128 | break; | ||
1941 | 2129 | ||
1942 | if (ohci->quirks & QUIRK_CYCLE_TIMER) { | 2130 | case CSR_STATE_SET: |
1943 | i = 0; | 2131 | if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) { |
1944 | c1 = c2; | 2132 | reg_write(ohci, OHCI1394_LinkControlSet, |
1945 | c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer); | 2133 | OHCI1394_LinkControl_cycleMaster); |
1946 | do { | 2134 | flush_writes(ohci); |
1947 | c0 = c1; | 2135 | } |
1948 | c1 = c2; | 2136 | if (value & CSR_STATE_BIT_ABDICATE) |
1949 | c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer); | 2137 | ohci->csr_state_setclear_abdicate = true; |
1950 | t0 = cycle_timer_ticks(c0); | 2138 | break; |
1951 | t1 = cycle_timer_ticks(c1); | ||
1952 | t2 = cycle_timer_ticks(c2); | ||
1953 | diff01 = t1 - t0; | ||
1954 | diff12 = t2 - t1; | ||
1955 | } while ((diff01 <= 0 || diff12 <= 0 || | ||
1956 | diff01 / diff12 >= 2 || diff12 / diff01 >= 2) | ||
1957 | && i++ < 20); | ||
1958 | } | ||
1959 | 2139 | ||
1960 | return c2; | 2140 | case CSR_NODE_IDS: |
2141 | reg_write(ohci, OHCI1394_NodeID, value >> 16); | ||
2142 | flush_writes(ohci); | ||
2143 | break; | ||
2144 | |||
2145 | case CSR_CYCLE_TIME: | ||
2146 | reg_write(ohci, OHCI1394_IsochronousCycleTimer, value); | ||
2147 | reg_write(ohci, OHCI1394_IntEventSet, | ||
2148 | OHCI1394_cycleInconsistent); | ||
2149 | flush_writes(ohci); | ||
2150 | break; | ||
2151 | |||
2152 | case CSR_BUS_TIME: | ||
2153 | spin_lock_irqsave(&ohci->lock, flags); | ||
2154 | ohci->bus_time = (ohci->bus_time & 0x7f) | (value & ~0x7f); | ||
2155 | spin_unlock_irqrestore(&ohci->lock, flags); | ||
2156 | break; | ||
2157 | |||
2158 | case CSR_BUSY_TIMEOUT: | ||
2159 | value = (value & 0xf) | ((value & 0xf) << 4) | | ||
2160 | ((value & 0xf) << 8) | ((value & 0x0ffff000) << 4); | ||
2161 | reg_write(ohci, OHCI1394_ATRetries, value); | ||
2162 | flush_writes(ohci); | ||
2163 | break; | ||
2164 | |||
2165 | case CSR_PRIORITY_BUDGET: | ||
2166 | reg_write(ohci, OHCI1394_FairnessControl, value & 0x3f); | ||
2167 | flush_writes(ohci); | ||
2168 | break; | ||
2169 | |||
2170 | default: | ||
2171 | WARN_ON(1); | ||
2172 | break; | ||
2173 | } | ||
1961 | } | 2174 | } |
1962 | 2175 | ||
1963 | static void copy_iso_headers(struct iso_context *ctx, void *p) | 2176 | static void copy_iso_headers(struct iso_context *ctx, void *p) |
@@ -1992,10 +2205,9 @@ static int handle_ir_packet_per_buffer(struct context *context, | |||
1992 | __le32 *ir_header; | 2205 | __le32 *ir_header; |
1993 | void *p; | 2206 | void *p; |
1994 | 2207 | ||
1995 | for (pd = d; pd <= last; pd++) { | 2208 | for (pd = d; pd <= last; pd++) |
1996 | if (pd->transfer_status) | 2209 | if (pd->transfer_status) |
1997 | break; | 2210 | break; |
1998 | } | ||
1999 | if (pd > last) | 2211 | if (pd > last) |
2000 | /* Descriptor(s) not done yet, stop iteration */ | 2212 | /* Descriptor(s) not done yet, stop iteration */ |
2001 | return 0; | 2213 | return 0; |
@@ -2005,16 +2217,38 @@ static int handle_ir_packet_per_buffer(struct context *context, | |||
2005 | 2217 | ||
2006 | if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) { | 2218 | if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) { |
2007 | ir_header = (__le32 *) p; | 2219 | ir_header = (__le32 *) p; |
2008 | ctx->base.callback(&ctx->base, | 2220 | ctx->base.callback.sc(&ctx->base, |
2009 | le32_to_cpu(ir_header[0]) & 0xffff, | 2221 | le32_to_cpu(ir_header[0]) & 0xffff, |
2010 | ctx->header_length, ctx->header, | 2222 | ctx->header_length, ctx->header, |
2011 | ctx->base.callback_data); | 2223 | ctx->base.callback_data); |
2012 | ctx->header_length = 0; | 2224 | ctx->header_length = 0; |
2013 | } | 2225 | } |
2014 | 2226 | ||
2015 | return 1; | 2227 | return 1; |
2016 | } | 2228 | } |
2017 | 2229 | ||
2230 | /* d == last because each descriptor block is only a single descriptor. */ | ||
2231 | static int handle_ir_buffer_fill(struct context *context, | ||
2232 | struct descriptor *d, | ||
2233 | struct descriptor *last) | ||
2234 | { | ||
2235 | struct iso_context *ctx = | ||
2236 | container_of(context, struct iso_context, context); | ||
2237 | |||
2238 | if (!last->transfer_status) | ||
2239 | /* Descriptor(s) not done yet, stop iteration */ | ||
2240 | return 0; | ||
2241 | |||
2242 | if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) | ||
2243 | ctx->base.callback.mc(&ctx->base, | ||
2244 | le32_to_cpu(last->data_address) + | ||
2245 | le16_to_cpu(last->req_count) - | ||
2246 | le16_to_cpu(last->res_count), | ||
2247 | ctx->base.callback_data); | ||
2248 | |||
2249 | return 1; | ||
2250 | } | ||
2251 | |||
2018 | static int handle_it_packet(struct context *context, | 2252 | static int handle_it_packet(struct context *context, |
2019 | struct descriptor *d, | 2253 | struct descriptor *d, |
2020 | struct descriptor *last) | 2254 | struct descriptor *last) |
@@ -2040,71 +2274,118 @@ static int handle_it_packet(struct context *context, | |||
2040 | ctx->header_length += 4; | 2274 | ctx->header_length += 4; |
2041 | } | 2275 | } |
2042 | if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) { | 2276 | if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) { |
2043 | ctx->base.callback(&ctx->base, le16_to_cpu(last->res_count), | 2277 | ctx->base.callback.sc(&ctx->base, le16_to_cpu(last->res_count), |
2044 | ctx->header_length, ctx->header, | 2278 | ctx->header_length, ctx->header, |
2045 | ctx->base.callback_data); | 2279 | ctx->base.callback_data); |
2046 | ctx->header_length = 0; | 2280 | ctx->header_length = 0; |
2047 | } | 2281 | } |
2048 | return 1; | 2282 | return 1; |
2049 | } | 2283 | } |
2050 | 2284 | ||
2285 | static void set_multichannel_mask(struct fw_ohci *ohci, u64 channels) | ||
2286 | { | ||
2287 | u32 hi = channels >> 32, lo = channels; | ||
2288 | |||
2289 | reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, ~hi); | ||
2290 | reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, ~lo); | ||
2291 | reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet, hi); | ||
2292 | reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet, lo); | ||
2293 | mmiowb(); | ||
2294 | ohci->mc_channels = channels; | ||
2295 | } | ||
2296 | |||
2051 | static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card, | 2297 | static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card, |
2052 | int type, int channel, size_t header_size) | 2298 | int type, int channel, size_t header_size) |
2053 | { | 2299 | { |
2054 | struct fw_ohci *ohci = fw_ohci(card); | 2300 | struct fw_ohci *ohci = fw_ohci(card); |
2055 | struct iso_context *ctx, *list; | 2301 | struct iso_context *uninitialized_var(ctx); |
2056 | descriptor_callback_t callback; | 2302 | descriptor_callback_t uninitialized_var(callback); |
2057 | u64 *channels, dont_care = ~0ULL; | 2303 | u64 *uninitialized_var(channels); |
2058 | u32 *mask, regs; | 2304 | u32 *uninitialized_var(mask), uninitialized_var(regs); |
2059 | unsigned long flags; | 2305 | unsigned long flags; |
2060 | int index, ret = -ENOMEM; | 2306 | int index, ret = -EBUSY; |
2307 | |||
2308 | spin_lock_irqsave(&ohci->lock, flags); | ||
2061 | 2309 | ||
2062 | if (type == FW_ISO_CONTEXT_TRANSMIT) { | 2310 | switch (type) { |
2063 | channels = &dont_care; | 2311 | case FW_ISO_CONTEXT_TRANSMIT: |
2064 | mask = &ohci->it_context_mask; | 2312 | mask = &ohci->it_context_mask; |
2065 | list = ohci->it_context_list; | ||
2066 | callback = handle_it_packet; | 2313 | callback = handle_it_packet; |
2067 | } else { | 2314 | index = ffs(*mask) - 1; |
2315 | if (index >= 0) { | ||
2316 | *mask &= ~(1 << index); | ||
2317 | regs = OHCI1394_IsoXmitContextBase(index); | ||
2318 | ctx = &ohci->it_context_list[index]; | ||
2319 | } | ||
2320 | break; | ||
2321 | |||
2322 | case FW_ISO_CONTEXT_RECEIVE: | ||
2068 | channels = &ohci->ir_context_channels; | 2323 | channels = &ohci->ir_context_channels; |
2069 | mask = &ohci->ir_context_mask; | 2324 | mask = &ohci->ir_context_mask; |
2070 | list = ohci->ir_context_list; | ||
2071 | callback = handle_ir_packet_per_buffer; | 2325 | callback = handle_ir_packet_per_buffer; |
2072 | } | 2326 | index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1; |
2327 | if (index >= 0) { | ||
2328 | *channels &= ~(1ULL << channel); | ||
2329 | *mask &= ~(1 << index); | ||
2330 | regs = OHCI1394_IsoRcvContextBase(index); | ||
2331 | ctx = &ohci->ir_context_list[index]; | ||
2332 | } | ||
2333 | break; | ||
2073 | 2334 | ||
2074 | spin_lock_irqsave(&ohci->lock, flags); | 2335 | case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: |
2075 | index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1; | 2336 | mask = &ohci->ir_context_mask; |
2076 | if (index >= 0) { | 2337 | callback = handle_ir_buffer_fill; |
2077 | *channels &= ~(1ULL << channel); | 2338 | index = !ohci->mc_allocated ? ffs(*mask) - 1 : -1; |
2078 | *mask &= ~(1 << index); | 2339 | if (index >= 0) { |
2340 | ohci->mc_allocated = true; | ||
2341 | *mask &= ~(1 << index); | ||
2342 | regs = OHCI1394_IsoRcvContextBase(index); | ||
2343 | ctx = &ohci->ir_context_list[index]; | ||
2344 | } | ||
2345 | break; | ||
2346 | |||
2347 | default: | ||
2348 | index = -1; | ||
2349 | ret = -ENOSYS; | ||
2079 | } | 2350 | } |
2351 | |||
2080 | spin_unlock_irqrestore(&ohci->lock, flags); | 2352 | spin_unlock_irqrestore(&ohci->lock, flags); |
2081 | 2353 | ||
2082 | if (index < 0) | 2354 | if (index < 0) |
2083 | return ERR_PTR(-EBUSY); | 2355 | return ERR_PTR(ret); |
2084 | |||
2085 | if (type == FW_ISO_CONTEXT_TRANSMIT) | ||
2086 | regs = OHCI1394_IsoXmitContextBase(index); | ||
2087 | else | ||
2088 | regs = OHCI1394_IsoRcvContextBase(index); | ||
2089 | 2356 | ||
2090 | ctx = &list[index]; | ||
2091 | memset(ctx, 0, sizeof(*ctx)); | 2357 | memset(ctx, 0, sizeof(*ctx)); |
2092 | ctx->header_length = 0; | 2358 | ctx->header_length = 0; |
2093 | ctx->header = (void *) __get_free_page(GFP_KERNEL); | 2359 | ctx->header = (void *) __get_free_page(GFP_KERNEL); |
2094 | if (ctx->header == NULL) | 2360 | if (ctx->header == NULL) { |
2361 | ret = -ENOMEM; | ||
2095 | goto out; | 2362 | goto out; |
2096 | 2363 | } | |
2097 | ret = context_init(&ctx->context, ohci, regs, callback); | 2364 | ret = context_init(&ctx->context, ohci, regs, callback); |
2098 | if (ret < 0) | 2365 | if (ret < 0) |
2099 | goto out_with_header; | 2366 | goto out_with_header; |
2100 | 2367 | ||
2368 | if (type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL) | ||
2369 | set_multichannel_mask(ohci, 0); | ||
2370 | |||
2101 | return &ctx->base; | 2371 | return &ctx->base; |
2102 | 2372 | ||
2103 | out_with_header: | 2373 | out_with_header: |
2104 | free_page((unsigned long)ctx->header); | 2374 | free_page((unsigned long)ctx->header); |
2105 | out: | 2375 | out: |
2106 | spin_lock_irqsave(&ohci->lock, flags); | 2376 | spin_lock_irqsave(&ohci->lock, flags); |
2377 | |||
2378 | switch (type) { | ||
2379 | case FW_ISO_CONTEXT_RECEIVE: | ||
2380 | *channels |= 1ULL << channel; | ||
2381 | break; | ||
2382 | |||
2383 | case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: | ||
2384 | ohci->mc_allocated = false; | ||
2385 | break; | ||
2386 | } | ||
2107 | *mask |= 1 << index; | 2387 | *mask |= 1 << index; |
2388 | |||
2108 | spin_unlock_irqrestore(&ohci->lock, flags); | 2389 | spin_unlock_irqrestore(&ohci->lock, flags); |
2109 | 2390 | ||
2110 | return ERR_PTR(ret); | 2391 | return ERR_PTR(ret); |
@@ -2115,10 +2396,11 @@ static int ohci_start_iso(struct fw_iso_context *base, | |||
2115 | { | 2396 | { |
2116 | struct iso_context *ctx = container_of(base, struct iso_context, base); | 2397 | struct iso_context *ctx = container_of(base, struct iso_context, base); |
2117 | struct fw_ohci *ohci = ctx->context.ohci; | 2398 | struct fw_ohci *ohci = ctx->context.ohci; |
2118 | u32 control, match; | 2399 | u32 control = IR_CONTEXT_ISOCH_HEADER, match; |
2119 | int index; | 2400 | int index; |
2120 | 2401 | ||
2121 | if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) { | 2402 | switch (ctx->base.type) { |
2403 | case FW_ISO_CONTEXT_TRANSMIT: | ||
2122 | index = ctx - ohci->it_context_list; | 2404 | index = ctx - ohci->it_context_list; |
2123 | match = 0; | 2405 | match = 0; |
2124 | if (cycle >= 0) | 2406 | if (cycle >= 0) |
@@ -2128,9 +2410,13 @@ static int ohci_start_iso(struct fw_iso_context *base, | |||
2128 | reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index); | 2410 | reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index); |
2129 | reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index); | 2411 | reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index); |
2130 | context_run(&ctx->context, match); | 2412 | context_run(&ctx->context, match); |
2131 | } else { | 2413 | break; |
2414 | |||
2415 | case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: | ||
2416 | control |= IR_CONTEXT_BUFFER_FILL|IR_CONTEXT_MULTI_CHANNEL_MODE; | ||
2417 | /* fall through */ | ||
2418 | case FW_ISO_CONTEXT_RECEIVE: | ||
2132 | index = ctx - ohci->ir_context_list; | 2419 | index = ctx - ohci->ir_context_list; |
2133 | control = IR_CONTEXT_ISOCH_HEADER; | ||
2134 | match = (tags << 28) | (sync << 8) | ctx->base.channel; | 2420 | match = (tags << 28) | (sync << 8) | ctx->base.channel; |
2135 | if (cycle >= 0) { | 2421 | if (cycle >= 0) { |
2136 | match |= (cycle & 0x07fff) << 12; | 2422 | match |= (cycle & 0x07fff) << 12; |
@@ -2141,6 +2427,7 @@ static int ohci_start_iso(struct fw_iso_context *base, | |||
2141 | reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index); | 2427 | reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index); |
2142 | reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match); | 2428 | reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match); |
2143 | context_run(&ctx->context, control); | 2429 | context_run(&ctx->context, control); |
2430 | break; | ||
2144 | } | 2431 | } |
2145 | 2432 | ||
2146 | return 0; | 2433 | return 0; |
@@ -2152,12 +2439,17 @@ static int ohci_stop_iso(struct fw_iso_context *base) | |||
2152 | struct iso_context *ctx = container_of(base, struct iso_context, base); | 2439 | struct iso_context *ctx = container_of(base, struct iso_context, base); |
2153 | int index; | 2440 | int index; |
2154 | 2441 | ||
2155 | if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) { | 2442 | switch (ctx->base.type) { |
2443 | case FW_ISO_CONTEXT_TRANSMIT: | ||
2156 | index = ctx - ohci->it_context_list; | 2444 | index = ctx - ohci->it_context_list; |
2157 | reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index); | 2445 | reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index); |
2158 | } else { | 2446 | break; |
2447 | |||
2448 | case FW_ISO_CONTEXT_RECEIVE: | ||
2449 | case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: | ||
2159 | index = ctx - ohci->ir_context_list; | 2450 | index = ctx - ohci->ir_context_list; |
2160 | reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index); | 2451 | reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index); |
2452 | break; | ||
2161 | } | 2453 | } |
2162 | flush_writes(ohci); | 2454 | flush_writes(ohci); |
2163 | context_stop(&ctx->context); | 2455 | context_stop(&ctx->context); |
@@ -2178,24 +2470,65 @@ static void ohci_free_iso_context(struct fw_iso_context *base) | |||
2178 | 2470 | ||
2179 | spin_lock_irqsave(&ohci->lock, flags); | 2471 | spin_lock_irqsave(&ohci->lock, flags); |
2180 | 2472 | ||
2181 | if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) { | 2473 | switch (base->type) { |
2474 | case FW_ISO_CONTEXT_TRANSMIT: | ||
2182 | index = ctx - ohci->it_context_list; | 2475 | index = ctx - ohci->it_context_list; |
2183 | ohci->it_context_mask |= 1 << index; | 2476 | ohci->it_context_mask |= 1 << index; |
2184 | } else { | 2477 | break; |
2478 | |||
2479 | case FW_ISO_CONTEXT_RECEIVE: | ||
2185 | index = ctx - ohci->ir_context_list; | 2480 | index = ctx - ohci->ir_context_list; |
2186 | ohci->ir_context_mask |= 1 << index; | 2481 | ohci->ir_context_mask |= 1 << index; |
2187 | ohci->ir_context_channels |= 1ULL << base->channel; | 2482 | ohci->ir_context_channels |= 1ULL << base->channel; |
2483 | break; | ||
2484 | |||
2485 | case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: | ||
2486 | index = ctx - ohci->ir_context_list; | ||
2487 | ohci->ir_context_mask |= 1 << index; | ||
2488 | ohci->ir_context_channels |= ohci->mc_channels; | ||
2489 | ohci->mc_channels = 0; | ||
2490 | ohci->mc_allocated = false; | ||
2491 | break; | ||
2188 | } | 2492 | } |
2189 | 2493 | ||
2190 | spin_unlock_irqrestore(&ohci->lock, flags); | 2494 | spin_unlock_irqrestore(&ohci->lock, flags); |
2191 | } | 2495 | } |
2192 | 2496 | ||
2193 | static int ohci_queue_iso_transmit(struct fw_iso_context *base, | 2497 | static int ohci_set_iso_channels(struct fw_iso_context *base, u64 *channels) |
2194 | struct fw_iso_packet *packet, | 2498 | { |
2195 | struct fw_iso_buffer *buffer, | 2499 | struct fw_ohci *ohci = fw_ohci(base->card); |
2196 | unsigned long payload) | 2500 | unsigned long flags; |
2501 | int ret; | ||
2502 | |||
2503 | switch (base->type) { | ||
2504 | case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: | ||
2505 | |||
2506 | spin_lock_irqsave(&ohci->lock, flags); | ||
2507 | |||
2508 | /* Don't allow multichannel to grab other contexts' channels. */ | ||
2509 | if (~ohci->ir_context_channels & ~ohci->mc_channels & *channels) { | ||
2510 | *channels = ohci->ir_context_channels; | ||
2511 | ret = -EBUSY; | ||
2512 | } else { | ||
2513 | set_multichannel_mask(ohci, *channels); | ||
2514 | ret = 0; | ||
2515 | } | ||
2516 | |||
2517 | spin_unlock_irqrestore(&ohci->lock, flags); | ||
2518 | |||
2519 | break; | ||
2520 | default: | ||
2521 | ret = -EINVAL; | ||
2522 | } | ||
2523 | |||
2524 | return ret; | ||
2525 | } | ||
2526 | |||
2527 | static int queue_iso_transmit(struct iso_context *ctx, | ||
2528 | struct fw_iso_packet *packet, | ||
2529 | struct fw_iso_buffer *buffer, | ||
2530 | unsigned long payload) | ||
2197 | { | 2531 | { |
2198 | struct iso_context *ctx = container_of(base, struct iso_context, base); | ||
2199 | struct descriptor *d, *last, *pd; | 2532 | struct descriptor *d, *last, *pd; |
2200 | struct fw_iso_packet *p; | 2533 | struct fw_iso_packet *p; |
2201 | __le32 *header; | 2534 | __le32 *header; |
@@ -2291,14 +2624,12 @@ static int ohci_queue_iso_transmit(struct fw_iso_context *base, | |||
2291 | return 0; | 2624 | return 0; |
2292 | } | 2625 | } |
2293 | 2626 | ||
2294 | static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base, | 2627 | static int queue_iso_packet_per_buffer(struct iso_context *ctx, |
2295 | struct fw_iso_packet *packet, | 2628 | struct fw_iso_packet *packet, |
2296 | struct fw_iso_buffer *buffer, | 2629 | struct fw_iso_buffer *buffer, |
2297 | unsigned long payload) | 2630 | unsigned long payload) |
2298 | { | 2631 | { |
2299 | struct iso_context *ctx = container_of(base, struct iso_context, base); | ||
2300 | struct descriptor *d, *pd; | 2632 | struct descriptor *d, *pd; |
2301 | struct fw_iso_packet *p = packet; | ||
2302 | dma_addr_t d_bus, page_bus; | 2633 | dma_addr_t d_bus, page_bus; |
2303 | u32 z, header_z, rest; | 2634 | u32 z, header_z, rest; |
2304 | int i, j, length; | 2635 | int i, j, length; |
@@ -2308,14 +2639,14 @@ static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base, | |||
2308 | * The OHCI controller puts the isochronous header and trailer in the | 2639 | * The OHCI controller puts the isochronous header and trailer in the |
2309 | * buffer, so we need at least 8 bytes. | 2640 | * buffer, so we need at least 8 bytes. |
2310 | */ | 2641 | */ |
2311 | packet_count = p->header_length / ctx->base.header_size; | 2642 | packet_count = packet->header_length / ctx->base.header_size; |
2312 | header_size = max(ctx->base.header_size, (size_t)8); | 2643 | header_size = max(ctx->base.header_size, (size_t)8); |
2313 | 2644 | ||
2314 | /* Get header size in number of descriptors. */ | 2645 | /* Get header size in number of descriptors. */ |
2315 | header_z = DIV_ROUND_UP(header_size, sizeof(*d)); | 2646 | header_z = DIV_ROUND_UP(header_size, sizeof(*d)); |
2316 | page = payload >> PAGE_SHIFT; | 2647 | page = payload >> PAGE_SHIFT; |
2317 | offset = payload & ~PAGE_MASK; | 2648 | offset = payload & ~PAGE_MASK; |
2318 | payload_per_buffer = p->payload_length / packet_count; | 2649 | payload_per_buffer = packet->payload_length / packet_count; |
2319 | 2650 | ||
2320 | for (i = 0; i < packet_count; i++) { | 2651 | for (i = 0; i < packet_count; i++) { |
2321 | /* d points to the header descriptor */ | 2652 | /* d points to the header descriptor */ |
@@ -2327,7 +2658,7 @@ static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base, | |||
2327 | 2658 | ||
2328 | d->control = cpu_to_le16(DESCRIPTOR_STATUS | | 2659 | d->control = cpu_to_le16(DESCRIPTOR_STATUS | |
2329 | DESCRIPTOR_INPUT_MORE); | 2660 | DESCRIPTOR_INPUT_MORE); |
2330 | if (p->skip && i == 0) | 2661 | if (packet->skip && i == 0) |
2331 | d->control |= cpu_to_le16(DESCRIPTOR_WAIT); | 2662 | d->control |= cpu_to_le16(DESCRIPTOR_WAIT); |
2332 | d->req_count = cpu_to_le16(header_size); | 2663 | d->req_count = cpu_to_le16(header_size); |
2333 | d->res_count = d->req_count; | 2664 | d->res_count = d->req_count; |
@@ -2360,7 +2691,7 @@ static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base, | |||
2360 | pd->control = cpu_to_le16(DESCRIPTOR_STATUS | | 2691 | pd->control = cpu_to_le16(DESCRIPTOR_STATUS | |
2361 | DESCRIPTOR_INPUT_LAST | | 2692 | DESCRIPTOR_INPUT_LAST | |
2362 | DESCRIPTOR_BRANCH_ALWAYS); | 2693 | DESCRIPTOR_BRANCH_ALWAYS); |
2363 | if (p->interrupt && i == packet_count - 1) | 2694 | if (packet->interrupt && i == packet_count - 1) |
2364 | pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS); | 2695 | pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS); |
2365 | 2696 | ||
2366 | context_append(&ctx->context, d, z, header_z); | 2697 | context_append(&ctx->context, d, z, header_z); |
@@ -2369,6 +2700,58 @@ static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base, | |||
2369 | return 0; | 2700 | return 0; |
2370 | } | 2701 | } |
2371 | 2702 | ||
2703 | static int queue_iso_buffer_fill(struct iso_context *ctx, | ||
2704 | struct fw_iso_packet *packet, | ||
2705 | struct fw_iso_buffer *buffer, | ||
2706 | unsigned long payload) | ||
2707 | { | ||
2708 | struct descriptor *d; | ||
2709 | dma_addr_t d_bus, page_bus; | ||
2710 | int page, offset, rest, z, i, length; | ||
2711 | |||
2712 | page = payload >> PAGE_SHIFT; | ||
2713 | offset = payload & ~PAGE_MASK; | ||
2714 | rest = packet->payload_length; | ||
2715 | |||
2716 | /* We need one descriptor for each page in the buffer. */ | ||
2717 | z = DIV_ROUND_UP(offset + rest, PAGE_SIZE); | ||
2718 | |||
2719 | if (WARN_ON(offset & 3 || rest & 3 || page + z > buffer->page_count)) | ||
2720 | return -EFAULT; | ||
2721 | |||
2722 | for (i = 0; i < z; i++) { | ||
2723 | d = context_get_descriptors(&ctx->context, 1, &d_bus); | ||
2724 | if (d == NULL) | ||
2725 | return -ENOMEM; | ||
2726 | |||
2727 | d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE | | ||
2728 | DESCRIPTOR_BRANCH_ALWAYS); | ||
2729 | if (packet->skip && i == 0) | ||
2730 | d->control |= cpu_to_le16(DESCRIPTOR_WAIT); | ||
2731 | if (packet->interrupt && i == z - 1) | ||
2732 | d->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS); | ||
2733 | |||
2734 | if (offset + rest < PAGE_SIZE) | ||
2735 | length = rest; | ||
2736 | else | ||
2737 | length = PAGE_SIZE - offset; | ||
2738 | d->req_count = cpu_to_le16(length); | ||
2739 | d->res_count = d->req_count; | ||
2740 | d->transfer_status = 0; | ||
2741 | |||
2742 | page_bus = page_private(buffer->pages[page]); | ||
2743 | d->data_address = cpu_to_le32(page_bus + offset); | ||
2744 | |||
2745 | rest -= length; | ||
2746 | offset = 0; | ||
2747 | page++; | ||
2748 | |||
2749 | context_append(&ctx->context, d, 1, 0); | ||
2750 | } | ||
2751 | |||
2752 | return 0; | ||
2753 | } | ||
2754 | |||
2372 | static int ohci_queue_iso(struct fw_iso_context *base, | 2755 | static int ohci_queue_iso(struct fw_iso_context *base, |
2373 | struct fw_iso_packet *packet, | 2756 | struct fw_iso_packet *packet, |
2374 | struct fw_iso_buffer *buffer, | 2757 | struct fw_iso_buffer *buffer, |
@@ -2376,14 +2759,20 @@ static int ohci_queue_iso(struct fw_iso_context *base, | |||
2376 | { | 2759 | { |
2377 | struct iso_context *ctx = container_of(base, struct iso_context, base); | 2760 | struct iso_context *ctx = container_of(base, struct iso_context, base); |
2378 | unsigned long flags; | 2761 | unsigned long flags; |
2379 | int ret; | 2762 | int ret = -ENOSYS; |
2380 | 2763 | ||
2381 | spin_lock_irqsave(&ctx->context.ohci->lock, flags); | 2764 | spin_lock_irqsave(&ctx->context.ohci->lock, flags); |
2382 | if (base->type == FW_ISO_CONTEXT_TRANSMIT) | 2765 | switch (base->type) { |
2383 | ret = ohci_queue_iso_transmit(base, packet, buffer, payload); | 2766 | case FW_ISO_CONTEXT_TRANSMIT: |
2384 | else | 2767 | ret = queue_iso_transmit(ctx, packet, buffer, payload); |
2385 | ret = ohci_queue_iso_receive_packet_per_buffer(base, packet, | 2768 | break; |
2386 | buffer, payload); | 2769 | case FW_ISO_CONTEXT_RECEIVE: |
2770 | ret = queue_iso_packet_per_buffer(ctx, packet, buffer, payload); | ||
2771 | break; | ||
2772 | case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: | ||
2773 | ret = queue_iso_buffer_fill(ctx, packet, buffer, payload); | ||
2774 | break; | ||
2775 | } | ||
2387 | spin_unlock_irqrestore(&ctx->context.ohci->lock, flags); | 2776 | spin_unlock_irqrestore(&ctx->context.ohci->lock, flags); |
2388 | 2777 | ||
2389 | return ret; | 2778 | return ret; |
@@ -2391,16 +2780,19 @@ static int ohci_queue_iso(struct fw_iso_context *base, | |||
2391 | 2780 | ||
2392 | static const struct fw_card_driver ohci_driver = { | 2781 | static const struct fw_card_driver ohci_driver = { |
2393 | .enable = ohci_enable, | 2782 | .enable = ohci_enable, |
2783 | .read_phy_reg = ohci_read_phy_reg, | ||
2394 | .update_phy_reg = ohci_update_phy_reg, | 2784 | .update_phy_reg = ohci_update_phy_reg, |
2395 | .set_config_rom = ohci_set_config_rom, | 2785 | .set_config_rom = ohci_set_config_rom, |
2396 | .send_request = ohci_send_request, | 2786 | .send_request = ohci_send_request, |
2397 | .send_response = ohci_send_response, | 2787 | .send_response = ohci_send_response, |
2398 | .cancel_packet = ohci_cancel_packet, | 2788 | .cancel_packet = ohci_cancel_packet, |
2399 | .enable_phys_dma = ohci_enable_phys_dma, | 2789 | .enable_phys_dma = ohci_enable_phys_dma, |
2400 | .get_cycle_time = ohci_get_cycle_time, | 2790 | .read_csr = ohci_read_csr, |
2791 | .write_csr = ohci_write_csr, | ||
2401 | 2792 | ||
2402 | .allocate_iso_context = ohci_allocate_iso_context, | 2793 | .allocate_iso_context = ohci_allocate_iso_context, |
2403 | .free_iso_context = ohci_free_iso_context, | 2794 | .free_iso_context = ohci_free_iso_context, |
2795 | .set_iso_channels = ohci_set_iso_channels, | ||
2404 | .queue_iso = ohci_queue_iso, | 2796 | .queue_iso = ohci_queue_iso, |
2405 | .start_iso = ohci_start_iso, | 2797 | .start_iso = ohci_start_iso, |
2406 | .stop_iso = ohci_stop_iso, | 2798 | .stop_iso = ohci_stop_iso, |
@@ -2465,6 +2857,7 @@ static int __devinit pci_probe(struct pci_dev *dev, | |||
2465 | pci_set_drvdata(dev, ohci); | 2857 | pci_set_drvdata(dev, ohci); |
2466 | 2858 | ||
2467 | spin_lock_init(&ohci->lock); | 2859 | spin_lock_init(&ohci->lock); |
2860 | mutex_init(&ohci->phy_reg_mutex); | ||
2468 | 2861 | ||
2469 | tasklet_init(&ohci->bus_reset_tasklet, | 2862 | tasklet_init(&ohci->bus_reset_tasklet, |
2470 | bus_reset_tasklet, (unsigned long)ohci); | 2863 | bus_reset_tasklet, (unsigned long)ohci); |
@@ -2625,6 +3018,7 @@ static void pci_remove(struct pci_dev *dev) | |||
2625 | context_release(&ohci->at_response_ctx); | 3018 | context_release(&ohci->at_response_ctx); |
2626 | kfree(ohci->it_context_list); | 3019 | kfree(ohci->it_context_list); |
2627 | kfree(ohci->ir_context_list); | 3020 | kfree(ohci->ir_context_list); |
3021 | pci_disable_msi(dev); | ||
2628 | pci_iounmap(dev, ohci->registers); | 3022 | pci_iounmap(dev, ohci->registers); |
2629 | pci_release_region(dev, 0); | 3023 | pci_release_region(dev, 0); |
2630 | pci_disable_device(dev); | 3024 | pci_disable_device(dev); |
@@ -2642,6 +3036,7 @@ static int pci_suspend(struct pci_dev *dev, pm_message_t state) | |||
2642 | 3036 | ||
2643 | software_reset(ohci); | 3037 | software_reset(ohci); |
2644 | free_irq(dev->irq, ohci); | 3038 | free_irq(dev->irq, ohci); |
3039 | pci_disable_msi(dev); | ||
2645 | err = pci_save_state(dev); | 3040 | err = pci_save_state(dev); |
2646 | if (err) { | 3041 | if (err) { |
2647 | fw_error("pci_save_state failed\n"); | 3042 | fw_error("pci_save_state failed\n"); |
diff --git a/drivers/firewire/ohci.h b/drivers/firewire/ohci.h index 3bc9a5d744eb..0e6c5a466908 100644 --- a/drivers/firewire/ohci.h +++ b/drivers/firewire/ohci.h | |||
@@ -60,6 +60,7 @@ | |||
60 | #define OHCI1394_LinkControl_cycleSource (1 << 22) | 60 | #define OHCI1394_LinkControl_cycleSource (1 << 22) |
61 | #define OHCI1394_NodeID 0x0E8 | 61 | #define OHCI1394_NodeID 0x0E8 |
62 | #define OHCI1394_NodeID_idValid 0x80000000 | 62 | #define OHCI1394_NodeID_idValid 0x80000000 |
63 | #define OHCI1394_NodeID_root 0x40000000 | ||
63 | #define OHCI1394_NodeID_nodeNumber 0x0000003f | 64 | #define OHCI1394_NodeID_nodeNumber 0x0000003f |
64 | #define OHCI1394_NodeID_busNumber 0x0000ffc0 | 65 | #define OHCI1394_NodeID_busNumber 0x0000ffc0 |
65 | #define OHCI1394_PhyControl 0x0EC | 66 | #define OHCI1394_PhyControl 0x0EC |
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c index ca264f2fdf0c..9f76171717e5 100644 --- a/drivers/firewire/sbp2.c +++ b/drivers/firewire/sbp2.c | |||
@@ -410,8 +410,7 @@ static void free_orb(struct kref *kref) | |||
410 | 410 | ||
411 | static void sbp2_status_write(struct fw_card *card, struct fw_request *request, | 411 | static void sbp2_status_write(struct fw_card *card, struct fw_request *request, |
412 | int tcode, int destination, int source, | 412 | int tcode, int destination, int source, |
413 | int generation, int speed, | 413 | int generation, unsigned long long offset, |
414 | unsigned long long offset, | ||
415 | void *payload, size_t length, void *callback_data) | 414 | void *payload, size_t length, void *callback_data) |
416 | { | 415 | { |
417 | struct sbp2_logical_unit *lu = callback_data; | 416 | struct sbp2_logical_unit *lu = callback_data; |
@@ -508,8 +507,7 @@ static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu, | |||
508 | 507 | ||
509 | fw_send_request(device->card, &orb->t, TCODE_WRITE_BLOCK_REQUEST, | 508 | fw_send_request(device->card, &orb->t, TCODE_WRITE_BLOCK_REQUEST, |
510 | node_id, generation, device->max_speed, offset, | 509 | node_id, generation, device->max_speed, offset, |
511 | &orb->pointer, sizeof(orb->pointer), | 510 | &orb->pointer, 8, complete_transaction, orb); |
512 | complete_transaction, orb); | ||
513 | } | 511 | } |
514 | 512 | ||
515 | static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu) | 513 | static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu) |
@@ -654,7 +652,7 @@ static void sbp2_agent_reset(struct sbp2_logical_unit *lu) | |||
654 | fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST, | 652 | fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST, |
655 | lu->tgt->node_id, lu->generation, device->max_speed, | 653 | lu->tgt->node_id, lu->generation, device->max_speed, |
656 | lu->command_block_agent_address + SBP2_AGENT_RESET, | 654 | lu->command_block_agent_address + SBP2_AGENT_RESET, |
657 | &d, sizeof(d)); | 655 | &d, 4); |
658 | } | 656 | } |
659 | 657 | ||
660 | static void complete_agent_reset_write_no_wait(struct fw_card *card, | 658 | static void complete_agent_reset_write_no_wait(struct fw_card *card, |
@@ -676,7 +674,7 @@ static void sbp2_agent_reset_no_wait(struct sbp2_logical_unit *lu) | |||
676 | fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST, | 674 | fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST, |
677 | lu->tgt->node_id, lu->generation, device->max_speed, | 675 | lu->tgt->node_id, lu->generation, device->max_speed, |
678 | lu->command_block_agent_address + SBP2_AGENT_RESET, | 676 | lu->command_block_agent_address + SBP2_AGENT_RESET, |
679 | &d, sizeof(d), complete_agent_reset_write_no_wait, t); | 677 | &d, 4, complete_agent_reset_write_no_wait, t); |
680 | } | 678 | } |
681 | 679 | ||
682 | static inline void sbp2_allow_block(struct sbp2_logical_unit *lu) | 680 | static inline void sbp2_allow_block(struct sbp2_logical_unit *lu) |
@@ -866,8 +864,7 @@ static void sbp2_set_busy_timeout(struct sbp2_logical_unit *lu) | |||
866 | 864 | ||
867 | fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST, | 865 | fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST, |
868 | lu->tgt->node_id, lu->generation, device->max_speed, | 866 | lu->tgt->node_id, lu->generation, device->max_speed, |
869 | CSR_REGISTER_BASE + CSR_BUSY_TIMEOUT, | 867 | CSR_REGISTER_BASE + CSR_BUSY_TIMEOUT, &d, 4); |
870 | &d, sizeof(d)); | ||
871 | } | 868 | } |
872 | 869 | ||
873 | static void sbp2_reconnect(struct work_struct *work); | 870 | static void sbp2_reconnect(struct work_struct *work); |