Diffstat (limited to 'drivers/firewire')
-rw-r--r--  drivers/firewire/Kconfig             |  16
-rw-r--r--  drivers/firewire/Makefile            |   1
-rw-r--r--  drivers/firewire/core-card.c         |  59
-rw-r--r--  drivers/firewire/core-cdev.c         |  68
-rw-r--r--  drivers/firewire/core-device.c       |  53
-rw-r--r--  drivers/firewire/core-iso.c          |  50
-rw-r--r--  drivers/firewire/core-topology.c     |   2
-rw-r--r--  drivers/firewire/core-transaction.c  |  77
-rw-r--r--  drivers/firewire/core.h              |   9
-rw-r--r--  drivers/firewire/init_ohci1394_dma.c | 309
-rw-r--r--  drivers/firewire/net.c               | 227
-rw-r--r--  drivers/firewire/nosy.c              |   2
-rw-r--r--  drivers/firewire/ohci.c              | 877
-rw-r--r--  drivers/firewire/sbp2.c              |  46
14 files changed, 1299 insertions, 497 deletions
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index fcf3ea28340b..2be6f4520772 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -3,9 +3,6 @@ menu "IEEE 1394 (FireWire) support"
 # firewire-core does not depend on PCI but is
 # not useful without PCI controller driver
 
-comment "You can enable one or both FireWire driver stacks."
-comment "The newer stack is recommended."
-
 config FIREWIRE
 	tristate "FireWire driver stack"
 	select CRC_ITU_T
@@ -22,7 +19,7 @@ config FIREWIRE
 
 config FIREWIRE_OHCI
 	tristate "OHCI-1394 controllers"
-	depends on PCI && FIREWIRE
+	depends on PCI && FIREWIRE && MMU
 	help
 	  Enable this driver if you have a FireWire controller based
 	  on the OHCI specification. For all practical purposes, this
@@ -52,20 +49,16 @@ config FIREWIRE_SBP2
 	  configuration section.
 
 config FIREWIRE_NET
-	tristate "IP networking over 1394 (EXPERIMENTAL)"
-	depends on FIREWIRE && INET && EXPERIMENTAL
+	tristate "IP networking over 1394"
+	depends on FIREWIRE && INET
 	help
 	  This enables IPv4 over IEEE 1394, providing IP connectivity with
 	  other implementations of RFC 2734 as found on several operating
 	  systems. Multicast support is currently limited.
 
-	  NOTE, this driver is not stable yet!
-
 	  To compile this driver as a module, say M here: The module will be
 	  called firewire-net.
 
-source "drivers/ieee1394/Kconfig"
-
 config FIREWIRE_NOSY
 	tristate "Nosy - a FireWire traffic sniffer for PCILynx cards"
 	depends on PCI
@@ -82,7 +75,8 @@ config FIREWIRE_NOSY
 	  The following cards are known to be based on PCILynx or PCILynx-2:
 	  IOI IOI-1394TT (PCI card), Unibrain Fireboard 400 PCI Lynx-2
 	  (PCI card), Newer Technology FireWire 2 Go (CardBus card),
-	  Apple Power Mac G3 blue & white (onboard controller).
+	  Apple Power Mac G3 blue & white and G4 with PCI graphics
+	  (onboard controller).
 
 	  To compile this driver as a module, say M here: The module will be
 	  called nosy. Source code of a userspace interface to nosy, called
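
For orientation, the options touched above compose like this in a .config; a fragment that builds the whole stack as modules might read as follows (an illustrative configuration, not part of the patch — note that FIREWIRE_OHCI now additionally requires MMU):

CONFIG_FIREWIRE=m
CONFIG_FIREWIRE_OHCI=m
CONFIG_FIREWIRE_SBP2=m
CONFIG_FIREWIRE_NET=m
CONFIG_FIREWIRE_NOSY=m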
diff --git a/drivers/firewire/Makefile b/drivers/firewire/Makefile
index 3c6a7fb20aa7..e3870d5c43dd 100644
--- a/drivers/firewire/Makefile
+++ b/drivers/firewire/Makefile
@@ -13,3 +13,4 @@ obj-$(CONFIG_FIREWIRE_OHCI) += firewire-ohci.o
 obj-$(CONFIG_FIREWIRE_SBP2) += firewire-sbp2.o
 obj-$(CONFIG_FIREWIRE_NET) += firewire-net.o
 obj-$(CONFIG_FIREWIRE_NOSY) += nosy.o
+obj-$(CONFIG_PROVIDE_OHCI1394_DMA_INIT) += init_ohci1394_dma.o
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index be0492398ef9..29d2423fae6d 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -75,6 +75,15 @@ static size_t config_rom_length = 1 + 4 + 1 + 1;
 #define BIB_IRMC		((1) << 31)
 #define NODE_CAPABILITIES	0x0c0083c0 /* per IEEE 1394 clause 8.3.2.6.5.2 */
 
+/*
+ * IEEE-1394 specifies a default SPLIT_TIMEOUT value of 800 cycles (100 ms),
+ * but we have to make it longer because there are many devices whose firmware
+ * is just too slow for that.
+ */
+#define DEFAULT_SPLIT_TIMEOUT	(2 * 8000)
+
+#define CANON_OUI		0x000085
+
 static void generate_config_rom(struct fw_card *card, __be32 *config_rom)
 {
 	struct fw_descriptor *desc;
@@ -219,8 +228,8 @@ void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset)
 
 	/* Use an arbitrary short delay to combine multiple reset requests. */
 	fw_card_get(card);
-	if (!schedule_delayed_work(&card->br_work,
-				   delayed ? DIV_ROUND_UP(HZ, 100) : 0))
+	if (!queue_delayed_work(fw_workqueue, &card->br_work,
+				delayed ? DIV_ROUND_UP(HZ, 100) : 0))
 		fw_card_put(card);
 }
 EXPORT_SYMBOL(fw_schedule_bus_reset);
@@ -231,8 +240,8 @@ static void br_work(struct work_struct *work)
 
 	/* Delay for 2s after last reset per IEEE 1394 clause 8.2.1. */
 	if (card->reset_jiffies != 0 &&
-	    time_is_after_jiffies(card->reset_jiffies + 2 * HZ)) {
-		if (!schedule_delayed_work(&card->br_work, 2 * HZ))
+	    time_before64(get_jiffies_64(), card->reset_jiffies + 2 * HZ)) {
+		if (!queue_delayed_work(fw_workqueue, &card->br_work, 2 * HZ))
 			fw_card_put(card);
 		return;
 	}
@@ -249,8 +258,7 @@ static void allocate_broadcast_channel(struct fw_card *card, int generation)
 
 	if (!card->broadcast_channel_allocated) {
 		fw_iso_resource_manage(card, generation, 1ULL << 31,
-				       &channel, &bandwidth, true,
-				       card->bm_transaction_data);
+				       &channel, &bandwidth, true);
 		if (channel != 31) {
 			fw_notify("failed to allocate broadcast channel\n");
 			return;
@@ -284,6 +292,8 @@ static void bm_work(struct work_struct *work)
 	bool root_device_is_running;
 	bool root_device_is_cmc;
 	bool irm_is_1394_1995_only;
+	bool keep_this_irm;
+	__be32 transaction_data[2];
 
 	spin_lock_irq(&card->lock);
 
@@ -305,11 +315,16 @@ static void bm_work(struct work_struct *work)
 	irm_is_1394_1995_only = irm_device && irm_device->config_rom &&
 			(irm_device->config_rom[2] & 0x000000f0) == 0;
 
+	/* Canon MV5i works unreliably if it is not root node. */
+	keep_this_irm = irm_device && irm_device->config_rom &&
+			irm_device->config_rom[3] >> 8 == CANON_OUI;
+
 	root_id  = root_node->node_id;
 	irm_id   = card->irm_node->node_id;
 	local_id = card->local_node->node_id;
 
-	grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 8));
+	grace = time_after64(get_jiffies_64(),
+			     card->reset_jiffies + DIV_ROUND_UP(HZ, 8));
 
 	if ((is_next_generation(generation, card->bm_generation) &&
 	     !card->bm_abdicate) ||
@@ -333,28 +348,28 @@ static void bm_work(struct work_struct *work)
 			goto pick_me;
 		}
 
-		if (irm_is_1394_1995_only) {
+		if (irm_is_1394_1995_only && !keep_this_irm) {
 			new_root_id = local_id;
 			fw_notify("%s, making local node (%02x) root.\n",
 				  "IRM is not 1394a compliant", new_root_id);
 			goto pick_me;
 		}
 
-		card->bm_transaction_data[0] = cpu_to_be32(0x3f);
-		card->bm_transaction_data[1] = cpu_to_be32(local_id);
+		transaction_data[0] = cpu_to_be32(0x3f);
+		transaction_data[1] = cpu_to_be32(local_id);
 
 		spin_unlock_irq(&card->lock);
 
 		rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
 				irm_id, generation, SCODE_100,
 				CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID,
-				card->bm_transaction_data, 8);
+				transaction_data, 8);
 
 		if (rcode == RCODE_GENERATION)
 			/* Another bus reset, BM work has been rescheduled. */
 			goto out;
 
-		bm_id = be32_to_cpu(card->bm_transaction_data[0]);
+		bm_id = be32_to_cpu(transaction_data[0]);
 
 		spin_lock_irq(&card->lock);
 		if (rcode == RCODE_COMPLETE && generation == card->generation)
@@ -382,7 +397,7 @@ static void bm_work(struct work_struct *work)
 
 		spin_lock_irq(&card->lock);
 
-		if (rcode != RCODE_COMPLETE) {
+		if (rcode != RCODE_COMPLETE && !keep_this_irm) {
 			/*
 			 * The lock request failed, maybe the IRM
 			 * isn't really IRM capable after all. Let's
@@ -475,11 +490,11 @@ static void bm_work(struct work_struct *work)
 		/*
 		 * Make sure that the cycle master sends cycle start packets.
 		 */
-		card->bm_transaction_data[0] = cpu_to_be32(CSR_STATE_BIT_CMSTR);
+		transaction_data[0] = cpu_to_be32(CSR_STATE_BIT_CMSTR);
 		rcode = fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST,
 				root_id, generation, SCODE_100,
 				CSR_REGISTER_BASE + CSR_STATE_SET,
-				card->bm_transaction_data, 4);
+				transaction_data, 4);
 		if (rcode == RCODE_GENERATION)
 			goto out;
 	}
@@ -504,10 +519,11 @@ void fw_card_initialize(struct fw_card *card,
 	card->device = device;
 	card->current_tlabel = 0;
 	card->tlabel_mask = 0;
-	card->split_timeout_hi = 0;
-	card->split_timeout_lo = 800 << 19;
-	card->split_timeout_cycles = 800;
-	card->split_timeout_jiffies = DIV_ROUND_UP(HZ, 10);
+	card->split_timeout_hi = DEFAULT_SPLIT_TIMEOUT / 8000;
+	card->split_timeout_lo = (DEFAULT_SPLIT_TIMEOUT % 8000) << 19;
+	card->split_timeout_cycles = DEFAULT_SPLIT_TIMEOUT;
+	card->split_timeout_jiffies =
+			DIV_ROUND_UP(DEFAULT_SPLIT_TIMEOUT * HZ, 8000);
 	card->color = 0;
 	card->broadcast_channel = BROADCAST_CHANNEL_INITIAL;
 
@@ -614,6 +630,10 @@ static int dummy_queue_iso(struct fw_iso_context *ctx, struct fw_iso_packet *p,
 	return -ENODEV;
 }
 
+static void dummy_flush_queue_iso(struct fw_iso_context *ctx)
+{
+}
+
 static const struct fw_card_driver dummy_driver_template = {
 	.read_phy_reg		= dummy_read_phy_reg,
 	.update_phy_reg		= dummy_update_phy_reg,
@@ -625,6 +645,7 @@ static const struct fw_card_driver dummy_driver_template = {
 	.start_iso		= dummy_start_iso,
 	.set_iso_channels	= dummy_set_iso_channels,
 	.queue_iso		= dummy_queue_iso,
+	.flush_queue_iso	= dummy_flush_queue_iso,
 };
 
 void fw_card_release(struct kref *kref)
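
A note on the split-timeout arithmetic introduced above: the 1394 bus runs 8000 isochronous cycles per second, so DEFAULT_SPLIT_TIMEOUT = 2 * 8000 cycles corresponds to 2 seconds. The CSR SPLIT_TIMEOUT_HI value counts whole seconds while SPLIT_TIMEOUT_LO carries the remaining cycle count in its most significant 13 bits, which is what the << 19 shift accomplishes. A minimal standalone sketch of the same computation (illustrative names, not driver code):

#include <stdio.h>

#define CYCLES_PER_SECOND 8000	/* isochronous cycle rate of the 1394 bus */

int main(void)
{
	unsigned int timeout = 2 * CYCLES_PER_SECOND;	/* 16000 cycles = 2 s */
	unsigned int hi = timeout / CYCLES_PER_SECOND;	/* whole seconds: 2 */
	unsigned int lo = (timeout % CYCLES_PER_SECOND) << 19;	/* fraction: 0 */

	printf("SPLIT_TIMEOUT_HI=%u SPLIT_TIMEOUT_LO=0x%08x\n", hi, lo);
	return 0;
}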
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 14bb7b7b5dd7..b1c11775839c 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -64,6 +64,7 @@ struct client {
 	struct idr resource_idr;
 	struct list_head event_list;
 	wait_queue_head_t wait;
+	wait_queue_head_t tx_flush_wait;
 	u64 bus_reset_closure;
 
 	struct fw_iso_context *iso_context;
@@ -140,7 +141,6 @@ struct iso_resource {
 	int generation;
 	u64 channels;
 	s32 bandwidth;
-	__be32 transaction_data[2];
 	struct iso_resource_event *e_alloc, *e_dealloc;
 };
 
@@ -149,7 +149,7 @@ static void release_iso_resource(struct client *, struct client_resource *);
 static void schedule_iso_resource(struct iso_resource *r, unsigned long delay)
 {
 	client_get(r->client);
-	if (!schedule_delayed_work(&r->work, delay))
+	if (!queue_delayed_work(fw_workqueue, &r->work, delay))
 		client_put(r->client);
 }
 
@@ -251,6 +251,7 @@ static int fw_device_op_open(struct inode *inode, struct file *file)
 	idr_init(&client->resource_idr);
 	INIT_LIST_HEAD(&client->event_list);
 	init_waitqueue_head(&client->wait);
+	init_waitqueue_head(&client->tx_flush_wait);
 	INIT_LIST_HEAD(&client->phy_receiver_link);
 	kref_init(&client->kref);
 
@@ -520,10 +521,6 @@ static int release_client_resource(struct client *client, u32 handle,
 static void release_transaction(struct client *client,
 				struct client_resource *resource)
 {
-	struct outbound_transaction_resource *r = container_of(resource,
-			struct outbound_transaction_resource, resource);
-
-	fw_cancel_transaction(client->device->card, &r->transaction);
 }
 
 static void complete_transaction(struct fw_card *card, int rcode,
@@ -540,22 +537,9 @@ static void complete_transaction(struct fw_card *card, int rcode,
 		memcpy(rsp->data, payload, rsp->length);
 
 	spin_lock_irqsave(&client->lock, flags);
-	/*
-	 * 1. If called while in shutdown, the idr tree must be left untouched.
-	 *    The idr handle will be removed and the client reference will be
-	 *    dropped later.
-	 * 2. If the call chain was release_client_resource ->
-	 *    release_transaction -> complete_transaction (instead of a normal
-	 *    conclusion of the transaction), i.e. if this resource was already
-	 *    unregistered from the idr, the client reference will be dropped
-	 *    by release_client_resource and we must not drop it here.
-	 */
-	if (!client->in_shutdown &&
-	    idr_find(&client->resource_idr, e->r.resource.handle)) {
-		idr_remove(&client->resource_idr, e->r.resource.handle);
-		/* Drop the idr's reference */
-		client_put(client);
-	}
+	idr_remove(&client->resource_idr, e->r.resource.handle);
+	if (client->in_shutdown)
+		wake_up(&client->tx_flush_wait);
 	spin_unlock_irqrestore(&client->lock, flags);
 
 	rsp->type = FW_CDEV_EVENT_RESPONSE;
@@ -575,7 +559,7 @@ static void complete_transaction(struct fw_card *card, int rcode,
 	queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length,
 		    NULL, 0);
 
-	/* Drop the transaction callback's reference */
+	/* Drop the idr's reference */
 	client_put(client);
 }
 
@@ -614,9 +598,6 @@ static int init_request(struct client *client,
 	if (ret < 0)
 		goto failed;
 
-	/* Get a reference for the transaction callback */
-	client_get(client);
-
 	fw_send_request(client->device->card, &e->r.transaction,
 			request->tcode, destination_id, request->generation,
 			speed, request->offset, e->response.data,
@@ -1126,6 +1107,7 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
 		payload += u.packet.payload_length;
 		count++;
 	}
+	fw_iso_context_queue_flush(ctx);
 
 	a->size    -= uptr_to_u64(p) - a->packets;
 	a->packets  = uptr_to_u64(p);
@@ -1223,7 +1205,8 @@ static void iso_resource_work(struct work_struct *work)
 	todo = r->todo;
 	/* Allow 1000ms grace period for other reallocations. */
 	if (todo == ISO_RES_ALLOC &&
-	    time_is_after_jiffies(client->device->card->reset_jiffies + HZ)) {
+	    time_before64(get_jiffies_64(),
+			  client->device->card->reset_jiffies + HZ)) {
 		schedule_iso_resource(r, DIV_ROUND_UP(HZ, 3));
 		skip = true;
 	} else {
@@ -1246,8 +1229,7 @@ static void iso_resource_work(struct work_struct *work)
 			r->channels, &channel, &bandwidth,
 			todo == ISO_RES_ALLOC ||
 			todo == ISO_RES_REALLOC ||
-			todo == ISO_RES_ALLOC_ONCE,
-			r->transaction_data);
+			todo == ISO_RES_ALLOC_ONCE);
 	/*
 	 * Is this generation outdated already? As long as this resource sticks
 	 * in the idr, it will be scheduled again for a newer generation or at
@@ -1501,9 +1483,10 @@ static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
 	e->client		= client;
 	e->p.speed		= SCODE_100;
 	e->p.generation		= a->generation;
-	e->p.header[0]		= a->data[0];
-	e->p.header[1]		= a->data[1];
-	e->p.header_length	= 8;
+	e->p.header[0]		= TCODE_LINK_INTERNAL << 4;
+	e->p.header[1]		= a->data[0];
+	e->p.header[2]		= a->data[1];
+	e->p.header_length	= 12;
 	e->p.callback		= outbound_phy_packet_callback;
 	e->phy_packet.closure	= a->closure;
 	e->phy_packet.type	= FW_CDEV_EVENT_PHY_PACKET_SENT;
@@ -1677,6 +1660,25 @@ static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
 	return ret;
 }
 
+static int is_outbound_transaction_resource(int id, void *p, void *data)
+{
+	struct client_resource *resource = p;
+
+	return resource->release == release_transaction;
+}
+
+static int has_outbound_transactions(struct client *client)
+{
+	int ret;
+
+	spin_lock_irq(&client->lock);
+	ret = idr_for_each(&client->resource_idr,
+			   is_outbound_transaction_resource, NULL);
+	spin_unlock_irq(&client->lock);
+
+	return ret;
+}
+
 static int shutdown_resource(int id, void *p, void *data)
 {
 	struct client_resource *resource = p;
@@ -1712,6 +1714,8 @@ static int fw_device_op_release(struct inode *inode, struct file *file)
 	client->in_shutdown = true;
 	spin_unlock_irq(&client->lock);
 
+	wait_event(client->tx_flush_wait, !has_outbound_transactions(client));
+
 	idr_for_each(&client->resource_idr, shutdown_resource, client);
 	idr_remove_all(&client->resource_idr);
 	idr_destroy(&client->resource_idr);
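
The release path above is an instance of a common kernel idiom: the closer marks the client as shutting down, then sleeps in wait_event() until a predicate becomes true (here: no outbound-transaction resources left in the idr), while every completing transaction calls wake_up() on the same waitqueue. A generic sketch of the idiom, assuming a plain counter in place of the idr scan (not driver code):

static DECLARE_WAIT_QUEUE_HEAD(flush_wait);
static atomic_t pending = ATOMIC_INIT(0);

static void op_submit(void)
{
	atomic_inc(&pending);
	/* ...kick off asynchronous work that ends in op_complete()... */
}

static void op_complete(void)
{
	/* wake the closer once the last operation has finished */
	if (atomic_dec_and_test(&pending))
		wake_up(&flush_wait);
}

static void flush_on_close(void)
{
	wait_event(flush_wait, atomic_read(&pending) == 0);
}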
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index 6113b896e790..95a471401892 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -725,6 +725,15 @@ struct fw_device *fw_device_get_by_devt(dev_t devt)
 	return device;
 }
 
+struct workqueue_struct *fw_workqueue;
+EXPORT_SYMBOL(fw_workqueue);
+
+static void fw_schedule_device_work(struct fw_device *device,
+				    unsigned long delay)
+{
+	queue_delayed_work(fw_workqueue, &device->work, delay);
+}
+
 /*
  * These defines control the retry behavior for reading the config
  * rom. It shouldn't be necessary to tweak these; if the device
@@ -747,9 +756,10 @@ static void fw_device_shutdown(struct work_struct *work)
 		container_of(work, struct fw_device, work.work);
 	int minor = MINOR(device->device.devt);
 
-	if (time_is_after_jiffies(device->card->reset_jiffies + SHUTDOWN_DELAY)
+	if (time_before64(get_jiffies_64(),
+			  device->card->reset_jiffies + SHUTDOWN_DELAY)
 	    && !list_empty(&device->card->link)) {
-		schedule_delayed_work(&device->work, SHUTDOWN_DELAY);
+		fw_schedule_device_work(device, SHUTDOWN_DELAY);
 		return;
 	}
 
@@ -861,7 +871,7 @@ static int lookup_existing_device(struct device *dev, void *data)
 		fw_notify("rediscovered device %s\n", dev_name(dev));
 
 		PREPARE_DELAYED_WORK(&old->work, fw_device_update);
-		schedule_delayed_work(&old->work, 0);
+		fw_schedule_device_work(old, 0);
 
 		if (current_node == card->root_node)
 			fw_schedule_bm_work(card, 0);
@@ -952,10 +962,11 @@ static void fw_device_init(struct work_struct *work)
 		if (device->config_rom_retries < MAX_RETRIES &&
 		    atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
 			device->config_rom_retries++;
-			schedule_delayed_work(&device->work, RETRY_DELAY);
+			fw_schedule_device_work(device, RETRY_DELAY);
 		} else {
-			fw_notify("giving up on config rom for node id %x\n",
-				  device->node_id);
+			if (device->node->link_on)
+				fw_notify("giving up on config rom for node id %x\n",
+					  device->node_id);
 			if (device->node == device->card->root_node)
 				fw_schedule_bm_work(device->card, 0);
 			fw_device_release(&device->device);
@@ -1017,7 +1028,7 @@ static void fw_device_init(struct work_struct *work)
 				  FW_DEVICE_INITIALIZING,
 				  FW_DEVICE_RUNNING) == FW_DEVICE_GONE) {
 		PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
-		schedule_delayed_work(&device->work, SHUTDOWN_DELAY);
+		fw_schedule_device_work(device, SHUTDOWN_DELAY);
 	} else {
 		if (device->config_rom_retries)
 			fw_notify("created device %s: GUID %08x%08x, S%d00, "
@@ -1096,7 +1107,7 @@ static void fw_device_refresh(struct work_struct *work)
 		if (device->config_rom_retries < MAX_RETRIES / 2 &&
 		    atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
 			device->config_rom_retries++;
-			schedule_delayed_work(&device->work, RETRY_DELAY / 2);
+			fw_schedule_device_work(device, RETRY_DELAY / 2);
 
 			return;
 		}
@@ -1129,7 +1140,7 @@ static void fw_device_refresh(struct work_struct *work)
 		if (device->config_rom_retries < MAX_RETRIES &&
 		    atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
 			device->config_rom_retries++;
-			schedule_delayed_work(&device->work, RETRY_DELAY);
+			fw_schedule_device_work(device, RETRY_DELAY);
 
 			return;
 		}
@@ -1156,7 +1167,7 @@ static void fw_device_refresh(struct work_struct *work)
  gone:
 	atomic_set(&device->state, FW_DEVICE_GONE);
 	PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
-	schedule_delayed_work(&device->work, SHUTDOWN_DELAY);
+	fw_schedule_device_work(device, SHUTDOWN_DELAY);
  out:
 	if (node_id == card->root_node->node_id)
 		fw_schedule_bm_work(card, 0);
@@ -1168,9 +1179,12 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
 
 	switch (event) {
 	case FW_NODE_CREATED:
-	case FW_NODE_LINK_ON:
-		if (!node->link_on)
-			break;
+		/*
+		 * Attempt to scan the node, regardless whether its self ID has
+		 * the L (link active) flag set or not. Some broken devices
+		 * send L=0 but have an up-and-running link; others send L=1
+		 * without actually having a link.
+		 */
  create:
 		device = kzalloc(sizeof(*device), GFP_ATOMIC);
 		if (device == NULL)
@@ -1209,10 +1223,11 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
 		 * first config rom scan half a second after bus reset.
 		 */
 		INIT_DELAYED_WORK(&device->work, fw_device_init);
-		schedule_delayed_work(&device->work, INITIAL_DELAY);
+		fw_schedule_device_work(device, INITIAL_DELAY);
 		break;
 
 	case FW_NODE_INITIATED_RESET:
+	case FW_NODE_LINK_ON:
 		device = node->data;
 		if (device == NULL)
 			goto create;
@@ -1224,22 +1239,22 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
 				  FW_DEVICE_RUNNING,
 				  FW_DEVICE_INITIALIZING) == FW_DEVICE_RUNNING) {
 			PREPARE_DELAYED_WORK(&device->work, fw_device_refresh);
-			schedule_delayed_work(&device->work,
+			fw_schedule_device_work(device,
 				device->is_local ? 0 : INITIAL_DELAY);
 		}
 		break;
 
 	case FW_NODE_UPDATED:
-		if (!node->link_on || node->data == NULL)
+		device = node->data;
+		if (device == NULL)
 			break;
 
-		device = node->data;
 		device->node_id = node->node_id;
 		smp_wmb(); /* update node_id before generation */
 		device->generation = card->generation;
 		if (atomic_read(&device->state) == FW_DEVICE_RUNNING) {
 			PREPARE_DELAYED_WORK(&device->work, fw_device_update);
-			schedule_delayed_work(&device->work, 0);
+			fw_schedule_device_work(device, 0);
 		}
 		break;
 
@@ -1264,7 +1279,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
 		if (atomic_xchg(&device->state,
 				FW_DEVICE_GONE) == FW_DEVICE_RUNNING) {
 			PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
-			schedule_delayed_work(&device->work,
+			fw_schedule_device_work(device,
 				list_empty(&card->link) ? 0 : SHUTDOWN_DELAY);
 		}
 		break;
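
All of the schedule_delayed_work() calls above now funnel into one dedicated fw_workqueue (allocated with WQ_MEM_RECLAIM in fw_core_init(), see the core-transaction.c hunks below). A minimal self-contained module using the same pattern, with illustrative names:

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;
static struct delayed_work demo_work;

static void demo_work_fn(struct work_struct *work)
{
	pr_info("demo work ran\n");
}

static int __init demo_init(void)
{
	demo_wq = alloc_workqueue("demo", WQ_MEM_RECLAIM, 0);
	if (!demo_wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&demo_work, demo_work_fn);
	queue_delayed_work(demo_wq, &demo_work, HZ / 2);	/* in 0.5 s */
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_delayed_work_sync(&demo_work);
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");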
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index c003fa4e2db1..57c3973093ad 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -185,6 +185,12 @@ int fw_iso_context_queue(struct fw_iso_context *ctx,
 }
 EXPORT_SYMBOL(fw_iso_context_queue);
 
+void fw_iso_context_queue_flush(struct fw_iso_context *ctx)
+{
+	ctx->card->driver->flush_queue_iso(ctx);
+}
+EXPORT_SYMBOL(fw_iso_context_queue_flush);
+
 int fw_iso_context_stop(struct fw_iso_context *ctx)
 {
 	return ctx->card->driver->stop_iso(ctx);
@@ -196,9 +202,10 @@ EXPORT_SYMBOL(fw_iso_context_stop);
  */
 
 static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
-			    int bandwidth, bool allocate, __be32 data[2])
+			    int bandwidth, bool allocate)
 {
 	int try, new, old = allocate ? BANDWIDTH_AVAILABLE_INITIAL : 0;
+	__be32 data[2];
 
 	/*
 	 * On a 1394a IRM with low contention, try < 1 is enough.
@@ -233,47 +240,48 @@ static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
 }
 
 static int manage_channel(struct fw_card *card, int irm_id, int generation,
-		u32 channels_mask, u64 offset, bool allocate, __be32 data[2])
+		u32 channels_mask, u64 offset, bool allocate)
 {
-	__be32 c, all, old;
-	int i, ret = -EIO, retry = 5;
+	__be32 bit, all, old;
+	__be32 data[2];
+	int channel, ret = -EIO, retry = 5;
 
 	old = all = allocate ? cpu_to_be32(~0) : 0;
 
-	for (i = 0; i < 32; i++) {
-		if (!(channels_mask & 1 << i))
+	for (channel = 0; channel < 32; channel++) {
+		if (!(channels_mask & 1 << channel))
 			continue;
 
 		ret = -EBUSY;
 
-		c = cpu_to_be32(1 << (31 - i));
-		if ((old & c) != (all & c))
+		bit = cpu_to_be32(1 << (31 - channel));
+		if ((old & bit) != (all & bit))
 			continue;
 
 		data[0] = old;
-		data[1] = old ^ c;
+		data[1] = old ^ bit;
 		switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
 					   irm_id, generation, SCODE_100,
 					   offset, data, 8)) {
 		case RCODE_GENERATION:
 			/* A generation change frees all channels. */
-			return allocate ? -EAGAIN : i;
+			return allocate ? -EAGAIN : channel;
 
 		case RCODE_COMPLETE:
 			if (data[0] == old)
-				return i;
+				return channel;
 
 			old = data[0];
 
 			/* Is the IRM 1394a-2000 compliant? */
-			if ((data[0] & c) == (data[1] & c))
+			if ((data[0] & bit) == (data[1] & bit))
 				continue;
 
 			/* 1394-1995 IRM, fall through to retry. */
 		default:
 			if (retry) {
 				retry--;
-				i--;
+				channel--;
 			} else {
 				ret = -EIO;
 			}
@@ -284,7 +292,7 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation,
 }
 
 static void deallocate_channel(struct fw_card *card, int irm_id,
-			       int generation, int channel, __be32 buffer[2])
+			       int generation, int channel)
 {
 	u32 mask;
 	u64 offset;
@@ -293,7 +301,7 @@ static void deallocate_channel(struct fw_card *card, int irm_id,
 	offset = channel < 32 ? CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI :
 				CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO;
 
-	manage_channel(card, irm_id, generation, mask, offset, false, buffer);
+	manage_channel(card, irm_id, generation, mask, offset, false);
 }
 
 /**
@@ -322,7 +330,7 @@ static void deallocate_channel(struct fw_card *card, int irm_id,
  */
 void fw_iso_resource_manage(struct fw_card *card, int generation,
 			    u64 channels_mask, int *channel, int *bandwidth,
-			    bool allocate, __be32 buffer[2])
+			    bool allocate)
 {
 	u32 channels_hi = channels_mask;	/* channels 31...0 */
 	u32 channels_lo = channels_mask >> 32;	/* channels 63...32 */
@@ -335,11 +343,11 @@ void fw_iso_resource_manage(struct fw_card *card, int generation,
 	if (channels_hi)
 		c = manage_channel(card, irm_id, generation, channels_hi,
 				CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI,
-				allocate, buffer);
+				allocate);
 	if (channels_lo && c < 0) {
 		c = manage_channel(card, irm_id, generation, channels_lo,
 				CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO,
-				allocate, buffer);
+				allocate);
 		if (c >= 0)
 			c += 32;
 	}
@@ -351,14 +359,14 @@ void fw_iso_resource_manage(struct fw_card *card, int generation,
 	if (*bandwidth == 0)
 		return;
 
-	ret = manage_bandwidth(card, irm_id, generation, *bandwidth,
-			       allocate, buffer);
+	ret = manage_bandwidth(card, irm_id, generation, *bandwidth, allocate);
 	if (ret < 0)
 		*bandwidth = 0;
 
 	if (allocate && ret < 0) {
 		if (c >= 0)
			deallocate_channel(card, irm_id, generation, c);
 		*channel = ret;
 	}
 }
+EXPORT_SYMBOL(fw_iso_resource_manage);
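
manage_channel() above is a lock-free claim loop against the IRM's CHANNELS_AVAILABLE register: read the current bitmask, propose a copy with one bit flipped via a compare-and-swap lock transaction, and retry with the refreshed value on contention. A userspace-flavoured sketch of the same loop over a plain 32-bit mask (illustrative only; the real code additionally deals with byte order and bus transactions):

#include <stdatomic.h>
#include <stdint.h>

/* Claim one of the channels requested in 'wanted' (bit n = channel n)
 * from 'available', where bit 31 represents channel 0 as in the CSR
 * register. Returns the claimed channel number or -1.
 */
static int claim_channel(_Atomic uint32_t *available, uint32_t wanted)
{
	uint32_t old = atomic_load(available);
	int ch;

	for (ch = 0; ch < 32; ch++) {
		uint32_t bit = UINT32_C(1) << (31 - ch);

		if (!(wanted & (UINT32_C(1) << ch)) || !(old & bit))
			continue;
		/* on failure, 'old' is refreshed and the same channel is
		 * re-examined, like the retry path in manage_channel() */
		if (atomic_compare_exchange_weak(available, &old,
						 old & ~bit))
			return ch;
		ch--;
	}
	return -1;
}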
diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c
index 09be1a635505..193ed9233144 100644
--- a/drivers/firewire/core-topology.c
+++ b/drivers/firewire/core-topology.c
@@ -545,7 +545,7 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
 	 */
 	smp_wmb();
 	card->generation = generation;
-	card->reset_jiffies = jiffies;
+	card->reset_jiffies = get_jiffies_64();
 	card->bm_node_id = 0xffff;
 	card->bm_abdicate = bm_abdicate;
 	fw_schedule_bm_work(card, 0);
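
The point of widening reset_jiffies to 64 bits here: on a 32-bit machine with HZ = 1000, the plain jiffies counter wraps after 2^32 / 1000 seconds ≈ 49.7 days, so a comparison of the current time against reset_jiffies plus a delay can misfire once the counter has wrapped. Recording get_jiffies_64() at reset time and comparing with time_before64()/time_after64(), as the hunks above and below do, sidesteps the wraparound.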
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index b42a0bde8494..334b82a3542c 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -36,6 +36,7 @@
 #include <linux/string.h>
 #include <linux/timer.h>
 #include <linux/types.h>
+#include <linux/workqueue.h>
 
 #include <asm/byteorder.h>
 
@@ -72,6 +73,15 @@
 #define PHY_CONFIG_ROOT_ID(node_id)	((((node_id) & 0x3f) << 24) | (1 << 23))
 #define PHY_IDENTIFIER(id)		((id) << 30)
 
+/* returns 0 if the split timeout handler is already running */
+static int try_cancel_split_timeout(struct fw_transaction *t)
+{
+	if (t->is_split_transaction)
+		return del_timer(&t->split_timeout_timer);
+	else
+		return 1;
+}
+
 static int close_transaction(struct fw_transaction *transaction,
 			     struct fw_card *card, int rcode)
 {
@@ -81,7 +91,7 @@ static int close_transaction(struct fw_transaction *transaction,
 	spin_lock_irqsave(&card->lock, flags);
 	list_for_each_entry(t, &card->transaction_list, link) {
 		if (t == transaction) {
-			if (!del_timer(&t->split_timeout_timer)) {
+			if (!try_cancel_split_timeout(t)) {
 				spin_unlock_irqrestore(&card->lock, flags);
 				goto timed_out;
 			}
@@ -141,16 +151,28 @@ static void split_transaction_timeout_callback(unsigned long data)
 	card->tlabel_mask &= ~(1ULL << t->tlabel);
 	spin_unlock_irqrestore(&card->lock, flags);
 
-	card->driver->cancel_packet(card, &t->packet);
-
-	/*
-	 * At this point cancel_packet will never call the transaction
-	 * callback, since we just took the transaction out of the list.
-	 * So do it here.
-	 */
 	t->callback(card, RCODE_CANCELLED, NULL, 0, t->callback_data);
 }
 
+static void start_split_transaction_timeout(struct fw_transaction *t,
+					    struct fw_card *card)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&card->lock, flags);
+
+	if (list_empty(&t->link) || WARN_ON(t->is_split_transaction)) {
+		spin_unlock_irqrestore(&card->lock, flags);
+		return;
+	}
+
+	t->is_split_transaction = true;
+	mod_timer(&t->split_timeout_timer,
+		  jiffies + card->split_timeout_jiffies);
+
+	spin_unlock_irqrestore(&card->lock, flags);
+}
+
 static void transmit_complete_callback(struct fw_packet *packet,
 				       struct fw_card *card, int status)
 {
@@ -162,7 +184,7 @@ static void transmit_complete_callback(struct fw_packet *packet,
 		close_transaction(t, card, RCODE_COMPLETE);
 		break;
 	case ACK_PENDING:
-		t->timestamp = packet->timestamp;
+		start_split_transaction_timeout(t, card);
 		break;
 	case ACK_BUSY_X:
 	case ACK_BUSY_A:
@@ -250,7 +272,7 @@ static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
 		break;
 
 	default:
-		WARN(1, "wrong tcode %d", tcode);
+		WARN(1, "wrong tcode %d\n", tcode);
 	}
  common:
 	packet->speed = speed;
@@ -305,8 +327,8 @@ static int allocate_tlabel(struct fw_card *card)
 * It will contain tag, channel, and sy data instead of a node ID then.
 *
 * The payload buffer at @data is going to be DMA-mapped except in case of
- * quadlet-sized payload or of local (loopback) requests. Hence make sure that
- * the buffer complies with the restrictions for DMA-mapped memory. The
+ * @length <= 8 or of local (loopback) requests. Hence make sure that the
+ * buffer complies with the restrictions of the streaming DMA mapping API.
 * @payload must not be freed before the @callback is called.
 *
 * In case of request types without payload, @data is NULL and @length is 0.
@@ -349,11 +371,9 @@ void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
 	t->node_id = destination_id;
 	t->tlabel = tlabel;
 	t->card = card;
+	t->is_split_transaction = false;
 	setup_timer(&t->split_timeout_timer,
 		    split_transaction_timeout_callback, (unsigned long)t);
-	/* FIXME: start this timer later, relative to t->timestamp */
-	mod_timer(&t->split_timeout_timer,
-		  jiffies + card->split_timeout_jiffies);
 	t->callback = callback;
 	t->callback_data = callback_data;
 
@@ -392,7 +412,8 @@ static void transaction_callback(struct fw_card *card, int rcode,
 *
 * Returns the RCODE. See fw_send_request() for parameter documentation.
 * Unlike fw_send_request(), @data points to the payload of the request or/and
- * to the payload of the response.
+ * to the payload of the response. DMA mapping restrictions apply to outbound
+ * request payloads of >= 8 bytes but not to inbound response payloads.
 */
 int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
		       int generation, int speed, unsigned long long offset,
@@ -423,7 +444,8 @@ static void transmit_phy_packet_callback(struct fw_packet *packet,
 }
 
 static struct fw_packet phy_config_packet = {
-	.header_length	= 8,
+	.header_length	= 12,
+	.header[0]	= TCODE_LINK_INTERNAL << 4,
 	.payload_length	= 0,
 	.speed		= SCODE_100,
 	.callback	= transmit_phy_packet_callback,
@@ -451,8 +473,8 @@ void fw_send_phy_config(struct fw_card *card,
 
 	mutex_lock(&phy_config_mutex);
 
-	phy_config_packet.header[0] = data;
-	phy_config_packet.header[1] = ~data;
+	phy_config_packet.header[1] = data;
+	phy_config_packet.header[2] = ~data;
 	phy_config_packet.generation = generation;
 	INIT_COMPLETION(phy_config_done);
 
@@ -638,7 +660,7 @@ int fw_get_response_length(struct fw_request *r)
 	}
 
 	default:
-		WARN(1, "wrong tcode %d", tcode);
+		WARN(1, "wrong tcode %d\n", tcode);
 		return 0;
 	}
 }
@@ -694,7 +716,7 @@ void fw_fill_response(struct fw_packet *response, u32 *request_header,
 		break;
 
 	default:
-		WARN(1, "wrong tcode %d", tcode);
+		WARN(1, "wrong tcode %d\n", tcode);
 	}
 
 	response->payload_mapped = false;
@@ -925,7 +947,7 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
 	spin_lock_irqsave(&card->lock, flags);
 	list_for_each_entry(t, &card->transaction_list, link) {
 		if (t->node_id == source && t->tlabel == tlabel) {
-			if (!del_timer(&t->split_timeout_timer)) {
+			if (!try_cancel_split_timeout(t)) {
 				spin_unlock_irqrestore(&card->lock, flags);
 				goto timed_out;
 			}
@@ -1192,13 +1214,21 @@ static int __init fw_core_init(void)
 {
 	int ret;
 
+	fw_workqueue = alloc_workqueue("firewire",
+				       WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
+	if (!fw_workqueue)
+		return -ENOMEM;
+
 	ret = bus_register(&fw_bus_type);
-	if (ret < 0)
+	if (ret < 0) {
+		destroy_workqueue(fw_workqueue);
 		return ret;
+	}
 
 	fw_cdev_major = register_chrdev(0, "firewire", &fw_device_ops);
 	if (fw_cdev_major < 0) {
 		bus_unregister(&fw_bus_type);
+		destroy_workqueue(fw_workqueue);
 		return fw_cdev_major;
 	}
 
@@ -1214,6 +1244,7 @@ static void __exit fw_core_cleanup(void)
 {
 	unregister_chrdev(fw_cdev_major, "firewire");
 	bus_unregister(&fw_bus_type);
+	destroy_workqueue(fw_workqueue);
 	idr_destroy(&fw_device_idr);
 }
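
One consequence of the PHY packet changes above: outbound PHY packets now carry an explicit first header quadlet holding the link-internal tcode, so header_length grows from 8 to 12 and the PHY data moves to header[1]/header[2]. A small standalone sketch that lays out the three quadlets the same way (illustrative, not driver code):

#include <stdint.h>
#include <stdio.h>

#define TCODE_LINK_INTERNAL 0xe

static void fill_phy_config_header(uint32_t header[3], uint32_t data)
{
	header[0] = TCODE_LINK_INTERNAL << 4;	/* tcode in bits 7..4 */
	header[1] = data;			/* PHY packet quadlet */
	header[2] = ~data;			/* its bitwise inverse */
}

int main(void)
{
	uint32_t h[3];

	fill_phy_config_header(h, 0x00800000);	/* example payload */
	printf("%08x %08x %08x\n", h[0], h[1], h[2]);
	return 0;
}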
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h index e6239f971be6..0fe4e4e6eda7 100644 --- a/drivers/firewire/core.h +++ b/drivers/firewire/core.h | |||
@@ -97,6 +97,8 @@ struct fw_card_driver { | |||
97 | struct fw_iso_buffer *buffer, | 97 | struct fw_iso_buffer *buffer, |
98 | unsigned long payload); | 98 | unsigned long payload); |
99 | 99 | ||
100 | void (*flush_queue_iso)(struct fw_iso_context *ctx); | ||
101 | |||
100 | int (*stop_iso)(struct fw_iso_context *ctx); | 102 | int (*stop_iso)(struct fw_iso_context *ctx); |
101 | }; | 103 | }; |
102 | 104 | ||
@@ -147,9 +149,6 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event); | |||
147 | /* -iso */ | 149 | /* -iso */ |
148 | 150 | ||
149 | int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma); | 151 | int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma); |
150 | void fw_iso_resource_manage(struct fw_card *card, int generation, | ||
151 | u64 channels_mask, int *channel, int *bandwidth, | ||
152 | bool allocate, __be32 buffer[2]); | ||
153 | 152 | ||
154 | 153 | ||
155 | /* -topology */ | 154 | /* -topology */ |
@@ -215,9 +214,11 @@ static inline bool is_next_generation(int new_generation, int old_generation) | |||
215 | 214 | ||
216 | /* -transaction */ | 215 | /* -transaction */ |
217 | 216 | ||
217 | #define TCODE_LINK_INTERNAL 0xe | ||
218 | |||
218 | #define TCODE_IS_READ_REQUEST(tcode) (((tcode) & ~1) == 4) | 219 | #define TCODE_IS_READ_REQUEST(tcode) (((tcode) & ~1) == 4) |
219 | #define TCODE_IS_BLOCK_PACKET(tcode) (((tcode) & 1) != 0) | 220 | #define TCODE_IS_BLOCK_PACKET(tcode) (((tcode) & 1) != 0) |
220 | #define TCODE_IS_LINK_INTERNAL(tcode) ((tcode) == 0xe) | 221 | #define TCODE_IS_LINK_INTERNAL(tcode) ((tcode) == TCODE_LINK_INTERNAL) |
221 | #define TCODE_IS_REQUEST(tcode) (((tcode) & 2) == 0) | 222 | #define TCODE_IS_REQUEST(tcode) (((tcode) & 2) == 0) |
222 | #define TCODE_IS_RESPONSE(tcode) (((tcode) & 2) != 0) | 223 | #define TCODE_IS_RESPONSE(tcode) (((tcode) & 2) != 0) |
223 | #define TCODE_HAS_REQUEST_DATA(tcode) (((tcode) & 12) != 4) | 224 | #define TCODE_HAS_REQUEST_DATA(tcode) (((tcode) & 12) != 4) |
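The new TCODE_LINK_INTERNAL name documents what the magic 0xe was; the surrounding predicates work because IEEE 1394 packs packet properties into individual tcode bits: bit 0 selects the block (versus quadlet) form and bit 1 marks responses. A few spot checks using spec-defined tcode values (compile-time, could live in any function):

    /* 0x4 = read quadlet request, 0x5 = read block request */
    BUILD_BUG_ON(!TCODE_IS_READ_REQUEST(0x4));
    BUILD_BUG_ON(!TCODE_IS_READ_REQUEST(0x5));
    BUILD_BUG_ON(TCODE_IS_BLOCK_PACKET(0x4));   /* quadlet form */
    BUILD_BUG_ON(!TCODE_IS_BLOCK_PACKET(0x5));  /* block form */
    /* 0x0 = write quadlet request, 0x2 = write response */
    BUILD_BUG_ON(!TCODE_IS_REQUEST(0x0));
    BUILD_BUG_ON(!TCODE_IS_RESPONSE(0x2));
    BUILD_BUG_ON(!TCODE_IS_LINK_INTERNAL(TCODE_LINK_INTERNAL));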
diff --git a/drivers/firewire/init_ohci1394_dma.c b/drivers/firewire/init_ohci1394_dma.c new file mode 100644 index 000000000000..a9a347adb353 --- /dev/null +++ b/drivers/firewire/init_ohci1394_dma.c | |||
@@ -0,0 +1,309 @@ | |||
1 | /* | ||
2 | * init_ohci1394_dma.c - Initializes physical DMA on all OHCI 1394 controllers | ||
3 | * | ||
4 | * Copyright (C) 2006-2007 Bernhard Kaindl <bk@suse.de> | ||
5 | * | ||
6 | * Derived from drivers/ieee1394/ohci1394.c and arch/x86/kernel/early-quirks.c | ||
7 | * this file has functions to: | ||
8 | * - scan the PCI very early on boot for all OHCI 1394-compliant controllers | ||
9 | * - reset and initialize them and make them join the IEEE1394 bus and | ||
10 | * - enable physical DMA on them to allow remote debugging | ||
11 | * | ||
12 | * All code and data is marked as __init and __initdata, respectively, | ||
13 | * because during boot all OHCI1394 controllers may be claimed by the | ||
14 | * firewire stack, and at that point this code must not touch them anymore. | ||
15 | * | ||
16 | * To use physical DMA after the initialization of the firewire stack, | ||
17 | * be sure that the stack enables it and (re-)attach after the bus reset | ||
18 | * which may be caused by the firewire stack initialization. | ||
19 | * | ||
20 | * This program is free software; you can redistribute it and/or modify | ||
21 | * it under the terms of the GNU General Public License as published by | ||
22 | * the Free Software Foundation; either version 2 of the License, or | ||
23 | * (at your option) any later version. | ||
24 | * | ||
25 | * This program is distributed in the hope that it will be useful, | ||
26 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
27 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
28 | * GNU General Public License for more details. | ||
29 | * | ||
30 | * You should have received a copy of the GNU General Public License | ||
31 | * along with this program; if not, write to the Free Software Foundation, | ||
32 | * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
33 | */ | ||
34 | |||
35 | #include <linux/delay.h> | ||
36 | #include <linux/io.h> | ||
37 | #include <linux/kernel.h> | ||
38 | #include <linux/pci.h> /* for PCI defines */ | ||
39 | #include <linux/string.h> | ||
40 | |||
41 | #include <asm/pci-direct.h> /* for direct PCI config space access */ | ||
42 | #include <asm/fixmap.h> | ||
43 | |||
44 | #include <linux/init_ohci1394_dma.h> | ||
45 | #include "ohci.h" | ||
46 | |||
47 | int __initdata init_ohci1394_dma_early; | ||
48 | |||
49 | struct ohci { | ||
50 | void __iomem *registers; | ||
51 | }; | ||
52 | |||
53 | static inline void reg_write(const struct ohci *ohci, int offset, u32 data) | ||
54 | { | ||
55 | writel(data, ohci->registers + offset); | ||
56 | } | ||
57 | |||
58 | static inline u32 reg_read(const struct ohci *ohci, int offset) | ||
59 | { | ||
60 | return readl(ohci->registers + offset); | ||
61 | } | ||
62 | |||
63 | #define OHCI_LOOP_COUNT 100 /* Number of loops for reg read waits */ | ||
64 | |||
65 | /* Reads a PHY register of an OHCI-1394 controller */ | ||
66 | static inline u8 __init get_phy_reg(struct ohci *ohci, u8 addr) | ||
67 | { | ||
68 | int i; | ||
69 | u32 r; | ||
70 | |||
71 | reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | 0x00008000); | ||
72 | |||
73 | for (i = 0; i < OHCI_LOOP_COUNT; i++) { | ||
74 | if (reg_read(ohci, OHCI1394_PhyControl) & 0x80000000) | ||
75 | break; | ||
76 | mdelay(1); | ||
77 | } | ||
78 | r = reg_read(ohci, OHCI1394_PhyControl); | ||
79 | |||
80 | return (r & 0x00ff0000) >> 16; | ||
81 | } | ||
82 | |||
83 | /* Writes to a PHY register of an OHCI-1394 controller */ | ||
84 | static inline void __init set_phy_reg(struct ohci *ohci, u8 addr, u8 data) | ||
85 | { | ||
86 | int i; | ||
87 | |||
88 | reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | data | 0x00004000); | ||
89 | |||
90 | for (i = 0; i < OHCI_LOOP_COUNT; i++) { | ||
91 | if (!(reg_read(ohci, OHCI1394_PhyControl) & 0x00004000)) | ||
92 | break; | ||
93 | mdelay(1); | ||
94 | } | ||
95 | } | ||
96 | |||
97 | /* Resets an OHCI-1394 controller (for sane state before initialization) */ | ||
98 | static inline void __init init_ohci1394_soft_reset(struct ohci *ohci) | ||
99 | { | ||
100 | int i; | ||
101 | |||
102 | reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset); | ||
103 | |||
104 | for (i = 0; i < OHCI_LOOP_COUNT; i++) { | ||
105 | if (!(reg_read(ohci, OHCI1394_HCControlSet) | ||
106 | & OHCI1394_HCControl_softReset)) | ||
107 | break; | ||
108 | mdelay(1); | ||
109 | } | ||
110 | } | ||
111 | |||
112 | #define OHCI1394_MAX_AT_REQ_RETRIES 0xf | ||
113 | #define OHCI1394_MAX_AT_RESP_RETRIES 0x2 | ||
114 | #define OHCI1394_MAX_PHYS_RESP_RETRIES 0x8 | ||
115 | |||
116 | /* Basic OHCI-1394 register and port initialization */ | ||
117 | static inline void __init init_ohci1394_initialize(struct ohci *ohci) | ||
118 | { | ||
119 | u32 bus_options; | ||
120 | int num_ports, i; | ||
121 | |||
122 | /* Put some defaults to these undefined bus options */ | ||
123 | bus_options = reg_read(ohci, OHCI1394_BusOptions); | ||
124 | bus_options |= 0x60000000; /* Enable CMC and ISC */ | ||
125 | bus_options &= ~0x00ff0000; /* XXX: Set cyc_clk_acc to zero for now */ | ||
126 | bus_options &= ~0x18000000; /* Disable PMC and BMC */ | ||
127 | reg_write(ohci, OHCI1394_BusOptions, bus_options); | ||
128 | |||
129 | /* Set the bus number */ | ||
130 | reg_write(ohci, OHCI1394_NodeID, 0x0000ffc0); | ||
131 | |||
132 | /* Enable posted writes */ | ||
133 | reg_write(ohci, OHCI1394_HCControlSet, | ||
134 | OHCI1394_HCControl_postedWriteEnable); | ||
135 | |||
136 | /* Clear link control register */ | ||
137 | reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff); | ||
138 | |||
139 | /* enable phys */ | ||
140 | reg_write(ohci, OHCI1394_LinkControlSet, | ||
141 | OHCI1394_LinkControl_rcvPhyPkt); | ||
142 | |||
143 | /* Don't accept phy packets into AR request context */ | ||
144 | reg_write(ohci, OHCI1394_LinkControlClear, 0x00000400); | ||
145 | |||
146 | /* Clear the isochronous interrupt masks */ | ||
147 | reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff); | ||
148 | reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff); | ||
149 | reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff); | ||
150 | reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff); | ||
151 | |||
152 | /* Accept asynchronous transfer requests from all nodes for now */ | ||
153 | reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000); | ||
154 | |||
155 | /* Specify asynchronous transfer retries */ | ||
156 | reg_write(ohci, OHCI1394_ATRetries, | ||
157 | OHCI1394_MAX_AT_REQ_RETRIES | | ||
158 | (OHCI1394_MAX_AT_RESP_RETRIES<<4) | | ||
159 | (OHCI1394_MAX_PHYS_RESP_RETRIES<<8)); | ||
160 | |||
161 | /* We don't want hardware swapping */ | ||
162 | reg_write(ohci, OHCI1394_HCControlClear, | ||
163 | OHCI1394_HCControl_noByteSwapData); | ||
164 | |||
165 | /* Enable link */ | ||
166 | reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_linkEnable); | ||
167 | |||
168 | /* If anything is connected to a port, make sure it is enabled */ | ||
169 | num_ports = get_phy_reg(ohci, 2) & 0xf; | ||
170 | for (i = 0; i < num_ports; i++) { | ||
171 | unsigned int status; | ||
172 | |||
173 | set_phy_reg(ohci, 7, i); | ||
174 | status = get_phy_reg(ohci, 8); | ||
175 | |||
176 | if (status & 0x20) | ||
177 | set_phy_reg(ohci, 8, status & ~1); | ||
178 | } | ||
179 | } | ||
180 | |||
181 | /** | ||
182 | * init_ohci1394_wait_for_busresets - wait until bus resets are completed | ||
183 | * | ||
184 | * OHCI1394 initialization itself and any device going on- or offline | ||
185 | * and any cable issue cause an IEEE1394 bus reset. The OHCI1394 spec | ||
186 | * specifies that physical DMA is disabled on each bus reset and it | ||
187 | * has to be enabled after each bus reset when needed. We resort | ||
188 | * to polling here because on early boot, we have no interrupts. | ||
189 | */ | ||
190 | static inline void __init init_ohci1394_wait_for_busresets(struct ohci *ohci) | ||
191 | { | ||
192 | int i, events; | ||
193 | |||
194 | for (i = 0; i < 9; i++) { | ||
195 | mdelay(200); | ||
196 | events = reg_read(ohci, OHCI1394_IntEventSet); | ||
197 | if (events & OHCI1394_busReset) | ||
198 | reg_write(ohci, OHCI1394_IntEventClear, | ||
199 | OHCI1394_busReset); | ||
200 | } | ||
201 | } | ||
202 | |||
203 | /** | ||
204 | * init_ohci1394_enable_physical_dma - Enable physical DMA for remote debugging | ||
205 | * This enables remote DMA access over IEEE1394 from every host for the low | ||
206 | * 4GB of address space. DMA accesses above 4GB are not available currently. | ||
207 | */ | ||
208 | static inline void __init init_ohci1394_enable_physical_dma(struct ohci *ohci) | ||
209 | { | ||
210 | reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 0xffffffff); | ||
211 | reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 0xffffffff); | ||
212 | reg_write(ohci, OHCI1394_PhyUpperBound, 0xffff0000); | ||
213 | } | ||
214 | |||
215 | /** | ||
216 | * init_ohci1394_reset_and_init_dma - init controller and enable DMA | ||
217 | * This initializes the given controller and enables the physical DMA engine in it. | ||
218 | */ | ||
219 | static inline void __init init_ohci1394_reset_and_init_dma(struct ohci *ohci) | ||
220 | { | ||
221 | /* Start off with a soft reset to clear everything to a sane state. */ | ||
222 | init_ohci1394_soft_reset(ohci); | ||
223 | |||
224 | /* Accessing some registers without LPS enabled may cause lock up */ | ||
225 | reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS); | ||
226 | |||
227 | /* Disable and clear interrupts */ | ||
228 | reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff); | ||
229 | reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff); | ||
230 | |||
231 | mdelay(50); /* Wait 50msec to make sure we have full link enabled */ | ||
232 | |||
233 | init_ohci1394_initialize(ohci); | ||
234 | /* | ||
235 | * The initialization causes at least one IEEE1394 bus reset. Enabling | ||
236 | * physical DMA only works *after* *all* bus resets have calmed down: | ||
237 | */ | ||
238 | init_ohci1394_wait_for_busresets(ohci); | ||
239 | |||
240 | /* We had to wait and do this now if we want to debug early problems */ | ||
241 | init_ohci1394_enable_physical_dma(ohci); | ||
242 | } | ||
243 | |||
244 | /** | ||
245 | * init_ohci1394_controller - Map the registers of the controller and init DMA | ||
246 | * This maps the registers of the specified controller and initializes it | ||
247 | */ | ||
248 | static inline void __init init_ohci1394_controller(int num, int slot, int func) | ||
249 | { | ||
250 | unsigned long ohci_base; | ||
251 | struct ohci ohci; | ||
252 | |||
253 | printk(KERN_INFO "init_ohci1394_dma: initializing OHCI-1394" | ||
254 | " at %02x:%02x.%x\n", num, slot, func); | ||
255 | |||
256 | ohci_base = read_pci_config(num, slot, func, PCI_BASE_ADDRESS_0+(0<<2)) | ||
257 | & PCI_BASE_ADDRESS_MEM_MASK; | ||
258 | |||
259 | set_fixmap_nocache(FIX_OHCI1394_BASE, ohci_base); | ||
260 | |||
261 | ohci.registers = (void __iomem *)fix_to_virt(FIX_OHCI1394_BASE); | ||
262 | |||
263 | init_ohci1394_reset_and_init_dma(&ohci); | ||
264 | } | ||
265 | |||
266 | /** | ||
267 | * init_ohci1394_dma_on_all_controllers - scan for OHCI1394 controllers and init DMA on them | ||
268 | * Scans the whole PCI space for OHCI1394 controllers and inits DMA on them | ||
269 | */ | ||
270 | void __init init_ohci1394_dma_on_all_controllers(void) | ||
271 | { | ||
272 | int num, slot, func; | ||
273 | u32 class; | ||
274 | |||
275 | if (!early_pci_allowed()) | ||
276 | return; | ||
277 | |||
278 | /* Poor man's PCI discovery, the only thing we can do at early boot */ | ||
279 | for (num = 0; num < 32; num++) { | ||
280 | for (slot = 0; slot < 32; slot++) { | ||
281 | for (func = 0; func < 8; func++) { | ||
282 | class = read_pci_config(num, slot, func, | ||
283 | PCI_CLASS_REVISION); | ||
284 | if (class == 0xffffffff) | ||
285 | continue; /* No device at this func */ | ||
286 | |||
287 | if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI) | ||
288 | continue; /* Not an OHCI-1394 device */ | ||
289 | |||
290 | init_ohci1394_controller(num, slot, func); | ||
291 | break; /* Assume one controller per device */ | ||
292 | } | ||
293 | } | ||
294 | } | ||
295 | printk(KERN_INFO "init_ohci1394_dma: finished initializing OHCI DMA\n"); | ||
296 | } | ||
297 | |||
298 | /** | ||
299 | * setup_ohci1394_dma - enables early OHCI1394 DMA initialization | ||
300 | */ | ||
301 | static int __init setup_ohci1394_dma(char *opt) | ||
302 | { | ||
303 | if (!strcmp(opt, "early")) | ||
304 | init_ohci1394_dma_early = 1; | ||
305 | return 0; | ||
306 | } | ||
307 | |||
308 | /* passing ohci1394_dma=early on boot causes early OHCI1394 DMA initialization */ | ||
309 | early_param("ohci1394_dma", setup_ohci1394_dma); | ||
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c index 33f8421c71cc..b9762d07198d 100644 --- a/drivers/firewire/net.c +++ b/drivers/firewire/net.c | |||
@@ -7,6 +7,7 @@ | |||
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/bug.h> | 9 | #include <linux/bug.h> |
10 | #include <linux/delay.h> | ||
10 | #include <linux/device.h> | 11 | #include <linux/device.h> |
11 | #include <linux/ethtool.h> | 12 | #include <linux/ethtool.h> |
12 | #include <linux/firewire.h> | 13 | #include <linux/firewire.h> |
@@ -27,8 +28,14 @@ | |||
27 | #include <asm/unaligned.h> | 28 | #include <asm/unaligned.h> |
28 | #include <net/arp.h> | 29 | #include <net/arp.h> |
29 | 30 | ||
30 | #define FWNET_MAX_FRAGMENTS 25 /* arbitrary limit */ | 31 | /* rx limits */ |
31 | #define FWNET_ISO_PAGE_COUNT (PAGE_SIZE < 16 * 1024 ? 4 : 2) | 32 | #define FWNET_MAX_FRAGMENTS 30 /* arbitrary, > TX queue depth */ |
33 | #define FWNET_ISO_PAGE_COUNT (PAGE_SIZE < 16*1024 ? 4 : 2) | ||
34 | |||
35 | /* tx limits */ | ||
36 | #define FWNET_MAX_QUEUED_DATAGRAMS 20 /* < 64 = number of tlabels */ | ||
37 | #define FWNET_MIN_QUEUED_DATAGRAMS 10 /* should keep AT DMA busy enough */ | ||
38 | #define FWNET_TX_QUEUE_LEN FWNET_MAX_QUEUED_DATAGRAMS /* ? */ | ||
32 | 39 | ||
33 | #define IEEE1394_BROADCAST_CHANNEL 31 | 40 | #define IEEE1394_BROADCAST_CHANNEL 31 |
34 | #define IEEE1394_ALL_NODES (0xffc0 | 0x003f) | 41 | #define IEEE1394_ALL_NODES (0xffc0 | 0x003f) |
@@ -170,16 +177,10 @@ struct fwnet_device { | |||
170 | struct fw_address_handler handler; | 177 | struct fw_address_handler handler; |
171 | u64 local_fifo; | 178 | u64 local_fifo; |
172 | 179 | ||
173 | /* List of packets to be sent */ | 180 | /* Number of tx datagrams that have been queued but not yet acked */ |
174 | struct list_head packet_list; | 181 | int queued_datagrams; |
175 | /* | ||
176 | * List of packets that were broadcasted. When we get an ISO interrupt | ||
177 | * one of them has been sent | ||
178 | */ | ||
179 | struct list_head broadcasted_list; | ||
180 | /* List of packets that have been sent but not yet acked */ | ||
181 | struct list_head sent_list; | ||
182 | 182 | ||
183 | int peer_count; | ||
183 | struct list_head peer_list; | 184 | struct list_head peer_list; |
184 | struct fw_card *card; | 185 | struct fw_card *card; |
185 | struct net_device *netdev; | 186 | struct net_device *netdev; |
@@ -190,13 +191,14 @@ struct fwnet_peer { | |||
190 | struct fwnet_device *dev; | 191 | struct fwnet_device *dev; |
191 | u64 guid; | 192 | u64 guid; |
192 | u64 fifo; | 193 | u64 fifo; |
194 | __be32 ip; | ||
193 | 195 | ||
194 | /* guarded by dev->lock */ | 196 | /* guarded by dev->lock */ |
195 | struct list_head pd_list; /* received partial datagrams */ | 197 | struct list_head pd_list; /* received partial datagrams */ |
196 | unsigned pdg_size; /* pd_list size */ | 198 | unsigned pdg_size; /* pd_list size */ |
197 | 199 | ||
198 | u16 datagram_label; /* outgoing datagram label */ | 200 | u16 datagram_label; /* outgoing datagram label */ |
199 | unsigned max_payload; /* includes RFC2374_FRAG_HDR_SIZE overhead */ | 201 | u16 max_payload; /* includes RFC2374_FRAG_HDR_SIZE overhead */ |
200 | int node_id; | 202 | int node_id; |
201 | int generation; | 203 | int generation; |
202 | unsigned speed; | 204 | unsigned speed; |
@@ -204,22 +206,18 @@ struct fwnet_peer { | |||
204 | 206 | ||
205 | /* This is our task struct. It's used for the packet complete callback. */ | 207 | /* This is our task struct. It's used for the packet complete callback. */ |
206 | struct fwnet_packet_task { | 208 | struct fwnet_packet_task { |
207 | /* | ||
208 | * ptask can actually be on dev->packet_list, dev->broadcasted_list, | ||
209 | * or dev->sent_list depending on its current state. | ||
210 | */ | ||
211 | struct list_head pt_link; | ||
212 | struct fw_transaction transaction; | 209 | struct fw_transaction transaction; |
213 | struct rfc2734_header hdr; | 210 | struct rfc2734_header hdr; |
214 | struct sk_buff *skb; | 211 | struct sk_buff *skb; |
215 | struct fwnet_device *dev; | 212 | struct fwnet_device *dev; |
216 | 213 | ||
217 | int outstanding_pkts; | 214 | int outstanding_pkts; |
218 | unsigned max_payload; | ||
219 | u64 fifo_addr; | 215 | u64 fifo_addr; |
220 | u16 dest_node; | 216 | u16 dest_node; |
217 | u16 max_payload; | ||
221 | u8 generation; | 218 | u8 generation; |
222 | u8 speed; | 219 | u8 speed; |
220 | u8 enqueued; | ||
223 | }; | 221 | }; |
224 | 222 | ||
225 | /* | 223 | /* |
@@ -455,7 +453,7 @@ static bool fwnet_pd_update(struct fwnet_peer *peer, | |||
455 | memcpy(pd->pbuf + frag_off, frag_buf, frag_len); | 453 | memcpy(pd->pbuf + frag_off, frag_buf, frag_len); |
456 | 454 | ||
457 | /* | 455 | /* |
458 | * Move list entry to beginnig of list so that oldest partial | 456 | * Move list entry to beginning of list so that oldest partial |
459 | * datagrams percolate to the end of the list | 457 | * datagrams percolate to the end of the list |
460 | */ | 458 | */ |
461 | list_move_tail(&pd->pd_link, &peer->pd_list); | 459 | list_move_tail(&pd->pd_link, &peer->pd_list); |
@@ -573,6 +571,8 @@ static int fwnet_finish_incoming_packet(struct net_device *net, | |||
573 | peer->speed = sspd; | 571 | peer->speed = sspd; |
574 | if (peer->max_payload > max_payload) | 572 | if (peer->max_payload > max_payload) |
575 | peer->max_payload = max_payload; | 573 | peer->max_payload = max_payload; |
574 | |||
575 | peer->ip = arp1394->sip; | ||
576 | } | 576 | } |
577 | spin_unlock_irqrestore(&dev->lock, flags); | 577 | spin_unlock_irqrestore(&dev->lock, flags); |
578 | 578 | ||
@@ -651,8 +651,6 @@ static int fwnet_finish_incoming_packet(struct net_device *net, | |||
651 | net->stats.rx_packets++; | 651 | net->stats.rx_packets++; |
652 | net->stats.rx_bytes += skb->len; | 652 | net->stats.rx_bytes += skb->len; |
653 | } | 653 | } |
654 | if (netif_queue_stopped(net)) | ||
655 | netif_wake_queue(net); | ||
656 | 654 | ||
657 | return 0; | 655 | return 0; |
658 | 656 | ||
@@ -661,8 +659,6 @@ static int fwnet_finish_incoming_packet(struct net_device *net, | |||
661 | net->stats.rx_dropped++; | 659 | net->stats.rx_dropped++; |
662 | 660 | ||
663 | dev_kfree_skb_any(skb); | 661 | dev_kfree_skb_any(skb); |
664 | if (netif_queue_stopped(net)) | ||
665 | netif_wake_queue(net); | ||
666 | 662 | ||
667 | return -ENOENT; | 663 | return -ENOENT; |
668 | } | 664 | } |
@@ -794,15 +790,10 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len, | |||
794 | * Datagram is not complete, we're done for the | 790 | * Datagram is not complete, we're done for the |
795 | * moment. | 791 | * moment. |
796 | */ | 792 | */ |
797 | spin_unlock_irqrestore(&dev->lock, flags); | 793 | retval = 0; |
798 | |||
799 | return 0; | ||
800 | fail: | 794 | fail: |
801 | spin_unlock_irqrestore(&dev->lock, flags); | 795 | spin_unlock_irqrestore(&dev->lock, flags); |
802 | 796 | ||
803 | if (netif_queue_stopped(net)) | ||
804 | netif_wake_queue(net); | ||
805 | |||
806 | return retval; | 797 | return retval; |
807 | } | 798 | } |
808 | 799 | ||
@@ -890,7 +881,9 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context, | |||
890 | 881 | ||
891 | spin_unlock_irqrestore(&dev->lock, flags); | 882 | spin_unlock_irqrestore(&dev->lock, flags); |
892 | 883 | ||
893 | if (retval < 0) | 884 | if (retval >= 0) |
885 | fw_iso_context_queue_flush(dev->broadcast_rcv_context); | ||
886 | else | ||
894 | fw_error("requeue failed\n"); | 887 | fw_error("requeue failed\n"); |
895 | } | 888 | } |
896 | 889 | ||
@@ -902,11 +895,19 @@ static void fwnet_free_ptask(struct fwnet_packet_task *ptask) | |||
902 | kmem_cache_free(fwnet_packet_task_cache, ptask); | 895 | kmem_cache_free(fwnet_packet_task_cache, ptask); |
903 | } | 896 | } |
904 | 897 | ||
898 | /* Caller must hold dev->lock. */ | ||
899 | static void dec_queued_datagrams(struct fwnet_device *dev) | ||
900 | { | ||
901 | if (--dev->queued_datagrams == FWNET_MIN_QUEUED_DATAGRAMS) | ||
902 | netif_wake_queue(dev->netdev); | ||
903 | } | ||
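dec_queued_datagrams() is the low-watermark half of the driver's new flow control; the high-watermark half lives in fwnet_tx() further down, which stops the queue once FWNET_MAX_QUEUED_DATAGRAMS (20) datagrams are in flight. Side by side, both under dev->lock:

    /* tx path: account the datagram, stop the stack at the high mark */
    if (++dev->queued_datagrams == FWNET_MAX_QUEUED_DATAGRAMS)
        netif_stop_queue(dev->netdev);

    /* completion path: drop the count, wake the stack at the low mark */
    if (--dev->queued_datagrams == FWNET_MIN_QUEUED_DATAGRAMS)
        netif_wake_queue(dev->netdev);

Testing with == rather than >= works because the count changes by one datagram at a time, so every crossing passes through the exact watermark value.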
904 | |||
905 | static int fwnet_send_packet(struct fwnet_packet_task *ptask); | 905 | static int fwnet_send_packet(struct fwnet_packet_task *ptask); |
906 | 906 | ||
907 | static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask) | 907 | static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask) |
908 | { | 908 | { |
909 | struct fwnet_device *dev = ptask->dev; | 909 | struct fwnet_device *dev = ptask->dev; |
910 | struct sk_buff *skb = ptask->skb; | ||
910 | unsigned long flags; | 911 | unsigned long flags; |
911 | bool free; | 912 | bool free; |
912 | 913 | ||
@@ -915,10 +916,14 @@ static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask) | |||
915 | ptask->outstanding_pkts--; | 916 | ptask->outstanding_pkts--; |
916 | 917 | ||
917 | /* Check whether we or the networking TX soft-IRQ is last user. */ | 918 | /* Check whether we or the networking TX soft-IRQ is last user. */ |
918 | free = (ptask->outstanding_pkts == 0 && !list_empty(&ptask->pt_link)); | 919 | free = (ptask->outstanding_pkts == 0 && ptask->enqueued); |
920 | if (free) | ||
921 | dec_queued_datagrams(dev); | ||
919 | 922 | ||
920 | if (ptask->outstanding_pkts == 0) | 923 | if (ptask->outstanding_pkts == 0) { |
921 | list_del(&ptask->pt_link); | 924 | dev->netdev->stats.tx_packets++; |
925 | dev->netdev->stats.tx_bytes += skb->len; | ||
926 | } | ||
922 | 927 | ||
923 | spin_unlock_irqrestore(&dev->lock, flags); | 928 | spin_unlock_irqrestore(&dev->lock, flags); |
924 | 929 | ||
@@ -927,7 +932,6 @@ static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask) | |||
927 | u16 fg_off; | 932 | u16 fg_off; |
928 | u16 datagram_label; | 933 | u16 datagram_label; |
929 | u16 lf; | 934 | u16 lf; |
930 | struct sk_buff *skb; | ||
931 | 935 | ||
932 | /* Update the ptask to point to the next fragment and send it */ | 936 | /* Update the ptask to point to the next fragment and send it */ |
933 | lf = fwnet_get_hdr_lf(&ptask->hdr); | 937 | lf = fwnet_get_hdr_lf(&ptask->hdr); |
@@ -954,7 +958,7 @@ static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask) | |||
954 | datagram_label = fwnet_get_hdr_dgl(&ptask->hdr); | 958 | datagram_label = fwnet_get_hdr_dgl(&ptask->hdr); |
955 | break; | 959 | break; |
956 | } | 960 | } |
957 | skb = ptask->skb; | 961 | |
958 | skb_pull(skb, ptask->max_payload); | 962 | skb_pull(skb, ptask->max_payload); |
959 | if (ptask->outstanding_pkts > 1) { | 963 | if (ptask->outstanding_pkts > 1) { |
960 | fwnet_make_sf_hdr(&ptask->hdr, RFC2374_HDR_INTFRAG, | 964 | fwnet_make_sf_hdr(&ptask->hdr, RFC2374_HDR_INTFRAG, |
@@ -971,18 +975,52 @@ static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask) | |||
971 | fwnet_free_ptask(ptask); | 975 | fwnet_free_ptask(ptask); |
972 | } | 976 | } |
973 | 977 | ||
978 | static void fwnet_transmit_packet_failed(struct fwnet_packet_task *ptask) | ||
979 | { | ||
980 | struct fwnet_device *dev = ptask->dev; | ||
981 | unsigned long flags; | ||
982 | bool free; | ||
983 | |||
984 | spin_lock_irqsave(&dev->lock, flags); | ||
985 | |||
986 | /* One fragment failed; don't try to send remaining fragments. */ | ||
987 | ptask->outstanding_pkts = 0; | ||
988 | |||
989 | /* Check whether we or the networking TX soft-IRQ is last user. */ | ||
990 | free = ptask->enqueued; | ||
991 | if (free) | ||
992 | dec_queued_datagrams(dev); | ||
993 | |||
994 | dev->netdev->stats.tx_dropped++; | ||
995 | dev->netdev->stats.tx_errors++; | ||
996 | |||
997 | spin_unlock_irqrestore(&dev->lock, flags); | ||
998 | |||
999 | if (free) | ||
1000 | fwnet_free_ptask(ptask); | ||
1001 | } | ||
1002 | |||
974 | static void fwnet_write_complete(struct fw_card *card, int rcode, | 1003 | static void fwnet_write_complete(struct fw_card *card, int rcode, |
975 | void *payload, size_t length, void *data) | 1004 | void *payload, size_t length, void *data) |
976 | { | 1005 | { |
977 | struct fwnet_packet_task *ptask; | 1006 | struct fwnet_packet_task *ptask = data; |
978 | 1007 | static unsigned long j; | |
979 | ptask = data; | 1008 | static int last_rcode, errors_skipped; |
980 | 1009 | ||
981 | if (rcode == RCODE_COMPLETE) | 1010 | if (rcode == RCODE_COMPLETE) { |
982 | fwnet_transmit_packet_done(ptask); | 1011 | fwnet_transmit_packet_done(ptask); |
983 | else | 1012 | } else { |
984 | fw_error("fwnet_write_complete: failed: %x\n", rcode); | 1013 | fwnet_transmit_packet_failed(ptask); |
985 | /* ??? error recovery */ | 1014 | |
1015 | if (printk_timed_ratelimit(&j, 1000) || rcode != last_rcode) { | ||
1016 | fw_error("fwnet_write_complete: " | ||
1017 | "failed: %x (skipped %d)\n", rcode, errors_skipped); | ||
1018 | |||
1019 | errors_skipped = 0; | ||
1020 | last_rcode = rcode; | ||
1021 | } else | ||
1022 | errors_skipped++; | ||
1023 | } | ||
986 | } | 1024 | } |
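printk_timed_ratelimit() returns true at most once per interval (1000 ms here), so a burst of identical failures produces one log line plus a suppressed count, while a changed rcode is always logged immediately. The same pattern in isolation (pr_err() standing in for fw_error()):

    static void log_tx_failure(int rcode)
    {
        static unsigned long last_jiffies;
        static int last_rcode, skipped;

        if (printk_timed_ratelimit(&last_jiffies, 1000) ||
            rcode != last_rcode) {
            pr_err("write failed: %x (skipped %d)\n", rcode, skipped);
            skipped = 0;
            last_rcode = rcode;
        } else {
            skipped++;
        }
    }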
987 | 1025 | ||
988 | static int fwnet_send_packet(struct fwnet_packet_task *ptask) | 1026 | static int fwnet_send_packet(struct fwnet_packet_task *ptask) |
@@ -1040,9 +1078,11 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask) | |||
1040 | spin_lock_irqsave(&dev->lock, flags); | 1078 | spin_lock_irqsave(&dev->lock, flags); |
1041 | 1079 | ||
1042 | /* If the AT tasklet already ran, we may be last user. */ | 1080 | /* If the AT tasklet already ran, we may be last user. */ |
1043 | free = (ptask->outstanding_pkts == 0 && list_empty(&ptask->pt_link)); | 1081 | free = (ptask->outstanding_pkts == 0 && !ptask->enqueued); |
1044 | if (!free) | 1082 | if (!free) |
1045 | list_add_tail(&ptask->pt_link, &dev->broadcasted_list); | 1083 | ptask->enqueued = true; |
1084 | else | ||
1085 | dec_queued_datagrams(dev); | ||
1046 | 1086 | ||
1047 | spin_unlock_irqrestore(&dev->lock, flags); | 1087 | spin_unlock_irqrestore(&dev->lock, flags); |
1048 | 1088 | ||
@@ -1057,9 +1097,11 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask) | |||
1057 | spin_lock_irqsave(&dev->lock, flags); | 1097 | spin_lock_irqsave(&dev->lock, flags); |
1058 | 1098 | ||
1059 | /* If the AT tasklet already ran, we may be last user. */ | 1099 | /* If the AT tasklet already ran, we may be last user. */ |
1060 | free = (ptask->outstanding_pkts == 0 && list_empty(&ptask->pt_link)); | 1100 | free = (ptask->outstanding_pkts == 0 && !ptask->enqueued); |
1061 | if (!free) | 1101 | if (!free) |
1062 | list_add_tail(&ptask->pt_link, &dev->sent_list); | 1102 | ptask->enqueued = true; |
1103 | else | ||
1104 | dec_queued_datagrams(dev); | ||
1063 | 1105 | ||
1064 | spin_unlock_irqrestore(&dev->lock, flags); | 1106 | spin_unlock_irqrestore(&dev->lock, flags); |
1065 | 1107 | ||
@@ -1186,6 +1228,14 @@ static int fwnet_broadcast_start(struct fwnet_device *dev) | |||
1186 | return retval; | 1228 | return retval; |
1187 | } | 1229 | } |
1188 | 1230 | ||
1231 | static void set_carrier_state(struct fwnet_device *dev) | ||
1232 | { | ||
1233 | if (dev->peer_count > 1) | ||
1234 | netif_carrier_on(dev->netdev); | ||
1235 | else | ||
1236 | netif_carrier_off(dev->netdev); | ||
1237 | } | ||
1238 | |||
1189 | /* ifup */ | 1239 | /* ifup */ |
1190 | static int fwnet_open(struct net_device *net) | 1240 | static int fwnet_open(struct net_device *net) |
1191 | { | 1241 | { |
@@ -1199,6 +1249,10 @@ static int fwnet_open(struct net_device *net) | |||
1199 | } | 1249 | } |
1200 | netif_start_queue(net); | 1250 | netif_start_queue(net); |
1201 | 1251 | ||
1252 | spin_lock_irq(&dev->lock); | ||
1253 | set_carrier_state(dev); | ||
1254 | spin_unlock_irq(&dev->lock); | ||
1255 | |||
1202 | return 0; | 1256 | return 0; |
1203 | } | 1257 | } |
1204 | 1258 | ||
@@ -1225,6 +1279,15 @@ static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net) | |||
1225 | struct fwnet_peer *peer; | 1279 | struct fwnet_peer *peer; |
1226 | unsigned long flags; | 1280 | unsigned long flags; |
1227 | 1281 | ||
1282 | spin_lock_irqsave(&dev->lock, flags); | ||
1283 | |||
1284 | /* Can this happen? */ | ||
1285 | if (netif_queue_stopped(dev->netdev)) { | ||
1286 | spin_unlock_irqrestore(&dev->lock, flags); | ||
1287 | |||
1288 | return NETDEV_TX_BUSY; | ||
1289 | } | ||
1290 | |||
1228 | ptask = kmem_cache_alloc(fwnet_packet_task_cache, GFP_ATOMIC); | 1291 | ptask = kmem_cache_alloc(fwnet_packet_task_cache, GFP_ATOMIC); |
1229 | if (ptask == NULL) | 1292 | if (ptask == NULL) |
1230 | goto fail; | 1293 | goto fail; |
@@ -1243,9 +1306,6 @@ static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net) | |||
1243 | proto = hdr_buf.h_proto; | 1306 | proto = hdr_buf.h_proto; |
1244 | dg_size = skb->len; | 1307 | dg_size = skb->len; |
1245 | 1308 | ||
1246 | /* serialize access to peer, including peer->datagram_label */ | ||
1247 | spin_lock_irqsave(&dev->lock, flags); | ||
1248 | |||
1249 | /* | 1309 | /* |
1250 | * Set the transmission type for the packet. ARP packets and IP | 1310 | * Set the transmission type for the packet. ARP packets and IP |
1251 | * broadcast packets are sent via GASP. | 1311 | * broadcast packets are sent via GASP. |
@@ -1267,7 +1327,7 @@ static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net) | |||
1267 | 1327 | ||
1268 | peer = fwnet_peer_find_by_guid(dev, be64_to_cpu(guid)); | 1328 | peer = fwnet_peer_find_by_guid(dev, be64_to_cpu(guid)); |
1269 | if (!peer || peer->fifo == FWNET_NO_FIFO_ADDR) | 1329 | if (!peer || peer->fifo == FWNET_NO_FIFO_ADDR) |
1270 | goto fail_unlock; | 1330 | goto fail; |
1271 | 1331 | ||
1272 | generation = peer->generation; | 1332 | generation = peer->generation; |
1273 | dest_node = peer->node_id; | 1333 | dest_node = peer->node_id; |
@@ -1321,18 +1381,21 @@ static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net) | |||
1321 | max_payload += RFC2374_FRAG_HDR_SIZE; | 1381 | max_payload += RFC2374_FRAG_HDR_SIZE; |
1322 | } | 1382 | } |
1323 | 1383 | ||
1384 | if (++dev->queued_datagrams == FWNET_MAX_QUEUED_DATAGRAMS) | ||
1385 | netif_stop_queue(dev->netdev); | ||
1386 | |||
1324 | spin_unlock_irqrestore(&dev->lock, flags); | 1387 | spin_unlock_irqrestore(&dev->lock, flags); |
1325 | 1388 | ||
1326 | ptask->max_payload = max_payload; | 1389 | ptask->max_payload = max_payload; |
1327 | INIT_LIST_HEAD(&ptask->pt_link); | 1390 | ptask->enqueued = 0; |
1328 | 1391 | ||
1329 | fwnet_send_packet(ptask); | 1392 | fwnet_send_packet(ptask); |
1330 | 1393 | ||
1331 | return NETDEV_TX_OK; | 1394 | return NETDEV_TX_OK; |
1332 | 1395 | ||
1333 | fail_unlock: | ||
1334 | spin_unlock_irqrestore(&dev->lock, flags); | ||
1335 | fail: | 1396 | fail: |
1397 | spin_unlock_irqrestore(&dev->lock, flags); | ||
1398 | |||
1336 | if (ptask) | 1399 | if (ptask) |
1337 | kmem_cache_free(fwnet_packet_task_cache, ptask); | 1400 | kmem_cache_free(fwnet_packet_task_cache, ptask); |
1338 | 1401 | ||
@@ -1361,15 +1424,8 @@ static int fwnet_change_mtu(struct net_device *net, int new_mtu) | |||
1361 | return 0; | 1424 | return 0; |
1362 | } | 1425 | } |
1363 | 1426 | ||
1364 | static void fwnet_get_drvinfo(struct net_device *net, | ||
1365 | struct ethtool_drvinfo *info) | ||
1366 | { | ||
1367 | strcpy(info->driver, KBUILD_MODNAME); | ||
1368 | strcpy(info->bus_info, "ieee1394"); | ||
1369 | } | ||
1370 | |||
1371 | static const struct ethtool_ops fwnet_ethtool_ops = { | 1427 | static const struct ethtool_ops fwnet_ethtool_ops = { |
1372 | .get_drvinfo = fwnet_get_drvinfo, | 1428 | .get_link = ethtool_op_get_link, |
1373 | }; | 1429 | }; |
1374 | 1430 | ||
1375 | static const struct net_device_ops fwnet_netdev_ops = { | 1431 | static const struct net_device_ops fwnet_netdev_ops = { |
@@ -1389,8 +1445,8 @@ static void fwnet_init_dev(struct net_device *net) | |||
1389 | net->addr_len = FWNET_ALEN; | 1445 | net->addr_len = FWNET_ALEN; |
1390 | net->hard_header_len = FWNET_HLEN; | 1446 | net->hard_header_len = FWNET_HLEN; |
1391 | net->type = ARPHRD_IEEE1394; | 1447 | net->type = ARPHRD_IEEE1394; |
1392 | net->tx_queue_len = 10; | 1448 | net->tx_queue_len = FWNET_TX_QUEUE_LEN; |
1393 | SET_ETHTOOL_OPS(net, &fwnet_ethtool_ops); | 1449 | net->ethtool_ops = &fwnet_ethtool_ops; |
1394 | } | 1450 | } |
1395 | 1451 | ||
1396 | /* caller must hold fwnet_device_mutex */ | 1452 | /* caller must hold fwnet_device_mutex */ |
@@ -1419,6 +1475,7 @@ static int fwnet_add_peer(struct fwnet_device *dev, | |||
1419 | peer->dev = dev; | 1475 | peer->dev = dev; |
1420 | peer->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4]; | 1476 | peer->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4]; |
1421 | peer->fifo = FWNET_NO_FIFO_ADDR; | 1477 | peer->fifo = FWNET_NO_FIFO_ADDR; |
1478 | peer->ip = 0; | ||
1422 | INIT_LIST_HEAD(&peer->pd_list); | 1479 | INIT_LIST_HEAD(&peer->pd_list); |
1423 | peer->pdg_size = 0; | 1480 | peer->pdg_size = 0; |
1424 | peer->datagram_label = 0; | 1481 | peer->datagram_label = 0; |
@@ -1431,6 +1488,8 @@ static int fwnet_add_peer(struct fwnet_device *dev, | |||
1431 | 1488 | ||
1432 | spin_lock_irq(&dev->lock); | 1489 | spin_lock_irq(&dev->lock); |
1433 | list_add_tail(&peer->peer_link, &dev->peer_list); | 1490 | list_add_tail(&peer->peer_link, &dev->peer_list); |
1491 | dev->peer_count++; | ||
1492 | set_carrier_state(dev); | ||
1434 | spin_unlock_irq(&dev->lock); | 1493 | spin_unlock_irq(&dev->lock); |
1435 | 1494 | ||
1436 | return 0; | 1495 | return 0; |
@@ -1470,14 +1529,9 @@ static int fwnet_probe(struct device *_dev) | |||
1470 | dev->broadcast_rcv_context = NULL; | 1529 | dev->broadcast_rcv_context = NULL; |
1471 | dev->broadcast_xmt_max_payload = 0; | 1530 | dev->broadcast_xmt_max_payload = 0; |
1472 | dev->broadcast_xmt_datagramlabel = 0; | 1531 | dev->broadcast_xmt_datagramlabel = 0; |
1473 | |||
1474 | dev->local_fifo = FWNET_NO_FIFO_ADDR; | 1532 | dev->local_fifo = FWNET_NO_FIFO_ADDR; |
1475 | 1533 | dev->queued_datagrams = 0; | |
1476 | INIT_LIST_HEAD(&dev->packet_list); | ||
1477 | INIT_LIST_HEAD(&dev->broadcasted_list); | ||
1478 | INIT_LIST_HEAD(&dev->sent_list); | ||
1479 | INIT_LIST_HEAD(&dev->peer_list); | 1534 | INIT_LIST_HEAD(&dev->peer_list); |
1480 | |||
1481 | dev->card = card; | 1535 | dev->card = card; |
1482 | dev->netdev = net; | 1536 | dev->netdev = net; |
1483 | 1537 | ||
@@ -1516,13 +1570,15 @@ static int fwnet_probe(struct device *_dev) | |||
1516 | return ret; | 1570 | return ret; |
1517 | } | 1571 | } |
1518 | 1572 | ||
1519 | static void fwnet_remove_peer(struct fwnet_peer *peer) | 1573 | static void fwnet_remove_peer(struct fwnet_peer *peer, struct fwnet_device *dev) |
1520 | { | 1574 | { |
1521 | struct fwnet_partial_datagram *pd, *pd_next; | 1575 | struct fwnet_partial_datagram *pd, *pd_next; |
1522 | 1576 | ||
1523 | spin_lock_irq(&peer->dev->lock); | 1577 | spin_lock_irq(&dev->lock); |
1524 | list_del(&peer->peer_link); | 1578 | list_del(&peer->peer_link); |
1525 | spin_unlock_irq(&peer->dev->lock); | 1579 | dev->peer_count--; |
1580 | set_carrier_state(dev); | ||
1581 | spin_unlock_irq(&dev->lock); | ||
1526 | 1582 | ||
1527 | list_for_each_entry_safe(pd, pd_next, &peer->pd_list, pd_link) | 1583 | list_for_each_entry_safe(pd, pd_next, &peer->pd_list, pd_link) |
1528 | fwnet_pd_delete(pd); | 1584 | fwnet_pd_delete(pd); |
@@ -1535,14 +1591,17 @@ static int fwnet_remove(struct device *_dev) | |||
1535 | struct fwnet_peer *peer = dev_get_drvdata(_dev); | 1591 | struct fwnet_peer *peer = dev_get_drvdata(_dev); |
1536 | struct fwnet_device *dev = peer->dev; | 1592 | struct fwnet_device *dev = peer->dev; |
1537 | struct net_device *net; | 1593 | struct net_device *net; |
1538 | struct fwnet_packet_task *ptask, *pt_next; | 1594 | int i; |
1539 | 1595 | ||
1540 | mutex_lock(&fwnet_device_mutex); | 1596 | mutex_lock(&fwnet_device_mutex); |
1541 | 1597 | ||
1542 | fwnet_remove_peer(peer); | 1598 | net = dev->netdev; |
1599 | if (net && peer->ip) | ||
1600 | arp_invalidate(net, peer->ip); | ||
1601 | |||
1602 | fwnet_remove_peer(peer, dev); | ||
1543 | 1603 | ||
1544 | if (list_empty(&dev->peer_list)) { | 1604 | if (list_empty(&dev->peer_list)) { |
1545 | net = dev->netdev; | ||
1546 | unregister_netdev(net); | 1605 | unregister_netdev(net); |
1547 | 1606 | ||
1548 | if (dev->local_fifo != FWNET_NO_FIFO_ADDR) | 1607 | if (dev->local_fifo != FWNET_NO_FIFO_ADDR) |
@@ -1553,21 +1612,9 @@ static int fwnet_remove(struct device *_dev) | |||
1553 | dev->card); | 1612 | dev->card); |
1554 | fw_iso_context_destroy(dev->broadcast_rcv_context); | 1613 | fw_iso_context_destroy(dev->broadcast_rcv_context); |
1555 | } | 1614 | } |
1556 | list_for_each_entry_safe(ptask, pt_next, | 1615 | for (i = 0; dev->queued_datagrams && i < 5; i++) |
1557 | &dev->packet_list, pt_link) { | 1616 | ssleep(1); |
1558 | dev_kfree_skb_any(ptask->skb); | 1617 | WARN_ON(dev->queued_datagrams); |
1559 | kmem_cache_free(fwnet_packet_task_cache, ptask); | ||
1560 | } | ||
1561 | list_for_each_entry_safe(ptask, pt_next, | ||
1562 | &dev->broadcasted_list, pt_link) { | ||
1563 | dev_kfree_skb_any(ptask->skb); | ||
1564 | kmem_cache_free(fwnet_packet_task_cache, ptask); | ||
1565 | } | ||
1566 | list_for_each_entry_safe(ptask, pt_next, | ||
1567 | &dev->sent_list, pt_link) { | ||
1568 | dev_kfree_skb_any(ptask->skb); | ||
1569 | kmem_cache_free(fwnet_packet_task_cache, ptask); | ||
1570 | } | ||
1571 | list_del(&dev->dev_link); | 1618 | list_del(&dev->dev_link); |
1572 | 1619 | ||
1573 | free_netdev(net); | 1620 | free_netdev(net); |
diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c index 8528b10763ed..0618145376ad 100644 --- a/drivers/firewire/nosy.c +++ b/drivers/firewire/nosy.c | |||
@@ -302,7 +302,7 @@ nosy_open(struct inode *inode, struct file *file) | |||
302 | 302 | ||
303 | file->private_data = client; | 303 | file->private_data = client; |
304 | 304 | ||
305 | return 0; | 305 | return nonseekable_open(inode, file); |
306 | fail: | 306 | fail: |
307 | kfree(client); | 307 | kfree(client); |
308 | lynx_put(lynx); | 308 | lynx_put(lynx); |
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index 9dcb17d51aee..ebb897329c1e 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c | |||
@@ -18,6 +18,7 @@ | |||
18 | * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | 18 | * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. |
19 | */ | 19 | */ |
20 | 20 | ||
21 | #include <linux/bitops.h> | ||
21 | #include <linux/bug.h> | 22 | #include <linux/bug.h> |
22 | #include <linux/compiler.h> | 23 | #include <linux/compiler.h> |
23 | #include <linux/delay.h> | 24 | #include <linux/delay.h> |
@@ -40,6 +41,7 @@ | |||
40 | #include <linux/spinlock.h> | 41 | #include <linux/spinlock.h> |
41 | #include <linux/string.h> | 42 | #include <linux/string.h> |
42 | #include <linux/time.h> | 43 | #include <linux/time.h> |
44 | #include <linux/vmalloc.h> | ||
43 | 45 | ||
44 | #include <asm/byteorder.h> | 46 | #include <asm/byteorder.h> |
45 | #include <asm/page.h> | 47 | #include <asm/page.h> |
@@ -80,17 +82,23 @@ struct descriptor { | |||
80 | #define COMMAND_PTR(regs) ((regs) + 12) | 82 | #define COMMAND_PTR(regs) ((regs) + 12) |
81 | #define CONTEXT_MATCH(regs) ((regs) + 16) | 83 | #define CONTEXT_MATCH(regs) ((regs) + 16) |
82 | 84 | ||
83 | struct ar_buffer { | 85 | #define AR_BUFFER_SIZE (32*1024) |
84 | struct descriptor descriptor; | 86 | #define AR_BUFFERS_MIN DIV_ROUND_UP(AR_BUFFER_SIZE, PAGE_SIZE) |
85 | struct ar_buffer *next; | 87 | /* we need at least two pages for proper list management */ |
86 | __le32 data[0]; | 88 | #define AR_BUFFERS (AR_BUFFERS_MIN >= 2 ? AR_BUFFERS_MIN : 2) |
87 | }; | 89 | |
90 | #define MAX_ASYNC_PAYLOAD 4096 | ||
91 | #define MAX_AR_PACKET_SIZE (16 + MAX_ASYNC_PAYLOAD + 4) | ||
92 | #define AR_WRAPAROUND_PAGES DIV_ROUND_UP(MAX_AR_PACKET_SIZE, PAGE_SIZE) | ||
88 | 93 | ||
89 | struct ar_context { | 94 | struct ar_context { |
90 | struct fw_ohci *ohci; | 95 | struct fw_ohci *ohci; |
91 | struct ar_buffer *current_buffer; | 96 | struct page *pages[AR_BUFFERS]; |
92 | struct ar_buffer *last_buffer; | 97 | void *buffer; |
98 | struct descriptor *descriptors; | ||
99 | dma_addr_t descriptors_bus; | ||
93 | void *pointer; | 100 | void *pointer; |
101 | unsigned int last_buffer_index; | ||
94 | u32 regs; | 102 | u32 regs; |
95 | struct tasklet_struct tasklet; | 103 | struct tasklet_struct tasklet; |
96 | }; | 104 | }; |
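With the common 4 KiB page size the new constants work out to: AR_BUFFERS_MIN = 32768/4096 = 8 ring pages (so AR_BUFFERS = 8), MAX_AR_PACKET_SIZE = 16 + 4096 + 4 = 4116 bytes, and AR_WRAPAROUND_PAGES = 2. The wraparound pages allow a packet that starts near the end of the ring to be parsed through one contiguous virtual mapping that extends past the last ring page. A compile-time sanity sketch (4 KiB pages assumed; would live inside a function):

    /* sketch: check the derived AR geometry for 4 KiB pages */
    BUILD_BUG_ON(MAX_AR_PACKET_SIZE != 16 + 4096 + 4);
    if (PAGE_SIZE == 4096) {
        WARN_ON(AR_BUFFERS != 8);           /* 32 KiB ring */
        WARN_ON(AR_WRAPAROUND_PAGES != 2);  /* 4116 bytes span 2 pages */
    }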
@@ -117,6 +125,8 @@ struct context { | |||
117 | struct fw_ohci *ohci; | 125 | struct fw_ohci *ohci; |
118 | u32 regs; | 126 | u32 regs; |
119 | int total_allocation; | 127 | int total_allocation; |
128 | bool running; | ||
129 | bool flushing; | ||
120 | 130 | ||
121 | /* | 131 | /* |
122 | * List of page-sized buffers for storing DMA descriptors. | 132 | * List of page-sized buffers for storing DMA descriptors. |
@@ -161,6 +171,9 @@ struct iso_context { | |||
161 | int excess_bytes; | 171 | int excess_bytes; |
162 | void *header; | 172 | void *header; |
163 | size_t header_length; | 173 | size_t header_length; |
174 | |||
175 | u8 sync; | ||
176 | u8 tags; | ||
164 | }; | 177 | }; |
165 | 178 | ||
166 | #define CONFIG_ROM_SIZE 1024 | 179 | #define CONFIG_ROM_SIZE 1024 |
@@ -177,7 +190,8 @@ struct fw_ohci { | |||
177 | u32 bus_time; | 190 | u32 bus_time; |
178 | bool is_root; | 191 | bool is_root; |
179 | bool csr_state_setclear_abdicate; | 192 | bool csr_state_setclear_abdicate; |
180 | 193 | int n_ir; | |
194 | int n_it; | ||
181 | /* | 195 | /* |
182 | * Spinlock for accessing fw_ohci data. Never call out of | 196 | * Spinlock for accessing fw_ohci data. Never call out of |
183 | * this driver with this lock held. | 197 | * this driver with this lock held. |
@@ -186,14 +200,19 @@ struct fw_ohci { | |||
186 | 200 | ||
187 | struct mutex phy_reg_mutex; | 201 | struct mutex phy_reg_mutex; |
188 | 202 | ||
203 | void *misc_buffer; | ||
204 | dma_addr_t misc_buffer_bus; | ||
205 | |||
189 | struct ar_context ar_request_ctx; | 206 | struct ar_context ar_request_ctx; |
190 | struct ar_context ar_response_ctx; | 207 | struct ar_context ar_response_ctx; |
191 | struct context at_request_ctx; | 208 | struct context at_request_ctx; |
192 | struct context at_response_ctx; | 209 | struct context at_response_ctx; |
193 | 210 | ||
211 | u32 it_context_support; | ||
194 | u32 it_context_mask; /* unoccupied IT contexts */ | 212 | u32 it_context_mask; /* unoccupied IT contexts */ |
195 | struct iso_context *it_context_list; | 213 | struct iso_context *it_context_list; |
196 | u64 ir_context_channels; /* unoccupied channels */ | 214 | u64 ir_context_channels; /* unoccupied channels */ |
215 | u32 ir_context_support; | ||
197 | u32 ir_context_mask; /* unoccupied IR contexts */ | 216 | u32 ir_context_mask; /* unoccupied IR contexts */ |
198 | struct iso_context *ir_context_list; | 217 | struct iso_context *ir_context_list; |
199 | u64 mc_channels; /* channels in use by the multichannel IR context */ | 218 | u64 mc_channels; /* channels in use by the multichannel IR context */ |
@@ -242,8 +261,10 @@ static inline struct fw_ohci *fw_ohci(struct fw_card *card) | |||
242 | 261 | ||
243 | static char ohci_driver_name[] = KBUILD_MODNAME; | 262 | static char ohci_driver_name[] = KBUILD_MODNAME; |
244 | 263 | ||
264 | #define PCI_DEVICE_ID_AGERE_FW643 0x5901 | ||
245 | #define PCI_DEVICE_ID_JMICRON_JMB38X_FW 0x2380 | 265 | #define PCI_DEVICE_ID_JMICRON_JMB38X_FW 0x2380 |
246 | #define PCI_DEVICE_ID_TI_TSB12LV22 0x8009 | 266 | #define PCI_DEVICE_ID_TI_TSB12LV22 0x8009 |
267 | #define PCI_VENDOR_ID_PINNACLE_SYSTEMS 0x11bd | ||
247 | 268 | ||
248 | #define QUIRK_CYCLE_TIMER 1 | 269 | #define QUIRK_CYCLE_TIMER 1 |
249 | #define QUIRK_RESET_PACKET 2 | 270 | #define QUIRK_RESET_PACKET 2 |
@@ -253,18 +274,34 @@ static char ohci_driver_name[] = KBUILD_MODNAME; | |||
253 | 274 | ||
254 | /* In case of multiple matches in ohci_quirks[], only the first one is used. */ | 275 | /* In case of multiple matches in ohci_quirks[], only the first one is used. */ |
255 | static const struct { | 276 | static const struct { |
256 | unsigned short vendor, device, flags; | 277 | unsigned short vendor, device, revision, flags; |
257 | } ohci_quirks[] = { | 278 | } ohci_quirks[] = { |
258 | {PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, QUIRK_CYCLE_TIMER | | 279 | {PCI_VENDOR_ID_AL, PCI_ANY_ID, PCI_ANY_ID, |
259 | QUIRK_RESET_PACKET | | 280 | QUIRK_CYCLE_TIMER}, |
260 | QUIRK_NO_1394A}, | 281 | |
261 | {PCI_VENDOR_ID_TI, PCI_ANY_ID, QUIRK_RESET_PACKET}, | 282 | {PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, PCI_ANY_ID, |
262 | {PCI_VENDOR_ID_AL, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, | 283 | QUIRK_BE_HEADERS}, |
263 | {PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, QUIRK_NO_MSI}, | 284 | |
264 | {PCI_VENDOR_ID_NEC, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, | 285 | {PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6, |
265 | {PCI_VENDOR_ID_VIA, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, | 286 | QUIRK_NO_MSI}, |
266 | {PCI_VENDOR_ID_RICOH, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, | 287 | |
267 | {PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, QUIRK_BE_HEADERS}, | 288 | {PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, PCI_ANY_ID, |
289 | QUIRK_NO_MSI}, | ||
290 | |||
291 | {PCI_VENDOR_ID_NEC, PCI_ANY_ID, PCI_ANY_ID, | ||
292 | QUIRK_CYCLE_TIMER}, | ||
293 | |||
294 | {PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID, | ||
295 | QUIRK_CYCLE_TIMER}, | ||
296 | |||
297 | {PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, PCI_ANY_ID, | ||
298 | QUIRK_CYCLE_TIMER | QUIRK_RESET_PACKET | QUIRK_NO_1394A}, | ||
299 | |||
300 | {PCI_VENDOR_ID_TI, PCI_ANY_ID, PCI_ANY_ID, | ||
301 | QUIRK_RESET_PACKET}, | ||
302 | |||
303 | {PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_ANY_ID, | ||
304 | QUIRK_CYCLE_TIMER | QUIRK_NO_MSI}, | ||
268 | }; | 305 | }; |
269 | 306 | ||
270 | /* This overrides anything that was found in ohci_quirks[]. */ | 307 | /* This overrides anything that was found in ohci_quirks[]. */ |
@@ -304,7 +341,7 @@ static void log_irqs(u32 evt) | |||
304 | !(evt & OHCI1394_busReset)) | 341 | !(evt & OHCI1394_busReset)) |
305 | return; | 342 | return; |
306 | 343 | ||
307 | fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt, | 344 | fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt, |
308 | evt & OHCI1394_selfIDComplete ? " selfID" : "", | 345 | evt & OHCI1394_selfIDComplete ? " selfID" : "", |
309 | evt & OHCI1394_RQPkt ? " AR_req" : "", | 346 | evt & OHCI1394_RQPkt ? " AR_req" : "", |
310 | evt & OHCI1394_RSPkt ? " AR_resp" : "", | 347 | evt & OHCI1394_RSPkt ? " AR_resp" : "", |
@@ -317,6 +354,7 @@ static void log_irqs(u32 evt) | |||
317 | evt & OHCI1394_cycle64Seconds ? " cycle64Seconds" : "", | 354 | evt & OHCI1394_cycle64Seconds ? " cycle64Seconds" : "", |
318 | evt & OHCI1394_cycleInconsistent ? " cycleInconsistent" : "", | 355 | evt & OHCI1394_cycleInconsistent ? " cycleInconsistent" : "", |
319 | evt & OHCI1394_regAccessFail ? " regAccessFail" : "", | 356 | evt & OHCI1394_regAccessFail ? " regAccessFail" : "", |
357 | evt & OHCI1394_unrecoverableError ? " unrecoverableError" : "", | ||
320 | evt & OHCI1394_busReset ? " busReset" : "", | 358 | evt & OHCI1394_busReset ? " busReset" : "", |
321 | evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt | | 359 | evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt | |
322 | OHCI1394_RSPkt | OHCI1394_reqTxComplete | | 360 | OHCI1394_RSPkt | OHCI1394_reqTxComplete | |
@@ -394,10 +432,6 @@ static const char *tcodes[] = { | |||
394 | [0xc] = "-reserved-", [0xd] = "-reserved-", | 432 | [0xc] = "-reserved-", [0xd] = "-reserved-", |
395 | [0xe] = "link internal", [0xf] = "-reserved-", | 433 | [0xe] = "link internal", [0xf] = "-reserved-", |
396 | }; | 434 | }; |
397 | static const char *phys[] = { | ||
398 | [0x0] = "phy config packet", [0x1] = "link-on packet", | ||
399 | [0x2] = "self-id packet", [0x3] = "-reserved-", | ||
400 | }; | ||
401 | 435 | ||
402 | static void log_ar_at_event(char dir, int speed, u32 *header, int evt) | 436 | static void log_ar_at_event(char dir, int speed, u32 *header, int evt) |
403 | { | 437 | { |
@@ -416,12 +450,6 @@ static void log_ar_at_event(char dir, int speed, u32 *header, int evt) | |||
416 | return; | 450 | return; |
417 | } | 451 | } |
418 | 452 | ||
419 | if (header[0] == ~header[1]) { | ||
420 | fw_notify("A%c %s, %s, %08x\n", | ||
421 | dir, evts[evt], phys[header[0] >> 30 & 0x3], header[0]); | ||
422 | return; | ||
423 | } | ||
424 | |||
425 | switch (tcode) { | 453 | switch (tcode) { |
426 | case 0x0: case 0x6: case 0x8: | 454 | case 0x0: case 0x6: case 0x8: |
427 | snprintf(specific, sizeof(specific), " = %08x", | 455 | snprintf(specific, sizeof(specific), " = %08x", |
@@ -436,9 +464,13 @@ static void log_ar_at_event(char dir, int speed, u32 *header, int evt) | |||
436 | } | 464 | } |
437 | 465 | ||
438 | switch (tcode) { | 466 | switch (tcode) { |
439 | case 0xe: case 0xa: | 467 | case 0xa: |
440 | fw_notify("A%c %s, %s\n", dir, evts[evt], tcodes[tcode]); | 468 | fw_notify("A%c %s, %s\n", dir, evts[evt], tcodes[tcode]); |
441 | break; | 469 | break; |
470 | case 0xe: | ||
471 | fw_notify("A%c %s, PHY %08x %08x\n", | ||
472 | dir, evts[evt], header[1], header[2]); | ||
473 | break; | ||
442 | case 0x0: case 0x1: case 0x4: case 0x5: case 0x9: | 474 | case 0x0: case 0x1: case 0x4: case 0x5: case 0x9: |
443 | fw_notify("A%c spd %x tl %02x, " | 475 | fw_notify("A%c spd %x tl %02x, " |
444 | "%04x -> %04x, %s, " | 476 | "%04x -> %04x, %s, " |
@@ -577,52 +609,150 @@ static int ohci_update_phy_reg(struct fw_card *card, int addr, | |||
577 | return ret; | 609 | return ret; |
578 | } | 610 | } |
579 | 611 | ||
580 | static int ar_context_add_page(struct ar_context *ctx) | 612 | static inline dma_addr_t ar_buffer_bus(struct ar_context *ctx, unsigned int i) |
581 | { | 613 | { |
582 | struct device *dev = ctx->ohci->card.device; | 614 | return page_private(ctx->pages[i]); |
583 | struct ar_buffer *ab; | 615 | } |
584 | dma_addr_t uninitialized_var(ab_bus); | ||
585 | size_t offset; | ||
586 | 616 | ||
587 | ab = dma_alloc_coherent(dev, PAGE_SIZE, &ab_bus, GFP_ATOMIC); | 617 | static void ar_context_link_page(struct ar_context *ctx, unsigned int index) |
588 | if (ab == NULL) | 618 | { |
589 | return -ENOMEM; | 619 | struct descriptor *d; |
590 | 620 | ||
591 | ab->next = NULL; | 621 | d = &ctx->descriptors[index]; |
592 | memset(&ab->descriptor, 0, sizeof(ab->descriptor)); | 622 | d->branch_address &= cpu_to_le32(~0xf); |
593 | ab->descriptor.control = cpu_to_le16(DESCRIPTOR_INPUT_MORE | | 623 | d->res_count = cpu_to_le16(PAGE_SIZE); |
594 | DESCRIPTOR_STATUS | | 624 | d->transfer_status = 0; |
595 | DESCRIPTOR_BRANCH_ALWAYS); | ||
596 | offset = offsetof(struct ar_buffer, data); | ||
597 | ab->descriptor.req_count = cpu_to_le16(PAGE_SIZE - offset); | ||
598 | ab->descriptor.data_address = cpu_to_le32(ab_bus + offset); | ||
599 | ab->descriptor.res_count = cpu_to_le16(PAGE_SIZE - offset); | ||
600 | ab->descriptor.branch_address = 0; | ||
601 | 625 | ||
602 | wmb(); /* finish init of new descriptors before branch_address update */ | 626 | wmb(); /* finish init of new descriptors before branch_address update */ |
603 | ctx->last_buffer->descriptor.branch_address = cpu_to_le32(ab_bus | 1); | 627 | d = &ctx->descriptors[ctx->last_buffer_index]; |
604 | ctx->last_buffer->next = ab; | 628 | d->branch_address |= cpu_to_le32(1); |
605 | ctx->last_buffer = ab; | 629 | |
630 | ctx->last_buffer_index = index; | ||
606 | 631 | ||
607 | reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE); | 632 | reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE); |
608 | flush_writes(ctx->ohci); | 633 | flush_writes(ctx->ohci); |
609 | |||
610 | return 0; | ||
611 | } | 634 | } |
612 | 635 | ||
613 | static void ar_context_release(struct ar_context *ctx) | 636 | static void ar_context_release(struct ar_context *ctx) |
614 | { | 637 | { |
615 | struct ar_buffer *ab, *ab_next; | 638 | unsigned int i; |
616 | size_t offset; | 639 | |
617 | dma_addr_t ab_bus; | 640 | if (ctx->buffer) |
641 | vm_unmap_ram(ctx->buffer, AR_BUFFERS + AR_WRAPAROUND_PAGES); | ||
642 | |||
643 | for (i = 0; i < AR_BUFFERS; i++) | ||
644 | if (ctx->pages[i]) { | ||
645 | dma_unmap_page(ctx->ohci->card.device, | ||
646 | ar_buffer_bus(ctx, i), | ||
647 | PAGE_SIZE, DMA_FROM_DEVICE); | ||
648 | __free_page(ctx->pages[i]); | ||
649 | } | ||
650 | } | ||
651 | |||
652 | static void ar_context_abort(struct ar_context *ctx, const char *error_msg) | ||
653 | { | ||
654 | if (reg_read(ctx->ohci, CONTROL_CLEAR(ctx->regs)) & CONTEXT_RUN) { | ||
655 | reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN); | ||
656 | flush_writes(ctx->ohci); | ||
618 | 657 | ||
619 | for (ab = ctx->current_buffer; ab; ab = ab_next) { | 658 | fw_error("AR error: %s; DMA stopped\n", error_msg); |
620 | ab_next = ab->next; | ||
621 | offset = offsetof(struct ar_buffer, data); | ||
622 | ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset; | ||
623 | dma_free_coherent(ctx->ohci->card.device, PAGE_SIZE, | ||
624 | ab, ab_bus); | ||
625 | } | 659 | } |
660 | /* FIXME: restart? */ | ||
661 | } | ||
662 | |||
663 | static inline unsigned int ar_next_buffer_index(unsigned int index) | ||
664 | { | ||
665 | return (index + 1) % AR_BUFFERS; | ||
666 | } | ||
667 | |||
668 | static inline unsigned int ar_prev_buffer_index(unsigned int index) | ||
669 | { | ||
670 | return (index - 1 + AR_BUFFERS) % AR_BUFFERS; | ||
671 | } | ||
672 | |||
673 | static inline unsigned int ar_first_buffer_index(struct ar_context *ctx) | ||
674 | { | ||
675 | return ar_next_buffer_index(ctx->last_buffer_index); | ||
676 | } | ||
677 | |||
678 | /* | ||
679 | * We search for the buffer that contains the last AR packet DMA data written | ||
680 | * by the controller. | ||
681 | */ | ||
682 | static unsigned int ar_search_last_active_buffer(struct ar_context *ctx, | ||
683 | unsigned int *buffer_offset) | ||
684 | { | ||
685 | unsigned int i, next_i, last = ctx->last_buffer_index; | ||
686 | __le16 res_count, next_res_count; | ||
687 | |||
688 | i = ar_first_buffer_index(ctx); | ||
689 | res_count = ACCESS_ONCE(ctx->descriptors[i].res_count); | ||
690 | |||
691 | /* A buffer that is not yet completely filled must be the last one. */ | ||
692 | while (i != last && res_count == 0) { | ||
693 | |||
694 | /* Peek at the next descriptor. */ | ||
695 | next_i = ar_next_buffer_index(i); | ||
696 | rmb(); /* read descriptors in order */ | ||
697 | next_res_count = ACCESS_ONCE( | ||
698 | ctx->descriptors[next_i].res_count); | ||
699 | /* | ||
700 | * If the next descriptor is still empty, we must stop at this | ||
701 | * descriptor. | ||
702 | */ | ||
703 | if (next_res_count == cpu_to_le16(PAGE_SIZE)) { | ||
704 | /* | ||
705 | * The exception is when the DMA data for one packet is | ||
706 | * split over three buffers; in this case, the middle | ||
707 | * buffer's descriptor might never be updated by the | ||
708 | * controller and look still empty, and we have to peek | ||
709 | * at the third one. | ||
710 | */ | ||
711 | if (MAX_AR_PACKET_SIZE > PAGE_SIZE && i != last) { | ||
712 | next_i = ar_next_buffer_index(next_i); | ||
713 | rmb(); | ||
714 | next_res_count = ACCESS_ONCE( | ||
715 | ctx->descriptors[next_i].res_count); | ||
716 | if (next_res_count != cpu_to_le16(PAGE_SIZE)) | ||
717 | goto next_buffer_is_active; | ||
718 | } | ||
719 | |||
720 | break; | ||
721 | } | ||
722 | |||
723 | next_buffer_is_active: | ||
724 | i = next_i; | ||
725 | res_count = next_res_count; | ||
726 | } | ||
727 | |||
728 | rmb(); /* read res_count before the DMA data */ | ||
729 | |||
730 | *buffer_offset = PAGE_SIZE - le16_to_cpu(res_count); | ||
731 | if (*buffer_offset > PAGE_SIZE) { | ||
732 | *buffer_offset = 0; | ||
733 | ar_context_abort(ctx, "corrupted descriptor"); | ||
734 | } | ||
735 | |||
736 | return i; | ||
737 | } | ||
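The loop above keys entirely off each descriptor's res_count: PAGE_SIZE means the controller has not written the buffer yet, zero means it filled it completely, and anything in between marks the spot where DMA stopped. A minimal userspace sketch of the same walk, with hypothetical names, no memory barriers or endian handling, and the three-buffer split case left out:

#include <assert.h>
#include <stdio.h>

#define AR_BUFFERS 4
#define PAGE_SZ    4096u

static unsigned int next_idx(unsigned int i)
{
	return (i + 1) % AR_BUFFERS;
}

/* res_count[i] == PAGE_SZ: buffer i still empty; 0: completely filled;
 * anything else: partially filled, i.e. where DMA writing stopped */
static unsigned int search_last_active(const unsigned int *res_count,
				       unsigned int first, unsigned int last,
				       unsigned int *offset)
{
	unsigned int i = first;

	while (i != last && res_count[i] == 0) {
		unsigned int n = next_idx(i);

		if (res_count[n] == PAGE_SZ)
			break;	/* empty successor: DMA stopped here */
		i = n;
	}
	*offset = PAGE_SZ - res_count[i];
	return i;
}

int main(void)
{
	/* buffers 1 and 2 full, buffer 3 half full, buffer 0 not yet reused */
	unsigned int res[AR_BUFFERS] = { PAGE_SZ, 0, 0, PAGE_SZ / 2 };
	unsigned int off, i = search_last_active(res, 1, 0, &off);

	assert(i == 3 && off == PAGE_SZ / 2);
	printf("DMA stopped in buffer %u at offset %u\n", i, off);
	return 0;
}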
738 | |||
739 | static void ar_sync_buffers_for_cpu(struct ar_context *ctx, | ||
740 | unsigned int end_buffer_index, | ||
741 | unsigned int end_buffer_offset) | ||
742 | { | ||
743 | unsigned int i; | ||
744 | |||
745 | i = ar_first_buffer_index(ctx); | ||
746 | while (i != end_buffer_index) { | ||
747 | dma_sync_single_for_cpu(ctx->ohci->card.device, | ||
748 | ar_buffer_bus(ctx, i), | ||
749 | PAGE_SIZE, DMA_FROM_DEVICE); | ||
750 | i = ar_next_buffer_index(i); | ||
751 | } | ||
752 | if (end_buffer_offset > 0) | ||
753 | dma_sync_single_for_cpu(ctx->ohci->card.device, | ||
754 | ar_buffer_bus(ctx, i), | ||
755 | end_buffer_offset, DMA_FROM_DEVICE); | ||
626 | } | 756 | } |
627 | 757 | ||
628 | #if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32) | 758 | #if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32) |
@@ -665,6 +795,10 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer) | |||
665 | p.header[3] = cond_le32_to_cpu(buffer[3]); | 795 | p.header[3] = cond_le32_to_cpu(buffer[3]); |
666 | p.header_length = 16; | 796 | p.header_length = 16; |
667 | p.payload_length = p.header[3] >> 16; | 797 | p.payload_length = p.header[3] >> 16; |
798 | if (p.payload_length > MAX_ASYNC_PAYLOAD) { | ||
799 | ar_context_abort(ctx, "invalid packet length"); | ||
800 | return NULL; | ||
801 | } | ||
668 | break; | 802 | break; |
669 | 803 | ||
670 | case TCODE_WRITE_RESPONSE: | 804 | case TCODE_WRITE_RESPONSE: |
@@ -675,9 +809,8 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer) | |||
675 | break; | 809 | break; |
676 | 810 | ||
677 | default: | 811 | default: |
678 | /* FIXME: Stop context, discard everything, and restart? */ | 812 | ar_context_abort(ctx, "invalid tcode"); |
679 | p.header_length = 0; | 813 | return NULL; |
680 | p.payload_length = 0; | ||
681 | } | 814 | } |
682 | 815 | ||
683 | p.payload = (void *) buffer + p.header_length; | 816 | p.payload = (void *) buffer + p.header_length; |
@@ -727,99 +860,159 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer) | |||
727 | return buffer + length + 1; | 860 | return buffer + length + 1; |
728 | } | 861 | } |
729 | 862 | ||
863 | static void *handle_ar_packets(struct ar_context *ctx, void *p, void *end) | ||
864 | { | ||
865 | void *next; | ||
866 | |||
867 | while (p < end) { | ||
868 | next = handle_ar_packet(ctx, p); | ||
869 | if (!next) | ||
870 | return p; | ||
871 | p = next; | ||
872 | } | ||
873 | |||
874 | return p; | ||
875 | } | ||
876 | |||
877 | static void ar_recycle_buffers(struct ar_context *ctx, unsigned int end_buffer) | ||
878 | { | ||
879 | unsigned int i; | ||
880 | |||
881 | i = ar_first_buffer_index(ctx); | ||
882 | while (i != end_buffer) { | ||
883 | dma_sync_single_for_device(ctx->ohci->card.device, | ||
884 | ar_buffer_bus(ctx, i), | ||
885 | PAGE_SIZE, DMA_FROM_DEVICE); | ||
886 | ar_context_link_page(ctx, i); | ||
887 | i = ar_next_buffer_index(i); | ||
888 | } | ||
889 | } | ||
890 | |||
730 | static void ar_context_tasklet(unsigned long data) | 891 | static void ar_context_tasklet(unsigned long data) |
731 | { | 892 | { |
732 | struct ar_context *ctx = (struct ar_context *)data; | 893 | struct ar_context *ctx = (struct ar_context *)data; |
733 | struct fw_ohci *ohci = ctx->ohci; | 894 | unsigned int end_buffer_index, end_buffer_offset; |
734 | struct ar_buffer *ab; | 895 | void *p, *end; |
735 | struct descriptor *d; | ||
736 | void *buffer, *end; | ||
737 | 896 | ||
738 | ab = ctx->current_buffer; | 897 | p = ctx->pointer; |
739 | d = &ab->descriptor; | 898 | if (!p) |
899 | return; | ||
740 | 900 | ||
741 | if (d->res_count == 0) { | 901 | end_buffer_index = ar_search_last_active_buffer(ctx, |
742 | size_t size, rest, offset; | 902 | &end_buffer_offset); |
743 | dma_addr_t start_bus; | 903 | ar_sync_buffers_for_cpu(ctx, end_buffer_index, end_buffer_offset); |
744 | void *start; | 904 | end = ctx->buffer + end_buffer_index * PAGE_SIZE + end_buffer_offset; |
745 | 905 | ||
906 | if (end_buffer_index < ar_first_buffer_index(ctx)) { | ||
746 | /* | 907 | /* |
747 | * This descriptor is finished and we may have a | 908 | * The filled part of the overall buffer wraps around; handle |
748 | * packet split across this and the next buffer. We | 909 | * all packets up to the buffer end here. If the last packet |
749 | * reuse the page for reassembling the split packet. | 910 | * wraps around, its tail will be visible after the buffer end |
911 | * because the buffer start pages are mapped there again. | ||
750 | */ | 912 | */ |
913 | void *buffer_end = ctx->buffer + AR_BUFFERS * PAGE_SIZE; | ||
914 | p = handle_ar_packets(ctx, p, buffer_end); | ||
915 | if (p < buffer_end) | ||
916 | goto error; | ||
917 | /* adjust p to point back into the actual buffer */ | ||
918 | p -= AR_BUFFERS * PAGE_SIZE; | ||
919 | } | ||
751 | 920 | ||
752 | offset = offsetof(struct ar_buffer, data); | 921 | p = handle_ar_packets(ctx, p, end); |
753 | start = buffer = ab; | 922 | if (p != end) { |
754 | start_bus = le32_to_cpu(ab->descriptor.data_address) - offset; | 923 | if (p > end) |
755 | 924 | ar_context_abort(ctx, "inconsistent descriptor"); | |
756 | ab = ab->next; | 925 | goto error; |
757 | d = &ab->descriptor; | ||
758 | size = buffer + PAGE_SIZE - ctx->pointer; | ||
759 | rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count); | ||
760 | memmove(buffer, ctx->pointer, size); | ||
761 | memcpy(buffer + size, ab->data, rest); | ||
762 | ctx->current_buffer = ab; | ||
763 | ctx->pointer = (void *) ab->data + rest; | ||
764 | end = buffer + size + rest; | ||
765 | |||
766 | while (buffer < end) | ||
767 | buffer = handle_ar_packet(ctx, buffer); | ||
768 | |||
769 | dma_free_coherent(ohci->card.device, PAGE_SIZE, | ||
770 | start, start_bus); | ||
771 | ar_context_add_page(ctx); | ||
772 | } else { | ||
773 | buffer = ctx->pointer; | ||
774 | ctx->pointer = end = | ||
775 | (void *) ab + PAGE_SIZE - le16_to_cpu(d->res_count); | ||
776 | |||
777 | while (buffer < end) | ||
778 | buffer = handle_ar_packet(ctx, buffer); | ||
779 | } | 926 | } |
927 | |||
928 | ctx->pointer = p; | ||
929 | ar_recycle_buffers(ctx, end_buffer_index); | ||
930 | |||
931 | return; | ||
932 | |||
933 | error: | ||
934 | ctx->pointer = NULL; | ||
780 | } | 935 | } |
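The wraparound branch above avoids any copying only because ar_context_init() below maps the first AR_WRAPAROUND_PAGES physical pages a second time, directly behind the last buffer. A stripped-down sketch of just that mapping trick, using the same vm_map_ram() call as the new code but hypothetical sizes and no error handling:

/* sketch: make page 0 of a 2-page ring visible again after the end */
struct page *pages[3];
void *ring;

pages[0] = alloc_page(GFP_KERNEL);
pages[1] = alloc_page(GFP_KERNEL);
pages[2] = pages[0];	/* same physical page, mapped a second time */

ring = vm_map_ram(pages, 3, -1, PAGE_KERNEL);
/* data the controller writes into page 0 is now also readable at
 * ring + 2 * PAGE_SIZE, so a packet wrapping from page 1 back into
 * page 0 is virtually contiguous */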
781 | 936 | ||
782 | static int ar_context_init(struct ar_context *ctx, | 937 | static int ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci, |
783 | struct fw_ohci *ohci, u32 regs) | 938 | unsigned int descriptors_offset, u32 regs) |
784 | { | 939 | { |
785 | struct ar_buffer ab; | 940 | unsigned int i; |
941 | dma_addr_t dma_addr; | ||
942 | struct page *pages[AR_BUFFERS + AR_WRAPAROUND_PAGES]; | ||
943 | struct descriptor *d; | ||
786 | 944 | ||
787 | ctx->regs = regs; | 945 | ctx->regs = regs; |
788 | ctx->ohci = ohci; | 946 | ctx->ohci = ohci; |
789 | ctx->last_buffer = &ab; | ||
790 | tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx); | 947 | tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx); |
791 | 948 | ||
792 | ar_context_add_page(ctx); | 949 | for (i = 0; i < AR_BUFFERS; i++) { |
793 | ar_context_add_page(ctx); | 950 | ctx->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32); |
794 | ctx->current_buffer = ab.next; | 951 | if (!ctx->pages[i]) |
795 | ctx->pointer = ctx->current_buffer->data; | 952 | goto out_of_memory; |
953 | dma_addr = dma_map_page(ohci->card.device, ctx->pages[i], | ||
954 | 0, PAGE_SIZE, DMA_FROM_DEVICE); | ||
955 | if (dma_mapping_error(ohci->card.device, dma_addr)) { | ||
956 | __free_page(ctx->pages[i]); | ||
957 | ctx->pages[i] = NULL; | ||
958 | goto out_of_memory; | ||
959 | } | ||
960 | set_page_private(ctx->pages[i], dma_addr); | ||
961 | } | ||
962 | |||
963 | for (i = 0; i < AR_BUFFERS; i++) | ||
964 | pages[i] = ctx->pages[i]; | ||
965 | for (i = 0; i < AR_WRAPAROUND_PAGES; i++) | ||
966 | pages[AR_BUFFERS + i] = ctx->pages[i]; | ||
967 | ctx->buffer = vm_map_ram(pages, AR_BUFFERS + AR_WRAPAROUND_PAGES, | ||
968 | -1, PAGE_KERNEL); | ||
969 | if (!ctx->buffer) | ||
970 | goto out_of_memory; | ||
971 | |||
972 | ctx->descriptors = ohci->misc_buffer + descriptors_offset; | ||
973 | ctx->descriptors_bus = ohci->misc_buffer_bus + descriptors_offset; | ||
974 | |||
975 | for (i = 0; i < AR_BUFFERS; i++) { | ||
976 | d = &ctx->descriptors[i]; | ||
977 | d->req_count = cpu_to_le16(PAGE_SIZE); | ||
978 | d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE | | ||
979 | DESCRIPTOR_STATUS | | ||
980 | DESCRIPTOR_BRANCH_ALWAYS); | ||
981 | d->data_address = cpu_to_le32(ar_buffer_bus(ctx, i)); | ||
982 | d->branch_address = cpu_to_le32(ctx->descriptors_bus + | ||
983 | ar_next_buffer_index(i) * sizeof(struct descriptor)); | ||
984 | } | ||
796 | 985 | ||
797 | return 0; | 986 | return 0; |
987 | |||
988 | out_of_memory: | ||
989 | ar_context_release(ctx); | ||
990 | |||
991 | return -ENOMEM; | ||
798 | } | 992 | } |
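Each descriptor's branch_address is pointed at the bus address of the next descriptor modulo AR_BUFFERS, so the controller sees an endless ring. The same arithmetic, restated as a tiny self-contained program with a made-up bus address in place of a real DMA mapping:

#include <assert.h>
#include <stdint.h>

#define AR_BUFFERS 4

struct desc {
	uint16_t req_count;
	uint16_t control;
	uint32_t data_address;
	uint32_t branch_address;
};

int main(void)
{
	struct desc d[AR_BUFFERS] = { { 0 } };
	uint32_t ring_bus = 0x1000;	/* pretend DMA address of d[] */
	unsigned int i;

	for (i = 0; i < AR_BUFFERS; i++)
		d[i].branch_address = ring_bus +
			((i + 1) % AR_BUFFERS) * sizeof(struct desc);

	/* the last descriptor branches back to the first: a closed ring */
	assert(d[AR_BUFFERS - 1].branch_address == ring_bus);
	return 0;
}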
799 | 993 | ||
800 | static void ar_context_run(struct ar_context *ctx) | 994 | static void ar_context_run(struct ar_context *ctx) |
801 | { | 995 | { |
802 | struct ar_buffer *ab = ctx->current_buffer; | 996 | unsigned int i; |
803 | dma_addr_t ab_bus; | ||
804 | size_t offset; | ||
805 | 997 | ||
806 | offset = offsetof(struct ar_buffer, data); | 998 | for (i = 0; i < AR_BUFFERS; i++) |
807 | ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset; | 999 | ar_context_link_page(ctx, i); |
808 | 1000 | ||
809 | reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ab_bus | 1); | 1001 | ctx->pointer = ctx->buffer; |
1002 | |||
1003 | reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ctx->descriptors_bus | 1); | ||
810 | reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN); | 1004 | reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN); |
811 | flush_writes(ctx->ohci); | 1005 | flush_writes(ctx->ohci); |
812 | } | 1006 | } |
813 | 1007 | ||
814 | static struct descriptor *find_branch_descriptor(struct descriptor *d, int z) | 1008 | static struct descriptor *find_branch_descriptor(struct descriptor *d, int z) |
815 | { | 1009 | { |
816 | int b, key; | 1010 | __le16 branch; |
817 | 1011 | ||
818 | b = (le16_to_cpu(d->control) & DESCRIPTOR_BRANCH_ALWAYS) >> 2; | 1012 | branch = d->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS); |
819 | key = (le16_to_cpu(d->control) & DESCRIPTOR_KEY_IMMEDIATE) >> 8; | ||
820 | 1013 | ||
821 | /* figure out which descriptor the branch address goes in */ | 1014 | /* figure out which descriptor the branch address goes in */ |
822 | if (z == 2 && (b == 3 || key == 2)) | 1015 | if (z == 2 && branch == cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS)) |
823 | return d; | 1016 | return d; |
824 | else | 1017 | else |
825 | return d + z - 1; | 1018 | return d + z - 1; |
@@ -983,6 +1176,7 @@ static void context_run(struct context *ctx, u32 extra) | |||
983 | le32_to_cpu(ctx->last->branch_address)); | 1176 | le32_to_cpu(ctx->last->branch_address)); |
984 | reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0); | 1177 | reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0); |
985 | reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra); | 1178 | reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra); |
1179 | ctx->running = true; | ||
986 | flush_writes(ohci); | 1180 | flush_writes(ohci); |
987 | } | 1181 | } |
988 | 1182 | ||
@@ -999,9 +1193,6 @@ static void context_append(struct context *ctx, | |||
999 | wmb(); /* finish init of new descriptors before branch_address update */ | 1193 | wmb(); /* finish init of new descriptors before branch_address update */ |
1000 | ctx->prev->branch_address = cpu_to_le32(d_bus | z); | 1194 | ctx->prev->branch_address = cpu_to_le32(d_bus | z); |
1001 | ctx->prev = find_branch_descriptor(d, z); | 1195 | ctx->prev = find_branch_descriptor(d, z); |
1002 | |||
1003 | reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE); | ||
1004 | flush_writes(ctx->ohci); | ||
1005 | } | 1196 | } |
1006 | 1197 | ||
1007 | static void context_stop(struct context *ctx) | 1198 | static void context_stop(struct context *ctx) |
@@ -1010,6 +1201,7 @@ static void context_stop(struct context *ctx) | |||
1010 | int i; | 1201 | int i; |
1011 | 1202 | ||
1012 | reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN); | 1203 | reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN); |
1204 | ctx->running = false; | ||
1013 | flush_writes(ctx->ohci); | 1205 | flush_writes(ctx->ohci); |
1014 | 1206 | ||
1015 | for (i = 0; i < 10; i++) { | 1207 | for (i = 0; i < 10; i++) { |
@@ -1023,6 +1215,7 @@ static void context_stop(struct context *ctx) | |||
1023 | } | 1215 | } |
1024 | 1216 | ||
1025 | struct driver_data { | 1217 | struct driver_data { |
1218 | u8 inline_data[8]; | ||
1026 | struct fw_packet *packet; | 1219 | struct fw_packet *packet; |
1027 | }; | 1220 | }; |
1028 | 1221 | ||
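The new inline_data field gives each queued packet eight bytes of scratch space inside the last descriptor; at_context_queue_packet() below copies payloads up to that size there and derives the DMA address as d_bus + 3 * sizeof(*d), so small payloads (an SBP-2 ORB pointer, for instance) skip the dma_map_single()/dma_unmap_single() round trip. A toy version of that size check, with a hypothetical helper name:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define INLINE_MAX 8	/* sizeof driver_data.inline_data */

/* returns 1 if the payload was embedded in the descriptor block,
 * 0 if the caller must DMA-map it separately */
static int try_inline(uint8_t *inline_buf, const void *payload, size_t len)
{
	if (len > INLINE_MAX)
		return 0;
	memcpy(inline_buf, payload, len);
	return 1;
}

int main(void)
{
	uint8_t scratch[INLINE_MAX];
	uint32_t quadlet = 0xdeadbeef;
	char block[64] = "too big to embed";

	printf("quadlet: %d, block: %d\n",
	       try_inline(scratch, &quadlet, sizeof(quadlet)),
	       try_inline(scratch, block, sizeof(block)));
	return 0;
}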
@@ -1040,7 +1233,6 @@ static int at_context_queue_packet(struct context *ctx, | |||
1040 | struct descriptor *d, *last; | 1233 | struct descriptor *d, *last; |
1041 | __le32 *header; | 1234 | __le32 *header; |
1042 | int z, tcode; | 1235 | int z, tcode; |
1043 | u32 reg; | ||
1044 | 1236 | ||
1045 | d = context_get_descriptors(ctx, 4, &d_bus); | 1237 | d = context_get_descriptors(ctx, 4, &d_bus); |
1046 | if (d == NULL) { | 1238 | if (d == NULL) { |
@@ -1054,21 +1246,27 @@ static int at_context_queue_packet(struct context *ctx, | |||
1054 | /* | 1246 | /* |
1055 | * The DMA format for asynchronous link packets is different | 1247 | * The DMA format for asynchronous link packets is different |
1056 | * from the IEEE1394 layout, so shift the fields around | 1248 | * from the IEEE1394 layout, so shift the fields around |
1057 | * accordingly. If header_length is 8, it's a PHY packet, to | 1249 | * accordingly. |
1058 | * which we need to prepend an extra quadlet. | ||
1059 | */ | 1250 | */ |
1060 | 1251 | ||
1252 | tcode = (packet->header[0] >> 4) & 0x0f; | ||
1061 | header = (__le32 *) &d[1]; | 1253 | header = (__le32 *) &d[1]; |
1062 | switch (packet->header_length) { | 1254 | switch (tcode) { |
1063 | case 16: | 1255 | case TCODE_WRITE_QUADLET_REQUEST: |
1064 | case 12: | 1256 | case TCODE_WRITE_BLOCK_REQUEST: |
1257 | case TCODE_WRITE_RESPONSE: | ||
1258 | case TCODE_READ_QUADLET_REQUEST: | ||
1259 | case TCODE_READ_BLOCK_REQUEST: | ||
1260 | case TCODE_READ_QUADLET_RESPONSE: | ||
1261 | case TCODE_READ_BLOCK_RESPONSE: | ||
1262 | case TCODE_LOCK_REQUEST: | ||
1263 | case TCODE_LOCK_RESPONSE: | ||
1065 | header[0] = cpu_to_le32((packet->header[0] & 0xffff) | | 1264 | header[0] = cpu_to_le32((packet->header[0] & 0xffff) | |
1066 | (packet->speed << 16)); | 1265 | (packet->speed << 16)); |
1067 | header[1] = cpu_to_le32((packet->header[1] & 0xffff) | | 1266 | header[1] = cpu_to_le32((packet->header[1] & 0xffff) | |
1068 | (packet->header[0] & 0xffff0000)); | 1267 | (packet->header[0] & 0xffff0000)); |
1069 | header[2] = cpu_to_le32(packet->header[2]); | 1268 | header[2] = cpu_to_le32(packet->header[2]); |
1070 | 1269 | ||
1071 | tcode = (packet->header[0] >> 4) & 0x0f; | ||
1072 | if (TCODE_IS_BLOCK_PACKET(tcode)) | 1270 | if (TCODE_IS_BLOCK_PACKET(tcode)) |
1073 | header[3] = cpu_to_le32(packet->header[3]); | 1271 | header[3] = cpu_to_le32(packet->header[3]); |
1074 | else | 1272 | else |
@@ -1077,18 +1275,18 @@ static int at_context_queue_packet(struct context *ctx, | |||
1077 | d[0].req_count = cpu_to_le16(packet->header_length); | 1275 | d[0].req_count = cpu_to_le16(packet->header_length); |
1078 | break; | 1276 | break; |
1079 | 1277 | ||
1080 | case 8: | 1278 | case TCODE_LINK_INTERNAL: |
1081 | header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) | | 1279 | header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) | |
1082 | (packet->speed << 16)); | 1280 | (packet->speed << 16)); |
1083 | header[1] = cpu_to_le32(packet->header[0]); | 1281 | header[1] = cpu_to_le32(packet->header[1]); |
1084 | header[2] = cpu_to_le32(packet->header[1]); | 1282 | header[2] = cpu_to_le32(packet->header[2]); |
1085 | d[0].req_count = cpu_to_le16(12); | 1283 | d[0].req_count = cpu_to_le16(12); |
1086 | 1284 | ||
1087 | if (is_ping_packet(packet->header)) | 1285 | if (is_ping_packet(&packet->header[1])) |
1088 | d[0].control |= cpu_to_le16(DESCRIPTOR_PING); | 1286 | d[0].control |= cpu_to_le16(DESCRIPTOR_PING); |
1089 | break; | 1287 | break; |
1090 | 1288 | ||
1091 | case 4: | 1289 | case TCODE_STREAM_DATA: |
1092 | header[0] = cpu_to_le32((packet->header[0] & 0xffff) | | 1290 | header[0] = cpu_to_le32((packet->header[0] & 0xffff) | |
1093 | (packet->speed << 16)); | 1291 | (packet->speed << 16)); |
1094 | header[1] = cpu_to_le32(packet->header[0] & 0xffff0000); | 1292 | header[1] = cpu_to_le32(packet->header[0] & 0xffff0000); |
@@ -1101,20 +1299,28 @@ static int at_context_queue_packet(struct context *ctx, | |||
1101 | return -1; | 1299 | return -1; |
1102 | } | 1300 | } |
1103 | 1301 | ||
1302 | BUILD_BUG_ON(sizeof(struct driver_data) > sizeof(struct descriptor)); | ||
1104 | driver_data = (struct driver_data *) &d[3]; | 1303 | driver_data = (struct driver_data *) &d[3]; |
1105 | driver_data->packet = packet; | 1304 | driver_data->packet = packet; |
1106 | packet->driver_data = driver_data; | 1305 | packet->driver_data = driver_data; |
1107 | 1306 | ||
1108 | if (packet->payload_length > 0) { | 1307 | if (packet->payload_length > 0) { |
1109 | payload_bus = | 1308 | if (packet->payload_length > sizeof(driver_data->inline_data)) { |
1110 | dma_map_single(ohci->card.device, packet->payload, | 1309 | payload_bus = dma_map_single(ohci->card.device, |
1111 | packet->payload_length, DMA_TO_DEVICE); | 1310 | packet->payload, |
1112 | if (dma_mapping_error(ohci->card.device, payload_bus)) { | 1311 | packet->payload_length, |
1113 | packet->ack = RCODE_SEND_ERROR; | 1312 | DMA_TO_DEVICE); |
1114 | return -1; | 1313 | if (dma_mapping_error(ohci->card.device, payload_bus)) { |
1314 | packet->ack = RCODE_SEND_ERROR; | ||
1315 | return -1; | ||
1316 | } | ||
1317 | packet->payload_bus = payload_bus; | ||
1318 | packet->payload_mapped = true; | ||
1319 | } else { | ||
1320 | memcpy(driver_data->inline_data, packet->payload, | ||
1321 | packet->payload_length); | ||
1322 | payload_bus = d_bus + 3 * sizeof(*d); | ||
1115 | } | 1323 | } |
1116 | packet->payload_bus = payload_bus; | ||
1117 | packet->payload_mapped = true; | ||
1118 | 1324 | ||
1119 | d[2].req_count = cpu_to_le16(packet->payload_length); | 1325 | d[2].req_count = cpu_to_le16(packet->payload_length); |
1120 | d[2].data_address = cpu_to_le32(payload_bus); | 1326 | d[2].data_address = cpu_to_le32(payload_bus); |
@@ -1129,19 +1335,8 @@ static int at_context_queue_packet(struct context *ctx, | |||
1129 | DESCRIPTOR_IRQ_ALWAYS | | 1335 | DESCRIPTOR_IRQ_ALWAYS | |
1130 | DESCRIPTOR_BRANCH_ALWAYS); | 1336 | DESCRIPTOR_BRANCH_ALWAYS); |
1131 | 1337 | ||
1132 | /* | 1338 | /* FIXME: Document how the locking works. */ |
1133 | * If the controller and packet generations don't match, we need to | 1339 | if (ohci->generation != packet->generation) { |
1134 | * bail out and try again. If IntEvent.busReset is set, the AT context | ||
1135 | * is halted, so appending to the context and trying to run it is | ||
1136 | * futile. Most controllers do the right thing and just flush the AT | ||
1137 | * queue (per section 7.2.3.2 of the OHCI 1.1 specification), but | ||
1138 | * some controllers (like a JMicron JMB381 PCI-e) misbehave and wind | ||
1139 | * up stalling out. So we just bail out in software and try again | ||
1140 | * later, and everyone is happy. | ||
1141 | * FIXME: Document how the locking works. | ||
1142 | */ | ||
1143 | if (ohci->generation != packet->generation || | ||
1144 | reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) { | ||
1145 | if (packet->payload_mapped) | 1340 | if (packet->payload_mapped) |
1146 | dma_unmap_single(ohci->card.device, payload_bus, | 1341 | dma_unmap_single(ohci->card.device, payload_bus, |
1147 | packet->payload_length, DMA_TO_DEVICE); | 1342 | packet->payload_length, DMA_TO_DEVICE); |
@@ -1151,14 +1346,27 @@ static int at_context_queue_packet(struct context *ctx, | |||
1151 | 1346 | ||
1152 | context_append(ctx, d, z, 4 - z); | 1347 | context_append(ctx, d, z, 4 - z); |
1153 | 1348 | ||
1154 | /* If the context isn't already running, start it up. */ | 1349 | if (ctx->running) { |
1155 | reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs)); | 1350 | reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE); |
1156 | if ((reg & CONTEXT_RUN) == 0) | 1351 | flush_writes(ohci); |
1352 | } else { | ||
1157 | context_run(ctx, 0); | 1353 | context_run(ctx, 0); |
1354 | } | ||
1158 | 1355 | ||
1159 | return 0; | 1356 | return 0; |
1160 | } | 1357 | } |
1161 | 1358 | ||
1359 | static void at_context_flush(struct context *ctx) | ||
1360 | { | ||
1361 | tasklet_disable(&ctx->tasklet); | ||
1362 | |||
1363 | ctx->flushing = true; | ||
1364 | context_tasklet((unsigned long)ctx); | ||
1365 | ctx->flushing = false; | ||
1366 | |||
1367 | tasklet_enable(&ctx->tasklet); | ||
1368 | } | ||
1369 | |||
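While ctx->flushing is set, handle_at_packet() below treats descriptors without a transfer status as complete and acks them with RCODE_GENERATION so the core retries them in the new bus generation. A grossly simplified model of that reaping decision; the rcode values are the ones firewire-core defines:

#include <stdio.h>

#define RCODE_COMPLETE   0x00
#define RCODE_GENERATION 0x13	/* extended rcode used by firewire-core */

struct pkt {
	int transfer_status;	/* 0 until the controller sends the packet */
	int ack;
};

static int reap(struct pkt *p, int flushing)
{
	if (p->transfer_status == 0 && !flushing)
		return 0;	/* not done yet, stop iterating */
	p->ack = p->transfer_status ? RCODE_COMPLETE : RCODE_GENERATION;
	return 1;
}

int main(void)
{
	struct pkt sent = { .transfer_status = 0x11 };	/* controller wrote a status */
	struct pkt stuck = { .transfer_status = 0 };	/* never went out */

	reap(&sent, 1);
	reap(&stuck, 1);
	printf("sent ack=%#x, stuck ack=%#x\n", sent.ack, stuck.ack);
	return 0;
}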
1162 | static int handle_at_packet(struct context *context, | 1370 | static int handle_at_packet(struct context *context, |
1163 | struct descriptor *d, | 1371 | struct descriptor *d, |
1164 | struct descriptor *last) | 1372 | struct descriptor *last) |
@@ -1168,7 +1376,7 @@ static int handle_at_packet(struct context *context, | |||
1168 | struct fw_ohci *ohci = context->ohci; | 1376 | struct fw_ohci *ohci = context->ohci; |
1169 | int evt; | 1377 | int evt; |
1170 | 1378 | ||
1171 | if (last->transfer_status == 0) | 1379 | if (last->transfer_status == 0 && !context->flushing) |
1172 | /* This descriptor isn't done yet, stop iteration. */ | 1380 | /* This descriptor isn't done yet, stop iteration. */ |
1173 | return 0; | 1381 | return 0; |
1174 | 1382 | ||
@@ -1202,11 +1410,15 @@ static int handle_at_packet(struct context *context, | |||
1202 | break; | 1410 | break; |
1203 | 1411 | ||
1204 | case OHCI1394_evt_missing_ack: | 1412 | case OHCI1394_evt_missing_ack: |
1205 | /* | 1413 | if (context->flushing) |
1206 | * Using a valid (current) generation count, but the | 1414 | packet->ack = RCODE_GENERATION; |
1207 | * node is not on the bus or not sending acks. | 1415 | else { |
1208 | */ | 1416 | /* |
1209 | packet->ack = RCODE_NO_ACK; | 1417 | * Using a valid (current) generation count, but the |
1418 | * node is not on the bus or not sending acks. | ||
1419 | */ | ||
1420 | packet->ack = RCODE_NO_ACK; | ||
1421 | } | ||
1210 | break; | 1422 | break; |
1211 | 1423 | ||
1212 | case ACK_COMPLETE + 0x10: | 1424 | case ACK_COMPLETE + 0x10: |
@@ -1219,6 +1431,13 @@ static int handle_at_packet(struct context *context, | |||
1219 | packet->ack = evt - 0x10; | 1431 | packet->ack = evt - 0x10; |
1220 | break; | 1432 | break; |
1221 | 1433 | ||
1434 | case OHCI1394_evt_no_status: | ||
1435 | if (context->flushing) { | ||
1436 | packet->ack = RCODE_GENERATION; | ||
1437 | break; | ||
1438 | } | ||
1439 | /* fall through */ | ||
1440 | |||
1222 | default: | 1441 | default: |
1223 | packet->ack = RCODE_SEND_ERROR; | 1442 | packet->ack = RCODE_SEND_ERROR; |
1224 | break; | 1443 | break; |
@@ -1371,6 +1590,47 @@ static void at_context_transmit(struct context *ctx, struct fw_packet *packet) | |||
1371 | 1590 | ||
1372 | } | 1591 | } |
1373 | 1592 | ||
1593 | static void detect_dead_context(struct fw_ohci *ohci, | ||
1594 | const char *name, unsigned int regs) | ||
1595 | { | ||
1596 | u32 ctl; | ||
1597 | |||
1598 | ctl = reg_read(ohci, CONTROL_SET(regs)); | ||
1599 | if (ctl & CONTEXT_DEAD) { | ||
1600 | #ifdef CONFIG_FIREWIRE_OHCI_DEBUG | ||
1601 | fw_error("DMA context %s has stopped, error code: %s\n", | ||
1602 | name, evts[ctl & 0x1f]); | ||
1603 | #else | ||
1604 | fw_error("DMA context %s has stopped, error code: %#x\n", | ||
1605 | name, ctl & 0x1f); | ||
1606 | #endif | ||
1607 | } | ||
1608 | } | ||
1609 | |||
1610 | static void handle_dead_contexts(struct fw_ohci *ohci) | ||
1611 | { | ||
1612 | unsigned int i; | ||
1613 | char name[8]; | ||
1614 | |||
1615 | detect_dead_context(ohci, "ATReq", OHCI1394_AsReqTrContextBase); | ||
1616 | detect_dead_context(ohci, "ATRsp", OHCI1394_AsRspTrContextBase); | ||
1617 | detect_dead_context(ohci, "ARReq", OHCI1394_AsReqRcvContextBase); | ||
1618 | detect_dead_context(ohci, "ARRsp", OHCI1394_AsRspRcvContextBase); | ||
1619 | for (i = 0; i < 32; ++i) { | ||
1620 | if (!(ohci->it_context_support & (1 << i))) | ||
1621 | continue; | ||
1622 | sprintf(name, "IT%u", i); | ||
1623 | detect_dead_context(ohci, name, OHCI1394_IsoXmitContextBase(i)); | ||
1624 | } | ||
1625 | for (i = 0; i < 32; ++i) { | ||
1626 | if (!(ohci->ir_context_support & (1 << i))) | ||
1627 | continue; | ||
1628 | sprintf(name, "IR%u", i); | ||
1629 | detect_dead_context(ohci, name, OHCI1394_IsoRcvContextBase(i)); | ||
1630 | } | ||
1631 | /* TODO: maybe try to flush and restart the dead contexts */ | ||
1632 | } | ||
1633 | |||
1374 | static u32 cycle_timer_ticks(u32 cycle_timer) | 1634 | static u32 cycle_timer_ticks(u32 cycle_timer) |
1375 | { | 1635 | { |
1376 | u32 ticks; | 1636 | u32 ticks; |
@@ -1524,9 +1784,23 @@ static void bus_reset_tasklet(unsigned long data) | |||
1524 | /* FIXME: Document how the locking works. */ | 1784 | /* FIXME: Document how the locking works. */ |
1525 | spin_lock_irqsave(&ohci->lock, flags); | 1785 | spin_lock_irqsave(&ohci->lock, flags); |
1526 | 1786 | ||
1527 | ohci->generation = generation; | 1787 | ohci->generation = -1; /* prevent AT packet queueing */ |
1528 | context_stop(&ohci->at_request_ctx); | 1788 | context_stop(&ohci->at_request_ctx); |
1529 | context_stop(&ohci->at_response_ctx); | 1789 | context_stop(&ohci->at_response_ctx); |
1790 | |||
1791 | spin_unlock_irqrestore(&ohci->lock, flags); | ||
1792 | |||
1793 | /* | ||
1794 | * Per OHCI 1.2 draft, clause 7.2.3.3, hardware may leave unsent | ||
1795 | * packets in the AT queues and software needs to drain them. | ||
1796 | * Some OHCI 1.1 controllers (JMicron) apparently require this too. | ||
1797 | */ | ||
1798 | at_context_flush(&ohci->at_request_ctx); | ||
1799 | at_context_flush(&ohci->at_response_ctx); | ||
1800 | |||
1801 | spin_lock_irqsave(&ohci->lock, flags); | ||
1802 | |||
1803 | ohci->generation = generation; | ||
1530 | reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset); | 1804 | reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset); |
1531 | 1805 | ||
1532 | if (ohci->quirks & QUIRK_RESET_PACKET) | 1806 | if (ohci->quirks & QUIRK_RESET_PACKET) |
@@ -1594,8 +1868,12 @@ static irqreturn_t irq_handler(int irq, void *data) | |||
1594 | if (!event || !~event) | 1868 | if (!event || !~event) |
1595 | return IRQ_NONE; | 1869 | return IRQ_NONE; |
1596 | 1870 | ||
1597 | /* busReset must not be cleared yet, see OHCI 1.1 clause 7.2.3.2 */ | 1871 | /* |
1598 | reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset); | 1872 | * busReset and postedWriteErr must not be cleared yet |
1873 | * (OHCI 1.1 clauses 7.2.3.2 and 13.2.8.1) | ||
1874 | */ | ||
1875 | reg_write(ohci, OHCI1394_IntEventClear, | ||
1876 | event & ~(OHCI1394_busReset | OHCI1394_postedWriteErr)); | ||
1599 | log_irqs(event); | 1877 | log_irqs(event); |
1600 | 1878 | ||
1601 | if (event & OHCI1394_selfIDComplete) | 1879 | if (event & OHCI1394_selfIDComplete) |
@@ -1613,30 +1891,41 @@ static irqreturn_t irq_handler(int irq, void *data) | |||
1613 | if (event & OHCI1394_respTxComplete) | 1891 | if (event & OHCI1394_respTxComplete) |
1614 | tasklet_schedule(&ohci->at_response_ctx.tasklet); | 1892 | tasklet_schedule(&ohci->at_response_ctx.tasklet); |
1615 | 1893 | ||
1616 | iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear); | 1894 | if (event & OHCI1394_isochRx) { |
1617 | reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event); | 1895 | iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear); |
1896 | reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event); | ||
1618 | 1897 | ||
1619 | while (iso_event) { | 1898 | while (iso_event) { |
1620 | i = ffs(iso_event) - 1; | 1899 | i = ffs(iso_event) - 1; |
1621 | tasklet_schedule(&ohci->ir_context_list[i].context.tasklet); | 1900 | tasklet_schedule( |
1622 | iso_event &= ~(1 << i); | 1901 | &ohci->ir_context_list[i].context.tasklet); |
1902 | iso_event &= ~(1 << i); | ||
1903 | } | ||
1623 | } | 1904 | } |
1624 | 1905 | ||
1625 | iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear); | 1906 | if (event & OHCI1394_isochTx) { |
1626 | reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event); | 1907 | iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear); |
1908 | reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event); | ||
1627 | 1909 | ||
1628 | while (iso_event) { | 1910 | while (iso_event) { |
1629 | i = ffs(iso_event) - 1; | 1911 | i = ffs(iso_event) - 1; |
1630 | tasklet_schedule(&ohci->it_context_list[i].context.tasklet); | 1912 | tasklet_schedule( |
1631 | iso_event &= ~(1 << i); | 1913 | &ohci->it_context_list[i].context.tasklet); |
1914 | iso_event &= ~(1 << i); | ||
1915 | } | ||
1632 | } | 1916 | } |
1633 | 1917 | ||
1634 | if (unlikely(event & OHCI1394_regAccessFail)) | 1918 | if (unlikely(event & OHCI1394_regAccessFail)) |
1635 | fw_error("Register access failure - " | 1919 | fw_error("Register access failure - " |
1636 | "please notify linux1394-devel@lists.sf.net\n"); | 1920 | "please notify linux1394-devel@lists.sf.net\n"); |
1637 | 1921 | ||
1638 | if (unlikely(event & OHCI1394_postedWriteErr)) | 1922 | if (unlikely(event & OHCI1394_postedWriteErr)) { |
1923 | reg_read(ohci, OHCI1394_PostedWriteAddressHi); | ||
1924 | reg_read(ohci, OHCI1394_PostedWriteAddressLo); | ||
1925 | reg_write(ohci, OHCI1394_IntEventClear, | ||
1926 | OHCI1394_postedWriteErr); | ||
1639 | fw_error("PCI posted write error\n"); | 1927 | fw_error("PCI posted write error\n"); |
1928 | } | ||
1640 | 1929 | ||
1641 | if (unlikely(event & OHCI1394_cycleTooLong)) { | 1930 | if (unlikely(event & OHCI1394_cycleTooLong)) { |
1642 | if (printk_ratelimit()) | 1931 | if (printk_ratelimit()) |
@@ -1656,11 +1945,15 @@ static irqreturn_t irq_handler(int irq, void *data) | |||
1656 | fw_notify("isochronous cycle inconsistent\n"); | 1945 | fw_notify("isochronous cycle inconsistent\n"); |
1657 | } | 1946 | } |
1658 | 1947 | ||
1948 | if (unlikely(event & OHCI1394_unrecoverableError)) | ||
1949 | handle_dead_contexts(ohci); | ||
1950 | |||
1659 | if (event & OHCI1394_cycle64Seconds) { | 1951 | if (event & OHCI1394_cycle64Seconds) { |
1660 | spin_lock(&ohci->lock); | 1952 | spin_lock(&ohci->lock); |
1661 | update_bus_time(ohci); | 1953 | update_bus_time(ohci); |
1662 | spin_unlock(&ohci->lock); | 1954 | spin_unlock(&ohci->lock); |
1663 | } | 1955 | } else |
1956 | flush_writes(ohci); | ||
1664 | 1957 | ||
1665 | return IRQ_HANDLED; | 1958 | return IRQ_HANDLED; |
1666 | } | 1959 | } |
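The acknowledgment mask deliberately leaves two events pending: busReset until bus_reset_tasklet() has drained the AT contexts, and postedWriteErr until the PostedWriteAddress registers have been read back as above. The mask itself is one expression; a tiny check using the IntEvent bit values assumed from the driver's header:

#include <assert.h>
#include <stdint.h>

#define OHCI1394_postedWriteErr	0x00000100	/* IntEvent bit 8 */
#define OHCI1394_busReset	0x00020000	/* IntEvent bit 17 */

int main(void)
{
	uint32_t event = OHCI1394_busReset | OHCI1394_postedWriteErr | 0x3;
	uint32_t clear = event & ~(OHCI1394_busReset | OHCI1394_postedWriteErr);

	/* only the immediately-handled events are acknowledged */
	assert(clear == 0x3);
	return 0;
}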
@@ -1783,8 +2076,6 @@ static int ohci_enable(struct fw_card *card, | |||
1783 | 2076 | ||
1784 | reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus); | 2077 | reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus); |
1785 | reg_write(ohci, OHCI1394_LinkControlSet, | 2078 | reg_write(ohci, OHCI1394_LinkControlSet, |
1786 | OHCI1394_LinkControl_rcvSelfID | | ||
1787 | OHCI1394_LinkControl_rcvPhyPkt | | ||
1788 | OHCI1394_LinkControl_cycleTimerEnable | | 2079 | OHCI1394_LinkControl_cycleTimerEnable | |
1789 | OHCI1394_LinkControl_cycleMaster); | 2080 | OHCI1394_LinkControl_cycleMaster); |
1790 | 2081 | ||
@@ -1811,9 +2102,6 @@ static int ohci_enable(struct fw_card *card, | |||
1811 | reg_write(ohci, OHCI1394_FairnessControl, 0); | 2102 | reg_write(ohci, OHCI1394_FairnessControl, 0); |
1812 | card->priority_budget_implemented = ohci->pri_req_max != 0; | 2103 | card->priority_budget_implemented = ohci->pri_req_max != 0; |
1813 | 2104 | ||
1814 | ar_context_run(&ohci->ar_request_ctx); | ||
1815 | ar_context_run(&ohci->ar_response_ctx); | ||
1816 | |||
1817 | reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000); | 2105 | reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000); |
1818 | reg_write(ohci, OHCI1394_IntEventClear, ~0); | 2106 | reg_write(ohci, OHCI1394_IntEventClear, ~0); |
1819 | reg_write(ohci, OHCI1394_IntMaskClear, ~0); | 2107 | reg_write(ohci, OHCI1394_IntMaskClear, ~0); |
@@ -1892,7 +2180,9 @@ static int ohci_enable(struct fw_card *card, | |||
1892 | OHCI1394_selfIDComplete | | 2180 | OHCI1394_selfIDComplete | |
1893 | OHCI1394_regAccessFail | | 2181 | OHCI1394_regAccessFail | |
1894 | OHCI1394_cycle64Seconds | | 2182 | OHCI1394_cycle64Seconds | |
1895 | OHCI1394_cycleInconsistent | OHCI1394_cycleTooLong | | 2183 | OHCI1394_cycleInconsistent | |
2184 | OHCI1394_unrecoverableError | | ||
2185 | OHCI1394_cycleTooLong | | ||
1896 | OHCI1394_masterIntEnable; | 2186 | OHCI1394_masterIntEnable; |
1897 | if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS) | 2187 | if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS) |
1898 | irqs |= OHCI1394_busReset; | 2188 | irqs |= OHCI1394_busReset; |
@@ -1901,7 +2191,13 @@ static int ohci_enable(struct fw_card *card, | |||
1901 | reg_write(ohci, OHCI1394_HCControlSet, | 2191 | reg_write(ohci, OHCI1394_HCControlSet, |
1902 | OHCI1394_HCControl_linkEnable | | 2192 | OHCI1394_HCControl_linkEnable | |
1903 | OHCI1394_HCControl_BIBimageValid); | 2193 | OHCI1394_HCControl_BIBimageValid); |
1904 | flush_writes(ohci); | 2194 | |
2195 | reg_write(ohci, OHCI1394_LinkControlSet, | ||
2196 | OHCI1394_LinkControl_rcvSelfID | | ||
2197 | OHCI1394_LinkControl_rcvPhyPkt); | ||
2198 | |||
2199 | ar_context_run(&ohci->ar_request_ctx); | ||
2200 | ar_context_run(&ohci->ar_response_ctx); /* also flushes writes */ | ||
1905 | 2201 | ||
1906 | /* We are ready to go, reset bus to finish initialization. */ | 2202 | /* We are ready to go, reset bus to finish initialization. */ |
1907 | fw_schedule_bus_reset(&ohci->card, false, true); | 2203 | fw_schedule_bus_reset(&ohci->card, false, true); |
@@ -1914,7 +2210,6 @@ static int ohci_set_config_rom(struct fw_card *card, | |||
1914 | { | 2210 | { |
1915 | struct fw_ohci *ohci; | 2211 | struct fw_ohci *ohci; |
1916 | unsigned long flags; | 2212 | unsigned long flags; |
1917 | int ret = -EBUSY; | ||
1918 | __be32 *next_config_rom; | 2213 | __be32 *next_config_rom; |
1919 | dma_addr_t uninitialized_var(next_config_rom_bus); | 2214 | dma_addr_t uninitialized_var(next_config_rom_bus); |
1920 | 2215 | ||
@@ -1955,22 +2250,37 @@ static int ohci_set_config_rom(struct fw_card *card, | |||
1955 | 2250 | ||
1956 | spin_lock_irqsave(&ohci->lock, flags); | 2251 | spin_lock_irqsave(&ohci->lock, flags); |
1957 | 2252 | ||
2253 | /* | ||
2254 | * If there is not already a pending config_rom update, | ||
2255 | * push our new allocation into the ohci->next_config_rom | ||
2256 | * and then mark the local variable as null so that we | ||
2257 | * won't deallocate the new buffer. | ||
2258 | * | ||
2259 | * OTOH, if there is a pending config_rom update, just | ||
2260 | * use that buffer with the new config_rom data, and | ||
2261 | * let this routine free the unused DMA allocation. | ||
2262 | */ | ||
2263 | |||
1958 | if (ohci->next_config_rom == NULL) { | 2264 | if (ohci->next_config_rom == NULL) { |
1959 | ohci->next_config_rom = next_config_rom; | 2265 | ohci->next_config_rom = next_config_rom; |
1960 | ohci->next_config_rom_bus = next_config_rom_bus; | 2266 | ohci->next_config_rom_bus = next_config_rom_bus; |
2267 | next_config_rom = NULL; | ||
2268 | } | ||
1961 | 2269 | ||
1962 | copy_config_rom(ohci->next_config_rom, config_rom, length); | 2270 | copy_config_rom(ohci->next_config_rom, config_rom, length); |
1963 | 2271 | ||
1964 | ohci->next_header = config_rom[0]; | 2272 | ohci->next_header = config_rom[0]; |
1965 | ohci->next_config_rom[0] = 0; | 2273 | ohci->next_config_rom[0] = 0; |
1966 | 2274 | ||
1967 | reg_write(ohci, OHCI1394_ConfigROMmap, | 2275 | reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus); |
1968 | ohci->next_config_rom_bus); | ||
1969 | ret = 0; | ||
1970 | } | ||
1971 | 2276 | ||
1972 | spin_unlock_irqrestore(&ohci->lock, flags); | 2277 | spin_unlock_irqrestore(&ohci->lock, flags); |
1973 | 2278 | ||
2279 | /* If we didn't use the DMA allocation, delete it. */ | ||
2280 | if (next_config_rom != NULL) | ||
2281 | dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, | ||
2282 | next_config_rom, next_config_rom_bus); | ||
2283 | |||
1974 | /* | 2284 | /* |
1975 | * Now initiate a bus reset to have the changes take | 2285 | * Now initiate a bus reset to have the changes take |
1976 | * effect. We clean up the old config rom memory and DMA | 2286 | * effect. We clean up the old config rom memory and DMA |
@@ -1978,13 +2288,10 @@ static int ohci_set_config_rom(struct fw_card *card, | |||
1978 | * controller could need to access it before the bus reset | 2288 | * controller could need to access it before the bus reset |
1979 | * takes effect. | 2289 | * takes effect. |
1980 | */ | 2290 | */ |
1981 | if (ret == 0) | ||
1982 | fw_schedule_bus_reset(&ohci->card, true, true); | ||
1983 | else | ||
1984 | dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, | ||
1985 | next_config_rom, next_config_rom_bus); | ||
1986 | 2291 | ||
1987 | return ret; | 2292 | fw_schedule_bus_reset(&ohci->card, true, true); |
2293 | |||
2294 | return 0; | ||
1988 | } | 2295 | } |
1989 | 2296 | ||
1990 | static void ohci_send_request(struct fw_card *card, struct fw_packet *packet) | 2297 | static void ohci_send_request(struct fw_card *card, struct fw_packet *packet) |
@@ -2408,6 +2715,10 @@ static int ohci_start_iso(struct fw_iso_context *base, | |||
2408 | u32 control = IR_CONTEXT_ISOCH_HEADER, match; | 2715 | u32 control = IR_CONTEXT_ISOCH_HEADER, match; |
2409 | int index; | 2716 | int index; |
2410 | 2717 | ||
2718 | /* the controller cannot start without any queued packets */ | ||
2719 | if (ctx->context.last->branch_address == 0) | ||
2720 | return -ENODATA; | ||
2721 | |||
2411 | switch (ctx->base.type) { | 2722 | switch (ctx->base.type) { |
2412 | case FW_ISO_CONTEXT_TRANSMIT: | 2723 | case FW_ISO_CONTEXT_TRANSMIT: |
2413 | index = ctx - ohci->it_context_list; | 2724 | index = ctx - ohci->it_context_list; |
@@ -2436,6 +2747,10 @@ static int ohci_start_iso(struct fw_iso_context *base, | |||
2436 | reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index); | 2747 | reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index); |
2437 | reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match); | 2748 | reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match); |
2438 | context_run(&ctx->context, control); | 2749 | context_run(&ctx->context, control); |
2750 | |||
2751 | ctx->sync = sync; | ||
2752 | ctx->tags = tags; | ||
2753 | |||
2439 | break; | 2754 | break; |
2440 | } | 2755 | } |
2441 | 2756 | ||
@@ -2462,6 +2777,7 @@ static int ohci_stop_iso(struct fw_iso_context *base) | |||
2462 | } | 2777 | } |
2463 | flush_writes(ohci); | 2778 | flush_writes(ohci); |
2464 | context_stop(&ctx->context); | 2779 | context_stop(&ctx->context); |
2780 | tasklet_kill(&ctx->context.tasklet); | ||
2465 | 2781 | ||
2466 | return 0; | 2782 | return 0; |
2467 | } | 2783 | } |
@@ -2533,6 +2849,26 @@ static int ohci_set_iso_channels(struct fw_iso_context *base, u64 *channels) | |||
2533 | return ret; | 2849 | return ret; |
2534 | } | 2850 | } |
2535 | 2851 | ||
2852 | #ifdef CONFIG_PM | ||
2853 | static void ohci_resume_iso_dma(struct fw_ohci *ohci) | ||
2854 | { | ||
2855 | int i; | ||
2856 | struct iso_context *ctx; | ||
2857 | |||
2858 | for (i = 0 ; i < ohci->n_ir ; i++) { | ||
2859 | ctx = &ohci->ir_context_list[i]; | ||
2860 | if (ctx->context.running) | ||
2861 | ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags); | ||
2862 | } | ||
2863 | |||
2864 | for (i = 0 ; i < ohci->n_it ; i++) { | ||
2865 | ctx = &ohci->it_context_list[i]; | ||
2866 | if (ctx->context.running) | ||
2867 | ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags); | ||
2868 | } | ||
2869 | } | ||
2870 | #endif | ||
2871 | |||
2536 | static int queue_iso_transmit(struct iso_context *ctx, | 2872 | static int queue_iso_transmit(struct iso_context *ctx, |
2537 | struct fw_iso_packet *packet, | 2873 | struct fw_iso_packet *packet, |
2538 | struct fw_iso_buffer *buffer, | 2874 | struct fw_iso_buffer *buffer, |
@@ -2787,6 +3123,15 @@ static int ohci_queue_iso(struct fw_iso_context *base, | |||
2787 | return ret; | 3123 | return ret; |
2788 | } | 3124 | } |
2789 | 3125 | ||
3126 | static void ohci_flush_queue_iso(struct fw_iso_context *base) | ||
3127 | { | ||
3128 | struct context *ctx = | ||
3129 | &container_of(base, struct iso_context, base)->context; | ||
3130 | |||
3131 | reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE); | ||
3132 | flush_writes(ctx->ohci); | ||
3133 | } | ||
3134 | |||
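With the CONTEXT_WAKE write dropped from context_append() (hunk at old line 1002 above), queueing N iso packets now costs N descriptor writes but a single MMIO wake, issued through this hook. A rough caller-side fragment, not compilable standalone, assuming fw_iso_context_queue_flush() is the matching core wrapper added by this series:

/* fragment: queue a burst, then kick the DMA context once */
struct fw_iso_context *ctx;		/* from fw_iso_context_create() */
struct fw_iso_packet packet[16];
struct fw_iso_buffer buffer;
int i, err;

for (i = 0; i < 16; i++) {
	err = fw_iso_context_queue(ctx, &packet[i], &buffer, i * 1024);
	if (err < 0)
		break;
}
fw_iso_context_queue_flush(ctx);	/* one CONTEXT_WAKE, one MMIO flush */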
2790 | static const struct fw_card_driver ohci_driver = { | 3135 | static const struct fw_card_driver ohci_driver = { |
2791 | .enable = ohci_enable, | 3136 | .enable = ohci_enable, |
2792 | .read_phy_reg = ohci_read_phy_reg, | 3137 | .read_phy_reg = ohci_read_phy_reg, |
@@ -2803,6 +3148,7 @@ static const struct fw_card_driver ohci_driver = { | |||
2803 | .free_iso_context = ohci_free_iso_context, | 3148 | .free_iso_context = ohci_free_iso_context, |
2804 | .set_iso_channels = ohci_set_iso_channels, | 3149 | .set_iso_channels = ohci_set_iso_channels, |
2805 | .queue_iso = ohci_queue_iso, | 3150 | .queue_iso = ohci_queue_iso, |
3151 | .flush_queue_iso = ohci_flush_queue_iso, | ||
2806 | .start_iso = ohci_start_iso, | 3152 | .start_iso = ohci_start_iso, |
2807 | .stop_iso = ohci_stop_iso, | 3153 | .stop_iso = ohci_stop_iso, |
2808 | }; | 3154 | }; |
@@ -2842,9 +3188,14 @@ static int __devinit pci_probe(struct pci_dev *dev, | |||
2842 | struct fw_ohci *ohci; | 3188 | struct fw_ohci *ohci; |
2843 | u32 bus_options, max_receive, link_speed, version; | 3189 | u32 bus_options, max_receive, link_speed, version; |
2844 | u64 guid; | 3190 | u64 guid; |
2845 | int i, err, n_ir, n_it; | 3191 | int i, err; |
2846 | size_t size; | 3192 | size_t size; |
2847 | 3193 | ||
3194 | if (dev->vendor == PCI_VENDOR_ID_PINNACLE_SYSTEMS) { | ||
3195 | dev_err(&dev->dev, "Pinnacle MovieBoard is not yet supported\n"); | ||
3196 | return -ENOSYS; | ||
3197 | } | ||
3198 | |||
2848 | ohci = kzalloc(sizeof(*ohci), GFP_KERNEL); | 3199 | ohci = kzalloc(sizeof(*ohci), GFP_KERNEL); |
2849 | if (ohci == NULL) { | 3200 | if (ohci == NULL) { |
2850 | err = -ENOMEM; | 3201 | err = -ENOMEM; |
@@ -2885,40 +3236,68 @@ static int __devinit pci_probe(struct pci_dev *dev, | |||
2885 | } | 3236 | } |
2886 | 3237 | ||
2887 | for (i = 0; i < ARRAY_SIZE(ohci_quirks); i++) | 3238 | for (i = 0; i < ARRAY_SIZE(ohci_quirks); i++) |
2888 | if (ohci_quirks[i].vendor == dev->vendor && | 3239 | if ((ohci_quirks[i].vendor == dev->vendor) && |
2889 | (ohci_quirks[i].device == dev->device || | 3240 | (ohci_quirks[i].device == (unsigned short)PCI_ANY_ID || |
2890 | ohci_quirks[i].device == (unsigned short)PCI_ANY_ID)) { | 3241 | ohci_quirks[i].device == dev->device) && |
3242 | (ohci_quirks[i].revision == (unsigned short)PCI_ANY_ID || | ||
3243 | ohci_quirks[i].revision >= dev->revision)) { | ||
2891 | ohci->quirks = ohci_quirks[i].flags; | 3244 | ohci->quirks = ohci_quirks[i].flags; |
2892 | break; | 3245 | break; |
2893 | } | 3246 | } |
2894 | if (param_quirks) | 3247 | if (param_quirks) |
2895 | ohci->quirks = param_quirks; | 3248 | ohci->quirks = param_quirks; |
2896 | 3249 | ||
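An entry in the extended quirk table just above now matches when the vendor is equal, the device is equal or PCI_ANY_ID, and the listed revision is PCI_ANY_ID or at least the chip's revision, so a quirk can be scoped to all revisions up to a known-bad one. A standalone replica of that lookup with a made-up table entry:

#include <stdio.h>

#define ANY ((unsigned short)~0)	/* stands in for PCI_ANY_ID */

struct quirk { unsigned short vendor, device, revision; unsigned flags; };

static unsigned lookup(const struct quirk *t, int n, unsigned short vendor,
		       unsigned short device, unsigned short revision)
{
	int i;

	for (i = 0; i < n; i++)
		if (t[i].vendor == vendor &&
		    (t[i].device == ANY || t[i].device == device) &&
		    (t[i].revision == ANY || t[i].revision >= revision))
			return t[i].flags;
	return 0;
}

int main(void)
{
	/* made-up entry: flag 0x4 for vendor 0x1106, revisions 0 and 1 only */
	const struct quirk table[] = { { 0x1106, ANY, 1, 0x4 } };

	printf("rev 0 -> %#x, rev 2 -> %#x\n",
	       lookup(table, 1, 0x1106, 0x3044, 0),
	       lookup(table, 1, 0x1106, 0x3044, 2));
	return 0;
}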
2897 | ar_context_init(&ohci->ar_request_ctx, ohci, | 3250 | /* |
2898 | OHCI1394_AsReqRcvContextControlSet); | 3251 | * Because dma_alloc_coherent() allocates at least one page, |
3252 | * we save space by using a common buffer for the AR request/ | ||
3253 | * response descriptors and the self IDs buffer. | ||
3254 | */ | ||
3255 | BUILD_BUG_ON(AR_BUFFERS * sizeof(struct descriptor) > PAGE_SIZE/4); | ||
3256 | BUILD_BUG_ON(SELF_ID_BUF_SIZE > PAGE_SIZE/2); | ||
3257 | ohci->misc_buffer = dma_alloc_coherent(ohci->card.device, | ||
3258 | PAGE_SIZE, | ||
3259 | &ohci->misc_buffer_bus, | ||
3260 | GFP_KERNEL); | ||
3261 | if (!ohci->misc_buffer) { | ||
3262 | err = -ENOMEM; | ||
3263 | goto fail_iounmap; | ||
3264 | } | ||
3265 | |||
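The shared page is carved into three fixed regions, and the two BUILD_BUG_ONs above guarantee the regions can never overlap. The same layout restated as a runnable check, with AR_BUFFERS and SELF_ID_BUF_SIZE values assumed from their definitions elsewhere in ohci.c:

#include <assert.h>
#include <stdint.h>

#define PAGE_SIZE	 4096u
#define AR_BUFFERS	 32	/* assumed, as defined earlier in ohci.c */
#define SELF_ID_BUF_SIZE 2048u	/* assumed, 0x800 in ohci.c */

struct descriptor { uint8_t raw[16]; };	/* OHCI descriptors are 16 bytes */

int main(void)
{
	/* [0, PAGE_SIZE/4)           AR request context descriptors
	 * [PAGE_SIZE/4, PAGE_SIZE/2) AR response context descriptors
	 * [PAGE_SIZE/2, PAGE_SIZE)   self-ID receive buffer */
	assert(AR_BUFFERS * sizeof(struct descriptor) <= PAGE_SIZE / 4);
	assert(SELF_ID_BUF_SIZE <= PAGE_SIZE / 2);
	return 0;
}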
3266 | err = ar_context_init(&ohci->ar_request_ctx, ohci, 0, | ||
3267 | OHCI1394_AsReqRcvContextControlSet); | ||
3268 | if (err < 0) | ||
3269 | goto fail_misc_buf; | ||
2899 | 3270 | ||
2900 | ar_context_init(&ohci->ar_response_ctx, ohci, | 3271 | err = ar_context_init(&ohci->ar_response_ctx, ohci, PAGE_SIZE/4, |
2901 | OHCI1394_AsRspRcvContextControlSet); | 3272 | OHCI1394_AsRspRcvContextControlSet); |
3273 | if (err < 0) | ||
3274 | goto fail_arreq_ctx; | ||
2902 | 3275 | ||
2903 | context_init(&ohci->at_request_ctx, ohci, | 3276 | err = context_init(&ohci->at_request_ctx, ohci, |
2904 | OHCI1394_AsReqTrContextControlSet, handle_at_packet); | 3277 | OHCI1394_AsReqTrContextControlSet, handle_at_packet); |
3278 | if (err < 0) | ||
3279 | goto fail_arrsp_ctx; | ||
2905 | 3280 | ||
2906 | context_init(&ohci->at_response_ctx, ohci, | 3281 | err = context_init(&ohci->at_response_ctx, ohci, |
2907 | OHCI1394_AsRspTrContextControlSet, handle_at_packet); | 3282 | OHCI1394_AsRspTrContextControlSet, handle_at_packet); |
3283 | if (err < 0) | ||
3284 | goto fail_atreq_ctx; | ||
2908 | 3285 | ||
2909 | reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0); | 3286 | reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0); |
2910 | ohci->ir_context_channels = ~0ULL; | 3287 | ohci->ir_context_channels = ~0ULL; |
2911 | ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet); | 3288 | ohci->ir_context_support = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet); |
2912 | reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0); | 3289 | reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0); |
2913 | n_ir = hweight32(ohci->ir_context_mask); | 3290 | ohci->ir_context_mask = ohci->ir_context_support; |
2914 | size = sizeof(struct iso_context) * n_ir; | 3291 | ohci->n_ir = hweight32(ohci->ir_context_mask); |
3292 | size = sizeof(struct iso_context) * ohci->n_ir; | ||
2915 | ohci->ir_context_list = kzalloc(size, GFP_KERNEL); | 3293 | ohci->ir_context_list = kzalloc(size, GFP_KERNEL); |
2916 | 3294 | ||
2917 | reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0); | 3295 | reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0); |
2918 | ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet); | 3296 | ohci->it_context_support = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet); |
2919 | reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0); | 3297 | reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0); |
2920 | n_it = hweight32(ohci->it_context_mask); | 3298 | ohci->it_context_mask = ohci->it_context_support; |
2921 | size = sizeof(struct iso_context) * n_it; | 3299 | ohci->n_it = hweight32(ohci->it_context_mask); |
3300 | size = sizeof(struct iso_context) * ohci->n_it; | ||
2922 | ohci->it_context_list = kzalloc(size, GFP_KERNEL); | 3301 | ohci->it_context_list = kzalloc(size, GFP_KERNEL); |
2923 | 3302 | ||
2924 | if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) { | 3303 | if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) { |
@@ -2926,15 +3305,8 @@ static int __devinit pci_probe(struct pci_dev *dev, | |||
2926 | goto fail_contexts; | 3305 | goto fail_contexts; |
2927 | } | 3306 | } |
2928 | 3307 | ||
2929 | /* self-id dma buffer allocation */ | 3308 | ohci->self_id_cpu = ohci->misc_buffer + PAGE_SIZE/2; |
2930 | ohci->self_id_cpu = dma_alloc_coherent(ohci->card.device, | 3309 | ohci->self_id_bus = ohci->misc_buffer_bus + PAGE_SIZE/2; |
2931 | SELF_ID_BUF_SIZE, | ||
2932 | &ohci->self_id_bus, | ||
2933 | GFP_KERNEL); | ||
2934 | if (ohci->self_id_cpu == NULL) { | ||
2935 | err = -ENOMEM; | ||
2936 | goto fail_contexts; | ||
2937 | } | ||
2938 | 3310 | ||
2939 | bus_options = reg_read(ohci, OHCI1394_BusOptions); | 3311 | bus_options = reg_read(ohci, OHCI1394_BusOptions); |
2940 | max_receive = (bus_options >> 12) & 0xf; | 3312 | max_receive = (bus_options >> 12) & 0xf; |
@@ -2944,33 +3316,37 @@ static int __devinit pci_probe(struct pci_dev *dev, | |||
2944 | 3316 | ||
2945 | err = fw_card_add(&ohci->card, max_receive, link_speed, guid); | 3317 | err = fw_card_add(&ohci->card, max_receive, link_speed, guid); |
2946 | if (err) | 3318 | if (err) |
2947 | goto fail_self_id; | 3319 | goto fail_contexts; |
2948 | 3320 | ||
2949 | version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff; | 3321 | version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff; |
2950 | fw_notify("Added fw-ohci device %s, OHCI v%x.%x, " | 3322 | fw_notify("Added fw-ohci device %s, OHCI v%x.%x, " |
2951 | "%d IR + %d IT contexts, quirks 0x%x\n", | 3323 | "%d IR + %d IT contexts, quirks 0x%x\n", |
2952 | dev_name(&dev->dev), version >> 16, version & 0xff, | 3324 | dev_name(&dev->dev), version >> 16, version & 0xff, |
2953 | n_ir, n_it, ohci->quirks); | 3325 | ohci->n_ir, ohci->n_it, ohci->quirks); |
2954 | 3326 | ||
2955 | return 0; | 3327 | return 0; |
2956 | 3328 | ||
2957 | fail_self_id: | ||
2958 | dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE, | ||
2959 | ohci->self_id_cpu, ohci->self_id_bus); | ||
2960 | fail_contexts: | 3329 | fail_contexts: |
2961 | kfree(ohci->ir_context_list); | 3330 | kfree(ohci->ir_context_list); |
2962 | kfree(ohci->it_context_list); | 3331 | kfree(ohci->it_context_list); |
2963 | context_release(&ohci->at_response_ctx); | 3332 | context_release(&ohci->at_response_ctx); |
3333 | fail_atreq_ctx: | ||
2964 | context_release(&ohci->at_request_ctx); | 3334 | context_release(&ohci->at_request_ctx); |
3335 | fail_arrsp_ctx: | ||
2965 | ar_context_release(&ohci->ar_response_ctx); | 3336 | ar_context_release(&ohci->ar_response_ctx); |
3337 | fail_arreq_ctx: | ||
2966 | ar_context_release(&ohci->ar_request_ctx); | 3338 | ar_context_release(&ohci->ar_request_ctx); |
3339 | fail_misc_buf: | ||
3340 | dma_free_coherent(ohci->card.device, PAGE_SIZE, | ||
3341 | ohci->misc_buffer, ohci->misc_buffer_bus); | ||
3342 | fail_iounmap: | ||
2967 | pci_iounmap(dev, ohci->registers); | 3343 | pci_iounmap(dev, ohci->registers); |
2968 | fail_iomem: | 3344 | fail_iomem: |
2969 | pci_release_region(dev, 0); | 3345 | pci_release_region(dev, 0); |
2970 | fail_disable: | 3346 | fail_disable: |
2971 | pci_disable_device(dev); | 3347 | pci_disable_device(dev); |
2972 | fail_free: | 3348 | fail_free: |
2973 | kfree(&ohci->card); | 3349 | kfree(ohci); |
2974 | pmac_ohci_off(dev); | 3350 | pmac_ohci_off(dev); |
2975 | fail: | 3351 | fail: |
2976 | if (err == -ENOMEM) | 3352 | if (err == -ENOMEM) |
@@ -3002,10 +3378,10 @@ static void pci_remove(struct pci_dev *dev) | |||
3002 | if (ohci->config_rom) | 3378 | if (ohci->config_rom) |
3003 | dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, | 3379 | dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, |
3004 | ohci->config_rom, ohci->config_rom_bus); | 3380 | ohci->config_rom, ohci->config_rom_bus); |
3005 | dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE, | ||
3006 | ohci->self_id_cpu, ohci->self_id_bus); | ||
3007 | ar_context_release(&ohci->ar_request_ctx); | 3381 | ar_context_release(&ohci->ar_request_ctx); |
3008 | ar_context_release(&ohci->ar_response_ctx); | 3382 | ar_context_release(&ohci->ar_response_ctx); |
3383 | dma_free_coherent(ohci->card.device, PAGE_SIZE, | ||
3384 | ohci->misc_buffer, ohci->misc_buffer_bus); | ||
3009 | context_release(&ohci->at_request_ctx); | 3385 | context_release(&ohci->at_request_ctx); |
3010 | context_release(&ohci->at_response_ctx); | 3386 | context_release(&ohci->at_response_ctx); |
3011 | kfree(ohci->it_context_list); | 3387 | kfree(ohci->it_context_list); |
@@ -3014,7 +3390,7 @@ static void pci_remove(struct pci_dev *dev) | |||
3014 | pci_iounmap(dev, ohci->registers); | 3390 | pci_iounmap(dev, ohci->registers); |
3015 | pci_release_region(dev, 0); | 3391 | pci_release_region(dev, 0); |
3016 | pci_disable_device(dev); | 3392 | pci_disable_device(dev); |
3017 | kfree(&ohci->card); | 3393 | kfree(ohci); |
3018 | pmac_ohci_off(dev); | 3394 | pmac_ohci_off(dev); |
3019 | 3395 | ||
3020 | fw_notify("Removed fw-ohci device.\n"); | 3396 | fw_notify("Removed fw-ohci device.\n"); |
@@ -3056,7 +3432,20 @@ static int pci_resume(struct pci_dev *dev) | |||
3056 | return err; | 3432 | return err; |
3057 | } | 3433 | } |
3058 | 3434 | ||
3059 | return ohci_enable(&ohci->card, NULL, 0); | 3435 | /* Some systems don't set up the GUID registers on resume from RAM */ |
3436 | if (!reg_read(ohci, OHCI1394_GUIDLo) && | ||
3437 | !reg_read(ohci, OHCI1394_GUIDHi)) { | ||
3438 | reg_write(ohci, OHCI1394_GUIDLo, (u32)ohci->card.guid); | ||
3439 | reg_write(ohci, OHCI1394_GUIDHi, (u32)(ohci->card.guid >> 32)); | ||
3440 | } | ||
3441 | |||
3442 | err = ohci_enable(&ohci->card, NULL, 0); | ||
3443 | if (err) | ||
3444 | return err; | ||
3445 | |||
3446 | ohci_resume_iso_dma(ohci); | ||
3447 | |||
3448 | return 0; | ||
3060 | } | 3449 | } |
3061 | #endif | 3450 | #endif |
3062 | 3451 | ||
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c index bfae4b309791..41841a3e3f99 100644 --- a/drivers/firewire/sbp2.c +++ b/drivers/firewire/sbp2.c | |||
@@ -125,9 +125,6 @@ MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0" | |||
125 | ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE) | 125 | ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE) |
126 | ", or a combination)"); | 126 | ", or a combination)"); |
127 | 127 | ||
128 | /* I don't know why the SCSI stack doesn't define something like this... */ | ||
129 | typedef void (*scsi_done_fn_t)(struct scsi_cmnd *); | ||
130 | |||
131 | static const char sbp2_driver_name[] = "sbp2"; | 128 | static const char sbp2_driver_name[] = "sbp2"; |
132 | 129 | ||
133 | /* | 130 | /* |
@@ -261,7 +258,6 @@ struct sbp2_orb { | |||
261 | struct kref kref; | 258 | struct kref kref; |
262 | dma_addr_t request_bus; | 259 | dma_addr_t request_bus; |
263 | int rcode; | 260 | int rcode; |
264 | struct sbp2_pointer pointer; | ||
265 | void (*callback)(struct sbp2_orb * orb, struct sbp2_status * status); | 261 | void (*callback)(struct sbp2_orb * orb, struct sbp2_status * status); |
266 | struct list_head link; | 262 | struct list_head link; |
267 | }; | 263 | }; |
@@ -314,7 +310,6 @@ struct sbp2_command_orb { | |||
314 | u8 command_block[SBP2_MAX_CDB_SIZE]; | 310 | u8 command_block[SBP2_MAX_CDB_SIZE]; |
315 | } request; | 311 | } request; |
316 | struct scsi_cmnd *cmd; | 312 | struct scsi_cmnd *cmd; |
317 | scsi_done_fn_t done; | ||
318 | struct sbp2_logical_unit *lu; | 313 | struct sbp2_logical_unit *lu; |
319 | 314 | ||
320 | struct sbp2_pointer page_table[SG_ALL] __attribute__((aligned(8))); | 315 | struct sbp2_pointer page_table[SG_ALL] __attribute__((aligned(8))); |
@@ -472,18 +467,12 @@ static void complete_transaction(struct fw_card *card, int rcode, | |||
472 | * So this callback only sets the rcode if it hasn't already | 467 | * So this callback only sets the rcode if it hasn't already |
473 | * been set and only does the cleanup if the transaction | 468 | * been set and only does the cleanup if the transaction |
474 | * failed and we didn't already get a status write. | 469 | * failed and we didn't already get a status write. |
475 | * | ||
476 | * Here we treat RCODE_CANCELLED like RCODE_COMPLETE because some | ||
477 | * OXUF936QSE firmwares occasionally respond after Split_Timeout and | ||
478 | * complete the ORB just fine. Note, we also get RCODE_CANCELLED | ||
479 | * from sbp2_cancel_orbs() if fw_cancel_transaction() == 0. | ||
480 | */ | 470 | */ |
481 | spin_lock_irqsave(&card->lock, flags); | 471 | spin_lock_irqsave(&card->lock, flags); |
482 | 472 | ||
483 | if (orb->rcode == -1) | 473 | if (orb->rcode == -1) |
484 | orb->rcode = rcode; | 474 | orb->rcode = rcode; |
485 | 475 | if (orb->rcode != RCODE_COMPLETE) { | |
486 | if (orb->rcode != RCODE_COMPLETE && orb->rcode != RCODE_CANCELLED) { | ||
487 | list_del(&orb->link); | 476 | list_del(&orb->link); |
488 | spin_unlock_irqrestore(&card->lock, flags); | 477 | spin_unlock_irqrestore(&card->lock, flags); |
489 | 478 | ||
@@ -500,10 +489,11 @@ static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu, | |||
500 | int node_id, int generation, u64 offset) | 489 | int node_id, int generation, u64 offset) |
501 | { | 490 | { |
502 | struct fw_device *device = target_device(lu->tgt); | 491 | struct fw_device *device = target_device(lu->tgt); |
492 | struct sbp2_pointer orb_pointer; | ||
503 | unsigned long flags; | 493 | unsigned long flags; |
504 | 494 | ||
505 | orb->pointer.high = 0; | 495 | orb_pointer.high = 0; |
506 | orb->pointer.low = cpu_to_be32(orb->request_bus); | 496 | orb_pointer.low = cpu_to_be32(orb->request_bus); |
507 | 497 | ||
508 | spin_lock_irqsave(&device->card->lock, flags); | 498 | spin_lock_irqsave(&device->card->lock, flags); |
509 | list_add_tail(&orb->link, &lu->orb_list); | 499 | list_add_tail(&orb->link, &lu->orb_list); |
@@ -514,7 +504,7 @@ static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu, | |||
514 | 504 | ||
515 | fw_send_request(device->card, &orb->t, TCODE_WRITE_BLOCK_REQUEST, | 505 | fw_send_request(device->card, &orb->t, TCODE_WRITE_BLOCK_REQUEST, |
516 | node_id, generation, device->max_speed, offset, | 506 | node_id, generation, device->max_speed, offset, |
517 | &orb->pointer, 8, complete_transaction, orb); | 507 | &orb_pointer, 8, complete_transaction, orb); |
518 | } | 508 | } |
519 | 509 | ||
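The ORB pointer only has to live until fw_send_request() has queued the packet; moving it from struct sbp2_orb to the stack assumes the controller driver copies a payload this small (8 bytes) into its transmit descriptor before fw_send_request() returns, as firewire-ohci does for payloads that fit the descriptor's inline buffer. What the two quadlets encode:

    /* The 8-byte value written to the target's ORB_POINTER register:
     * high/low halves of the ORB's bus address, big-endian on the wire.
     * high is 0 because the ORB's DMA mapping fits in 32 bits here. */
    struct sbp2_pointer orb_pointer;

    orb_pointer.high = 0;
    orb_pointer.low  = cpu_to_be32(orb->request_bus);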
520 | static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu) | 510 | static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu) |
@@ -532,7 +522,8 @@ static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu) | |||
532 | 522 | ||
533 | list_for_each_entry_safe(orb, next, &list, link) { | 523 | list_for_each_entry_safe(orb, next, &list, link) { |
534 | retval = 0; | 524 | retval = 0; |
535 | fw_cancel_transaction(device->card, &orb->t); | 525 | if (fw_cancel_transaction(device->card, &orb->t) == 0) |
526 | continue; | ||
536 | 527 | ||
537 | orb->rcode = RCODE_CANCELLED; | 528 | orb->rcode = RCODE_CANCELLED; |
538 | orb->callback(orb, NULL); | 529 | orb->callback(orb, NULL); |
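The new return-value check encodes fw_cancel_transaction()'s contract: it returns 0 exactly when the transaction was still pending, in which case the core has already run complete_transaction() with RCODE_CANCELLED and the ORB is finished through the normal path. Only a non-zero return (the write request completed, yet the target never produced a status write) leaves an ORB for sbp2 to finish by hand. Annotated sketch of the loop above:

    list_for_each_entry_safe(orb, next, &list, link) {
            retval = 0;               /* at least one ORB was outstanding */
            if (fw_cancel_transaction(device->card, &orb->t) == 0)
                    continue;         /* core already completed it as cancelled */

            /* Transaction already done, but no status write arrived:
             * report the cancellation ourselves. */
            orb->rcode = RCODE_CANCELLED;
            orb->callback(orb, NULL);
    }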
@@ -835,8 +826,6 @@ static void sbp2_target_put(struct sbp2_target *tgt) | |||
835 | kref_put(&tgt->kref, sbp2_release_target); | 826 | kref_put(&tgt->kref, sbp2_release_target); |
836 | } | 827 | } |
837 | 828 | ||
838 | static struct workqueue_struct *sbp2_wq; | ||
839 | |||
840 | /* | 829 | /* |
841 | * Always get the target's kref when scheduling work on one of its units. | 830 | * Always get the target's kref when scheduling work on one of its units. |
842 | * Each workqueue job is responsible for calling sbp2_target_put() upon return. | 831 | * Each workqueue job is responsible for calling sbp2_target_put() upon return. |
@@ -844,7 +833,7 @@ static struct workqueue_struct *sbp2_wq; | |||
844 | static void sbp2_queue_work(struct sbp2_logical_unit *lu, unsigned long delay) | 833 | static void sbp2_queue_work(struct sbp2_logical_unit *lu, unsigned long delay) |
845 | { | 834 | { |
846 | sbp2_target_get(lu->tgt); | 835 | sbp2_target_get(lu->tgt); |
847 | if (!queue_delayed_work(sbp2_wq, &lu->work, delay)) | 836 | if (!queue_delayed_work(fw_workqueue, &lu->work, delay)) |
848 | sbp2_target_put(lu->tgt); | 837 | sbp2_target_put(lu->tgt); |
849 | } | 838 | } |
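sbp2's private single-threaded workqueue is replaced by fw_workqueue, the queue firewire-core exports for its client drivers. The reference juggling around queue_delayed_work() stays the same and is a reusable idiom; a generic sketch with a hypothetical kref-counted object obj and release function obj_release:

    /* Take a reference on behalf of the work item before queueing it.
     * queue_delayed_work() returns false if the item was already queued,
     * in which case no new job owns the reference and it is dropped. */
    kref_get(&obj->kref);
    if (!queue_delayed_work(fw_workqueue, &obj->work, delay))
            kref_put(&obj->kref, obj_release);
    /* The work function ends with kref_put(&obj->kref, obj_release). */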
850 | 839 | ||
@@ -1403,7 +1392,7 @@ static void complete_command_orb(struct sbp2_orb *base_orb, | |||
1403 | sbp2_unmap_scatterlist(device->card->device, orb); | 1392 | sbp2_unmap_scatterlist(device->card->device, orb); |
1404 | 1393 | ||
1405 | orb->cmd->result = result; | 1394 | orb->cmd->result = result; |
1406 | orb->done(orb->cmd); | 1395 | orb->cmd->scsi_done(orb->cmd); |
1407 | } | 1396 | } |
1408 | 1397 | ||
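Command completion now reaches the midlayer through the hook the midlayer itself planted in the command, rather than through a copy stashed in the ORB; the tail of complete_command_orb() with that in view:

    orb->cmd->result = result;        /* e.g. DID_OK or DID_BUS_BUSY << 16 */
    orb->cmd->scsi_done(orb->cmd);    /* hook set by the SCSI midlayer */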
1409 | static int sbp2_map_scatterlist(struct sbp2_command_orb *orb, | 1398 | static int sbp2_map_scatterlist(struct sbp2_command_orb *orb, |
@@ -1468,7 +1457,8 @@ static int sbp2_map_scatterlist(struct sbp2_command_orb *orb, | |||
1468 | 1457 | ||
1469 | /* SCSI stack integration */ | 1458 | /* SCSI stack integration */ |
1470 | 1459 | ||
1471 | static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done) | 1460 | static int sbp2_scsi_queuecommand(struct Scsi_Host *shost, |
1461 | struct scsi_cmnd *cmd) | ||
1472 | { | 1462 | { |
1473 | struct sbp2_logical_unit *lu = cmd->device->hostdata; | 1463 | struct sbp2_logical_unit *lu = cmd->device->hostdata; |
1474 | struct fw_device *device = target_device(lu->tgt); | 1464 | struct fw_device *device = target_device(lu->tgt); |
@@ -1482,7 +1472,7 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done) | |||
1482 | if (cmd->sc_data_direction == DMA_BIDIRECTIONAL) { | 1472 | if (cmd->sc_data_direction == DMA_BIDIRECTIONAL) { |
1483 | fw_error("Can't handle DMA_BIDIRECTIONAL, rejecting command\n"); | 1473 | fw_error("Can't handle DMA_BIDIRECTIONAL, rejecting command\n"); |
1484 | cmd->result = DID_ERROR << 16; | 1474 | cmd->result = DID_ERROR << 16; |
1485 | done(cmd); | 1475 | cmd->scsi_done(cmd); |
1486 | return 0; | 1476 | return 0; |
1487 | } | 1477 | } |
1488 | 1478 | ||
@@ -1495,11 +1485,8 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done) | |||
1495 | /* Initialize rcode to something not RCODE_COMPLETE. */ | 1485 | /* Initialize rcode to something not RCODE_COMPLETE. */ |
1496 | orb->base.rcode = -1; | 1486 | orb->base.rcode = -1; |
1497 | kref_init(&orb->base.kref); | 1487 | kref_init(&orb->base.kref); |
1498 | 1488 | orb->lu = lu; | |
1499 | orb->lu = lu; | 1489 | orb->cmd = cmd; |
1500 | orb->done = done; | ||
1501 | orb->cmd = cmd; | ||
1502 | |||
1503 | orb->request.next.high = cpu_to_be32(SBP2_ORB_NULL); | 1490 | orb->request.next.high = cpu_to_be32(SBP2_ORB_NULL); |
1504 | orb->request.misc = cpu_to_be32( | 1491 | orb->request.misc = cpu_to_be32( |
1505 | COMMAND_ORB_MAX_PAYLOAD(lu->tgt->max_payload) | | 1492 | COMMAND_ORB_MAX_PAYLOAD(lu->tgt->max_payload) | |
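The new prototype follows the SCSI midlayer's host-lock-free queuecommand convention: the host is passed explicitly, the host lock is no longer held around the call, and the completion hook is read from cmd->scsi_done instead of being passed as a parameter. A minimal standalone sketch of that convention; my_dev and my_submit are hypothetical stand-ins, not sbp2 code:

    #include <scsi/scsi.h>
    #include <scsi/scsi_cmnd.h>
    #include <scsi/scsi_host.h>

    struct my_dev;                                   /* hypothetical driver state */
    static int my_submit(struct my_dev *dev, struct scsi_cmnd *cmd);

    static int my_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
    {
            struct my_dev *dev = shost_priv(shost);  /* per-host private data */

            if (my_submit(dev, cmd) < 0) {
                    cmd->result = DID_ERROR << 16;
                    cmd->scsi_done(cmd);             /* complete failed cmd now */
            }
            return 0;  /* accepted; success completes later via cmd->scsi_done() */
    }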
@@ -1656,17 +1643,12 @@ MODULE_ALIAS("sbp2"); | |||
1656 | 1643 | ||
1657 | static int __init sbp2_init(void) | 1644 | static int __init sbp2_init(void) |
1658 | { | 1645 | { |
1659 | sbp2_wq = create_singlethread_workqueue(KBUILD_MODNAME); | ||
1660 | if (!sbp2_wq) | ||
1661 | return -ENOMEM; | ||
1662 | |||
1663 | return driver_register(&sbp2_driver.driver); | 1646 | return driver_register(&sbp2_driver.driver); |
1664 | } | 1647 | } |
1665 | 1648 | ||
1666 | static void __exit sbp2_cleanup(void) | 1649 | static void __exit sbp2_cleanup(void) |
1667 | { | 1650 | { |
1668 | driver_unregister(&sbp2_driver.driver); | 1651 | driver_unregister(&sbp2_driver.driver); |
1669 | destroy_workqueue(sbp2_wq); | ||
1670 | } | 1652 | } |
1671 | 1653 | ||
1672 | module_init(sbp2_init); | 1654 | module_init(sbp2_init); |