path: root/drivers/firewire
Diffstat (limited to 'drivers/firewire')
-rw-r--r--  drivers/firewire/Kconfig          |  3
-rw-r--r--  drivers/firewire/core-card.c      | 21
-rw-r--r--  drivers/firewire/core-cdev.c      | 54
-rw-r--r--  drivers/firewire/core-device.c    | 22
-rw-r--r--  drivers/firewire/core-iso.c       | 23
-rw-r--r--  drivers/firewire/core-topology.c  |  2
-rw-r--r--  drivers/firewire/core.h           |  3
-rw-r--r--  drivers/firewire/ohci.c           | 85
-rw-r--r--  drivers/firewire/sbp2.c           | 11
9 files changed, 140 insertions, 84 deletions
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index 0c56989cd907..2be6f4520772 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -75,7 +75,8 @@ config FIREWIRE_NOSY
 	  The following cards are known to be based on PCILynx or PCILynx-2:
 	  IOI IOI-1394TT (PCI card), Unibrain Fireboard 400 PCI Lynx-2
 	  (PCI card), Newer Technology FireWire 2 Go (CardBus card),
-	  Apple Power Mac G3 blue & white (onboard controller).
+	  Apple Power Mac G3 blue & white and G4 with PCI graphics
+	  (onboard controller).
 
 	  To compile this driver as a module, say M here: The module will be
 	  called nosy. Source code of a userspace interface to nosy, called
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index 24ff35511e2b..3c44fbc81acb 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -75,6 +75,13 @@ static size_t config_rom_length = 1 + 4 + 1 + 1;
 #define BIB_IRMC		((1) << 31)
 #define NODE_CAPABILITIES	0x0c0083c0 /* per IEEE 1394 clause 8.3.2.6.5.2 */
 
+/*
+ * IEEE-1394 specifies a default SPLIT_TIMEOUT value of 800 cycles (100 ms),
+ * but we have to make it longer because there are many devices whose firmware
+ * is just too slow for that.
+ */
+#define DEFAULT_SPLIT_TIMEOUT	(2 * 8000)
+
 #define CANON_OUI		0x000085
 
 static void generate_config_rom(struct fw_card *card, __be32 *config_rom)
@@ -233,7 +240,7 @@ static void br_work(struct work_struct *work)
 
 	/* Delay for 2s after last reset per IEEE 1394 clause 8.2.1. */
 	if (card->reset_jiffies != 0 &&
-	    time_is_after_jiffies(card->reset_jiffies + 2 * HZ)) {
+	    time_before64(get_jiffies_64(), card->reset_jiffies + 2 * HZ)) {
 		if (!schedule_delayed_work(&card->br_work, 2 * HZ))
 			fw_card_put(card);
 		return;
@@ -316,7 +323,8 @@ static void bm_work(struct work_struct *work)
 	irm_id = card->irm_node->node_id;
 	local_id = card->local_node->node_id;
 
-	grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 8));
+	grace = time_after64(get_jiffies_64(),
+			     card->reset_jiffies + DIV_ROUND_UP(HZ, 8));
 
 	if ((is_next_generation(generation, card->bm_generation) &&
 	     !card->bm_abdicate) ||
@@ -511,10 +519,11 @@ void fw_card_initialize(struct fw_card *card,
 	card->device = device;
 	card->current_tlabel = 0;
 	card->tlabel_mask = 0;
-	card->split_timeout_hi = 0;
-	card->split_timeout_lo = 800 << 19;
-	card->split_timeout_cycles = 800;
-	card->split_timeout_jiffies = DIV_ROUND_UP(HZ, 10);
+	card->split_timeout_hi = DEFAULT_SPLIT_TIMEOUT / 8000;
+	card->split_timeout_lo = (DEFAULT_SPLIT_TIMEOUT % 8000) << 19;
+	card->split_timeout_cycles = DEFAULT_SPLIT_TIMEOUT;
+	card->split_timeout_jiffies =
+		DIV_ROUND_UP(DEFAULT_SPLIT_TIMEOUT * HZ, 8000);
 	card->color = 0;
 	card->broadcast_channel = BROADCAST_CHANNEL_INITIAL;
 
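The new default corresponds to 2 seconds: SPLIT_TIMEOUT is counted in 125 µs isochronous cycles, 8000 per second, with the seconds going into split_timeout_hi and the remaining cycles into bits 31..19 of split_timeout_lo. A quick user-space sketch of the same arithmetic (HZ value chosen arbitrarily, not driver code):

	#include <stdio.h>

	#define DEFAULT_SPLIT_TIMEOUT	(2 * 8000)	/* cycles; one cycle = 125 us */
	#define HZ			250		/* example value only */
	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned int hi = DEFAULT_SPLIT_TIMEOUT / 8000;		/* seconds: 2 */
		unsigned int lo = (DEFAULT_SPLIT_TIMEOUT % 8000) << 19;	/* remainder: 0 */
		unsigned int timeout_jiffies =
			DIV_ROUND_UP(DEFAULT_SPLIT_TIMEOUT * HZ, 8000);

		/* The removed values encoded 800 cycles: hi = 0, lo = 800 << 19, ~100 ms. */
		printf("hi=%u lo=0x%08x timeout=%u jiffies (%u ms)\n",
		       hi, lo, timeout_jiffies, timeout_jiffies * 1000 / HZ);
		return 0;
	}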
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 48ae712e2101..62ac111af243 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -64,6 +64,7 @@ struct client {
 	struct idr resource_idr;
 	struct list_head event_list;
 	wait_queue_head_t wait;
+	wait_queue_head_t tx_flush_wait;
 	u64 bus_reset_closure;
 
 	struct fw_iso_context *iso_context;
@@ -251,6 +252,7 @@ static int fw_device_op_open(struct inode *inode, struct file *file)
 	idr_init(&client->resource_idr);
 	INIT_LIST_HEAD(&client->event_list);
 	init_waitqueue_head(&client->wait);
+	init_waitqueue_head(&client->tx_flush_wait);
 	INIT_LIST_HEAD(&client->phy_receiver_link);
 	kref_init(&client->kref);
 
@@ -520,10 +522,6 @@ static int release_client_resource(struct client *client, u32 handle,
 static void release_transaction(struct client *client,
 				struct client_resource *resource)
 {
-	struct outbound_transaction_resource *r = container_of(resource,
-			struct outbound_transaction_resource, resource);
-
-	fw_cancel_transaction(client->device->card, &r->transaction);
 }
 
 static void complete_transaction(struct fw_card *card, int rcode,
@@ -540,22 +538,9 @@ static void complete_transaction(struct fw_card *card, int rcode,
 		memcpy(rsp->data, payload, rsp->length);
 
 	spin_lock_irqsave(&client->lock, flags);
-	/*
-	 * 1. If called while in shutdown, the idr tree must be left untouched.
-	 *    The idr handle will be removed and the client reference will be
-	 *    dropped later.
-	 * 2. If the call chain was release_client_resource ->
-	 *    release_transaction -> complete_transaction (instead of a normal
-	 *    conclusion of the transaction), i.e. if this resource was already
-	 *    unregistered from the idr, the client reference will be dropped
-	 *    by release_client_resource and we must not drop it here.
-	 */
-	if (!client->in_shutdown &&
-	    idr_find(&client->resource_idr, e->r.resource.handle)) {
-		idr_remove(&client->resource_idr, e->r.resource.handle);
-		/* Drop the idr's reference */
-		client_put(client);
-	}
+	idr_remove(&client->resource_idr, e->r.resource.handle);
+	if (client->in_shutdown)
+		wake_up(&client->tx_flush_wait);
 	spin_unlock_irqrestore(&client->lock, flags);
 
 	rsp->type = FW_CDEV_EVENT_RESPONSE;
@@ -575,7 +560,7 @@ static void complete_transaction(struct fw_card *card, int rcode,
 	queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length,
 		    NULL, 0);
 
-	/* Drop the transaction callback's reference */
+	/* Drop the idr's reference */
 	client_put(client);
 }
 
@@ -614,9 +599,6 @@ static int init_request(struct client *client,
 	if (ret < 0)
 		goto failed;
 
-	/* Get a reference for the transaction callback */
-	client_get(client);
-
 	fw_send_request(client->device->card, &e->r.transaction,
 			request->tcode, destination_id, request->generation,
 			speed, request->offset, e->response.data,
@@ -1223,7 +1205,8 @@ static void iso_resource_work(struct work_struct *work)
 	todo = r->todo;
 	/* Allow 1000ms grace period for other reallocations. */
 	if (todo == ISO_RES_ALLOC &&
-	    time_is_after_jiffies(client->device->card->reset_jiffies + HZ)) {
+	    time_before64(get_jiffies_64(),
+			  client->device->card->reset_jiffies + HZ)) {
 		schedule_iso_resource(r, DIV_ROUND_UP(HZ, 3));
 		skip = true;
 	} else {
@@ -1678,6 +1661,25 @@ static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
 	return ret;
 }
 
+static int is_outbound_transaction_resource(int id, void *p, void *data)
+{
+	struct client_resource *resource = p;
+
+	return resource->release == release_transaction;
+}
+
+static int has_outbound_transactions(struct client *client)
+{
+	int ret;
+
+	spin_lock_irq(&client->lock);
+	ret = idr_for_each(&client->resource_idr,
+			   is_outbound_transaction_resource, NULL);
+	spin_unlock_irq(&client->lock);
+
+	return ret;
+}
+
 static int shutdown_resource(int id, void *p, void *data)
 {
 	struct client_resource *resource = p;
@@ -1713,6 +1715,8 @@ static int fw_device_op_release(struct inode *inode, struct file *file)
 	client->in_shutdown = true;
 	spin_unlock_irq(&client->lock);
 
+	wait_event(client->tx_flush_wait, !has_outbound_transactions(client));
+
 	idr_for_each(&client->resource_idr, shutdown_resource, client);
 	idr_remove_all(&client->resource_idr);
 	idr_destroy(&client->resource_idr);
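In effect, fw_device_op_release() and complete_transaction() now implement a small flush handshake: the completion side removes its idr entry and issues the wake-up while still holding client->lock, and the wait condition re-checks the idr under the same lock, so the release path only tears down resource_idr once no outbound transaction resource is left. Condensed to its essentials (a sketch of the ordering only, not additional driver code):

	/* release side */
	spin_lock_irq(&client->lock);
	client->in_shutdown = true;
	spin_unlock_irq(&client->lock);
	wait_event(client->tx_flush_wait, !has_outbound_transactions(client));
	/* safe to tear down resource_idr: no outbound transactions remain */

	/* completion side, per transaction, in complete_transaction() */
	spin_lock_irqsave(&client->lock, flags);
	idr_remove(&client->resource_idr, handle);
	if (client->in_shutdown)
		wake_up(&client->tx_flush_wait);
	spin_unlock_irqrestore(&client->lock, flags);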
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index 6113b896e790..9a262439e3a7 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -747,7 +747,8 @@ static void fw_device_shutdown(struct work_struct *work)
 		container_of(work, struct fw_device, work.work);
 	int minor = MINOR(device->device.devt);
 
-	if (time_is_after_jiffies(device->card->reset_jiffies + SHUTDOWN_DELAY)
+	if (time_before64(get_jiffies_64(),
+			  device->card->reset_jiffies + SHUTDOWN_DELAY)
 	    && !list_empty(&device->card->link)) {
 		schedule_delayed_work(&device->work, SHUTDOWN_DELAY);
 		return;
@@ -954,8 +955,9 @@ static void fw_device_init(struct work_struct *work)
 			device->config_rom_retries++;
 			schedule_delayed_work(&device->work, RETRY_DELAY);
 		} else {
-			fw_notify("giving up on config rom for node id %x\n",
-				  device->node_id);
+			if (device->node->link_on)
+				fw_notify("giving up on config rom for node id %x\n",
+					  device->node_id);
 			if (device->node == device->card->root_node)
 				fw_schedule_bm_work(device->card, 0);
 			fw_device_release(&device->device);
@@ -1168,9 +1170,12 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
 
 	switch (event) {
 	case FW_NODE_CREATED:
-	case FW_NODE_LINK_ON:
-		if (!node->link_on)
-			break;
+		/*
+		 * Attempt to scan the node, regardless whether its self ID has
+		 * the L (link active) flag set or not. Some broken devices
+		 * send L=0 but have an up-and-running link; others send L=1
+		 * without actually having a link.
+		 */
 create:
 		device = kzalloc(sizeof(*device), GFP_ATOMIC);
 		if (device == NULL)
@@ -1213,6 +1218,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
 		break;
 
 	case FW_NODE_INITIATED_RESET:
+	case FW_NODE_LINK_ON:
 		device = node->data;
 		if (device == NULL)
 			goto create;
@@ -1230,10 +1236,10 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
 		break;
 
 	case FW_NODE_UPDATED:
-		if (!node->link_on || node->data == NULL)
+		device = node->data;
+		if (device == NULL)
 			break;
 
-		device = node->data;
 		device->node_id = node->node_id;
 		smp_wmb(); /* update node_id before generation */
 		device->generation = card->generation;
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index c003fa4e2db1..481056df9268 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -235,45 +235,45 @@ static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
 static int manage_channel(struct fw_card *card, int irm_id, int generation,
 		u32 channels_mask, u64 offset, bool allocate, __be32 data[2])
 {
-	__be32 c, all, old;
-	int i, ret = -EIO, retry = 5;
+	__be32 bit, all, old;
+	int channel, ret = -EIO, retry = 5;
 
 	old = all = allocate ? cpu_to_be32(~0) : 0;
 
-	for (i = 0; i < 32; i++) {
-		if (!(channels_mask & 1 << i))
+	for (channel = 0; channel < 32; channel++) {
+		if (!(channels_mask & 1 << channel))
 			continue;
 
 		ret = -EBUSY;
 
-		c = cpu_to_be32(1 << (31 - i));
-		if ((old & c) != (all & c))
+		bit = cpu_to_be32(1 << (31 - channel));
+		if ((old & bit) != (all & bit))
 			continue;
 
 		data[0] = old;
-		data[1] = old ^ c;
+		data[1] = old ^ bit;
 		switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
 					   irm_id, generation, SCODE_100,
 					   offset, data, 8)) {
 		case RCODE_GENERATION:
 			/* A generation change frees all channels. */
-			return allocate ? -EAGAIN : i;
+			return allocate ? -EAGAIN : channel;
 
 		case RCODE_COMPLETE:
 			if (data[0] == old)
-				return i;
+				return channel;
 
 			old = data[0];
 
 			/* Is the IRM 1394a-2000 compliant? */
-			if ((data[0] & c) == (data[1] & c))
+			if ((data[0] & bit) == (data[1] & bit))
 				continue;
 
 			/* 1394-1995 IRM, fall through to retry. */
 		default:
 			if (retry) {
 				retry--;
-				i--;
+				channel--;
 			} else {
 				ret = -EIO;
 			}
@@ -362,3 +362,4 @@ void fw_iso_resource_manage(struct fw_card *card, int generation,
 		*channel = ret;
 	}
 }
+EXPORT_SYMBOL(fw_iso_resource_manage);
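The renamed variables make the register layout easier to follow: the IRM's CHANNELS_AVAILABLE bitmap stores channel 0 in the most significant bit, so channel N corresponds to bit 31 - N, and a channel is claimed (or freed) by a lock compare-swap that flips exactly that bit. A minimal user-space sketch of the mapping and of the values handed to the lock transaction (helper name hypothetical):

	#include <stdio.h>

	/* channel N <-> bit (31 - N) of CHANNELS_AVAILABLE_HI; channel 0 is the MSB */
	static unsigned int channel_bit(int channel)
	{
		return 1u << (31 - channel);
	}

	int main(void)
	{
		unsigned int old = ~0u;		/* all 32 channels free */
		int channel = 5;

		/* A compare-swap whose argument is "old" and whose data is
		 * "old ^ bit" claims the channel only if nobody else changed
		 * the register in the meantime. */
		unsigned int arg  = old;
		unsigned int data = old ^ channel_bit(channel);

		printf("channel %d: bit 0x%08x, 0x%08x -> 0x%08x\n",
		       channel, channel_bit(channel), arg, data);
		return 0;
	}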
diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c
index 09be1a635505..193ed9233144 100644
--- a/drivers/firewire/core-topology.c
+++ b/drivers/firewire/core-topology.c
@@ -545,7 +545,7 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
 	 */
 	smp_wmb();
 	card->generation = generation;
-	card->reset_jiffies = jiffies;
+	card->reset_jiffies = get_jiffies_64();
 	card->bm_node_id = 0xffff;
 	card->bm_abdicate = bm_abdicate;
 	fw_schedule_bm_work(card, 0);
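card->reset_jiffies is a stored timestamp that may be examined arbitrarily long after the bus reset, which is why this series switches it and all of its comparisons above from 32-bit to 64-bit jiffies: on 32-bit machines plain jiffies wraps after roughly 49.7 days at HZ=1000, and time_after()/time_is_after_jiffies() are only safe within half of that window. A minimal sketch of the resulting pattern:

	#include <linux/jiffies.h>

	static u64 reset_stamp;			/* analogous to card->reset_jiffies */

	static void note_bus_reset(void)
	{
		reset_stamp = get_jiffies_64();	/* wrap-free 64-bit timestamp */
	}

	static bool within_grace_period(unsigned long grace)
	{
		/* correct no matter how long ago note_bus_reset() ran */
		return time_before64(get_jiffies_64(), reset_stamp + grace);
	}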
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index f8dfcf1c6cbe..25e729cde2f7 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -147,9 +147,6 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event);
 /* -iso */
 
 int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma);
-void fw_iso_resource_manage(struct fw_card *card, int generation,
-			    u64 channels_mask, int *channel, int *bandwidth,
-			    bool allocate, __be32 buffer[2]);
 
 
 /* -topology */
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index bd3c61b6dd8d..f903d7b6f34a 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -208,9 +208,11 @@ struct fw_ohci {
 	struct context at_request_ctx;
 	struct context at_response_ctx;
 
+	u32 it_context_support;
 	u32 it_context_mask;     /* unoccupied IT contexts */
 	struct iso_context *it_context_list;
 	u64 ir_context_channels; /* unoccupied channels */
+	u32 ir_context_support;
 	u32 ir_context_mask;     /* unoccupied IR contexts */
 	struct iso_context *ir_context_list;
 	u64 mc_channels; /* channels in use by the multichannel IR context */
@@ -338,7 +340,7 @@ static void log_irqs(u32 evt)
 	    !(evt & OHCI1394_busReset))
 		return;
 
-	fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
+	fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
 		  evt & OHCI1394_selfIDComplete ? " selfID" : "",
 		  evt & OHCI1394_RQPkt ? " AR_req" : "",
 		  evt & OHCI1394_RSPkt ? " AR_resp" : "",
@@ -351,6 +353,7 @@ static void log_irqs(u32 evt)
 		  evt & OHCI1394_cycle64Seconds ? " cycle64Seconds" : "",
 		  evt & OHCI1394_cycleInconsistent ? " cycleInconsistent" : "",
 		  evt & OHCI1394_regAccessFail ? " regAccessFail" : "",
+		  evt & OHCI1394_unrecoverableError ? " unrecoverableError" : "",
 		  evt & OHCI1394_busReset ? " busReset" : "",
 		  evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt |
 			  OHCI1394_RSPkt | OHCI1394_reqTxComplete |
@@ -1326,21 +1329,8 @@ static int at_context_queue_packet(struct context *ctx,
 					  DESCRIPTOR_IRQ_ALWAYS |
 					  DESCRIPTOR_BRANCH_ALWAYS);
 
-	/*
-	 * If the controller and packet generations don't match, we need to
-	 * bail out and try again. If IntEvent.busReset is set, the AT context
-	 * is halted, so appending to the context and trying to run it is
-	 * futile. Most controllers do the right thing and just flush the AT
-	 * queue (per section 7.2.3.2 of the OHCI 1.1 specification), but
-	 * some controllers (like a JMicron JMB381 PCI-e) misbehave and wind
-	 * up stalling out. So we just bail out in software and try again
-	 * later, and everyone is happy.
-	 * FIXME: Test of IntEvent.busReset may no longer be necessary since we
-	 * flush AT queues in bus_reset_tasklet.
-	 * FIXME: Document how the locking works.
-	 */
-	if (ohci->generation != packet->generation ||
-	    reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
+	/* FIXME: Document how the locking works. */
+	if (ohci->generation != packet->generation) {
 		if (packet->payload_mapped)
 			dma_unmap_single(ohci->card.device, payload_bus,
 					 packet->payload_length, DMA_TO_DEVICE);
@@ -1590,6 +1580,47 @@ static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
 
 }
 
+static void detect_dead_context(struct fw_ohci *ohci,
+				const char *name, unsigned int regs)
+{
+	u32 ctl;
+
+	ctl = reg_read(ohci, CONTROL_SET(regs));
+	if (ctl & CONTEXT_DEAD) {
+#ifdef CONFIG_FIREWIRE_OHCI_DEBUG
+		fw_error("DMA context %s has stopped, error code: %s\n",
+			 name, evts[ctl & 0x1f]);
+#else
+		fw_error("DMA context %s has stopped, error code: %#x\n",
+			 name, ctl & 0x1f);
+#endif
+	}
+}
+
+static void handle_dead_contexts(struct fw_ohci *ohci)
+{
+	unsigned int i;
+	char name[8];
+
+	detect_dead_context(ohci, "ATReq", OHCI1394_AsReqTrContextBase);
+	detect_dead_context(ohci, "ATRsp", OHCI1394_AsRspTrContextBase);
+	detect_dead_context(ohci, "ARReq", OHCI1394_AsReqRcvContextBase);
+	detect_dead_context(ohci, "ARRsp", OHCI1394_AsRspRcvContextBase);
+	for (i = 0; i < 32; ++i) {
+		if (!(ohci->it_context_support & (1 << i)))
+			continue;
+		sprintf(name, "IT%u", i);
+		detect_dead_context(ohci, name, OHCI1394_IsoXmitContextBase(i));
+	}
+	for (i = 0; i < 32; ++i) {
+		if (!(ohci->ir_context_support & (1 << i)))
+			continue;
+		sprintf(name, "IR%u", i);
+		detect_dead_context(ohci, name, OHCI1394_IsoRcvContextBase(i));
+	}
+	/* TODO: maybe try to flush and restart the dead contexts */
+}
+
 static u32 cycle_timer_ticks(u32 cycle_timer)
 {
 	u32 ticks;
@@ -1904,6 +1935,9 @@ static irqreturn_t irq_handler(int irq, void *data)
 			fw_notify("isochronous cycle inconsistent\n");
 	}
 
+	if (unlikely(event & OHCI1394_unrecoverableError))
+		handle_dead_contexts(ohci);
+
 	if (event & OHCI1394_cycle64Seconds) {
 		spin_lock(&ohci->lock);
 		update_bus_time(ohci);
@@ -2141,7 +2175,9 @@ static int ohci_enable(struct fw_card *card,
 		OHCI1394_selfIDComplete |
 		OHCI1394_regAccessFail |
 		OHCI1394_cycle64Seconds |
-		OHCI1394_cycleInconsistent | OHCI1394_cycleTooLong |
+		OHCI1394_cycleInconsistent |
+		OHCI1394_unrecoverableError |
+		OHCI1394_cycleTooLong |
 		OHCI1394_masterIntEnable;
 	if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
 		irqs |= OHCI1394_busReset;
@@ -2657,6 +2693,10 @@ static int ohci_start_iso(struct fw_iso_context *base,
 	u32 control = IR_CONTEXT_ISOCH_HEADER, match;
 	int index;
 
+	/* the controller cannot start without any queued packets */
+	if (ctx->context.last->branch_address == 0)
+		return -ENODATA;
+
 	switch (ctx->base.type) {
 	case FW_ISO_CONTEXT_TRANSMIT:
 		index = ctx - ohci->it_context_list;
@@ -2715,6 +2755,7 @@ static int ohci_stop_iso(struct fw_iso_context *base)
 	}
 	flush_writes(ohci);
 	context_stop(&ctx->context);
+	tasklet_kill(&ctx->context.tasklet);
 
 	return 0;
 }
@@ -3207,15 +3248,17 @@ static int __devinit pci_probe(struct pci_dev *dev,
 
 	reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
 	ohci->ir_context_channels = ~0ULL;
-	ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
+	ohci->ir_context_support = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
 	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
+	ohci->ir_context_mask = ohci->ir_context_support;
 	ohci->n_ir = hweight32(ohci->ir_context_mask);
 	size = sizeof(struct iso_context) * ohci->n_ir;
 	ohci->ir_context_list = kzalloc(size, GFP_KERNEL);
 
 	reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
-	ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
+	ohci->it_context_support = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
 	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
+	ohci->it_context_mask = ohci->it_context_support;
 	ohci->n_it = hweight32(ohci->it_context_mask);
 	size = sizeof(struct iso_context) * ohci->n_it;
 	ohci->it_context_list = kzalloc(size, GFP_KERNEL);
@@ -3266,7 +3309,7 @@ static int __devinit pci_probe(struct pci_dev *dev,
  fail_disable:
 	pci_disable_device(dev);
  fail_free:
-	kfree(&ohci->card);
+	kfree(ohci);
 	pmac_ohci_off(dev);
  fail:
 	if (err == -ENOMEM)
@@ -3310,7 +3353,7 @@ static void pci_remove(struct pci_dev *dev)
 	pci_iounmap(dev, ohci->registers);
 	pci_release_region(dev, 0);
 	pci_disable_device(dev);
-	kfree(&ohci->card);
+	kfree(ohci);
 	pmac_ohci_off(dev);
 
 	fw_notify("Removed fw-ohci device.\n");
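The new it_context_support/ir_context_support fields keep the probe-time answer from the IsoXmitIntMask/IsoRecvIntMask registers, i.e. which contexts the controller implements at all, while the existing *_context_mask fields continue to track which of those are currently unallocated; that is what lets handle_dead_contexts() scan every implemented context instead of only the free ones. A small user-space sketch of the distinction (mirroring, not copying, the driver's bookkeeping):

	#include <stdio.h>
	#include <strings.h>	/* ffs() */

	int main(void)
	{
		unsigned int support = 0x000000ff;	/* say, 8 implemented IT contexts */
		unsigned int mask = support;		/* all of them currently free */
		int index, i;

		/* allocation: a context leaves the mask but stays in "support" */
		index = ffs(mask) - 1;			/* -1 when none is free */
		if (index >= 0)
			mask &= ~(1u << index);

		/* error handling: iterate over "support", which never shrinks */
		for (i = 0; i < 32; i++)
			if (support & (1u << i))
				printf("would check IT context %d (free: %s)\n",
				       i, (mask & (1u << i)) ? "yes" : "no");
		return 0;
	}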
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index afa576a75a8e..77ed589b360d 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -472,18 +472,12 @@ static void complete_transaction(struct fw_card *card, int rcode,
 	 * So this callback only sets the rcode if it hasn't already
 	 * been set and only does the cleanup if the transaction
 	 * failed and we didn't already get a status write.
-	 *
-	 * Here we treat RCODE_CANCELLED like RCODE_COMPLETE because some
-	 * OXUF936QSE firmwares occasionally respond after Split_Timeout and
-	 * complete the ORB just fine. Note, we also get RCODE_CANCELLED
-	 * from sbp2_cancel_orbs() if fw_cancel_transaction() == 0.
 	 */
 	spin_lock_irqsave(&card->lock, flags);
 
 	if (orb->rcode == -1)
 		orb->rcode = rcode;
-
-	if (orb->rcode != RCODE_COMPLETE && orb->rcode != RCODE_CANCELLED) {
+	if (orb->rcode != RCODE_COMPLETE) {
 		list_del(&orb->link);
 		spin_unlock_irqrestore(&card->lock, flags);
 
@@ -532,7 +526,8 @@ static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu)
 
 	list_for_each_entry_safe(orb, next, &list, link) {
 		retval = 0;
-		fw_cancel_transaction(device->card, &orb->t);
+		if (fw_cancel_transaction(device->card, &orb->t) == 0)
+			continue;
 
 		orb->rcode = RCODE_CANCELLED;
 		orb->callback(orb, NULL);