Diffstat (limited to 'drivers/firewire/sbp2.c')
-rw-r--r--  drivers/firewire/sbp2.c | 46
1 files changed, 14 insertions, 32 deletions
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index bfae4b309791..41841a3e3f99 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -125,9 +125,6 @@ MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0"
125 ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE) 125 ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE)
126 ", or a combination)"); 126 ", or a combination)");
127 127
128/* I don't know why the SCSI stack doesn't define something like this... */
129typedef void (*scsi_done_fn_t)(struct scsi_cmnd *);
130
131static const char sbp2_driver_name[] = "sbp2"; 128static const char sbp2_driver_name[] = "sbp2";
132 129
133/* 130/*
@@ -261,7 +258,6 @@ struct sbp2_orb {
 	struct kref kref;
 	dma_addr_t request_bus;
 	int rcode;
-	struct sbp2_pointer pointer;
 	void (*callback)(struct sbp2_orb * orb, struct sbp2_status * status);
 	struct list_head link;
 };
@@ -314,7 +310,6 @@ struct sbp2_command_orb {
 		u8 command_block[SBP2_MAX_CDB_SIZE];
 	} request;
 	struct scsi_cmnd *cmd;
-	scsi_done_fn_t done;
 	struct sbp2_logical_unit *lu;
 
 	struct sbp2_pointer page_table[SG_ALL] __attribute__((aligned(8)));
@@ -472,18 +467,12 @@ static void complete_transaction(struct fw_card *card, int rcode,
 	 * So this callback only sets the rcode if it hasn't already
 	 * been set and only does the cleanup if the transaction
 	 * failed and we didn't already get a status write.
-	 *
-	 * Here we treat RCODE_CANCELLED like RCODE_COMPLETE because some
-	 * OXUF936QSE firmwares occasionally respond after Split_Timeout and
-	 * complete the ORB just fine.  Note, we also get RCODE_CANCELLED
-	 * from sbp2_cancel_orbs() if fw_cancel_transaction() == 0.
 	 */
 	spin_lock_irqsave(&card->lock, flags);
 
 	if (orb->rcode == -1)
 		orb->rcode = rcode;
-
-	if (orb->rcode != RCODE_COMPLETE && orb->rcode != RCODE_CANCELLED) {
+	if (orb->rcode != RCODE_COMPLETE) {
 		list_del(&orb->link);
 		spin_unlock_irqrestore(&card->lock, flags);
 
@@ -500,10 +489,11 @@ static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
 			  int node_id, int generation, u64 offset)
 {
 	struct fw_device *device = target_device(lu->tgt);
+	struct sbp2_pointer orb_pointer;
 	unsigned long flags;
 
-	orb->pointer.high = 0;
-	orb->pointer.low = cpu_to_be32(orb->request_bus);
+	orb_pointer.high = 0;
+	orb_pointer.low = cpu_to_be32(orb->request_bus);
 
 	spin_lock_irqsave(&device->card->lock, flags);
 	list_add_tail(&orb->link, &lu->orb_list);
@@ -514,7 +504,7 @@ static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
 
 	fw_send_request(device->card, &orb->t, TCODE_WRITE_BLOCK_REQUEST,
 			node_id, generation, device->max_speed, offset,
-			&orb->pointer, 8, complete_transaction, orb);
+			&orb_pointer, 8, complete_transaction, orb);
 }
 
 static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu)
@@ -532,7 +522,8 @@ static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu)
 
 	list_for_each_entry_safe(orb, next, &list, link) {
 		retval = 0;
-		fw_cancel_transaction(device->card, &orb->t);
+		if (fw_cancel_transaction(device->card, &orb->t) == 0)
+			continue;
 
 		orb->rcode = RCODE_CANCELLED;
 		orb->callback(orb, NULL);
@@ -835,8 +826,6 @@ static void sbp2_target_put(struct sbp2_target *tgt)
 	kref_put(&tgt->kref, sbp2_release_target);
 }
 
-static struct workqueue_struct *sbp2_wq;
-
 /*
  * Always get the target's kref when scheduling work on one its units.
  * Each workqueue job is responsible to call sbp2_target_put() upon return.
@@ -844,7 +833,7 @@ static struct workqueue_struct *sbp2_wq;
 static void sbp2_queue_work(struct sbp2_logical_unit *lu, unsigned long delay)
 {
 	sbp2_target_get(lu->tgt);
-	if (!queue_delayed_work(sbp2_wq, &lu->work, delay))
+	if (!queue_delayed_work(fw_workqueue, &lu->work, delay))
 		sbp2_target_put(lu->tgt);
 }
 
@@ -1403,7 +1392,7 @@ static void complete_command_orb(struct sbp2_orb *base_orb,
 	sbp2_unmap_scatterlist(device->card->device, orb);
 
 	orb->cmd->result = result;
-	orb->done(orb->cmd);
+	orb->cmd->scsi_done(orb->cmd);
 }
 
 static int sbp2_map_scatterlist(struct sbp2_command_orb *orb,
@@ -1468,7 +1457,8 @@ static int sbp2_map_scatterlist(struct sbp2_command_orb *orb,
 
 /* SCSI stack integration */
 
-static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
+static int sbp2_scsi_queuecommand(struct Scsi_Host *shost,
+				  struct scsi_cmnd *cmd)
 {
 	struct sbp2_logical_unit *lu = cmd->device->hostdata;
 	struct fw_device *device = target_device(lu->tgt);
@@ -1482,7 +1472,7 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
 	if (cmd->sc_data_direction == DMA_BIDIRECTIONAL) {
 		fw_error("Can't handle DMA_BIDIRECTIONAL, rejecting command\n");
 		cmd->result = DID_ERROR << 16;
-		done(cmd);
+		cmd->scsi_done(cmd);
 		return 0;
 	}
 
@@ -1495,11 +1485,8 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
 	/* Initialize rcode to something not RCODE_COMPLETE. */
 	orb->base.rcode = -1;
 	kref_init(&orb->base.kref);
-
-	orb->lu = lu;
-	orb->done = done;
-	orb->cmd = cmd;
-
+	orb->lu = lu;
+	orb->cmd = cmd;
 	orb->request.next.high = cpu_to_be32(SBP2_ORB_NULL);
 	orb->request.misc = cpu_to_be32(
 		COMMAND_ORB_MAX_PAYLOAD(lu->tgt->max_payload) |
@@ -1656,17 +1643,12 @@ MODULE_ALIAS("sbp2");
 
 static int __init sbp2_init(void)
 {
-	sbp2_wq = create_singlethread_workqueue(KBUILD_MODNAME);
-	if (!sbp2_wq)
-		return -ENOMEM;
-
 	return driver_register(&sbp2_driver.driver);
 }
 
 static void __exit sbp2_cleanup(void)
 {
 	driver_unregister(&sbp2_driver.driver);
-	destroy_workqueue(sbp2_wq);
 }
 
 module_init(sbp2_init);
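
For context, a minimal sketch (not part of the commit above) of the single-argument queuecommand style this diff converts sbp2 to, using only interfaces visible in the hunks (struct Scsi_Host, cmd->scsi_done, DID_ERROR << 16); the example_queuecommand name is hypothetical:

/* Hypothetical illustration, not from sbp2.c: new-style queuecommand. */
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

static int example_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	/* The midlayer now owns the completion callback (cmd->scsi_done);
	 * the driver no longer receives and stores a "done" pointer. */
	if (cmd->sc_data_direction == DMA_BIDIRECTIONAL) {
		cmd->result = DID_ERROR << 16;
		cmd->scsi_done(cmd);	/* reject immediately */
		return 0;
	}

	/* A real driver queues the command here; its asynchronous completion
	 * path later calls cmd->scsi_done(cmd), as complete_command_orb()
	 * does after this change. */
	cmd->result = DID_OK << 16;
	cmd->scsi_done(cmd);
	return 0;
}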