path: root/drivers/ieee1394/sbp2.c
author		Stefan Richter <stefanr@s5r6.in-berlin.de>	2006-12-29 17:47:04 -0500
committer	Stefan Richter <stefanr@s5r6.in-berlin.de>	2006-12-30 08:26:59 -0500
commit		97d552e35d9404df3254e1157df3340e4e2eaedc (patch)
tree		028f458771d19c9c64d95a7034f728791ae4b8d8 /drivers/ieee1394/sbp2.c
parent		b2bb550c4a10c44e99fe469cfaee81e2e3109994 (diff)
ieee1394: sbp2: fix bogus dma mapping
Need to use a PCI device, not a FireWire host device.

Problem found by Andreas Schwab, mistake pointed out by Benjamin Herrenschmidt.
http://ozlabs.org/pipermail/linuxppc-dev/2006-December/029595.html

Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>
Tested-by: Andreas Schwab <schwab@suse.de>
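The change itself is mechanical: every DMA mapping, sync, and coherent-allocation call is pointed at the host's parent device (the controller's PCI device, per the commit message) instead of the FireWire host's own struct device. A minimal before/after sketch, lifted from the first hunk of this patch:

	/* Before: hi->host->device is the ieee1394 host's own struct device,
	 * which is not the device that owns the DMA mapping operations. */
	cmd->command_orb_dma = dma_map_single(&hi->host->device,
					      &cmd->command_orb,
					      sizeof(struct sbp2_command_orb),
					      DMA_TO_DEVICE);

	/* After: device.parent is the underlying PCI device of the
	 * OHCI-1394 controller, which the DMA API can actually map for. */
	cmd->command_orb_dma = dma_map_single(hi->host->device.parent,
					      &cmd->command_orb,
					      sizeof(struct sbp2_command_orb),
					      DMA_TO_DEVICE);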
Diffstat (limited to 'drivers/ieee1394/sbp2.c')
-rw-r--r--	drivers/ieee1394/sbp2.c | 73
1 file changed, 40 insertions(+), 33 deletions(-)
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index d8042830ffed..2b5d7ab3adf7 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -490,11 +490,11 @@ static int sbp2util_create_command_orb_pool(struct sbp2_lu *lu)
 			spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
 			return -ENOMEM;
 		}
-		cmd->command_orb_dma = dma_map_single(&hi->host->device,
+		cmd->command_orb_dma = dma_map_single(hi->host->device.parent,
 						      &cmd->command_orb,
 						      sizeof(struct sbp2_command_orb),
 						      DMA_TO_DEVICE);
-		cmd->sge_dma = dma_map_single(&hi->host->device,
+		cmd->sge_dma = dma_map_single(hi->host->device.parent,
 					      &cmd->scatter_gather_element,
 					      sizeof(cmd->scatter_gather_element),
 					      DMA_BIDIRECTIONAL);
@@ -516,10 +516,11 @@ static void sbp2util_remove_command_orb_pool(struct sbp2_lu *lu)
 	if (!list_empty(&lu->cmd_orb_completed))
 		list_for_each_safe(lh, next, &lu->cmd_orb_completed) {
 			cmd = list_entry(lh, struct sbp2_command_info, list);
-			dma_unmap_single(&host->device, cmd->command_orb_dma,
+			dma_unmap_single(host->device.parent,
+					 cmd->command_orb_dma,
 					 sizeof(struct sbp2_command_orb),
 					 DMA_TO_DEVICE);
-			dma_unmap_single(&host->device, cmd->sge_dma,
+			dma_unmap_single(host->device.parent, cmd->sge_dma,
 					 sizeof(cmd->scatter_gather_element),
 					 DMA_BIDIRECTIONAL);
 			kfree(cmd);
@@ -601,17 +602,17 @@ static void sbp2util_mark_command_completed(struct sbp2_lu *lu,
 
 	if (cmd->cmd_dma) {
 		if (cmd->dma_type == CMD_DMA_SINGLE)
-			dma_unmap_single(&host->device, cmd->cmd_dma,
+			dma_unmap_single(host->device.parent, cmd->cmd_dma,
 					 cmd->dma_size, cmd->dma_dir);
 		else if (cmd->dma_type == CMD_DMA_PAGE)
-			dma_unmap_page(&host->device, cmd->cmd_dma,
+			dma_unmap_page(host->device.parent, cmd->cmd_dma,
 				       cmd->dma_size, cmd->dma_dir);
 		/* XXX: Check for CMD_DMA_NONE bug */
 		cmd->dma_type = CMD_DMA_NONE;
 		cmd->cmd_dma = 0;
 	}
 	if (cmd->sge_buffer) {
-		dma_unmap_sg(&host->device, cmd->sge_buffer,
+		dma_unmap_sg(host->device.parent, cmd->sge_buffer,
 			     cmd->dma_size, cmd->dma_dir);
 		cmd->sge_buffer = NULL;
 	}
@@ -836,37 +837,37 @@ static int sbp2_start_device(struct sbp2_lu *lu)
 	struct sbp2_fwhost_info *hi = lu->hi;
 	int error;
 
-	lu->login_response = dma_alloc_coherent(&hi->host->device,
+	lu->login_response = dma_alloc_coherent(hi->host->device.parent,
 				     sizeof(struct sbp2_login_response),
 				     &lu->login_response_dma, GFP_KERNEL);
 	if (!lu->login_response)
 		goto alloc_fail;
 
-	lu->query_logins_orb = dma_alloc_coherent(&hi->host->device,
+	lu->query_logins_orb = dma_alloc_coherent(hi->host->device.parent,
 				     sizeof(struct sbp2_query_logins_orb),
 				     &lu->query_logins_orb_dma, GFP_KERNEL);
 	if (!lu->query_logins_orb)
 		goto alloc_fail;
 
-	lu->query_logins_response = dma_alloc_coherent(&hi->host->device,
+	lu->query_logins_response = dma_alloc_coherent(hi->host->device.parent,
 				     sizeof(struct sbp2_query_logins_response),
 				     &lu->query_logins_response_dma, GFP_KERNEL);
 	if (!lu->query_logins_response)
 		goto alloc_fail;
 
-	lu->reconnect_orb = dma_alloc_coherent(&hi->host->device,
+	lu->reconnect_orb = dma_alloc_coherent(hi->host->device.parent,
 				     sizeof(struct sbp2_reconnect_orb),
 				     &lu->reconnect_orb_dma, GFP_KERNEL);
 	if (!lu->reconnect_orb)
 		goto alloc_fail;
 
-	lu->logout_orb = dma_alloc_coherent(&hi->host->device,
+	lu->logout_orb = dma_alloc_coherent(hi->host->device.parent,
 				     sizeof(struct sbp2_logout_orb),
 				     &lu->logout_orb_dma, GFP_KERNEL);
 	if (!lu->logout_orb)
 		goto alloc_fail;
 
-	lu->login_orb = dma_alloc_coherent(&hi->host->device,
+	lu->login_orb = dma_alloc_coherent(hi->host->device.parent,
 				     sizeof(struct sbp2_login_orb),
 				     &lu->login_orb_dma, GFP_KERNEL);
 	if (!lu->login_orb)
@@ -929,32 +930,32 @@ static void sbp2_remove_device(struct sbp2_lu *lu)
 	list_del(&lu->lu_list);
 
 	if (lu->login_response)
-		dma_free_coherent(&hi->host->device,
+		dma_free_coherent(hi->host->device.parent,
 				    sizeof(struct sbp2_login_response),
 				    lu->login_response,
 				    lu->login_response_dma);
 	if (lu->login_orb)
-		dma_free_coherent(&hi->host->device,
+		dma_free_coherent(hi->host->device.parent,
 				    sizeof(struct sbp2_login_orb),
 				    lu->login_orb,
 				    lu->login_orb_dma);
 	if (lu->reconnect_orb)
-		dma_free_coherent(&hi->host->device,
+		dma_free_coherent(hi->host->device.parent,
 				    sizeof(struct sbp2_reconnect_orb),
 				    lu->reconnect_orb,
 				    lu->reconnect_orb_dma);
 	if (lu->logout_orb)
-		dma_free_coherent(&hi->host->device,
+		dma_free_coherent(hi->host->device.parent,
 				    sizeof(struct sbp2_logout_orb),
 				    lu->logout_orb,
 				    lu->logout_orb_dma);
 	if (lu->query_logins_orb)
-		dma_free_coherent(&hi->host->device,
+		dma_free_coherent(hi->host->device.parent,
 				    sizeof(struct sbp2_query_logins_orb),
 				    lu->query_logins_orb,
 				    lu->query_logins_orb_dma);
 	if (lu->query_logins_response)
-		dma_free_coherent(&hi->host->device,
+		dma_free_coherent(hi->host->device.parent,
 				    sizeof(struct sbp2_query_logins_response),
 				    lu->query_logins_response,
 				    lu->query_logins_response_dma);
@@ -1445,7 +1446,7 @@ static void sbp2_prep_command_orb_sg(struct sbp2_command_orb *orb,
 
 		cmd->dma_size = sgpnt[0].length;
 		cmd->dma_type = CMD_DMA_PAGE;
-		cmd->cmd_dma = dma_map_page(&hi->host->device,
+		cmd->cmd_dma = dma_map_page(hi->host->device.parent,
 					    sgpnt[0].page, sgpnt[0].offset,
 					    cmd->dma_size, cmd->dma_dir);
 
@@ -1457,8 +1458,8 @@ static void sbp2_prep_command_orb_sg(struct sbp2_command_orb *orb,
 					&cmd->scatter_gather_element[0];
 		u32 sg_count, sg_len;
 		dma_addr_t sg_addr;
-		int i, count = dma_map_sg(&hi->host->device, sgpnt, scsi_use_sg,
-					  dma_dir);
+		int i, count = dma_map_sg(hi->host->device.parent, sgpnt,
+					  scsi_use_sg, dma_dir);
 
 		cmd->dma_size = scsi_use_sg;
 		cmd->sge_buffer = sgpnt;
@@ -1508,7 +1509,8 @@ static void sbp2_prep_command_orb_no_sg(struct sbp2_command_orb *orb,
 	cmd->dma_dir = dma_dir;
 	cmd->dma_size = scsi_request_bufflen;
 	cmd->dma_type = CMD_DMA_SINGLE;
-	cmd->cmd_dma = dma_map_single(&hi->host->device, scsi_request_buffer,
+	cmd->cmd_dma = dma_map_single(hi->host->device.parent,
+				      scsi_request_buffer,
 				      cmd->dma_size, cmd->dma_dir);
 	orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
 	orb->misc |= ORB_SET_DIRECTION(orb_direction);
@@ -1626,10 +1628,11 @@ static void sbp2_link_orb_command(struct sbp2_lu *lu,
 	size_t length;
 	unsigned long flags;
 
-	dma_sync_single_for_device(&hi->host->device, cmd->command_orb_dma,
+	dma_sync_single_for_device(hi->host->device.parent,
+				   cmd->command_orb_dma,
 				   sizeof(struct sbp2_command_orb),
 				   DMA_TO_DEVICE);
-	dma_sync_single_for_device(&hi->host->device, cmd->sge_dma,
+	dma_sync_single_for_device(hi->host->device.parent, cmd->sge_dma,
 				   sizeof(cmd->scatter_gather_element),
 				   DMA_BIDIRECTIONAL);
 
@@ -1655,14 +1658,15 @@ static void sbp2_link_orb_command(struct sbp2_lu *lu,
 	 * The target's fetch agent may or may not have read this
 	 * previous ORB yet.
 	 */
-	dma_sync_single_for_cpu(&hi->host->device, last_orb_dma,
+	dma_sync_single_for_cpu(hi->host->device.parent, last_orb_dma,
 				sizeof(struct sbp2_command_orb),
 				DMA_TO_DEVICE);
 	last_orb->next_ORB_lo = cpu_to_be32(cmd->command_orb_dma);
 	wmb();
 	/* Tells hardware that this pointer is valid */
 	last_orb->next_ORB_hi = 0;
-	dma_sync_single_for_device(&hi->host->device, last_orb_dma,
+	dma_sync_single_for_device(hi->host->device.parent,
+				   last_orb_dma,
 				   sizeof(struct sbp2_command_orb),
 				   DMA_TO_DEVICE);
 	addr += SBP2_DOORBELL_OFFSET;
@@ -1790,10 +1794,11 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid,
 	else
 		cmd = sbp2util_find_command_for_orb(lu, sb->ORB_offset_lo);
 	if (cmd) {
-		dma_sync_single_for_cpu(&hi->host->device, cmd->command_orb_dma,
+		dma_sync_single_for_cpu(hi->host->device.parent,
+					cmd->command_orb_dma,
 					sizeof(struct sbp2_command_orb),
 					DMA_TO_DEVICE);
-		dma_sync_single_for_cpu(&hi->host->device, cmd->sge_dma,
+		dma_sync_single_for_cpu(hi->host->device.parent, cmd->sge_dma,
 					sizeof(cmd->scatter_gather_element),
 					DMA_BIDIRECTIONAL);
 		/* Grab SCSI command pointers and check status. */
@@ -1921,10 +1926,11 @@ static void sbp2scsi_complete_all_commands(struct sbp2_lu *lu, u32 status)
 	while (!list_empty(&lu->cmd_orb_inuse)) {
 		lh = lu->cmd_orb_inuse.next;
 		cmd = list_entry(lh, struct sbp2_command_info, list);
-		dma_sync_single_for_cpu(&hi->host->device, cmd->command_orb_dma,
+		dma_sync_single_for_cpu(hi->host->device.parent,
+					cmd->command_orb_dma,
 					sizeof(struct sbp2_command_orb),
 					DMA_TO_DEVICE);
-		dma_sync_single_for_cpu(&hi->host->device, cmd->sge_dma,
+		dma_sync_single_for_cpu(hi->host->device.parent, cmd->sge_dma,
 					sizeof(cmd->scatter_gather_element),
 					DMA_BIDIRECTIONAL);
 		sbp2util_mark_command_completed(lu, cmd);
@@ -2049,11 +2055,12 @@ static int sbp2scsi_abort(struct scsi_cmnd *SCpnt)
 	spin_lock_irqsave(&lu->cmd_orb_lock, flags);
 	cmd = sbp2util_find_command_for_SCpnt(lu, SCpnt);
 	if (cmd) {
-		dma_sync_single_for_cpu(&hi->host->device,
+		dma_sync_single_for_cpu(hi->host->device.parent,
 					cmd->command_orb_dma,
 					sizeof(struct sbp2_command_orb),
 					DMA_TO_DEVICE);
-		dma_sync_single_for_cpu(&hi->host->device, cmd->sge_dma,
+		dma_sync_single_for_cpu(hi->host->device.parent,
+					cmd->sge_dma,
 					sizeof(cmd->scatter_gather_element),
 					DMA_BIDIRECTIONAL);
 		sbp2util_mark_command_completed(lu, cmd);