author		Stefan Richter <stefanr@s5r6.in-berlin.de>	2006-11-22 15:44:34 -0500
committer	Stefan Richter <stefanr@s5r6.in-berlin.de>	2006-12-07 17:11:43 -0500
commit		9b7d9c096dd4e4baacc21b2588662bbb56f36c4e
tree		738a617158f0d2fe5395095c8b6aebb71561ac47 /drivers/ieee1394
parent		d41bba2d3adcddbdd7b5ed4d4abf07890cf6006f
ieee1394: sbp2: convert from PCI DMA to generic DMA

API conversion without change in functionality.

Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>
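(Editorial note, not part of the commit: the conversion is mechanical. Each pci_* DMA call has a dma_* counterpart that takes a struct device * — here &hi->host->device, the ieee1394 host's embedded device — instead of a struct pci_dev *, which is what lets the Kconfig dependency on PCI go away below. The sketch that follows shows the correspondence for the call patterns touched in this patch; the function and its parameters are hypothetical, while the dma_* calls are the real generic DMA API from <linux/dma-mapping.h>.)

#include <linux/dma-mapping.h>

/* Hypothetical illustration of the pci_* -> dma_* correspondence. */
static void dma_api_sketch(struct device *dev, void *buf, size_t len)
{
	dma_addr_t bus_addr;
	void *cpu_addr;

	/* was: pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE) */
	bus_addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* was: pci_dma_sync_single_for_device(pdev, bus_addr, len, ...) */
	dma_sync_single_for_device(dev, bus_addr, len, DMA_TO_DEVICE);

	/* was: pci_dma_sync_single_for_cpu(pdev, bus_addr, len, ...) */
	dma_sync_single_for_cpu(dev, bus_addr, len, DMA_TO_DEVICE);

	/* was: pci_unmap_single(pdev, bus_addr, len, PCI_DMA_TODEVICE) */
	dma_unmap_single(dev, bus_addr, len, DMA_TO_DEVICE);

	/*
	 * was: pci_alloc_consistent(pdev, len, &bus_addr) -- the generic
	 * call takes an explicit gfp_t where the PCI wrapper hardcoded
	 * GFP_ATOMIC; this patch passes GFP_KERNEL.
	 */
	cpu_addr = dma_alloc_coherent(dev, len, &bus_addr, GFP_KERNEL);
	if (cpu_addr)
		/* was: pci_free_consistent(pdev, len, cpu_addr, bus_addr) */
		dma_free_coherent(dev, len, cpu_addr, bus_addr);
}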
Diffstat (limited to 'drivers/ieee1394')
-rw-r--r--	drivers/ieee1394/Kconfig	2
-rw-r--r--	drivers/ieee1394/sbp2.c		126
-rw-r--r--	drivers/ieee1394/sbp2.h		2
3 files changed, 62 insertions, 68 deletions
diff --git a/drivers/ieee1394/Kconfig b/drivers/ieee1394/Kconfig
index 21a13348f258..e7d56573fe56 100644
--- a/drivers/ieee1394/Kconfig
+++ b/drivers/ieee1394/Kconfig
@@ -120,7 +120,7 @@ comment "SBP-2 support (for storage devices) requires SCSI"
 
 config IEEE1394_SBP2
 	tristate "SBP-2 support (Harddisks etc.)"
-	depends on IEEE1394 && SCSI && (PCI || BROKEN)
+	depends on IEEE1394 && SCSI
 	help
 	  This option enables you to use SBP-2 devices connected to an IEEE
 	  1394 bus. SBP-2 devices include storage devices like harddisks and
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index 5d3b96e0a951..ab7059858bbd 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -48,7 +48,6 @@
  * - make the parameter serialize_io configurable per device
  * - move all requests to fetch agent registers into non-atomic context,
  *   replace all usages of sbp2util_node_write_no_wait by true transactions
- * - convert to generic DMA mapping API to eliminate dependency on PCI
  * Grep for inline FIXME comments below.
  */
 
@@ -63,7 +62,6 @@
 #include <linux/list.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
-#include <linux/pci.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/stat.h>
@@ -491,14 +489,14 @@ static int sbp2util_create_command_orb_pool(struct sbp2_lu *lu)
 			spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
 			return -ENOMEM;
 		}
-		cmd->command_orb_dma = pci_map_single(hi->host->pdev,
-						      &cmd->command_orb,
-						      sizeof(struct sbp2_command_orb),
-						      PCI_DMA_TODEVICE);
-		cmd->sge_dma = pci_map_single(hi->host->pdev,
-					      &cmd->scatter_gather_element,
-					      sizeof(cmd->scatter_gather_element),
-					      PCI_DMA_BIDIRECTIONAL);
+		cmd->command_orb_dma = dma_map_single(&hi->host->device,
+						      &cmd->command_orb,
+						      sizeof(struct sbp2_command_orb),
+						      DMA_TO_DEVICE);
+		cmd->sge_dma = dma_map_single(&hi->host->device,
+					      &cmd->scatter_gather_element,
+					      sizeof(cmd->scatter_gather_element),
+					      DMA_BIDIRECTIONAL);
 		INIT_LIST_HEAD(&cmd->list);
 		list_add_tail(&cmd->list, &lu->cmd_orb_completed);
 	}
@@ -517,12 +515,12 @@ static void sbp2util_remove_command_orb_pool(struct sbp2_lu *lu)
 	if (!list_empty(&lu->cmd_orb_completed))
 		list_for_each_safe(lh, next, &lu->cmd_orb_completed) {
 			cmd = list_entry(lh, struct sbp2_command_info, list);
-			pci_unmap_single(host->pdev, cmd->command_orb_dma,
-					 sizeof(struct sbp2_command_orb),
-					 PCI_DMA_TODEVICE);
-			pci_unmap_single(host->pdev, cmd->sge_dma,
-					 sizeof(cmd->scatter_gather_element),
-					 PCI_DMA_BIDIRECTIONAL);
+			dma_unmap_single(&host->device, cmd->command_orb_dma,
+					 sizeof(struct sbp2_command_orb),
+					 DMA_TO_DEVICE);
+			dma_unmap_single(&host->device, cmd->sge_dma,
+					 sizeof(cmd->scatter_gather_element),
+					 DMA_BIDIRECTIONAL);
 			kfree(cmd);
 		}
 	spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
@@ -602,17 +600,17 @@ static void sbp2util_mark_command_completed(struct sbp2_lu *lu,
 
 	if (cmd->cmd_dma) {
 		if (cmd->dma_type == CMD_DMA_SINGLE)
-			pci_unmap_single(host->pdev, cmd->cmd_dma,
-					 cmd->dma_size, cmd->dma_dir);
+			dma_unmap_single(&host->device, cmd->cmd_dma,
+					 cmd->dma_size, cmd->dma_dir);
 		else if (cmd->dma_type == CMD_DMA_PAGE)
-			pci_unmap_page(host->pdev, cmd->cmd_dma,
-				       cmd->dma_size, cmd->dma_dir);
+			dma_unmap_page(&host->device, cmd->cmd_dma,
+				       cmd->dma_size, cmd->dma_dir);
 		/* XXX: Check for CMD_DMA_NONE bug */
 		cmd->dma_type = CMD_DMA_NONE;
 		cmd->cmd_dma = 0;
 	}
 	if (cmd->sge_buffer) {
-		pci_unmap_sg(host->pdev, cmd->sge_buffer,
-			     cmd->dma_size, cmd->dma_dir);
+		dma_unmap_sg(&host->device, cmd->sge_buffer,
+			     cmd->dma_size, cmd->dma_dir);
 		cmd->sge_buffer = NULL;
 	}
@@ -837,39 +835,39 @@ static int sbp2_start_device(struct sbp2_lu *lu)
 	struct sbp2_fwhost_info *hi = lu->hi;
 	int error;
 
-	lu->login_response = pci_alloc_consistent(hi->host->pdev,
+	lu->login_response = dma_alloc_coherent(&hi->host->device,
 				     sizeof(struct sbp2_login_response),
-				     &lu->login_response_dma);
+				     &lu->login_response_dma, GFP_KERNEL);
 	if (!lu->login_response)
 		goto alloc_fail;
 
-	lu->query_logins_orb = pci_alloc_consistent(hi->host->pdev,
+	lu->query_logins_orb = dma_alloc_coherent(&hi->host->device,
 				     sizeof(struct sbp2_query_logins_orb),
-				     &lu->query_logins_orb_dma);
+				     &lu->query_logins_orb_dma, GFP_KERNEL);
 	if (!lu->query_logins_orb)
 		goto alloc_fail;
 
-	lu->query_logins_response = pci_alloc_consistent(hi->host->pdev,
+	lu->query_logins_response = dma_alloc_coherent(&hi->host->device,
 				     sizeof(struct sbp2_query_logins_response),
-				     &lu->query_logins_response_dma);
+				     &lu->query_logins_response_dma, GFP_KERNEL);
 	if (!lu->query_logins_response)
 		goto alloc_fail;
 
-	lu->reconnect_orb = pci_alloc_consistent(hi->host->pdev,
+	lu->reconnect_orb = dma_alloc_coherent(&hi->host->device,
 				     sizeof(struct sbp2_reconnect_orb),
-				     &lu->reconnect_orb_dma);
+				     &lu->reconnect_orb_dma, GFP_KERNEL);
 	if (!lu->reconnect_orb)
 		goto alloc_fail;
 
-	lu->logout_orb = pci_alloc_consistent(hi->host->pdev,
+	lu->logout_orb = dma_alloc_coherent(&hi->host->device,
 				     sizeof(struct sbp2_logout_orb),
-				     &lu->logout_orb_dma);
+				     &lu->logout_orb_dma, GFP_KERNEL);
 	if (!lu->logout_orb)
 		goto alloc_fail;
 
-	lu->login_orb = pci_alloc_consistent(hi->host->pdev,
+	lu->login_orb = dma_alloc_coherent(&hi->host->device,
 				     sizeof(struct sbp2_login_orb),
-				     &lu->login_orb_dma);
+				     &lu->login_orb_dma, GFP_KERNEL);
 	if (!lu->login_orb)
 		goto alloc_fail;
 
@@ -930,32 +928,32 @@ static void sbp2_remove_device(struct sbp2_lu *lu)
 	list_del(&lu->lu_list);
 
 	if (lu->login_response)
-		pci_free_consistent(hi->host->pdev,
+		dma_free_coherent(&hi->host->device,
 				    sizeof(struct sbp2_login_response),
 				    lu->login_response,
 				    lu->login_response_dma);
 	if (lu->login_orb)
-		pci_free_consistent(hi->host->pdev,
+		dma_free_coherent(&hi->host->device,
 				    sizeof(struct sbp2_login_orb),
 				    lu->login_orb,
 				    lu->login_orb_dma);
 	if (lu->reconnect_orb)
-		pci_free_consistent(hi->host->pdev,
+		dma_free_coherent(&hi->host->device,
 				    sizeof(struct sbp2_reconnect_orb),
 				    lu->reconnect_orb,
 				    lu->reconnect_orb_dma);
 	if (lu->logout_orb)
-		pci_free_consistent(hi->host->pdev,
+		dma_free_coherent(&hi->host->device,
 				    sizeof(struct sbp2_logout_orb),
 				    lu->logout_orb,
 				    lu->logout_orb_dma);
 	if (lu->query_logins_orb)
-		pci_free_consistent(hi->host->pdev,
+		dma_free_coherent(&hi->host->device,
 				    sizeof(struct sbp2_query_logins_orb),
 				    lu->query_logins_orb,
 				    lu->query_logins_orb_dma);
 	if (lu->query_logins_response)
-		pci_free_consistent(hi->host->pdev,
+		dma_free_coherent(&hi->host->device,
 				    sizeof(struct sbp2_query_logins_response),
 				    lu->query_logins_response,
 				    lu->query_logins_response_dma);
@@ -1446,7 +1444,7 @@ static void sbp2_prep_command_orb_sg(struct sbp2_command_orb *orb,
 
 		cmd->dma_size = sgpnt[0].length;
 		cmd->dma_type = CMD_DMA_PAGE;
-		cmd->cmd_dma = pci_map_page(hi->host->pdev,
+		cmd->cmd_dma = dma_map_page(&hi->host->device,
 					    sgpnt[0].page, sgpnt[0].offset,
 					    cmd->dma_size, cmd->dma_dir);
 
@@ -1458,7 +1456,7 @@ static void sbp2_prep_command_orb_sg(struct sbp2_command_orb *orb,
 			&cmd->scatter_gather_element[0];
 		u32 sg_count, sg_len;
 		dma_addr_t sg_addr;
-		int i, count = pci_map_sg(hi->host->pdev, sgpnt, scsi_use_sg,
+		int i, count = dma_map_sg(&hi->host->device, sgpnt, scsi_use_sg,
 					  dma_dir);
 
 		cmd->dma_size = scsi_use_sg;
@@ -1509,7 +1507,7 @@ static void sbp2_prep_command_orb_no_sg(struct sbp2_command_orb *orb,
 	cmd->dma_dir = dma_dir;
 	cmd->dma_size = scsi_request_bufflen;
 	cmd->dma_type = CMD_DMA_SINGLE;
-	cmd->cmd_dma = pci_map_single(hi->host->pdev, scsi_request_buffer,
+	cmd->cmd_dma = dma_map_single(&hi->host->device, scsi_request_buffer,
 				      cmd->dma_size, cmd->dma_dir);
 	orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
 	orb->misc |= ORB_SET_DIRECTION(orb_direction);
@@ -1627,12 +1625,12 @@ static void sbp2_link_orb_command(struct sbp2_lu *lu,
 	size_t length;
 	unsigned long flags;
 
-	pci_dma_sync_single_for_device(hi->host->pdev, cmd->command_orb_dma,
-				       sizeof(struct sbp2_command_orb),
-				       PCI_DMA_TODEVICE);
-	pci_dma_sync_single_for_device(hi->host->pdev, cmd->sge_dma,
-				       sizeof(cmd->scatter_gather_element),
-				       PCI_DMA_BIDIRECTIONAL);
+	dma_sync_single_for_device(&hi->host->device, cmd->command_orb_dma,
+				   sizeof(struct sbp2_command_orb),
+				   DMA_TO_DEVICE);
+	dma_sync_single_for_device(&hi->host->device, cmd->sge_dma,
+				   sizeof(cmd->scatter_gather_element),
+				   DMA_BIDIRECTIONAL);
 
 	/* check to see if there are any previous orbs to use */
 	spin_lock_irqsave(&lu->cmd_orb_lock, flags);
@@ -1656,16 +1654,16 @@ static void sbp2_link_orb_command(struct sbp2_lu *lu,
 		 * The target's fetch agent may or may not have read this
 		 * previous ORB yet.
 		 */
-		pci_dma_sync_single_for_cpu(hi->host->pdev, last_orb_dma,
-					    sizeof(struct sbp2_command_orb),
-					    PCI_DMA_TODEVICE);
+		dma_sync_single_for_cpu(&hi->host->device, last_orb_dma,
+					sizeof(struct sbp2_command_orb),
+					DMA_TO_DEVICE);
 		last_orb->next_ORB_lo = cpu_to_be32(cmd->command_orb_dma);
 		wmb();
 		/* Tells hardware that this pointer is valid */
 		last_orb->next_ORB_hi = 0;
-		pci_dma_sync_single_for_device(hi->host->pdev, last_orb_dma,
-					       sizeof(struct sbp2_command_orb),
-					       PCI_DMA_TODEVICE);
+		dma_sync_single_for_device(&hi->host->device, last_orb_dma,
+					   sizeof(struct sbp2_command_orb),
+					   DMA_TO_DEVICE);
 		addr += SBP2_DOORBELL_OFFSET;
 		data[0] = 0;
 		length = 4;
@@ -1792,14 +1790,12 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid,
 	else
 		cmd = sbp2util_find_command_for_orb(lu, sb->ORB_offset_lo);
 	if (cmd) {
-		pci_dma_sync_single_for_cpu(hi->host->pdev,
-					    cmd->command_orb_dma,
-					    sizeof(struct sbp2_command_orb),
-					    PCI_DMA_TODEVICE);
-		pci_dma_sync_single_for_cpu(hi->host->pdev,
-					    cmd->sge_dma,
-					    sizeof(cmd->scatter_gather_element),
-					    PCI_DMA_BIDIRECTIONAL);
+		dma_sync_single_for_cpu(&hi->host->device, cmd->command_orb_dma,
+					sizeof(struct sbp2_command_orb),
+					DMA_TO_DEVICE);
+		dma_sync_single_for_cpu(&hi->host->device, cmd->sge_dma,
+					sizeof(cmd->scatter_gather_element),
+					DMA_BIDIRECTIONAL);
 		/* Grab SCSI command pointers and check status. */
 		/*
 		 * FIXME: If the src field in the status is 1, the ORB DMA must
@@ -1935,13 +1931,12 @@ static void sbp2scsi_complete_all_commands(struct sbp2_lu *lu, u32 status)
 	while (!list_empty(&lu->cmd_orb_inuse)) {
 		lh = lu->cmd_orb_inuse.next;
 		cmd = list_entry(lh, struct sbp2_command_info, list);
-		pci_dma_sync_single_for_cpu(hi->host->pdev,
-					    cmd->command_orb_dma,
-					    sizeof(struct sbp2_command_orb),
-					    PCI_DMA_TODEVICE);
-		pci_dma_sync_single_for_cpu(hi->host->pdev, cmd->sge_dma,
-					    sizeof(cmd->scatter_gather_element),
-					    PCI_DMA_BIDIRECTIONAL);
+		dma_sync_single_for_cpu(&hi->host->device, cmd->command_orb_dma,
+					sizeof(struct sbp2_command_orb),
+					DMA_TO_DEVICE);
+		dma_sync_single_for_cpu(&hi->host->device, cmd->sge_dma,
+					sizeof(cmd->scatter_gather_element),
+					DMA_BIDIRECTIONAL);
 		sbp2util_mark_command_completed(lu, cmd);
 		if (cmd->Current_SCpnt) {
 			cmd->Current_SCpnt->result = status << 16;
@@ -2064,14 +2059,13 @@ static int sbp2scsi_abort(struct scsi_cmnd *SCpnt)
 		spin_lock_irqsave(&lu->cmd_orb_lock, flags);
 		cmd = sbp2util_find_command_for_SCpnt(lu, SCpnt);
 		if (cmd) {
-			pci_dma_sync_single_for_cpu(hi->host->pdev,
-						    cmd->command_orb_dma,
-						    sizeof(struct sbp2_command_orb),
-						    PCI_DMA_TODEVICE);
-			pci_dma_sync_single_for_cpu(hi->host->pdev,
-						    cmd->sge_dma,
-						    sizeof(cmd->scatter_gather_element),
-						    PCI_DMA_BIDIRECTIONAL);
+			dma_sync_single_for_cpu(&hi->host->device,
+						cmd->command_orb_dma,
+						sizeof(struct sbp2_command_orb),
+						DMA_TO_DEVICE);
+			dma_sync_single_for_cpu(&hi->host->device, cmd->sge_dma,
+						sizeof(cmd->scatter_gather_element),
+						DMA_BIDIRECTIONAL);
 			sbp2util_mark_command_completed(lu, cmd);
 			if (cmd->Current_SCpnt) {
 				cmd->Current_SCpnt->result = DID_ABORT << 16;
diff --git a/drivers/ieee1394/sbp2.h b/drivers/ieee1394/sbp2.h
index ef24d630a577..9ae842329bf3 100644
--- a/drivers/ieee1394/sbp2.h
+++ b/drivers/ieee1394/sbp2.h
@@ -263,7 +263,7 @@ struct sbp2_command_info {
 	dma_addr_t cmd_dma;
 	enum sbp2_dma_types dma_type;
 	unsigned long dma_size;
-	int dma_dir;
+	enum dma_data_direction dma_dir;
 };
 
 /* Per FireWire host */
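
(Editorial note on the sbp2.h hunk above: the dma_dir field changes from int to enum dma_data_direction. This is type hygiene only — the legacy PCI_DMA_* constants were defined to the same numeric values as the generic enum, reproduced below from <linux/dma-mapping.h> of this era, so the stored values do not change.)

enum dma_data_direction {
	DMA_BIDIRECTIONAL = 0,	/* replaces PCI_DMA_BIDIRECTIONAL */
	DMA_TO_DEVICE = 1,	/* replaces PCI_DMA_TODEVICE */
	DMA_FROM_DEVICE = 2,	/* replaces PCI_DMA_FROMDEVICE */
	DMA_NONE = 3,		/* replaces PCI_DMA_NONE */
};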