Diffstat (limited to 'drivers')
-rw-r--r--	drivers/ieee1394/sbp2.c	| 98
-rw-r--r--	drivers/ieee1394/sbp2.h	| 33
2 files changed, 39 insertions(+), 92 deletions(-)
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index 0037305f599e..c52f6e6e8af2 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -656,24 +656,11 @@ static struct sbp2_command_info *sbp2util_allocate_command_orb(
 static void sbp2util_mark_command_completed(struct sbp2_lu *lu,
 					    struct sbp2_command_info *cmd)
 {
-	struct hpsb_host *host = lu->ud->ne->host;
-
-	if (cmd->cmd_dma) {
-		if (cmd->dma_type == CMD_DMA_SINGLE)
-			dma_unmap_single(host->device.parent, cmd->cmd_dma,
-					 cmd->dma_size, cmd->dma_dir);
-		else if (cmd->dma_type == CMD_DMA_PAGE)
-			dma_unmap_page(host->device.parent, cmd->cmd_dma,
-				       cmd->dma_size, cmd->dma_dir);
-		/* XXX: Check for CMD_DMA_NONE bug */
-		cmd->dma_type = CMD_DMA_NONE;
-		cmd->cmd_dma = 0;
-	}
-	if (cmd->sge_buffer) {
-		dma_unmap_sg(host->device.parent, cmd->sge_buffer,
-			     cmd->dma_size, cmd->dma_dir);
-		cmd->sge_buffer = NULL;
-	}
+	if (scsi_sg_count(cmd->Current_SCpnt))
+		dma_unmap_sg(lu->ud->ne->host->device.parent,
+			     scsi_sglist(cmd->Current_SCpnt),
+			     scsi_sg_count(cmd->Current_SCpnt),
+			     cmd->Current_SCpnt->sc_data_direction);
 	list_move_tail(&cmd->list, &lu->cmd_orb_completed);
 }
 
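
Note: the unmap path collapses to a single dma_unmap_sg() because command
preparation (further down) now maps the SCSI layer's scatterlist exactly once.
A minimal sketch of the symmetric DMA-API pattern adopted here; this is
generic kernel usage, not part of the patch, with names abbreviated:

	/* map once at submission time; a return of 0 means total failure */
	int n = dma_map_sg(dev, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
			   SCpnt->sc_data_direction);
	if (n == 0)
		return -ENOMEM;
	/* ...and later unmap with the original nents, not the returned n */
	dma_unmap_sg(dev, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
		     SCpnt->sc_data_direction);

dma_unmap_sg() must receive the same nents that was passed to dma_map_sg();
the mapped count n only matters when walking the coalesced list.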
@@ -838,6 +825,10 @@ static struct sbp2_lu *sbp2_alloc_device(struct unit_directory *ud)
 #endif
 	}
 
+	if (dma_get_max_seg_size(hi->host->device.parent) > SBP2_MAX_SEG_SIZE)
+		BUG_ON(dma_set_max_seg_size(hi->host->device.parent,
+					    SBP2_MAX_SEG_SIZE));
+
 	/* Prevent unloading of the 1394 host */
 	if (!try_module_get(hi->host->driver->owner)) {
 		SBP2_ERR("failed to get a reference on 1394 host driver");
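
Note: dma_set_max_seg_size() fails only if dev->dma_parms is unset, in which
case the cap would silently not apply and the 16-bit SBP-2 segment lengths
used below could overflow; hence the BUG_ON(). An approximation of the
helper's behavior at the time (a sketch, not verbatim kernel source):

	static inline int dma_set_max_seg_size(struct device *dev,
					       unsigned int size)
	{
		if (dev->dma_parms) {
			dev->dma_parms->max_segment_size = size;
			return 0;
		}
		return -EIO;	/* no dma_parms to store the limit in */
	}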
@@ -1512,76 +1503,41 @@ static int sbp2_agent_reset(struct sbp2_lu *lu, int wait)
 static int sbp2_prep_command_orb_sg(struct sbp2_command_orb *orb,
 				    struct sbp2_fwhost_info *hi,
 				    struct sbp2_command_info *cmd,
-				    unsigned int scsi_use_sg,
+				    unsigned int sg_count,
 				    struct scatterlist *sg,
 				    u32 orb_direction,
 				    enum dma_data_direction dma_dir)
 {
 	struct device *dmadev = hi->host->device.parent;
+	struct sbp2_unrestricted_page_table *pt;
+	int i, n;
+
+	n = dma_map_sg(dmadev, sg, sg_count, dma_dir);
+	if (n == 0)
+		return -ENOMEM;
 
-	cmd->dma_dir = dma_dir;
 	orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
 	orb->misc |= ORB_SET_DIRECTION(orb_direction);
 
 	/* special case if only one element (and less than 64KB in size) */
-	if (scsi_use_sg == 1 && sg->length <= SBP2_MAX_SG_ELEMENT_LENGTH) {
-
-		cmd->dma_size = sg->length;
-		cmd->dma_type = CMD_DMA_PAGE;
-		cmd->cmd_dma = dma_map_page(dmadev, sg_page(sg), sg->offset,
-					    cmd->dma_size, cmd->dma_dir);
-		if (dma_mapping_error(dmadev, cmd->cmd_dma)) {
-			cmd->cmd_dma = 0;
-			return -ENOMEM;
-		}
-
-		orb->data_descriptor_lo = cmd->cmd_dma;
-		orb->misc |= ORB_SET_DATA_SIZE(cmd->dma_size);
-
+	if (n == 1) {
+		orb->misc |= ORB_SET_DATA_SIZE(sg_dma_len(sg));
+		orb->data_descriptor_lo = sg_dma_address(sg);
 	} else {
-		struct sbp2_unrestricted_page_table *sg_element =
-			&cmd->scatter_gather_element[0];
-		u32 sg_count, sg_len;
-		dma_addr_t sg_addr;
-		int i, count = dma_map_sg(dmadev, sg, scsi_use_sg, dma_dir);
-
-		cmd->dma_size = scsi_use_sg;
-		cmd->sge_buffer = sg;
-
-		/* use page tables (s/g) */
-		orb->misc |= ORB_SET_PAGE_TABLE_PRESENT(0x1);
-		orb->data_descriptor_lo = cmd->sge_dma;
+		pt = &cmd->scatter_gather_element[0];
 
 		dma_sync_single_for_cpu(dmadev, cmd->sge_dma,
 					sizeof(cmd->scatter_gather_element),
 					DMA_TO_DEVICE);
 
-		/* loop through and fill out our SBP-2 page tables
-		 * (and split up anything too large) */
-		for (i = 0, sg_count = 0; i < count; i++, sg = sg_next(sg)) {
-			sg_len = sg_dma_len(sg);
-			sg_addr = sg_dma_address(sg);
-			while (sg_len) {
-				sg_element[sg_count].segment_base_lo = sg_addr;
-				if (sg_len > SBP2_MAX_SG_ELEMENT_LENGTH) {
-					sg_element[sg_count].length_segment_base_hi =
-						PAGE_TABLE_SET_SEGMENT_LENGTH(SBP2_MAX_SG_ELEMENT_LENGTH);
-					sg_addr += SBP2_MAX_SG_ELEMENT_LENGTH;
-					sg_len -= SBP2_MAX_SG_ELEMENT_LENGTH;
-				} else {
-					sg_element[sg_count].length_segment_base_hi =
-						PAGE_TABLE_SET_SEGMENT_LENGTH(sg_len);
-					sg_len = 0;
-				}
-				sg_count++;
-			}
+		for_each_sg(sg, sg, n, i) {
+			pt[i].high = cpu_to_be32(sg_dma_len(sg) << 16);
+			pt[i].low = cpu_to_be32(sg_dma_address(sg));
 		}
 
-		orb->misc |= ORB_SET_DATA_SIZE(sg_count);
-
-		sbp2util_cpu_to_be32_buffer(sg_element,
-			(sizeof(struct sbp2_unrestricted_page_table)) *
-			sg_count);
+		orb->misc |= ORB_SET_PAGE_TABLE_PRESENT(0x1) |
+			     ORB_SET_DATA_SIZE(n);
+		orb->data_descriptor_lo = cmd->sge_dma;
 
 		dma_sync_single_for_device(dmadev, cmd->sge_dma,
 					   sizeof(cmd->scatter_gather_element),
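
Note: each entry that dma_map_sg() returns now maps one-to-one onto an SBP-2
unrestricted page-table element, since the DMA layer honors the 0xfffc
segment-size cap installed in sbp2_alloc_device() above. The layout assumed
by the new fill loop, annotated (length in the upper 16 bits of the first
quadlet; segment_base_hi stays zero on the assumption that the host's DMA
addresses fit in 32 bits, as the old code also assumed):

	/* one SBP-2 unrestricted page-table element, big-endian on the wire:
	 *   high = segment_length << 16 | segment_base_hi (zero here)
	 *   low  = segment_base_lo
	 */
	pt[i].high = cpu_to_be32(sg_dma_len(sg) << 16);
	pt[i].low = cpu_to_be32(sg_dma_address(sg));

With the cap in place, sg_dma_len(sg) never exceeds 0xfffc, so the old loop
that split oversized segments across several elements becomes unnecessary.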
@@ -2048,6 +2004,8 @@ static int sbp2scsi_slave_configure(struct scsi_device *sdev)
 		sdev->start_stop_pwr_cond = 1;
 	if (lu->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS)
 		blk_queue_max_sectors(sdev->request_queue, 128 * 1024 / 512);
+
+	blk_queue_max_segment_size(sdev->request_queue, SBP2_MAX_SEG_SIZE);
 	return 0;
 }
 
diff --git a/drivers/ieee1394/sbp2.h b/drivers/ieee1394/sbp2.h
index 875428bc8d29..c5036f1cc5b0 100644
--- a/drivers/ieee1394/sbp2.h
+++ b/drivers/ieee1394/sbp2.h
@@ -139,13 +139,10 @@ struct sbp2_logout_orb {
 	u32 status_fifo_lo;
 } __attribute__((packed));
 
-#define PAGE_TABLE_SET_SEGMENT_BASE_HI(v)	((v) & 0xffff)
-#define PAGE_TABLE_SET_SEGMENT_LENGTH(v)	(((v) & 0xffff) << 16)
-
 struct sbp2_unrestricted_page_table {
-	u32 length_segment_base_hi;
-	u32 segment_base_lo;
-} __attribute__((packed));
+	__be32 high;
+	__be32 low;
+};
 
 #define RESP_STATUS_REQUEST_COMPLETE		0x0
 #define RESP_STATUS_TRANSPORT_FAILURE		0x1
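
Note: typing the members as __be32 lets sparse flag any assignment that
misses a byte-order conversion, which is why the fill loop in sbp2.c calls
cpu_to_be32() per field instead of the old whole-buffer
sbp2util_cpu_to_be32_buffer() pass. Such a check runs with the usual kbuild
switch, e.g.:

	make C=1 drivers/ieee1394/sbp2.o

Dropping __attribute__((packed)) is safe as well: two naturally aligned
32-bit members contain no padding, so the attribute only pessimized code
generation on some architectures.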
@@ -216,15 +213,18 @@ struct sbp2_status_block {
 #define SBP2_UNIT_SPEC_ID_ENTRY			0x0000609e
 #define SBP2_SW_VERSION_ENTRY			0x00010483
 
-
 /*
- * SCSI specific definitions
+ * The default maximum s/g segment size of a FireWire controller is
+ * usually 0x10000, but SBP-2 only allows 0xffff. Since buffers have to
+ * be quadlet-aligned, we set the length limit to 0xffff & ~3.
  */
+#define SBP2_MAX_SEG_SIZE			0xfffc
 
-#define SBP2_MAX_SG_ELEMENT_LENGTH	0xf000
-/* There is no real limitation of the queue depth (i.e. length of the linked
+/*
+ * There is no real limitation of the queue depth (i.e. length of the linked
  * list of command ORBs) at the target. The chosen depth is merely an
- * implementation detail of the sbp2 driver. */
+ * implementation detail of the sbp2 driver.
+ */
 #define SBP2_MAX_CMDS		8
 
 #define SBP2_SCSI_STATUS_GOOD			0x0
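
Note: a hypothetical compile-time assertion of the two constraints stated in
the comment (not part of the patch) could read:

	BUILD_BUG_ON(SBP2_MAX_SEG_SIZE > 0xffff);	/* 16-bit segment_length */
	BUILD_BUG_ON(SBP2_MAX_SEG_SIZE & 3);		/* quadlet alignment */

The arithmetic: 0xffff & ~3 == 0xfffc, the largest quadlet-aligned value that
still fits the page table's 16-bit length field.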
@@ -240,12 +240,6 @@ struct sbp2_status_block {
  * Representations of commands and devices
  */
 
-enum sbp2_dma_types {
-	CMD_DMA_NONE,
-	CMD_DMA_PAGE,
-	CMD_DMA_SINGLE
-};
-
 /* Per SCSI command */
 struct sbp2_command_info {
 	struct list_head list;
@@ -258,11 +252,6 @@ struct sbp2_command_info {
 	struct sbp2_unrestricted_page_table
 		scatter_gather_element[SG_ALL] __attribute__((aligned(8)));
 	dma_addr_t sge_dma;
-	void *sge_buffer;
-	dma_addr_t cmd_dma;
-	enum sbp2_dma_types dma_type;
-	unsigned long dma_size;
-	enum dma_data_direction dma_dir;
 };
 
 /* Per FireWire host */
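
Note: the five dropped members were redundant bookkeeping; everything the
completion path needs in order to undo the mapping is recoverable from the
scsi_cmnd that sbp2_command_info already tracks through Current_SCpnt, via
the standard SCSI midlayer accessors (a sketch mirroring the new
sbp2util_mark_command_completed()):

	struct scatterlist *sg = scsi_sglist(cmd->Current_SCpnt);
	unsigned int nents = scsi_sg_count(cmd->Current_SCpnt);
	enum dma_data_direction dir = cmd->Current_SCpnt->sc_data_direction;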