Diffstat (limited to 'drivers/firewire/fw-sbp2.c')
-rw-r--r--	drivers/firewire/fw-sbp2.c	63
1 file changed, 27 insertions(+), 36 deletions(-)
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c
index 05997cee4f37..5d8411afcedb 100644
--- a/drivers/firewire/fw-sbp2.c
+++ b/drivers/firewire/fw-sbp2.c
@@ -29,6 +29,7 @@
  */
 
 #include <linux/blkdev.h>
+#include <linux/bug.h>
 #include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
@@ -181,10 +182,16 @@ struct sbp2_target {
 #define SBP2_MAX_LOGIN_ORB_TIMEOUT	40000U		/* Timeout in ms */
 #define SBP2_ORB_TIMEOUT		2000U		/* Timeout in ms */
 #define SBP2_ORB_NULL			0x80000000
-#define SBP2_MAX_SG_ELEMENT_LENGTH	0xf000
 #define SBP2_RETRY_LIMIT		0xf		/* 15 retries */
 #define SBP2_CYCLE_LIMIT		(0xc8 << 12)	/* 200 125us cycles */
 
+/*
+ * The default maximum s/g segment size of a FireWire controller is
+ * usually 0x10000, but SBP-2 only allows 0xffff. Since buffers have to
+ * be quadlet-aligned, we set the length limit to 0xffff & ~3.
+ */
+#define SBP2_MAX_SEG_SIZE		0xfffc
+
 /* Unit directory keys */
 #define SBP2_CSR_UNIT_CHARACTERISTICS	0x3a
 #define SBP2_CSR_FIRMWARE_REVISION	0x3c
@@ -1099,6 +1106,10 @@ static int sbp2_probe(struct device *dev)
 	struct Scsi_Host *shost;
 	u32 model, firmware_revision;
 
+	if (dma_get_max_seg_size(device->card->device) > SBP2_MAX_SEG_SIZE)
+		BUG_ON(dma_set_max_seg_size(device->card->device,
+					    SBP2_MAX_SEG_SIZE));
+
 	shost = scsi_host_alloc(&scsi_driver_template, sizeof(*tgt));
 	if (shost == NULL)
 		return -ENOMEM;
@@ -1347,14 +1358,12 @@ static int
 sbp2_map_scatterlist(struct sbp2_command_orb *orb, struct fw_device *device,
 		     struct sbp2_logical_unit *lu)
 {
-	struct scatterlist *sg;
-	int sg_len, l, i, j, count;
-	dma_addr_t sg_addr;
-
-	sg = scsi_sglist(orb->cmd);
-	count = dma_map_sg(device->card->device, sg, scsi_sg_count(orb->cmd),
-			   orb->cmd->sc_data_direction);
-	if (count == 0)
+	struct scatterlist *sg = scsi_sglist(orb->cmd);
+	int i, n;
+
+	n = dma_map_sg(device->card->device, sg, scsi_sg_count(orb->cmd),
+		       orb->cmd->sc_data_direction);
+	if (n == 0)
 		goto fail;
 
 	/*
@@ -1364,7 +1373,7 @@ sbp2_map_scatterlist(struct sbp2_command_orb *orb, struct fw_device *device,
 	 * as the second generation iPod which doesn't support page
 	 * tables.
 	 */
-	if (count == 1 && sg_dma_len(sg) < SBP2_MAX_SG_ELEMENT_LENGTH) {
+	if (n == 1) {
 		orb->request.data_descriptor.high =
 			cpu_to_be32(lu->tgt->address_high);
 		orb->request.data_descriptor.low =
@@ -1374,29 +1383,9 @@ sbp2_map_scatterlist(struct sbp2_command_orb *orb, struct fw_device *device,
 		return 0;
 	}
 
-	/*
-	 * Convert the scatterlist to an sbp2 page table.  If any
-	 * scatterlist entries are too big for sbp2, we split them as we
-	 * go.  Even if we ask the block I/O layer to not give us sg
-	 * elements larger than 65535 bytes, some IOMMUs may merge sg elements
-	 * during DMA mapping, and Linux currently doesn't prevent this.
-	 */
-	for (i = 0, j = 0; i < count; i++, sg = sg_next(sg)) {
-		sg_len = sg_dma_len(sg);
-		sg_addr = sg_dma_address(sg);
-		while (sg_len) {
-			/* FIXME: This won't get us out of the pinch. */
-			if (unlikely(j >= ARRAY_SIZE(orb->page_table))) {
-				fw_error("page table overflow\n");
-				goto fail_page_table;
-			}
-			l = min(sg_len, SBP2_MAX_SG_ELEMENT_LENGTH);
-			orb->page_table[j].low = cpu_to_be32(sg_addr);
-			orb->page_table[j].high = cpu_to_be32(l << 16);
-			sg_addr += l;
-			sg_len -= l;
-			j++;
-		}
+	for_each_sg(sg, sg, n, i) {
+		orb->page_table[i].high = cpu_to_be32(sg_dma_len(sg) << 16);
+		orb->page_table[i].low = cpu_to_be32(sg_dma_address(sg));
 	}
 
 	orb->page_table_bus =
@@ -1415,13 +1404,13 @@ sbp2_map_scatterlist(struct sbp2_command_orb *orb, struct fw_device *device,
 	orb->request.data_descriptor.high = cpu_to_be32(lu->tgt->address_high);
 	orb->request.data_descriptor.low = cpu_to_be32(orb->page_table_bus);
 	orb->request.misc |= cpu_to_be32(COMMAND_ORB_PAGE_TABLE_PRESENT |
-					 COMMAND_ORB_DATA_SIZE(j));
+					 COMMAND_ORB_DATA_SIZE(n));
 
 	return 0;
 
  fail_page_table:
-	dma_unmap_sg(device->card->device, sg, scsi_sg_count(orb->cmd),
-		     orb->cmd->sc_data_direction);
+	dma_unmap_sg(device->card->device, scsi_sglist(orb->cmd),
+		     scsi_sg_count(orb->cmd), orb->cmd->sc_data_direction);
  fail:
 	return -ENOMEM;
 }
@@ -1542,6 +1531,8 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
 	if (lu->tgt->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS)
 		blk_queue_max_sectors(sdev->request_queue, 128 * 1024 / 512);
 
+	blk_queue_max_segment_size(sdev->request_queue, SBP2_MAX_SEG_SIZE);
+
 	return 0;
 }
 
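
The change works because, once dma_set_max_seg_size() and blk_queue_max_segment_size() cap every s/g element at SBP2_MAX_SEG_SIZE, each mapped element is guaranteed to fit into a single SBP-2 page-table entry, whose length field is only 16 bits wide and whose buffers must be quadlet-aligned; the old splitting loop then collapses to the plain for_each_sg() above. As a reference only (not part of the patch), the standalone userspace C sketch below walks through that arithmetic; the helper name sbp2_pte_high is made up for the demo.

/*
 * Illustrative userspace sketch, not kernel code: shows the arithmetic
 * behind SBP2_MAX_SEG_SIZE and the page-table "high" word written in
 * sbp2_map_scatterlist() above.  Build with any C99 compiler.
 */
#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define SBP2_MAX_SEG_SIZE 0xfffcu	/* 0xffff rounded down to a quadlet multiple */

/* Mirrors orb->page_table[i].high = cpu_to_be32(sg_dma_len(sg) << 16) (byte order ignored here). */
static uint32_t sbp2_pte_high(uint32_t seg_len)
{
	assert(seg_len <= 0xffff);	/* the SBP-2 length field is only 16 bits wide */
	assert((seg_len & 3) == 0);	/* SBP-2 buffers are quadlet aligned */
	return seg_len << 16;		/* length lives in the high 16 bits of the quadlet */
}

int main(void)
{
	/* The limit is the largest quadlet-aligned value that still fits in 16 bits. */
	printf("0xffff & ~3 = 0x%x\n", 0xffffu & ~3u);

	/* A maximal segment of SBP2_MAX_SEG_SIZE bytes encodes into one page-table entry... */
	printf("pte.high for 0x%x bytes = 0x%08" PRIx32 "\n",
	       SBP2_MAX_SEG_SIZE, sbp2_pte_high(SBP2_MAX_SEG_SIZE));

	/*
	 * ...whereas a 0x10000-byte segment (a controller's usual default
	 * dma_get_max_seg_size()) would overflow the 16-bit length field,
	 * which is why the patch caps both the DMA layer and the block
	 * queue at SBP2_MAX_SEG_SIZE before any commands are mapped.
	 */
	return 0;
}

Run as written, this prints 0xfffc and 0xfffc0000, matching the new SBP2_MAX_SEG_SIZE definition and the encoding stored in page_table[i].high.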