author		Ke Wei <kewei@marvell.com>	2008-03-27 02:54:23 -0400
committer	James Bottomley <James.Bottomley@HansenPartnership.com>	2008-03-28 13:31:03 -0400
commit		1fce5e5da03b18505179882be27cc697f24d6b58 (patch)
tree		01eae54e97f6a8b6cfc6a0846db74ac8e6e49fb5
parent		ee1f1c2ef95258351e1ecb89a2dbd2763cb3a6ed (diff)
[SCSI] mvsas : interrupt handling
When a slot is busy, do not free it until the slot reset has completed.
When a disk is unplugged, release all command tasks that were already sent
to the now-unplugged port.
If MVS_USE_TASKLET is defined, interrupt handling is deferred to a tasklet;
it is off by default.
Signed-off-by: Ke Wei <kewei@marvell.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
-rw-r--r--	drivers/scsi/mvsas.c	227
1 file changed, 170 insertions, 57 deletions
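For reference, the MVS_USE_TASKLET path mentioned in the message uses the usual split-handler pattern: the hard IRQ only acknowledges the controller and schedules a tasklet, and the tasklet then drains the completion ring under the driver lock. The sketch below is illustrative only; the demo_* types and helpers are reduced stand-ins for the mvs_info fields and helpers that appear in the diff that follows, not the driver's actual code.

/* Minimal sketch of the deferred-interrupt pattern used when
 * MVS_USE_TASKLET is defined. The demo_* names are stand-ins,
 * not the mvsas driver's real code.
 */
#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct demo_hba {
	spinlock_t lock;		/* protects the RX completion ring */
	struct tasklet_struct tasklet;	/* deferred completion work */
};

static void demo_drain_ring(struct demo_hba *hba)
{
	/* walk RX completion entries, complete slots, free tags ... */
}

/* Tasklet body: runs in softirq context and takes the driver lock. */
static void demo_tasklet(unsigned long data)
{
	struct demo_hba *hba = (struct demo_hba *)data;
	unsigned long flags;

	spin_lock_irqsave(&hba->lock, flags);
	demo_drain_ring(hba);
	spin_unlock_irqrestore(&hba->lock, flags);
}

/* Hard IRQ: acknowledge the hardware, then defer the real work. */
static irqreturn_t demo_interrupt(int irq, void *opaque)
{
	struct demo_hba *hba = opaque;

	/* clear the interrupt cause in hardware here, then: */
	tasklet_schedule(&hba->tasklet);
	return IRQ_HANDLED;
}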
diff --git a/drivers/scsi/mvsas.c b/drivers/scsi/mvsas.c
index 9ebf56510d21..f302970f6f2d 100644
--- a/drivers/scsi/mvsas.c
+++ b/drivers/scsi/mvsas.c
@@ -1218,10 +1218,63 @@ static void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
 
 static void mvs_int_sata(struct mvs_info *mvi)
 {
-	/* FIXME */
+	u32 tmp;
+	void __iomem *regs = mvi->regs;
+	tmp = mr32(INT_STAT_SRS);
+	mw32(INT_STAT_SRS, tmp & 0xFFFF);
+}
+
+static void mvs_slot_reset(struct mvs_info *mvi, struct sas_task *task,
+				u32 slot_idx)
+{
+	void __iomem *regs = mvi->regs;
+	struct domain_device *dev = task->dev;
+	struct asd_sas_port *sas_port = dev->port;
+	struct mvs_port *port = mvi->slot_info[slot_idx].port;
+	u32 reg_set, phy_mask;
+
+	if (!sas_protocol_ata(task->task_proto)) {
+		reg_set = 0;
+		phy_mask = (port->wide_port_phymap) ? port->wide_port_phymap :
+				sas_port->phy_mask;
+	} else {
+		reg_set = port->taskfileset;
+		phy_mask = sas_port->phy_mask;
+	}
+	mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | slot_idx |
+					(TXQ_CMD_SLOT_RESET << TXQ_CMD_SHIFT) |
+					(phy_mask << TXQ_PHY_SHIFT) |
+					(reg_set << TXQ_SRS_SHIFT));
+
+	mw32(TX_PROD_IDX, mvi->tx_prod);
+	mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
 }
 
-static void mvs_slot_free(struct mvs_info *mvi, struct sas_task *task,
+static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
+			  u32 slot_idx, int err)
+{
+	struct mvs_port *port = mvi->slot_info[slot_idx].port;
+	struct task_status_struct *tstat = &task->task_status;
+	struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf;
+	int stat = SAM_GOOD;
+
+	resp->frame_len = sizeof(struct dev_to_host_fis);
+	memcpy(&resp->ending_fis[0],
+	       SATA_RECEIVED_D2H_FIS(port->taskfileset),
+	       sizeof(struct dev_to_host_fis));
+	tstat->buf_valid_size = sizeof(*resp);
+	if (unlikely(err))
+		stat = SAS_PROTO_RESPONSE;
+	return stat;
+}
+
+static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
+{
+	u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
+	mvs_tag_clear(mvi, slot_idx);
+}
+
+static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
 			  struct mvs_slot_info *slot, u32 slot_idx)
 {
 	if (!sas_protocol_ata(task->task_proto))
@@ -1244,38 +1297,58 @@ static void mvs_slot_free(struct mvs_info *mvi, struct sas_task *task,
 		/* do nothing */
 		break;
 	}
-
+	list_del(&slot->list);
+	task->lldd_task = NULL;
 	slot->task = NULL;
-	mvs_tag_clear(mvi, slot_idx);
+	slot->port = NULL;
 }
 
-static void mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
+static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
 			 u32 slot_idx)
 {
 	struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
-	u64 err_dw0 = *(u32 *) slot->response;
-	void __iomem *regs = mvi->regs;
-	u32 tmp;
+	u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response));
+	u32 err_dw1 = le32_to_cpu(*(u32 *) (slot->response + 4));
+	int stat = SAM_CHECK_COND;
 
-	if (err_dw0 & CMD_ISS_STPD)
-		if (sas_protocol_ata(task->task_proto)) {
-			tmp = mr32(INT_STAT_SRS);
-			mw32(INT_STAT_SRS, tmp & 0xFFFF);
-		}
+	if (err_dw1 & SLOT_BSY_ERR) {
+		stat = SAS_QUEUE_FULL;
+		mvs_slot_reset(mvi, task, slot_idx);
+	}
+	switch (task->task_proto) {
+	case SAS_PROTOCOL_SSP:
+		break;
+	case SAS_PROTOCOL_SMP:
+		break;
+	case SAS_PROTOCOL_SATA:
+	case SAS_PROTOCOL_STP:
+	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
+		if (err_dw0 & TFILE_ERR)
+			stat = mvs_sata_done(mvi, task, slot_idx, 1);
+		break;
+	default:
+		break;
+	}
 
-	mvs_hba_sb_dump(mvi, slot_idx, task->task_proto);
+	mvs_hexdump(16, (u8 *) slot->response, 0);
+	return stat;
 }
 
-static int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc)
+static int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
 {
 	u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
 	struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
 	struct sas_task *task = slot->task;
-	struct task_status_struct *tstat = &task->task_status;
-	struct mvs_port *port = &mvi->port[task->dev->port->id];
+	struct task_status_struct *tstat;
+	struct mvs_port *port;
 	bool aborted;
 	void *to;
 
+	if (unlikely(!task || !task->lldd_task))
+		return -1;
+
+	mvs_hba_cq_dump(mvi);
+
 	spin_lock(&task->task_state_lock);
 	aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
 	if (!aborted) {
@@ -1285,22 +1358,27 @@ static int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc)
 	}
 	spin_unlock(&task->task_state_lock);
 
-	if (aborted)
+	if (aborted) {
+		mvs_slot_task_free(mvi, task, slot, slot_idx);
+		mvs_slot_free(mvi, rx_desc);
 		return -1;
+	}
 
+	port = slot->port;
+	tstat = &task->task_status;
 	memset(tstat, 0, sizeof(*tstat));
 	tstat->resp = SAS_TASK_COMPLETE;
 
-
-	if (unlikely(!port->port_attached)) {
-		tstat->stat = SAS_PHY_DOWN;
+	if (unlikely(!port->port_attached || flags)) {
+		mvs_slot_err(mvi, task, slot_idx);
+		if (!sas_protocol_ata(task->task_proto))
+			tstat->stat = SAS_PHY_DOWN;
 		goto out;
 	}
 
 	/* error info record present */
-	if ((rx_desc & RXQ_ERR) && (*(u64 *) slot->response)) {
-		tstat->stat = SAM_CHECK_COND;
-		mvs_slot_err(mvi, task, slot_idx);
+	if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) {
+		tstat->stat = mvs_slot_err(mvi, task, slot_idx);
 		goto out;
 	}
 
@@ -1337,21 +1415,7 @@ static int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc)
 	case SAS_PROTOCOL_SATA:
 	case SAS_PROTOCOL_STP:
 	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: {
-			struct ata_task_resp *resp =
-			    (struct ata_task_resp *)tstat->buf;
-
-			if ((rx_desc & (RXQ_DONE | RXQ_ERR | RXQ_ATTN)) ==
-			    RXQ_DONE)
-				tstat->stat = SAM_GOOD;
-			else
-				tstat->stat = SAM_CHECK_COND;
-
-			resp->frame_len = sizeof(struct dev_to_host_fis);
-			memcpy(&resp->ending_fis[0],
-			       SATA_RECEIVED_D2H_FIS(port->taskfileset),
-			       sizeof(struct dev_to_host_fis));
-			if (resp->ending_fis[2] & ATA_ERR)
-				mvs_hexdump(16, resp->ending_fis, 0);
+		tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0);
 		break;
 	}
 
@@ -1361,11 +1425,34 @@ static int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc)
 	}
 
 out:
-	mvs_slot_free(mvi, task, slot, slot_idx);
+	mvs_slot_task_free(mvi, task, slot, slot_idx);
+	if (unlikely(tstat->stat != SAS_QUEUE_FULL))
+		mvs_slot_free(mvi, rx_desc);
+
+	spin_unlock(&mvi->lock);
 	task->task_done(task);
+	spin_lock(&mvi->lock);
 	return tstat->stat;
 }
 
+static void mvs_release_task(struct mvs_info *mvi, int phy_no)
+{
+	struct list_head *pos, *n;
+	struct mvs_slot_info *slot;
+	struct mvs_phy *phy = &mvi->phy[phy_no];
+	struct mvs_port *port = phy->port;
+	u32 rx_desc;
+
+	if (!port)
+		return;
+
+	list_for_each_safe(pos, n, &port->list) {
+		slot = container_of(pos, struct mvs_slot_info, list);
+		rx_desc = (u32) (slot - mvi->slot_info);
+		mvs_slot_complete(mvi, rx_desc, 1);
+	}
+}
+
 static void mvs_int_full(struct mvs_info *mvi)
 {
 	void __iomem *regs = mvi->regs;
@@ -1400,40 +1487,43 @@ static int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
 	 * we don't have to stall the CPU reading that register.
 	 * The actual RX ring is offset by one dword, due to this.
 	 */
-	rx_prod_idx = mr32(RX_CONS_IDX) & RX_RING_SZ_MASK;
-	if (rx_prod_idx == 0xfff) {	/* h/w hasn't touched RX ring yet */
-		mvi->rx_cons = 0xfff;
+	rx_prod_idx = mvi->rx_cons;
+	mvi->rx_cons = le32_to_cpu(mvi->rx[0]);
+	if (mvi->rx_cons == 0xfff)	/* h/w hasn't touched RX ring yet */
 		return 0;
-	}
 
 	/* The CMPL_Q may come late, read from register and try again
 	 * note: if coalescing is enabled,
 	 * it will need to read from register every time for sure
 	 */
 	if (mvi->rx_cons == rx_prod_idx)
-		return 0;
+		mvi->rx_cons = mr32(RX_CONS_IDX) & RX_RING_SZ_MASK;
 
-	if (mvi->rx_cons == 0xfff)
-		mvi->rx_cons = MVS_RX_RING_SZ - 1;
+	if (mvi->rx_cons == rx_prod_idx)
+		return 0;
 
 	while (mvi->rx_cons != rx_prod_idx) {
 
 		/* increment our internal RX consumer pointer */
-		mvi->rx_cons = (mvi->rx_cons + 1) & (MVS_RX_RING_SZ - 1);
-
-		rx_desc = le32_to_cpu(mvi->rx[mvi->rx_cons + 1]);
+		rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1);
 
-		mvs_hba_cq_dump(mvi);
+		rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]);
 
 		if (likely(rx_desc & RXQ_DONE))
-			mvs_slot_complete(mvi, rx_desc);
+			mvs_slot_complete(mvi, rx_desc, 0);
 		if (rx_desc & RXQ_ATTN) {
 			attn = true;
 			dev_printk(KERN_DEBUG, &pdev->dev, "ATTN %X\n",
 				rx_desc);
 		} else if (rx_desc & RXQ_ERR) {
+			if (!(rx_desc & RXQ_DONE))
+				mvs_slot_complete(mvi, rx_desc, 0);
 			dev_printk(KERN_DEBUG, &pdev->dev, "RXQ_ERR %X\n",
 				rx_desc);
+		} else if (rx_desc & RXQ_SLOT_RESET) {
+			dev_printk(KERN_DEBUG, &pdev->dev, "Slot reset[%X]\n",
+				rx_desc);
+			mvs_slot_free(mvi, rx_desc);
 		}
 	}
 
@@ -1443,6 +1533,23 @@ static int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
 	return 0;
 }
 
+#ifdef MVS_USE_TASKLET
+static void mvs_tasklet(unsigned long data)
+{
+	struct mvs_info *mvi = (struct mvs_info *) data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mvi->lock, flags);
+
+#ifdef MVS_DISABLE_MSI
+	mvs_int_full(mvi);
+#else
+	mvs_int_rx(mvi, true);
+#endif
+	spin_unlock_irqrestore(&mvi->lock, flags);
+}
+#endif
+
 static irqreturn_t mvs_interrupt(int irq, void *opaque)
 {
 	struct mvs_info *mvi = opaque;
@@ -1451,18 +1558,21 @@ static irqreturn_t mvs_interrupt(int irq, void *opaque)
 
 	stat = mr32(GBL_INT_STAT);
 
-	/* clear CMD_CMPLT ASAP */
-	mw32_f(INT_STAT, CINT_DONE);
-
 	if (stat == 0 || stat == 0xffffffff)
 		return IRQ_NONE;
 
+	/* clear CMD_CMPLT ASAP */
+	mw32_f(INT_STAT, CINT_DONE);
+
+#ifndef MVS_USE_TASKLET
 	spin_lock(&mvi->lock);
 
 	mvs_int_full(mvi);
 
 	spin_unlock(&mvi->lock);
-
+#else
+	tasklet_schedule(&mvi->tasklet);
+#endif
 	return IRQ_HANDLED;
 }
 
@@ -1471,12 +1581,15 @@ static irqreturn_t mvs_msi_interrupt(int irq, void *opaque)
 {
 	struct mvs_info *mvi = opaque;
 
+#ifndef MVS_USE_TASKLET
 	spin_lock(&mvi->lock);
 
 	mvs_int_rx(mvi, true);
 
 	spin_unlock(&mvi->lock);
-
+#else
+	tasklet_schedule(&mvi->tasklet);
+#endif
 	return IRQ_HANDLED;
 }
 #endif
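The hunks above only schedule mvi->tasklet; its setup and teardown are not part of this excerpt. Assuming the tasklet is wired up in the probe and remove paths (a guess about where the rest of the patch does this), the pairing would look roughly like the sketch below. The demo_* wrapper names are placeholders, while mvs_tasklet and struct mvs_info come from the patch itself.

/* Hypothetical setup/teardown pairing for the tasklet scheduled above.
 * struct mvs_info and mvs_tasklet() are the driver's own (mvsas.c);
 * these wrappers only illustrate where tasklet_init/tasklet_kill belong.
 */
#include <linux/interrupt.h>

#ifdef MVS_USE_TASKLET
static void demo_setup_tasklet(struct mvs_info *mvi)
{
	/* bind the softirq handler to this HBA instance */
	tasklet_init(&mvi->tasklet, mvs_tasklet, (unsigned long)mvi);
}

static void demo_teardown_tasklet(struct mvs_info *mvi)
{
	/* ensure no deferred completion work is pending or running */
	tasklet_kill(&mvi->tasklet);
}
#endif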