author	David Howells <dhowells@redhat.com>	2006-12-06 10:01:18 -0500
committer	David Howells <dhowells@warthog.cambridge.redhat.com>	2006-12-06 10:01:18 -0500
commit	4796b71fbb907ce6b8a9acf1852d3646a80b4576 (patch)
tree	6263f165446c581efdbb760205c1f85378fe6259 /drivers/scsi/ipr.c
parent	6d5aefb8eaa38e44b5b8cf60c812aceafc02d924 (diff)
parent	ec0bf39a471bf6fcd01def2bd677128cea940b73 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts:

	drivers/pcmcia/ds.c

Fix up merge failures with Linus's head and fix new compile failures.

Signed-Off-By: David Howells <dhowells@redhat.com>
Diffstat (limited to 'drivers/scsi/ipr.c')
-rw-r--r--	drivers/scsi/ipr.c	313
1 files changed, 261 insertions, 52 deletions
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index d51c3e764bb0..ccd4dafce8e2 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -79,7 +79,6 @@
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsi_eh.h>
 #include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_transport.h>
 #include "ipr.h"
 
 /*
@@ -98,7 +97,7 @@ static DEFINE_SPINLOCK(ipr_driver_lock);
 
 /* This table describes the differences between DMA controller chips */
 static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
-	{ /* Gemstone, Citrine, and Obsidian */
+	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
 		.mailbox = 0x0042C,
 		.cache_line_size = 0x20,
 		{
@@ -135,6 +134,7 @@ static const struct ipr_chip_t ipr_chip[] = {
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, &ipr_chip_cfg[0] },
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, &ipr_chip_cfg[0] },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, &ipr_chip_cfg[0] },
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
 };
@@ -1249,19 +1249,23 @@ static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
 
 /**
  * ipr_log_hex_data - Log additional hex IOA error data.
+ * @ioa_cfg:	ioa config struct
  * @data:		IOA error data
  * @len:		data length
  *
  * Return value:
  * 	none
  **/
-static void ipr_log_hex_data(u32 *data, int len)
+static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
 {
 	int i;
 
 	if (len == 0)
 		return;
 
+	if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
+		len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
+
 	for (i = 0; i < len / 4; i += 4) {
 		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
 			be32_to_cpu(data[i]),
@@ -1290,7 +1294,7 @@ static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
 	ipr_err("%s\n", error->failure_reason);
 	ipr_err("Remote Adapter VPD:\n");
 	ipr_log_ext_vpd(&error->vpd);
-	ipr_log_hex_data(error->data,
+	ipr_log_hex_data(ioa_cfg, error->data,
 			 be32_to_cpu(hostrcb->hcam.length) -
 			 (offsetof(struct ipr_hostrcb_error, u) +
 			  offsetof(struct ipr_hostrcb_type_17_error, data)));
@@ -1315,12 +1319,225 @@ static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
 	ipr_err("%s\n", error->failure_reason);
 	ipr_err("Remote Adapter VPD:\n");
 	ipr_log_vpd(&error->vpd);
-	ipr_log_hex_data(error->data,
+	ipr_log_hex_data(ioa_cfg, error->data,
 			 be32_to_cpu(hostrcb->hcam.length) -
 			 (offsetof(struct ipr_hostrcb_error, u) +
 			  offsetof(struct ipr_hostrcb_type_07_error, data)));
 }
 
+static const struct {
+	u8 active;
+	char *desc;
+} path_active_desc[] = {
+	{ IPR_PATH_NO_INFO, "Path" },
+	{ IPR_PATH_ACTIVE, "Active path" },
+	{ IPR_PATH_NOT_ACTIVE, "Inactive path" }
+};
+
+static const struct {
+	u8 state;
+	char *desc;
+} path_state_desc[] = {
+	{ IPR_PATH_STATE_NO_INFO, "has no path state information available" },
+	{ IPR_PATH_HEALTHY, "is healthy" },
+	{ IPR_PATH_DEGRADED, "is degraded" },
+	{ IPR_PATH_FAILED, "is failed" }
+};
+
+/**
+ * ipr_log_fabric_path - Log a fabric path error
+ * @hostrcb:	hostrcb struct
+ * @fabric:	fabric descriptor
+ *
+ * Return value:
+ * 	none
+ **/
+static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
+				struct ipr_hostrcb_fabric_desc *fabric)
+{
+	int i, j;
+	u8 path_state = fabric->path_state;
+	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
+	u8 state = path_state & IPR_PATH_STATE_MASK;
+
+	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
+		if (path_active_desc[i].active != active)
+			continue;
+
+		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
+			if (path_state_desc[j].state != state)
+				continue;
+
+			if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
+				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
+					     path_active_desc[i].desc, path_state_desc[j].desc,
+					     fabric->ioa_port);
+			} else if (fabric->cascaded_expander == 0xff) {
+				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
+					     path_active_desc[i].desc, path_state_desc[j].desc,
+					     fabric->ioa_port, fabric->phy);
+			} else if (fabric->phy == 0xff) {
+				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
+					     path_active_desc[i].desc, path_state_desc[j].desc,
+					     fabric->ioa_port, fabric->cascaded_expander);
+			} else {
+				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
+					     path_active_desc[i].desc, path_state_desc[j].desc,
+					     fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
+			}
+			return;
+		}
+	}
+
+	ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
+		fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
+}
+
+static const struct {
+	u8 type;
+	char *desc;
+} path_type_desc[] = {
+	{ IPR_PATH_CFG_IOA_PORT, "IOA port" },
+	{ IPR_PATH_CFG_EXP_PORT, "Expander port" },
+	{ IPR_PATH_CFG_DEVICE_PORT, "Device port" },
+	{ IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
+};
+
+static const struct {
+	u8 status;
+	char *desc;
+} path_status_desc[] = {
+	{ IPR_PATH_CFG_NO_PROB, "Functional" },
+	{ IPR_PATH_CFG_DEGRADED, "Degraded" },
+	{ IPR_PATH_CFG_FAILED, "Failed" },
+	{ IPR_PATH_CFG_SUSPECT, "Suspect" },
+	{ IPR_PATH_NOT_DETECTED, "Missing" },
+	{ IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
+};
+
+static const char *link_rate[] = {
+	"unknown",
+	"disabled",
+	"phy reset problem",
+	"spinup hold",
+	"port selector",
+	"unknown",
+	"unknown",
+	"unknown",
+	"1.5Gbps",
+	"3.0Gbps",
+	"unknown",
+	"unknown",
+	"unknown",
+	"unknown",
+	"unknown",
+	"unknown"
+};
+
+/**
+ * ipr_log_path_elem - Log a fabric path element.
+ * @hostrcb:	hostrcb struct
+ * @cfg:	fabric path element struct
+ *
+ * Return value:
+ * 	none
+ **/
+static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
+			      struct ipr_hostrcb_config_element *cfg)
+{
+	int i, j;
+	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
+	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
+
+	if (type == IPR_PATH_CFG_NOT_EXIST)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
+		if (path_type_desc[i].type != type)
+			continue;
+
+		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
+			if (path_status_desc[j].status != status)
+				continue;
+
+			if (type == IPR_PATH_CFG_IOA_PORT) {
+				ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
+					     path_status_desc[j].desc, path_type_desc[i].desc,
+					     cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
+					     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
+			} else {
+				if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
+					ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
+						     path_status_desc[j].desc, path_type_desc[i].desc,
+						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
+						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
+				} else if (cfg->cascaded_expander == 0xff) {
+					ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
+						     "WWN=%08X%08X\n", path_status_desc[j].desc,
+						     path_type_desc[i].desc, cfg->phy,
+						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
+						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
+				} else if (cfg->phy == 0xff) {
+					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
+						     "WWN=%08X%08X\n", path_status_desc[j].desc,
+						     path_type_desc[i].desc, cfg->cascaded_expander,
+						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
+						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
+				} else {
+					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
+						     "WWN=%08X%08X\n", path_status_desc[j].desc,
+						     path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
+						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
+						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
+				}
+			}
+			return;
+		}
+	}
+
+	ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
+		     "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
+		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
+		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
+}
+
+/**
+ * ipr_log_fabric_error - Log a fabric error.
+ * @ioa_cfg:	ioa config struct
+ * @hostrcb:	hostrcb struct
+ *
+ * Return value:
+ * 	none
+ **/
+static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
+				 struct ipr_hostrcb *hostrcb)
+{
+	struct ipr_hostrcb_type_20_error *error;
+	struct ipr_hostrcb_fabric_desc *fabric;
+	struct ipr_hostrcb_config_element *cfg;
+	int i, add_len;
+
+	error = &hostrcb->hcam.u.error.u.type_20_error;
+	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
+	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
+
+	add_len = be32_to_cpu(hostrcb->hcam.length) -
+		(offsetof(struct ipr_hostrcb_error, u) +
+		 offsetof(struct ipr_hostrcb_type_20_error, desc));
+
+	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
+		ipr_log_fabric_path(hostrcb, fabric);
+		for_each_fabric_cfg(fabric, cfg)
+			ipr_log_path_elem(hostrcb, cfg);
+
+		add_len -= be16_to_cpu(fabric->length);
+		fabric = (struct ipr_hostrcb_fabric_desc *)
+			((unsigned long)fabric + be16_to_cpu(fabric->length));
+	}
+
+	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
+}
+
 /**
  * ipr_log_generic_error - Log an adapter error.
  * @ioa_cfg:	ioa config struct
@@ -1332,7 +1549,7 @@ static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
 				  struct ipr_hostrcb *hostrcb)
 {
-	ipr_log_hex_data(hostrcb->hcam.u.raw.data,
+	ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
 			 be32_to_cpu(hostrcb->hcam.length));
 }
 
@@ -1394,13 +1611,7 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
 	if (!ipr_error_table[error_index].log_hcam)
 		return;
 
-	if (ipr_is_device(&hostrcb->hcam.u.error.failing_dev_res_addr)) {
-		ipr_ra_err(ioa_cfg, hostrcb->hcam.u.error.failing_dev_res_addr,
-			   "%s\n", ipr_error_table[error_index].error);
-	} else {
-		dev_err(&ioa_cfg->pdev->dev, "%s\n",
-			ipr_error_table[error_index].error);
-	}
+	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
 
 	/* Set indication we have logged an error */
 	ioa_cfg->errors_logged++;
@@ -1437,6 +1648,9 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
 	case IPR_HOST_RCB_OVERLAY_ID_17:
 		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
 		break;
+	case IPR_HOST_RCB_OVERLAY_ID_20:
+		ipr_log_fabric_error(ioa_cfg, hostrcb);
+		break;
 	case IPR_HOST_RCB_OVERLAY_ID_1:
 	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
 	default:
@@ -2970,7 +3184,6 @@ static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
 	struct ipr_dump *dump;
 	unsigned long lock_flags = 0;
 
-	ENTER;
 	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
 
 	if (!dump) {
@@ -2997,7 +3210,6 @@ static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
 	}
 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 
-	LEAVE;
 	return 0;
 }
 
@@ -3574,6 +3786,12 @@ static int ipr_sata_reset(struct ata_port *ap, unsigned int *classes)
 
 	ENTER;
 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+	while(ioa_cfg->in_reset_reload) {
+		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
+		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+	}
+
 	res = sata_port->res;
 	if (res) {
 		rc = ipr_device_reset(ioa_cfg, res);
@@ -3637,6 +3855,10 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
 		if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
 			if (ipr_cmd->scsi_cmd)
 				ipr_cmd->done = ipr_scsi_eh_done;
+			if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
+				ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
+				ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
+			}
 		}
 	}
 
@@ -3771,7 +3993,7 @@ static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
 	 */
 	if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
 		return FAILED;
-	if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
+	if (!res || !ipr_is_gscsi(res))
 		return FAILED;
 
 	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
@@ -4616,7 +4838,7 @@ static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
  * Return value:
  * 	0 on success / other on failure
  **/
-int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
+static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
 {
 	struct ipr_resource_entry *res;
 
@@ -4649,40 +4871,6 @@ static const char * ipr_ioa_info(struct Scsi_Host *host)
 	return buffer;
 }
 
-/**
- * ipr_scsi_timed_out - Handle scsi command timeout
- * @scsi_cmd:	scsi command struct
- *
- * Return value:
- * 	EH_NOT_HANDLED
- **/
-enum scsi_eh_timer_return ipr_scsi_timed_out(struct scsi_cmnd *scsi_cmd)
-{
-	struct ipr_ioa_cfg *ioa_cfg;
-	struct ipr_cmnd *ipr_cmd;
-	unsigned long flags;
-
-	ENTER;
-	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
-	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
-
-	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
-		if (ipr_cmd->qc && ipr_cmd->qc->scsicmd == scsi_cmd) {
-			ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
-			ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
-			break;
-		}
-	}
-
-	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
-	LEAVE;
-	return EH_NOT_HANDLED;
-}
-
-static struct scsi_transport_template ipr_transport_template = {
-	.eh_timed_out = ipr_scsi_timed_out
-};
-
 static struct scsi_host_template driver_template = {
 	.module = THIS_MODULE,
 	.name = "IPR",
@@ -4777,6 +4965,12 @@ static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
 	unsigned long flags;
 
 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
+	while(ioa_cfg->in_reset_reload) {
+		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
+		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
+		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
+	}
+
 	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
 		if (ipr_cmd->qc == qc) {
 			ipr_device_reset(ioa_cfg, sata_port->res);
@@ -6833,6 +7027,7 @@ static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
 
 		ioa_cfg->hostrcb[i]->hostrcb_dma =
 			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
+		ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
 		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
 	}
 
@@ -7018,7 +7213,6 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
 
 	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
 	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
-	host->transportt = &ipr_transport_template;
 	ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
 		      sata_port_info.flags, &ipr_sata_ops);
 
@@ -7352,12 +7546,24 @@ static struct pci_device_id ipr_pci_table[] __devinitdata = {
 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B,
 		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
+	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C,
+		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A,
 		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B,
 		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C,
+		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B8,
+		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7,
+		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780,
 		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
@@ -7367,6 +7573,9 @@ static struct pci_device_id ipr_pci_table[] __devinitdata = {
 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F,
 		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
+	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F,
+		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
 	{ }
 };
 MODULE_DEVICE_TABLE(pci, ipr_pci_table);