author		Brian King <brking@us.ibm.com>	2006-09-25 13:39:20 -0400
committer	James Bottomley <jejb@mulgrave.il.steeleye.com>	2006-09-26 12:23:18 -0400
commit		35a39691e4daa3c371c265e4cb8356c67ded45ec
tree		96d2acac2a4115570c70dc709a1e0ab7e46ec059 /drivers/scsi/ipr.c
parent		d7694f8c0bb61829b1bd4d5543a51c66f04a6c3e
[SCSI] ipr: Support attaching SATA devices
Adds support to attach SATA devices to ipr SAS adapters.

Signed-off-by: Brian King <brking@us.ibm.com>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
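As a reading aid (not part of the patch itself), the sketch below condenses how the new plumbing fits together: each SATA resource reported by the SAS IOA gets a libata port allocated through the SCSI target hooks, commands for those targets are handed to libata with ata_sas_queuecmd(), and libata re-issues them to the adapter as ATA passthrough IOA requests via ipr_qc_issue(). The initializers here are trimmed to the SATA-related fields and are not buildable on their own; the complete versions appear in the hunks below.

/*
 * Condensed sketch only -- trimmed to the SATA-related hooks this patch
 * adds; the full initializers and hook bodies are in the diff below.
 */
static struct ata_port_operations ipr_sata_ops = {
	.phy_reset         = ipr_ata_phy_reset,     /* device reset via IPR_ATA_PHY_RESET */
	.post_internal_cmd = ipr_ata_post_internal, /* reset the device if an internal qc is still pending */
	.tf_read           = ipr_tf_read,           /* taskfile is read back from the gata IOASA */
	.qc_issue          = ipr_qc_issue,          /* translate a qc into an IPR_RQTYPE_ATA_PASSTHRU IOARCB */
	.port_start        = ata_sas_port_start,    /* libata-over-SAS helpers */
	.port_stop         = ata_sas_port_stop,
};

static struct scsi_host_template driver_template = {
	/* ... existing ipr hooks ... */
	.target_alloc    = ipr_target_alloc,    /* ata_sas_port_alloc() for SATA (GATA) resources */
	.target_destroy  = ipr_target_destroy,  /* ata_sas_port_destroy() */
	.slave_configure = ipr_slave_configure, /* ata_sas_slave_configure() + ATA queue depth */
	.queuecommand    = ipr_queuecommand,    /* SATA commands are routed to ata_sas_queuecmd() */
	.ioctl           = ipr_ioctl,           /* ATA passthrough ioctls via ata_scsi_ioctl() */
};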
Diffstat (limited to 'drivers/scsi/ipr.c')
-rw-r--r--	drivers/scsi/ipr.c	683
1 file changed, 665 insertions, 18 deletions
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 7ed4eef8347b..e1e3a0cb88e3 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -70,6 +70,7 @@
 #include <linux/firmware.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
+#include <linux/libata.h>
 #include <asm/io.h>
 #include <asm/irq.h>
 #include <asm/processor.h>
@@ -78,6 +79,7 @@
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsi_eh.h>
 #include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_transport.h>
 #include "ipr.h"
 
 /*
@@ -199,6 +201,8 @@ struct ipr_error_table_t ipr_error_table[] = {
 	"FFFA: Undefined device response recovered by the IOA"},
 	{0x014A0000, 1, 1,
 	"FFF6: Device bus error, message or command phase"},
+	{0x014A8000, 0, 1,
+	"FFFE: Task Management Function failed"},
 	{0x015D0000, 0, 1,
 	"FFF6: Failure prediction threshold exceeded"},
 	{0x015D9200, 0, 1,
@@ -261,6 +265,8 @@ struct ipr_error_table_t ipr_error_table[] = {
 	"Device bus status error"},
 	{0x04448600, 0, 1,
 	"8157: IOA error requiring IOA reset to recover"},
+	{0x04448700, 0, 0,
+	"ATA device status error"},
 	{0x04490000, 0, 0,
 	"Message reject received from the device"},
 	{0x04449200, 0, 1,
@@ -273,6 +279,8 @@ struct ipr_error_table_t ipr_error_table[] = {
 	"9082: IOA detected device error"},
 	{0x044A0000, 1, 1,
 	"3110: Device bus error, message or command phase"},
+	{0x044A8000, 1, 1,
+	"3110: SAS Command / Task Management Function failed"},
 	{0x04670400, 0, 1,
 	"9091: Incorrect hardware configuration change has been detected"},
 	{0x04678000, 0, 1,
@@ -453,7 +461,8 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
 	trace_entry->time = jiffies;
 	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
 	trace_entry->type = type;
-	trace_entry->cmd_index = ipr_cmd->cmd_index;
+	trace_entry->ata_op_code = ipr_cmd->ioarcb.add_data.u.regs.command;
+	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
 	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
 	trace_entry->u.add_data = add_data;
 }
@@ -480,8 +489,10 @@ static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
 	ioarcb->read_ioadl_len = 0;
 	ioasa->ioasc = 0;
 	ioasa->residual_data_len = 0;
+	ioasa->u.gata.status = 0;
 
 	ipr_cmd->scsi_cmd = NULL;
+	ipr_cmd->qc = NULL;
 	ipr_cmd->sense_buffer[0] = 0;
 	ipr_cmd->dma_use_sg = 0;
 }
@@ -626,6 +637,28 @@ static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
 }
 
 /**
+ * ipr_sata_eh_done - done function for aborted SATA commands
+ * @ipr_cmd: ipr command struct
+ *
+ * This function is invoked for ops generated to SATA
+ * devices which are being aborted.
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
+{
+	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+	struct ata_queued_cmd *qc = ipr_cmd->qc;
+	struct ipr_sata_port *sata_port = qc->ap->private_data;
+
+	qc->err_mask |= AC_ERR_OTHER;
+	sata_port->ioasa.status |= ATA_BUSY;
+	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+	ata_qc_complete(qc);
+}
+
+/**
  * ipr_scsi_eh_done - mid-layer done function for aborted ops
  * @ipr_cmd: ipr command struct
  *
@@ -669,6 +702,8 @@ static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
 
 		if (ipr_cmd->scsi_cmd)
 			ipr_cmd->done = ipr_scsi_eh_done;
+		else if (ipr_cmd->qc)
+			ipr_cmd->done = ipr_sata_eh_done;
 
 		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
 		del_timer(&ipr_cmd->timer);
@@ -825,6 +860,7 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res)
 	res->del_from_ml = 0;
 	res->resetting_device = 0;
 	res->sdev = NULL;
+	res->sata_port = NULL;
 }
 
 /**
@@ -1316,7 +1352,7 @@ static u32 ipr_get_error(u32 ioasc)
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
-		if (ipr_error_table[i].ioasc == ioasc)
+		if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
 			return i;
 
 	return 0;
@@ -3051,6 +3087,17 @@ static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
 **/
 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
 {
+	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
+	struct ipr_resource_entry *res;
+	unsigned long lock_flags = 0;
+
+	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+	res = (struct ipr_resource_entry *)sdev->hostdata;
+
+	if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
+		qdepth = IPR_MAX_CMD_PER_ATA_LUN;
+	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+
 	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
 	return sdev->queue_depth;
 }
@@ -3166,6 +3213,122 @@ static int ipr_biosparam(struct scsi_device *sdev,
 }
 
 /**
+ * ipr_find_starget - Find target based on bus/target.
+ * @starget: scsi target struct
+ *
+ * Return value:
+ * resource entry pointer if found / NULL if not found
+ **/
+static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
+{
+	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
+	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
+	struct ipr_resource_entry *res;
+
+	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
+		if ((res->cfgte.res_addr.bus == starget->channel) &&
+		    (res->cfgte.res_addr.target == starget->id) &&
+		    (res->cfgte.res_addr.lun == 0)) {
+			return res;
+		}
+	}
+
+	return NULL;
+}
+
+static struct ata_port_info sata_port_info;
+
+/**
+ * ipr_target_alloc - Prepare for commands to a SCSI target
+ * @starget: scsi target struct
+ *
+ * If the device is a SATA device, this function allocates an
+ * ATA port with libata, else it does nothing.
+ *
+ * Return value:
+ * 0 on success / non-0 on failure
+ **/
+static int ipr_target_alloc(struct scsi_target *starget)
+{
+	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
+	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
+	struct ipr_sata_port *sata_port;
+	struct ata_port *ap;
+	struct ipr_resource_entry *res;
+	unsigned long lock_flags;
+
+	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+	res = ipr_find_starget(starget);
+	starget->hostdata = NULL;
+
+	if (res && ipr_is_gata(res)) {
+		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+		sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
+		if (!sata_port)
+			return -ENOMEM;
+
+		ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
+		if (ap) {
+			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+			sata_port->ioa_cfg = ioa_cfg;
+			sata_port->ap = ap;
+			sata_port->res = res;
+
+			res->sata_port = sata_port;
+			ap->private_data = sata_port;
+			starget->hostdata = sata_port;
+		} else {
+			kfree(sata_port);
+			return -ENOMEM;
+		}
+	}
+	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+
+	return 0;
+}
+
+/**
+ * ipr_target_destroy - Destroy a SCSI target
+ * @starget: scsi target struct
+ *
+ * If the device was a SATA device, this function frees the libata
+ * ATA port, else it does nothing.
+ *
+ **/
+static void ipr_target_destroy(struct scsi_target *starget)
+{
+	struct ipr_sata_port *sata_port = starget->hostdata;
+
+	if (sata_port) {
+		starget->hostdata = NULL;
+		ata_sas_port_destroy(sata_port->ap);
+		kfree(sata_port);
+	}
+}
+
+/**
+ * ipr_find_sdev - Find device based on bus/target/lun.
+ * @sdev: scsi device struct
+ *
+ * Return value:
+ * resource entry pointer if found / NULL if not found
+ **/
+static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
+{
+	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
+	struct ipr_resource_entry *res;
+
+	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
+		if ((res->cfgte.res_addr.bus == sdev->channel) &&
+		    (res->cfgte.res_addr.target == sdev->id) &&
+		    (res->cfgte.res_addr.lun == sdev->lun))
+			return res;
+	}
+
+	return NULL;
+}
+
+/**
  * ipr_slave_destroy - Unconfigure a SCSI device
  * @sdev: scsi device struct
  *
@@ -3183,8 +3346,11 @@ static void ipr_slave_destroy(struct scsi_device *sdev)
 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 	res = (struct ipr_resource_entry *) sdev->hostdata;
 	if (res) {
+		if (res->sata_port)
+			ata_port_disable(res->sata_port->ap);
 		sdev->hostdata = NULL;
 		res->sdev = NULL;
+		res->sata_port = NULL;
 	}
 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 }
@@ -3219,13 +3385,45 @@ static int ipr_slave_configure(struct scsi_device *sdev)
 		}
 		if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
 			sdev->allow_restart = 1;
-		scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
+		if (ipr_is_gata(res) && res->sata_port) {
+			scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
+			ata_sas_slave_configure(sdev, res->sata_port->ap);
+		} else {
+			scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
+		}
 	}
 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 	return 0;
 }
 
 /**
+ * ipr_ata_slave_alloc - Prepare for commands to a SATA device
+ * @sdev: scsi device struct
+ *
+ * This function initializes an ATA port so that future commands
+ * sent through queuecommand will work.
+ *
+ * Return value:
+ * 0 on success
+ **/
+static int ipr_ata_slave_alloc(struct scsi_device *sdev)
+{
+	struct ipr_sata_port *sata_port = NULL;
+	int rc = -ENXIO;
+
+	ENTER;
+	if (sdev->sdev_target)
+		sata_port = sdev->sdev_target->hostdata;
+	if (sata_port)
+		rc = ata_sas_port_init(sata_port->ap);
+	if (rc)
+		ipr_slave_destroy(sdev);
+
+	LEAVE;
+	return rc;
+}
+
+/**
  * ipr_slave_alloc - Prepare for commands to a device.
  * @sdev: scsi device struct
 *
@@ -3248,18 +3446,18 @@ static int ipr_slave_alloc(struct scsi_device *sdev)
 
 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 
-	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
-		if ((res->cfgte.res_addr.bus == sdev->channel) &&
-		    (res->cfgte.res_addr.target == sdev->id) &&
-		    (res->cfgte.res_addr.lun == sdev->lun)) {
-			res->sdev = sdev;
-			res->add_to_ml = 0;
-			res->in_erp = 0;
-			sdev->hostdata = res;
-			if (!ipr_is_naca_model(res))
-				res->needs_sync_complete = 1;
-			rc = 0;
-			break;
+	res = ipr_find_sdev(sdev);
+	if (res) {
+		res->sdev = sdev;
+		res->add_to_ml = 0;
+		res->in_erp = 0;
+		sdev->hostdata = res;
+		if (!ipr_is_naca_model(res))
+			res->needs_sync_complete = 1;
+		rc = 0;
+		if (ipr_is_gata(res)) {
+			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+			return ipr_ata_slave_alloc(sdev);
 		}
 	}
 
@@ -3314,7 +3512,8 @@ static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
  * This function issues a device reset to the affected device.
  * If the device is a SCSI device, a LUN reset will be sent
  * to the device first. If that does not work, a target reset
- * will be sent.
+ * will be sent. If the device is a SATA device, a PHY reset will
+ * be sent.
  *
  * Return value:
  * 0 on success / non-zero on failure
@@ -3325,26 +3524,79 @@ static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
 	struct ipr_cmnd *ipr_cmd;
 	struct ipr_ioarcb *ioarcb;
 	struct ipr_cmd_pkt *cmd_pkt;
+	struct ipr_ioarcb_ata_regs *regs;
 	u32 ioasc;
 
 	ENTER;
 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
 	ioarcb = &ipr_cmd->ioarcb;
 	cmd_pkt = &ioarcb->cmd_pkt;
+	regs = &ioarcb->add_data.u.regs;
 
 	ioarcb->res_handle = res->cfgte.res_handle;
 	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
 	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
+	if (ipr_is_gata(res)) {
+		cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
+		ioarcb->add_cmd_parms_len = cpu_to_be32(sizeof(regs->flags));
+		regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
+	}
 
 	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
 	ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+	if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET)
+		memcpy(&res->sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
+		       sizeof(struct ipr_ioasa_gata));
 
 	LEAVE;
 	return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
 }
 
 /**
+ * ipr_sata_reset - Reset the SATA port
+ * @ap: SATA port to reset
+ * @classes: class of the attached device
+ *
+ * This function issues a SATA phy reset to the affected ATA port.
+ *
+ * Return value:
+ * 0 on success / non-zero on failure
+ **/
+static int ipr_sata_reset(struct ata_port *ap, unsigned int *classes)
+{
+	struct ipr_sata_port *sata_port = ap->private_data;
+	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
+	struct ipr_resource_entry *res;
+	unsigned long lock_flags = 0;
+	int rc = -ENXIO;
+
+	ENTER;
+	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+	res = sata_port->res;
+	if (res) {
+		rc = ipr_device_reset(ioa_cfg, res);
+		switch(res->cfgte.proto) {
+		case IPR_PROTO_SATA:
+		case IPR_PROTO_SAS_STP:
+			*classes = ATA_DEV_ATA;
+			break;
+		case IPR_PROTO_SATA_ATAPI:
+		case IPR_PROTO_SAS_STP_ATAPI:
+			*classes = ATA_DEV_ATAPI;
+			break;
+		default:
+			*classes = ATA_DEV_UNKNOWN;
+			break;
+		};
+	}
+
+	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+	LEAVE;
+	return rc;
+}
+
+/**
  * ipr_eh_dev_reset - Reset the device
  * @scsi_cmd: scsi command struct
  *
@@ -3360,7 +3612,8 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
 	struct ipr_cmnd *ipr_cmd;
 	struct ipr_ioa_cfg *ioa_cfg;
 	struct ipr_resource_entry *res;
-	int rc;
+	struct ata_port *ap;
+	int rc = 0;
 
 	ENTER;
 	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
@@ -3388,7 +3641,14 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
 
 	res->resetting_device = 1;
 	scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
-	rc = ipr_device_reset(ioa_cfg, res);
+
+	if (ipr_is_gata(res) && res->sata_port) {
+		ap = res->sata_port->ap;
+		spin_unlock_irq(scsi_cmd->device->host->host_lock);
+		ata_do_eh(ap, NULL, NULL, ipr_sata_reset, NULL);
+		spin_lock_irq(scsi_cmd->device->host->host_lock);
+	} else
+		rc = ipr_device_reset(ioa_cfg, res);
 	res->resetting_device = 0;
 
 	LEAVE;
@@ -4300,6 +4560,9 @@ static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
 		return 0;
 	}
 
+	if (ipr_is_gata(res) && res->sata_port)
+		return ata_sas_queuecmd(scsi_cmd, done, res->sata_port->ap);
+
 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
 	ioarcb = &ipr_cmd->ioarcb;
 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
@@ -4345,6 +4608,26 @@ static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
 }
 
 /**
+ * ipr_ioctl - IOCTL handler
+ * @sdev: scsi device struct
+ * @cmd: IOCTL cmd
+ * @arg: IOCTL arg
+ *
+ * Return value:
+ * 0 on success / other on failure
+ **/
+int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
+{
+	struct ipr_resource_entry *res;
+
+	res = (struct ipr_resource_entry *)sdev->hostdata;
+	if (res && ipr_is_gata(res))
+		return ata_scsi_ioctl(sdev, cmd, arg);
+
+	return -EINVAL;
+}
+
+/**
  * ipr_info - Get information about the card/driver
  * @scsi_host: scsi host struct
  *
@@ -4366,10 +4649,45 @@ static const char * ipr_ioa_info(struct Scsi_Host *host)
 	return buffer;
 }
 
+/**
+ * ipr_scsi_timed_out - Handle scsi command timeout
+ * @scsi_cmd: scsi command struct
+ *
+ * Return value:
+ * EH_NOT_HANDLED
+ **/
+enum scsi_eh_timer_return ipr_scsi_timed_out(struct scsi_cmnd *scsi_cmd)
+{
+	struct ipr_ioa_cfg *ioa_cfg;
+	struct ipr_cmnd *ipr_cmd;
+	unsigned long flags;
+
+	ENTER;
+	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
+	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
+
+	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
+		if (ipr_cmd->qc && ipr_cmd->qc->scsicmd == scsi_cmd) {
+			ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
+			ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
+			break;
+		}
+	}
+
+	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
+	LEAVE;
+	return EH_NOT_HANDLED;
+}
+
+static struct scsi_transport_template ipr_transport_template = {
+	.eh_timed_out = ipr_scsi_timed_out
+};
+
 static struct scsi_host_template driver_template = {
 	.module = THIS_MODULE,
 	.name = "IPR",
 	.info = ipr_ioa_info,
+	.ioctl = ipr_ioctl,
 	.queuecommand = ipr_queuecommand,
 	.eh_abort_handler = ipr_eh_abort,
 	.eh_device_reset_handler = ipr_eh_dev_reset,
@@ -4377,6 +4695,8 @@ static struct scsi_host_template driver_template = {
 	.slave_alloc = ipr_slave_alloc,
 	.slave_configure = ipr_slave_configure,
 	.slave_destroy = ipr_slave_destroy,
+	.target_alloc = ipr_target_alloc,
+	.target_destroy = ipr_target_destroy,
 	.change_queue_depth = ipr_change_queue_depth,
 	.change_queue_type = ipr_change_queue_type,
 	.bios_param = ipr_biosparam,
@@ -4391,6 +4711,330 @@ static struct scsi_host_template driver_template = {
 	.proc_name = IPR_NAME
 };
 
+/**
+ * ipr_ata_phy_reset - libata phy_reset handler
+ * @ap: ata port to reset
+ *
+ **/
+static void ipr_ata_phy_reset(struct ata_port *ap)
+{
+	unsigned long flags;
+	struct ipr_sata_port *sata_port = ap->private_data;
+	struct ipr_resource_entry *res = sata_port->res;
+	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
+	int rc;
+
+	ENTER;
+	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
+	while(ioa_cfg->in_reset_reload) {
+		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
+		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
+		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
+	}
+
+	if (!ioa_cfg->allow_cmds)
+		goto out_unlock;
+
+	rc = ipr_device_reset(ioa_cfg, res);
+
+	if (rc) {
+		ap->ops->port_disable(ap);
+		goto out_unlock;
+	}
+
+	switch(res->cfgte.proto) {
+	case IPR_PROTO_SATA:
+	case IPR_PROTO_SAS_STP:
+		ap->device[0].class = ATA_DEV_ATA;
+		break;
+	case IPR_PROTO_SATA_ATAPI:
+	case IPR_PROTO_SAS_STP_ATAPI:
+		ap->device[0].class = ATA_DEV_ATAPI;
+		break;
+	default:
+		ap->device[0].class = ATA_DEV_UNKNOWN;
+		ap->ops->port_disable(ap);
+		break;
+	};
+
+out_unlock:
+	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
+	LEAVE;
+}
+
+/**
+ * ipr_ata_post_internal - Cleanup after an internal command
+ * @qc: ATA queued command
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
+{
+	struct ipr_sata_port *sata_port = qc->ap->private_data;
+	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
+	struct ipr_cmnd *ipr_cmd;
+	unsigned long flags;
+
+	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
+	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
+		if (ipr_cmd->qc == qc) {
+			ipr_device_reset(ioa_cfg, sata_port->res);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
+}
+
+/**
+ * ipr_tf_read - Read the current ATA taskfile for the ATA port
+ * @ap: ATA port
+ * @tf: destination ATA taskfile
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
+{
+	struct ipr_sata_port *sata_port = ap->private_data;
+	struct ipr_ioasa_gata *g = &sata_port->ioasa;
+
+	tf->feature = g->error;
+	tf->nsect = g->nsect;
+	tf->lbal = g->lbal;
+	tf->lbam = g->lbam;
+	tf->lbah = g->lbah;
+	tf->device = g->device;
+	tf->command = g->status;
+	tf->hob_nsect = g->hob_nsect;
+	tf->hob_lbal = g->hob_lbal;
+	tf->hob_lbam = g->hob_lbam;
+	tf->hob_lbah = g->hob_lbah;
+	tf->ctl = g->alt_status;
+}
+
+/**
+ * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
+ * @regs: destination
+ * @tf: source ATA taskfile
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
+			     struct ata_taskfile *tf)
+{
+	regs->feature = tf->feature;
+	regs->nsect = tf->nsect;
+	regs->lbal = tf->lbal;
+	regs->lbam = tf->lbam;
+	regs->lbah = tf->lbah;
+	regs->device = tf->device;
+	regs->command = tf->command;
+	regs->hob_feature = tf->hob_feature;
+	regs->hob_nsect = tf->hob_nsect;
+	regs->hob_lbal = tf->hob_lbal;
+	regs->hob_lbam = tf->hob_lbam;
+	regs->hob_lbah = tf->hob_lbah;
+	regs->ctl = tf->ctl;
+}
+
+/**
+ * ipr_sata_done - done function for SATA commands
+ * @ipr_cmd: ipr command struct
+ *
+ * This function is invoked by the interrupt handler for
+ * ops generated by the SCSI mid-layer to SATA devices
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
+{
+	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+	struct ata_queued_cmd *qc = ipr_cmd->qc;
+	struct ipr_sata_port *sata_port = qc->ap->private_data;
+	struct ipr_resource_entry *res = sata_port->res;
+	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
+
+	memcpy(&sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
+	       sizeof(struct ipr_ioasa_gata));
+	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
+
+	if (be32_to_cpu(ipr_cmd->ioasa.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
+		scsi_report_device_reset(ioa_cfg->host, res->cfgte.res_addr.bus,
+					 res->cfgte.res_addr.target);
+
+	if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
+		qc->err_mask |= __ac_err_mask(ipr_cmd->ioasa.u.gata.status);
+	else
+		qc->err_mask |= ac_err_mask(ipr_cmd->ioasa.u.gata.status);
+	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+	ata_qc_complete(qc);
+}
+
+/**
+ * ipr_build_ata_ioadl - Build an ATA scatter/gather list
+ * @ipr_cmd: ipr command struct
+ * @qc: ATA queued command
+ *
+ **/
+static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
+				struct ata_queued_cmd *qc)
+{
+	u32 ioadl_flags = 0;
+	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
+	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
+	int len = qc->nbytes + qc->pad_len;
+	struct scatterlist *sg;
+
+	if (len == 0)
+		return;
+
+	if (qc->dma_dir == DMA_TO_DEVICE) {
+		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
+		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
+		ioarcb->write_data_transfer_length = cpu_to_be32(len);
+		ioarcb->write_ioadl_len =
+			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
+	} else if (qc->dma_dir == DMA_FROM_DEVICE) {
+		ioadl_flags = IPR_IOADL_FLAGS_READ;
+		ioarcb->read_data_transfer_length = cpu_to_be32(len);
+		ioarcb->read_ioadl_len =
+			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
+	}
+
+	ata_for_each_sg(sg, qc) {
+		ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
+		ioadl->address = cpu_to_be32(sg_dma_address(sg));
+		if (ata_sg_is_last(sg, qc))
+			ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
+		else
+			ioadl++;
+	}
+}
+
+/**
+ * ipr_qc_issue - Issue a SATA qc to a device
+ * @qc: queued command
+ *
+ * Return value:
+ * 0 if success
+ **/
+static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ipr_sata_port *sata_port = ap->private_data;
+	struct ipr_resource_entry *res = sata_port->res;
+	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
+	struct ipr_cmnd *ipr_cmd;
+	struct ipr_ioarcb *ioarcb;
+	struct ipr_ioarcb_ata_regs *regs;
+
+	if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
+		return -EIO;
+
+	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
+	ioarcb = &ipr_cmd->ioarcb;
+	regs = &ioarcb->add_data.u.regs;
+
+	memset(&ioarcb->add_data, 0, sizeof(ioarcb->add_data));
+	ioarcb->add_cmd_parms_len = cpu_to_be32(sizeof(ioarcb->add_data.u.regs));
+
+	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+	ipr_cmd->qc = qc;
+	ipr_cmd->done = ipr_sata_done;
+	ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
+	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
+	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
+	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
+	ipr_cmd->dma_use_sg = qc->pad_len ? qc->n_elem + 1 : qc->n_elem;
+
+	ipr_build_ata_ioadl(ipr_cmd, qc);
+	regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
+	ipr_copy_sata_tf(regs, &qc->tf);
+	memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
+	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
+
+	switch (qc->tf.protocol) {
+	case ATA_PROT_NODATA:
+	case ATA_PROT_PIO:
+		break;
+
+	case ATA_PROT_DMA:
+		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
+		break;
+
+	case ATA_PROT_ATAPI:
+	case ATA_PROT_ATAPI_NODATA:
+		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
+		break;
+
+	case ATA_PROT_ATAPI_DMA:
+		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
+		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
+		break;
+
+	default:
+		WARN_ON(1);
+		return -1;
+	}
+
+	mb();
+	writel(be32_to_cpu(ioarcb->ioarcb_host_pci_addr),
+	       ioa_cfg->regs.ioarrin_reg);
+	return 0;
+}
+
+/**
+ * ipr_ata_check_status - Return last ATA status
+ * @ap: ATA port
+ *
+ * Return value:
+ * ATA status
+ **/
+static u8 ipr_ata_check_status(struct ata_port *ap)
+{
+	struct ipr_sata_port *sata_port = ap->private_data;
+	return sata_port->ioasa.status;
+}
+
+/**
+ * ipr_ata_check_altstatus - Return last ATA altstatus
+ * @ap: ATA port
+ *
+ * Return value:
+ * Alt ATA status
+ **/
+static u8 ipr_ata_check_altstatus(struct ata_port *ap)
+{
+	struct ipr_sata_port *sata_port = ap->private_data;
+	return sata_port->ioasa.alt_status;
+}
+
+static struct ata_port_operations ipr_sata_ops = {
+	.port_disable = ata_port_disable,
+	.check_status = ipr_ata_check_status,
+	.check_altstatus = ipr_ata_check_altstatus,
+	.dev_select = ata_noop_dev_select,
+	.phy_reset = ipr_ata_phy_reset,
+	.post_internal_cmd = ipr_ata_post_internal,
+	.tf_read = ipr_tf_read,
+	.qc_prep = ata_noop_qc_prep,
+	.qc_issue = ipr_qc_issue,
+	.port_start = ata_sas_port_start,
+	.port_stop = ata_sas_port_stop
+};
+
+static struct ata_port_info sata_port_info = {
+	.flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SATA_RESET |
+	ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
+	.pio_mask = 0x10, /* pio4 */
+	.mwdma_mask = 0x07,
+	.udma_mask = 0x7f, /* udma0-6 */
+	.port_ops = &ipr_sata_ops
+};
+
 #ifdef CONFIG_PPC_PSERIES
 static const u16 ipr_blocked_processors[] = {
 	PV_NORTHSTAR,
@@ -6374,6 +7018,9 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
 
 	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
 	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
+	host->transportt = &ipr_transport_template;
+	ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
+		      sata_port_info.flags, &ipr_sata_ops);
 
 	ioa_cfg->chip_cfg = ipr_get_chip_cfg(dev_id);
 