author    Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-05-05 16:30:44 -0400
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-05-05 16:30:44 -0400
commit    4f7a307dc6e4d8bfeb56f7cf7231b08cb845687c (patch)
tree      3bf90522c87fcb32373cb2a5ff25b1ead33405f5 /drivers/scsi/ibmvscsi
parent    fabb5c4e4a474ff0f7d6c1d3466a1b79bbce5f49 (diff)
parent    7297824581755593535fc97d2c8b6c47e2dc2db6 (diff)
Merge master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (87 commits)
  [SCSI] fusion: fix domain validation loops
  [SCSI] qla2xxx: fix regression on sparc64
  [SCSI] modalias for scsi devices
  [SCSI] sg: cap reserved_size values at max_sectors
  [SCSI] BusLogic: stop using check_region
  [SCSI] tgt: fix rdma transfer bugs
  [SCSI] aacraid: fix aacraid not finding device
  [SCSI] aacraid: Correct SMC products in aacraid.txt
  [SCSI] scsi_error.c: Add EH Start Unit retry
  [SCSI] aacraid: [Fastboot] Panics for AACRAID driver during 'insmod' for kexec test.
  [SCSI] ipr: Driver version to 2.3.2
  [SCSI] ipr: Faster sg list fetch
  [SCSI] ipr: Return better qc_issue errors
  [SCSI] ipr: Disrupt device error
  [SCSI] ipr: Improve async error logging level control
  [SCSI] ipr: PCI unblock config access fix
  [SCSI] ipr: Fix for oops following SATA request sense
  [SCSI] ipr: Log error for SAS dual path switch
  [SCSI] ipr: Enable logging of debug error data for all devices
  [SCSI] ipr: Add new PCI-E IDs to device table
  ...
Diffstat (limited to 'drivers/scsi/ibmvscsi')
 -rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c | 80
 -rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.h |  2
 -rw-r--r--  drivers/scsi/ibmvscsi/ibmvstgt.c | 37
 3 files changed, 75 insertions(+), 44 deletions(-)
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index fbc1d5c3b0a7..b10eefe735c5 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -85,7 +85,7 @@
 static int max_id = 64;
 static int max_channel = 3;
 static int init_timeout = 5;
-static int max_requests = 50;
+static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT;
 
 #define IBMVSCSI_VERSION "1.5.8"
 
@@ -538,7 +538,8 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
 	int request_status;
 	int rc;
 
-	/* If we have exhausted our request limit, just fail this request.
+	/* If we have exhausted our request limit, just fail this request,
+	 * unless it is for a reset or abort.
 	 * Note that there are rare cases involving driver generated requests
 	 * (such as task management requests) that the mid layer may think we
 	 * can handle more requests (can_queue) when we actually can't
@@ -551,9 +552,30 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
 	 */
 	if (request_status < -1)
 		goto send_error;
-	/* Otherwise, if we have run out of requests */
-	else if (request_status < 0)
-		goto send_busy;
+	/* Otherwise, we may have run out of requests. */
+	/* Abort and reset calls should make it through.
+	 * Nothing except abort and reset should use the last two
+	 * slots unless we had two or less to begin with.
+	 */
+	else if (request_status < 2 &&
+		 evt_struct->iu.srp.cmd.opcode != SRP_TSK_MGMT) {
+		/* In the case that we have less than two requests
+		 * available, check the server limit as a combination
+		 * of the request limit and the number of requests
+		 * in-flight (the size of the send list). If the
+		 * server limit is greater than 2, return busy so
+		 * that the last two are reserved for reset and abort.
+		 */
+		int server_limit = request_status;
+		struct srp_event_struct *tmp_evt;
+
+		list_for_each_entry(tmp_evt, &hostdata->sent, list) {
+			server_limit++;
+		}
+
+		if (server_limit > 2)
+			goto send_busy;
+	}
 	}
 
 	/* Copy the IU into the transfer area */
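The reservation scheme in this hunk is easier to see out of context. Below is a minimal sketch under hypothetical names (may_send, slots_free, in_flight, is_task_mgmt, RESERVED_SLOTS are all illustrative); in the driver the same roles are played by request_status, the length of the hostdata->sent list, and the SRP_TSK_MGMT opcode check.

#include <stdbool.h>

#define RESERVED_SLOTS 2	/* held back for abort/reset */

/* Hypothetical helper mirroring the check above: ordinary commands may
 * not consume the last two request slots, unless the server granted so
 * little credit that two slots are all there ever were. */
static bool may_send(int slots_free, int in_flight, bool is_task_mgmt)
{
	if (is_task_mgmt)
		return true;	/* abort/reset always make it through */

	if (slots_free >= RESERVED_SLOTS)
		return true;

	/* server_limit = remaining credit plus requests in flight,
	 * i.e. the total the server is willing to accept right now */
	return slots_free + in_flight <= RESERVED_SLOTS;
}

With that reserve in place, an abort or reset issued while the queue is saturated can still reach the server instead of bouncing with SCSI_MLQUEUE_HOST_BUSY.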
@@ -572,6 +594,7 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
 
 		printk(KERN_ERR "ibmvscsi: send error %d\n",
 		       rc);
+		atomic_inc(&hostdata->request_limit);
 		goto send_error;
 	}
 
@@ -581,7 +604,8 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
 	unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);
 
 	free_event_struct(&hostdata->pool, evt_struct);
-	return SCSI_MLQUEUE_HOST_BUSY;
+	atomic_inc(&hostdata->request_limit);
+	return SCSI_MLQUEUE_HOST_BUSY;
 
  send_error:
 	unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);
@@ -831,23 +855,16 @@ static void login_rsp(struct srp_event_struct *evt_struct)
 
 	printk(KERN_INFO "ibmvscsi: SRP_LOGIN succeeded\n");
 
-	if (evt_struct->xfer_iu->srp.login_rsp.req_lim_delta >
-	    (max_requests - 2))
-		evt_struct->xfer_iu->srp.login_rsp.req_lim_delta =
-			max_requests - 2;
+	if (evt_struct->xfer_iu->srp.login_rsp.req_lim_delta < 0)
+		printk(KERN_ERR "ibmvscsi: Invalid request_limit.\n");
 
-	/* Now we know what the real request-limit is */
+	/* Now we know what the real request-limit is.
+	 * This value is set rather than added to request_limit because
+	 * request_limit could have been set to -1 by this client.
+	 */
 	atomic_set(&hostdata->request_limit,
 		   evt_struct->xfer_iu->srp.login_rsp.req_lim_delta);
 
-	hostdata->host->can_queue =
-		evt_struct->xfer_iu->srp.login_rsp.req_lim_delta - 2;
-
-	if (hostdata->host->can_queue < 1) {
-		printk(KERN_ERR "ibmvscsi: Invalid request_limit_delta\n");
-		return;
-	}
-
 	/* If we had any pending I/Os, kick them */
 	scsi_unblock_requests(hostdata->host);
 
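The set-versus-add distinction called out in the new comment matters because the driver parks request_limit at -1 while logged out. A minimal sketch, assuming a kernel build and a hypothetical apply_login_grant() helper (the real driver does this inline in login_rsp()):

#include <asm/atomic.h>	/* atomic_t, atomic_set (2.6-era include path) */

/* Hypothetical helper: install the server's absolute credit grant. */
static void apply_login_grant(atomic_t *request_limit, int req_lim_delta)
{
	/* The counter may still hold the -1 sentinel written at
	 * logout/reset to block I/O. atomic_add() would fold that stale
	 * value in and leave the limit one short; a plain atomic_set()
	 * replaces whatever sentinel was left behind. */
	atomic_set(request_limit, req_lim_delta);
}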
@@ -1337,6 +1354,27 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
 	return rc;
 }
 
+/**
+ * ibmvscsi_slave_configure: Set the "allow_restart" flag for each disk.
+ * @sdev: struct scsi_device device to configure
+ *
+ * Enable allow_restart for a device if it is a disk. Adjust the
+ * queue_depth here also as is required by the documentation for
+ * struct scsi_host_template.
+ */
+static int ibmvscsi_slave_configure(struct scsi_device *sdev)
+{
+	struct Scsi_Host *shost = sdev->host;
+	unsigned long lock_flags = 0;
+
+	spin_lock_irqsave(shost->host_lock, lock_flags);
+	if (sdev->type == TYPE_DISK)
+		sdev->allow_restart = 1;
+	scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun);
+	spin_unlock_irqrestore(shost->host_lock, lock_flags);
+	return 0;
+}
+
 /* ------------------------------------------------------------
  * sysfs attributes
  */
@@ -1482,8 +1520,9 @@ static struct scsi_host_template driver_template = {
 	.queuecommand = ibmvscsi_queuecommand,
 	.eh_abort_handler = ibmvscsi_eh_abort_handler,
 	.eh_device_reset_handler = ibmvscsi_eh_device_reset_handler,
+	.slave_configure = ibmvscsi_slave_configure,
 	.cmd_per_lun = 16,
-	.can_queue = 1, /* Updated after SRP_LOGIN */
+	.can_queue = IBMVSCSI_MAX_REQUESTS_DEFAULT,
 	.this_id = -1,
 	.sg_tablesize = SG_ALL,
 	.use_clustering = ENABLE_CLUSTERING,
@@ -1503,6 +1542,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 
 	vdev->dev.driver_data = NULL;
 
+	driver_template.can_queue = max_requests;
 	host = scsi_host_alloc(&driver_template, sizeof(*hostdata));
 	if (!host) {
 		printk(KERN_ERR "ibmvscsi: couldn't allocate host data\n");
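Setting driver_template.can_queue before scsi_host_alloc() matters because the mid layer copies can_queue into the Scsi_Host at allocation time. A sketch of the plumbing, assuming a module-parameter setup like the one this driver uses elsewhere in the file (the module_param line itself is not part of this hunk and its exact form here is an assumption):

#include <linux/module.h>
#include <scsi/scsi_host.h>

#define IBMVSCSI_MAX_REQUESTS_DEFAULT 100

static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT;
module_param_named(max_requests, max_requests, int, S_IRUGO);
MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter");

static struct scsi_host_template sketch_template = {
	/* Compile-time placeholder only; probe() overwrites it with the
	 * (possibly user-supplied) module parameter before the first
	 * scsi_host_alloc(), so the mid layer sizes its command queue
	 * from the effective limit rather than this default. */
	.can_queue = IBMVSCSI_MAX_REQUESTS_DEFAULT,
};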
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.h b/drivers/scsi/ibmvscsi/ibmvscsi.h
index 5c6d93582929..77cc1d40f5bb 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.h
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.h
@@ -44,6 +44,8 @@ struct Scsi_Host;
  */
 #define MAX_INDIRECT_BUFS 10
 
+#define IBMVSCSI_MAX_REQUESTS_DEFAULT 100
+
 /* ------------------------------------------------------------
  * Data Structures
  */
diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c
index a39a478bb39a..6d223dd76440 100644
--- a/drivers/scsi/ibmvscsi/ibmvstgt.c
+++ b/drivers/scsi/ibmvscsi/ibmvstgt.c
@@ -35,7 +35,7 @@
 #include "ibmvscsi.h"
 
 #define INITIAL_SRP_LIMIT 16
-#define DEFAULT_MAX_SECTORS 512
+#define DEFAULT_MAX_SECTORS 256
 
 #define TGT_NAME "ibmvstgt"
 
@@ -248,8 +248,8 @@ static int ibmvstgt_rdma(struct scsi_cmnd *sc, struct scatterlist *sg, int nsg,
 					  md[i].va + mdone);
 
 		if (err != H_SUCCESS) {
-			eprintk("rdma error %d %d\n", dir, slen);
-			goto out;
+			eprintk("rdma error %d %d %ld\n", dir, slen, err);
+			return -EIO;
 		}
 
 		mlen -= slen;
@@ -265,45 +265,35 @@ static int ibmvstgt_rdma(struct scsi_cmnd *sc, struct scatterlist *sg, int nsg,
 				if (sidx > nsg) {
 					eprintk("out of sg %p %d %d\n",
 						iue, sidx, nsg);
-					goto out;
+					return -EIO;
 				}
 			}
 		};
 
 		rest -= mlen;
 	}
-out:
-
 	return 0;
 }
 
-static int ibmvstgt_transfer_data(struct scsi_cmnd *sc,
-				  void (*done)(struct scsi_cmnd *))
-{
-	struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
-	int err;
-
-	err = srp_transfer_data(sc, &vio_iu(iue)->srp.cmd, ibmvstgt_rdma, 1, 1);
-
-	done(sc);
-
-	return err;
-}
-
 static int ibmvstgt_cmd_done(struct scsi_cmnd *sc,
 			     void (*done)(struct scsi_cmnd *))
 {
 	unsigned long flags;
 	struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
 	struct srp_target *target = iue->target;
+	int err = 0;
 
-	dprintk("%p %p %x\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0]);
+	dprintk("%p %p %x %u\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0],
+		cmd->usg_sg);
+
+	if (sc->use_sg)
+		err = srp_transfer_data(sc, &vio_iu(iue)->srp.cmd, ibmvstgt_rdma, 1, 1);
 
 	spin_lock_irqsave(&target->lock, flags);
 	list_del(&iue->ilist);
 	spin_unlock_irqrestore(&target->lock, flags);
 
-	if (sc->result != SAM_STAT_GOOD) {
+	if (err || sc->result != SAM_STAT_GOOD) {
 		eprintk("operation failed %p %d %x\n",
 			iue, sc->result, vio_iu(iue)->srp.cmd.cdb[0]);
 		send_rsp(iue, sc, HARDWARE_ERROR, 0x00);
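Two things change shape in this hunk: the RDMA transfer moves out of the deleted ibmvstgt_transfer_data() wrapper into the completion path, and rdma errors now propagate instead of vanishing through the old "out:" label, which fell through to "return 0". A minimal sketch of the fixed error path, with a hypothetical do_transfer() standing in for the h_copy_rdma hypervisor call:

#include <linux/errno.h>

static long do_transfer(int seg);	/* hypothetical h_copy_rdma stand-in */

static int rdma_loop(int nseg)
{
	int i;

	for (i = 0; i < nseg; i++) {
		/* Before this patch a failure jumped to "out:", which
		 * still returned 0, so callers saw success; returning
		 * -EIO lets ibmvstgt_cmd_done() notice the failure and
		 * report HARDWARE_ERROR in the SRP response. */
		if (do_transfer(i) != 0)
			return -EIO;
	}
	return 0;
}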
@@ -503,7 +493,8 @@ static void process_iu(struct viosrp_crq *crq, struct srp_target *target)
 {
 	struct vio_port *vport = target_to_port(target);
 	struct iu_entry *iue;
-	long err, done;
+	long err;
+	int done = 1;
 
 	iue = srp_iu_get(target);
 	if (!iue) {
@@ -518,7 +509,6 @@ static void process_iu(struct viosrp_crq *crq, struct srp_target *target)
 
 	if (err != H_SUCCESS) {
 		eprintk("%ld transferring data error %p\n", err, iue);
-		done = 1;
 		goto out;
 	}
 
@@ -794,7 +784,6 @@ static struct scsi_host_template ibmvstgt_sht = {
 	.use_clustering = DISABLE_CLUSTERING,
 	.max_sectors = DEFAULT_MAX_SECTORS,
 	.transfer_response = ibmvstgt_cmd_done,
-	.transfer_data = ibmvstgt_transfer_data,
 	.eh_abort_handler = ibmvstgt_eh_abort_handler,
 	.tsk_mgmt_response = ibmvstgt_tsk_mgmt_response,
 	.shost_attrs = ibmvstgt_attrs,