author		Mike Christie <michaelc@cs.wisc.edu>	2007-03-02 19:55:54 -0500
committer	James Bottomley <jejb@mulgrave.il.steeleye.com>	2007-03-11 12:31:33 -0400
commit		181011e04a2a32f8d5df212254239ac9a3c8ab5e (patch)
tree		f633a66a5cc7c9d9cb5399107cbe7147fbd47d2a
parent		0f238418b6d41cdfc85f2f399848429ff6fbfbd0 (diff)
[SCSI] tgt: rm bio hacks in scsi tgt
scsi tgt breaks up a command into multiple scatterlists if we cannot
fit all the data in one. This was done because the block rq helpers
did not support large requests, and because a command can be of any
old size it is hard to preallocate pages for a scatterlist large
enough (we cannot really preallocate pages with the bio map user
path).

In 2.6.20 we added large request support to the block layer helper
blk_rq_map_user. And at LSF we talked about increasing
SCSI_MAX_PHYS_SEGMENTS for scsi tgt if we want to support really
really :) large requests (greater than 256 * PAGE_SIZE in the worst
mapping case, i.e. 1 MB with 4 KB pages).

The only target currently implemented does not even support the
multiple scatterlist stuff and only supports smaller requests, so this
patch just converts scsi tgt to use blk_rq_map_user.

Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
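The core of the conversion, for context: the old path mapped the user
buffer one bio at a time and queued the overflow for later transfer
rounds, while the new path maps the whole buffer in one call. A
minimal sketch against the 2.6.21-era block API (setup of q, rq,
uaddr, len, and rw omitted; illustrative only, not the patch itself):

	/* Old scheme: each bio_map_user() call covers at most one
	 * bio's worth of pages, so a large buffer became a list of
	 * bios with matching scatterlists. */
	while (len > 0) {
		bio = bio_map_user(q, NULL, (unsigned long) uaddr, len, rw);
		if (IS_ERR(bio))
			return PTR_ERR(bio);
		uaddr += bio->bi_size;
		len -= bio->bi_size;
		/* queue bio for a later transfer round ... */
	}

	/* New scheme: since 2.6.20, blk_rq_map_user() maps the whole
	 * buffer into rq, chaining bios internally as needed, and a
	 * single blk_rq_unmap_user() tears the mapping down. */
	err = blk_rq_map_user(q, rq, uaddr, len);
	if (err)
		return err;
	/* ... do the transfer, then: */
	blk_rq_unmap_user(rq->bio);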
-rw-r--r--	drivers/scsi/scsi_tgt_lib.c	133
-rw-r--r--	include/scsi/scsi_cmnd.h	3
2 files changed, 34 insertions(+), 102 deletions(-)
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
index d402aff5f314..47c29a98c922 100644
--- a/drivers/scsi/scsi_tgt_lib.c
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -28,7 +28,6 @@
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_tgt.h>
-#include <../drivers/md/dm-bio-list.h>
 
 #include "scsi_tgt_priv.h"
 
@@ -42,9 +41,8 @@ static struct kmem_cache *scsi_tgt_cmd_cache;
 struct scsi_tgt_cmd {
 	/* TODO replace work with James b's code */
 	struct work_struct work;
-	/* TODO replace the lists with a large bio */
-	struct bio_list xfer_done_list;
-	struct bio_list xfer_list;
+	/* TODO fix limits of some drivers */
+	struct bio *bio;
 
 	struct list_head hash_list;
 	struct request *rq;
@@ -93,7 +91,12 @@ struct scsi_cmnd *scsi_host_get_command(struct Scsi_Host *shost,
 	if (!tcmd)
 		goto put_dev;
 
-	rq = blk_get_request(shost->uspace_req_q, write, gfp_mask);
+	/*
+	 * The blk helpers are used for READ/WRITE requests that
+	 * transfer data from the initiator's point of view. Since
+	 * we are in target mode we want the opposite.
+	 */
+	rq = blk_get_request(shost->uspace_req_q, !write, gfp_mask);
 	if (!rq)
 		goto free_tcmd;
 
@@ -111,8 +114,6 @@ struct scsi_cmnd *scsi_host_get_command(struct Scsi_Host *shost,
 	rq->cmd_flags |= REQ_TYPE_BLOCK_PC;
 	rq->end_io_data = tcmd;
 
-	bio_list_init(&tcmd->xfer_list);
-	bio_list_init(&tcmd->xfer_done_list);
 	tcmd->rq = rq;
 
 	return cmd;
@@ -157,22 +158,6 @@ void scsi_host_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
 }
 EXPORT_SYMBOL_GPL(scsi_host_put_command);
 
-static void scsi_unmap_user_pages(struct scsi_tgt_cmd *tcmd)
-{
-	struct bio *bio;
-
-	/* must call bio_endio in case bio was bounced */
-	while ((bio = bio_list_pop(&tcmd->xfer_done_list))) {
-		bio_endio(bio, bio->bi_size, 0);
-		bio_unmap_user(bio);
-	}
-
-	while ((bio = bio_list_pop(&tcmd->xfer_list))) {
-		bio_endio(bio, bio->bi_size, 0);
-		bio_unmap_user(bio);
-	}
-}
-
 static void cmd_hashlist_del(struct scsi_cmnd *cmd)
 {
 	struct request_queue *q = cmd->request->q;
@@ -185,6 +170,11 @@ static void cmd_hashlist_del(struct scsi_cmnd *cmd)
 	spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);
 }
 
+static void scsi_unmap_user_pages(struct scsi_tgt_cmd *tcmd)
+{
+	blk_rq_unmap_user(tcmd->bio);
+}
+
 static void scsi_tgt_cmd_destroy(struct work_struct *work)
 {
 	struct scsi_tgt_cmd *tcmd =
@@ -193,16 +183,6 @@ static void scsi_tgt_cmd_destroy(struct work_struct *work)
 
 	dprintk("cmd %p %d %lu\n", cmd, cmd->sc_data_direction,
 		rq_data_dir(cmd->request));
-	/*
-	 * We fix rq->cmd_flags here since when we told bio_map_user
-	 * to write vm for WRITE commands, blk_rq_bio_prep set
-	 * rq_data_dir the flags to READ.
-	 */
-	if (cmd->sc_data_direction == DMA_TO_DEVICE)
-		cmd->request->cmd_flags |= REQ_RW;
-	else
-		cmd->request->cmd_flags &= ~REQ_RW;
-
 	scsi_unmap_user_pages(tcmd);
 	scsi_host_put_command(scsi_tgt_cmd_to_host(cmd), cmd);
 }
@@ -215,6 +195,7 @@ static void init_scsi_tgt_cmd(struct request *rq, struct scsi_tgt_cmd *tcmd,
 	struct list_head *head;
 
 	tcmd->tag = tag;
+	tcmd->bio = NULL;
 	INIT_WORK(&tcmd->work, scsi_tgt_cmd_destroy);
 	spin_lock_irqsave(&qdata->cmd_hash_lock, flags);
 	head = &qdata->cmd_hash[cmd_hashfn(tag)];
@@ -419,52 +400,33 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
 	struct request *rq = cmd->request;
 	void *uaddr = tcmd->buffer;
 	unsigned int len = tcmd->bufflen;
-	struct bio *bio;
 	int err;
 
-	while (len > 0) {
-		dprintk("%lx %u\n", (unsigned long) uaddr, len);
-		bio = bio_map_user(q, NULL, (unsigned long) uaddr, len, rw);
-		if (IS_ERR(bio)) {
-			err = PTR_ERR(bio);
-			dprintk("fail to map %lx %u %d %x\n",
-				(unsigned long) uaddr, len, err, cmd->cmnd[0]);
-			goto unmap_bios;
-		}
-
-		uaddr += bio->bi_size;
-		len -= bio->bi_size;
-
+	dprintk("%lx %u\n", (unsigned long) uaddr, len);
+	err = blk_rq_map_user(q, rq, uaddr, len);
+	if (err) {
 		/*
-		 * The first bio is added and merged. We could probably
-		 * try to add others using scsi_merge_bio() but for now
-		 * we keep it simple. The first bio should be pretty large
-		 * (either hitting the 1 MB bio pages limit or a queue limit)
-		 * already but for really large IO we may want to try and
-		 * merge these.
+		 * TODO: need to fix up sg_tablesize, max_segment_size,
+		 * max_sectors, etc. for modern HW and software drivers
+		 * where this value is bogus.
+		 *
+		 * TODO2: we could alloc a reserve buffer of the max size
+		 * we can handle and do the slow copy path for really large
+		 * IO.
 		 */
-		if (!rq->bio) {
-			blk_rq_bio_prep(q, rq, bio);
-			rq->data_len = bio->bi_size;
-		} else
-			/* put list of bios to transfer in next go around */
-			bio_list_add(&tcmd->xfer_list, bio);
-	}
+		eprintk("Could not handle request of size %u.\n", len);
+		return err;
+	}
 
-	cmd->offset = 0;
+	tcmd->bio = rq->bio;
 	err = scsi_tgt_init_cmd(cmd, GFP_KERNEL);
 	if (err)
-		goto unmap_bios;
+		goto unmap_rq;
 
 	return 0;
 
-unmap_bios:
-	if (rq->bio) {
-		bio_unmap_user(rq->bio);
-		while ((bio = bio_list_pop(&tcmd->xfer_list)))
-			bio_unmap_user(bio);
-	}
-
+unmap_rq:
+	scsi_unmap_user_pages(tcmd);
 	return err;
 }
 
@@ -473,12 +435,10 @@ static int scsi_tgt_transfer_data(struct scsi_cmnd *);
 static void scsi_tgt_data_transfer_done(struct scsi_cmnd *cmd)
 {
 	struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
-	struct bio *bio;
 	int err;
 
 	/* should we free resources here on error ? */
 	if (cmd->result) {
-send_uspace_err:
 		err = scsi_tgt_uspace_send_status(cmd, tcmd->tag);
 		if (err <= 0)
 			/* the tgt uspace eh will have to pick this up */
@@ -490,34 +450,8 @@ send_uspace_err:
 		cmd, cmd->request_bufflen, tcmd->bufflen);
 
 	scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
-	bio_list_add(&tcmd->xfer_done_list, cmd->request->bio);
-
 	tcmd->buffer += cmd->request_bufflen;
-	cmd->offset += cmd->request_bufflen;
-
-	if (!tcmd->xfer_list.head) {
-		scsi_tgt_transfer_response(cmd);
-		return;
-	}
-
-	dprintk("cmd2 %p request_bufflen %u bufflen %u\n",
-		cmd, cmd->request_bufflen, tcmd->bufflen);
-
-	bio = bio_list_pop(&tcmd->xfer_list);
-	BUG_ON(!bio);
-
-	blk_rq_bio_prep(cmd->request->q, cmd->request, bio);
-	cmd->request->data_len = bio->bi_size;
-	err = scsi_tgt_init_cmd(cmd, GFP_ATOMIC);
-	if (err) {
-		cmd->result = DID_ERROR << 16;
-		goto send_uspace_err;
-	}
-
-	if (scsi_tgt_transfer_data(cmd)) {
-		cmd->result = DID_NO_CONNECT << 16;
-		goto send_uspace_err;
-	}
+	scsi_tgt_transfer_response(cmd);
 }
 
 static int scsi_tgt_transfer_data(struct scsi_cmnd *cmd)
@@ -617,8 +551,9 @@ int scsi_tgt_kspace_exec(int host_no, u64 tag, int result, u32 len,
 	}
 	cmd = rq->special;
 
-	dprintk("cmd %p result %d len %d bufflen %u %lu %x\n", cmd,
-		result, len, cmd->request_bufflen, rq_data_dir(rq), cmd->cmnd[0]);
+	dprintk("cmd %p scb %x result %d len %d bufflen %u %lu %x\n",
+		cmd, cmd->cmnd[0], result, len, cmd->request_bufflen,
+		rq_data_dir(rq), cmd->cmnd[0]);
 
 	if (result == TASK_ABORTED) {
 		scsi_tgt_abort_cmd(shost, cmd);
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index d6948d0e8cdb..a2e0c1032491 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -73,9 +73,6 @@ struct scsi_cmnd {
 	unsigned short use_sg;	/* Number of pieces of scatter-gather */
 	unsigned short sglist_len;	/* size of malloc'd scatter-gather list */
 
-	/* offset in cmd we are at (for multi-transfer tgt cmds) */
-	unsigned offset;
-
 	unsigned underflow;	/* Return error if less than
 				   this amount is transferred */
 
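A closing note on the !write inversion in scsi_host_get_command()
above: blk_get_request() takes its direction from an initiator's
point of view, so a target-mode WRITE (data-out from the initiator)
is data-in for the target and must be requested as a READ. A minimal
sketch of that pairing (the wrapper name scsi_tgt_alloc_rq is
hypothetical, not part of this patch):

	static struct request *scsi_tgt_alloc_rq(struct request_queue *q,
						 int write, gfp_t gfp_mask)
	{
		/*
		 * Invert the direction: when the initiator writes, the
		 * target reads the data in, and vice versa.
		 */
		return blk_get_request(q, !write, gfp_mask);
	}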