Diffstat (limited to 'drivers/scsi/scsi_tgt_lib.c')

 drivers/scsi/scsi_tgt_lib.c | 261
 1 file changed, 69 insertions(+), 192 deletions(-)
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
index d402aff5f314..2570f48a69c7 100644
--- a/drivers/scsi/scsi_tgt_lib.c
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -28,7 +28,6 @@
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_tgt.h>
-#include <../drivers/md/dm-bio-list.h>
 
 #include "scsi_tgt_priv.h"
 
@@ -42,16 +41,12 @@ static struct kmem_cache *scsi_tgt_cmd_cache;
 struct scsi_tgt_cmd {
 	/* TODO replace work with James b's code */
 	struct work_struct work;
-	/* TODO replace the lists with a large bio */
-	struct bio_list xfer_done_list;
-	struct bio_list xfer_list;
+	/* TODO fix limits of some drivers */
+	struct bio *bio;
 
 	struct list_head hash_list;
 	struct request *rq;
 	u64 tag;
-
-	void *buffer;
-	unsigned bufflen;
 };
 
 #define TGT_HASH_ORDER	4
@@ -93,7 +88,12 @@ struct scsi_cmnd *scsi_host_get_command(struct Scsi_Host *shost,
 	if (!tcmd)
 		goto put_dev;
 
-	rq = blk_get_request(shost->uspace_req_q, write, gfp_mask);
+	/*
+	 * The blk helpers are used for READ/WRITE requests
+	 * transferring data from the initiator's point of view.
+	 * Since we are in target mode we want the opposite.
+	 */
+	rq = blk_get_request(shost->uspace_req_q, !write, gfp_mask);
 	if (!rq)
 		goto free_tcmd;
 
@@ -111,8 +111,6 @@ struct scsi_cmnd *scsi_host_get_command(struct Scsi_Host *shost,
 	rq->cmd_flags |= REQ_TYPE_BLOCK_PC;
 	rq->end_io_data = tcmd;
 
-	bio_list_init(&tcmd->xfer_list);
-	bio_list_init(&tcmd->xfer_done_list);
 	tcmd->rq = rq;
 
 	return cmd;
@@ -157,22 +155,6 @@ void scsi_host_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
 }
 EXPORT_SYMBOL_GPL(scsi_host_put_command);
 
-static void scsi_unmap_user_pages(struct scsi_tgt_cmd *tcmd)
-{
-	struct bio *bio;
-
-	/* must call bio_endio in case bio was bounced */
-	while ((bio = bio_list_pop(&tcmd->xfer_done_list))) {
-		bio_endio(bio, bio->bi_size, 0);
-		bio_unmap_user(bio);
-	}
-
-	while ((bio = bio_list_pop(&tcmd->xfer_list))) {
-		bio_endio(bio, bio->bi_size, 0);
-		bio_unmap_user(bio);
-	}
-}
-
 static void cmd_hashlist_del(struct scsi_cmnd *cmd)
 {
 	struct request_queue *q = cmd->request->q;
@@ -185,6 +167,11 @@ static void cmd_hashlist_del(struct scsi_cmnd *cmd)
 	spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);
 }
 
+static void scsi_unmap_user_pages(struct scsi_tgt_cmd *tcmd)
+{
+	blk_rq_unmap_user(tcmd->bio);
+}
+
 static void scsi_tgt_cmd_destroy(struct work_struct *work)
 {
 	struct scsi_tgt_cmd *tcmd =
@@ -193,16 +180,6 @@ static void scsi_tgt_cmd_destroy(struct work_struct *work)
 
 	dprintk("cmd %p %d %lu\n", cmd, cmd->sc_data_direction,
 		rq_data_dir(cmd->request));
-	/*
-	 * We fix rq->cmd_flags here since when we told bio_map_user
-	 * to write vm for WRITE commands, blk_rq_bio_prep set
-	 * rq_data_dir the flags to READ.
-	 */
-	if (cmd->sc_data_direction == DMA_TO_DEVICE)
-		cmd->request->cmd_flags |= REQ_RW;
-	else
-		cmd->request->cmd_flags &= ~REQ_RW;
-
 	scsi_unmap_user_pages(tcmd);
 	scsi_host_put_command(scsi_tgt_cmd_to_host(cmd), cmd);
 }
@@ -215,6 +192,7 @@ static void init_scsi_tgt_cmd(struct request *rq, struct scsi_tgt_cmd *tcmd,
 	struct list_head *head;
 
 	tcmd->tag = tag;
+	tcmd->bio = NULL;
 	INIT_WORK(&tcmd->work, scsi_tgt_cmd_destroy);
 	spin_lock_irqsave(&qdata->cmd_hash_lock, flags);
 	head = &qdata->cmd_hash[cmd_hashfn(tag)];
@@ -349,10 +327,14 @@ static void scsi_tgt_cmd_done(struct scsi_cmnd *cmd)
 	dprintk("cmd %p %lu\n", cmd, rq_data_dir(cmd->request));
 
 	scsi_tgt_uspace_send_status(cmd, tcmd->tag);
+
+	if (cmd->request_buffer)
+		scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
+
 	queue_work(scsi_tgtd, &tcmd->work);
 }
 
-static int __scsi_tgt_transfer_response(struct scsi_cmnd *cmd)
+static int scsi_tgt_transfer_response(struct scsi_cmnd *cmd)
 {
 	struct Scsi_Host *shost = scsi_tgt_cmd_to_host(cmd);
 	int err;
@@ -365,30 +347,12 @@ static int __scsi_tgt_transfer_response(struct scsi_cmnd *cmd)
 	case SCSI_MLQUEUE_DEVICE_BUSY:
 		return -EAGAIN;
 	}
-
 	return 0;
 }
 
-static void scsi_tgt_transfer_response(struct scsi_cmnd *cmd)
-{
-	struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
-	int err;
-
-	err = __scsi_tgt_transfer_response(cmd);
-	if (!err)
-		return;
-
-	cmd->result = DID_BUS_BUSY << 16;
-	err = scsi_tgt_uspace_send_status(cmd, tcmd->tag);
-	if (err <= 0)
-		/* the eh will have to pick this up */
-		printk(KERN_ERR "Could not send cmd %p status\n", cmd);
-}
-
 static int scsi_tgt_init_cmd(struct scsi_cmnd *cmd, gfp_t gfp_mask)
 {
 	struct request *rq = cmd->request;
-	struct scsi_tgt_cmd *tcmd = rq->end_io_data;
 	int count;
 
 	cmd->use_sg = rq->nr_phys_segments;
@@ -398,143 +362,54 @@ static int scsi_tgt_init_cmd(struct scsi_cmnd *cmd, gfp_t gfp_mask)
 
 	cmd->request_bufflen = rq->data_len;
 
-	dprintk("cmd %p addr %p cnt %d %lu\n", cmd, tcmd->buffer, cmd->use_sg,
-		rq_data_dir(rq));
+	dprintk("cmd %p cnt %d %lu\n", cmd, cmd->use_sg, rq_data_dir(rq));
 	count = blk_rq_map_sg(rq->q, rq, cmd->request_buffer);
 	if (likely(count <= cmd->use_sg)) {
 		cmd->use_sg = count;
 		return 0;
 	}
 
-	eprintk("cmd %p addr %p cnt %d\n", cmd, tcmd->buffer, cmd->use_sg);
+	eprintk("cmd %p cnt %d\n", cmd, cmd->use_sg);
 	scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
 	return -EINVAL;
 }
 
 /* TODO: test this crap and replace bio_map_user with new interface maybe */
 static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
-			       int rw)
+			       unsigned long uaddr, unsigned int len, int rw)
 {
 	struct request_queue *q = cmd->request->q;
 	struct request *rq = cmd->request;
-	void *uaddr = tcmd->buffer;
-	unsigned int len = tcmd->bufflen;
-	struct bio *bio;
 	int err;
 
-	while (len > 0) {
-		dprintk("%lx %u\n", (unsigned long) uaddr, len);
-		bio = bio_map_user(q, NULL, (unsigned long) uaddr, len, rw);
-		if (IS_ERR(bio)) {
-			err = PTR_ERR(bio);
-			dprintk("fail to map %lx %u %d %x\n",
-				(unsigned long) uaddr, len, err, cmd->cmnd[0]);
-			goto unmap_bios;
-		}
-
-		uaddr += bio->bi_size;
-		len -= bio->bi_size;
-
+	dprintk("%lx %u\n", uaddr, len);
+	err = blk_rq_map_user(q, rq, (void *)uaddr, len);
+	if (err) {
 		/*
-		 * The first bio is added and merged. We could probably
-		 * try to add others using scsi_merge_bio() but for now
-		 * we keep it simple. The first bio should be pretty large
-		 * (either hitting the 1 MB bio pages limit or a queue limit)
-		 * already but for really large IO we may want to try and
-		 * merge these.
+		 * TODO: need to fixup sg_tablesize, max_segment_size,
+		 * max_sectors, etc for modern HW and software drivers
+		 * where this value is bogus.
+		 *
+		 * TODO2: we can alloc a reserve buffer of max size
+		 * we can handle and do the slow copy path for really large
+		 * IO.
 		 */
-		if (!rq->bio) {
-			blk_rq_bio_prep(q, rq, bio);
-			rq->data_len = bio->bi_size;
-		} else
-			/* put list of bios to transfer in next go around */
-			bio_list_add(&tcmd->xfer_list, bio);
-	}
-
-	cmd->offset = 0;
+		eprintk("Could not handle request of size %u.\n", len);
+		return err;
+	}
+
+	tcmd->bio = rq->bio;
 	err = scsi_tgt_init_cmd(cmd, GFP_KERNEL);
 	if (err)
-		goto unmap_bios;
+		goto unmap_rq;
 
 	return 0;
 
-unmap_bios:
-	if (rq->bio) {
-		bio_unmap_user(rq->bio);
-		while ((bio = bio_list_pop(&tcmd->xfer_list)))
-			bio_unmap_user(bio);
-	}
-
+unmap_rq:
+	scsi_unmap_user_pages(tcmd);
 	return err;
 }
 
-static int scsi_tgt_transfer_data(struct scsi_cmnd *);
-
-static void scsi_tgt_data_transfer_done(struct scsi_cmnd *cmd)
-{
-	struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
-	struct bio *bio;
-	int err;
-
-	/* should we free resources here on error ? */
-	if (cmd->result) {
-send_uspace_err:
-		err = scsi_tgt_uspace_send_status(cmd, tcmd->tag);
-		if (err <= 0)
-			/* the tgt uspace eh will have to pick this up */
-			printk(KERN_ERR "Could not send cmd %p status\n", cmd);
-		return;
-	}
-
-	dprintk("cmd %p request_bufflen %u bufflen %u\n",
-		cmd, cmd->request_bufflen, tcmd->bufflen);
-
-	scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
-	bio_list_add(&tcmd->xfer_done_list, cmd->request->bio);
-
-	tcmd->buffer += cmd->request_bufflen;
-	cmd->offset += cmd->request_bufflen;
-
-	if (!tcmd->xfer_list.head) {
-		scsi_tgt_transfer_response(cmd);
-		return;
-	}
-
-	dprintk("cmd2 %p request_bufflen %u bufflen %u\n",
-		cmd, cmd->request_bufflen, tcmd->bufflen);
-
-	bio = bio_list_pop(&tcmd->xfer_list);
-	BUG_ON(!bio);
-
-	blk_rq_bio_prep(cmd->request->q, cmd->request, bio);
-	cmd->request->data_len = bio->bi_size;
-	err = scsi_tgt_init_cmd(cmd, GFP_ATOMIC);
-	if (err) {
-		cmd->result = DID_ERROR << 16;
-		goto send_uspace_err;
-	}
-
-	if (scsi_tgt_transfer_data(cmd)) {
-		cmd->result = DID_NO_CONNECT << 16;
-		goto send_uspace_err;
-	}
-}
-
-static int scsi_tgt_transfer_data(struct scsi_cmnd *cmd)
-{
-	int err;
-	struct Scsi_Host *host = scsi_tgt_cmd_to_host(cmd);
-
-	err = host->hostt->transfer_data(cmd, scsi_tgt_data_transfer_done);
-	switch (err) {
-	case SCSI_MLQUEUE_HOST_BUSY:
-	case SCSI_MLQUEUE_DEVICE_BUSY:
-		return -EAGAIN;
-	default:
-		return 0;
-	}
-}
-
 static int scsi_tgt_copy_sense(struct scsi_cmnd *cmd, unsigned long uaddr,
 			       unsigned len)
 {
@@ -584,8 +459,9 @@ static struct request *tgt_cmd_hash_lookup(struct request_queue *q, u64 tag)
 	return rq;
 }
 
-int scsi_tgt_kspace_exec(int host_no, u64 tag, int result, u32 len,
-			 unsigned long uaddr, u8 rw)
+int scsi_tgt_kspace_exec(int host_no, int result, u64 tag,
+			 unsigned long uaddr, u32 len, unsigned long sense_uaddr,
+			 u32 sense_len, u8 rw)
 {
 	struct Scsi_Host *shost;
 	struct scsi_cmnd *cmd;
@@ -617,8 +493,9 @@ int scsi_tgt_kspace_exec(int host_no, u64 tag, int result, u32 len,
 	}
 	cmd = rq->special;
 
-	dprintk("cmd %p result %d len %d bufflen %u %lu %x\n", cmd,
-		result, len, cmd->request_bufflen, rq_data_dir(rq), cmd->cmnd[0]);
+	dprintk("cmd %p scb %x result %d len %d bufflen %u %lu %x\n",
+		cmd, cmd->cmnd[0], result, len, cmd->request_bufflen,
+		rq_data_dir(rq), cmd->cmnd[0]);
 
 	if (result == TASK_ABORTED) {
 		scsi_tgt_abort_cmd(shost, cmd);
@@ -629,36 +506,36 @@ int scsi_tgt_kspace_exec(int host_no, u64 tag, int result, u32 len,
 	 * in the request_* values
 	 */
 	tcmd = cmd->request->end_io_data;
-	tcmd->buffer = (void *)uaddr;
-	tcmd->bufflen = len;
 	cmd->result = result;
 
-	if (!tcmd->bufflen || cmd->request_buffer) {
-		err = __scsi_tgt_transfer_response(cmd);
-		goto done;
-	}
+	if (cmd->result == SAM_STAT_CHECK_CONDITION)
+		scsi_tgt_copy_sense(cmd, sense_uaddr, sense_len);
 
-	/*
-	 * TODO: Do we need to handle case where request does not
-	 * align with LLD.
-	 */
-	err = scsi_map_user_pages(rq->end_io_data, cmd, rw);
-	if (err) {
-		eprintk("%p %d\n", cmd, err);
-		err = -EAGAIN;
-		goto done;
-	}
+	if (len) {
+		err = scsi_map_user_pages(rq->end_io_data, cmd, uaddr, len, rw);
+		if (err) {
+			/*
+			 * user-space daemon bugs or OOM
+			 * TODO: we can do better for OOM.
+			 */
+			struct scsi_tgt_queuedata *qdata;
+			struct list_head *head;
+			unsigned long flags;
 
-	/* userspace failure */
-	if (cmd->result) {
-		if (status_byte(cmd->result) == CHECK_CONDITION)
-			scsi_tgt_copy_sense(cmd, uaddr, len);
-		err = __scsi_tgt_transfer_response(cmd);
-		goto done;
-	}
-	/* ask the target LLD to transfer the data to the buffer */
-	err = scsi_tgt_transfer_data(cmd);
+			eprintk("cmd %p ret %d uaddr %lx len %d rw %d\n",
+				cmd, err, uaddr, len, rw);
+
+			qdata = shost->uspace_req_q->queuedata;
+			head = &qdata->cmd_hash[cmd_hashfn(tcmd->tag)];
+
+			spin_lock_irqsave(&qdata->cmd_hash_lock, flags);
+			list_add(&tcmd->hash_list, head);
+			spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);
+
+			goto done;
+		}
+	}
+	err = scsi_tgt_transfer_response(cmd);
done:
 	scsi_host_put(shost);
 	return err;
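
For context, a minimal illustrative sketch of the single-mapping scheme this patch switches to, using the same block-layer calls the patch itself uses (blk_rq_map_user()/blk_rq_unmap_user() as they appear above); the my_* names are hypothetical stand-ins for the patch's struct scsi_tgt_cmd and helpers, not part of the commit:

#include <linux/blkdev.h>
#include <linux/bio.h>

/* hypothetical per-command state, standing in for struct scsi_tgt_cmd */
struct my_tgt_cmd {
	struct request *rq;
	struct bio *bio;	/* head of the mapped bio chain */
};

/*
 * Map one userspace buffer onto the command's request in a single call,
 * instead of looping over bio_map_user() and stashing extra bios on
 * per-command bio lists as the old code did.
 */
static int my_map_user_buffer(struct request_queue *q, struct my_tgt_cmd *tcmd,
			      unsigned long uaddr, unsigned int len)
{
	int err;

	err = blk_rq_map_user(q, tcmd->rq, (void *)uaddr, len);
	if (err)
		return err;	/* e.g. buffer exceeds the queue's limits */

	/* remember the bio so completion can unmap it later */
	tcmd->bio = tcmd->rq->bio;
	return 0;
}

/* mirrors the patch's new scsi_unmap_user_pages() */
static void my_unmap_user_buffer(struct my_tgt_cmd *tcmd)
{
	blk_rq_unmap_user(tcmd->bio);
}

With the whole buffer mapped (and unmapped) through the request in one shot, the xfer_list/xfer_done_list bookkeeping, the multi-pass transfer_data state machine, and the REQ_RW fixup all become unnecessary, which is where the 192 deleted lines come from.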