Diffstat (limited to 'drivers/scsi/scsi_lib.c')
 drivers/scsi/scsi_lib.c | 230
 1 file changed, 196 insertions(+), 34 deletions(-)
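Not part of the commit itself: as a minimal, illustrative sketch, a caller of the scsi_execute_async() interface that the patch below introduces might look roughly like this (assuming a 2.6-era driver with the usual SCSI and completion headers; my_context, my_done_fn, my_send_test_unit_ready and MY_TIMEOUT are hypothetical names). The callback receives the sense buffer, req->errors and req->data_len, as wired up by scsi_end_async() in the patch.

/* Hypothetical caller of the new scsi_execute_async() interface. */
struct my_context {
        struct completion done;         /* hypothetical per-command state */
        int result;
};

/* Called when the request completes (see scsi_end_async() below). */
static void my_done_fn(void *data, char *sense, int result, int resid)
{
        struct my_context *ctx = data;

        ctx->result = result;           /* req->errors from the block layer */
        complete(&ctx->done);
}

static int my_send_test_unit_ready(struct scsi_device *sdev,
                                   struct my_context *ctx)
{
        unsigned char cmd[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };

        init_completion(&ctx->done);
        /* No data transfer: NULL buffer, zero length, no scatterlist. */
        return scsi_execute_async(sdev, cmd, DMA_NONE, NULL, 0, 0,
                                  MY_TIMEOUT, 3, ctx, my_done_fn, GFP_KERNEL);
}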
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 1f2782767ca9..eb0cfbfbcf8f 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -63,39 +63,6 @@ static struct scsi_host_sg_pool scsi_sg_pools[] = {
 };
 #undef SP
 
-
-/*
- * Function:    scsi_insert_special_req()
- *
- * Purpose:     Insert pre-formed request into request queue.
- *
- * Arguments:   sreq    - request that is ready to be queued.
- *              at_head - boolean.  True if we should insert at head
- *                        of queue, false if we should insert at tail.
- *
- * Lock status: Assumed that lock is not held upon entry.
- *
- * Returns:     Nothing
- *
- * Notes:       This function is called from character device and from
- *              ioctl types of functions where the caller knows exactly
- *              what SCSI command needs to be issued.  The idea is that
- *              we merely inject the command into the queue (at the head
- *              for now), and then call the queue request function to actually
- *              process it.
- */
-int scsi_insert_special_req(struct scsi_request *sreq, int at_head)
-{
-        /*
-         * Because users of this function are apt to reuse requests with no
-         * modification, we have to sanitise the request flags here
-         */
-        sreq->sr_request->flags &= ~REQ_DONTPREP;
-        blk_insert_request(sreq->sr_device->request_queue, sreq->sr_request,
-                           at_head, sreq);
-        return 0;
-}
-
 static void scsi_run_queue(struct request_queue *q);
 
 /*
@@ -249,8 +216,13 @@ void scsi_do_req(struct scsi_request *sreq, const void *cmnd,
 
         /*
          * head injection *required* here otherwise quiesce won't work
+         *
+         * Because users of this function are apt to reuse requests with no
+         * modification, we have to sanitise the request flags here
          */
-        scsi_insert_special_req(sreq, 1);
+        sreq->sr_request->flags &= ~REQ_DONTPREP;
+        blk_insert_request(sreq->sr_device->request_queue, sreq->sr_request,
+                           1, sreq);
 }
 EXPORT_SYMBOL(scsi_do_req);
 
@@ -327,6 +299,196 @@ int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
 }
 EXPORT_SYMBOL(scsi_execute_req);
 
+struct scsi_io_context {
+        void *data;
+        void (*done)(void *data, char *sense, int result, int resid);
+        char sense[SCSI_SENSE_BUFFERSIZE];
+};
+
+static void scsi_end_async(struct request *req)
+{
+        struct scsi_io_context *sioc = req->end_io_data;
+
+        if (sioc->done)
+                sioc->done(sioc->data, sioc->sense, req->errors, req->data_len);
+
+        kfree(sioc);
+        __blk_put_request(req->q, req);
+}
+
+static int scsi_merge_bio(struct request *rq, struct bio *bio)
+{
+        struct request_queue *q = rq->q;
+
+        bio->bi_flags &= ~(1 << BIO_SEG_VALID);
+        if (rq_data_dir(rq) == WRITE)
+                bio->bi_rw |= (1 << BIO_RW);
+        blk_queue_bounce(q, &bio);
+
+        if (!rq->bio)
+                blk_rq_bio_prep(q, rq, bio);
+        else if (!q->back_merge_fn(q, rq, bio))
+                return -EINVAL;
+        else {
+                rq->biotail->bi_next = bio;
+                rq->biotail = bio;
+                rq->hard_nr_sectors += bio_sectors(bio);
+                rq->nr_sectors = rq->hard_nr_sectors;
+        }
+
+        return 0;
+}
+
+static int scsi_bi_endio(struct bio *bio, unsigned int bytes_done, int error)
+{
+        if (bio->bi_size)
+                return 1;
+
+        bio_put(bio);
+        return 0;
+}
+
+/**
+ * scsi_req_map_sg - map a scatterlist into a request
+ * @rq:         request to fill
+ * @sg:         scatterlist
+ * @nsegs:      number of elements
+ * @bufflen:    len of buffer
+ * @gfp:        memory allocation flags
+ *
+ * scsi_req_map_sg maps a scatterlist into a request so that the
+ * request can be sent to the block layer. We do not trust the scatterlist
+ * sent to us, as some ULDs use that struct to only organize the pages.
+ */
+static int scsi_req_map_sg(struct request *rq, struct scatterlist *sgl,
+                           int nsegs, unsigned bufflen, gfp_t gfp)
+{
+        struct request_queue *q = rq->q;
+        int nr_pages = (bufflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
+        unsigned int data_len = 0, len, bytes, off;
+        struct page *page;
+        struct bio *bio = NULL;
+        int i, err, nr_vecs = 0;
+
+        for (i = 0; i < nsegs; i++) {
+                page = sgl[i].page;
+                off = sgl[i].offset;
+                len = sgl[i].length;
+                data_len += len;
+
+                while (len > 0) {
+                        bytes = min_t(unsigned int, len, PAGE_SIZE - off);
+
+                        if (!bio) {
+                                nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
+                                nr_pages -= nr_vecs;
+
+                                bio = bio_alloc(gfp, nr_vecs);
+                                if (!bio) {
+                                        err = -ENOMEM;
+                                        goto free_bios;
+                                }
+                                bio->bi_end_io = scsi_bi_endio;
+                        }
+
+                        if (bio_add_pc_page(q, bio, page, bytes, off) !=
+                            bytes) {
+                                bio_put(bio);
+                                err = -EINVAL;
+                                goto free_bios;
+                        }
+
+                        if (bio->bi_vcnt >= nr_vecs) {
+                                err = scsi_merge_bio(rq, bio);
+                                if (err) {
+                                        bio_endio(bio, bio->bi_size, 0);
+                                        goto free_bios;
+                                }
+                                bio = NULL;
+                        }
+
+                        page++;
+                        len -= bytes;
+                        off = 0;
+                }
+        }
+
+        rq->buffer = rq->data = NULL;
+        rq->data_len = data_len;
+        return 0;
+
+free_bios:
+        while ((bio = rq->bio) != NULL) {
+                rq->bio = bio->bi_next;
+                /*
+                 * call endio instead of bio_put in case it was bounced
+                 */
+                bio_endio(bio, bio->bi_size, 0);
+        }
+
+        return err;
+}
+
+/**
+ * scsi_execute_async - insert request
+ * @sdev:       scsi device
+ * @cmd:        scsi command
+ * @data_direction: data direction
+ * @buffer:     data buffer (this can be a kernel buffer or scatterlist)
+ * @bufflen:    len of buffer
+ * @use_sg:     if buffer is a scatterlist this is the number of elements
+ * @timeout:    request timeout in seconds
+ * @retries:    number of times to retry request
+ * @flags:      or into request flags
+ **/
+int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
+                       int data_direction, void *buffer, unsigned bufflen,
+                       int use_sg, int timeout, int retries, void *privdata,
+                       void (*done)(void *, char *, int, int), gfp_t gfp)
+{
+        struct request *req;
+        struct scsi_io_context *sioc;
+        int err = 0;
+        int write = (data_direction == DMA_TO_DEVICE);
+
+        sioc = kzalloc(sizeof(*sioc), gfp);
+        if (!sioc)
+                return DRIVER_ERROR << 24;
+
+        req = blk_get_request(sdev->request_queue, write, gfp);
+        if (!req)
+                goto free_sense;
+
+        if (use_sg)
+                err = scsi_req_map_sg(req, buffer, use_sg, bufflen, gfp);
+        else if (bufflen)
+                err = blk_rq_map_kern(req->q, req, buffer, bufflen, gfp);
+
+        if (err)
+                goto free_req;
+
+        req->cmd_len = COMMAND_SIZE(cmd[0]);
+        memcpy(req->cmd, cmd, req->cmd_len);
+        req->sense = sioc->sense;
+        req->sense_len = 0;
+        req->timeout = timeout;
+        req->flags |= REQ_BLOCK_PC | REQ_QUIET;
+        req->end_io_data = sioc;
+
+        sioc->data = privdata;
+        sioc->done = done;
+
+        blk_execute_rq_nowait(req->q, NULL, req, 1, scsi_end_async);
+        return 0;
+
+free_req:
+        blk_put_request(req);
+free_sense:
+        kfree(sioc);
+        return DRIVER_ERROR << 24;
+}
+EXPORT_SYMBOL_GPL(scsi_execute_async);
+
 /*
  * Function:    scsi_init_cmd_errh()
  *