Diffstat (limited to 'drivers/scsi/scsi.c')
 -rw-r--r--  drivers/scsi/scsi.c | 157
 1 file changed, 0 insertions(+), 157 deletions(-)
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index dae4f08adde0..2ab7df0dcfe8 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -63,7 +63,6 @@
 #include <scsi/scsi_eh.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_tcq.h>
-#include <scsi/scsi_request.h>
 
 #include "scsi_priv.h"
 #include "scsi_logging.h"
@@ -116,79 +115,6 @@ const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE] = {
 };
 EXPORT_SYMBOL(scsi_device_types);
 
-/*
- * Function:    scsi_allocate_request
- *
- * Purpose:     Allocate a request descriptor.
- *
- * Arguments:   device    - device for which we want a request
- *              gfp_mask  - allocation flags passed to kmalloc
- *
- * Lock status: No locks assumed to be held.  This function is SMP-safe.
- *
- * Returns:     Pointer to request block.
- */
-struct scsi_request *scsi_allocate_request(struct scsi_device *sdev,
-					   gfp_t gfp_mask)
-{
-	const int offset = ALIGN(sizeof(struct scsi_request), 4);
-	const int size = offset + sizeof(struct request);
-	struct scsi_request *sreq;
-
-	sreq = kzalloc(size, gfp_mask);
-	if (likely(sreq != NULL)) {
-		sreq->sr_request = (struct request *)(((char *)sreq) + offset);
-		sreq->sr_device = sdev;
-		sreq->sr_host = sdev->host;
-		sreq->sr_magic = SCSI_REQ_MAGIC;
-		sreq->sr_data_direction = DMA_BIDIRECTIONAL;
-	}
-
-	return sreq;
-}
-EXPORT_SYMBOL(scsi_allocate_request);
-
-void __scsi_release_request(struct scsi_request *sreq)
-{
-	struct request *req = sreq->sr_request;
-
-	/* unlikely because the tag was usually ended earlier by the
-	 * mid-layer. However, for layering reasons ULD's don't end
-	 * the tag of commands they generate. */
-	if (unlikely(blk_rq_tagged(req))) {
-		unsigned long flags;
-		struct request_queue *q = req->q;
-
-		spin_lock_irqsave(q->queue_lock, flags);
-		blk_queue_end_tag(q, req);
-		spin_unlock_irqrestore(q->queue_lock, flags);
-	}
-
-
-	if (likely(sreq->sr_command != NULL)) {
-		struct scsi_cmnd *cmd = sreq->sr_command;
-
-		sreq->sr_command = NULL;
-		scsi_next_command(cmd);
-	}
-}
-
-/*
- * Function:    scsi_release_request
- *
- * Purpose:     Release a request descriptor.
- *
- * Arguments:   sreq    - request to release
- *
- * Lock status: No locks assumed to be held.  This function is SMP-safe.
- */
-void scsi_release_request(struct scsi_request *sreq)
-{
-	__scsi_release_request(sreq);
-	kfree(sreq);
-}
-EXPORT_SYMBOL(scsi_release_request);
-
 struct scsi_host_cmd_pool {
 	kmem_cache_t *slab;
 	unsigned int users;
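
For reference, a minimal sketch of how an upper-level driver or ioctl path consumed the interface removed in the hunk above. Only scsi_allocate_request(), scsi_release_request(), sr_data_direction and sr_result come from the code in this diff; scsi_wait_req() is assumed here as the old synchronous submission helper and is not part of this patch.

#include <linux/errno.h>
#include <linux/slab.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_request.h>

/* Sketch only: issue a TEST UNIT READY through the old scsi_request API. */
static int example_test_unit_ready(struct scsi_device *sdev)
{
	unsigned char cdb[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
	struct scsi_request *sreq;
	int result;

	/* kzalloc()s the request plus an embedded struct request (see above) */
	sreq = scsi_allocate_request(sdev, GFP_KERNEL);
	if (!sreq)
		return -ENOMEM;

	/* allocation defaults to DMA_BIDIRECTIONAL; no data is moved here */
	sreq->sr_data_direction = DMA_NONE;

	/* Assumed helper (not in this diff): send the CDB and wait for it. */
	scsi_wait_req(sreq, cdb, NULL, 0, 30 * HZ, 3);

	/* sr_result was filled in by the code removed from scsi_finish_command() below */
	result = sreq->sr_result;

	/* __scsi_release_request() + kfree(), per the code above */
	scsi_release_request(sreq);
	return result;
}
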
@@ -646,73 +572,6 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
 	return rtn;
 }
 
-/*
- * Function:    scsi_init_cmd_from_req
- *
- * Purpose:     Queue a SCSI command
- * Purpose:     Initialize a struct scsi_cmnd from a struct scsi_request
- *
- * Arguments:   cmd       - command descriptor.
- *              sreq      - Request from the queue.
- *
- * Lock status: None needed.
- *
- * Returns:     Nothing.
- *
- * Notes:       Mainly transfer data from the request structure to the
- *              command structure.  The request structure is allocated
- *              using the normal memory allocator, and requests can pile
- *              up to more or less any depth.  The command structure represents
- *              a consumable resource, as these are allocated into a pool
- *              when the SCSI subsystem initializes.  The preallocation is
- *              required so that in low-memory situations a disk I/O request
- *              won't cause the memory manager to try and write out a page.
- *              The request structure is generally used by ioctls and character
- *              devices.
- */
-void scsi_init_cmd_from_req(struct scsi_cmnd *cmd, struct scsi_request *sreq)
-{
-	sreq->sr_command = cmd;
-
-	cmd->cmd_len = sreq->sr_cmd_len;
-	cmd->use_sg = sreq->sr_use_sg;
-
-	cmd->request = sreq->sr_request;
-	memcpy(cmd->data_cmnd, sreq->sr_cmnd, sizeof(cmd->data_cmnd));
-	cmd->serial_number = 0;
-	cmd->bufflen = sreq->sr_bufflen;
-	cmd->buffer = sreq->sr_buffer;
-	cmd->retries = 0;
-	cmd->allowed = sreq->sr_allowed;
-	cmd->done = sreq->sr_done;
-	cmd->timeout_per_command = sreq->sr_timeout_per_command;
-	cmd->sc_data_direction = sreq->sr_data_direction;
-	cmd->sglist_len = sreq->sr_sglist_len;
-	cmd->underflow = sreq->sr_underflow;
-	cmd->sc_request = sreq;
-	memcpy(cmd->cmnd, sreq->sr_cmnd, sizeof(sreq->sr_cmnd));
-
-	/*
-	 * Zero the sense buffer.  Some host adapters automatically request
-	 * sense on error.  0 is not a valid sense code.
-	 */
-	memset(cmd->sense_buffer, 0, sizeof(sreq->sr_sense_buffer));
-	cmd->request_buffer = sreq->sr_buffer;
-	cmd->request_bufflen = sreq->sr_bufflen;
-	cmd->old_use_sg = cmd->use_sg;
-	if (cmd->cmd_len == 0)
-		cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);
-	cmd->old_cmd_len = cmd->cmd_len;
-	cmd->sc_old_data_direction = cmd->sc_data_direction;
-	cmd->old_underflow = cmd->underflow;
-
-	/*
-	 * Start the timer ticking.
-	 */
-	cmd->result = 0;
-
-	SCSI_LOG_MLQUEUE(3, printk("Leaving scsi_init_cmd_from_req()\n"));
-}
 
 /*
  * Per-CPU I/O completion queue.
@@ -827,7 +686,6 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
 {
 	struct scsi_device *sdev = cmd->device;
 	struct Scsi_Host *shost = sdev->host;
-	struct scsi_request *sreq;
 
 	scsi_device_unbusy(sdev);
 
@@ -857,21 +715,6 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
 	 * We can get here with use_sg=0, causing a panic in the upper level
 	 */
 	cmd->use_sg = cmd->old_use_sg;
-
-	/*
-	 * If there is an associated request structure, copy the data over
-	 * before we call the completion function.
-	 */
-	sreq = cmd->sc_request;
-	if (sreq) {
-		sreq->sr_result = sreq->sr_command->result;
-		if (sreq->sr_result) {
-			memcpy(sreq->sr_sense_buffer,
-			       sreq->sr_command->sense_buffer,
-			       sizeof(sreq->sr_sense_buffer));
-		}
-	}
-
 	cmd->done(cmd);
 }
 EXPORT_SYMBOL(scsi_finish_command);
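
The copy-back removed from scsi_finish_command() above is what made sr_result and, on failure, sr_sense_buffer valid once a command completed. A rough sketch of how a caller inspected them afterwards; the driver_byte()/DRIVER_SENSE helpers from <scsi/scsi.h> and the fixed-format sense-key offset are assumptions, not part of this diff.

#include <linux/errno.h>
#include <scsi/scsi.h>
#include <scsi/scsi_request.h>

/* Sketch only: interpret a completed scsi_request. */
static int example_check_result(struct scsi_request *sreq)
{
	if (sreq->sr_result == 0)
		return 0;			/* command completed successfully */

	/* On failure the removed hunk copied the command's sense data here. */
	if ((driver_byte(sreq->sr_result) & DRIVER_SENSE) &&
	    (sreq->sr_sense_buffer[2] & 0x0f) == UNIT_ATTENTION)
		return -EAGAIN;			/* e.g. media change: worth a retry */

	return -EIO;
}
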