path: root/drivers/scsi/scsi_lib.c
author    Jeff Garzik <jeff@garzik.org>  2006-06-22 22:11:56 -0400
committer Jeff Garzik <jeff@garzik.org>  2006-06-22 22:11:56 -0400
commit    71d530cd1b6d97094481002a04c77fea1c8e1c22 (patch)
tree      e786da7145d83c19a594adf76ed90d52c51058b1 /drivers/scsi/scsi_lib.c
parent    d7a80dad2fe19a2b8c119c8e9cba605474a75a2b (diff)
parent    d588fcbe5a7ba8bba2cebf7799ab2d573717a806 (diff)
Merge branch 'master' into upstream

Conflicts:
	drivers/scsi/libata-core.c
	drivers/scsi/libata-scsi.c
	include/linux/pci_ids.h
Diffstat (limited to 'drivers/scsi/scsi_lib.c')
-rw-r--r--  drivers/scsi/scsi_lib.c | 140
1 file changed, 62 insertions(+), 78 deletions(-)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 28befa7bb0e9..3d04a9f386ac 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -19,18 +19,18 @@
 #include <linux/hardirq.h>
 
 #include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_dbg.h>
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_driver.h>
 #include <scsi/scsi_eh.h>
 #include <scsi/scsi_host.h>
-#include <scsi/scsi_request.h>
 
 #include "scsi_priv.h"
 #include "scsi_logging.h"
 
 
-#define SG_MEMPOOL_NR		(sizeof(scsi_sg_pools)/sizeof(struct scsi_host_sg_pool))
+#define SG_MEMPOOL_NR		ARRAY_SIZE(scsi_sg_pools)
 #define SG_MEMPOOL_SIZE		32
 
 struct scsi_host_sg_pool {
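
For context on the SG_MEMPOOL_NR change above: ARRAY_SIZE() is the kernel's standard element-count macro from include/linux/kernel.h. A minimal userspace sketch of the idiom follows; the simplified macro body and the stand-in pool table are assumptions for illustration (the in-tree array has a real initializer, and the in-tree macro later gained a type check):

#include <stdio.h>
#include <stddef.h>

/* Simplified form of the kernel's ARRAY_SIZE() from <linux/kernel.h>. */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

/* Stand-in for the pool table defined in scsi_lib.c. */
struct scsi_host_sg_pool { const char *name; };
static struct scsi_host_sg_pool scsi_sg_pools[6];

int main(void)
{
	/* Both expressions yield 6, but the open-coded form silently goes
	 * wrong if the element type named in the sizeof ever falls out of
	 * sync with the array definition; the macro is self-updating. */
	size_t n_open  = sizeof(scsi_sg_pools) / sizeof(struct scsi_host_sg_pool);
	size_t n_macro = ARRAY_SIZE(scsi_sg_pools);

	printf("%zu %zu\n", n_open, n_macro);
	return 0;
}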
@@ -83,7 +83,7 @@ static void scsi_unprep_request(struct request *req)
 	struct scsi_cmnd *cmd = req->special;
 
 	req->flags &= ~REQ_DONTPREP;
-	req->special = (req->flags & REQ_SPECIAL) ? cmd->sc_request : NULL;
+	req->special = NULL;
 
 	scsi_put_command(cmd);
 }
@@ -161,72 +161,6 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
 	return 0;
 }
 
-/*
- * Function:    scsi_do_req
- *
- * Purpose:     Queue a SCSI request
- *
- * Arguments:   sreq      - command descriptor.
- *              cmnd      - actual SCSI command to be performed.
- *              buffer    - data buffer.
- *              bufflen   - size of data buffer.
- *              done      - completion function to be run.
- *              timeout   - how long to let it run before timeout.
- *              retries   - number of retries we allow.
- *
- * Lock status: No locks held upon entry.
- *
- * Returns:     Nothing.
- *
- * Notes:       This function is only used for queueing requests for things
- *              like ioctls and character device requests - this is because
- *              we essentially just inject a request into the queue for the
- *              device.
- *
- *              In order to support the scsi_device_quiesce function, we
- *              now inject requests on the *head* of the device queue
- *              rather than the tail.
- */
-void scsi_do_req(struct scsi_request *sreq, const void *cmnd,
-		 void *buffer, unsigned bufflen,
-		 void (*done)(struct scsi_cmnd *),
-		 int timeout, int retries)
-{
-	/*
-	 * If the upper level driver is reusing these things, then
-	 * we should release the low-level block now. Another one will
-	 * be allocated later when this request is getting queued.
-	 */
-	__scsi_release_request(sreq);
-
-	/*
-	 * Our own function scsi_done (which marks the host as not busy,
-	 * disables the timeout counter, etc) will be called by us or by the
-	 * scsi_hosts[host].queuecommand() function needs to also call
-	 * the completion function for the high level driver.
-	 */
-	memcpy(sreq->sr_cmnd, cmnd, sizeof(sreq->sr_cmnd));
-	sreq->sr_bufflen = bufflen;
-	sreq->sr_buffer = buffer;
-	sreq->sr_allowed = retries;
-	sreq->sr_done = done;
-	sreq->sr_timeout_per_command = timeout;
-
-	if (sreq->sr_cmd_len == 0)
-		sreq->sr_cmd_len = COMMAND_SIZE(sreq->sr_cmnd[0]);
-
-	/*
-	 * head injection *required* here otherwise quiesce won't work
-	 *
-	 * Because users of this function are apt to reuse requests with no
-	 * modification, we have to sanitise the request flags here
-	 */
-	sreq->sr_request->flags &= ~REQ_DONTPREP;
-	blk_insert_request(sreq->sr_device->request_queue, sreq->sr_request,
-			   1, sreq);
-}
-EXPORT_SYMBOL(scsi_do_req);
-
 /**
  * scsi_execute - insert request and wait for the result
  * @sdev:	scsi device
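
With scsi_do_req() and struct scsi_request gone, callers are expected to go through scsi_execute() (whose kerneldoc appears in the context lines above) or scsi_execute_req(). A minimal sketch of a replacement call, assuming the 2.6.17-era scsi_execute() signature; the wrapping function and its caller are hypothetical:

/* Hypothetical caller: issue a synchronous TEST UNIT READY through
 * scsi_execute() instead of queueing a struct scsi_request via the
 * removed scsi_do_req(). Needs <scsi/scsi.h>, <scsi/scsi_cmnd.h>,
 * <scsi/scsi_device.h> and <linux/dma-mapping.h> in a real driver. */
static int example_test_unit_ready(struct scsi_device *sdev)
{
	unsigned char cmd[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
	unsigned char sense[SCSI_SENSE_BUFFERSIZE];

	/* DMA_NONE: no data phase; 30 second timeout, up to 3 retries.
	 * The return value is the SCSI result word, 0 on success. */
	return scsi_execute(sdev, cmd, DMA_NONE, NULL, 0, sense,
			    30 * HZ, 3, 0);
}

Unlike scsi_do_req(), this blocks until completion rather than invoking a done() callback, which is why the asynchronous plumbing above could be deleted outright.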
@@ -1300,15 +1234,7 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
 	 * at request->cmd, as this tells us the real story.
 	 */
 	if (req->flags & REQ_SPECIAL && req->special) {
-		struct scsi_request *sreq = req->special;
-
-		if (sreq->sr_magic == SCSI_REQ_MAGIC) {
-			cmd = scsi_get_command(sreq->sr_device, GFP_ATOMIC);
-			if (unlikely(!cmd))
-				goto defer;
-			scsi_init_cmd_from_req(cmd, sreq);
-		} else
-			cmd = req->special;
+		cmd = req->special;
 	} else if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {
 
 		if(unlikely(specials_only) && !(req->flags & REQ_SPECIAL)) {
@@ -2363,3 +2289,61 @@ scsi_target_unblock(struct device *dev)
 	device_for_each_child(dev, NULL, target_unblock);
 }
 EXPORT_SYMBOL_GPL(scsi_target_unblock);
+
+/**
+ * scsi_kmap_atomic_sg - find and atomically map an sg-element
+ * @sg:		scatter-gather list
+ * @sg_count:	number of segments in sg
+ * @offset:	offset in bytes into sg, on return offset into the mapped area
+ * @len:	bytes to map, on return number of bytes mapped
+ *
+ * Returns virtual address of the start of the mapped page
+ */
+void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
+			  size_t *offset, size_t *len)
+{
+	int i;
+	size_t sg_len = 0, len_complete = 0;
+	struct page *page;
+
+	for (i = 0; i < sg_count; i++) {
+		len_complete = sg_len; /* Complete sg-entries */
+		sg_len += sg[i].length;
+		if (sg_len > *offset)
+			break;
+	}
+
+	if (unlikely(i == sg_count)) {
+		printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
+			"elements %d\n",
+		       __FUNCTION__, sg_len, *offset, sg_count);
+		WARN_ON(1);
+		return NULL;
+	}
+
+	/* Offset starting from the beginning of first page in this sg-entry */
+	*offset = *offset - len_complete + sg[i].offset;
+
+	/* Assumption: contiguous pages can be accessed as "page + i" */
+	page = nth_page(sg[i].page, (*offset >> PAGE_SHIFT));
+	*offset &= ~PAGE_MASK;
+
+	/* Bytes in this sg-entry from *offset to the end of the page */
+	sg_len = PAGE_SIZE - *offset;
+	if (*len > sg_len)
+		*len = sg_len;
+
+	return kmap_atomic(page, KM_BIO_SRC_IRQ);
+}
+EXPORT_SYMBOL(scsi_kmap_atomic_sg);
+
+/**
+ * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously
+ *			   mapped with scsi_kmap_atomic_sg
+ * @virt:	virtual address to be unmapped
+ */
+void scsi_kunmap_atomic_sg(void *virt)
+{
+	kunmap_atomic(virt, KM_BIO_SRC_IRQ);
+}
+EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
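
For readers wondering how the new helpers are meant to be used: a minimal sketch of a driver-side copy loop, following the in/out parameter semantics documented in the kerneldoc above. Every identifier here except scsi_kmap_atomic_sg() and scsi_kunmap_atomic_sg() is hypothetical:

/* Illustrative only: copy "count" bytes from "buf" into a command's
 * scatter-gather list, one atomically mapped page at a time. Needs
 * <linux/string.h> and <scsi/scsi_cmnd.h> in a real driver. */
static void example_copy_to_sg(struct scatterlist *sg, int sg_count,
			       const unsigned char *buf, size_t count)
{
	size_t done = 0;

	while (done < count) {
		size_t offset = done;		/* in: byte offset into the sg list */
		size_t len = count - done;	/* in: bytes we would like mapped   */
		void *virt;

		/* On return, "offset" is the offset into the mapped page and
		 * "len" is clamped to at most the end of that page, so the
		 * copy below never crosses a page boundary. */
		virt = scsi_kmap_atomic_sg(sg, sg_count, &offset, &len);
		if (!virt)
			break;

		memcpy((unsigned char *)virt + offset, buf + done, len);
		scsi_kunmap_atomic_sg(virt);

		done += len;
	}
}

Because the mapping is a kmap_atomic() slot, the caller must not sleep between map and unmap; the loop above keeps each critical section to a single memcpy.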