aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/scsi/libfc/fc_fcp.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/scsi/libfc/fc_fcp.c')
-rw-r--r--drivers/scsi/libfc/fc_fcp.c1071
1 files changed, 568 insertions, 503 deletions
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 59a4408b27b5..17396c708b08 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -27,6 +27,7 @@
27#include <linux/scatterlist.h> 27#include <linux/scatterlist.h>
28#include <linux/err.h> 28#include <linux/err.h>
29#include <linux/crc32.h> 29#include <linux/crc32.h>
30#include <linux/slab.h>
30 31
31#include <scsi/scsi_tcq.h> 32#include <scsi/scsi_tcq.h>
32#include <scsi/scsi.h> 33#include <scsi/scsi.h>
@@ -39,26 +40,19 @@
39#include <scsi/libfc.h> 40#include <scsi/libfc.h>
40#include <scsi/fc_encode.h> 41#include <scsi/fc_encode.h>
41 42
42MODULE_AUTHOR("Open-FCoE.org"); 43#include "fc_libfc.h"
43MODULE_DESCRIPTION("libfc");
44MODULE_LICENSE("GPL v2");
45 44
46unsigned int fc_debug_logging; 45struct kmem_cache *scsi_pkt_cachep;
47module_param_named(debug_logging, fc_debug_logging, int, S_IRUGO|S_IWUSR);
48MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
49
50static struct kmem_cache *scsi_pkt_cachep;
51 46
52/* SRB state definitions */ 47/* SRB state definitions */
53#define FC_SRB_FREE 0 /* cmd is free */ 48#define FC_SRB_FREE 0 /* cmd is free */
54#define FC_SRB_CMD_SENT (1 << 0) /* cmd has been sent */ 49#define FC_SRB_CMD_SENT (1 << 0) /* cmd has been sent */
55#define FC_SRB_RCV_STATUS (1 << 1) /* response has arrived */ 50#define FC_SRB_RCV_STATUS (1 << 1) /* response has arrived */
56#define FC_SRB_ABORT_PENDING (1 << 2) /* cmd abort sent to device */ 51#define FC_SRB_ABORT_PENDING (1 << 2) /* cmd abort sent to device */
57#define FC_SRB_ABORTED (1 << 3) /* abort acknowleged */ 52#define FC_SRB_ABORTED (1 << 3) /* abort acknowledged */
58#define FC_SRB_DISCONTIG (1 << 4) /* non-sequential data recvd */ 53#define FC_SRB_DISCONTIG (1 << 4) /* non-sequential data recvd */
59#define FC_SRB_COMPL (1 << 5) /* fc_io_compl has been run */ 54#define FC_SRB_COMPL (1 << 5) /* fc_io_compl has been run */
60#define FC_SRB_FCP_PROCESSING_TMO (1 << 6) /* timer function processing */ 55#define FC_SRB_FCP_PROCESSING_TMO (1 << 6) /* timer function processing */
61#define FC_SRB_NOMEM (1 << 7) /* dropped to out of mem */
62 56
63#define FC_SRB_READ (1 << 1) 57#define FC_SRB_READ (1 << 1)
64#define FC_SRB_WRITE (1 << 0) 58#define FC_SRB_WRITE (1 << 0)
@@ -73,10 +67,22 @@ static struct kmem_cache *scsi_pkt_cachep;
73#define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status) 67#define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status)
74#define CMD_RESID_LEN(Cmnd) ((Cmnd)->SCp.buffers_residual) 68#define CMD_RESID_LEN(Cmnd) ((Cmnd)->SCp.buffers_residual)
75 69
70/**
71 * struct fc_fcp_internal - FCP layer internal data
72 * @scsi_pkt_pool: Memory pool to draw FCP packets from
73 * @scsi_queue_lock: Protects the scsi_pkt_queue
74 * @scsi_pkt_queue: Current FCP packets
75 * @last_can_queue_ramp_down_time: ramp down time
76 * @last_can_queue_ramp_up_time: ramp up time
77 * @max_can_queue: max can_queue size
78 */
76struct fc_fcp_internal { 79struct fc_fcp_internal {
77 mempool_t *scsi_pkt_pool; 80 mempool_t *scsi_pkt_pool;
78 struct list_head scsi_pkt_queue; 81 spinlock_t scsi_queue_lock;
79 u8 throttled; 82 struct list_head scsi_pkt_queue;
83 unsigned long last_can_queue_ramp_down_time;
84 unsigned long last_can_queue_ramp_up_time;
85 int max_can_queue;
80}; 86};
81 87
82#define fc_get_scsi_internal(x) ((struct fc_fcp_internal *)(x)->scsi_priv) 88#define fc_get_scsi_internal(x) ((struct fc_fcp_internal *)(x)->scsi_priv)
@@ -90,9 +96,9 @@ static void fc_fcp_recv(struct fc_seq *, struct fc_frame *, void *);
90static void fc_fcp_resp(struct fc_fcp_pkt *, struct fc_frame *); 96static void fc_fcp_resp(struct fc_fcp_pkt *, struct fc_frame *);
91static void fc_fcp_complete_locked(struct fc_fcp_pkt *); 97static void fc_fcp_complete_locked(struct fc_fcp_pkt *);
92static void fc_tm_done(struct fc_seq *, struct fc_frame *, void *); 98static void fc_tm_done(struct fc_seq *, struct fc_frame *, void *);
93static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp); 99static void fc_fcp_error(struct fc_fcp_pkt *, struct fc_frame *);
94static void fc_timeout_error(struct fc_fcp_pkt *); 100static void fc_timeout_error(struct fc_fcp_pkt *);
95static void fc_fcp_timeout(unsigned long data); 101static void fc_fcp_timeout(unsigned long);
96static void fc_fcp_rec(struct fc_fcp_pkt *); 102static void fc_fcp_rec(struct fc_fcp_pkt *);
97static void fc_fcp_rec_error(struct fc_fcp_pkt *, struct fc_frame *); 103static void fc_fcp_rec_error(struct fc_fcp_pkt *, struct fc_frame *);
98static void fc_fcp_rec_resp(struct fc_seq *, struct fc_frame *, void *); 104static void fc_fcp_rec_resp(struct fc_seq *, struct fc_frame *, void *);
@@ -124,6 +130,7 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *);
124#define FC_SCSI_TM_TOV (10 * HZ) 130#define FC_SCSI_TM_TOV (10 * HZ)
125#define FC_SCSI_REC_TOV (2 * HZ) 131#define FC_SCSI_REC_TOV (2 * HZ)
126#define FC_HOST_RESET_TIMEOUT (30 * HZ) 132#define FC_HOST_RESET_TIMEOUT (30 * HZ)
133#define FC_CAN_QUEUE_PERIOD (60 * HZ)
127 134
128#define FC_MAX_ERROR_CNT 5 135#define FC_MAX_ERROR_CNT 5
129#define FC_MAX_RECOV_RETRY 3 136#define FC_MAX_RECOV_RETRY 3
@@ -131,23 +138,22 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *);
131#define FC_FCP_DFLT_QUEUE_DEPTH 32 138#define FC_FCP_DFLT_QUEUE_DEPTH 32
132 139
133/** 140/**
134 * fc_fcp_pkt_alloc - allocation routine for scsi_pkt packet 141 * fc_fcp_pkt_alloc() - Allocate a fcp_pkt
135 * @lp: fc lport struct 142 * @lport: The local port that the FCP packet is for
136 * @gfp: gfp flags for allocation 143 * @gfp: GFP flags for allocation
137 * 144 *
138 * This is used by upper layer scsi driver. 145 * Return value: fcp_pkt structure or null on allocation failure.
139 * Return Value : scsi_pkt structure or null on allocation failure. 146 * Context: Can be called from process context, no lock is required.
140 * Context : call from process context. no locking required.
141 */ 147 */
142static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lp, gfp_t gfp) 148static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lport, gfp_t gfp)
143{ 149{
144 struct fc_fcp_internal *si = fc_get_scsi_internal(lp); 150 struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
145 struct fc_fcp_pkt *fsp; 151 struct fc_fcp_pkt *fsp;
146 152
147 fsp = mempool_alloc(si->scsi_pkt_pool, gfp); 153 fsp = mempool_alloc(si->scsi_pkt_pool, gfp);
148 if (fsp) { 154 if (fsp) {
149 memset(fsp, 0, sizeof(*fsp)); 155 memset(fsp, 0, sizeof(*fsp));
150 fsp->lp = lp; 156 fsp->lp = lport;
151 atomic_set(&fsp->ref_cnt, 1); 157 atomic_set(&fsp->ref_cnt, 1);
152 init_timer(&fsp->timer); 158 init_timer(&fsp->timer);
153 INIT_LIST_HEAD(&fsp->list); 159 INIT_LIST_HEAD(&fsp->list);
@@ -157,12 +163,11 @@ static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lp, gfp_t gfp)
157} 163}
158 164
159/** 165/**
160 * fc_fcp_pkt_release() - release hold on scsi_pkt packet 166 * fc_fcp_pkt_release() - Release hold on a fcp_pkt
161 * @fsp: fcp packet struct 167 * @fsp: The FCP packet to be released
162 * 168 *
163 * This is used by upper layer scsi driver. 169 * Context: Can be called from process or interrupt context,
164 * Context : call from process and interrupt context. 170 * no lock is required.
165 * no locking required
166 */ 171 */
167static void fc_fcp_pkt_release(struct fc_fcp_pkt *fsp) 172static void fc_fcp_pkt_release(struct fc_fcp_pkt *fsp)
168{ 173{
@@ -173,20 +178,25 @@ static void fc_fcp_pkt_release(struct fc_fcp_pkt *fsp)
173 } 178 }
174} 179}
175 180
181/**
182 * fc_fcp_pkt_hold() - Hold a fcp_pkt
183 * @fsp: The FCP packet to be held
184 */
176static void fc_fcp_pkt_hold(struct fc_fcp_pkt *fsp) 185static void fc_fcp_pkt_hold(struct fc_fcp_pkt *fsp)
177{ 186{
178 atomic_inc(&fsp->ref_cnt); 187 atomic_inc(&fsp->ref_cnt);
179} 188}
180 189
181/** 190/**
182 * fc_fcp_pkt_destory() - release hold on scsi_pkt packet 191 * fc_fcp_pkt_destory() - Release hold on a fcp_pkt
183 * @seq: exchange sequence 192 * @seq: The sequence that the FCP packet is on (required by destructor API)
184 * @fsp: fcp packet struct 193 * @fsp: The FCP packet to be released
194 *
195 * This routine is called by a destructor callback in the exch_seq_send()
196 * routine of the libfc Transport Template. The 'struct fc_seq' is a required
197 * argument even though it is not used by this routine.
185 * 198 *
186 * Release hold on scsi_pkt packet set to keep scsi_pkt 199 * Context: No locking required.
187 * till EM layer exch resource is not freed.
188 * Context : called from from EM layer.
189 * no locking required
190 */ 200 */
191static void fc_fcp_pkt_destroy(struct fc_seq *seq, void *fsp) 201static void fc_fcp_pkt_destroy(struct fc_seq *seq, void *fsp)
192{ 202{
@@ -194,10 +204,10 @@ static void fc_fcp_pkt_destroy(struct fc_seq *seq, void *fsp)
194} 204}
195 205
196/** 206/**
197 * fc_fcp_lock_pkt() - lock a packet and get a ref to it. 207 * fc_fcp_lock_pkt() - Lock a fcp_pkt and increase its reference count
198 * @fsp: fcp packet 208 * @fsp: The FCP packet to be locked and incremented
199 * 209 *
200 * We should only return error if we return a command to scsi-ml before 210 * We should only return error if we return a command to SCSI-ml before
201 * getting a response. This can happen in cases where we send a abort, but 211 * getting a response. This can happen in cases where we send a abort, but
202 * do not wait for the response and the abort and command can be passing 212 * do not wait for the response and the abort and command can be passing
203 * each other on the wire/network-layer. 213 * each other on the wire/network-layer.
@@ -222,18 +232,33 @@ static inline int fc_fcp_lock_pkt(struct fc_fcp_pkt *fsp)
222 return 0; 232 return 0;
223} 233}
224 234
235/**
236 * fc_fcp_unlock_pkt() - Release a fcp_pkt's lock and decrement its
237 * reference count
238 * @fsp: The FCP packet to be unlocked and decremented
239 */
225static inline void fc_fcp_unlock_pkt(struct fc_fcp_pkt *fsp) 240static inline void fc_fcp_unlock_pkt(struct fc_fcp_pkt *fsp)
226{ 241{
227 spin_unlock_bh(&fsp->scsi_pkt_lock); 242 spin_unlock_bh(&fsp->scsi_pkt_lock);
228 fc_fcp_pkt_release(fsp); 243 fc_fcp_pkt_release(fsp);
229} 244}
230 245
246/**
247 * fc_fcp_timer_set() - Start a timer for a fcp_pkt
248 * @fsp: The FCP packet to start a timer for
249 * @delay: The timeout period for the timer
250 */
231static void fc_fcp_timer_set(struct fc_fcp_pkt *fsp, unsigned long delay) 251static void fc_fcp_timer_set(struct fc_fcp_pkt *fsp, unsigned long delay)
232{ 252{
233 if (!(fsp->state & FC_SRB_COMPL)) 253 if (!(fsp->state & FC_SRB_COMPL))
234 mod_timer(&fsp->timer, jiffies + delay); 254 mod_timer(&fsp->timer, jiffies + delay);
235} 255}
236 256
257/**
258 * fc_fcp_send_abort() - Send an abort for exchanges associated with a
259 * fcp_pkt
260 * @fsp: The FCP packet to abort exchanges on
261 */
237static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp) 262static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
238{ 263{
239 if (!fsp->seq_ptr) 264 if (!fsp->seq_ptr)
@@ -243,9 +268,14 @@ static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
243 return fsp->lp->tt.seq_exch_abort(fsp->seq_ptr, 0); 268 return fsp->lp->tt.seq_exch_abort(fsp->seq_ptr, 0);
244} 269}
245 270
246/* 271/**
247 * Retry command. 272 * fc_fcp_retry_cmd() - Retry a fcp_pkt
248 * An abort isn't needed. 273 * @fsp: The FCP packet to be retried
274 *
275 * Sets the status code to be FC_ERROR and then calls
276 * fc_fcp_complete_locked() which in turn calls fc_io_compl().
277 * fc_io_compl() will notify the SCSI-ml that the I/O is done.
278 * The SCSI-ml will retry the command.
249 */ 279 */
250static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp) 280static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp)
251{ 281{
@@ -260,64 +290,145 @@ static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp)
260 fc_fcp_complete_locked(fsp); 290 fc_fcp_complete_locked(fsp);
261} 291}
262 292
263/* 293/**
264 * fc_fcp_ddp_setup - calls to LLD's ddp_setup to set up DDP 294 * fc_fcp_ddp_setup() - Calls a LLD's ddp_setup routine to set up DDP context
265 * transfer for a read I/O indicated by the fc_fcp_pkt. 295 * @fsp: The FCP packet that will manage the DDP frames
266 * @fsp: ptr to the fc_fcp_pkt 296 * @xid: The XID that will be used for the DDP exchange
267 *
268 * This is called in exch_seq_send() when we have a newly allocated
269 * exchange with a valid exchange id to setup ddp.
270 *
271 * returns: none
272 */ 297 */
273void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid) 298void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid)
274{ 299{
275 struct fc_lport *lp; 300 struct fc_lport *lport;
276
277 if (!fsp)
278 return;
279 301
280 lp = fsp->lp; 302 lport = fsp->lp;
281 if ((fsp->req_flags & FC_SRB_READ) && 303 if ((fsp->req_flags & FC_SRB_READ) &&
282 (lp->lro_enabled) && (lp->tt.ddp_setup)) { 304 (lport->lro_enabled) && (lport->tt.ddp_setup)) {
283 if (lp->tt.ddp_setup(lp, xid, scsi_sglist(fsp->cmd), 305 if (lport->tt.ddp_setup(lport, xid, scsi_sglist(fsp->cmd),
284 scsi_sg_count(fsp->cmd))) 306 scsi_sg_count(fsp->cmd)))
285 fsp->xfer_ddp = xid; 307 fsp->xfer_ddp = xid;
286 } 308 }
287} 309}
288EXPORT_SYMBOL(fc_fcp_ddp_setup);
289 310
290/* 311/**
291 * fc_fcp_ddp_done - calls to LLD's ddp_done to release any 312 * fc_fcp_ddp_done() - Calls a LLD's ddp_done routine to release any
292 * DDP related resources for this I/O if it is initialized 313 * DDP related resources for a fcp_pkt
293 * as a ddp transfer 314 * @fsp: The FCP packet that DDP had been used on
294 * @fsp: ptr to the fc_fcp_pkt
295 *
296 * returns: none
297 */ 315 */
298static void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp) 316static void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp)
299{ 317{
300 struct fc_lport *lp; 318 struct fc_lport *lport;
301 319
302 if (!fsp) 320 if (!fsp)
303 return; 321 return;
304 322
305 lp = fsp->lp; 323 if (fsp->xfer_ddp == FC_XID_UNKNOWN)
306 if (fsp->xfer_ddp && lp->tt.ddp_done) { 324 return;
307 fsp->xfer_len = lp->tt.ddp_done(lp, fsp->xfer_ddp); 325
308 fsp->xfer_ddp = 0; 326 lport = fsp->lp;
327 if (lport->tt.ddp_done) {
328 fsp->xfer_len = lport->tt.ddp_done(lport, fsp->xfer_ddp);
329 fsp->xfer_ddp = FC_XID_UNKNOWN;
330 }
331}
332
333/**
334 * fc_fcp_can_queue_ramp_up() - increases can_queue
335 * @lport: lport to ramp up can_queue
336 *
337 * Locking notes: Called with Scsi_Host lock held
338 */
339static void fc_fcp_can_queue_ramp_up(struct fc_lport *lport)
340{
341 struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
342 int can_queue;
343
344 if (si->last_can_queue_ramp_up_time &&
345 (time_before(jiffies, si->last_can_queue_ramp_up_time +
346 FC_CAN_QUEUE_PERIOD)))
347 return;
348
349 if (time_before(jiffies, si->last_can_queue_ramp_down_time +
350 FC_CAN_QUEUE_PERIOD))
351 return;
352
353 si->last_can_queue_ramp_up_time = jiffies;
354
355 can_queue = lport->host->can_queue << 1;
356 if (can_queue >= si->max_can_queue) {
357 can_queue = si->max_can_queue;
358 si->last_can_queue_ramp_down_time = 0;
309 } 359 }
360 lport->host->can_queue = can_queue;
361 shost_printk(KERN_ERR, lport->host, "libfc: increased "
362 "can_queue to %d.\n", can_queue);
310} 363}
311 364
365/**
366 * fc_fcp_can_queue_ramp_down() - reduces can_queue
367 * @lport: lport to reduce can_queue
368 *
369 * If we are getting memory allocation failures, then we may
370 * be trying to execute too many commands. We let the running
371 * commands complete or timeout, then try again with a reduced
372 * can_queue. Eventually we will hit the point where we run
373 * on all reserved structs.
374 *
375 * Locking notes: Called with Scsi_Host lock held
376 */
377static void fc_fcp_can_queue_ramp_down(struct fc_lport *lport)
378{
379 struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
380 int can_queue;
381
382 if (si->last_can_queue_ramp_down_time &&
383 (time_before(jiffies, si->last_can_queue_ramp_down_time +
384 FC_CAN_QUEUE_PERIOD)))
385 return;
386
387 si->last_can_queue_ramp_down_time = jiffies;
388
389 can_queue = lport->host->can_queue;
390 can_queue >>= 1;
391 if (!can_queue)
392 can_queue = 1;
393 lport->host->can_queue = can_queue;
394 shost_printk(KERN_ERR, lport->host, "libfc: Could not allocate frame.\n"
395 "Reducing can_queue to %d.\n", can_queue);
396}
312 397
313/* 398/*
314 * Receive SCSI data from target. 399 * fc_fcp_frame_alloc() - Allocates fc_frame structure and buffer.
315 * Called after receiving solicited data. 400 * @lport: fc lport struct
401 * @len: payload length
402 *
403 * Allocates fc_frame structure and buffer but if fails to allocate
404 * then reduce can_queue.
405 */
406static inline struct fc_frame *fc_fcp_frame_alloc(struct fc_lport *lport,
407 size_t len)
408{
409 struct fc_frame *fp;
410 unsigned long flags;
411
412 fp = fc_frame_alloc(lport, len);
413 if (likely(fp))
414 return fp;
415
416 /* error case */
417 spin_lock_irqsave(lport->host->host_lock, flags);
418 fc_fcp_can_queue_ramp_down(lport);
419 spin_unlock_irqrestore(lport->host->host_lock, flags);
420 return NULL;
421}
422
423/**
424 * fc_fcp_recv_data() - Handler for receiving SCSI-FCP data from a target
425 * @fsp: The FCP packet the data is on
426 * @fp: The data frame
316 */ 427 */
317static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp) 428static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
318{ 429{
319 struct scsi_cmnd *sc = fsp->cmd; 430 struct scsi_cmnd *sc = fsp->cmd;
320 struct fc_lport *lp = fsp->lp; 431 struct fc_lport *lport = fsp->lp;
321 struct fcoe_dev_stats *stats; 432 struct fcoe_dev_stats *stats;
322 struct fc_frame_header *fh; 433 struct fc_frame_header *fh;
323 size_t start_offset; 434 size_t start_offset;
@@ -327,7 +438,7 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
327 size_t len; 438 size_t len;
328 void *buf; 439 void *buf;
329 struct scatterlist *sg; 440 struct scatterlist *sg;
330 size_t remaining; 441 u32 nents;
331 442
332 fh = fc_frame_header_get(fp); 443 fh = fc_frame_header_get(fp);
333 offset = ntohl(fh->fh_parm_offset); 444 offset = ntohl(fh->fh_parm_offset);
@@ -351,65 +462,29 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
351 if (offset != fsp->xfer_len) 462 if (offset != fsp->xfer_len)
352 fsp->state |= FC_SRB_DISCONTIG; 463 fsp->state |= FC_SRB_DISCONTIG;
353 464
354 crc = 0;
355 if (fr_flags(fp) & FCPHF_CRC_UNCHECKED)
356 crc = crc32(~0, (u8 *) fh, sizeof(*fh));
357
358 sg = scsi_sglist(sc); 465 sg = scsi_sglist(sc);
359 remaining = len; 466 nents = scsi_sg_count(sc);
360
361 while (remaining > 0 && sg) {
362 size_t off;
363 void *page_addr;
364 size_t sg_bytes;
365
366 if (offset >= sg->length) {
367 offset -= sg->length;
368 sg = sg_next(sg);
369 continue;
370 }
371 sg_bytes = min(remaining, sg->length - offset);
372
373 /*
374 * The scatterlist item may be bigger than PAGE_SIZE,
375 * but we are limited to mapping PAGE_SIZE at a time.
376 */
377 off = offset + sg->offset;
378 sg_bytes = min(sg_bytes, (size_t)
379 (PAGE_SIZE - (off & ~PAGE_MASK)));
380 page_addr = kmap_atomic(sg_page(sg) + (off >> PAGE_SHIFT),
381 KM_SOFTIRQ0);
382 if (!page_addr)
383 break; /* XXX panic? */
384
385 if (fr_flags(fp) & FCPHF_CRC_UNCHECKED)
386 crc = crc32(crc, buf, sg_bytes);
387 memcpy((char *)page_addr + (off & ~PAGE_MASK), buf,
388 sg_bytes);
389
390 kunmap_atomic(page_addr, KM_SOFTIRQ0);
391 buf += sg_bytes;
392 offset += sg_bytes;
393 remaining -= sg_bytes;
394 copy_len += sg_bytes;
395 }
396 467
397 if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) { 468 if (!(fr_flags(fp) & FCPHF_CRC_UNCHECKED)) {
469 copy_len = fc_copy_buffer_to_sglist(buf, len, sg, &nents,
470 &offset, KM_SOFTIRQ0, NULL);
471 } else {
472 crc = crc32(~0, (u8 *) fh, sizeof(*fh));
473 copy_len = fc_copy_buffer_to_sglist(buf, len, sg, &nents,
474 &offset, KM_SOFTIRQ0, &crc);
398 buf = fc_frame_payload_get(fp, 0); 475 buf = fc_frame_payload_get(fp, 0);
399 if (len % 4) { 476 if (len % 4)
400 crc = crc32(crc, buf + len, 4 - (len % 4)); 477 crc = crc32(crc, buf + len, 4 - (len % 4));
401 len += 4 - (len % 4);
402 }
403 478
404 if (~crc != le32_to_cpu(fr_crc(fp))) { 479 if (~crc != le32_to_cpu(fr_crc(fp))) {
405crc_err: 480crc_err:
406 stats = fc_lport_get_stats(lp); 481 stats = fc_lport_get_stats(lport);
407 stats->ErrorFrames++; 482 stats->ErrorFrames++;
408 /* FIXME - per cpu count, not total count! */ 483 /* FIXME - per cpu count, not total count! */
409 if (stats->InvalidCRCCount++ < 5) 484 if (stats->InvalidCRCCount++ < 5)
410 printk(KERN_WARNING "libfc: CRC error on data " 485 printk(KERN_WARNING "libfc: CRC error on data "
411 "frame for port (%6x)\n", 486 "frame for port (%6x)\n",
412 fc_host_port_id(lp->host)); 487 fc_host_port_id(lport->host));
413 /* 488 /*
414 * Assume the frame is total garbage. 489 * Assume the frame is total garbage.
415 * We may have copied it over the good part 490 * We may have copied it over the good part
@@ -437,18 +512,17 @@ crc_err:
437} 512}
438 513
439/** 514/**
440 * fc_fcp_send_data() - Send SCSI data to target. 515 * fc_fcp_send_data() - Send SCSI data to a target
441 * @fsp: ptr to fc_fcp_pkt 516 * @fsp: The FCP packet the data is on
442 * @sp: ptr to this sequence 517 * @sp: The sequence the data is to be sent on
443 * @offset: starting offset for this data request 518 * @offset: The starting offset for this data request
444 * @seq_blen: the burst length for this data request 519 * @seq_blen: The burst length for this data request
445 * 520 *
446 * Called after receiving a Transfer Ready data descriptor. 521 * Called after receiving a Transfer Ready data descriptor.
447 * if LLD is capable of seq offload then send down seq_blen 522 * If the LLD is capable of sequence offload then send down the
448 * size of data in single frame, otherwise send multiple FC 523 * seq_blen amount of data in single frame, otherwise send
449 * frames of max FC frame payload supported by target port. 524 * multiple frames of the maximum frame payload supported by
450 * 525 * the target port.
451 * Returns : 0 for success.
452 */ 526 */
453static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, 527static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
454 size_t offset, size_t seq_blen) 528 size_t offset, size_t seq_blen)
@@ -457,16 +531,18 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
457 struct scsi_cmnd *sc; 531 struct scsi_cmnd *sc;
458 struct scatterlist *sg; 532 struct scatterlist *sg;
459 struct fc_frame *fp = NULL; 533 struct fc_frame *fp = NULL;
460 struct fc_lport *lp = fsp->lp; 534 struct fc_lport *lport = fsp->lp;
535 struct page *page;
461 size_t remaining; 536 size_t remaining;
462 size_t t_blen; 537 size_t t_blen;
463 size_t tlen; 538 size_t tlen;
464 size_t sg_bytes; 539 size_t sg_bytes;
465 size_t frame_offset, fh_parm_offset; 540 size_t frame_offset, fh_parm_offset;
541 size_t off;
466 int error; 542 int error;
467 void *data = NULL; 543 void *data = NULL;
468 void *page_addr; 544 void *page_addr;
469 int using_sg = lp->sg_supp; 545 int using_sg = lport->sg_supp;
470 u32 f_ctl; 546 u32 f_ctl;
471 547
472 WARN_ON(seq_blen <= 0); 548 WARN_ON(seq_blen <= 0);
@@ -488,10 +564,10 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
488 * to max FC frame payload previously set in fsp->max_payload. 564 * to max FC frame payload previously set in fsp->max_payload.
489 */ 565 */
490 t_blen = fsp->max_payload; 566 t_blen = fsp->max_payload;
491 if (lp->seq_offload) { 567 if (lport->seq_offload) {
492 t_blen = min(seq_blen, (size_t)lp->lso_max); 568 t_blen = min(seq_blen, (size_t)lport->lso_max);
493 FC_FCP_DBG(fsp, "fsp=%p:lso:blen=%zx lso_max=0x%x t_blen=%zx\n", 569 FC_FCP_DBG(fsp, "fsp=%p:lso:blen=%zx lso_max=0x%x t_blen=%zx\n",
494 fsp, seq_blen, lp->lso_max, t_blen); 570 fsp, seq_blen, lport->lso_max, t_blen);
495 } 571 }
496 572
497 WARN_ON(t_blen < FC_MIN_MAX_PAYLOAD); 573 WARN_ON(t_blen < FC_MIN_MAX_PAYLOAD);
@@ -503,7 +579,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
503 remaining = seq_blen; 579 remaining = seq_blen;
504 fh_parm_offset = frame_offset = offset; 580 fh_parm_offset = frame_offset = offset;
505 tlen = 0; 581 tlen = 0;
506 seq = lp->tt.seq_start_next(seq); 582 seq = lport->tt.seq_start_next(seq);
507 f_ctl = FC_FC_REL_OFF; 583 f_ctl = FC_FC_REL_OFF;
508 WARN_ON(!seq); 584 WARN_ON(!seq);
509 585
@@ -525,43 +601,34 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
525 */ 601 */
526 if (tlen % 4) 602 if (tlen % 4)
527 using_sg = 0; 603 using_sg = 0;
528 if (using_sg) { 604 fp = fc_frame_alloc(lport, using_sg ? 0 : tlen);
529 fp = _fc_frame_alloc(lp, 0); 605 if (!fp)
530 if (!fp) 606 return -ENOMEM;
531 return -ENOMEM;
532 } else {
533 fp = fc_frame_alloc(lp, tlen);
534 if (!fp)
535 return -ENOMEM;
536 607
537 data = (void *)(fr_hdr(fp)) + 608 data = fc_frame_header_get(fp) + 1;
538 sizeof(struct fc_frame_header);
539 }
540 fh_parm_offset = frame_offset; 609 fh_parm_offset = frame_offset;
541 fr_max_payload(fp) = fsp->max_payload; 610 fr_max_payload(fp) = fsp->max_payload;
542 } 611 }
612
613 off = offset + sg->offset;
543 sg_bytes = min(tlen, sg->length - offset); 614 sg_bytes = min(tlen, sg->length - offset);
615 sg_bytes = min(sg_bytes,
616 (size_t) (PAGE_SIZE - (off & ~PAGE_MASK)));
617 page = sg_page(sg) + (off >> PAGE_SHIFT);
544 if (using_sg) { 618 if (using_sg) {
545 get_page(sg_page(sg)); 619 get_page(page);
546 skb_fill_page_desc(fp_skb(fp), 620 skb_fill_page_desc(fp_skb(fp),
547 skb_shinfo(fp_skb(fp))->nr_frags, 621 skb_shinfo(fp_skb(fp))->nr_frags,
548 sg_page(sg), sg->offset + offset, 622 page, off & ~PAGE_MASK, sg_bytes);
549 sg_bytes);
550 fp_skb(fp)->data_len += sg_bytes; 623 fp_skb(fp)->data_len += sg_bytes;
551 fr_len(fp) += sg_bytes; 624 fr_len(fp) += sg_bytes;
552 fp_skb(fp)->truesize += PAGE_SIZE; 625 fp_skb(fp)->truesize += PAGE_SIZE;
553 } else { 626 } else {
554 size_t off = offset + sg->offset;
555
556 /* 627 /*
557 * The scatterlist item may be bigger than PAGE_SIZE, 628 * The scatterlist item may be bigger than PAGE_SIZE,
558 * but we must not cross pages inside the kmap. 629 * but we must not cross pages inside the kmap.
559 */ 630 */
560 sg_bytes = min(sg_bytes, (size_t) (PAGE_SIZE - 631 page_addr = kmap_atomic(page, KM_SOFTIRQ0);
561 (off & ~PAGE_MASK)));
562 page_addr = kmap_atomic(sg_page(sg) +
563 (off >> PAGE_SHIFT),
564 KM_SOFTIRQ0);
565 memcpy(data, (char *)page_addr + (off & ~PAGE_MASK), 632 memcpy(data, (char *)page_addr + (off & ~PAGE_MASK),
566 sg_bytes); 633 sg_bytes);
567 kunmap_atomic(page_addr, KM_SOFTIRQ0); 634 kunmap_atomic(page_addr, KM_SOFTIRQ0);
@@ -572,7 +639,8 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
572 tlen -= sg_bytes; 639 tlen -= sg_bytes;
573 remaining -= sg_bytes; 640 remaining -= sg_bytes;
574 641
575 if (tlen) 642 if ((skb_shinfo(fp_skb(fp))->nr_frags < FC_FRAME_SG_LEN) &&
643 (tlen))
576 continue; 644 continue;
577 645
578 /* 646 /*
@@ -589,7 +657,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
589 /* 657 /*
590 * send fragment using for a sequence. 658 * send fragment using for a sequence.
591 */ 659 */
592 error = lp->tt.seq_send(lp, seq, fp); 660 error = lport->tt.seq_send(lport, seq, fp);
593 if (error) { 661 if (error) {
594 WARN_ON(1); /* send error should be rare */ 662 WARN_ON(1); /* send error should be rare */
595 fc_fcp_retry_cmd(fsp); 663 fc_fcp_retry_cmd(fsp);
@@ -601,6 +669,11 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
601 return 0; 669 return 0;
602} 670}
603 671
672/**
673 * fc_fcp_abts_resp() - Send an ABTS response
674 * @fsp: The FCP packet that is being aborted
675 * @fp: The response frame
676 */
604static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp) 677static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
605{ 678{
606 int ba_done = 1; 679 int ba_done = 1;
@@ -637,46 +710,13 @@ static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
637} 710}
638 711
639/** 712/**
640 * fc_fcp_reduce_can_queue() - drop can_queue 713 * fc_fcp_recv() - Reveive an FCP frame
641 * @lp: lport to drop queueing for
642 *
643 * If we are getting memory allocation failures, then we may
644 * be trying to execute too many commands. We let the running
645 * commands complete or timeout, then try again with a reduced
646 * can_queue. Eventually we will hit the point where we run
647 * on all reserved structs.
648 */
649static void fc_fcp_reduce_can_queue(struct fc_lport *lp)
650{
651 struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
652 unsigned long flags;
653 int can_queue;
654
655 spin_lock_irqsave(lp->host->host_lock, flags);
656 if (si->throttled)
657 goto done;
658 si->throttled = 1;
659
660 can_queue = lp->host->can_queue;
661 can_queue >>= 1;
662 if (!can_queue)
663 can_queue = 1;
664 lp->host->can_queue = can_queue;
665 shost_printk(KERN_ERR, lp->host, "libfc: Could not allocate frame.\n"
666 "Reducing can_queue to %d.\n", can_queue);
667done:
668 spin_unlock_irqrestore(lp->host->host_lock, flags);
669}
670
671/**
672 * fc_fcp_recv() - Reveive FCP frames
673 * @seq: The sequence the frame is on 714 * @seq: The sequence the frame is on
674 * @fp: The FC frame 715 * @fp: The received frame
675 * @arg: The related FCP packet 716 * @arg: The related FCP packet
676 * 717 *
677 * Return : None 718 * Context: Called from Soft IRQ context. Can not be called
678 * Context : called from Soft IRQ context 719 * holding the FCP packet list lock.
679 * can not called holding list lock
680 */ 720 */
681static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg) 721static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg)
682{ 722{
@@ -687,8 +727,10 @@ static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg)
687 u8 r_ctl; 727 u8 r_ctl;
688 int rc = 0; 728 int rc = 0;
689 729
690 if (IS_ERR(fp)) 730 if (IS_ERR(fp)) {
691 goto errout; 731 fc_fcp_error(fsp, fp);
732 return;
733 }
692 734
693 fh = fc_frame_header_get(fp); 735 fh = fc_frame_header_get(fp);
694 r_ctl = fh->fh_r_ctl; 736 r_ctl = fh->fh_r_ctl;
@@ -721,8 +763,6 @@ static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg)
721 (size_t) ntohl(dd->ft_burst_len)); 763 (size_t) ntohl(dd->ft_burst_len));
722 if (!rc) 764 if (!rc)
723 seq->rec_data = fsp->xfer_len; 765 seq->rec_data = fsp->xfer_len;
724 else if (rc == -ENOMEM)
725 fsp->state |= FC_SRB_NOMEM;
726 } else if (r_ctl == FC_RCTL_DD_SOL_DATA) { 766 } else if (r_ctl == FC_RCTL_DD_SOL_DATA) {
727 /* 767 /*
728 * received a DATA frame 768 * received a DATA frame
@@ -742,13 +782,13 @@ unlock:
742 fc_fcp_unlock_pkt(fsp); 782 fc_fcp_unlock_pkt(fsp);
743out: 783out:
744 fc_frame_free(fp); 784 fc_frame_free(fp);
745errout:
746 if (IS_ERR(fp))
747 fc_fcp_error(fsp, fp);
748 else if (rc == -ENOMEM)
749 fc_fcp_reduce_can_queue(lport);
750} 785}
751 786
787/**
788 * fc_fcp_resp() - Handler for FCP responses
789 * @fsp: The FCP packet the response is for
790 * @fp: The response frame
791 */
752static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp) 792static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
753{ 793{
754 struct fc_frame_header *fh; 794 struct fc_frame_header *fh;
@@ -862,15 +902,16 @@ err:
862} 902}
863 903
864/** 904/**
865 * fc_fcp_complete_locked() - complete processing of a fcp packet 905 * fc_fcp_complete_locked() - Complete processing of a fcp_pkt with the
866 * @fsp: fcp packet 906 * fcp_pkt lock held
907 * @fsp: The FCP packet to be completed
867 * 908 *
868 * This function may sleep if a timer is pending. The packet lock must be 909 * This function may sleep if a timer is pending. The packet lock must be
869 * held, and the host lock must not be held. 910 * held, and the host lock must not be held.
870 */ 911 */
871static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp) 912static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
872{ 913{
873 struct fc_lport *lp = fsp->lp; 914 struct fc_lport *lport = fsp->lp;
874 struct fc_seq *seq; 915 struct fc_seq *seq;
875 struct fc_exch *ep; 916 struct fc_exch *ep;
876 u32 f_ctl; 917 u32 f_ctl;
@@ -901,8 +942,8 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
901 struct fc_frame *conf_frame; 942 struct fc_frame *conf_frame;
902 struct fc_seq *csp; 943 struct fc_seq *csp;
903 944
904 csp = lp->tt.seq_start_next(seq); 945 csp = lport->tt.seq_start_next(seq);
905 conf_frame = fc_frame_alloc(fsp->lp, 0); 946 conf_frame = fc_fcp_frame_alloc(fsp->lp, 0);
906 if (conf_frame) { 947 if (conf_frame) {
907 f_ctl = FC_FC_SEQ_INIT; 948 f_ctl = FC_FC_SEQ_INIT;
908 f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ; 949 f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ;
@@ -910,43 +951,48 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
910 fc_fill_fc_hdr(conf_frame, FC_RCTL_DD_SOL_CTL, 951 fc_fill_fc_hdr(conf_frame, FC_RCTL_DD_SOL_CTL,
911 ep->did, ep->sid, 952 ep->did, ep->sid,
912 FC_TYPE_FCP, f_ctl, 0); 953 FC_TYPE_FCP, f_ctl, 0);
913 lp->tt.seq_send(lp, csp, conf_frame); 954 lport->tt.seq_send(lport, csp, conf_frame);
914 } 955 }
915 } 956 }
916 lp->tt.exch_done(seq); 957 lport->tt.exch_done(seq);
917 } 958 }
918 fc_io_compl(fsp); 959 fc_io_compl(fsp);
919} 960}
920 961
962/**
963 * fc_fcp_cleanup_cmd() - Cancel the active exchange on a fcp_pkt
964 * @fsp: The FCP packet whose exchanges should be canceled
965 * @error: The reason for the cancellation
966 */
921static void fc_fcp_cleanup_cmd(struct fc_fcp_pkt *fsp, int error) 967static void fc_fcp_cleanup_cmd(struct fc_fcp_pkt *fsp, int error)
922{ 968{
923 struct fc_lport *lp = fsp->lp; 969 struct fc_lport *lport = fsp->lp;
924 970
925 if (fsp->seq_ptr) { 971 if (fsp->seq_ptr) {
926 lp->tt.exch_done(fsp->seq_ptr); 972 lport->tt.exch_done(fsp->seq_ptr);
927 fsp->seq_ptr = NULL; 973 fsp->seq_ptr = NULL;
928 } 974 }
929 fsp->status_code = error; 975 fsp->status_code = error;
930} 976}
931 977
932/** 978/**
933 * fc_fcp_cleanup_each_cmd() - Cleanup active commads 979 * fc_fcp_cleanup_each_cmd() - Cancel all exchanges on a local port
934 * @lp: logical port 980 * @lport: The local port whose exchanges should be canceled
935 * @id: target id 981 * @id: The target's ID
936 * @lun: lun 982 * @lun: The LUN
937 * @error: fsp status code 983 * @error: The reason for cancellation
938 * 984 *
939 * If lun or id is -1, they are ignored. 985 * If lun or id is -1, they are ignored.
940 */ 986 */
941static void fc_fcp_cleanup_each_cmd(struct fc_lport *lp, unsigned int id, 987static void fc_fcp_cleanup_each_cmd(struct fc_lport *lport, unsigned int id,
942 unsigned int lun, int error) 988 unsigned int lun, int error)
943{ 989{
944 struct fc_fcp_internal *si = fc_get_scsi_internal(lp); 990 struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
945 struct fc_fcp_pkt *fsp; 991 struct fc_fcp_pkt *fsp;
946 struct scsi_cmnd *sc_cmd; 992 struct scsi_cmnd *sc_cmd;
947 unsigned long flags; 993 unsigned long flags;
948 994
949 spin_lock_irqsave(lp->host->host_lock, flags); 995 spin_lock_irqsave(&si->scsi_queue_lock, flags);
950restart: 996restart:
951 list_for_each_entry(fsp, &si->scsi_pkt_queue, list) { 997 list_for_each_entry(fsp, &si->scsi_pkt_queue, list) {
952 sc_cmd = fsp->cmd; 998 sc_cmd = fsp->cmd;
@@ -957,7 +1003,7 @@ restart:
957 continue; 1003 continue;
958 1004
959 fc_fcp_pkt_hold(fsp); 1005 fc_fcp_pkt_hold(fsp);
960 spin_unlock_irqrestore(lp->host->host_lock, flags); 1006 spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
961 1007
962 if (!fc_fcp_lock_pkt(fsp)) { 1008 if (!fc_fcp_lock_pkt(fsp)) {
963 fc_fcp_cleanup_cmd(fsp, error); 1009 fc_fcp_cleanup_cmd(fsp, error);
@@ -966,35 +1012,37 @@ restart:
966 } 1012 }
967 1013
968 fc_fcp_pkt_release(fsp); 1014 fc_fcp_pkt_release(fsp);
969 spin_lock_irqsave(lp->host->host_lock, flags); 1015 spin_lock_irqsave(&si->scsi_queue_lock, flags);
970 /* 1016 /*
971 * while we dropped the lock multiple pkts could 1017 * while we dropped the lock multiple pkts could
972 * have been released, so we have to start over. 1018 * have been released, so we have to start over.
973 */ 1019 */
974 goto restart; 1020 goto restart;
975 } 1021 }
976 spin_unlock_irqrestore(lp->host->host_lock, flags); 1022 spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
977} 1023}
978 1024
979static void fc_fcp_abort_io(struct fc_lport *lp) 1025/**
1026 * fc_fcp_abort_io() - Abort all FCP-SCSI exchanges on a local port
1027 * @lport: The local port whose exchanges are to be aborted
1028 */
1029static void fc_fcp_abort_io(struct fc_lport *lport)
980{ 1030{
981 fc_fcp_cleanup_each_cmd(lp, -1, -1, FC_HRD_ERROR); 1031 fc_fcp_cleanup_each_cmd(lport, -1, -1, FC_HRD_ERROR);
982} 1032}
983 1033
984/** 1034/**
985 * fc_fcp_pkt_send() - send a fcp packet to the lower level. 1035 * fc_fcp_pkt_send() - Send a fcp_pkt
986 * @lp: fc lport 1036 * @lport: The local port to send the FCP packet on
987 * @fsp: fc packet. 1037 * @fsp: The FCP packet to send
988 * 1038 *
989 * This is called by upper layer protocol. 1039 * Return: Zero for success and -1 for failure
990 * Return : zero for success and -1 for failure 1040 * Locks: Called without locks held
991 * Context : called from queuecommand which can be called from process
992 * or scsi soft irq.
993 * Locks : called with the host lock and irqs disabled.
994 */ 1041 */
995static int fc_fcp_pkt_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp) 1042static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp)
996{ 1043{
997 struct fc_fcp_internal *si = fc_get_scsi_internal(lp); 1044 struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
1045 unsigned long flags;
998 int rc; 1046 int rc;
999 1047
1000 fsp->cmd->SCp.ptr = (char *)fsp; 1048 fsp->cmd->SCp.ptr = (char *)fsp;
@@ -1004,18 +1052,27 @@ static int fc_fcp_pkt_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp)
1004 int_to_scsilun(fsp->cmd->device->lun, 1052 int_to_scsilun(fsp->cmd->device->lun,
1005 (struct scsi_lun *)fsp->cdb_cmd.fc_lun); 1053 (struct scsi_lun *)fsp->cdb_cmd.fc_lun);
1006 memcpy(fsp->cdb_cmd.fc_cdb, fsp->cmd->cmnd, fsp->cmd->cmd_len); 1054 memcpy(fsp->cdb_cmd.fc_cdb, fsp->cmd->cmnd, fsp->cmd->cmd_len);
1007 list_add_tail(&fsp->list, &si->scsi_pkt_queue);
1008 1055
1009 spin_unlock_irq(lp->host->host_lock); 1056 spin_lock_irqsave(&si->scsi_queue_lock, flags);
1010 rc = lp->tt.fcp_cmd_send(lp, fsp, fc_fcp_recv); 1057 list_add_tail(&fsp->list, &si->scsi_pkt_queue);
1011 spin_lock_irq(lp->host->host_lock); 1058 spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
1012 if (rc) 1059 rc = lport->tt.fcp_cmd_send(lport, fsp, fc_fcp_recv);
1060 if (unlikely(rc)) {
1061 spin_lock_irqsave(&si->scsi_queue_lock, flags);
1013 list_del(&fsp->list); 1062 list_del(&fsp->list);
1063 spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
1064 }
1014 1065
1015 return rc; 1066 return rc;
1016} 1067}
1017 1068
1018static int fc_fcp_cmd_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp, 1069/**
1070 * fc_fcp_cmd_send() - Send a FCP command
1071 * @lport: The local port to send the command on
1072 * @fsp: The FCP packet the command is on
1073 * @resp: The handler for the response
1074 */
1075static int fc_fcp_cmd_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp,
1019 void (*resp)(struct fc_seq *, 1076 void (*resp)(struct fc_seq *,
1020 struct fc_frame *fp, 1077 struct fc_frame *fp,
1021 void *arg)) 1078 void *arg))
@@ -1023,14 +1080,14 @@ static int fc_fcp_cmd_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
1023 struct fc_frame *fp; 1080 struct fc_frame *fp;
1024 struct fc_seq *seq; 1081 struct fc_seq *seq;
1025 struct fc_rport *rport; 1082 struct fc_rport *rport;
1026 struct fc_rport_libfc_priv *rp; 1083 struct fc_rport_libfc_priv *rpriv;
1027 const size_t len = sizeof(fsp->cdb_cmd); 1084 const size_t len = sizeof(fsp->cdb_cmd);
1028 int rc = 0; 1085 int rc = 0;
1029 1086
1030 if (fc_fcp_lock_pkt(fsp)) 1087 if (fc_fcp_lock_pkt(fsp))
1031 return 0; 1088 return 0;
1032 1089
1033 fp = fc_frame_alloc(lp, sizeof(fsp->cdb_cmd)); 1090 fp = fc_fcp_frame_alloc(lport, sizeof(fsp->cdb_cmd));
1034 if (!fp) { 1091 if (!fp) {
1035 rc = -1; 1092 rc = -1;
1036 goto unlock; 1093 goto unlock;
@@ -1040,15 +1097,15 @@ static int fc_fcp_cmd_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
1040 fr_fsp(fp) = fsp; 1097 fr_fsp(fp) = fsp;
1041 rport = fsp->rport; 1098 rport = fsp->rport;
1042 fsp->max_payload = rport->maxframe_size; 1099 fsp->max_payload = rport->maxframe_size;
1043 rp = rport->dd_data; 1100 rpriv = rport->dd_data;
1044 1101
1045 fc_fill_fc_hdr(fp, FC_RCTL_DD_UNSOL_CMD, rport->port_id, 1102 fc_fill_fc_hdr(fp, FC_RCTL_DD_UNSOL_CMD, rport->port_id,
1046 fc_host_port_id(rp->local_port->host), FC_TYPE_FCP, 1103 fc_host_port_id(rpriv->local_port->host), FC_TYPE_FCP,
1047 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); 1104 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
1048 1105
1049 seq = lp->tt.exch_seq_send(lp, fp, resp, fc_fcp_pkt_destroy, fsp, 0); 1106 seq = lport->tt.exch_seq_send(lport, fp, resp, fc_fcp_pkt_destroy,
1107 fsp, 0);
1050 if (!seq) { 1108 if (!seq) {
1051 fc_frame_free(fp);
1052 rc = -1; 1109 rc = -1;
1053 goto unlock; 1110 goto unlock;
1054 } 1111 }
@@ -1065,8 +1122,10 @@ unlock:
1065 return rc; 1122 return rc;
1066} 1123}
1067 1124
1068/* 1125/**
1069 * transport error handler 1126 * fc_fcp_error() - Handler for FCP layer errors
1127 * @fsp: The FCP packet the error is on
1128 * @fp: The frame that has errored
1070 */ 1129 */
1071static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) 1130static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
1072{ 1131{
@@ -1091,11 +1150,13 @@ unlock:
1091 fc_fcp_unlock_pkt(fsp); 1150 fc_fcp_unlock_pkt(fsp);
1092} 1151}
1093 1152
1094/* 1153/**
1095 * Scsi abort handler- calls to send an abort 1154 * fc_fcp_pkt_abort() - Abort a fcp_pkt
1096 * and then wait for abort completion 1155 * @fsp: The FCP packet to abort on
1156 *
1157 * Called to send an abort and then wait for abort completion
1097 */ 1158 */
1098static int fc_fcp_pkt_abort(struct fc_lport *lp, struct fc_fcp_pkt *fsp) 1159static int fc_fcp_pkt_abort(struct fc_fcp_pkt *fsp)
1099{ 1160{
1100 int rc = FAILED; 1161 int rc = FAILED;
1101 1162
@@ -1122,14 +1183,15 @@ static int fc_fcp_pkt_abort(struct fc_lport *lp, struct fc_fcp_pkt *fsp)
1122 return rc; 1183 return rc;
1123} 1184}
1124 1185
1125/* 1186/**
1126 * Retry LUN reset after resource allocation failed. 1187 * fc_lun_reset_send() - Send LUN reset command
1188 * @data: The FCP packet that identifies the LUN to be reset
1127 */ 1189 */
1128static void fc_lun_reset_send(unsigned long data) 1190static void fc_lun_reset_send(unsigned long data)
1129{ 1191{
1130 struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data; 1192 struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data;
1131 struct fc_lport *lp = fsp->lp; 1193 struct fc_lport *lport = fsp->lp;
1132 if (lp->tt.fcp_cmd_send(lp, fsp, fc_tm_done)) { 1194 if (lport->tt.fcp_cmd_send(lport, fsp, fc_tm_done)) {
1133 if (fsp->recov_retry++ >= FC_MAX_RECOV_RETRY) 1195 if (fsp->recov_retry++ >= FC_MAX_RECOV_RETRY)
1134 return; 1196 return;
1135 if (fc_fcp_lock_pkt(fsp)) 1197 if (fc_fcp_lock_pkt(fsp))
@@ -1140,11 +1202,15 @@ static void fc_lun_reset_send(unsigned long data)
1140 } 1202 }
1141} 1203}
1142 1204
1143/* 1205/**
1144 * Scsi device reset handler- send a LUN RESET to the device 1206 * fc_lun_reset() - Send a LUN RESET command to a device
1145 * and wait for reset reply 1207 * and wait for the reply
1208 * @lport: The local port to sent the comand on
1209 * @fsp: The FCP packet that identifies the LUN to be reset
1210 * @id: The SCSI command ID
1211 * @lun: The LUN ID to be reset
1146 */ 1212 */
1147static int fc_lun_reset(struct fc_lport *lp, struct fc_fcp_pkt *fsp, 1213static int fc_lun_reset(struct fc_lport *lport, struct fc_fcp_pkt *fsp,
1148 unsigned int id, unsigned int lun) 1214 unsigned int id, unsigned int lun)
1149{ 1215{
1150 int rc; 1216 int rc;
@@ -1172,14 +1238,14 @@ static int fc_lun_reset(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
1172 1238
1173 spin_lock_bh(&fsp->scsi_pkt_lock); 1239 spin_lock_bh(&fsp->scsi_pkt_lock);
1174 if (fsp->seq_ptr) { 1240 if (fsp->seq_ptr) {
1175 lp->tt.exch_done(fsp->seq_ptr); 1241 lport->tt.exch_done(fsp->seq_ptr);
1176 fsp->seq_ptr = NULL; 1242 fsp->seq_ptr = NULL;
1177 } 1243 }
1178 fsp->wait_for_comp = 0; 1244 fsp->wait_for_comp = 0;
1179 spin_unlock_bh(&fsp->scsi_pkt_lock); 1245 spin_unlock_bh(&fsp->scsi_pkt_lock);
1180 1246
1181 if (!rc) { 1247 if (!rc) {
1182 FC_SCSI_DBG(lp, "lun reset failed\n"); 1248 FC_SCSI_DBG(lport, "lun reset failed\n");
1183 return FAILED; 1249 return FAILED;
1184 } 1250 }
1185 1251
@@ -1187,13 +1253,16 @@ static int fc_lun_reset(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
1187 if (fsp->cdb_status != FCP_TMF_CMPL) 1253 if (fsp->cdb_status != FCP_TMF_CMPL)
1188 return FAILED; 1254 return FAILED;
1189 1255
1190 FC_SCSI_DBG(lp, "lun reset to lun %u completed\n", lun); 1256 FC_SCSI_DBG(lport, "lun reset to lun %u completed\n", lun);
1191 fc_fcp_cleanup_each_cmd(lp, id, lun, FC_CMD_ABORTED); 1257 fc_fcp_cleanup_each_cmd(lport, id, lun, FC_CMD_ABORTED);
1192 return SUCCESS; 1258 return SUCCESS;
1193} 1259}
1194 1260
1195/* 1261/**
1196 * Task Managment response handler 1262 * fc_tm_done() - Task Managment response handler
1263 * @seq: The sequence that the response is on
1264 * @fp: The response frame
1265 * @arg: The FCP packet the response is for
1197 */ 1266 */
1198static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg) 1267static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg)
1199{ 1268{
@@ -1230,34 +1299,31 @@ static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg)
1230 fc_fcp_unlock_pkt(fsp); 1299 fc_fcp_unlock_pkt(fsp);
1231} 1300}
1232 1301
1233static void fc_fcp_cleanup(struct fc_lport *lp) 1302/**
1303 * fc_fcp_cleanup() - Cleanup all FCP exchanges on a local port
1304 * @lport: The local port to be cleaned up
1305 */
1306static void fc_fcp_cleanup(struct fc_lport *lport)
1234{ 1307{
1235 fc_fcp_cleanup_each_cmd(lp, -1, -1, FC_ERROR); 1308 fc_fcp_cleanup_each_cmd(lport, -1, -1, FC_ERROR);
1236} 1309}
1237 1310
1238/* 1311/**
1239 * fc_fcp_timeout: called by OS timer function. 1312 * fc_fcp_timeout() - Handler for fcp_pkt timeouts
1240 * 1313 * @data: The FCP packet that has timed out
1241 * The timer has been inactivated and must be reactivated if desired
1242 * using fc_fcp_timer_set().
1243 *
1244 * Algorithm:
1245 *
1246 * If REC is supported, just issue it, and return. The REC exchange will
1247 * complete or time out, and recovery can continue at that point.
1248 *
1249 * Otherwise, if the response has been received without all the data,
1250 * it has been ER_TIMEOUT since the response was received.
1251 * 1314 *
1252 * If the response has not been received, 1315 * If REC is supported then just issue it and return. The REC exchange will
1253 * we see if data was received recently. If it has been, we continue waiting, 1316 * complete or time out and recovery can continue at that point. Otherwise,
1254 * otherwise, we abort the command. 1317 * if the response has been received without all the data it has been
1318 * ER_TIMEOUT since the response was received. If the response has not been
1319 * received we see if data was received recently. If it has been then we
1320 * continue waiting, otherwise, we abort the command.
1255 */ 1321 */
1256static void fc_fcp_timeout(unsigned long data) 1322static void fc_fcp_timeout(unsigned long data)
1257{ 1323{
1258 struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data; 1324 struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data;
1259 struct fc_rport *rport = fsp->rport; 1325 struct fc_rport *rport = fsp->rport;
1260 struct fc_rport_libfc_priv *rp = rport->dd_data; 1326 struct fc_rport_libfc_priv *rpriv = rport->dd_data;
1261 1327
1262 if (fc_fcp_lock_pkt(fsp)) 1328 if (fc_fcp_lock_pkt(fsp))
1263 return; 1329 return;
@@ -1267,7 +1333,7 @@ static void fc_fcp_timeout(unsigned long data)
1267 1333
1268 fsp->state |= FC_SRB_FCP_PROCESSING_TMO; 1334 fsp->state |= FC_SRB_FCP_PROCESSING_TMO;
1269 1335
1270 if (rp->flags & FC_RP_FLAGS_REC_SUPPORTED) 1336 if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED)
1271 fc_fcp_rec(fsp); 1337 fc_fcp_rec(fsp);
1272 else if (time_after_eq(fsp->last_pkt_time + (FC_SCSI_ER_TIMEOUT / 2), 1338 else if (time_after_eq(fsp->last_pkt_time + (FC_SCSI_ER_TIMEOUT / 2),
1273 jiffies)) 1339 jiffies))
@@ -1281,39 +1347,40 @@ unlock:
1281 fc_fcp_unlock_pkt(fsp); 1347 fc_fcp_unlock_pkt(fsp);
1282} 1348}
1283 1349
1284/* 1350/**
1285 * Send a REC ELS request 1351 * fc_fcp_rec() - Send a REC ELS request
1352 * @fsp: The FCP packet to send the REC request on
1286 */ 1353 */
1287static void fc_fcp_rec(struct fc_fcp_pkt *fsp) 1354static void fc_fcp_rec(struct fc_fcp_pkt *fsp)
1288{ 1355{
1289 struct fc_lport *lp; 1356 struct fc_lport *lport;
1290 struct fc_frame *fp; 1357 struct fc_frame *fp;
1291 struct fc_rport *rport; 1358 struct fc_rport *rport;
1292 struct fc_rport_libfc_priv *rp; 1359 struct fc_rport_libfc_priv *rpriv;
1293 1360
1294 lp = fsp->lp; 1361 lport = fsp->lp;
1295 rport = fsp->rport; 1362 rport = fsp->rport;
1296 rp = rport->dd_data; 1363 rpriv = rport->dd_data;
1297 if (!fsp->seq_ptr || rp->rp_state != RPORT_ST_READY) { 1364 if (!fsp->seq_ptr || rpriv->rp_state != RPORT_ST_READY) {
1298 fsp->status_code = FC_HRD_ERROR; 1365 fsp->status_code = FC_HRD_ERROR;
1299 fsp->io_status = 0; 1366 fsp->io_status = 0;
1300 fc_fcp_complete_locked(fsp); 1367 fc_fcp_complete_locked(fsp);
1301 return; 1368 return;
1302 } 1369 }
1303 fp = fc_frame_alloc(lp, sizeof(struct fc_els_rec)); 1370 fp = fc_fcp_frame_alloc(lport, sizeof(struct fc_els_rec));
1304 if (!fp) 1371 if (!fp)
1305 goto retry; 1372 goto retry;
1306 1373
1307 fr_seq(fp) = fsp->seq_ptr; 1374 fr_seq(fp) = fsp->seq_ptr;
1308 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rport->port_id, 1375 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rport->port_id,
1309 fc_host_port_id(rp->local_port->host), FC_TYPE_ELS, 1376 fc_host_port_id(rpriv->local_port->host), FC_TYPE_ELS,
1310 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); 1377 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
1311 if (lp->tt.elsct_send(lp, rport->port_id, fp, ELS_REC, fc_fcp_rec_resp, 1378 if (lport->tt.elsct_send(lport, rport->port_id, fp, ELS_REC,
1312 fsp, jiffies_to_msecs(FC_SCSI_REC_TOV))) { 1379 fc_fcp_rec_resp, fsp,
1380 jiffies_to_msecs(FC_SCSI_REC_TOV))) {
1313 fc_fcp_pkt_hold(fsp); /* hold while REC outstanding */ 1381 fc_fcp_pkt_hold(fsp); /* hold while REC outstanding */
1314 return; 1382 return;
1315 } 1383 }
1316 fc_frame_free(fp);
1317retry: 1384retry:
1318 if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY) 1385 if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
1319 fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV); 1386 fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
@@ -1321,12 +1388,16 @@ retry:
1321 fc_timeout_error(fsp); 1388 fc_timeout_error(fsp);
1322} 1389}
1323 1390
1324/* 1391/**
1325 * Receive handler for REC ELS frame 1392 * fc_fcp_rec_resp() - Handler for REC ELS responses
1326 * if it is a reject then let the scsi layer to handle 1393 * @seq: The sequence the response is on
1327 * the timeout. if it is a LS_ACC then if the io was not completed 1394 * @fp: The response frame
1328 * then set the timeout and return otherwise complete the exchange 1395 * @arg: The FCP packet the response is on
1329 * and tell the scsi layer to restart the I/O. 1396 *
1397 * If the response is a reject then the scsi layer will handle
1398 * the timeout. If the response is a LS_ACC then if the I/O was not completed
1399 * set the timeout and return. If the I/O was completed then complete the
1400 * exchange and tell the SCSI layer.
1330 */ 1401 */
1331static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) 1402static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
1332{ 1403{
@@ -1338,7 +1409,7 @@ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
1338 u32 offset; 1409 u32 offset;
1339 enum dma_data_direction data_dir; 1410 enum dma_data_direction data_dir;
1340 enum fc_rctl r_ctl; 1411 enum fc_rctl r_ctl;
1341 struct fc_rport_libfc_priv *rp; 1412 struct fc_rport_libfc_priv *rpriv;
1342 1413
1343 if (IS_ERR(fp)) { 1414 if (IS_ERR(fp)) {
1344 fc_fcp_rec_error(fsp, fp); 1415 fc_fcp_rec_error(fsp, fp);
@@ -1361,13 +1432,13 @@ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
1361 /* fall through */ 1432 /* fall through */
1362 case ELS_RJT_UNSUP: 1433 case ELS_RJT_UNSUP:
1363 FC_FCP_DBG(fsp, "device does not support REC\n"); 1434 FC_FCP_DBG(fsp, "device does not support REC\n");
1364 rp = fsp->rport->dd_data; 1435 rpriv = fsp->rport->dd_data;
1365 /* 1436 /*
1366 * if we do not spport RECs or got some bogus 1437 * if we do not spport RECs or got some bogus
1367 * reason then resetup timer so we check for 1438 * reason then resetup timer so we check for
1368 * making progress. 1439 * making progress.
1369 */ 1440 */
1370 rp->flags &= ~FC_RP_FLAGS_REC_SUPPORTED; 1441 rpriv->flags &= ~FC_RP_FLAGS_REC_SUPPORTED;
1371 fc_fcp_timer_set(fsp, FC_SCSI_ER_TIMEOUT); 1442 fc_fcp_timer_set(fsp, FC_SCSI_ER_TIMEOUT);
1372 break; 1443 break;
1373 case ELS_RJT_LOGIC: 1444 case ELS_RJT_LOGIC:
@@ -1464,8 +1535,10 @@ out:
1464 fc_frame_free(fp); 1535 fc_frame_free(fp);
1465} 1536}
1466 1537
1467/* 1538/**
1468 * Handle error response or timeout for REC exchange. 1539 * fc_fcp_rec_error() - Handler for REC errors
1540 * @fsp: The FCP packet the error is on
1541 * @fp: The REC frame
1469 */ 1542 */
1470static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) 1543static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
1471{ 1544{
@@ -1504,10 +1577,9 @@ out:
1504 fc_fcp_pkt_release(fsp); /* drop hold for outstanding REC */ 1577 fc_fcp_pkt_release(fsp); /* drop hold for outstanding REC */
1505} 1578}
1506 1579
1507/* 1580/**
1508 * Time out error routine: 1581 * fc_timeout_error() - Handler for fcp_pkt timeouts
1509 * abort's the I/O close the exchange and 1582 * @fsp: The FCP packt that has timed out
1510 * send completion notification to scsi layer
1511 */ 1583 */
1512static void fc_timeout_error(struct fc_fcp_pkt *fsp) 1584static void fc_timeout_error(struct fc_fcp_pkt *fsp)
1513{ 1585{
@@ -1521,16 +1593,18 @@ static void fc_timeout_error(struct fc_fcp_pkt *fsp)
1521 fc_fcp_send_abort(fsp); 1593 fc_fcp_send_abort(fsp);
1522} 1594}
1523 1595
1524/* 1596/**
1525 * Sequence retransmission request. 1597 * fc_fcp_srr() - Send a SRR request (Sequence Retransmission Request)
1598 * @fsp: The FCP packet the SRR is to be sent on
1599 * @r_ctl: The R_CTL field for the SRR request
1526 * This is called after receiving status but insufficient data, or 1600 * This is called after receiving status but insufficient data, or
1527 * when expecting status but the request has timed out. 1601 * when expecting status but the request has timed out.
1528 */ 1602 */
1529static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset) 1603static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
1530{ 1604{
1531 struct fc_lport *lp = fsp->lp; 1605 struct fc_lport *lport = fsp->lp;
1532 struct fc_rport *rport; 1606 struct fc_rport *rport;
1533 struct fc_rport_libfc_priv *rp; 1607 struct fc_rport_libfc_priv *rpriv;
1534 struct fc_exch *ep = fc_seq_exch(fsp->seq_ptr); 1608 struct fc_exch *ep = fc_seq_exch(fsp->seq_ptr);
1535 struct fc_seq *seq; 1609 struct fc_seq *seq;
1536 struct fcp_srr *srr; 1610 struct fcp_srr *srr;
@@ -1538,12 +1612,13 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
1538 u8 cdb_op; 1612 u8 cdb_op;
1539 1613
1540 rport = fsp->rport; 1614 rport = fsp->rport;
1541 rp = rport->dd_data; 1615 rpriv = rport->dd_data;
1542 cdb_op = fsp->cdb_cmd.fc_cdb[0]; 1616 cdb_op = fsp->cdb_cmd.fc_cdb[0];
1543 1617
1544 if (!(rp->flags & FC_RP_FLAGS_RETRY) || rp->rp_state != RPORT_ST_READY) 1618 if (!(rpriv->flags & FC_RP_FLAGS_RETRY) ||
1619 rpriv->rp_state != RPORT_ST_READY)
1545 goto retry; /* shouldn't happen */ 1620 goto retry; /* shouldn't happen */
1546 fp = fc_frame_alloc(lp, sizeof(*srr)); 1621 fp = fc_fcp_frame_alloc(lport, sizeof(*srr));
1547 if (!fp) 1622 if (!fp)
1548 goto retry; 1623 goto retry;
1549 1624
@@ -1556,15 +1631,14 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
1556 srr->srr_rel_off = htonl(offset); 1631 srr->srr_rel_off = htonl(offset);
1557 1632
1558 fc_fill_fc_hdr(fp, FC_RCTL_ELS4_REQ, rport->port_id, 1633 fc_fill_fc_hdr(fp, FC_RCTL_ELS4_REQ, rport->port_id,
1559 fc_host_port_id(rp->local_port->host), FC_TYPE_FCP, 1634 fc_host_port_id(rpriv->local_port->host), FC_TYPE_FCP,
1560 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); 1635 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
1561 1636
1562 seq = lp->tt.exch_seq_send(lp, fp, fc_fcp_srr_resp, NULL, 1637 seq = lport->tt.exch_seq_send(lport, fp, fc_fcp_srr_resp, NULL,
1563 fsp, jiffies_to_msecs(FC_SCSI_REC_TOV)); 1638 fsp, jiffies_to_msecs(FC_SCSI_REC_TOV));
1564 if (!seq) { 1639 if (!seq)
1565 fc_frame_free(fp);
1566 goto retry; 1640 goto retry;
1567 } 1641
1568 fsp->recov_seq = seq; 1642 fsp->recov_seq = seq;
1569 fsp->xfer_len = offset; 1643 fsp->xfer_len = offset;
1570 fsp->xfer_contig_end = offset; 1644 fsp->xfer_contig_end = offset;
@@ -1575,8 +1649,11 @@ retry:
1575 fc_fcp_retry_cmd(fsp); 1649 fc_fcp_retry_cmd(fsp);
1576} 1650}
1577 1651
1578/* 1652/**
1579 * Handle response from SRR. 1653 * fc_fcp_srr_resp() - Handler for SRR response
1654 * @seq: The sequence the SRR is on
1655 * @fp: The SRR frame
1656 * @arg: The FCP packet the SRR is on
1580 */ 1657 */
1581static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) 1658static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
1582{ 1659{
@@ -1622,6 +1699,11 @@ out:
1622 fc_fcp_pkt_release(fsp); /* drop hold for outstanding SRR */ 1699 fc_fcp_pkt_release(fsp); /* drop hold for outstanding SRR */
1623} 1700}
1624 1701
1702/**
1703 * fc_fcp_srr_error() - Handler for SRR errors
1704 * @fsp: The FCP packet that the SRR error is on
1705 * @fp: The SRR frame
1706 */
1625static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) 1707static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
1626{ 1708{
1627 if (fc_fcp_lock_pkt(fsp)) 1709 if (fc_fcp_lock_pkt(fsp))
@@ -1646,31 +1728,37 @@ out:
1646 fc_fcp_pkt_release(fsp); /* drop hold for outstanding SRR */ 1728 fc_fcp_pkt_release(fsp); /* drop hold for outstanding SRR */
1647} 1729}
1648 1730
1649static inline int fc_fcp_lport_queue_ready(struct fc_lport *lp) 1731/**
1732 * fc_fcp_lport_queue_ready() - Determine if the lport and it's queue is ready
1733 * @lport: The local port to be checked
1734 */
1735static inline int fc_fcp_lport_queue_ready(struct fc_lport *lport)
1650{ 1736{
1651 /* lock ? */ 1737 /* lock ? */
1652 return (lp->state == LPORT_ST_READY) && lp->link_up && !lp->qfull; 1738 return (lport->state == LPORT_ST_READY) &&
1739 lport->link_up && !lport->qfull;
1653} 1740}
1654 1741
1655/** 1742/**
1656 * fc_queuecommand - The queuecommand function of the scsi template 1743 * fc_queuecommand() - The queuecommand function of the SCSI template
1657 * @cmd: struct scsi_cmnd to be executed 1744 * @cmd: The scsi_cmnd to be executed
1658 * @done: Callback function to be called when cmd is completed 1745 * @done: The callback function to be called when the scsi_cmnd is complete
1659 * 1746 *
1660 * this is the i/o strategy routine, called by the scsi layer 1747 * This is the i/o strategy routine, called by the SCSI layer. This routine
1661 * this routine is called with holding the host_lock. 1748 * is called with the host_lock held.
1662 */ 1749 */
1663int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *)) 1750int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
1664{ 1751{
1665 struct fc_lport *lp; 1752 struct fc_lport *lport;
1666 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); 1753 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
1667 struct fc_fcp_pkt *fsp; 1754 struct fc_fcp_pkt *fsp;
1668 struct fc_rport_libfc_priv *rp; 1755 struct fc_rport_libfc_priv *rpriv;
1669 int rval; 1756 int rval;
1670 int rc = 0; 1757 int rc = 0;
1671 struct fcoe_dev_stats *stats; 1758 struct fcoe_dev_stats *stats;
1672 1759
1673 lp = shost_priv(sc_cmd->device->host); 1760 lport = shost_priv(sc_cmd->device->host);
1761 spin_unlock_irq(lport->host->host_lock);
1674 1762
1675 rval = fc_remote_port_chkready(rport); 1763 rval = fc_remote_port_chkready(rport);
1676 if (rval) { 1764 if (rval) {
@@ -1689,14 +1777,16 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
1689 goto out; 1777 goto out;
1690 } 1778 }
1691 1779
1692 rp = rport->dd_data; 1780 rpriv = rport->dd_data;
1693 1781
1694 if (!fc_fcp_lport_queue_ready(lp)) { 1782 if (!fc_fcp_lport_queue_ready(lport)) {
1783 if (lport->qfull)
1784 fc_fcp_can_queue_ramp_down(lport);
1695 rc = SCSI_MLQUEUE_HOST_BUSY; 1785 rc = SCSI_MLQUEUE_HOST_BUSY;
1696 goto out; 1786 goto out;
1697 } 1787 }
1698 1788
1699 fsp = fc_fcp_pkt_alloc(lp, GFP_ATOMIC); 1789 fsp = fc_fcp_pkt_alloc(lport, GFP_ATOMIC);
1700 if (fsp == NULL) { 1790 if (fsp == NULL) {
1701 rc = SCSI_MLQUEUE_HOST_BUSY; 1791 rc = SCSI_MLQUEUE_HOST_BUSY;
1702 goto out; 1792 goto out;
@@ -1706,8 +1796,9 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
1706 * build the libfc request pkt 1796 * build the libfc request pkt
1707 */ 1797 */
1708 fsp->cmd = sc_cmd; /* save the cmd */ 1798 fsp->cmd = sc_cmd; /* save the cmd */
1709 fsp->lp = lp; /* save the softc ptr */ 1799 fsp->lp = lport; /* save the softc ptr */
1710 fsp->rport = rport; /* set the remote port ptr */ 1800 fsp->rport = rport; /* set the remote port ptr */
1801 fsp->xfer_ddp = FC_XID_UNKNOWN;
1711 sc_cmd->scsi_done = done; 1802 sc_cmd->scsi_done = done;
1712 1803
1713 /* 1804 /*
@@ -1719,7 +1810,7 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
1719 /* 1810 /*
1720 * setup the data direction 1811 * setup the data direction
1721 */ 1812 */
1722 stats = fc_lport_get_stats(lp); 1813 stats = fc_lport_get_stats(lport);
1723 if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) { 1814 if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
1724 fsp->req_flags = FC_SRB_READ; 1815 fsp->req_flags = FC_SRB_READ;
1725 stats->InputRequests++; 1816 stats->InputRequests++;
@@ -1733,7 +1824,7 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
1733 stats->ControlRequests++; 1824 stats->ControlRequests++;
1734 } 1825 }
1735 1826
1736 fsp->tgt_flags = rp->flags; 1827 fsp->tgt_flags = rpriv->flags;
1737 1828
1738 init_timer(&fsp->timer); 1829 init_timer(&fsp->timer);
1739 fsp->timer.data = (unsigned long)fsp; 1830 fsp->timer.data = (unsigned long)fsp;
@@ -1743,30 +1834,30 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
1743 * if we get -1 return then put the request in the pending 1834 * if we get -1 return then put the request in the pending
1744 * queue. 1835 * queue.
1745 */ 1836 */
1746 rval = fc_fcp_pkt_send(lp, fsp); 1837 rval = fc_fcp_pkt_send(lport, fsp);
1747 if (rval != 0) { 1838 if (rval != 0) {
1748 fsp->state = FC_SRB_FREE; 1839 fsp->state = FC_SRB_FREE;
1749 fc_fcp_pkt_release(fsp); 1840 fc_fcp_pkt_release(fsp);
1750 rc = SCSI_MLQUEUE_HOST_BUSY; 1841 rc = SCSI_MLQUEUE_HOST_BUSY;
1751 } 1842 }
1752out: 1843out:
1844 spin_lock_irq(lport->host->host_lock);
1753 return rc; 1845 return rc;
1754} 1846}
1755EXPORT_SYMBOL(fc_queuecommand); 1847EXPORT_SYMBOL(fc_queuecommand);
1756 1848
1757/** 1849/**
1758 * fc_io_compl() - Handle responses for completed commands 1850 * fc_io_compl() - Handle responses for completed commands
1759 * @fsp: scsi packet 1851 * @fsp: The FCP packet that is complete
1760 *
1761 * Translates a error to a Linux SCSI error.
1762 * 1852 *
1853 * Translates fcp_pkt errors to a Linux SCSI errors.
1763 * The fcp packet lock must be held when calling. 1854 * The fcp packet lock must be held when calling.
1764 */ 1855 */
1765static void fc_io_compl(struct fc_fcp_pkt *fsp) 1856static void fc_io_compl(struct fc_fcp_pkt *fsp)
1766{ 1857{
1767 struct fc_fcp_internal *si; 1858 struct fc_fcp_internal *si;
1768 struct scsi_cmnd *sc_cmd; 1859 struct scsi_cmnd *sc_cmd;
1769 struct fc_lport *lp; 1860 struct fc_lport *lport;
1770 unsigned long flags; 1861 unsigned long flags;
1771 1862
1772 /* release outstanding ddp context */ 1863 /* release outstanding ddp context */
@@ -1779,30 +1870,23 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
1779 spin_lock_bh(&fsp->scsi_pkt_lock); 1870 spin_lock_bh(&fsp->scsi_pkt_lock);
1780 } 1871 }
1781 1872
1782 lp = fsp->lp; 1873 lport = fsp->lp;
1783 si = fc_get_scsi_internal(lp); 1874 si = fc_get_scsi_internal(lport);
1784 spin_lock_irqsave(lp->host->host_lock, flags); 1875 if (!fsp->cmd)
1785 if (!fsp->cmd) {
1786 spin_unlock_irqrestore(lp->host->host_lock, flags);
1787 return; 1876 return;
1788 }
1789 1877
1790 /* 1878 /*
1791 * if a command timed out while we had to try and throttle IO 1879 * if can_queue ramp down is done then try can_queue ramp up
1792 * and it is now getting cleaned up, then we are about to 1880 * since commands are completing now.
1793 * try again so clear the throttled flag incase we get more
1794 * time outs.
1795 */ 1881 */
1796 if (si->throttled && fsp->state & FC_SRB_NOMEM) 1882 if (si->last_can_queue_ramp_down_time)
1797 si->throttled = 0; 1883 fc_fcp_can_queue_ramp_up(lport);
1798 1884
1799 sc_cmd = fsp->cmd; 1885 sc_cmd = fsp->cmd;
1800 fsp->cmd = NULL; 1886 fsp->cmd = NULL;
1801 1887
1802 if (!sc_cmd->SCp.ptr) { 1888 if (!sc_cmd->SCp.ptr)
1803 spin_unlock_irqrestore(lp->host->host_lock, flags);
1804 return; 1889 return;
1805 }
1806 1890
1807 CMD_SCSI_STATUS(sc_cmd) = fsp->cdb_status; 1891 CMD_SCSI_STATUS(sc_cmd) = fsp->cdb_status;
1808 switch (fsp->status_code) { 1892 switch (fsp->status_code) {
@@ -1814,21 +1898,6 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
1814 sc_cmd->result = DID_OK << 16; 1898 sc_cmd->result = DID_OK << 16;
1815 if (fsp->scsi_resid) 1899 if (fsp->scsi_resid)
1816 CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid; 1900 CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid;
1817 } else if (fsp->cdb_status == QUEUE_FULL) {
1818 struct scsi_device *tmp_sdev;
1819 struct scsi_device *sdev = sc_cmd->device;
1820
1821 shost_for_each_device(tmp_sdev, sdev->host) {
1822 if (tmp_sdev->id != sdev->id)
1823 continue;
1824
1825 if (tmp_sdev->queue_depth > 1) {
1826 scsi_track_queue_full(tmp_sdev,
1827 tmp_sdev->
1828 queue_depth - 1);
1829 }
1830 }
1831 sc_cmd->result = (DID_OK << 16) | fsp->cdb_status;
1832 } else { 1901 } else {
1833 /* 1902 /*
1834 * transport level I/O was ok but scsi 1903 * transport level I/O was ok but scsi
@@ -1846,7 +1915,8 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
1846 * scsi status is good but transport level 1915 * scsi status is good but transport level
1847 * underrun. 1916 * underrun.
1848 */ 1917 */
1849 sc_cmd->result = DID_OK << 16; 1918 sc_cmd->result = (fsp->state & FC_SRB_RCV_STATUS ?
1919 DID_OK : DID_ERROR) << 16;
1850 } else { 1920 } else {
1851 /* 1921 /*
1852 * scsi got underrun, this is an error 1922 * scsi got underrun, this is an error
@@ -1878,63 +1948,46 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
1878 break; 1948 break;
1879 } 1949 }
1880 1950
1951 spin_lock_irqsave(&si->scsi_queue_lock, flags);
1881 list_del(&fsp->list); 1952 list_del(&fsp->list);
1953 spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
1882 sc_cmd->SCp.ptr = NULL; 1954 sc_cmd->SCp.ptr = NULL;
1883 sc_cmd->scsi_done(sc_cmd); 1955 sc_cmd->scsi_done(sc_cmd);
1884 spin_unlock_irqrestore(lp->host->host_lock, flags);
1885 1956
1886 /* release ref from initial allocation in queue command */ 1957 /* release ref from initial allocation in queue command */
1887 fc_fcp_pkt_release(fsp); 1958 fc_fcp_pkt_release(fsp);
1888} 1959}
1889 1960
/**
 * fc_fcp_complete() - complete processing of a fcp packet
 * @fsp: fcp packet
 *
 * Convenience wrapper: takes the packet lock and runs the locked
 * completion path.
 *
 * This function may sleep if a fsp timer is pending.
 * The host lock must not be held by caller.
 */
void fc_fcp_complete(struct fc_fcp_pkt *fsp)
{
	/*
	 * A nonzero return from fc_fcp_lock_pkt() means the packet could
	 * not be locked (presumably it has already been completed and
	 * torn down elsewhere) - nothing left to do here.
	 */
	if (fc_fcp_lock_pkt(fsp))
		return;

	fc_fcp_complete_locked(fsp);
	fc_fcp_unlock_pkt(fsp);
}
EXPORT_SYMBOL(fc_fcp_complete);
1906
1907/**
1908 * fc_eh_abort() - Abort a command 1962 * fc_eh_abort() - Abort a command
1909 * @sc_cmd: scsi command to abort 1963 * @sc_cmd: The SCSI command to abort
1910 * 1964 *
1911 * From scsi host template. 1965 * From SCSI host template.
1912 * send ABTS to the target device and wait for the response 1966 * Send an ABTS to the target device and wait for the response.
1913 * sc_cmd is the pointer to the command to be aborted.
1914 */ 1967 */
1915int fc_eh_abort(struct scsi_cmnd *sc_cmd) 1968int fc_eh_abort(struct scsi_cmnd *sc_cmd)
1916{ 1969{
1917 struct fc_fcp_pkt *fsp; 1970 struct fc_fcp_pkt *fsp;
1918 struct fc_lport *lp; 1971 struct fc_lport *lport;
1919 int rc = FAILED; 1972 int rc = FAILED;
1920 unsigned long flags; 1973 unsigned long flags;
1921 1974
1922 lp = shost_priv(sc_cmd->device->host); 1975 lport = shost_priv(sc_cmd->device->host);
1923 if (lp->state != LPORT_ST_READY) 1976 if (lport->state != LPORT_ST_READY)
1924 return rc; 1977 return rc;
1925 else if (!lp->link_up) 1978 else if (!lport->link_up)
1926 return rc; 1979 return rc;
1927 1980
1928 spin_lock_irqsave(lp->host->host_lock, flags); 1981 spin_lock_irqsave(lport->host->host_lock, flags);
1929 fsp = CMD_SP(sc_cmd); 1982 fsp = CMD_SP(sc_cmd);
1930 if (!fsp) { 1983 if (!fsp) {
1931 /* command completed while scsi eh was setting up */ 1984 /* command completed while scsi eh was setting up */
1932 spin_unlock_irqrestore(lp->host->host_lock, flags); 1985 spin_unlock_irqrestore(lport->host->host_lock, flags);
1933 return SUCCESS; 1986 return SUCCESS;
1934 } 1987 }
1935 /* grab a ref so the fsp and sc_cmd cannot be relased from under us */ 1988 /* grab a ref so the fsp and sc_cmd cannot be relased from under us */
1936 fc_fcp_pkt_hold(fsp); 1989 fc_fcp_pkt_hold(fsp);
1937 spin_unlock_irqrestore(lp->host->host_lock, flags); 1990 spin_unlock_irqrestore(lport->host->host_lock, flags);
1938 1991
1939 if (fc_fcp_lock_pkt(fsp)) { 1992 if (fc_fcp_lock_pkt(fsp)) {
1940 /* completed while we were waiting for timer to be deleted */ 1993 /* completed while we were waiting for timer to be deleted */
@@ -1942,7 +1995,7 @@ int fc_eh_abort(struct scsi_cmnd *sc_cmd)
1942 goto release_pkt; 1995 goto release_pkt;
1943 } 1996 }
1944 1997
1945 rc = fc_fcp_pkt_abort(lp, fsp); 1998 rc = fc_fcp_pkt_abort(fsp);
1946 fc_fcp_unlock_pkt(fsp); 1999 fc_fcp_unlock_pkt(fsp);
1947 2000
1948release_pkt: 2001release_pkt:
@@ -1952,37 +2005,34 @@ release_pkt:
1952EXPORT_SYMBOL(fc_eh_abort); 2005EXPORT_SYMBOL(fc_eh_abort);
1953 2006
1954/** 2007/**
1955 * fc_eh_device_reset() Reset a single LUN 2008 * fc_eh_device_reset() - Reset a single LUN
1956 * @sc_cmd: scsi command 2009 * @sc_cmd: The SCSI command which identifies the device whose
2010 * LUN is to be reset
1957 * 2011 *
1958 * Set from scsi host template to send tm cmd to the target and wait for the 2012 * Set from SCSI host template.
1959 * response.
1960 */ 2013 */
1961int fc_eh_device_reset(struct scsi_cmnd *sc_cmd) 2014int fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
1962{ 2015{
1963 struct fc_lport *lp; 2016 struct fc_lport *lport;
1964 struct fc_fcp_pkt *fsp; 2017 struct fc_fcp_pkt *fsp;
1965 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); 2018 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
1966 int rc = FAILED; 2019 int rc = FAILED;
1967 struct fc_rport_libfc_priv *rp;
1968 int rval; 2020 int rval;
1969 2021
1970 rval = fc_remote_port_chkready(rport); 2022 rval = fc_remote_port_chkready(rport);
1971 if (rval) 2023 if (rval)
1972 goto out; 2024 goto out;
1973 2025
1974 rp = rport->dd_data; 2026 lport = shost_priv(sc_cmd->device->host);
1975 lp = shost_priv(sc_cmd->device->host);
1976 2027
1977 if (lp->state != LPORT_ST_READY) 2028 if (lport->state != LPORT_ST_READY)
1978 return rc; 2029 return rc;
1979 2030
1980 FC_SCSI_DBG(lp, "Resetting rport (%6x)\n", rport->port_id); 2031 FC_SCSI_DBG(lport, "Resetting rport (%6x)\n", rport->port_id);
1981 2032
1982 fsp = fc_fcp_pkt_alloc(lp, GFP_NOIO); 2033 fsp = fc_fcp_pkt_alloc(lport, GFP_NOIO);
1983 if (fsp == NULL) { 2034 if (fsp == NULL) {
1984 printk(KERN_WARNING "libfc: could not allocate scsi_pkt\n"); 2035 printk(KERN_WARNING "libfc: could not allocate scsi_pkt\n");
1985 sc_cmd->result = DID_NO_CONNECT << 16;
1986 goto out; 2036 goto out;
1987 } 2037 }
1988 2038
@@ -1991,13 +2041,13 @@ int fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
1991 * the sc passed in is not setup for execution like when sent 2041 * the sc passed in is not setup for execution like when sent
1992 * through the queuecommand callout. 2042 * through the queuecommand callout.
1993 */ 2043 */
1994 fsp->lp = lp; /* save the softc ptr */ 2044 fsp->lp = lport; /* save the softc ptr */
1995 fsp->rport = rport; /* set the remote port ptr */ 2045 fsp->rport = rport; /* set the remote port ptr */
1996 2046
1997 /* 2047 /*
1998 * flush outstanding commands 2048 * flush outstanding commands
1999 */ 2049 */
2000 rc = fc_lun_reset(lp, fsp, scmd_id(sc_cmd), sc_cmd->device->lun); 2050 rc = fc_lun_reset(lport, fsp, scmd_id(sc_cmd), sc_cmd->device->lun);
2001 fsp->state = FC_SRB_FREE; 2051 fsp->state = FC_SRB_FREE;
2002 fc_fcp_pkt_release(fsp); 2052 fc_fcp_pkt_release(fsp);
2003 2053
@@ -2007,38 +2057,39 @@ out:
2007EXPORT_SYMBOL(fc_eh_device_reset); 2057EXPORT_SYMBOL(fc_eh_device_reset);
2008 2058
2009/** 2059/**
2010 * fc_eh_host_reset() - The reset function will reset the ports on the host. 2060 * fc_eh_host_reset() - Reset a Scsi_Host.
2011 * @sc_cmd: scsi command 2061 * @sc_cmd: The SCSI command that identifies the SCSI host to be reset
2012 */ 2062 */
2013int fc_eh_host_reset(struct scsi_cmnd *sc_cmd) 2063int fc_eh_host_reset(struct scsi_cmnd *sc_cmd)
2014{ 2064{
2015 struct Scsi_Host *shost = sc_cmd->device->host; 2065 struct Scsi_Host *shost = sc_cmd->device->host;
2016 struct fc_lport *lp = shost_priv(shost); 2066 struct fc_lport *lport = shost_priv(shost);
2017 unsigned long wait_tmo; 2067 unsigned long wait_tmo;
2018 2068
2019 FC_SCSI_DBG(lp, "Resetting host\n"); 2069 FC_SCSI_DBG(lport, "Resetting host\n");
2020 2070
2021 lp->tt.lport_reset(lp); 2071 lport->tt.lport_reset(lport);
2022 wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT; 2072 wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT;
2023 while (!fc_fcp_lport_queue_ready(lp) && time_before(jiffies, wait_tmo)) 2073 while (!fc_fcp_lport_queue_ready(lport) && time_before(jiffies,
2074 wait_tmo))
2024 msleep(1000); 2075 msleep(1000);
2025 2076
2026 if (fc_fcp_lport_queue_ready(lp)) { 2077 if (fc_fcp_lport_queue_ready(lport)) {
2027 shost_printk(KERN_INFO, shost, "libfc: Host reset succeeded " 2078 shost_printk(KERN_INFO, shost, "libfc: Host reset succeeded "
2028 "on port (%6x)\n", fc_host_port_id(lp->host)); 2079 "on port (%6x)\n", fc_host_port_id(lport->host));
2029 return SUCCESS; 2080 return SUCCESS;
2030 } else { 2081 } else {
2031 shost_printk(KERN_INFO, shost, "libfc: Host reset failed, " 2082 shost_printk(KERN_INFO, shost, "libfc: Host reset failed, "
2032 "port (%6x) is not ready.\n", 2083 "port (%6x) is not ready.\n",
2033 fc_host_port_id(lp->host)); 2084 fc_host_port_id(lport->host));
2034 return FAILED; 2085 return FAILED;
2035 } 2086 }
2036} 2087}
2037EXPORT_SYMBOL(fc_eh_host_reset); 2088EXPORT_SYMBOL(fc_eh_host_reset);
2038 2089
2039/** 2090/**
2040 * fc_slave_alloc() - configure queue depth 2091 * fc_slave_alloc() - Configure the queue depth of a Scsi_Host
2041 * @sdev: scsi device 2092 * @sdev: The SCSI device that identifies the SCSI host
2042 * 2093 *
2043 * Configures queue depth based on host's cmd_per_len. If not set 2094 * Configures queue depth based on host's cmd_per_len. If not set
2044 * then we use the libfc default. 2095 * then we use the libfc default.
@@ -2046,29 +2097,50 @@ EXPORT_SYMBOL(fc_eh_host_reset);
2046int fc_slave_alloc(struct scsi_device *sdev) 2097int fc_slave_alloc(struct scsi_device *sdev)
2047{ 2098{
2048 struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); 2099 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
2049 int queue_depth;
2050 2100
2051 if (!rport || fc_remote_port_chkready(rport)) 2101 if (!rport || fc_remote_port_chkready(rport))
2052 return -ENXIO; 2102 return -ENXIO;
2053 2103
2054 if (sdev->tagged_supported) { 2104 if (sdev->tagged_supported)
2055 if (sdev->host->hostt->cmd_per_lun) 2105 scsi_activate_tcq(sdev, FC_FCP_DFLT_QUEUE_DEPTH);
2056 queue_depth = sdev->host->hostt->cmd_per_lun; 2106 else
2057 else 2107 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev),
2058 queue_depth = FC_FCP_DFLT_QUEUE_DEPTH; 2108 FC_FCP_DFLT_QUEUE_DEPTH);
2059 scsi_activate_tcq(sdev, queue_depth); 2109
2060 }
2061 return 0; 2110 return 0;
2062} 2111}
2063EXPORT_SYMBOL(fc_slave_alloc); 2112EXPORT_SYMBOL(fc_slave_alloc);
2064 2113
2065int fc_change_queue_depth(struct scsi_device *sdev, int qdepth) 2114/**
2115 * fc_change_queue_depth() - Change a device's queue depth
2116 * @sdev: The SCSI device whose queue depth is to change
2117 * @qdepth: The new queue depth
2118 * @reason: The resason for the change
2119 */
2120int fc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
2066{ 2121{
2067 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); 2122 switch (reason) {
2123 case SCSI_QDEPTH_DEFAULT:
2124 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
2125 break;
2126 case SCSI_QDEPTH_QFULL:
2127 scsi_track_queue_full(sdev, qdepth);
2128 break;
2129 case SCSI_QDEPTH_RAMP_UP:
2130 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
2131 break;
2132 default:
2133 return -EOPNOTSUPP;
2134 }
2068 return sdev->queue_depth; 2135 return sdev->queue_depth;
2069} 2136}
2070EXPORT_SYMBOL(fc_change_queue_depth); 2137EXPORT_SYMBOL(fc_change_queue_depth);
2071 2138
2139/**
2140 * fc_change_queue_type() - Change a device's queue type
2141 * @sdev: The SCSI device whose queue depth is to change
2142 * @tag_type: Identifier for queue type
2143 */
2072int fc_change_queue_type(struct scsi_device *sdev, int tag_type) 2144int fc_change_queue_type(struct scsi_device *sdev, int tag_type)
2073{ 2145{
2074 if (sdev->tagged_supported) { 2146 if (sdev->tagged_supported) {
@@ -2084,39 +2156,71 @@ int fc_change_queue_type(struct scsi_device *sdev, int tag_type)
2084} 2156}
2085EXPORT_SYMBOL(fc_change_queue_type); 2157EXPORT_SYMBOL(fc_change_queue_type);
2086 2158
2087void fc_fcp_destroy(struct fc_lport *lp) 2159/**
2160 * fc_fcp_destory() - Tear down the FCP layer for a given local port
2161 * @lport: The local port that no longer needs the FCP layer
2162 */
2163void fc_fcp_destroy(struct fc_lport *lport)
2088{ 2164{
2089 struct fc_fcp_internal *si = fc_get_scsi_internal(lp); 2165 struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
2090 2166
2091 if (!list_empty(&si->scsi_pkt_queue)) 2167 if (!list_empty(&si->scsi_pkt_queue))
2092 printk(KERN_ERR "libfc: Leaked SCSI packets when destroying " 2168 printk(KERN_ERR "libfc: Leaked SCSI packets when destroying "
2093 "port (%6x)\n", fc_host_port_id(lp->host)); 2169 "port (%6x)\n", fc_host_port_id(lport->host));
2094 2170
2095 mempool_destroy(si->scsi_pkt_pool); 2171 mempool_destroy(si->scsi_pkt_pool);
2096 kfree(si); 2172 kfree(si);
2097 lp->scsi_priv = NULL; 2173 lport->scsi_priv = NULL;
2098} 2174}
2099EXPORT_SYMBOL(fc_fcp_destroy); 2175EXPORT_SYMBOL(fc_fcp_destroy);
2100 2176
2101int fc_fcp_init(struct fc_lport *lp) 2177int fc_setup_fcp()
2178{
2179 int rc = 0;
2180
2181 scsi_pkt_cachep = kmem_cache_create("libfc_fcp_pkt",
2182 sizeof(struct fc_fcp_pkt),
2183 0, SLAB_HWCACHE_ALIGN, NULL);
2184 if (!scsi_pkt_cachep) {
2185 printk(KERN_ERR "libfc: Unable to allocate SRB cache, "
2186 "module load failed!");
2187 rc = -ENOMEM;
2188 }
2189
2190 return rc;
2191}
2192
2193void fc_destroy_fcp()
2194{
2195 if (scsi_pkt_cachep)
2196 kmem_cache_destroy(scsi_pkt_cachep);
2197}
2198
2199/**
2200 * fc_fcp_init() - Initialize the FCP layer for a local port
 2201 * @lport: The local port to initialize the FCP layer for
2202 */
2203int fc_fcp_init(struct fc_lport *lport)
2102{ 2204{
2103 int rc; 2205 int rc;
2104 struct fc_fcp_internal *si; 2206 struct fc_fcp_internal *si;
2105 2207
2106 if (!lp->tt.fcp_cmd_send) 2208 if (!lport->tt.fcp_cmd_send)
2107 lp->tt.fcp_cmd_send = fc_fcp_cmd_send; 2209 lport->tt.fcp_cmd_send = fc_fcp_cmd_send;
2108 2210
2109 if (!lp->tt.fcp_cleanup) 2211 if (!lport->tt.fcp_cleanup)
2110 lp->tt.fcp_cleanup = fc_fcp_cleanup; 2212 lport->tt.fcp_cleanup = fc_fcp_cleanup;
2111 2213
2112 if (!lp->tt.fcp_abort_io) 2214 if (!lport->tt.fcp_abort_io)
2113 lp->tt.fcp_abort_io = fc_fcp_abort_io; 2215 lport->tt.fcp_abort_io = fc_fcp_abort_io;
2114 2216
2115 si = kzalloc(sizeof(struct fc_fcp_internal), GFP_KERNEL); 2217 si = kzalloc(sizeof(struct fc_fcp_internal), GFP_KERNEL);
2116 if (!si) 2218 if (!si)
2117 return -ENOMEM; 2219 return -ENOMEM;
2118 lp->scsi_priv = si; 2220 lport->scsi_priv = si;
2221 si->max_can_queue = lport->host->can_queue;
2119 INIT_LIST_HEAD(&si->scsi_pkt_queue); 2222 INIT_LIST_HEAD(&si->scsi_pkt_queue);
2223 spin_lock_init(&si->scsi_queue_lock);
2120 2224
2121 si->scsi_pkt_pool = mempool_create_slab_pool(2, scsi_pkt_cachep); 2225 si->scsi_pkt_pool = mempool_create_slab_pool(2, scsi_pkt_cachep);
2122 if (!si->scsi_pkt_pool) { 2226 if (!si->scsi_pkt_pool) {
@@ -2130,42 +2234,3 @@ free_internal:
2130 return rc; 2234 return rc;
2131} 2235}
2132EXPORT_SYMBOL(fc_fcp_init); 2236EXPORT_SYMBOL(fc_fcp_init);
2133
2134static int __init libfc_init(void)
2135{
2136 int rc;
2137
2138 scsi_pkt_cachep = kmem_cache_create("libfc_fcp_pkt",
2139 sizeof(struct fc_fcp_pkt),
2140 0, SLAB_HWCACHE_ALIGN, NULL);
2141 if (scsi_pkt_cachep == NULL) {
2142 printk(KERN_ERR "libfc: Unable to allocate SRB cache, "
2143 "module load failed!");
2144 return -ENOMEM;
2145 }
2146
2147 rc = fc_setup_exch_mgr();
2148 if (rc)
2149 goto destroy_pkt_cache;
2150
2151 rc = fc_setup_rport();
2152 if (rc)
2153 goto destroy_em;
2154
2155 return rc;
2156destroy_em:
2157 fc_destroy_exch_mgr();
2158destroy_pkt_cache:
2159 kmem_cache_destroy(scsi_pkt_cachep);
2160 return rc;
2161}
2162
/**
 * libfc_exit() - Module exit point
 *
 * Undoes libfc_init(): frees the SCSI packet slab cache and tears
 * down the exchange-manager and remote-port subsystems.
 */
static void __exit libfc_exit(void)
{
	kmem_cache_destroy(scsi_pkt_cachep);
	fc_destroy_exch_mgr();
	fc_destroy_rport();
}
2169
2170module_init(libfc_init);
2171module_exit(libfc_exit);