author    | Robert Love <robert.w.love@intel.com>     | 2009-11-03 14:47:39 -0500
committer | James Bottomley <James.Bottomley@suse.de> | 2009-12-04 13:01:07 -0500
commit    | 3a3b42bf89a9b90ae9ed2c57fdc378e5473a0ef9 (patch)
tree      | 6d41d669a3c7b4a3bb5219ded856251c148e7ab6 /drivers/scsi/libfc/fc_fcp.c
parent    | a51ab39606042e76a483547620699530caa12c40 (diff)
[SCSI] libfc: Formatting cleanups across libfc
This patch makes a variety of cleanup changes to all libfc files.
This patch adds kernel-doc headers to all functions lacking them
and attempts to better format existing headers. It also adds kernel-doc
headers to structures.
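As an illustration, the header added for fc_fcp_timer_set() in the hunks
below takes the standard kernel-doc form:
    /**
     * fc_fcp_timer_set() - Start a timer for a fcp_pkt
     * @fsp: The FCP packet to start a timer for
     * @delay: The timeout period for the timer
     */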
This patch ensures that the current naming conventions for local ports,
remote ports and remote port private data are upheld in the following
manner:
struct                      instance (i.e. variable name)
--------------------------------------------------
fc_lport                    lport
fc_rport                    rport
fc_rport_libfc_priv         rpriv
fc_rport_priv               rdata
I also renamed dns_rp and ptp_rp to dns_rdata and ptp_rdata
respectively.
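For instance, the local declarations at the top of fc_fcp_timeout() in the
hunks below now read:
    struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data;
    struct fc_rport *rport = fsp->rport;
    struct fc_rport_libfc_priv *rpriv = rport->dd_data;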
I used emacs 'indent-region' and 'tabify' on all libfc files
to correct spacing alignments.
I feel sorry for anyone attempting to review this patch.
Signed-off-by: Robert Love <robert.w.love@intel.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
Diffstat (limited to 'drivers/scsi/libfc/fc_fcp.c')
-rw-r--r-- | drivers/scsi/libfc/fc_fcp.c | 667
1 file changed, 379 insertions, 288 deletions
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 98279fe0d0c7..970b54f653b7 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -67,10 +67,16 @@ struct kmem_cache *scsi_pkt_cachep; | |||
67 | #define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status) | 67 | #define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status) |
68 | #define CMD_RESID_LEN(Cmnd) ((Cmnd)->SCp.buffers_residual) | 68 | #define CMD_RESID_LEN(Cmnd) ((Cmnd)->SCp.buffers_residual) |
69 | 69 | ||
70 | /** | ||
71 | * struct fc_fcp_internal - FCP layer internal data | ||
72 | * @scsi_pkt_pool: Memory pool to draw FCP packets from | ||
73 | * @scsi_pkt_queue: Current FCP packets | ||
74 | * @throttled: The FCP packet queue is throttled | ||
75 | */ | ||
70 | struct fc_fcp_internal { | 76 | struct fc_fcp_internal { |
71 | mempool_t *scsi_pkt_pool; | 77 | mempool_t *scsi_pkt_pool; |
72 | struct list_head scsi_pkt_queue; | 78 | struct list_head scsi_pkt_queue; |
73 | u8 throttled; | 79 | u8 throttled; |
74 | }; | 80 | }; |
75 | 81 | ||
76 | #define fc_get_scsi_internal(x) ((struct fc_fcp_internal *)(x)->scsi_priv) | 82 | #define fc_get_scsi_internal(x) ((struct fc_fcp_internal *)(x)->scsi_priv) |
@@ -84,9 +90,9 @@ static void fc_fcp_recv(struct fc_seq *, struct fc_frame *, void *); | |||
84 | static void fc_fcp_resp(struct fc_fcp_pkt *, struct fc_frame *); | 90 | static void fc_fcp_resp(struct fc_fcp_pkt *, struct fc_frame *); |
85 | static void fc_fcp_complete_locked(struct fc_fcp_pkt *); | 91 | static void fc_fcp_complete_locked(struct fc_fcp_pkt *); |
86 | static void fc_tm_done(struct fc_seq *, struct fc_frame *, void *); | 92 | static void fc_tm_done(struct fc_seq *, struct fc_frame *, void *); |
87 | static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp); | 93 | static void fc_fcp_error(struct fc_fcp_pkt *, struct fc_frame *); |
88 | static void fc_timeout_error(struct fc_fcp_pkt *); | 94 | static void fc_timeout_error(struct fc_fcp_pkt *); |
89 | static void fc_fcp_timeout(unsigned long data); | 95 | static void fc_fcp_timeout(unsigned long); |
90 | static void fc_fcp_rec(struct fc_fcp_pkt *); | 96 | static void fc_fcp_rec(struct fc_fcp_pkt *); |
91 | static void fc_fcp_rec_error(struct fc_fcp_pkt *, struct fc_frame *); | 97 | static void fc_fcp_rec_error(struct fc_fcp_pkt *, struct fc_frame *); |
92 | static void fc_fcp_rec_resp(struct fc_seq *, struct fc_frame *, void *); | 98 | static void fc_fcp_rec_resp(struct fc_seq *, struct fc_frame *, void *); |
@@ -125,23 +131,22 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *); | |||
125 | #define FC_FCP_DFLT_QUEUE_DEPTH 32 | 131 | #define FC_FCP_DFLT_QUEUE_DEPTH 32 |
126 | 132 | ||
127 | /** | 133 | /** |
128 | * fc_fcp_pkt_alloc - allocation routine for scsi_pkt packet | 134 | * fc_fcp_pkt_alloc() - Allocate a fcp_pkt |
129 | * @lp: fc lport struct | 135 | * @lport: The local port that the FCP packet is for |
130 | * @gfp: gfp flags for allocation | 136 | * @gfp: GFP flags for allocation |
131 | * | 137 | * |
132 | * This is used by upper layer scsi driver. | 138 | * Return value: fcp_pkt structure or null on allocation failure. |
133 | * Return Value : scsi_pkt structure or null on allocation failure. | 139 | * Context: Can be called from process context, no lock is required. |
134 | * Context : call from process context. no locking required. | ||
135 | */ | 140 | */ |
136 | static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lp, gfp_t gfp) | 141 | static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lport, gfp_t gfp) |
137 | { | 142 | { |
138 | struct fc_fcp_internal *si = fc_get_scsi_internal(lp); | 143 | struct fc_fcp_internal *si = fc_get_scsi_internal(lport); |
139 | struct fc_fcp_pkt *fsp; | 144 | struct fc_fcp_pkt *fsp; |
140 | 145 | ||
141 | fsp = mempool_alloc(si->scsi_pkt_pool, gfp); | 146 | fsp = mempool_alloc(si->scsi_pkt_pool, gfp); |
142 | if (fsp) { | 147 | if (fsp) { |
143 | memset(fsp, 0, sizeof(*fsp)); | 148 | memset(fsp, 0, sizeof(*fsp)); |
144 | fsp->lp = lp; | 149 | fsp->lp = lport; |
145 | atomic_set(&fsp->ref_cnt, 1); | 150 | atomic_set(&fsp->ref_cnt, 1); |
146 | init_timer(&fsp->timer); | 151 | init_timer(&fsp->timer); |
147 | INIT_LIST_HEAD(&fsp->list); | 152 | INIT_LIST_HEAD(&fsp->list); |
@@ -151,12 +156,11 @@ static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lp, gfp_t gfp) | |||
151 | } | 156 | } |
152 | 157 | ||
153 | /** | 158 | /** |
154 | * fc_fcp_pkt_release() - release hold on scsi_pkt packet | 159 | * fc_fcp_pkt_release() - Release hold on a fcp_pkt |
155 | * @fsp: fcp packet struct | 160 | * @fsp: The FCP packet to be released |
156 | * | 161 | * |
157 | * This is used by upper layer scsi driver. | 162 | * Context: Can be called from process or interrupt context, |
158 | * Context : call from process and interrupt context. | 163 | * no lock is required. |
159 | * no locking required | ||
160 | */ | 164 | */ |
161 | static void fc_fcp_pkt_release(struct fc_fcp_pkt *fsp) | 165 | static void fc_fcp_pkt_release(struct fc_fcp_pkt *fsp) |
162 | { | 166 | { |
@@ -167,20 +171,25 @@ static void fc_fcp_pkt_release(struct fc_fcp_pkt *fsp) | |||
167 | } | 171 | } |
168 | } | 172 | } |
169 | 173 | ||
174 | /** | ||
175 | * fc_fcp_pkt_hold() - Hold a fcp_pkt | ||
176 | * @fsp: The FCP packet to be held | ||
177 | */ | ||
170 | static void fc_fcp_pkt_hold(struct fc_fcp_pkt *fsp) | 178 | static void fc_fcp_pkt_hold(struct fc_fcp_pkt *fsp) |
171 | { | 179 | { |
172 | atomic_inc(&fsp->ref_cnt); | 180 | atomic_inc(&fsp->ref_cnt); |
173 | } | 181 | } |
174 | 182 | ||
175 | /** | 183 | /** |
176 | * fc_fcp_pkt_destory() - release hold on scsi_pkt packet | 184 | * fc_fcp_pkt_destory() - Release hold on a fcp_pkt |
177 | * @seq: exchange sequence | 185 | * @seq: The sequence that the FCP packet is on (required by destructor API) |
178 | * @fsp: fcp packet struct | 186 | * @fsp: The FCP packet to be released |
187 | * | ||
188 | * This routine is called by a destructor callback in the exch_seq_send() | ||
189 | * routine of the libfc Transport Template. The 'struct fc_seq' is a required | ||
190 | * argument even though it is not used by this routine. | ||
179 | * | 191 | * |
180 | * Release hold on scsi_pkt packet set to keep scsi_pkt | 192 | * Context: No locking required. |
181 | * till EM layer exch resource is not freed. | ||
182 | * Context : called from from EM layer. | ||
183 | * no locking required | ||
184 | */ | 193 | */ |
185 | static void fc_fcp_pkt_destroy(struct fc_seq *seq, void *fsp) | 194 | static void fc_fcp_pkt_destroy(struct fc_seq *seq, void *fsp) |
186 | { | 195 | { |
@@ -188,10 +197,10 @@ static void fc_fcp_pkt_destroy(struct fc_seq *seq, void *fsp) | |||
188 | } | 197 | } |
189 | 198 | ||
190 | /** | 199 | /** |
191 | * fc_fcp_lock_pkt() - lock a packet and get a ref to it. | 200 | * fc_fcp_lock_pkt() - Lock a fcp_pkt and increase its reference count |
192 | * @fsp: fcp packet | 201 | * @fsp: The FCP packet to be locked and incremented |
193 | * | 202 | * |
194 | * We should only return error if we return a command to scsi-ml before | 203 | * We should only return error if we return a command to SCSI-ml before |
195 | * getting a response. This can happen in cases where we send a abort, but | 204 | * getting a response. This can happen in cases where we send a abort, but |
196 | * do not wait for the response and the abort and command can be passing | 205 | * do not wait for the response and the abort and command can be passing |
197 | * each other on the wire/network-layer. | 206 | * each other on the wire/network-layer. |
@@ -216,18 +225,33 @@ static inline int fc_fcp_lock_pkt(struct fc_fcp_pkt *fsp) | |||
216 | return 0; | 225 | return 0; |
217 | } | 226 | } |
218 | 227 | ||
228 | /** | ||
229 | * fc_fcp_unlock_pkt() - Release a fcp_pkt's lock and decrement its | ||
230 | * reference count | ||
231 | * @fsp: The FCP packet to be unlocked and decremented | ||
232 | */ | ||
219 | static inline void fc_fcp_unlock_pkt(struct fc_fcp_pkt *fsp) | 233 | static inline void fc_fcp_unlock_pkt(struct fc_fcp_pkt *fsp) |
220 | { | 234 | { |
221 | spin_unlock_bh(&fsp->scsi_pkt_lock); | 235 | spin_unlock_bh(&fsp->scsi_pkt_lock); |
222 | fc_fcp_pkt_release(fsp); | 236 | fc_fcp_pkt_release(fsp); |
223 | } | 237 | } |
224 | 238 | ||
239 | /** | ||
240 | * fc_fcp_timer_set() - Start a timer for a fcp_pkt | ||
241 | * @fsp: The FCP packet to start a timer for | ||
242 | * @delay: The timeout period for the timer | ||
243 | */ | ||
225 | static void fc_fcp_timer_set(struct fc_fcp_pkt *fsp, unsigned long delay) | 244 | static void fc_fcp_timer_set(struct fc_fcp_pkt *fsp, unsigned long delay) |
226 | { | 245 | { |
227 | if (!(fsp->state & FC_SRB_COMPL)) | 246 | if (!(fsp->state & FC_SRB_COMPL)) |
228 | mod_timer(&fsp->timer, jiffies + delay); | 247 | mod_timer(&fsp->timer, jiffies + delay); |
229 | } | 248 | } |
230 | 249 | ||
250 | /** | ||
251 | * fc_fcp_send_abort() - Send an abort for exchanges associated with a | ||
252 | * fcp_pkt | ||
253 | * @fsp: The FCP packet to abort exchanges on | ||
254 | */ | ||
231 | static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp) | 255 | static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp) |
232 | { | 256 | { |
233 | if (!fsp->seq_ptr) | 257 | if (!fsp->seq_ptr) |
@@ -237,9 +261,14 @@ static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp) | |||
237 | return fsp->lp->tt.seq_exch_abort(fsp->seq_ptr, 0); | 261 | return fsp->lp->tt.seq_exch_abort(fsp->seq_ptr, 0); |
238 | } | 262 | } |
239 | 263 | ||
240 | /* | 264 | /** |
241 | * Retry command. | 265 | * fc_fcp_retry_cmd() - Retry a fcp_pkt |
242 | * An abort isn't needed. | 266 | * @fsp: The FCP packet to be retried |
267 | * | ||
268 | * Sets the status code to be FC_ERROR and then calls | ||
269 | * fc_fcp_complete_locked() which in turn calls fc_io_compl(). | ||
270 | * fc_io_compl() will notify the SCSI-ml that the I/O is done. | ||
271 | * The SCSI-ml will retry the command. | ||
243 | */ | 272 | */ |
244 | static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp) | 273 | static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp) |
245 | { | 274 | { |
@@ -254,43 +283,35 @@ static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp) | |||
254 | fc_fcp_complete_locked(fsp); | 283 | fc_fcp_complete_locked(fsp); |
255 | } | 284 | } |
256 | 285 | ||
257 | /* | 286 | /** |
258 | * fc_fcp_ddp_setup - calls to LLD's ddp_setup to set up DDP | 287 | * fc_fcp_ddp_setup() - Calls a LLD's ddp_setup routine to set up DDP context |
259 | * transfer for a read I/O indicated by the fc_fcp_pkt. | 288 | * @fsp: The FCP packet that will manage the DDP frames |
260 | * @fsp: ptr to the fc_fcp_pkt | 289 | * @xid: The XID that will be used for the DDP exchange |
261 | * | ||
262 | * This is called in exch_seq_send() when we have a newly allocated | ||
263 | * exchange with a valid exchange id to setup ddp. | ||
264 | * | ||
265 | * returns: none | ||
266 | */ | 290 | */ |
267 | void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid) | 291 | void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid) |
268 | { | 292 | { |
269 | struct fc_lport *lp; | 293 | struct fc_lport *lport; |
270 | 294 | ||
271 | if (!fsp) | 295 | if (!fsp) |
272 | return; | 296 | return; |
273 | 297 | ||
274 | lp = fsp->lp; | 298 | lport = fsp->lp; |
275 | if ((fsp->req_flags & FC_SRB_READ) && | 299 | if ((fsp->req_flags & FC_SRB_READ) && |
276 | (lp->lro_enabled) && (lp->tt.ddp_setup)) { | 300 | (lport->lro_enabled) && (lport->tt.ddp_setup)) { |
277 | if (lp->tt.ddp_setup(lp, xid, scsi_sglist(fsp->cmd), | 301 | if (lport->tt.ddp_setup(lport, xid, scsi_sglist(fsp->cmd), |
278 | scsi_sg_count(fsp->cmd))) | 302 | scsi_sg_count(fsp->cmd))) |
279 | fsp->xfer_ddp = xid; | 303 | fsp->xfer_ddp = xid; |
280 | } | 304 | } |
281 | } | 305 | } |
282 | 306 | ||
283 | /* | 307 | /** |
284 | * fc_fcp_ddp_done - calls to LLD's ddp_done to release any | 308 | * fc_fcp_ddp_done() - Calls a LLD's ddp_done routine to release any |
285 | * DDP related resources for this I/O if it is initialized | 309 | * DDP related resources for a fcp_pkt |
286 | * as a ddp transfer | 310 | * @fsp: The FCP packet that DDP had been used on |
287 | * @fsp: ptr to the fc_fcp_pkt | ||
288 | * | ||
289 | * returns: none | ||
290 | */ | 311 | */ |
291 | static void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp) | 312 | static void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp) |
292 | { | 313 | { |
293 | struct fc_lport *lp; | 314 | struct fc_lport *lport; |
294 | 315 | ||
295 | if (!fsp) | 316 | if (!fsp) |
296 | return; | 317 | return; |
@@ -298,22 +319,22 @@ static void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp) | |||
298 | if (fsp->xfer_ddp == FC_XID_UNKNOWN) | 319 | if (fsp->xfer_ddp == FC_XID_UNKNOWN) |
299 | return; | 320 | return; |
300 | 321 | ||
301 | lp = fsp->lp; | 322 | lport = fsp->lp; |
302 | if (lp->tt.ddp_done) { | 323 | if (lport->tt.ddp_done) { |
303 | fsp->xfer_len = lp->tt.ddp_done(lp, fsp->xfer_ddp); | 324 | fsp->xfer_len = lport->tt.ddp_done(lport, fsp->xfer_ddp); |
304 | fsp->xfer_ddp = FC_XID_UNKNOWN; | 325 | fsp->xfer_ddp = FC_XID_UNKNOWN; |
305 | } | 326 | } |
306 | } | 327 | } |
307 | 328 | ||
308 | 329 | /** | |
309 | /* | 330 | * fc_fcp_recv_data() - Handler for receiving SCSI-FCP data from a target |
310 | * Receive SCSI data from target. | 331 | * @fsp: The FCP packet the data is on |
311 | * Called after receiving solicited data. | 332 | * @fp: The data frame |
312 | */ | 333 | */ |
313 | static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp) | 334 | static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp) |
314 | { | 335 | { |
315 | struct scsi_cmnd *sc = fsp->cmd; | 336 | struct scsi_cmnd *sc = fsp->cmd; |
316 | struct fc_lport *lp = fsp->lp; | 337 | struct fc_lport *lport = fsp->lp; |
317 | struct fcoe_dev_stats *stats; | 338 | struct fcoe_dev_stats *stats; |
318 | struct fc_frame_header *fh; | 339 | struct fc_frame_header *fh; |
319 | size_t start_offset; | 340 | size_t start_offset; |
@@ -363,13 +384,13 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp) | |||
363 | 384 | ||
364 | if (~crc != le32_to_cpu(fr_crc(fp))) { | 385 | if (~crc != le32_to_cpu(fr_crc(fp))) { |
365 | crc_err: | 386 | crc_err: |
366 | stats = fc_lport_get_stats(lp); | 387 | stats = fc_lport_get_stats(lport); |
367 | stats->ErrorFrames++; | 388 | stats->ErrorFrames++; |
368 | /* FIXME - per cpu count, not total count! */ | 389 | /* FIXME - per cpu count, not total count! */ |
369 | if (stats->InvalidCRCCount++ < 5) | 390 | if (stats->InvalidCRCCount++ < 5) |
370 | printk(KERN_WARNING "libfc: CRC error on data " | 391 | printk(KERN_WARNING "libfc: CRC error on data " |
371 | "frame for port (%6x)\n", | 392 | "frame for port (%6x)\n", |
372 | fc_host_port_id(lp->host)); | 393 | fc_host_port_id(lport->host)); |
373 | /* | 394 | /* |
374 | * Assume the frame is total garbage. | 395 | * Assume the frame is total garbage. |
375 | * We may have copied it over the good part | 396 | * We may have copied it over the good part |
@@ -397,18 +418,17 @@ crc_err: | |||
397 | } | 418 | } |
398 | 419 | ||
399 | /** | 420 | /** |
400 | * fc_fcp_send_data() - Send SCSI data to target. | 421 | * fc_fcp_send_data() - Send SCSI data to a target |
401 | * @fsp: ptr to fc_fcp_pkt | 422 | * @fsp: The FCP packet the data is on |
402 | * @sp: ptr to this sequence | 423 | * @sp: The sequence the data is to be sent on |
403 | * @offset: starting offset for this data request | 424 | * @offset: The starting offset for this data request |
404 | * @seq_blen: the burst length for this data request | 425 | * @seq_blen: The burst length for this data request |
405 | * | 426 | * |
406 | * Called after receiving a Transfer Ready data descriptor. | 427 | * Called after receiving a Transfer Ready data descriptor. |
407 | * if LLD is capable of seq offload then send down seq_blen | 428 | * If the LLD is capable of sequence offload then send down the |
408 | * size of data in single frame, otherwise send multiple FC | 429 | * seq_blen ammount of data in single frame, otherwise send |
409 | * frames of max FC frame payload supported by target port. | 430 | * multiple frames of the maximum frame payload supported by |
410 | * | 431 | * the target port. |
411 | * Returns : 0 for success. | ||
412 | */ | 432 | */ |
413 | static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, | 433 | static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, |
414 | size_t offset, size_t seq_blen) | 434 | size_t offset, size_t seq_blen) |
@@ -417,7 +437,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, | |||
417 | struct scsi_cmnd *sc; | 437 | struct scsi_cmnd *sc; |
418 | struct scatterlist *sg; | 438 | struct scatterlist *sg; |
419 | struct fc_frame *fp = NULL; | 439 | struct fc_frame *fp = NULL; |
420 | struct fc_lport *lp = fsp->lp; | 440 | struct fc_lport *lport = fsp->lp; |
421 | size_t remaining; | 441 | size_t remaining; |
422 | size_t t_blen; | 442 | size_t t_blen; |
423 | size_t tlen; | 443 | size_t tlen; |
@@ -426,7 +446,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, | |||
426 | int error; | 446 | int error; |
427 | void *data = NULL; | 447 | void *data = NULL; |
428 | void *page_addr; | 448 | void *page_addr; |
429 | int using_sg = lp->sg_supp; | 449 | int using_sg = lport->sg_supp; |
430 | u32 f_ctl; | 450 | u32 f_ctl; |
431 | 451 | ||
432 | WARN_ON(seq_blen <= 0); | 452 | WARN_ON(seq_blen <= 0); |
@@ -448,10 +468,10 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, | |||
448 | * to max FC frame payload previously set in fsp->max_payload. | 468 | * to max FC frame payload previously set in fsp->max_payload. |
449 | */ | 469 | */ |
450 | t_blen = fsp->max_payload; | 470 | t_blen = fsp->max_payload; |
451 | if (lp->seq_offload) { | 471 | if (lport->seq_offload) { |
452 | t_blen = min(seq_blen, (size_t)lp->lso_max); | 472 | t_blen = min(seq_blen, (size_t)lport->lso_max); |
453 | FC_FCP_DBG(fsp, "fsp=%p:lso:blen=%zx lso_max=0x%x t_blen=%zx\n", | 473 | FC_FCP_DBG(fsp, "fsp=%p:lso:blen=%zx lso_max=0x%x t_blen=%zx\n", |
454 | fsp, seq_blen, lp->lso_max, t_blen); | 474 | fsp, seq_blen, lport->lso_max, t_blen); |
455 | } | 475 | } |
456 | 476 | ||
457 | WARN_ON(t_blen < FC_MIN_MAX_PAYLOAD); | 477 | WARN_ON(t_blen < FC_MIN_MAX_PAYLOAD); |
@@ -463,7 +483,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, | |||
463 | remaining = seq_blen; | 483 | remaining = seq_blen; |
464 | fh_parm_offset = frame_offset = offset; | 484 | fh_parm_offset = frame_offset = offset; |
465 | tlen = 0; | 485 | tlen = 0; |
466 | seq = lp->tt.seq_start_next(seq); | 486 | seq = lport->tt.seq_start_next(seq); |
467 | f_ctl = FC_FC_REL_OFF; | 487 | f_ctl = FC_FC_REL_OFF; |
468 | WARN_ON(!seq); | 488 | WARN_ON(!seq); |
469 | 489 | ||
@@ -486,11 +506,11 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, | |||
486 | if (tlen % 4) | 506 | if (tlen % 4) |
487 | using_sg = 0; | 507 | using_sg = 0; |
488 | if (using_sg) { | 508 | if (using_sg) { |
489 | fp = _fc_frame_alloc(lp, 0); | 509 | fp = _fc_frame_alloc(lport, 0); |
490 | if (!fp) | 510 | if (!fp) |
491 | return -ENOMEM; | 511 | return -ENOMEM; |
492 | } else { | 512 | } else { |
493 | fp = fc_frame_alloc(lp, tlen); | 513 | fp = fc_frame_alloc(lport, tlen); |
494 | if (!fp) | 514 | if (!fp) |
495 | return -ENOMEM; | 515 | return -ENOMEM; |
496 | 516 | ||
@@ -550,7 +570,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, | |||
550 | /* | 570 | /* |
551 | * send fragment using for a sequence. | 571 | * send fragment using for a sequence. |
552 | */ | 572 | */ |
553 | error = lp->tt.seq_send(lp, seq, fp); | 573 | error = lport->tt.seq_send(lport, seq, fp); |
554 | if (error) { | 574 | if (error) { |
555 | WARN_ON(1); /* send error should be rare */ | 575 | WARN_ON(1); /* send error should be rare */ |
556 | fc_fcp_retry_cmd(fsp); | 576 | fc_fcp_retry_cmd(fsp); |
@@ -562,6 +582,11 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, | |||
562 | return 0; | 582 | return 0; |
563 | } | 583 | } |
564 | 584 | ||
585 | /** | ||
586 | * fc_fcp_abts_resp() - Send an ABTS response | ||
587 | * @fsp: The FCP packet that is being aborted | ||
588 | * @fp: The response frame | ||
589 | */ | ||
565 | static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp) | 590 | static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp) |
566 | { | 591 | { |
567 | int ba_done = 1; | 592 | int ba_done = 1; |
@@ -598,8 +623,8 @@ static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp) | |||
598 | } | 623 | } |
599 | 624 | ||
600 | /** | 625 | /** |
601 | * fc_fcp_reduce_can_queue() - drop can_queue | 626 | * fc_fcp_reduce_can_queue() - Reduce the can_queue value for a local port |
602 | * @lp: lport to drop queueing for | 627 | * @lport: The local port to reduce can_queue on |
603 | * | 628 | * |
604 | * If we are getting memory allocation failures, then we may | 629 | * If we are getting memory allocation failures, then we may |
605 | * be trying to execute too many commands. We let the running | 630 | * be trying to execute too many commands. We let the running |
@@ -607,37 +632,36 @@ static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp) | |||
607 | * can_queue. Eventually we will hit the point where we run | 632 | * can_queue. Eventually we will hit the point where we run |
608 | * on all reserved structs. | 633 | * on all reserved structs. |
609 | */ | 634 | */ |
610 | static void fc_fcp_reduce_can_queue(struct fc_lport *lp) | 635 | static void fc_fcp_reduce_can_queue(struct fc_lport *lport) |
611 | { | 636 | { |
612 | struct fc_fcp_internal *si = fc_get_scsi_internal(lp); | 637 | struct fc_fcp_internal *si = fc_get_scsi_internal(lport); |
613 | unsigned long flags; | 638 | unsigned long flags; |
614 | int can_queue; | 639 | int can_queue; |
615 | 640 | ||
616 | spin_lock_irqsave(lp->host->host_lock, flags); | 641 | spin_lock_irqsave(lport->host->host_lock, flags); |
617 | if (si->throttled) | 642 | if (si->throttled) |
618 | goto done; | 643 | goto done; |
619 | si->throttled = 1; | 644 | si->throttled = 1; |
620 | 645 | ||
621 | can_queue = lp->host->can_queue; | 646 | can_queue = lport->host->can_queue; |
622 | can_queue >>= 1; | 647 | can_queue >>= 1; |
623 | if (!can_queue) | 648 | if (!can_queue) |
624 | can_queue = 1; | 649 | can_queue = 1; |
625 | lp->host->can_queue = can_queue; | 650 | lport->host->can_queue = can_queue; |
626 | shost_printk(KERN_ERR, lp->host, "libfc: Could not allocate frame.\n" | 651 | shost_printk(KERN_ERR, lport->host, "libfc: Could not allocate frame.\n" |
627 | "Reducing can_queue to %d.\n", can_queue); | 652 | "Reducing can_queue to %d.\n", can_queue); |
628 | done: | 653 | done: |
629 | spin_unlock_irqrestore(lp->host->host_lock, flags); | 654 | spin_unlock_irqrestore(lport->host->host_lock, flags); |
630 | } | 655 | } |
631 | 656 | ||
632 | /** | 657 | /** |
633 | * fc_fcp_recv() - Reveive FCP frames | 658 | * fc_fcp_recv() - Reveive an FCP frame |
634 | * @seq: The sequence the frame is on | 659 | * @seq: The sequence the frame is on |
635 | * @fp: The FC frame | 660 | * @fp: The received frame |
636 | * @arg: The related FCP packet | 661 | * @arg: The related FCP packet |
637 | * | 662 | * |
638 | * Return : None | 663 | * Context: Called from Soft IRQ context. Can not be called |
639 | * Context : called from Soft IRQ context | 664 | * holding the FCP packet list lock. |
640 | * can not called holding list lock | ||
641 | */ | 665 | */ |
642 | static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg) | 666 | static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg) |
643 | { | 667 | { |
@@ -710,6 +734,11 @@ errout: | |||
710 | fc_fcp_reduce_can_queue(lport); | 734 | fc_fcp_reduce_can_queue(lport); |
711 | } | 735 | } |
712 | 736 | ||
737 | /** | ||
738 | * fc_fcp_resp() - Handler for FCP responses | ||
739 | * @fsp: The FCP packet the response is for | ||
740 | * @fp: The response frame | ||
741 | */ | ||
713 | static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp) | 742 | static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp) |
714 | { | 743 | { |
715 | struct fc_frame_header *fh; | 744 | struct fc_frame_header *fh; |
@@ -823,15 +852,16 @@ err: | |||
823 | } | 852 | } |
824 | 853 | ||
825 | /** | 854 | /** |
826 | * fc_fcp_complete_locked() - complete processing of a fcp packet | 855 | * fc_fcp_complete_locked() - Complete processing of a fcp_pkt with the |
827 | * @fsp: fcp packet | 856 | * fcp_pkt lock held |
857 | * @fsp: The FCP packet to be completed | ||
828 | * | 858 | * |
829 | * This function may sleep if a timer is pending. The packet lock must be | 859 | * This function may sleep if a timer is pending. The packet lock must be |
830 | * held, and the host lock must not be held. | 860 | * held, and the host lock must not be held. |
831 | */ | 861 | */ |
832 | static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp) | 862 | static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp) |
833 | { | 863 | { |
834 | struct fc_lport *lp = fsp->lp; | 864 | struct fc_lport *lport = fsp->lp; |
835 | struct fc_seq *seq; | 865 | struct fc_seq *seq; |
836 | struct fc_exch *ep; | 866 | struct fc_exch *ep; |
837 | u32 f_ctl; | 867 | u32 f_ctl; |
@@ -862,7 +892,7 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp) | |||
862 | struct fc_frame *conf_frame; | 892 | struct fc_frame *conf_frame; |
863 | struct fc_seq *csp; | 893 | struct fc_seq *csp; |
864 | 894 | ||
865 | csp = lp->tt.seq_start_next(seq); | 895 | csp = lport->tt.seq_start_next(seq); |
866 | conf_frame = fc_frame_alloc(fsp->lp, 0); | 896 | conf_frame = fc_frame_alloc(fsp->lp, 0); |
867 | if (conf_frame) { | 897 | if (conf_frame) { |
868 | f_ctl = FC_FC_SEQ_INIT; | 898 | f_ctl = FC_FC_SEQ_INIT; |
@@ -871,43 +901,48 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp) | |||
871 | fc_fill_fc_hdr(conf_frame, FC_RCTL_DD_SOL_CTL, | 901 | fc_fill_fc_hdr(conf_frame, FC_RCTL_DD_SOL_CTL, |
872 | ep->did, ep->sid, | 902 | ep->did, ep->sid, |
873 | FC_TYPE_FCP, f_ctl, 0); | 903 | FC_TYPE_FCP, f_ctl, 0); |
874 | lp->tt.seq_send(lp, csp, conf_frame); | 904 | lport->tt.seq_send(lport, csp, conf_frame); |
875 | } | 905 | } |
876 | } | 906 | } |
877 | lp->tt.exch_done(seq); | 907 | lport->tt.exch_done(seq); |
878 | } | 908 | } |
879 | fc_io_compl(fsp); | 909 | fc_io_compl(fsp); |
880 | } | 910 | } |
881 | 911 | ||
912 | /** | ||
913 | * fc_fcp_cleanup_cmd() - Cancel the active exchange on a fcp_pkt | ||
914 | * @fsp: The FCP packet whose exchanges should be canceled | ||
915 | * @error: The reason for the cancellation | ||
916 | */ | ||
882 | static void fc_fcp_cleanup_cmd(struct fc_fcp_pkt *fsp, int error) | 917 | static void fc_fcp_cleanup_cmd(struct fc_fcp_pkt *fsp, int error) |
883 | { | 918 | { |
884 | struct fc_lport *lp = fsp->lp; | 919 | struct fc_lport *lport = fsp->lp; |
885 | 920 | ||
886 | if (fsp->seq_ptr) { | 921 | if (fsp->seq_ptr) { |
887 | lp->tt.exch_done(fsp->seq_ptr); | 922 | lport->tt.exch_done(fsp->seq_ptr); |
888 | fsp->seq_ptr = NULL; | 923 | fsp->seq_ptr = NULL; |
889 | } | 924 | } |
890 | fsp->status_code = error; | 925 | fsp->status_code = error; |
891 | } | 926 | } |
892 | 927 | ||
893 | /** | 928 | /** |
894 | * fc_fcp_cleanup_each_cmd() - Cleanup active commads | 929 | * fc_fcp_cleanup_each_cmd() - Cancel all exchanges on a local port |
895 | * @lp: logical port | 930 | * @lport: The local port whose exchanges should be canceled |
896 | * @id: target id | 931 | * @id: The target's ID |
897 | * @lun: lun | 932 | * @lun: The LUN |
898 | * @error: fsp status code | 933 | * @error: The reason for cancellation |
899 | * | 934 | * |
900 | * If lun or id is -1, they are ignored. | 935 | * If lun or id is -1, they are ignored. |
901 | */ | 936 | */ |
902 | static void fc_fcp_cleanup_each_cmd(struct fc_lport *lp, unsigned int id, | 937 | static void fc_fcp_cleanup_each_cmd(struct fc_lport *lport, unsigned int id, |
903 | unsigned int lun, int error) | 938 | unsigned int lun, int error) |
904 | { | 939 | { |
905 | struct fc_fcp_internal *si = fc_get_scsi_internal(lp); | 940 | struct fc_fcp_internal *si = fc_get_scsi_internal(lport); |
906 | struct fc_fcp_pkt *fsp; | 941 | struct fc_fcp_pkt *fsp; |
907 | struct scsi_cmnd *sc_cmd; | 942 | struct scsi_cmnd *sc_cmd; |
908 | unsigned long flags; | 943 | unsigned long flags; |
909 | 944 | ||
910 | spin_lock_irqsave(lp->host->host_lock, flags); | 945 | spin_lock_irqsave(lport->host->host_lock, flags); |
911 | restart: | 946 | restart: |
912 | list_for_each_entry(fsp, &si->scsi_pkt_queue, list) { | 947 | list_for_each_entry(fsp, &si->scsi_pkt_queue, list) { |
913 | sc_cmd = fsp->cmd; | 948 | sc_cmd = fsp->cmd; |
@@ -918,7 +953,7 @@ restart: | |||
918 | continue; | 953 | continue; |
919 | 954 | ||
920 | fc_fcp_pkt_hold(fsp); | 955 | fc_fcp_pkt_hold(fsp); |
921 | spin_unlock_irqrestore(lp->host->host_lock, flags); | 956 | spin_unlock_irqrestore(lport->host->host_lock, flags); |
922 | 957 | ||
923 | if (!fc_fcp_lock_pkt(fsp)) { | 958 | if (!fc_fcp_lock_pkt(fsp)) { |
924 | fc_fcp_cleanup_cmd(fsp, error); | 959 | fc_fcp_cleanup_cmd(fsp, error); |
@@ -927,35 +962,36 @@ restart: | |||
927 | } | 962 | } |
928 | 963 | ||
929 | fc_fcp_pkt_release(fsp); | 964 | fc_fcp_pkt_release(fsp); |
930 | spin_lock_irqsave(lp->host->host_lock, flags); | 965 | spin_lock_irqsave(lport->host->host_lock, flags); |
931 | /* | 966 | /* |
932 | * while we dropped the lock multiple pkts could | 967 | * while we dropped the lock multiple pkts could |
933 | * have been released, so we have to start over. | 968 | * have been released, so we have to start over. |
934 | */ | 969 | */ |
935 | goto restart; | 970 | goto restart; |
936 | } | 971 | } |
937 | spin_unlock_irqrestore(lp->host->host_lock, flags); | 972 | spin_unlock_irqrestore(lport->host->host_lock, flags); |
938 | } | 973 | } |
939 | 974 | ||
940 | static void fc_fcp_abort_io(struct fc_lport *lp) | 975 | /** |
976 | * fc_fcp_abort_io() - Abort all FCP-SCSI exchanges on a local port | ||
977 | * @lport: The local port whose exchanges are to be aborted | ||
978 | */ | ||
979 | static void fc_fcp_abort_io(struct fc_lport *lport) | ||
941 | { | 980 | { |
942 | fc_fcp_cleanup_each_cmd(lp, -1, -1, FC_HRD_ERROR); | 981 | fc_fcp_cleanup_each_cmd(lport, -1, -1, FC_HRD_ERROR); |
943 | } | 982 | } |
944 | 983 | ||
945 | /** | 984 | /** |
946 | * fc_fcp_pkt_send() - send a fcp packet to the lower level. | 985 | * fc_fcp_pkt_send() - Send a fcp_pkt |
947 | * @lp: fc lport | 986 | * @lport: The local port to send the FCP packet on |
948 | * @fsp: fc packet. | 987 | * @fsp: The FCP packet to send |
949 | * | 988 | * |
950 | * This is called by upper layer protocol. | 989 | * Return: Zero for success and -1 for failure |
951 | * Return : zero for success and -1 for failure | 990 | * Locks: Called with the host lock and irqs disabled. |
952 | * Context : called from queuecommand which can be called from process | ||
953 | * or scsi soft irq. | ||
954 | * Locks : called with the host lock and irqs disabled. | ||
955 | */ | 991 | */ |
956 | static int fc_fcp_pkt_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp) | 992 | static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp) |
957 | { | 993 | { |
958 | struct fc_fcp_internal *si = fc_get_scsi_internal(lp); | 994 | struct fc_fcp_internal *si = fc_get_scsi_internal(lport); |
959 | int rc; | 995 | int rc; |
960 | 996 | ||
961 | fsp->cmd->SCp.ptr = (char *)fsp; | 997 | fsp->cmd->SCp.ptr = (char *)fsp; |
@@ -967,16 +1003,22 @@ static int fc_fcp_pkt_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp) | |||
967 | memcpy(fsp->cdb_cmd.fc_cdb, fsp->cmd->cmnd, fsp->cmd->cmd_len); | 1003 | memcpy(fsp->cdb_cmd.fc_cdb, fsp->cmd->cmnd, fsp->cmd->cmd_len); |
968 | list_add_tail(&fsp->list, &si->scsi_pkt_queue); | 1004 | list_add_tail(&fsp->list, &si->scsi_pkt_queue); |
969 | 1005 | ||
970 | spin_unlock_irq(lp->host->host_lock); | 1006 | spin_unlock_irq(lport->host->host_lock); |
971 | rc = lp->tt.fcp_cmd_send(lp, fsp, fc_fcp_recv); | 1007 | rc = lport->tt.fcp_cmd_send(lport, fsp, fc_fcp_recv); |
972 | spin_lock_irq(lp->host->host_lock); | 1008 | spin_lock_irq(lport->host->host_lock); |
973 | if (rc) | 1009 | if (rc) |
974 | list_del(&fsp->list); | 1010 | list_del(&fsp->list); |
975 | 1011 | ||
976 | return rc; | 1012 | return rc; |
977 | } | 1013 | } |
978 | 1014 | ||
979 | static int fc_fcp_cmd_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp, | 1015 | /** |
1016 | * fc_fcp_cmd_send() - Send a FCP command | ||
1017 | * @lport: The local port to send the command on | ||
1018 | * @fsp: The FCP packet the command is on | ||
1019 | * @resp: The handler for the response | ||
1020 | */ | ||
1021 | static int fc_fcp_cmd_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp, | ||
980 | void (*resp)(struct fc_seq *, | 1022 | void (*resp)(struct fc_seq *, |
981 | struct fc_frame *fp, | 1023 | struct fc_frame *fp, |
982 | void *arg)) | 1024 | void *arg)) |
@@ -984,14 +1026,14 @@ static int fc_fcp_cmd_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp, | |||
984 | struct fc_frame *fp; | 1026 | struct fc_frame *fp; |
985 | struct fc_seq *seq; | 1027 | struct fc_seq *seq; |
986 | struct fc_rport *rport; | 1028 | struct fc_rport *rport; |
987 | struct fc_rport_libfc_priv *rp; | 1029 | struct fc_rport_libfc_priv *rpriv; |
988 | const size_t len = sizeof(fsp->cdb_cmd); | 1030 | const size_t len = sizeof(fsp->cdb_cmd); |
989 | int rc = 0; | 1031 | int rc = 0; |
990 | 1032 | ||
991 | if (fc_fcp_lock_pkt(fsp)) | 1033 | if (fc_fcp_lock_pkt(fsp)) |
992 | return 0; | 1034 | return 0; |
993 | 1035 | ||
994 | fp = fc_frame_alloc(lp, sizeof(fsp->cdb_cmd)); | 1036 | fp = fc_frame_alloc(lport, sizeof(fsp->cdb_cmd)); |
995 | if (!fp) { | 1037 | if (!fp) { |
996 | rc = -1; | 1038 | rc = -1; |
997 | goto unlock; | 1039 | goto unlock; |
@@ -1001,13 +1043,14 @@ static int fc_fcp_cmd_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp, | |||
1001 | fr_fsp(fp) = fsp; | 1043 | fr_fsp(fp) = fsp; |
1002 | rport = fsp->rport; | 1044 | rport = fsp->rport; |
1003 | fsp->max_payload = rport->maxframe_size; | 1045 | fsp->max_payload = rport->maxframe_size; |
1004 | rp = rport->dd_data; | 1046 | rpriv = rport->dd_data; |
1005 | 1047 | ||
1006 | fc_fill_fc_hdr(fp, FC_RCTL_DD_UNSOL_CMD, rport->port_id, | 1048 | fc_fill_fc_hdr(fp, FC_RCTL_DD_UNSOL_CMD, rport->port_id, |
1007 | fc_host_port_id(rp->local_port->host), FC_TYPE_FCP, | 1049 | fc_host_port_id(rpriv->local_port->host), FC_TYPE_FCP, |
1008 | FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); | 1050 | FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); |
1009 | 1051 | ||
1010 | seq = lp->tt.exch_seq_send(lp, fp, resp, fc_fcp_pkt_destroy, fsp, 0); | 1052 | seq = lport->tt.exch_seq_send(lport, fp, resp, fc_fcp_pkt_destroy, |
1053 | fsp, 0); | ||
1011 | if (!seq) { | 1054 | if (!seq) { |
1012 | rc = -1; | 1055 | rc = -1; |
1013 | goto unlock; | 1056 | goto unlock; |
@@ -1025,8 +1068,10 @@ unlock: | |||
1025 | return rc; | 1068 | return rc; |
1026 | } | 1069 | } |
1027 | 1070 | ||
1028 | /* | 1071 | /** |
1029 | * transport error handler | 1072 | * fc_fcp_error() - Handler for FCP layer errors |
1073 | * @fsp: The FCP packet the error is on | ||
1074 | * @fp: The frame that has errored | ||
1030 | */ | 1075 | */ |
1031 | static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) | 1076 | static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) |
1032 | { | 1077 | { |
@@ -1051,9 +1096,11 @@ unlock: | |||
1051 | fc_fcp_unlock_pkt(fsp); | 1096 | fc_fcp_unlock_pkt(fsp); |
1052 | } | 1097 | } |
1053 | 1098 | ||
1054 | /* | 1099 | /** |
1055 | * Scsi abort handler- calls to send an abort | 1100 | * fc_fcp_pkt_abort() - Abort a fcp_pkt |
1056 | * and then wait for abort completion | 1101 | * @fsp: The FCP packet to abort on |
1102 | * | ||
1103 | * Called to send an abort and then wait for abort completion | ||
1057 | */ | 1104 | */ |
1058 | static int fc_fcp_pkt_abort(struct fc_fcp_pkt *fsp) | 1105 | static int fc_fcp_pkt_abort(struct fc_fcp_pkt *fsp) |
1059 | { | 1106 | { |
@@ -1082,14 +1129,15 @@ static int fc_fcp_pkt_abort(struct fc_fcp_pkt *fsp) | |||
1082 | return rc; | 1129 | return rc; |
1083 | } | 1130 | } |
1084 | 1131 | ||
1085 | /* | 1132 | /** |
1086 | * Retry LUN reset after resource allocation failed. | 1133 | * fc_lun_reset_send() - Send LUN reset command |
1134 | * @data: The FCP packet that identifies the LUN to be reset | ||
1087 | */ | 1135 | */ |
1088 | static void fc_lun_reset_send(unsigned long data) | 1136 | static void fc_lun_reset_send(unsigned long data) |
1089 | { | 1137 | { |
1090 | struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data; | 1138 | struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data; |
1091 | struct fc_lport *lp = fsp->lp; | 1139 | struct fc_lport *lport = fsp->lp; |
1092 | if (lp->tt.fcp_cmd_send(lp, fsp, fc_tm_done)) { | 1140 | if (lport->tt.fcp_cmd_send(lport, fsp, fc_tm_done)) { |
1093 | if (fsp->recov_retry++ >= FC_MAX_RECOV_RETRY) | 1141 | if (fsp->recov_retry++ >= FC_MAX_RECOV_RETRY) |
1094 | return; | 1142 | return; |
1095 | if (fc_fcp_lock_pkt(fsp)) | 1143 | if (fc_fcp_lock_pkt(fsp)) |
@@ -1100,11 +1148,15 @@ static void fc_lun_reset_send(unsigned long data) | |||
1100 | } | 1148 | } |
1101 | } | 1149 | } |
1102 | 1150 | ||
1103 | /* | 1151 | /** |
1104 | * Scsi device reset handler- send a LUN RESET to the device | 1152 | * fc_lun_reset() - Send a LUN RESET command to a device |
1105 | * and wait for reset reply | 1153 | * and wait for the reply |
1154 | * @lport: The local port to sent the comand on | ||
1155 | * @fsp: The FCP packet that identifies the LUN to be reset | ||
1156 | * @id: The SCSI command ID | ||
1157 | * @lun: The LUN ID to be reset | ||
1106 | */ | 1158 | */ |
1107 | static int fc_lun_reset(struct fc_lport *lp, struct fc_fcp_pkt *fsp, | 1159 | static int fc_lun_reset(struct fc_lport *lport, struct fc_fcp_pkt *fsp, |
1108 | unsigned int id, unsigned int lun) | 1160 | unsigned int id, unsigned int lun) |
1109 | { | 1161 | { |
1110 | int rc; | 1162 | int rc; |
@@ -1132,14 +1184,14 @@ static int fc_lun_reset(struct fc_lport *lp, struct fc_fcp_pkt *fsp, | |||
1132 | 1184 | ||
1133 | spin_lock_bh(&fsp->scsi_pkt_lock); | 1185 | spin_lock_bh(&fsp->scsi_pkt_lock); |
1134 | if (fsp->seq_ptr) { | 1186 | if (fsp->seq_ptr) { |
1135 | lp->tt.exch_done(fsp->seq_ptr); | 1187 | lport->tt.exch_done(fsp->seq_ptr); |
1136 | fsp->seq_ptr = NULL; | 1188 | fsp->seq_ptr = NULL; |
1137 | } | 1189 | } |
1138 | fsp->wait_for_comp = 0; | 1190 | fsp->wait_for_comp = 0; |
1139 | spin_unlock_bh(&fsp->scsi_pkt_lock); | 1191 | spin_unlock_bh(&fsp->scsi_pkt_lock); |
1140 | 1192 | ||
1141 | if (!rc) { | 1193 | if (!rc) { |
1142 | FC_SCSI_DBG(lp, "lun reset failed\n"); | 1194 | FC_SCSI_DBG(lport, "lun reset failed\n"); |
1143 | return FAILED; | 1195 | return FAILED; |
1144 | } | 1196 | } |
1145 | 1197 | ||
@@ -1147,13 +1199,16 @@ static int fc_lun_reset(struct fc_lport *lp, struct fc_fcp_pkt *fsp, | |||
1147 | if (fsp->cdb_status != FCP_TMF_CMPL) | 1199 | if (fsp->cdb_status != FCP_TMF_CMPL) |
1148 | return FAILED; | 1200 | return FAILED; |
1149 | 1201 | ||
1150 | FC_SCSI_DBG(lp, "lun reset to lun %u completed\n", lun); | 1202 | FC_SCSI_DBG(lport, "lun reset to lun %u completed\n", lun); |
1151 | fc_fcp_cleanup_each_cmd(lp, id, lun, FC_CMD_ABORTED); | 1203 | fc_fcp_cleanup_each_cmd(lport, id, lun, FC_CMD_ABORTED); |
1152 | return SUCCESS; | 1204 | return SUCCESS; |
1153 | } | 1205 | } |
1154 | 1206 | ||
1155 | /* | 1207 | /** |
1156 | * Task Managment response handler | 1208 | * fc_tm_done() - Task Managment response handler |
1209 | * @seq: The sequence that the response is on | ||
1210 | * @fp: The response frame | ||
1211 | * @arg: The FCP packet the response is for | ||
1157 | */ | 1212 | */ |
1158 | static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg) | 1213 | static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg) |
1159 | { | 1214 | { |
@@ -1190,34 +1245,31 @@ static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg) | |||
1190 | fc_fcp_unlock_pkt(fsp); | 1245 | fc_fcp_unlock_pkt(fsp); |
1191 | } | 1246 | } |
1192 | 1247 | ||
1193 | static void fc_fcp_cleanup(struct fc_lport *lp) | 1248 | /** |
1249 | * fc_fcp_cleanup() - Cleanup all FCP exchanges on a local port | ||
1250 | * @lport: The local port to be cleaned up | ||
1251 | */ | ||
1252 | static void fc_fcp_cleanup(struct fc_lport *lport) | ||
1194 | { | 1253 | { |
1195 | fc_fcp_cleanup_each_cmd(lp, -1, -1, FC_ERROR); | 1254 | fc_fcp_cleanup_each_cmd(lport, -1, -1, FC_ERROR); |
1196 | } | 1255 | } |
1197 | 1256 | ||
1198 | /* | 1257 | /** |
1199 | * fc_fcp_timeout: called by OS timer function. | 1258 | * fc_fcp_timeout() - Handler for fcp_pkt timeouts |
1200 | * | 1259 | * @data: The FCP packet that has timed out |
1201 | * The timer has been inactivated and must be reactivated if desired | ||
1202 | * using fc_fcp_timer_set(). | ||
1203 | * | ||
1204 | * Algorithm: | ||
1205 | * | ||
1206 | * If REC is supported, just issue it, and return. The REC exchange will | ||
1207 | * complete or time out, and recovery can continue at that point. | ||
1208 | * | ||
1209 | * Otherwise, if the response has been received without all the data, | ||
1210 | * it has been ER_TIMEOUT since the response was received. | ||
1211 | * | 1260 | * |
1212 | * If the response has not been received, | 1261 | * If REC is supported then just issue it and return. The REC exchange will |
1213 | * we see if data was received recently. If it has been, we continue waiting, | 1262 | * complete or time out and recovery can continue at that point. Otherwise, |
1214 | * otherwise, we abort the command. | 1263 | * if the response has been received without all the data it has been |
1264 | * ER_TIMEOUT since the response was received. If the response has not been | ||
1265 | * received we see if data was received recently. If it has been then we | ||
1266 | * continue waiting, otherwise, we abort the command. | ||
1215 | */ | 1267 | */ |
1216 | static void fc_fcp_timeout(unsigned long data) | 1268 | static void fc_fcp_timeout(unsigned long data) |
1217 | { | 1269 | { |
1218 | struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data; | 1270 | struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data; |
1219 | struct fc_rport *rport = fsp->rport; | 1271 | struct fc_rport *rport = fsp->rport; |
1220 | struct fc_rport_libfc_priv *rp = rport->dd_data; | 1272 | struct fc_rport_libfc_priv *rpriv = rport->dd_data; |
1221 | 1273 | ||
1222 | if (fc_fcp_lock_pkt(fsp)) | 1274 | if (fc_fcp_lock_pkt(fsp)) |
1223 | return; | 1275 | return; |
@@ -1227,7 +1279,7 @@ static void fc_fcp_timeout(unsigned long data) | |||
1227 | 1279 | ||
1228 | fsp->state |= FC_SRB_FCP_PROCESSING_TMO; | 1280 | fsp->state |= FC_SRB_FCP_PROCESSING_TMO; |
1229 | 1281 | ||
1230 | if (rp->flags & FC_RP_FLAGS_REC_SUPPORTED) | 1282 | if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED) |
1231 | fc_fcp_rec(fsp); | 1283 | fc_fcp_rec(fsp); |
1232 | else if (time_after_eq(fsp->last_pkt_time + (FC_SCSI_ER_TIMEOUT / 2), | 1284 | else if (time_after_eq(fsp->last_pkt_time + (FC_SCSI_ER_TIMEOUT / 2), |
1233 | jiffies)) | 1285 | jiffies)) |
@@ -1241,35 +1293,37 @@ unlock: | |||
1241 | fc_fcp_unlock_pkt(fsp); | 1293 | fc_fcp_unlock_pkt(fsp); |
1242 | } | 1294 | } |
1243 | 1295 | ||
1244 | /* | 1296 | /** |
1245 | * Send a REC ELS request | 1297 | * fc_fcp_rec() - Send a REC ELS request |
1298 | * @fsp: The FCP packet to send the REC request on | ||
1246 | */ | 1299 | */ |
1247 | static void fc_fcp_rec(struct fc_fcp_pkt *fsp) | 1300 | static void fc_fcp_rec(struct fc_fcp_pkt *fsp) |
1248 | { | 1301 | { |
1249 | struct fc_lport *lp; | 1302 | struct fc_lport *lport; |
1250 | struct fc_frame *fp; | 1303 | struct fc_frame *fp; |
1251 | struct fc_rport *rport; | 1304 | struct fc_rport *rport; |
1252 | struct fc_rport_libfc_priv *rp; | 1305 | struct fc_rport_libfc_priv *rpriv; |
1253 | 1306 | ||
1254 | lp = fsp->lp; | 1307 | lport = fsp->lp; |
1255 | rport = fsp->rport; | 1308 | rport = fsp->rport; |
1256 | rp = rport->dd_data; | 1309 | rpriv = rport->dd_data; |
1257 | if (!fsp->seq_ptr || rp->rp_state != RPORT_ST_READY) { | 1310 | if (!fsp->seq_ptr || rpriv->rp_state != RPORT_ST_READY) { |
1258 | fsp->status_code = FC_HRD_ERROR; | 1311 | fsp->status_code = FC_HRD_ERROR; |
1259 | fsp->io_status = 0; | 1312 | fsp->io_status = 0; |
1260 | fc_fcp_complete_locked(fsp); | 1313 | fc_fcp_complete_locked(fsp); |
1261 | return; | 1314 | return; |
1262 | } | 1315 | } |
1263 | fp = fc_frame_alloc(lp, sizeof(struct fc_els_rec)); | 1316 | fp = fc_frame_alloc(lport, sizeof(struct fc_els_rec)); |
1264 | if (!fp) | 1317 | if (!fp) |
1265 | goto retry; | 1318 | goto retry; |
1266 | 1319 | ||
1267 | fr_seq(fp) = fsp->seq_ptr; | 1320 | fr_seq(fp) = fsp->seq_ptr; |
1268 | fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rport->port_id, | 1321 | fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rport->port_id, |
1269 | fc_host_port_id(rp->local_port->host), FC_TYPE_ELS, | 1322 | fc_host_port_id(rpriv->local_port->host), FC_TYPE_ELS, |
1270 | FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); | 1323 | FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); |
1271 | if (lp->tt.elsct_send(lp, rport->port_id, fp, ELS_REC, fc_fcp_rec_resp, | 1324 | if (lport->tt.elsct_send(lport, rport->port_id, fp, ELS_REC, |
1272 | fsp, jiffies_to_msecs(FC_SCSI_REC_TOV))) { | 1325 | fc_fcp_rec_resp, fsp, |
1326 | jiffies_to_msecs(FC_SCSI_REC_TOV))) { | ||
1273 | fc_fcp_pkt_hold(fsp); /* hold while REC outstanding */ | 1327 | fc_fcp_pkt_hold(fsp); /* hold while REC outstanding */ |
1274 | return; | 1328 | return; |
1275 | } | 1329 | } |
@@ -1280,12 +1334,16 @@ retry: | |||
1280 | fc_timeout_error(fsp); | 1334 | fc_timeout_error(fsp); |
1281 | } | 1335 | } |
1282 | 1336 | ||
1283 | /* | 1337 | /** |
1284 | * Receive handler for REC ELS frame | 1338 | * fc_fcp_rec_resp() - Handler for REC ELS responses |
1285 | * if it is a reject then let the scsi layer to handle | 1339 | * @seq: The sequence the response is on |
1286 | * the timeout. if it is a LS_ACC then if the io was not completed | 1340 | * @fp: The response frame |
1287 | * then set the timeout and return otherwise complete the exchange | 1341 | * @arg: The FCP packet the response is on |
1288 | * and tell the scsi layer to restart the I/O. | 1342 | * |
1343 | * If the response is a reject then the scsi layer will handle | ||
1344 | * the timeout. If the response is a LS_ACC then if the I/O was not completed | ||
1345 | * set the timeout and return. If the I/O was completed then complete the | ||
1346 | * exchange and tell the SCSI layer. | ||
1289 | */ | 1347 | */ |
1290 | static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) | 1348 | static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) |
1291 | { | 1349 | { |
@@ -1297,7 +1355,7 @@ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) | |||
1297 | u32 offset; | 1355 | u32 offset; |
1298 | enum dma_data_direction data_dir; | 1356 | enum dma_data_direction data_dir; |
1299 | enum fc_rctl r_ctl; | 1357 | enum fc_rctl r_ctl; |
1300 | struct fc_rport_libfc_priv *rp; | 1358 | struct fc_rport_libfc_priv *rpriv; |
1301 | 1359 | ||
1302 | if (IS_ERR(fp)) { | 1360 | if (IS_ERR(fp)) { |
1303 | fc_fcp_rec_error(fsp, fp); | 1361 | fc_fcp_rec_error(fsp, fp); |
@@ -1320,13 +1378,13 @@ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) | |||
1320 | /* fall through */ | 1378 | /* fall through */ |
1321 | case ELS_RJT_UNSUP: | 1379 | case ELS_RJT_UNSUP: |
1322 | FC_FCP_DBG(fsp, "device does not support REC\n"); | 1380 | FC_FCP_DBG(fsp, "device does not support REC\n"); |
1323 | rp = fsp->rport->dd_data; | 1381 | rpriv = fsp->rport->dd_data; |
1324 | /* | 1382 | /* |
1325 | * if we do not spport RECs or got some bogus | 1383 | * if we do not spport RECs or got some bogus |
1326 | * reason then resetup timer so we check for | 1384 | * reason then resetup timer so we check for |
1327 | * making progress. | 1385 | * making progress. |
1328 | */ | 1386 | */ |
1329 | rp->flags &= ~FC_RP_FLAGS_REC_SUPPORTED; | 1387 | rpriv->flags &= ~FC_RP_FLAGS_REC_SUPPORTED; |
1330 | fc_fcp_timer_set(fsp, FC_SCSI_ER_TIMEOUT); | 1388 | fc_fcp_timer_set(fsp, FC_SCSI_ER_TIMEOUT); |
1331 | break; | 1389 | break; |
1332 | case ELS_RJT_LOGIC: | 1390 | case ELS_RJT_LOGIC: |
@@ -1423,8 +1481,10 @@ out: | |||
1423 | fc_frame_free(fp); | 1481 | fc_frame_free(fp); |
1424 | } | 1482 | } |
1425 | 1483 | ||
1426 | /* | 1484 | /** |
1427 | * Handle error response or timeout for REC exchange. | 1485 | * fc_fcp_rec_error() - Handler for REC errors |
1486 | * @fsp: The FCP packet the error is on | ||
1487 | * @fp: The REC frame | ||
1428 | */ | 1488 | */ |
1429 | static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) | 1489 | static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) |
1430 | { | 1490 | { |
@@ -1463,10 +1523,9 @@ out: | |||
1463 | fc_fcp_pkt_release(fsp); /* drop hold for outstanding REC */ | 1523 | fc_fcp_pkt_release(fsp); /* drop hold for outstanding REC */ |
1464 | } | 1524 | } |
1465 | 1525 | ||
1466 | /* | 1526 | /** |
1467 | * Time out error routine: | 1527 | * fc_timeout_error() - Handler for fcp_pkt timeouts |
1468 | * abort's the I/O close the exchange and | 1528 | * @fsp: The FCP packt that has timed out |
1469 | * send completion notification to scsi layer | ||
1470 | */ | 1529 | */ |
1471 | static void fc_timeout_error(struct fc_fcp_pkt *fsp) | 1530 | static void fc_timeout_error(struct fc_fcp_pkt *fsp) |
1472 | { | 1531 | { |
@@ -1480,16 +1539,18 @@ static void fc_timeout_error(struct fc_fcp_pkt *fsp) | |||
1480 | fc_fcp_send_abort(fsp); | 1539 | fc_fcp_send_abort(fsp); |
1481 | } | 1540 | } |
1482 | 1541 | ||
1483 | /* | 1542 | /** |
1484 | * Sequence retransmission request. | 1543 | * fc_fcp_srr() - Send a SRR request (Sequence Retransmission Request) |
1544 | * @fsp: The FCP packet the SRR is to be sent on | ||
1545 | * @r_ctl: The R_CTL field for the SRR request | ||
1485 | * This is called after receiving status but insufficient data, or | 1546 | * This is called after receiving status but insufficient data, or |
1486 | * when expecting status but the request has timed out. | 1547 | * when expecting status but the request has timed out. |
1487 | */ | 1548 | */ |
1488 | static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset) | 1549 | static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset) |
1489 | { | 1550 | { |
1490 | struct fc_lport *lp = fsp->lp; | 1551 | struct fc_lport *lport = fsp->lp; |
1491 | struct fc_rport *rport; | 1552 | struct fc_rport *rport; |
1492 | struct fc_rport_libfc_priv *rp; | 1553 | struct fc_rport_libfc_priv *rpriv; |
1493 | struct fc_exch *ep = fc_seq_exch(fsp->seq_ptr); | 1554 | struct fc_exch *ep = fc_seq_exch(fsp->seq_ptr); |
1494 | struct fc_seq *seq; | 1555 | struct fc_seq *seq; |
1495 | struct fcp_srr *srr; | 1556 | struct fcp_srr *srr; |
@@ -1497,12 +1558,13 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset) | |||
1497 | u8 cdb_op; | 1558 | u8 cdb_op; |
1498 | 1559 | ||
1499 | rport = fsp->rport; | 1560 | rport = fsp->rport; |
1500 | rp = rport->dd_data; | 1561 | rpriv = rport->dd_data; |
1501 | cdb_op = fsp->cdb_cmd.fc_cdb[0]; | 1562 | cdb_op = fsp->cdb_cmd.fc_cdb[0]; |
1502 | 1563 | ||
1503 | if (!(rp->flags & FC_RP_FLAGS_RETRY) || rp->rp_state != RPORT_ST_READY) | 1564 | if (!(rpriv->flags & FC_RP_FLAGS_RETRY) || |
1565 | rpriv->rp_state != RPORT_ST_READY) | ||
1504 | goto retry; /* shouldn't happen */ | 1566 | goto retry; /* shouldn't happen */ |
1505 | fp = fc_frame_alloc(lp, sizeof(*srr)); | 1567 | fp = fc_frame_alloc(lport, sizeof(*srr)); |
1506 | if (!fp) | 1568 | if (!fp) |
1507 | goto retry; | 1569 | goto retry; |
1508 | 1570 | ||
@@ -1515,11 +1577,11 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset) | |||
1515 | srr->srr_rel_off = htonl(offset); | 1577 | srr->srr_rel_off = htonl(offset); |
1516 | 1578 | ||
1517 | fc_fill_fc_hdr(fp, FC_RCTL_ELS4_REQ, rport->port_id, | 1579 | fc_fill_fc_hdr(fp, FC_RCTL_ELS4_REQ, rport->port_id, |
1518 | fc_host_port_id(rp->local_port->host), FC_TYPE_FCP, | 1580 | fc_host_port_id(rpriv->local_port->host), FC_TYPE_FCP, |
1519 | FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); | 1581 | FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); |
1520 | 1582 | ||
1521 | seq = lp->tt.exch_seq_send(lp, fp, fc_fcp_srr_resp, NULL, | 1583 | seq = lport->tt.exch_seq_send(lport, fp, fc_fcp_srr_resp, NULL, |
1522 | fsp, jiffies_to_msecs(FC_SCSI_REC_TOV)); | 1584 | fsp, jiffies_to_msecs(FC_SCSI_REC_TOV)); |
1523 | if (!seq) | 1585 | if (!seq) |
1524 | goto retry; | 1586 | goto retry; |
1525 | 1587 | ||
@@ -1533,8 +1595,11 @@ retry: | |||
1533 | fc_fcp_retry_cmd(fsp); | 1595 | fc_fcp_retry_cmd(fsp); |
1534 | } | 1596 | } |
1535 | 1597 | ||
1536 | /* | 1598 | /** |
1537 | * Handle response from SRR. | 1599 | * fc_fcp_srr_resp() - Handler for SRR response |
1600 | * @seq: The sequence the SRR is on | ||
1601 | * @fp: The SRR frame | ||
1602 | * @arg: The FCP packet the SRR is on | ||
1538 | */ | 1603 | */ |
1539 | static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) | 1604 | static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) |
1540 | { | 1605 | { |
@@ -1580,6 +1645,11 @@ out: | |||
1580 | fc_fcp_pkt_release(fsp); /* drop hold for outstanding SRR */ | 1645 | fc_fcp_pkt_release(fsp); /* drop hold for outstanding SRR */ |
1581 | } | 1646 | } |
1582 | 1647 | ||
1648 | /** | ||
1649 | * fc_fcp_srr_error() - Handler for SRR errors | ||
1650 | * @fsp: The FCP packet that the SRR error is on | ||
1651 | * @fp: The SRR frame | ||
1652 | */ | ||
1583 | static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) | 1653 | static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) |
1584 | { | 1654 | { |
1585 | if (fc_fcp_lock_pkt(fsp)) | 1655 | if (fc_fcp_lock_pkt(fsp)) |
@@ -1604,31 +1674,36 @@ out: | |||
1604 | fc_fcp_pkt_release(fsp); /* drop hold for outstanding SRR */ | 1674 | fc_fcp_pkt_release(fsp); /* drop hold for outstanding SRR */ |
1605 | } | 1675 | } |
1606 | 1676 | ||
1607 | static inline int fc_fcp_lport_queue_ready(struct fc_lport *lp) | 1677 | /** |
1678 | * fc_fcp_lport_queue_ready() - Determine if the lport and it's queue is ready | ||
1679 | * @lport: The local port to be checked | ||
1680 | */ | ||
1681 | static inline int fc_fcp_lport_queue_ready(struct fc_lport *lport) | ||
1608 | { | 1682 | { |
1609 | /* lock ? */ | 1683 | /* lock ? */ |
1610 | return (lp->state == LPORT_ST_READY) && lp->link_up && !lp->qfull; | 1684 | return (lport->state == LPORT_ST_READY) && |
1685 | lport->link_up && !lport->qfull; | ||
1611 | } | 1686 | } |
1612 | 1687 | ||
1613 | /** | 1688 | /** |
1614 | * fc_queuecommand - The queuecommand function of the scsi template | 1689 | * fc_queuecommand() - The queuecommand function of the SCSI template |
1615 | * @cmd: struct scsi_cmnd to be executed | 1690 | * @cmd: The scsi_cmnd to be executed |
1616 | * @done: Callback function to be called when cmd is completed | 1691 | * @done: The callback function to be called when the scsi_cmnd is complete |
1617 | * | 1692 | * |
1618 | * this is the i/o strategy routine, called by the scsi layer | 1693 | * This is the i/o strategy routine, called by the SCSI layer. This routine |
1619 | * this routine is called with holding the host_lock. | 1694 | * is called with the host_lock held. |
1620 | */ | 1695 | */ |
1621 | int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *)) | 1696 | int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *)) |
1622 | { | 1697 | { |
1623 | struct fc_lport *lp; | 1698 | struct fc_lport *lport; |
1624 | struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); | 1699 | struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); |
1625 | struct fc_fcp_pkt *fsp; | 1700 | struct fc_fcp_pkt *fsp; |
1626 | struct fc_rport_libfc_priv *rp; | 1701 | struct fc_rport_libfc_priv *rpriv; |
1627 | int rval; | 1702 | int rval; |
1628 | int rc = 0; | 1703 | int rc = 0; |
1629 | struct fcoe_dev_stats *stats; | 1704 | struct fcoe_dev_stats *stats; |
1630 | 1705 | ||
1631 | lp = shost_priv(sc_cmd->device->host); | 1706 | lport = shost_priv(sc_cmd->device->host); |
1632 | 1707 | ||
1633 | rval = fc_remote_port_chkready(rport); | 1708 | rval = fc_remote_port_chkready(rport); |
1634 | if (rval) { | 1709 | if (rval) { |
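As a rough model of the readiness test introduced in the hunk above (and consulted again by fc_queuecommand() below), the check reduces to three ANDed conditions on the local port. The toy_* names and the LPORT_ST_READY value are hypothetical stand-ins for this sketch, not libfc symbols:

#include <stdbool.h>

#define LPORT_ST_READY 1  /* hypothetical value; libfc uses an enum for port states */

/* Hypothetical stand-in for the three fields fc_fcp_lport_queue_ready() reads. */
struct toy_lport {
	int  state;     /* compared against LPORT_ST_READY */
	bool link_up;   /* physical link status */
	bool qfull;     /* transmit queue is full */
};

/* The port must be in the READY state, the link must be up, and the queue
 * must not be full before new I/O is accepted. */
static inline bool toy_queue_ready(const struct toy_lport *lport)
{
	return lport->state == LPORT_ST_READY &&
	       lport->link_up && !lport->qfull;
}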
@@ -1647,14 +1722,14 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *)) | |||
1647 | goto out; | 1722 | goto out; |
1648 | } | 1723 | } |
1649 | 1724 | ||
1650 | rp = rport->dd_data; | 1725 | rpriv = rport->dd_data; |
1651 | 1726 | ||
1652 | if (!fc_fcp_lport_queue_ready(lp)) { | 1727 | if (!fc_fcp_lport_queue_ready(lport)) { |
1653 | rc = SCSI_MLQUEUE_HOST_BUSY; | 1728 | rc = SCSI_MLQUEUE_HOST_BUSY; |
1654 | goto out; | 1729 | goto out; |
1655 | } | 1730 | } |
1656 | 1731 | ||
1657 | fsp = fc_fcp_pkt_alloc(lp, GFP_ATOMIC); | 1732 | fsp = fc_fcp_pkt_alloc(lport, GFP_ATOMIC); |
1658 | if (fsp == NULL) { | 1733 | if (fsp == NULL) { |
1659 | rc = SCSI_MLQUEUE_HOST_BUSY; | 1734 | rc = SCSI_MLQUEUE_HOST_BUSY; |
1660 | goto out; | 1735 | goto out; |
@@ -1664,7 +1739,7 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *)) | |||
1664 | * build the libfc request pkt | 1739 | * build the libfc request pkt |
1665 | */ | 1740 | */ |
1666 | fsp->cmd = sc_cmd; /* save the cmd */ | 1741 | fsp->cmd = sc_cmd; /* save the cmd */ |
1667 | fsp->lp = lp; /* save the softc ptr */ | 1742 | fsp->lp = lport; /* save the softc ptr */ |
1668 | fsp->rport = rport; /* set the remote port ptr */ | 1743 | fsp->rport = rport; /* set the remote port ptr */ |
1669 | fsp->xfer_ddp = FC_XID_UNKNOWN; | 1744 | fsp->xfer_ddp = FC_XID_UNKNOWN; |
1670 | sc_cmd->scsi_done = done; | 1745 | sc_cmd->scsi_done = done; |
@@ -1678,7 +1753,7 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *)) | |||
1678 | /* | 1753 | /* |
1679 | * setup the data direction | 1754 | * setup the data direction |
1680 | */ | 1755 | */ |
1681 | stats = fc_lport_get_stats(lp); | 1756 | stats = fc_lport_get_stats(lport); |
1682 | if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) { | 1757 | if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) { |
1683 | fsp->req_flags = FC_SRB_READ; | 1758 | fsp->req_flags = FC_SRB_READ; |
1684 | stats->InputRequests++; | 1759 | stats->InputRequests++; |
@@ -1692,7 +1767,7 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *)) | |||
1692 | stats->ControlRequests++; | 1767 | stats->ControlRequests++; |
1693 | } | 1768 | } |
1694 | 1769 | ||
1695 | fsp->tgt_flags = rp->flags; | 1770 | fsp->tgt_flags = rpriv->flags; |
1696 | 1771 | ||
1697 | init_timer(&fsp->timer); | 1772 | init_timer(&fsp->timer); |
1698 | fsp->timer.data = (unsigned long)fsp; | 1773 | fsp->timer.data = (unsigned long)fsp; |
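A hedged sketch of the direction/statistics classification fc_queuecommand() performs before arming the timer; all toy_* identifiers are hypothetical, and the real code operates on sc_cmd->sc_data_direction and struct fcoe_dev_stats:

/* Hypothetical request directions and counters modelling the dispatch above. */
enum toy_dir { TOY_FROM_DEVICE, TOY_TO_DEVICE, TOY_NONE };

struct toy_stats {
	unsigned long input_requests;
	unsigned long output_requests;
	unsigned long control_requests;
};

#define TOY_SRB_READ  0x1
#define TOY_SRB_WRITE 0x2

/* Reads become input requests, writes become output requests, and anything
 * else (e.g. TEST UNIT READY) is counted as a control request. */
static unsigned int toy_classify(enum toy_dir dir, struct toy_stats *stats)
{
	if (dir == TOY_FROM_DEVICE) {
		stats->input_requests++;
		return TOY_SRB_READ;
	}
	if (dir == TOY_TO_DEVICE) {
		stats->output_requests++;
		return TOY_SRB_WRITE;
	}
	stats->control_requests++;
	return 0;
}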
@@ -1702,7 +1777,7 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *)) | |||
1702 | * if we get -1 return then put the request in the pending | 1777 | * if we get -1 return then put the request in the pending |
1703 | * queue. | 1778 | * queue. |
1704 | */ | 1779 | */ |
1705 | rval = fc_fcp_pkt_send(lp, fsp); | 1780 | rval = fc_fcp_pkt_send(lport, fsp); |
1706 | if (rval != 0) { | 1781 | if (rval != 0) { |
1707 | fsp->state = FC_SRB_FREE; | 1782 | fsp->state = FC_SRB_FREE; |
1708 | fc_fcp_pkt_release(fsp); | 1783 | fc_fcp_pkt_release(fsp); |
@@ -1714,18 +1789,17 @@ out: | |||
1714 | EXPORT_SYMBOL(fc_queuecommand); | 1789 | EXPORT_SYMBOL(fc_queuecommand); |
1715 | 1790 | ||
1716 | /** | 1791 | /** |
1717 | * fc_io_compl() - Handle responses for completed commands | 1792 | * fc_io_compl() - Handle responses for completed commands |
1718 | * @fsp: scsi packet | 1793 | * @fsp: The FCP packet that is complete |
1719 | * | ||
1720 | * Translates a error to a Linux SCSI error. | ||
1721 | * | 1794 | * |
1795 | * Translates fcp_pkt errors to Linux SCSI errors. | ||
1722 | * The fcp packet lock must be held when calling. | 1796 | * The fcp packet lock must be held when calling. |
1723 | */ | 1797 | */ |
1724 | static void fc_io_compl(struct fc_fcp_pkt *fsp) | 1798 | static void fc_io_compl(struct fc_fcp_pkt *fsp) |
1725 | { | 1799 | { |
1726 | struct fc_fcp_internal *si; | 1800 | struct fc_fcp_internal *si; |
1727 | struct scsi_cmnd *sc_cmd; | 1801 | struct scsi_cmnd *sc_cmd; |
1728 | struct fc_lport *lp; | 1802 | struct fc_lport *lport; |
1729 | unsigned long flags; | 1803 | unsigned long flags; |
1730 | 1804 | ||
1731 | /* release outstanding ddp context */ | 1805 | /* release outstanding ddp context */ |
@@ -1738,11 +1812,11 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp) | |||
1738 | spin_lock_bh(&fsp->scsi_pkt_lock); | 1812 | spin_lock_bh(&fsp->scsi_pkt_lock); |
1739 | } | 1813 | } |
1740 | 1814 | ||
1741 | lp = fsp->lp; | 1815 | lport = fsp->lp; |
1742 | si = fc_get_scsi_internal(lp); | 1816 | si = fc_get_scsi_internal(lport); |
1743 | spin_lock_irqsave(lp->host->host_lock, flags); | 1817 | spin_lock_irqsave(lport->host->host_lock, flags); |
1744 | if (!fsp->cmd) { | 1818 | if (!fsp->cmd) { |
1745 | spin_unlock_irqrestore(lp->host->host_lock, flags); | 1819 | spin_unlock_irqrestore(lport->host->host_lock, flags); |
1746 | return; | 1820 | return; |
1747 | } | 1821 | } |
1748 | 1822 | ||
@@ -1759,7 +1833,7 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp) | |||
1759 | fsp->cmd = NULL; | 1833 | fsp->cmd = NULL; |
1760 | 1834 | ||
1761 | if (!sc_cmd->SCp.ptr) { | 1835 | if (!sc_cmd->SCp.ptr) { |
1762 | spin_unlock_irqrestore(lp->host->host_lock, flags); | 1836 | spin_unlock_irqrestore(lport->host->host_lock, flags); |
1763 | return; | 1837 | return; |
1764 | } | 1838 | } |
1765 | 1839 | ||
@@ -1826,7 +1900,7 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp) | |||
1826 | list_del(&fsp->list); | 1900 | list_del(&fsp->list); |
1827 | sc_cmd->SCp.ptr = NULL; | 1901 | sc_cmd->SCp.ptr = NULL; |
1828 | sc_cmd->scsi_done(sc_cmd); | 1902 | sc_cmd->scsi_done(sc_cmd); |
1829 | spin_unlock_irqrestore(lp->host->host_lock, flags); | 1903 | spin_unlock_irqrestore(lport->host->host_lock, flags); |
1830 | 1904 | ||
1831 | /* release ref from initial allocation in queue command */ | 1905 | /* release ref from initial allocation in queue command */ |
1832 | fc_fcp_pkt_release(fsp); | 1906 | fc_fcp_pkt_release(fsp); |
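A minimal model of the completion hand-off fc_io_compl() performs under the host lock: translate the transport outcome into a midlayer result, break the command/packet links, and invoke the completion callback once. toy_cmd and toy_pkt are hypothetical stand-ins for scsi_cmnd and fc_fcp_pkt; the status translation itself is elided:

#include <stddef.h>

struct toy_cmd {
	void (*done)(struct toy_cmd *);  /* like sc_cmd->scsi_done */
	void *driver_data;               /* like sc_cmd->SCp.ptr */
	int   result;
};

struct toy_pkt {
	struct toy_cmd *cmd;
	int             status;          /* transport-level outcome */
};

/* Complete a packet exactly once, guarding against a command that has
 * already been torn down elsewhere. */
static void toy_io_compl(struct toy_pkt *pkt)
{
	struct toy_cmd *cmd = pkt->cmd;

	if (!cmd || !cmd->driver_data)
		return;                  /* already completed elsewhere */

	cmd->result = pkt->status;       /* real code maps FC errors here */
	cmd->driver_data = NULL;
	pkt->cmd = NULL;
	cmd->done(cmd);
}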
@@ -1834,35 +1908,34 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp) | |||
1834 | 1908 | ||
1835 | /** | 1909 | /** |
1836 | * fc_eh_abort() - Abort a command | 1910 | * fc_eh_abort() - Abort a command |
1837 | * @sc_cmd: scsi command to abort | 1911 | * @sc_cmd: The SCSI command to abort |
1838 | * | 1912 | * |
1839 | * From scsi host template. | 1913 | * From SCSI host template. |
1840 | * send ABTS to the target device and wait for the response | 1914 | * Send an ABTS to the target device and wait for the response. |
1841 | * sc_cmd is the pointer to the command to be aborted. | ||
1842 | */ | 1915 | */ |
1843 | int fc_eh_abort(struct scsi_cmnd *sc_cmd) | 1916 | int fc_eh_abort(struct scsi_cmnd *sc_cmd) |
1844 | { | 1917 | { |
1845 | struct fc_fcp_pkt *fsp; | 1918 | struct fc_fcp_pkt *fsp; |
1846 | struct fc_lport *lp; | 1919 | struct fc_lport *lport; |
1847 | int rc = FAILED; | 1920 | int rc = FAILED; |
1848 | unsigned long flags; | 1921 | unsigned long flags; |
1849 | 1922 | ||
1850 | lp = shost_priv(sc_cmd->device->host); | 1923 | lport = shost_priv(sc_cmd->device->host); |
1851 | if (lp->state != LPORT_ST_READY) | 1924 | if (lport->state != LPORT_ST_READY) |
1852 | return rc; | 1925 | return rc; |
1853 | else if (!lp->link_up) | 1926 | else if (!lport->link_up) |
1854 | return rc; | 1927 | return rc; |
1855 | 1928 | ||
1856 | spin_lock_irqsave(lp->host->host_lock, flags); | 1929 | spin_lock_irqsave(lport->host->host_lock, flags); |
1857 | fsp = CMD_SP(sc_cmd); | 1930 | fsp = CMD_SP(sc_cmd); |
1858 | if (!fsp) { | 1931 | if (!fsp) { |
1859 | /* command completed while scsi eh was setting up */ | 1932 | /* command completed while scsi eh was setting up */ |
1860 | spin_unlock_irqrestore(lp->host->host_lock, flags); | 1933 | spin_unlock_irqrestore(lport->host->host_lock, flags); |
1861 | return SUCCESS; | 1934 | return SUCCESS; |
1862 | } | 1935 | } |
1863 | /* grab a ref so the fsp and sc_cmd cannot be relased from under us */ | 1936 | /* grab a ref so the fsp and sc_cmd cannot be relased from under us */ |
1864 | fc_fcp_pkt_hold(fsp); | 1937 | fc_fcp_pkt_hold(fsp); |
1865 | spin_unlock_irqrestore(lp->host->host_lock, flags); | 1938 | spin_unlock_irqrestore(lport->host->host_lock, flags); |
1866 | 1939 | ||
1867 | if (fc_fcp_lock_pkt(fsp)) { | 1940 | if (fc_fcp_lock_pkt(fsp)) { |
1868 | /* completed while we were waiting for timer to be deleted */ | 1941 | /* completed while we were waiting for timer to be deleted */ |
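The abort path above takes a reference while the host lock is held so the packet cannot be released once the lock is dropped. A sketch of that pattern follows; the pthread mutex and toy_* types are stand-ins, not the kernel's spinlock or fc_fcp_pkt:

#include <pthread.h>
#include <stdlib.h>

/* Hypothetical packet with a reference count. */
struct toy_pkt {
	int refcnt;
};

static pthread_mutex_t toy_host_lock = PTHREAD_MUTEX_INITIALIZER;

/* Look the packet up and take a reference while the lock is held, so it
 * cannot be freed once the lock is dropped. */
static struct toy_pkt *toy_grab_pkt(struct toy_pkt *slot)
{
	struct toy_pkt *pkt;

	pthread_mutex_lock(&toy_host_lock);
	pkt = slot;                      /* like CMD_SP(sc_cmd) */
	if (pkt)
		pkt->refcnt++;           /* like fc_fcp_pkt_hold() */
	pthread_mutex_unlock(&toy_host_lock);
	return pkt;                      /* NULL: the command already completed */
}

static void toy_put_pkt(struct toy_pkt *pkt)
{
	pthread_mutex_lock(&toy_host_lock);
	if (--pkt->refcnt == 0)
		free(pkt);               /* like fc_fcp_pkt_release() */
	pthread_mutex_unlock(&toy_host_lock);
}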
@@ -1880,34 +1953,32 @@ release_pkt: | |||
1880 | EXPORT_SYMBOL(fc_eh_abort); | 1953 | EXPORT_SYMBOL(fc_eh_abort); |
1881 | 1954 | ||
1882 | /** | 1955 | /** |
1883 | * fc_eh_device_reset() Reset a single LUN | 1956 | * fc_eh_device_reset() - Reset a single LUN |
1884 | * @sc_cmd: scsi command | 1957 | * @sc_cmd: The SCSI command which identifies the device whose |
1958 | * LUN is to be reset | ||
1885 | * | 1959 | * |
1886 | * Set from scsi host template to send tm cmd to the target and wait for the | 1960 | * Set from SCSI host template. |
1887 | * response. | ||
1888 | */ | 1961 | */ |
1889 | int fc_eh_device_reset(struct scsi_cmnd *sc_cmd) | 1962 | int fc_eh_device_reset(struct scsi_cmnd *sc_cmd) |
1890 | { | 1963 | { |
1891 | struct fc_lport *lp; | 1964 | struct fc_lport *lport; |
1892 | struct fc_fcp_pkt *fsp; | 1965 | struct fc_fcp_pkt *fsp; |
1893 | struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); | 1966 | struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); |
1894 | int rc = FAILED; | 1967 | int rc = FAILED; |
1895 | struct fc_rport_libfc_priv *rp; | ||
1896 | int rval; | 1968 | int rval; |
1897 | 1969 | ||
1898 | rval = fc_remote_port_chkready(rport); | 1970 | rval = fc_remote_port_chkready(rport); |
1899 | if (rval) | 1971 | if (rval) |
1900 | goto out; | 1972 | goto out; |
1901 | 1973 | ||
1902 | rp = rport->dd_data; | 1974 | lport = shost_priv(sc_cmd->device->host); |
1903 | lp = shost_priv(sc_cmd->device->host); | ||
1904 | 1975 | ||
1905 | if (lp->state != LPORT_ST_READY) | 1976 | if (lport->state != LPORT_ST_READY) |
1906 | return rc; | 1977 | return rc; |
1907 | 1978 | ||
1908 | FC_SCSI_DBG(lp, "Resetting rport (%6x)\n", rport->port_id); | 1979 | FC_SCSI_DBG(lport, "Resetting rport (%6x)\n", rport->port_id); |
1909 | 1980 | ||
1910 | fsp = fc_fcp_pkt_alloc(lp, GFP_NOIO); | 1981 | fsp = fc_fcp_pkt_alloc(lport, GFP_NOIO); |
1911 | if (fsp == NULL) { | 1982 | if (fsp == NULL) { |
1912 | printk(KERN_WARNING "libfc: could not allocate scsi_pkt\n"); | 1983 | printk(KERN_WARNING "libfc: could not allocate scsi_pkt\n"); |
1913 | sc_cmd->result = DID_NO_CONNECT << 16; | 1984 | sc_cmd->result = DID_NO_CONNECT << 16; |
@@ -1919,13 +1990,13 @@ int fc_eh_device_reset(struct scsi_cmnd *sc_cmd) | |||
1919 | * the sc passed in is not setup for execution like when sent | 1990 | * the sc passed in is not setup for execution like when sent |
1920 | * through the queuecommand callout. | 1991 | * through the queuecommand callout. |
1921 | */ | 1992 | */ |
1922 | fsp->lp = lp; /* save the softc ptr */ | 1993 | fsp->lp = lport; /* save the softc ptr */ |
1923 | fsp->rport = rport; /* set the remote port ptr */ | 1994 | fsp->rport = rport; /* set the remote port ptr */ |
1924 | 1995 | ||
1925 | /* | 1996 | /* |
1926 | * flush outstanding commands | 1997 | * flush outstanding commands |
1927 | */ | 1998 | */ |
1928 | rc = fc_lun_reset(lp, fsp, scmd_id(sc_cmd), sc_cmd->device->lun); | 1999 | rc = fc_lun_reset(lport, fsp, scmd_id(sc_cmd), sc_cmd->device->lun); |
1929 | fsp->state = FC_SRB_FREE; | 2000 | fsp->state = FC_SRB_FREE; |
1930 | fc_fcp_pkt_release(fsp); | 2001 | fc_fcp_pkt_release(fsp); |
1931 | 2002 | ||
@@ -1935,38 +2006,39 @@ out: | |||
1935 | EXPORT_SYMBOL(fc_eh_device_reset); | 2006 | EXPORT_SYMBOL(fc_eh_device_reset); |
1936 | 2007 | ||
1937 | /** | 2008 | /** |
1938 | * fc_eh_host_reset() - The reset function will reset the ports on the host. | 2009 | * fc_eh_host_reset() - Reset a Scsi_Host. |
1939 | * @sc_cmd: scsi command | 2010 | * @sc_cmd: The SCSI command that identifies the SCSI host to be reset |
1940 | */ | 2011 | */ |
1941 | int fc_eh_host_reset(struct scsi_cmnd *sc_cmd) | 2012 | int fc_eh_host_reset(struct scsi_cmnd *sc_cmd) |
1942 | { | 2013 | { |
1943 | struct Scsi_Host *shost = sc_cmd->device->host; | 2014 | struct Scsi_Host *shost = sc_cmd->device->host; |
1944 | struct fc_lport *lp = shost_priv(shost); | 2015 | struct fc_lport *lport = shost_priv(shost); |
1945 | unsigned long wait_tmo; | 2016 | unsigned long wait_tmo; |
1946 | 2017 | ||
1947 | FC_SCSI_DBG(lp, "Resetting host\n"); | 2018 | FC_SCSI_DBG(lport, "Resetting host\n"); |
1948 | 2019 | ||
1949 | lp->tt.lport_reset(lp); | 2020 | lport->tt.lport_reset(lport); |
1950 | wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT; | 2021 | wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT; |
1951 | while (!fc_fcp_lport_queue_ready(lp) && time_before(jiffies, wait_tmo)) | 2022 | while (!fc_fcp_lport_queue_ready(lport) && time_before(jiffies, |
2023 | wait_tmo)) | ||
1952 | msleep(1000); | 2024 | msleep(1000); |
1953 | 2025 | ||
1954 | if (fc_fcp_lport_queue_ready(lp)) { | 2026 | if (fc_fcp_lport_queue_ready(lport)) { |
1955 | shost_printk(KERN_INFO, shost, "libfc: Host reset succeeded " | 2027 | shost_printk(KERN_INFO, shost, "libfc: Host reset succeeded " |
1956 | "on port (%6x)\n", fc_host_port_id(lp->host)); | 2028 | "on port (%6x)\n", fc_host_port_id(lport->host)); |
1957 | return SUCCESS; | 2029 | return SUCCESS; |
1958 | } else { | 2030 | } else { |
1959 | shost_printk(KERN_INFO, shost, "libfc: Host reset failed, " | 2031 | shost_printk(KERN_INFO, shost, "libfc: Host reset failed, " |
1960 | "port (%6x) is not ready.\n", | 2032 | "port (%6x) is not ready.\n", |
1961 | fc_host_port_id(lp->host)); | 2033 | fc_host_port_id(lport->host)); |
1962 | return FAILED; | 2034 | return FAILED; |
1963 | } | 2035 | } |
1964 | } | 2036 | } |
1965 | EXPORT_SYMBOL(fc_eh_host_reset); | 2037 | EXPORT_SYMBOL(fc_eh_host_reset); |
1966 | 2038 | ||
1967 | /** | 2039 | /** |
1968 | * fc_slave_alloc() - configure queue depth | 2040 | * fc_slave_alloc() - Configure the queue depth of a Scsi_Host |
1969 | * @sdev: scsi device | 2041 | * @sdev: The SCSI device that identifies the SCSI host |
1970 | * | 2042 | * |
1971 | * Configures queue depth based on host's cmd_per_len. If not set | 2043 | * Configures queue depth based on host's cmd_per_len. If not set |
1972 | * then we use the libfc default. | 2044 | * then we use the libfc default. |
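A sketch of the wait loop used by fc_eh_host_reset() above: trigger the reset, then poll a readiness probe once per second until a deadline passes. The probe is caller-supplied here; in libfc it is fc_fcp_lport_queue_ready():

#include <stdbool.h>
#include <time.h>
#include <unistd.h>

/* Returns true if the port became ready before the timeout expired. */
static bool toy_wait_for_ready(bool (*ready)(void), unsigned int timeout_secs)
{
	time_t deadline = time(NULL) + timeout_secs;

	while (!ready() && time(NULL) < deadline)
		sleep(1);               /* the kernel loop uses msleep(1000) */

	return ready();             /* true => report SUCCESS, false => FAILED */
}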
@@ -1988,6 +2060,12 @@ int fc_slave_alloc(struct scsi_device *sdev) | |||
1988 | } | 2060 | } |
1989 | EXPORT_SYMBOL(fc_slave_alloc); | 2061 | EXPORT_SYMBOL(fc_slave_alloc); |
1990 | 2062 | ||
2063 | /** | ||
2064 | * fc_change_queue_depth() - Change a device's queue depth | ||
2065 | * @sdev: The SCSI device whose queue depth is to change | ||
2066 | * @qdepth: The new queue depth | ||
2067 | * @reason: The reason for the change | ||
2068 | */ | ||
1991 | int fc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason) | 2069 | int fc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason) |
1992 | { | 2070 | { |
1993 | switch (reason) { | 2071 | switch (reason) { |
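For illustration of the switch (reason) dispatch shown above, a toy version of a queue-depth change handler; the reason codes and the back-off policy here are hypothetical, not libfc's actual handling of the SCSI_QDEPTH_* values:

/* Hypothetical reason codes for a queue-depth change request. */
enum toy_qd_reason { TOY_QD_DEFAULT, TOY_QD_QFULL, TOY_QD_RAMP_UP };

/* Accept explicit requests, back off on queue-full feedback, and reject
 * reasons the driver does not handle. */
static int toy_change_queue_depth(int cur_depth, int new_depth,
				  enum toy_qd_reason reason)
{
	switch (reason) {
	case TOY_QD_DEFAULT:
	case TOY_QD_RAMP_UP:
		return new_depth;                          /* honour the request */
	case TOY_QD_QFULL:
		return cur_depth > 1 ? cur_depth - 1 : 1;  /* shrink the queue */
	default:
		return -1;                                 /* unsupported reason */
	}
}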
@@ -2007,6 +2085,11 @@ int fc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason) | |||
2007 | } | 2085 | } |
2008 | EXPORT_SYMBOL(fc_change_queue_depth); | 2086 | EXPORT_SYMBOL(fc_change_queue_depth); |
2009 | 2087 | ||
2088 | /** | ||
2089 | * fc_change_queue_type() - Change a device's queue type | ||
2090 | * @sdev: The SCSI device whose queue type is to change | ||
2091 | * @tag_type: Identifier for queue type | ||
2092 | */ | ||
2010 | int fc_change_queue_type(struct scsi_device *sdev, int tag_type) | 2093 | int fc_change_queue_type(struct scsi_device *sdev, int tag_type) |
2011 | { | 2094 | { |
2012 | if (sdev->tagged_supported) { | 2095 | if (sdev->tagged_supported) { |
@@ -2022,17 +2105,21 @@ int fc_change_queue_type(struct scsi_device *sdev, int tag_type) | |||
2022 | } | 2105 | } |
2023 | EXPORT_SYMBOL(fc_change_queue_type); | 2106 | EXPORT_SYMBOL(fc_change_queue_type); |
2024 | 2107 | ||
2025 | void fc_fcp_destroy(struct fc_lport *lp) | 2108 | /** |
2109 | * fc_fcp_destroy() - Tear down the FCP layer for a given local port | ||
2110 | * @lport: The local port that no longer needs the FCP layer | ||
2111 | */ | ||
2112 | void fc_fcp_destroy(struct fc_lport *lport) | ||
2026 | { | 2113 | { |
2027 | struct fc_fcp_internal *si = fc_get_scsi_internal(lp); | 2114 | struct fc_fcp_internal *si = fc_get_scsi_internal(lport); |
2028 | 2115 | ||
2029 | if (!list_empty(&si->scsi_pkt_queue)) | 2116 | if (!list_empty(&si->scsi_pkt_queue)) |
2030 | printk(KERN_ERR "libfc: Leaked SCSI packets when destroying " | 2117 | printk(KERN_ERR "libfc: Leaked SCSI packets when destroying " |
2031 | "port (%6x)\n", fc_host_port_id(lp->host)); | 2118 | "port (%6x)\n", fc_host_port_id(lport->host)); |
2032 | 2119 | ||
2033 | mempool_destroy(si->scsi_pkt_pool); | 2120 | mempool_destroy(si->scsi_pkt_pool); |
2034 | kfree(si); | 2121 | kfree(si); |
2035 | lp->scsi_priv = NULL; | 2122 | lport->scsi_priv = NULL; |
2036 | } | 2123 | } |
2037 | EXPORT_SYMBOL(fc_fcp_destroy); | 2124 | EXPORT_SYMBOL(fc_fcp_destroy); |
2038 | 2125 | ||
@@ -2058,24 +2145,28 @@ void fc_destroy_fcp() | |||
2058 | kmem_cache_destroy(scsi_pkt_cachep); | 2145 | kmem_cache_destroy(scsi_pkt_cachep); |
2059 | } | 2146 | } |
2060 | 2147 | ||
2061 | int fc_fcp_init(struct fc_lport *lp) | 2148 | /** |
2149 | * fc_fcp_init() - Initialize the FCP layer for a local port | ||
2150 | * @lport: The local port to initialize the FCP layer for | ||
2151 | */ | ||
2152 | int fc_fcp_init(struct fc_lport *lport) | ||
2062 | { | 2153 | { |
2063 | int rc; | 2154 | int rc; |
2064 | struct fc_fcp_internal *si; | 2155 | struct fc_fcp_internal *si; |
2065 | 2156 | ||
2066 | if (!lp->tt.fcp_cmd_send) | 2157 | if (!lport->tt.fcp_cmd_send) |
2067 | lp->tt.fcp_cmd_send = fc_fcp_cmd_send; | 2158 | lport->tt.fcp_cmd_send = fc_fcp_cmd_send; |
2068 | 2159 | ||
2069 | if (!lp->tt.fcp_cleanup) | 2160 | if (!lport->tt.fcp_cleanup) |
2070 | lp->tt.fcp_cleanup = fc_fcp_cleanup; | 2161 | lport->tt.fcp_cleanup = fc_fcp_cleanup; |
2071 | 2162 | ||
2072 | if (!lp->tt.fcp_abort_io) | 2163 | if (!lport->tt.fcp_abort_io) |
2073 | lp->tt.fcp_abort_io = fc_fcp_abort_io; | 2164 | lport->tt.fcp_abort_io = fc_fcp_abort_io; |
2074 | 2165 | ||
2075 | si = kzalloc(sizeof(struct fc_fcp_internal), GFP_KERNEL); | 2166 | si = kzalloc(sizeof(struct fc_fcp_internal), GFP_KERNEL); |
2076 | if (!si) | 2167 | if (!si) |
2077 | return -ENOMEM; | 2168 | return -ENOMEM; |
2078 | lp->scsi_priv = si; | 2169 | lport->scsi_priv = si; |
2079 | INIT_LIST_HEAD(&si->scsi_pkt_queue); | 2170 | INIT_LIST_HEAD(&si->scsi_pkt_queue); |
2080 | 2171 | ||
2081 | si->scsi_pkt_pool = mempool_create_slab_pool(2, scsi_pkt_cachep); | 2172 | si->scsi_pkt_pool = mempool_create_slab_pool(2, scsi_pkt_cachep); |
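Finally, a sketch of the initialization pattern used by fc_fcp_init(): install default handlers only where the low-level driver left the template hooks unset, then allocate the per-port private data. All toy_* names are hypothetical stand-ins:

#include <stdlib.h>

struct toy_ops  { void (*fcp_cmd_send)(void); void (*fcp_cleanup)(void); };
struct toy_port { struct toy_ops tt; void *scsi_priv; };

static void toy_default_cmd_send(void) { /* would build and send the FCP_CMND */ }
static void toy_default_cleanup(void)  { /* would abort outstanding packets */ }

/* Fill in missing template hooks and allocate per-port private data. */
static int toy_fcp_init(struct toy_port *lport)
{
	if (!lport->tt.fcp_cmd_send)
		lport->tt.fcp_cmd_send = toy_default_cmd_send;
	if (!lport->tt.fcp_cleanup)
		lport->tt.fcp_cleanup = toy_default_cleanup;

	lport->scsi_priv = calloc(1, 64);  /* opaque stand-in for fc_fcp_internal */
	if (!lport->scsi_priv)
		return -1;                 /* the kernel returns -ENOMEM */
	return 0;
}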