author	Linus Torvalds <torvalds@g5.osdl.org>	2005-09-12 23:02:18 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-09-12 23:02:18 -0400
commit	35d91f75c2c9548e606e813413f03c5cc35da969 (patch)
tree	2a1a3f32320409303247a72ead35107c271abb17
parent	9401c705f2a6a7e5df102f6443dba395c3c5e5a8 (diff)
parent	70c83e110541e894fa16aee0f57bcf4207b33e7b (diff)
Merge master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-for-linus-2.6
-rw-r--r--  Documentation/scsi/00-INDEX            |    2
-rw-r--r--  Documentation/scsi/scsi_eh.txt         |  479
-rw-r--r--  drivers/block/scsi_ioctl.c             |    1
-rw-r--r--  drivers/ieee1394/sbp2.c                |    8
-rw-r--r--  drivers/scsi/3w-9xxx.c                 |   30
-rw-r--r--  drivers/scsi/Kconfig                   |    7
-rw-r--r--  drivers/scsi/Makefile                  |    1
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c          |   22
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c       |    7
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h            |   17
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c          |    7
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c        |   18
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c        |    4
-rw-r--r--  drivers/scsi/scsi_lib.c                |  138
-rw-r--r--  drivers/scsi/scsi_priv.h               |    1
-rw-r--r--  drivers/scsi/scsi_scan.c               |   88
-rw-r--r--  drivers/scsi/scsi_sysfs.c              |   28
-rw-r--r--  drivers/scsi/scsi_transport_sas.c      |  820
-rw-r--r--  drivers/scsi/sg.c                      |    4
-rw-r--r--  include/scsi/scsi_device.h             |    4
-rw-r--r--  include/scsi/scsi_transport_fc.h       |    8
-rw-r--r--  include/scsi/scsi_transport_sas.h      |  100
22 files changed, 1648 insertions, 146 deletions
diff --git a/Documentation/scsi/00-INDEX b/Documentation/scsi/00-INDEX
index f9cb5bdcce41..fef92ebf266f 100644
--- a/Documentation/scsi/00-INDEX
+++ b/Documentation/scsi/00-INDEX
@@ -60,6 +60,8 @@ scsi.txt
60 - short blurb on using SCSI support as a module. 60 - short blurb on using SCSI support as a module.
61scsi_mid_low_api.txt 61scsi_mid_low_api.txt
62 - info on API between SCSI layer and low level drivers 62 - info on API between SCSI layer and low level drivers
63scsi_eh.txt
64 - info on SCSI midlayer error handling infrastructure
63st.txt 65st.txt
64 - info on scsi tape driver 66 - info on scsi tape driver
65sym53c500_cs.txt 67sym53c500_cs.txt
diff --git a/Documentation/scsi/scsi_eh.txt b/Documentation/scsi/scsi_eh.txt
new file mode 100644
index 000000000000..534a50922a7b
--- /dev/null
+++ b/Documentation/scsi/scsi_eh.txt
@@ -0,0 +1,479 @@
1
2SCSI EH
3======================================
4
5	This document describes the SCSI midlayer error handling infrastructure.
6Please refer to Documentation/scsi/scsi_mid_low_api.txt for more
7information regarding the SCSI midlayer.
8
9TABLE OF CONTENTS
10
11[1] How SCSI commands travel through the midlayer and to EH
12 [1-1] struct scsi_cmnd
13 [1-2] How do scmd's get completed?
14 [1-2-1] Completing a scmd w/ scsi_done
15 [1-2-2] Completing a scmd w/ timeout
16 [1-3] How EH takes over
17[2] How SCSI EH works
18 [2-1] EH through fine-grained callbacks
19 [2-1-1] Overview
20 [2-1-2] Flow of scmds through EH
21 [2-1-3] Flow of control
22 [2-2] EH through hostt->eh_strategy_handler()
23 [2-2-1] Pre hostt->eh_strategy_handler() SCSI midlayer conditions
24 [2-2-2] Post hostt->eh_strategy_handler() SCSI midlayer conditions
25 [2-2-3] Things to consider
26
27
28[1] How SCSI commands travel through the midlayer and to EH
29
30[1-1] struct scsi_cmnd
31
32 Each SCSI command is represented by struct scsi_cmnd (== scmd). A
33scmd has two list_head's to link itself into lists. The two are
34scmd->list and scmd->eh_entry. The former is used for the free list
35or the per-device allocated scmd list and is not of much interest to
36this EH discussion. The latter is used for completion and EH lists
37and, unless otherwise stated, scmds are always linked using
38scmd->eh_entry in this discussion.
39
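 For illustration, the two fields look roughly like this (a trimmed-down
excerpt; see include/scsi/scsi_cmnd.h for the real, much larger structure):

struct scsi_cmnd {
	/* ... many other fields ... */
	struct list_head list;      /* free list / per-device scmd list   */
	struct list_head eh_entry;  /* completion and EH lists (this doc) */
	/* ... */
};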
40
41[1-2] How do scmd's get completed?
42
43 Once an LLDD gets hold of a scmd, either the LLDD will complete the
44command by calling the scsi_done callback passed from the midlayer when
45invoking hostt->queuecommand(), or the SCSI midlayer will time it out.
46
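 For reference, this is the shape of the 2.6-era hook through which the
completion callback is handed to the LLDD (prototype only; the body of a
real queuecommand implementation is driver-specific):

	/* member of struct scsi_host_template */
	int (* queuecommand)(struct scsi_cmnd *scmd,
			     void (*done)(struct scsi_cmnd *));

	/* the LLDD later completes the command by setting scmd->result
	 * and invoking the saved callback, which for non-EH commands is
	 * scsi_done */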
47
48[1-2-1] Completing a scmd w/ scsi_done
49
50 For all non-EH commands, scsi_done() is the completion callback. It
51does the following.
52
53 1. Delete timeout timer. If it fails, it means that timeout timer
54 has expired and is going to finish the command. Just return.
55
56    2. Link scmd to per-cpu scsi_done_q using scmd->eh_entry
57
58 3. Raise SCSI_SOFTIRQ
59
60 SCSI_SOFTIRQ handler scsi_softirq calls scsi_decide_disposition() to
61determine what to do with the command. scsi_decide_disposition()
62looks at the scmd->result value and the sense data to make this
63decision.
64
65 - SUCCESS
66	scsi_finish_command() is invoked for the command. The
67	function does some maintenance chores and notifies completion by
68	calling the scmd->done() callback, which, for fs requests, would
69	be the HLD completion callback - sd:sd_rw_intr, sr:rw_intr,
70	st:st_intr.
71
72 - NEEDS_RETRY
73 - ADD_TO_MLQUEUE
74 scmd is requeued to blk queue.
75
76 - otherwise
77 scsi_eh_scmd_add(scmd, 0) is invoked for the command. See
78	[1-3] for details of this function.
79
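 In rough outline, the dispatch described above looks like the following
sketch (simplified from the scsi_softirq() logic; the reason code passed
to scsi_queue_insert() is illustrative of the idea, not a verbatim copy):

	switch (scsi_decide_disposition(scmd)) {
	case SUCCESS:
		scsi_finish_command(scmd);  /* -> scmd->done(), e.g. sd_rw_intr */
		break;
	case NEEDS_RETRY:
	case ADD_TO_MLQUEUE:
		scsi_queue_insert(scmd, SCSI_MLQUEUE_DEVICE_BUSY);
		break;
	default:
		scsi_eh_scmd_add(scmd, 0);  /* hand over to EH, see [1-3] */
		break;
	}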
80
81[1-2-2] Completing a scmd w/ timeout
82
83 The timeout handler is scsi_times_out(). When a timeout occurs, this
84function
85
86    1. Invokes the optional hostt->eh_timedout() callback. The return
87       value can be one of
88
89 - EH_HANDLED
90 This indicates that eh_timedout() dealt with the timeout. The
91 scmd is passed to __scsi_done() and thus linked into per-cpu
92 scsi_done_q. Normal command completion described in [1-2-1]
93 follows.
94
95 - EH_RESET_TIMER
96 This indicates that more time is required to finish the
97 command. Timer is restarted. This action is counted as a
98 retry and only allowed scmd->allowed + 1(!) times. Once the
99 limit is reached, action for EH_NOT_HANDLED is taken instead.
100
101 *NOTE* This action is racy as the LLDD could finish the scmd
102 after the timeout has expired but before it's added back. In
103 such cases, scsi_done() would think that timeout has occurred
104 and return without doing anything. We lose completion and the
105 command will time out again.
106
107 - EH_NOT_HANDLED
108 This is the same as when eh_timedout() callback doesn't exist.
109 Step #2 is taken.
110
111 2. scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD) is invoked for the
112 command. See [1-3] for more information.
113
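 The timeout path can be sketched as follows (simplified; the retry
accounting for EH_RESET_TIMER and the exact timer helpers are glossed
over, and the enum/field names are as of this kernel era):

	struct scsi_host_template *hostt = scmd->device->host->hostt;
	enum scsi_eh_timer_return rtn = EH_NOT_HANDLED;

	if (hostt->eh_timedout)
		rtn = hostt->eh_timedout(scmd);

	switch (rtn) {
	case EH_HANDLED:
		__scsi_done(scmd);          /* normal completion, see [1-2-1] */
		break;
	case EH_RESET_TIMER:
		/* restart the timer; counted as a retry, at most
		 * scmd->allowed + 1 times before falling back to the
		 * EH_NOT_HANDLED behaviour */
		break;
	case EH_NOT_HANDLED:
	default:
		scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD);
		break;
	}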
114
115[1-3] How EH takes over
116
117 scmds enter EH via scsi_eh_scmd_add(), which does the following.
118
119 1. Turns on scmd->eh_eflags as requested. It's 0 for error
120 completions and SCSI_EH_CANCEL_CMD for timeouts.
121
122 2. Links scmd->eh_entry to shost->eh_cmd_q
123
124 3. Sets SHOST_RECOVERY bit in shost->shost_state
125
126 4. Increments shost->host_failed
127
128 5. Wakes up SCSI EH thread if shost->host_busy == shost->host_failed
129
130 As can be seen above, once any scmd is added to shost->eh_cmd_q,
131the SHOST_RECOVERY shost_state bit is turned on. This prevents any new
132scmd from being issued from the blk queue to the host; eventually, all
133scmds on the host either complete normally, fail and get added to
134eh_cmd_q, or time out and get added to shost->eh_cmd_q.
135
136 If all scmds either complete or fail, the number of in-flight scmds
137becomes equal to the number of failed scmds - i.e. shost->host_busy ==
138shost->host_failed. This wakes up SCSI EH thread. So, once woken up,
139SCSI EH thread can expect that all in-flight commands have failed and
140are linked on shost->eh_cmd_q.
141
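 Putting the steps of [1-3] together, scsi_eh_scmd_add() can be sketched
roughly as below (illustrative; the real function runs under
shost->host_lock and uses midlayer helpers for some of these steps):

	scmd->eh_eflags |= eh_flag;       /* 0 or SCSI_EH_CANCEL_CMD */
	list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q);
	/* turn on SHOST_RECOVERY in shost->shost_state so that no new
	 * commands are issued to the host while recovery is in progress */
	shost->host_failed++;
	if (shost->host_busy == shost->host_failed)
		scsi_eh_wakeup(shost);    /* wake the SCSI EH thread */
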
142 Note that this does not mean lower layers are quiescent. If a LLDD
143completed a scmd with error status, the LLDD and lower layers are
144assumed to forget about the scmd at that point. However, if a scmd
145has timed out, unless hostt->eh_timedout() made lower layers forget
146about the scmd, which currently no LLDD does, the command is still
147active as far as the lower layers are concerned, and completion could
148occur at any time. Of course, all such completions are ignored as the
149timer has already expired.
150
151 We'll talk about how SCSI EH takes actions to abort - make LLDD
152forget about - timed out scmds later.
153
154
155[2] How SCSI EH works
156
157 LLDD's can implement SCSI EH actions in one of the following two
158ways.
159
160 - Fine-grained EH callbacks
161 LLDD can implement fine-grained EH callbacks and let SCSI
162 midlayer drive error handling and call appropriate callbacks.
163	This will be discussed further in [2-1].
164
165 - eh_strategy_handler() callback
166	This is one big callback which should perform the whole error
167	handling. As such, it should do all the chores the SCSI midlayer
168	performs during recovery. This will be discussed in [2-2].
169
170 Once recovery is complete, SCSI EH resumes normal operation by
171calling scsi_restart_operations(), which
172
173 1. Checks if door locking is needed and locks door.
174
175 2. Clears SHOST_RECOVERY shost_state bit
176
177 3. Wakes up waiters on shost->host_wait. This occurs if someone
178 calls scsi_block_when_processing_errors() on the host.
179 (*QUESTION* why is it needed? All operations will be blocked
180 anyway after it reaches blk queue.)
181
182    4. Kicks (restarts) the request queues of all devices on the host
183
184
185[2-1] EH through fine-grained callbacks
186
187[2-1-1] Overview
188
189 If eh_strategy_handler() is not present, the SCSI midlayer takes
190charge of driving error handling. EH has two goals - make the LLDD,
191host and device forget about timed-out scmds and make them ready for
192new commands. A scmd is said to be recovered if the scmd is forgotten
193by lower layers and lower layers are ready to process or fail the scmd
194again.
195
196 To achieve these goals, EH performs recovery actions with increasing
197severity. Some actions are performed by issuing SCSI commands and
198others are performed by invoking one of the following fine-grained
199hostt EH callbacks. Callbacks may be omitted; omitted ones are
200considered to always fail.
201
202int (* eh_abort_handler)(struct scsi_cmnd *);
203int (* eh_device_reset_handler)(struct scsi_cmnd *);
204int (* eh_bus_reset_handler)(struct scsi_cmnd *);
205int (* eh_host_reset_handler)(struct scsi_cmnd *);
206
207 Higher-severity actions are taken only when lower-severity actions
208cannot recover some of failed scmds. Also, note that failure of the
209highest-severity action means EH failure and results in offlining of
210all unrecovered devices.
211
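 An LLDD opting for this model simply fills in the corresponding members
of its scsi_host_template; a hypothetical driver (all my_* names below are
made up for illustration) would declare something like:

static struct scsi_host_template my_driver_template = {
	.module			 = THIS_MODULE,
	.name			 = "my_lldd",
	.queuecommand		 = my_queuecommand,
	.eh_abort_handler	 = my_eh_abort_handler,
	.eh_device_reset_handler = my_eh_device_reset_handler,
	.eh_bus_reset_handler	 = my_eh_bus_reset_handler,
	.eh_host_reset_handler	 = my_eh_host_reset_handler,
	/* any eh_* callback left NULL is treated as always failing */
};
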
212 During recovery, the following rules are followed
213
214 - Recovery actions are performed on failed scmds on the to do list,
215 eh_work_q. If a recovery action succeeds for a scmd, recovered
216 scmds are removed from eh_work_q.
217
218   Note that a single recovery action on a scmd can recover multiple
219   scmds, e.g. resetting a device recovers all failed scmds on the
220   device.
221
222 - Higher severity actions are taken iff eh_work_q is not empty after
223 lower severity actions are complete.
224
225 - EH reuses failed scmds to issue commands for recovery. For
226 timed-out scmds, SCSI EH ensures that LLDD forgets about a scmd
227 before reusing it for EH commands.
228
229 When a scmd is recovered, the scmd is moved from eh_work_q to EH
230local eh_done_q using scsi_eh_finish_cmd(). After all scmds are
231recovered (eh_work_q is empty), scsi_eh_flush_done_q() is invoked to
232either retry or error-finish (notify upper layer of failure) recovered
233scmds.
234
235 A scmd is retried iff its sdev is still online (not offlined during
236EH), REQ_FAILFAST is not set, and ++scmd->retries is less than
237scmd->allowed.
238
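 The retry condition above corresponds roughly to the following test on
each scmd taken off eh_done_q (a sketch of the decision, not the verbatim
scsi_eh_flush_done_q() source):

	if (scsi_device_online(scmd->device) &&
	    !blk_noretry_request(scmd->request) &&    /* REQ_FAILFAST unset */
	    ++scmd->retries < scmd->allowed)
		scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY); /* retry */
	else
		scsi_finish_command(scmd); /* notify upper layer of failure */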
239
240[2-1-2] Flow of scmds through EH
241
242 1. Error completion / time out
243 ACTION: scsi_eh_scmd_add() is invoked for scmd
244 - set scmd->eh_eflags
245 - add scmd to shost->eh_cmd_q
246 - set SHOST_RECOVERY
247 - shost->host_failed++
248 LOCKING: shost->host_lock
249
250 2. EH starts
251 ACTION: move all scmds to EH's local eh_work_q. shost->eh_cmd_q
252 is cleared.
253 LOCKING: shost->host_lock (not strictly necessary, just for
254 consistency)
255
256 3. scmd recovered
257 ACTION: scsi_eh_finish_cmd() is invoked to EH-finish scmd
258 - shost->host_failed--
259 - clear scmd->eh_eflags
260 - scsi_setup_cmd_retry()
261 - move from local eh_work_q to local eh_done_q
262 LOCKING: none
263
264 4. EH completes
265 ACTION: scsi_eh_flush_done_q() retries scmds or notifies upper
266 layer of failure.
267 - scmd is removed from eh_done_q and scmd->eh_entry is cleared
268 - if retry is necessary, scmd is requeued using
269 scsi_queue_insert()
270 - otherwise, scsi_finish_command() is invoked for scmd
271 LOCKING: queue or finish function performs appropriate locking
272
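 Step 3 above is compact enough to sketch directly (condensed from the
midlayer source; done_q is EH's local eh_done_q):

static void scsi_eh_finish_cmd(struct scsi_cmnd *scmd,
			       struct list_head *done_q)
{
	scmd->device->host->host_failed--;
	scmd->eh_eflags = 0;              /* clear EH flags            */
	scsi_setup_cmd_retry(scmd);       /* undo EH's command reuse   */
	list_move_tail(&scmd->eh_entry, done_q);
}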
273
274[2-1-3] Flow of control
275
276 EH through fine-grained callbacks starts from scsi_unjam_host().
277
278<<scsi_unjam_host>>
279
280 1. Lock shost->host_lock, splice_init shost->eh_cmd_q into local
281 eh_work_q and unlock host_lock. Note that shost->eh_cmd_q is
282 cleared by this action.
283
284 2. Invoke scsi_eh_get_sense.
285
286 <<scsi_eh_get_sense>>
287
288	This action is taken for each error-completed
289	(!SCSI_EH_CANCEL_CMD) command without valid sense data. Most
290 SCSI transports/LLDDs automatically acquire sense data on
291 command failures (autosense). Autosense is recommended for
292	performance reasons and because sense information could get out
293	of sync between the occurrence of CHECK CONDITION and this action.
294
295 Note that if autosense is not supported, scmd->sense_buffer
296 contains invalid sense data when error-completing the scmd
297 with scsi_done(). scsi_decide_disposition() always returns
298 FAILED in such cases thus invoking SCSI EH. When the scmd
299 reaches here, sense data is acquired and
300 scsi_decide_disposition() is called again.
301
302	1. Invoke scsi_request_sense() which issues a REQUEST_SENSE
303	   command. If it fails, no action is taken. Note that taking no
304	   action causes higher-severity recovery to be taken for the scmd.
305
306 2. Invoke scsi_decide_disposition() on the scmd
307
308 - SUCCESS
309 scmd->retries is set to scmd->allowed preventing
310 scsi_eh_flush_done_q() from retrying the scmd and
311 scsi_eh_finish_cmd() is invoked.
312
313 - NEEDS_RETRY
314 scsi_eh_finish_cmd() invoked
315
316 - otherwise
317 No action.
318
319 3. If !list_empty(&eh_work_q), invoke scsi_eh_abort_cmds().
320
321 <<scsi_eh_abort_cmds>>
322
323 This action is taken for each timed out command.
324	hostt->eh_abort_handler() is invoked for each scmd. The
325	handler returns SUCCESS if it has succeeded in making the LLDD
326	and all related hardware forget about the scmd.
327
328	If a timed-out scmd is successfully aborted and the sdev is
329 either offline or ready, scsi_eh_finish_cmd() is invoked for
330 the scmd. Otherwise, the scmd is left in eh_work_q for
331 higher-severity actions.
332
333 Note that both offline and ready status mean that the sdev is
334 ready to process new scmds, where processing also implies
335 immediate failing; thus, if a sdev is in one of the two
336 states, no further recovery action is needed.
337
338 Device readiness is tested using scsi_eh_tur() which issues
339 TEST_UNIT_READY command. Note that the scmd must have been
340 aborted successfully before reusing it for TEST_UNIT_READY.
341
342 4. If !list_empty(&eh_work_q), invoke scsi_eh_ready_devs()
343
344 <<scsi_eh_ready_devs>>
345
346	This function takes increasingly severe measures to make
347	failed sdevs ready for new commands.
348
349 1. Invoke scsi_eh_stu()
350
351 <<scsi_eh_stu>>
352
353	    For each sdev which has failed scmds with valid sense data
354	    for which scsi_check_sense()'s verdict is FAILED, a
355	    START_STOP_UNIT command is issued with start=1. Note that
356	    as we explicitly choose error-completed scmds, it is known
357	    that lower layers have forgotten about the scmd and we can
358	    reuse it for STU.
359
360 If STU succeeds and the sdev is either offline or ready,
361 all failed scmds on the sdev are EH-finished with
362 scsi_eh_finish_cmd().
363
364	    *NOTE* If hostt->eh_abort_handler() isn't implemented or has
365	    failed, we may still have timed-out scmds at this point
366	    and STU doesn't make lower layers forget about those
367	    scmds. Yet, this function EH-finishes all scmds on the sdev
368	    if STU succeeds, leaving lower layers in an inconsistent
369	    state. It seems that the STU action should be taken only
370	    when a sdev has no timed-out scmds.
371
372 2. If !list_empty(&eh_work_q), invoke scsi_eh_bus_device_reset().
373
374 <<scsi_eh_bus_device_reset>>
375
376 This action is very similar to scsi_eh_stu() except that,
377 instead of issuing STU, hostt->eh_device_reset_handler()
378 is used. Also, as we're not issuing SCSI commands and
379 resetting clears all scmds on the sdev, there is no need
380 to choose error-completed scmds.
381
382 3. If !list_empty(&eh_work_q), invoke scsi_eh_bus_reset()
383
384 <<scsi_eh_bus_reset>>
385
386 hostt->eh_bus_reset_handler() is invoked for each channel
387 with failed scmds. If bus reset succeeds, all failed
388 scmds on all ready or offline sdevs on the channel are
389 EH-finished.
390
391 4. If !list_empty(&eh_work_q), invoke scsi_eh_host_reset()
392
393 <<scsi_eh_host_reset>>
394
395 This is the last resort. hostt->eh_host_reset_handler()
396 is invoked. If host reset succeeds, all failed scmds on
397 all ready or offline sdevs on the host are EH-finished.
398
399 5. If !list_empty(&eh_work_q), invoke scsi_eh_offline_sdevs()
400
401 <<scsi_eh_offline_sdevs>>
402
403 Take all sdevs which still have unrecovered scmds offline
404 and EH-finish the scmds.
405
406 5. Invoke scsi_eh_flush_done_q().
407
408 <<scsi_eh_flush_done_q>>
409
410 At this point all scmds are recovered (or given up) and
411 put on eh_done_q by scsi_eh_finish_cmd(). This function
412 flushes eh_done_q by either retrying or notifying upper
413 layer of failure of the scmds.
414
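 The escalation described above boils down to the following shape inside
scsi_unjam_host() (a condensed sketch; the locked splice of shost->eh_cmd_q
into eh_work_q and the final bookkeeping are omitted):

	if (!scsi_eh_get_sense(&eh_work_q, &eh_done_q))
		if (!scsi_eh_abort_cmds(&eh_work_q, &eh_done_q))
			scsi_eh_ready_devs(shost, &eh_work_q, &eh_done_q);

	scsi_eh_flush_done_q(&eh_done_q);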
415
416[2-2] EH through hostt->eh_strategy_handler()
417
418 hostt->eh_strategy_handler() is invoked in place of
419scsi_unjam_host() and is responsible for the whole recovery process.
420On completion, the handler should have made lower layers forget about
421all failed scmds and made them either ready for new commands or offline.
422Also, it should perform the SCSI EH maintenance chores to maintain the
423integrity of the SCSI midlayer. IOW, of the steps described in [2-1-2],
424all steps except for #1 must be implemented by eh_strategy_handler().
425
426
427[2-2-1] Pre hostt->eh_strategy_handler() SCSI midlayer conditions
428
429 The following conditions are true on entry to the handler.
430
431    - Each failed scmd's eh_eflags field is set appropriately.
432
433    - Each failed scmd is linked on shost->eh_cmd_q by scmd->eh_entry.
434
435 - SHOST_RECOVERY is set.
436
437 - shost->host_failed == shost->host_busy
438
439
440[2-2-2] Post hostt->eh_strategy_handler() SCSI midlayer conditions
441
442 The following conditions must be true on exit from the handler.
443
444 - shost->host_failed is zero.
445
446 - Each scmd's eh_eflags field is cleared.
447
448 - Each scmd is in such a state that scsi_setup_cmd_retry() on the
449 scmd doesn't make any difference.
450
451 - shost->eh_cmd_q is cleared.
452
453 - Each scmd->eh_entry is cleared.
454
455 - Either scsi_queue_insert() or scsi_finish_command() is called on
456 each scmd. Note that the handler is free to use scmd->retries and
457 ->allowed to limit the number of retries.
458
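 A hypothetical eh_strategy_handler() satisfying the conditions above has
roughly the skeleton below (all my_* names are invented for illustration;
locking and error handling are omitted, and the return-value convention is
not covered by this document):

static int my_eh_strategy_handler(struct Scsi_Host *shost)
{
	struct scsi_cmnd *scmd, *next;

	list_for_each_entry_safe(scmd, next, &shost->eh_cmd_q, eh_entry) {
		my_abort_or_reset(scmd);    /* make hardware forget scmd */

		scmd->eh_eflags = 0;
		scsi_setup_cmd_retry(scmd);
		list_del_init(&scmd->eh_entry);  /* clear scmd->eh_entry */
		shost->host_failed--;

		if (++scmd->retries < scmd->allowed)
			scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY);
		else
			scsi_finish_command(scmd);
	}
	return SUCCESS;
}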
459
460[2-2-3] Things to consider
461
462    - Know that timed-out scmds are still active in the lower layers. Make
463 lower layers forget about them before doing anything else with
464 those scmds.
465
466 - For consistency, when accessing/modifying shost data structure,
467 grab shost->host_lock.
468
469 - On completion, each failed sdev must have forgotten about all
470 active scmds.
471
472 - On completion, each failed sdev must be ready for new commands or
473 offline.
474
475
476--
477Tejun Heo
478htejun@gmail.com
47911th September 2005
diff --git a/drivers/block/scsi_ioctl.c b/drivers/block/scsi_ioctl.c
index abb2df249fd3..856c2278e9d0 100644
--- a/drivers/block/scsi_ioctl.c
+++ b/drivers/block/scsi_ioctl.c
@@ -123,6 +123,7 @@ static int verify_command(struct file *file, unsigned char *cmd)
123 safe_for_read(READ_12), 123 safe_for_read(READ_12),
124 safe_for_read(READ_16), 124 safe_for_read(READ_16),
125 safe_for_read(READ_BUFFER), 125 safe_for_read(READ_BUFFER),
126 safe_for_read(READ_DEFECT_DATA),
126 safe_for_read(READ_LONG), 127 safe_for_read(READ_LONG),
127 safe_for_read(INQUIRY), 128 safe_for_read(INQUIRY),
128 safe_for_read(MODE_SENSE), 129 safe_for_read(MODE_SENSE),
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index 627af507643a..de88218ef7cc 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -790,7 +790,7 @@ static void sbp2_host_reset(struct hpsb_host *host)
790static int sbp2_start_device(struct scsi_id_instance_data *scsi_id) 790static int sbp2_start_device(struct scsi_id_instance_data *scsi_id)
791{ 791{
792 struct sbp2scsi_host_info *hi = scsi_id->hi; 792 struct sbp2scsi_host_info *hi = scsi_id->hi;
793 struct scsi_device *sdev; 793 int error;
794 794
795 SBP2_DEBUG("sbp2_start_device"); 795 SBP2_DEBUG("sbp2_start_device");
796 796
@@ -939,10 +939,10 @@ alloc_fail:
939 sbp2_max_speed_and_size(scsi_id); 939 sbp2_max_speed_and_size(scsi_id);
940 940
941 /* Add this device to the scsi layer now */ 941 /* Add this device to the scsi layer now */
942 sdev = scsi_add_device(scsi_id->scsi_host, 0, scsi_id->ud->id, 0); 942 error = scsi_add_device(scsi_id->scsi_host, 0, scsi_id->ud->id, 0);
943 if (IS_ERR(sdev)) { 943 if (error) {
944 SBP2_ERR("scsi_add_device failed"); 944 SBP2_ERR("scsi_add_device failed");
945 return PTR_ERR(sdev); 945 return error;
946 } 946 }
947 947
948 return 0; 948 return 0;
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index bc6e4627c7a1..a6ac61611f35 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -59,6 +59,7 @@
59 Fix 'handled=1' ISR usage, remove bogus IRQ check. 59 Fix 'handled=1' ISR usage, remove bogus IRQ check.
60 Remove un-needed eh_abort handler. 60 Remove un-needed eh_abort handler.
61 Add support for embedded firmware error strings. 61 Add support for embedded firmware error strings.
62 2.26.02.003 - Correctly handle single sgl's with use_sg=1.
62*/ 63*/
63 64
64#include <linux/module.h> 65#include <linux/module.h>
@@ -81,7 +82,7 @@
81#include "3w-9xxx.h" 82#include "3w-9xxx.h"
82 83
83/* Globals */ 84/* Globals */
84#define TW_DRIVER_VERSION "2.26.02.002" 85#define TW_DRIVER_VERSION "2.26.02.003"
85static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT]; 86static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
86static unsigned int twa_device_extension_count; 87static unsigned int twa_device_extension_count;
87static int twa_major = -1; 88static int twa_major = -1;
@@ -1805,6 +1806,8 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
1805 if (tw_dev->srb[request_id]->request_bufflen < TW_MIN_SGL_LENGTH) { 1806 if (tw_dev->srb[request_id]->request_bufflen < TW_MIN_SGL_LENGTH) {
1806 command_packet->sg_list[0].address = tw_dev->generic_buffer_phys[request_id]; 1807 command_packet->sg_list[0].address = tw_dev->generic_buffer_phys[request_id];
1807 command_packet->sg_list[0].length = TW_MIN_SGL_LENGTH; 1808 command_packet->sg_list[0].length = TW_MIN_SGL_LENGTH;
1809 if (tw_dev->srb[request_id]->sc_data_direction == DMA_TO_DEVICE || tw_dev->srb[request_id]->sc_data_direction == DMA_BIDIRECTIONAL)
1810 memcpy(tw_dev->generic_buffer_virt[request_id], tw_dev->srb[request_id]->request_buffer, tw_dev->srb[request_id]->request_bufflen);
1808 } else { 1811 } else {
1809 buffaddr = twa_map_scsi_single_data(tw_dev, request_id); 1812 buffaddr = twa_map_scsi_single_data(tw_dev, request_id);
1810 if (buffaddr == 0) 1813 if (buffaddr == 0)
@@ -1823,6 +1826,12 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
1823 1826
1824 if (tw_dev->srb[request_id]->use_sg > 0) { 1827 if (tw_dev->srb[request_id]->use_sg > 0) {
1825 if ((tw_dev->srb[request_id]->use_sg == 1) && (tw_dev->srb[request_id]->request_bufflen < TW_MIN_SGL_LENGTH)) { 1828 if ((tw_dev->srb[request_id]->use_sg == 1) && (tw_dev->srb[request_id]->request_bufflen < TW_MIN_SGL_LENGTH)) {
1829 if (tw_dev->srb[request_id]->sc_data_direction == DMA_TO_DEVICE || tw_dev->srb[request_id]->sc_data_direction == DMA_BIDIRECTIONAL) {
1830 struct scatterlist *sg = (struct scatterlist *)tw_dev->srb[request_id]->request_buffer;
1831 char *buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
1832 memcpy(tw_dev->generic_buffer_virt[request_id], buf, sg->length);
1833 kunmap_atomic(buf - sg->offset, KM_IRQ0);
1834 }
1826 command_packet->sg_list[0].address = tw_dev->generic_buffer_phys[request_id]; 1835 command_packet->sg_list[0].address = tw_dev->generic_buffer_phys[request_id];
1827 command_packet->sg_list[0].length = TW_MIN_SGL_LENGTH; 1836 command_packet->sg_list[0].length = TW_MIN_SGL_LENGTH;
1828 } else { 1837 } else {
@@ -1888,11 +1897,20 @@ out:
1888/* This function completes an execute scsi operation */ 1897/* This function completes an execute scsi operation */
1889static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id) 1898static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id)
1890{ 1899{
1891 /* Copy the response if too small */ 1900 if (tw_dev->srb[request_id]->request_bufflen < TW_MIN_SGL_LENGTH &&
1892 if ((tw_dev->srb[request_id]->request_buffer) && (tw_dev->srb[request_id]->request_bufflen < TW_MIN_SGL_LENGTH)) { 1901 (tw_dev->srb[request_id]->sc_data_direction == DMA_FROM_DEVICE ||
1893 memcpy(tw_dev->srb[request_id]->request_buffer, 1902 tw_dev->srb[request_id]->sc_data_direction == DMA_BIDIRECTIONAL)) {
1894 tw_dev->generic_buffer_virt[request_id], 1903 if (tw_dev->srb[request_id]->use_sg == 0) {
1895 tw_dev->srb[request_id]->request_bufflen); 1904 memcpy(tw_dev->srb[request_id]->request_buffer,
1905 tw_dev->generic_buffer_virt[request_id],
1906 tw_dev->srb[request_id]->request_bufflen);
1907 }
1908 if (tw_dev->srb[request_id]->use_sg == 1) {
1909 struct scatterlist *sg = (struct scatterlist *)tw_dev->srb[request_id]->request_buffer;
1910 char *buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
1911 memcpy(buf, tw_dev->generic_buffer_virt[request_id], sg->length);
1912 kunmap_atomic(buf - sg->offset, KM_IRQ0);
1913 }
1896 } 1914 }
1897} /* End twa_scsiop_execute_scsi_complete() */ 1915} /* End twa_scsiop_execute_scsi_complete() */
1898 1916
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 2d21265e650b..20019b82b4a8 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -235,6 +235,13 @@ config SCSI_ISCSI_ATTRS
235 each attached iSCSI device to sysfs, say Y. 235 each attached iSCSI device to sysfs, say Y.
236 Otherwise, say N. 236 Otherwise, say N.
237 237
238config SCSI_SAS_ATTRS
239 tristate "SAS Transport Attributes"
240 depends on SCSI
241 help
242 If you wish to export transport-specific information about
243 each attached SAS device to sysfs, say Y.
244
238endmenu 245endmenu
239 246
240menu "SCSI low-level drivers" 247menu "SCSI low-level drivers"
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 4b4fd94c2674..1e4edbdf2730 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_RAID_ATTRS) += raid_class.o
31obj-$(CONFIG_SCSI_SPI_ATTRS) += scsi_transport_spi.o 31obj-$(CONFIG_SCSI_SPI_ATTRS) += scsi_transport_spi.o
32obj-$(CONFIG_SCSI_FC_ATTRS) += scsi_transport_fc.o 32obj-$(CONFIG_SCSI_FC_ATTRS) += scsi_transport_fc.o
33obj-$(CONFIG_SCSI_ISCSI_ATTRS) += scsi_transport_iscsi.o 33obj-$(CONFIG_SCSI_ISCSI_ATTRS) += scsi_transport_iscsi.o
34obj-$(CONFIG_SCSI_SAS_ATTRS) += scsi_transport_sas.o
34 35
35obj-$(CONFIG_SCSI_AMIGA7XX) += amiga7xx.o 53c7xx.o 36obj-$(CONFIG_SCSI_AMIGA7XX) += amiga7xx.o 53c7xx.o
36obj-$(CONFIG_A3000_SCSI) += a3000.o wd33c93.o 37obj-$(CONFIG_A3000_SCSI) += a3000.o wd33c93.o
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 0e089a42c03a..86eaf6d408d5 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -966,21 +966,21 @@ static void
966lpfc_get_host_fabric_name (struct Scsi_Host *shost) 966lpfc_get_host_fabric_name (struct Scsi_Host *shost)
967{ 967{
968 struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata[0]; 968 struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata[0];
969 u64 nodename; 969 u64 node_name;
970 970
971 spin_lock_irq(shost->host_lock); 971 spin_lock_irq(shost->host_lock);
972 972
973 if ((phba->fc_flag & FC_FABRIC) || 973 if ((phba->fc_flag & FC_FABRIC) ||
974 ((phba->fc_topology == TOPOLOGY_LOOP) && 974 ((phba->fc_topology == TOPOLOGY_LOOP) &&
975 (phba->fc_flag & FC_PUBLIC_LOOP))) 975 (phba->fc_flag & FC_PUBLIC_LOOP)))
976 memcpy(&nodename, &phba->fc_fabparam.nodeName, sizeof(u64)); 976 node_name = wwn_to_u64(phba->fc_fabparam.nodeName.wwn);
977 else 977 else
978 /* fabric is local port if there is no F/FL_Port */ 978 /* fabric is local port if there is no F/FL_Port */
979 memcpy(&nodename, &phba->fc_nodename, sizeof(u64)); 979 node_name = wwn_to_u64(phba->fc_nodename.wwn);
980 980
981 spin_unlock_irq(shost->host_lock); 981 spin_unlock_irq(shost->host_lock);
982 982
983 fc_host_fabric_name(shost) = be64_to_cpu(nodename); 983 fc_host_fabric_name(shost) = node_name;
984} 984}
985 985
986 986
@@ -1103,21 +1103,20 @@ lpfc_get_starget_node_name(struct scsi_target *starget)
1103{ 1103{
1104 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 1104 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1105 struct lpfc_hba *phba = (struct lpfc_hba *) shost->hostdata[0]; 1105 struct lpfc_hba *phba = (struct lpfc_hba *) shost->hostdata[0];
1106 uint64_t node_name = 0; 1106 u64 node_name = 0;
1107 struct lpfc_nodelist *ndlp = NULL; 1107 struct lpfc_nodelist *ndlp = NULL;
1108 1108
1109 spin_lock_irq(shost->host_lock); 1109 spin_lock_irq(shost->host_lock);
1110 /* Search the mapped list for this target ID */ 1110 /* Search the mapped list for this target ID */
1111 list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) { 1111 list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
1112 if (starget->id == ndlp->nlp_sid) { 1112 if (starget->id == ndlp->nlp_sid) {
1113 memcpy(&node_name, &ndlp->nlp_nodename, 1113 node_name = wwn_to_u64(ndlp->nlp_nodename.wwn);
1114 sizeof(struct lpfc_name));
1115 break; 1114 break;
1116 } 1115 }
1117 } 1116 }
1118 spin_unlock_irq(shost->host_lock); 1117 spin_unlock_irq(shost->host_lock);
1119 1118
1120 fc_starget_node_name(starget) = be64_to_cpu(node_name); 1119 fc_starget_node_name(starget) = node_name;
1121} 1120}
1122 1121
1123static void 1122static void
@@ -1125,21 +1124,20 @@ lpfc_get_starget_port_name(struct scsi_target *starget)
1125{ 1124{
1126 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 1125 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1127 struct lpfc_hba *phba = (struct lpfc_hba *) shost->hostdata[0]; 1126 struct lpfc_hba *phba = (struct lpfc_hba *) shost->hostdata[0];
1128 uint64_t port_name = 0; 1127 u64 port_name = 0;
1129 struct lpfc_nodelist *ndlp = NULL; 1128 struct lpfc_nodelist *ndlp = NULL;
1130 1129
1131 spin_lock_irq(shost->host_lock); 1130 spin_lock_irq(shost->host_lock);
1132 /* Search the mapped list for this target ID */ 1131 /* Search the mapped list for this target ID */
1133 list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) { 1132 list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
1134 if (starget->id == ndlp->nlp_sid) { 1133 if (starget->id == ndlp->nlp_sid) {
1135 memcpy(&port_name, &ndlp->nlp_portname, 1134 port_name = wwn_to_u64(ndlp->nlp_portname.wwn);
1136 sizeof(struct lpfc_name));
1137 break; 1135 break;
1138 } 1136 }
1139 } 1137 }
1140 spin_unlock_irq(shost->host_lock); 1138 spin_unlock_irq(shost->host_lock);
1141 1139
1142 fc_starget_port_name(starget) = be64_to_cpu(port_name); 1140 fc_starget_port_name(starget) = port_name;
1143} 1141}
1144 1142
1145static void 1143static void
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 0a8269d6b130..4fb8eb0c84cf 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1017,13 +1017,10 @@ lpfc_register_remote_port(struct lpfc_hba * phba,
1017 struct fc_rport *rport; 1017 struct fc_rport *rport;
1018 struct lpfc_rport_data *rdata; 1018 struct lpfc_rport_data *rdata;
1019 struct fc_rport_identifiers rport_ids; 1019 struct fc_rport_identifiers rport_ids;
1020 uint64_t wwn;
1021 1020
1022 /* Remote port has reappeared. Re-register w/ FC transport */ 1021 /* Remote port has reappeared. Re-register w/ FC transport */
1023 memcpy(&wwn, &ndlp->nlp_nodename, sizeof(uint64_t)); 1022 rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.wwn);
1024 rport_ids.node_name = be64_to_cpu(wwn); 1023 rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.wwn);
1025 memcpy(&wwn, &ndlp->nlp_portname, sizeof(uint64_t));
1026 rport_ids.port_name = be64_to_cpu(wwn);
1027 rport_ids.port_id = ndlp->nlp_DID; 1024 rport_ids.port_id = ndlp->nlp_DID;
1028 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN; 1025 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
1029 if (ndlp->nlp_type & NLP_FCP_TARGET) 1026 if (ndlp->nlp_type & NLP_FCP_TARGET)
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 21591cb9f551..047a87c26cc0 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -262,12 +262,14 @@ struct lpfc_sli_ct_request {
262#define FF_FRAME_SIZE 2048 262#define FF_FRAME_SIZE 2048
263 263
264struct lpfc_name { 264struct lpfc_name {
265 union {
266 struct {
265#ifdef __BIG_ENDIAN_BITFIELD 267#ifdef __BIG_ENDIAN_BITFIELD
266 uint8_t nameType:4; /* FC Word 0, bit 28:31 */ 268 uint8_t nameType:4; /* FC Word 0, bit 28:31 */
267 uint8_t IEEEextMsn:4; /* FC Word 0, bit 24:27, bit 8:11 of IEEE ext */ 269 uint8_t IEEEextMsn:4; /* FC Word 0, bit 24:27, bit 8:11 of IEEE ext */
268#else /* __LITTLE_ENDIAN_BITFIELD */ 270#else /* __LITTLE_ENDIAN_BITFIELD */
269 uint8_t IEEEextMsn:4; /* FC Word 0, bit 24:27, bit 8:11 of IEEE ext */ 271 uint8_t IEEEextMsn:4; /* FC Word 0, bit 24:27, bit 8:11 of IEEE ext */
270 uint8_t nameType:4; /* FC Word 0, bit 28:31 */ 272 uint8_t nameType:4; /* FC Word 0, bit 28:31 */
271#endif 273#endif
272 274
273#define NAME_IEEE 0x1 /* IEEE name - nameType */ 275#define NAME_IEEE 0x1 /* IEEE name - nameType */
@@ -276,8 +278,11 @@ struct lpfc_name {
276#define NAME_IP_TYPE 0x4 /* IP address */ 278#define NAME_IP_TYPE 0x4 /* IP address */
277#define NAME_CCITT_TYPE 0xC 279#define NAME_CCITT_TYPE 0xC
278#define NAME_CCITT_GR_TYPE 0xE 280#define NAME_CCITT_GR_TYPE 0xE
279 uint8_t IEEEextLsb; /* FC Word 0, bit 16:23, IEEE extended Lsb */ 281 uint8_t IEEEextLsb; /* FC Word 0, bit 16:23, IEEE extended Lsb */
280 uint8_t IEEE[6]; /* FC IEEE address */ 282 uint8_t IEEE[6]; /* FC IEEE address */
283 };
284 uint8_t wwn[8];
285 };
281}; 286};
282 287
283struct csp { 288struct csp {
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 6f3cb59bf9e0..454058f655db 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1333,7 +1333,6 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1333 unsigned long bar0map_len, bar2map_len; 1333 unsigned long bar0map_len, bar2map_len;
1334 int error = -ENODEV, retval; 1334 int error = -ENODEV, retval;
1335 int i; 1335 int i;
1336 u64 wwname;
1337 1336
1338 if (pci_enable_device(pdev)) 1337 if (pci_enable_device(pdev))
1339 goto out; 1338 goto out;
@@ -1524,10 +1523,8 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1524 * Must done after lpfc_sli_hba_setup() 1523 * Must done after lpfc_sli_hba_setup()
1525 */ 1524 */
1526 1525
1527 memcpy(&wwname, &phba->fc_nodename, sizeof(u64)); 1526 fc_host_node_name(host) = wwn_to_u64(phba->fc_nodename.wwn);
1528 fc_host_node_name(host) = be64_to_cpu(wwname); 1527 fc_host_port_name(host) = wwn_to_u64(phba->fc_portname.wwn);
1529 memcpy(&wwname, &phba->fc_portname, sizeof(u64));
1530 fc_host_port_name(host) = be64_to_cpu(wwname);
1531 fc_host_supported_classes(host) = FC_COS_CLASS3; 1528 fc_host_supported_classes(host) = FC_COS_CLASS3;
1532 1529
1533 memset(fc_host_supported_fc4s(host), 0, 1530 memset(fc_host_supported_fc4s(host), 0,
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index fe0fce71adc7..fc25cd834668 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -360,16 +360,16 @@ qla2x00_get_starget_node_name(struct scsi_target *starget)
360 struct Scsi_Host *host = dev_to_shost(starget->dev.parent); 360 struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
361 scsi_qla_host_t *ha = to_qla_host(host); 361 scsi_qla_host_t *ha = to_qla_host(host);
362 fc_port_t *fcport; 362 fc_port_t *fcport;
363 uint64_t node_name = 0; 363 u64 node_name = 0;
364 364
365 list_for_each_entry(fcport, &ha->fcports, list) { 365 list_for_each_entry(fcport, &ha->fcports, list) {
366 if (starget->id == fcport->os_target_id) { 366 if (starget->id == fcport->os_target_id) {
367 node_name = *(uint64_t *)fcport->node_name; 367 node_name = wwn_to_u64(fcport->node_name);
368 break; 368 break;
369 } 369 }
370 } 370 }
371 371
372 fc_starget_node_name(starget) = be64_to_cpu(node_name); 372 fc_starget_node_name(starget) = node_name;
373} 373}
374 374
375static void 375static void
@@ -378,16 +378,16 @@ qla2x00_get_starget_port_name(struct scsi_target *starget)
378 struct Scsi_Host *host = dev_to_shost(starget->dev.parent); 378 struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
379 scsi_qla_host_t *ha = to_qla_host(host); 379 scsi_qla_host_t *ha = to_qla_host(host);
380 fc_port_t *fcport; 380 fc_port_t *fcport;
381 uint64_t port_name = 0; 381 u64 port_name = 0;
382 382
383 list_for_each_entry(fcport, &ha->fcports, list) { 383 list_for_each_entry(fcport, &ha->fcports, list) {
384 if (starget->id == fcport->os_target_id) { 384 if (starget->id == fcport->os_target_id) {
385 port_name = *(uint64_t *)fcport->port_name; 385 port_name = wwn_to_u64(fcport->port_name);
386 break; 386 break;
387 } 387 }
388 } 388 }
389 389
390 fc_starget_port_name(starget) = be64_to_cpu(port_name); 390 fc_starget_port_name(starget) = port_name;
391} 391}
392 392
393static void 393static void
@@ -460,9 +460,7 @@ struct fc_function_template qla2xxx_transport_functions = {
460void 460void
461qla2x00_init_host_attr(scsi_qla_host_t *ha) 461qla2x00_init_host_attr(scsi_qla_host_t *ha)
462{ 462{
463 fc_host_node_name(ha->host) = 463 fc_host_node_name(ha->host) = wwn_to_u64(ha->init_cb->node_name);
464 be64_to_cpu(*(uint64_t *)ha->init_cb->node_name); 464 fc_host_port_name(ha->host) = wwn_to_u64(ha->init_cb->port_name);
465 fc_host_port_name(ha->host) =
466 be64_to_cpu(*(uint64_t *)ha->init_cb->port_name);
467 fc_host_supported_classes(ha->host) = FC_COS_CLASS3; 465 fc_host_supported_classes(ha->host) = FC_COS_CLASS3;
468} 466}
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index c619583e646b..3e9b64137873 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -2066,8 +2066,8 @@ qla2x00_reg_remote_port(scsi_qla_host_t *ha, fc_port_t *fcport)
2066 return; 2066 return;
2067 } 2067 }
2068 2068
2069 rport_ids.node_name = be64_to_cpu(*(uint64_t *)fcport->node_name); 2069 rport_ids.node_name = wwn_to_u64(fcport->node_name);
2070 rport_ids.port_name = be64_to_cpu(*(uint64_t *)fcport->port_name); 2070 rport_ids.port_name = wwn_to_u64(fcport->port_name);
2071 rport_ids.port_id = fcport->d_id.b.domain << 16 | 2071 rport_ids.port_id = fcport->d_id.b.domain << 16 |
2072 fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa; 2072 fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
2073 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN; 2073 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 77f2d444f7e0..863bb6495daa 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -97,6 +97,30 @@ int scsi_insert_special_req(struct scsi_request *sreq, int at_head)
97} 97}
98 98
99static void scsi_run_queue(struct request_queue *q); 99static void scsi_run_queue(struct request_queue *q);
100static void scsi_release_buffers(struct scsi_cmnd *cmd);
101
102/*
103 * Function: scsi_unprep_request()
104 *
105 * Purpose: Remove all preparation done for a request, including its
106 * associated scsi_cmnd, so that it can be requeued.
107 *
108 * Arguments: req - request to unprepare
109 *
110 * Lock status: Assumed that no locks are held upon entry.
111 *
112 * Returns: Nothing.
113 */
114static void scsi_unprep_request(struct request *req)
115{
116 struct scsi_cmnd *cmd = req->special;
117
118 req->flags &= ~REQ_DONTPREP;
119 req->special = (req->flags & REQ_SPECIAL) ? cmd->sc_request : NULL;
120
121 scsi_release_buffers(cmd);
122 scsi_put_command(cmd);
123}
100 124
101/* 125/*
102 * Function: scsi_queue_insert() 126 * Function: scsi_queue_insert()
@@ -116,12 +140,14 @@ static void scsi_run_queue(struct request_queue *q);
116 * commands. 140 * commands.
117 * Notes: This could be called either from an interrupt context or a 141 * Notes: This could be called either from an interrupt context or a
118 * normal process context. 142 * normal process context.
143 * Notes: Upon return, cmd is a stale pointer.
119 */ 144 */
120int scsi_queue_insert(struct scsi_cmnd *cmd, int reason) 145int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
121{ 146{
122 struct Scsi_Host *host = cmd->device->host; 147 struct Scsi_Host *host = cmd->device->host;
123 struct scsi_device *device = cmd->device; 148 struct scsi_device *device = cmd->device;
124 struct request_queue *q = device->request_queue; 149 struct request_queue *q = device->request_queue;
150 struct request *req = cmd->request;
125 unsigned long flags; 151 unsigned long flags;
126 152
127 SCSI_LOG_MLQUEUE(1, 153 SCSI_LOG_MLQUEUE(1,
@@ -162,8 +188,9 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
162 * function. The SCSI request function detects the blocked condition 188 * function. The SCSI request function detects the blocked condition
163 * and plugs the queue appropriately. 189 * and plugs the queue appropriately.
164 */ 190 */
191 scsi_unprep_request(req);
165 spin_lock_irqsave(q->queue_lock, flags); 192 spin_lock_irqsave(q->queue_lock, flags);
166 blk_requeue_request(q, cmd->request); 193 blk_requeue_request(q, req);
167 spin_unlock_irqrestore(q->queue_lock, flags); 194 spin_unlock_irqrestore(q->queue_lock, flags);
168 195
169 scsi_run_queue(q); 196 scsi_run_queue(q);
@@ -339,7 +366,7 @@ int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
339 int result; 366 int result;
340 367
341 if (sshdr) { 368 if (sshdr) {
342 sense = kmalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL); 369 sense = kmalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
343 if (!sense) 370 if (!sense)
344 return DRIVER_ERROR << 24; 371 return DRIVER_ERROR << 24;
345 memset(sense, 0, SCSI_SENSE_BUFFERSIZE); 372 memset(sense, 0, SCSI_SENSE_BUFFERSIZE);
@@ -552,15 +579,16 @@ static void scsi_run_queue(struct request_queue *q)
552 * I/O errors in the middle of the request, in which case 579 * I/O errors in the middle of the request, in which case
553 * we need to request the blocks that come after the bad 580 * we need to request the blocks that come after the bad
554 * sector. 581 * sector.
582 * Notes: Upon return, cmd is a stale pointer.
555 */ 583 */
556static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd) 584static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
557{ 585{
586 struct request *req = cmd->request;
558 unsigned long flags; 587 unsigned long flags;
559 588
560 cmd->request->flags &= ~REQ_DONTPREP; 589 scsi_unprep_request(req);
561
562 spin_lock_irqsave(q->queue_lock, flags); 590 spin_lock_irqsave(q->queue_lock, flags);
563 blk_requeue_request(q, cmd->request); 591 blk_requeue_request(q, req);
564 spin_unlock_irqrestore(q->queue_lock, flags); 592 spin_unlock_irqrestore(q->queue_lock, flags);
565 593
566 scsi_run_queue(q); 594 scsi_run_queue(q);
@@ -595,13 +623,14 @@ void scsi_run_host_queues(struct Scsi_Host *shost)
595 * 623 *
596 * Lock status: Assumed that lock is not held upon entry. 624 * Lock status: Assumed that lock is not held upon entry.
597 * 625 *
598 * Returns: cmd if requeue done or required, NULL otherwise 626 * Returns: cmd if requeue required, NULL otherwise.
599 * 627 *
600 * Notes: This is called for block device requests in order to 628 * Notes: This is called for block device requests in order to
601 * mark some number of sectors as complete. 629 * mark some number of sectors as complete.
602 * 630 *
603 * We are guaranteeing that the request queue will be goosed 631 * We are guaranteeing that the request queue will be goosed
604 * at some point during this call. 632 * at some point during this call.
633 * Notes: If cmd was requeued, upon return it will be a stale pointer.
605 */ 634 */
606static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate, 635static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
607 int bytes, int requeue) 636 int bytes, int requeue)
@@ -624,14 +653,15 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
624 if (!uptodate && blk_noretry_request(req)) 653 if (!uptodate && blk_noretry_request(req))
625 end_that_request_chunk(req, 0, leftover); 654 end_that_request_chunk(req, 0, leftover);
626 else { 655 else {
627 if (requeue) 656 if (requeue) {
628 /* 657 /*
629 * Bleah. Leftovers again. Stick the 658 * Bleah. Leftovers again. Stick the
630 * leftovers in the front of the 659 * leftovers in the front of the
631 * queue, and goose the queue again. 660 * queue, and goose the queue again.
632 */ 661 */
633 scsi_requeue_command(q, cmd); 662 scsi_requeue_command(q, cmd);
634 663 cmd = NULL;
664 }
635 return cmd; 665 return cmd;
636 } 666 }
637 } 667 }
@@ -857,15 +887,13 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
857 * requeueing right here - we will requeue down below 887 * requeueing right here - we will requeue down below
858 * when we handle the bad sectors. 888 * when we handle the bad sectors.
859 */ 889 */
860 cmd = scsi_end_request(cmd, 1, good_bytes, result == 0);
861 890
862 /* 891 /*
863 * If the command completed without error, then either finish off the 892 * If the command completed without error, then either
864 * rest of the command, or start a new one. 893 * finish off the rest of the command, or start a new one.
865 */ 894 */
866 if (result == 0 || cmd == NULL ) { 895 if (scsi_end_request(cmd, 1, good_bytes, result == 0) == NULL)
867 return; 896 return;
868 }
869 } 897 }
870 /* 898 /*
871 * Now, if we were good little boys and girls, Santa left us a request 899 * Now, if we were good little boys and girls, Santa left us a request
@@ -880,7 +908,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
880 * and quietly refuse further access. 908 * and quietly refuse further access.
881 */ 909 */
882 cmd->device->changed = 1; 910 cmd->device->changed = 1;
883 cmd = scsi_end_request(cmd, 0, 911 scsi_end_request(cmd, 0,
884 this_count, 1); 912 this_count, 1);
885 return; 913 return;
886 } else { 914 } else {
@@ -914,7 +942,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
914 scsi_requeue_command(q, cmd); 942 scsi_requeue_command(q, cmd);
915 result = 0; 943 result = 0;
916 } else { 944 } else {
917 cmd = scsi_end_request(cmd, 0, this_count, 1); 945 scsi_end_request(cmd, 0, this_count, 1);
918 return; 946 return;
919 } 947 }
920 break; 948 break;
@@ -931,7 +959,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
931 dev_printk(KERN_INFO, 959 dev_printk(KERN_INFO,
932 &cmd->device->sdev_gendev, 960 &cmd->device->sdev_gendev,
933 "Device not ready.\n"); 961 "Device not ready.\n");
934 cmd = scsi_end_request(cmd, 0, this_count, 1); 962 scsi_end_request(cmd, 0, this_count, 1);
935 return; 963 return;
936 case VOLUME_OVERFLOW: 964 case VOLUME_OVERFLOW:
937 if (!(req->flags & REQ_QUIET)) { 965 if (!(req->flags & REQ_QUIET)) {
@@ -941,7 +969,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
941 __scsi_print_command(cmd->data_cmnd); 969 __scsi_print_command(cmd->data_cmnd);
942 scsi_print_sense("", cmd); 970 scsi_print_sense("", cmd);
943 } 971 }
944 cmd = scsi_end_request(cmd, 0, block_bytes, 1); 972 scsi_end_request(cmd, 0, block_bytes, 1);
945 return; 973 return;
946 default: 974 default:
947 break; 975 break;
@@ -972,7 +1000,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
972 block_bytes = req->hard_cur_sectors << 9; 1000 block_bytes = req->hard_cur_sectors << 9;
973 if (!block_bytes) 1001 if (!block_bytes)
974 block_bytes = req->data_len; 1002 block_bytes = req->data_len;
975 cmd = scsi_end_request(cmd, 0, block_bytes, 1); 1003 scsi_end_request(cmd, 0, block_bytes, 1);
976 } 1004 }
977} 1005}
978EXPORT_SYMBOL(scsi_io_completion); 1006EXPORT_SYMBOL(scsi_io_completion);
@@ -1118,7 +1146,7 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
1118 if (unlikely(!scsi_device_online(sdev))) { 1146 if (unlikely(!scsi_device_online(sdev))) {
1119 printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to offline device\n", 1147 printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to offline device\n",
1120 sdev->host->host_no, sdev->id, sdev->lun); 1148 sdev->host->host_no, sdev->id, sdev->lun);
1121 return BLKPREP_KILL; 1149 goto kill;
1122 } 1150 }
1123 if (unlikely(sdev->sdev_state != SDEV_RUNNING)) { 1151 if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
1124 /* OK, we're not in a running state don't prep 1152 /* OK, we're not in a running state don't prep
@@ -1128,7 +1156,7 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
1128 * at all allowed down */ 1156 * at all allowed down */
1129 printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to dead device\n", 1157 printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to dead device\n",
1130 sdev->host->host_no, sdev->id, sdev->lun); 1158 sdev->host->host_no, sdev->id, sdev->lun);
1131 return BLKPREP_KILL; 1159 goto kill;
1132 } 1160 }
1133 /* OK, we only allow special commands (i.e. not 1161 /* OK, we only allow special commands (i.e. not
1134 * user initiated ones */ 1162 * user initiated ones */
@@ -1160,11 +1188,11 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
1160 if(unlikely(specials_only) && !(req->flags & REQ_SPECIAL)) { 1188 if(unlikely(specials_only) && !(req->flags & REQ_SPECIAL)) {
1161 if(specials_only == SDEV_QUIESCE || 1189 if(specials_only == SDEV_QUIESCE ||
1162 specials_only == SDEV_BLOCK) 1190 specials_only == SDEV_BLOCK)
1163 return BLKPREP_DEFER; 1191 goto defer;
1164 1192
1165 printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to device being removed\n", 1193 printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to device being removed\n",
1166 sdev->host->host_no, sdev->id, sdev->lun); 1194 sdev->host->host_no, sdev->id, sdev->lun);
1167 return BLKPREP_KILL; 1195 goto kill;
1168 } 1196 }
1169 1197
1170 1198
@@ -1182,7 +1210,7 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
1182 cmd->tag = req->tag; 1210 cmd->tag = req->tag;
1183 } else { 1211 } else {
1184 blk_dump_rq_flags(req, "SCSI bad req"); 1212 blk_dump_rq_flags(req, "SCSI bad req");
1185 return BLKPREP_KILL; 1213 goto kill;
1186 } 1214 }
1187 1215
1188 /* note the overloading of req->special. When the tag 1216 /* note the overloading of req->special. When the tag
@@ -1220,8 +1248,13 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
1220 * required). 1248 * required).
1221 */ 1249 */
1222 ret = scsi_init_io(cmd); 1250 ret = scsi_init_io(cmd);
1223 if (ret) /* BLKPREP_KILL return also releases the command */ 1251 switch(ret) {
1224 return ret; 1252 case BLKPREP_KILL:
1253 /* BLKPREP_KILL return also releases the command */
1254 goto kill;
1255 case BLKPREP_DEFER:
1256 goto defer;
1257 }
1225 1258
1226 /* 1259 /*
1227 * Initialize the actual SCSI command for this request. 1260 * Initialize the actual SCSI command for this request.
@@ -1231,7 +1264,7 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
1231 if (unlikely(!drv->init_command(cmd))) { 1264 if (unlikely(!drv->init_command(cmd))) {
1232 scsi_release_buffers(cmd); 1265 scsi_release_buffers(cmd);
1233 scsi_put_command(cmd); 1266 scsi_put_command(cmd);
1234 return BLKPREP_KILL; 1267 goto kill;
1235 } 1268 }
1236 } else { 1269 } else {
1237 memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd)); 1270 memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
@@ -1262,6 +1295,9 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
1262 if (sdev->device_busy == 0) 1295 if (sdev->device_busy == 0)
1263 blk_plug_device(q); 1296 blk_plug_device(q);
1264 return BLKPREP_DEFER; 1297 return BLKPREP_DEFER;
1298 kill:
1299 req->errors = DID_NO_CONNECT << 16;
1300 return BLKPREP_KILL;
1265} 1301}
1266 1302
1267/* 1303/*
@@ -1336,19 +1372,24 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
1336} 1372}
1337 1373
1338/* 1374/*
1339 * Kill requests for a dead device 1375 * Kill a request for a dead device
1340 */ 1376 */
1341static void scsi_kill_requests(request_queue_t *q) 1377static void scsi_kill_request(struct request *req, request_queue_t *q)
1342{ 1378{
1343 struct request *req; 1379 struct scsi_cmnd *cmd = req->special;
1380
1381 blkdev_dequeue_request(req);
1344 1382
1345 while ((req = elv_next_request(q)) != NULL) { 1383 if (unlikely(cmd == NULL)) {
1346 blkdev_dequeue_request(req); 1384 printk(KERN_CRIT "impossible request in %s.\n",
1347 req->flags |= REQ_QUIET; 1385 __FUNCTION__);
1348 while (end_that_request_first(req, 0, req->nr_sectors)) 1386 BUG();
1349 ;
1350 end_that_request_last(req);
1351 } 1387 }
1388
1389 scsi_init_cmd_errh(cmd);
1390 cmd->result = DID_NO_CONNECT << 16;
1391 atomic_inc(&cmd->device->iorequest_cnt);
1392 __scsi_done(cmd);
1352} 1393}
1353 1394
1354/* 1395/*
@@ -1371,7 +1412,8 @@ static void scsi_request_fn(struct request_queue *q)
1371 1412
1372 if (!sdev) { 1413 if (!sdev) {
1373 printk("scsi: killing requests for dead queue\n"); 1414 printk("scsi: killing requests for dead queue\n");
1374 scsi_kill_requests(q); 1415 while ((req = elv_next_request(q)) != NULL)
1416 scsi_kill_request(req, q);
1375 return; 1417 return;
1376 } 1418 }
1377 1419
@@ -1398,11 +1440,7 @@ static void scsi_request_fn(struct request_queue *q)
1398 if (unlikely(!scsi_device_online(sdev))) { 1440 if (unlikely(!scsi_device_online(sdev))) {
1399 printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to offline device\n", 1441 printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to offline device\n",
1400 sdev->host->host_no, sdev->id, sdev->lun); 1442 sdev->host->host_no, sdev->id, sdev->lun);
1401 blkdev_dequeue_request(req); 1443 scsi_kill_request(req, q);
1402 req->flags |= REQ_QUIET;
1403 while (end_that_request_first(req, 0, req->nr_sectors))
1404 ;
1405 end_that_request_last(req);
1406 continue; 1444 continue;
1407 } 1445 }
1408 1446
@@ -1415,6 +1453,14 @@ static void scsi_request_fn(struct request_queue *q)
1415 sdev->device_busy++; 1453 sdev->device_busy++;
1416 1454
1417 spin_unlock(q->queue_lock); 1455 spin_unlock(q->queue_lock);
1456 cmd = req->special;
1457 if (unlikely(cmd == NULL)) {
1458 printk(KERN_CRIT "impossible request in %s.\n"
1459 "please mail a stack trace to "
1460 "linux-scsi@vger.kernel.org",
1461 __FUNCTION__);
1462 BUG();
1463 }
1418 spin_lock(shost->host_lock); 1464 spin_lock(shost->host_lock);
1419 1465
1420 if (!scsi_host_queue_ready(q, shost, sdev)) 1466 if (!scsi_host_queue_ready(q, shost, sdev))
@@ -1433,15 +1479,6 @@ static void scsi_request_fn(struct request_queue *q)
1433 */ 1479 */
1434 spin_unlock_irq(shost->host_lock); 1480 spin_unlock_irq(shost->host_lock);
1435 1481
1436 cmd = req->special;
1437 if (unlikely(cmd == NULL)) {
1438 printk(KERN_CRIT "impossible request in %s.\n"
1439 "please mail a stack trace to "
1440 "linux-scsi@vger.kernel.org",
1441 __FUNCTION__);
1442 BUG();
1443 }
1444
1445 /* 1482 /*
1446 * Finally, initialize any error handling parameters, and set up 1483 * Finally, initialize any error handling parameters, and set up
1447 * the timers for timeouts. 1484 * the timers for timeouts.
@@ -1477,6 +1514,7 @@ static void scsi_request_fn(struct request_queue *q)
1477 * cases (host limits or settings) should run the queue at some 1514 * cases (host limits or settings) should run the queue at some
1478 * later time. 1515 * later time.
1479 */ 1516 */
1517 scsi_unprep_request(req);
1480 spin_lock_irq(q->queue_lock); 1518 spin_lock_irq(q->queue_lock);
1481 blk_requeue_request(q, req); 1519 blk_requeue_request(q, req);
1482 sdev->device_busy--; 1520 sdev->device_busy--;
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index ee6de1768e53..d05f778d31a8 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -124,6 +124,7 @@ extern void scsi_sysfs_unregister(void);
124extern void scsi_sysfs_device_initialize(struct scsi_device *); 124extern void scsi_sysfs_device_initialize(struct scsi_device *);
125extern int scsi_sysfs_target_initialize(struct scsi_device *); 125extern int scsi_sysfs_target_initialize(struct scsi_device *);
126extern struct scsi_transport_template blank_transport_template; 126extern struct scsi_transport_template blank_transport_template;
127extern void __scsi_remove_device(struct scsi_device *);
127 128
128extern struct bus_type scsi_bus_type; 129extern struct bus_type scsi_bus_type;
129 130
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 19c9a232a754..b86f170fa8ed 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -870,8 +870,12 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
870 out_free_sdev: 870 out_free_sdev:
871 if (res == SCSI_SCAN_LUN_PRESENT) { 871 if (res == SCSI_SCAN_LUN_PRESENT) {
872 if (sdevp) { 872 if (sdevp) {
873 scsi_device_get(sdev); 873 if (scsi_device_get(sdev) == 0) {
874 *sdevp = sdev; 874 *sdevp = sdev;
875 } else {
876 __scsi_remove_device(sdev);
877 res = SCSI_SCAN_NO_RESPONSE;
878 }
875 } 879 }
876 } else { 880 } else {
877 if (sdev->host->hostt->slave_destroy) 881 if (sdev->host->hostt->slave_destroy)
@@ -1260,6 +1264,19 @@ struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel,
1260} 1264}
1261EXPORT_SYMBOL(__scsi_add_device); 1265EXPORT_SYMBOL(__scsi_add_device);
1262 1266
1267int scsi_add_device(struct Scsi_Host *host, uint channel,
1268 uint target, uint lun)
1269{
1270 struct scsi_device *sdev =
1271 __scsi_add_device(host, channel, target, lun, NULL);
1272 if (IS_ERR(sdev))
1273 return PTR_ERR(sdev);
1274
1275 scsi_device_put(sdev);
1276 return 0;
1277}
1278EXPORT_SYMBOL(scsi_add_device);
1279
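The new exported scsi_add_device() above wraps __scsi_add_device() for callers that only want the hot-add side effect: it drops the reference __scsi_add_device() returns and hands back 0 or a negative errno instead of a scsi_device pointer (the matching prototype change is in include/scsi/scsi_device.h further down). A hedged usage sketch follows; the host pointer and the channel/id/lun triple are assumptions for illustration, not taken from this patch.

#include <linux/kernel.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

/* Illustrative only: hot-add LUN 0 of target 3 on channel 0. */
static int example_hotadd(struct Scsi_Host *shost)
{
        int error = scsi_add_device(shost, 0, 3, 0);

        if (error)
                printk(KERN_WARNING "example: scsi_add_device failed (%d)\n",
                       error);
        return error;
}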
1263void scsi_rescan_device(struct device *dev) 1280void scsi_rescan_device(struct device *dev)
1264{ 1281{
1265 struct scsi_driver *drv; 1282 struct scsi_driver *drv;
@@ -1276,27 +1293,8 @@ void scsi_rescan_device(struct device *dev)
1276} 1293}
1277EXPORT_SYMBOL(scsi_rescan_device); 1294EXPORT_SYMBOL(scsi_rescan_device);
1278 1295
1279/** 1296static void __scsi_scan_target(struct device *parent, unsigned int channel,
1280 * scsi_scan_target - scan a target id, possibly including all LUNs on the 1297 unsigned int id, unsigned int lun, int rescan)
1281 * target.
1282 * @sdevsca: Scsi_Device handle for scanning
1283 * @shost: host to scan
1284 * @channel: channel to scan
1285 * @id: target id to scan
1286 *
1287 * Description:
1288 * Scan the target id on @shost, @channel, and @id. Scan at least LUN
1289 * 0, and possibly all LUNs on the target id.
1290 *
1291 * Use the pre-allocated @sdevscan as a handle for the scanning. This
1292 * function sets sdevscan->host, sdevscan->id and sdevscan->lun; the
1293 * scanning functions modify sdevscan->lun.
1294 *
1295 * First try a REPORT LUN scan, if that does not scan the target, do a
1296 * sequential scan of LUNs on the target id.
1297 **/
1298void scsi_scan_target(struct device *parent, unsigned int channel,
1299 unsigned int id, unsigned int lun, int rescan)
1300{ 1298{
1301 struct Scsi_Host *shost = dev_to_shost(parent); 1299 struct Scsi_Host *shost = dev_to_shost(parent);
1302 int bflags = 0; 1300 int bflags = 0;
@@ -1310,9 +1308,7 @@ void scsi_scan_target(struct device *parent, unsigned int channel,
1310 */ 1308 */
1311 return; 1309 return;
1312 1310
1313
1314 starget = scsi_alloc_target(parent, channel, id); 1311 starget = scsi_alloc_target(parent, channel, id);
1315
1316 if (!starget) 1312 if (!starget)
1317 return; 1313 return;
1318 1314
@@ -1358,6 +1354,33 @@ void scsi_scan_target(struct device *parent, unsigned int channel,
1358 1354
1359 put_device(&starget->dev); 1355 put_device(&starget->dev);
1360} 1356}
1357
1358/**
1359 * scsi_scan_target - scan a target id, possibly including all LUNs on the
1360 * target.
1361 * @parent: host to scan
1362 * @channel: channel to scan
1363 * @id: target id to scan
1364 * @lun: Specific LUN to scan or SCAN_WILD_CARD
1365 * @rescan: passed to LUN scanning routines
1366 *
1367 * Description:
1368 * Scan the target id on @parent, @channel, and @id. Scan at least LUN 0,
1369 * and possibly all LUNs on the target id.
1370 *
1371 * First try a REPORT LUN scan, if that does not scan the target, do a
1372 * sequential scan of LUNs on the target id.
1373 **/
1374void scsi_scan_target(struct device *parent, unsigned int channel,
1375 unsigned int id, unsigned int lun, int rescan)
1376{
1377 struct Scsi_Host *shost = dev_to_shost(parent);
1378
1379 down(&shost->scan_mutex);
1380 if (scsi_host_scan_allowed(shost))
1381 __scsi_scan_target(parent, channel, id, lun, rescan);
1382 up(&shost->scan_mutex);
1383}
1361EXPORT_SYMBOL(scsi_scan_target); 1384EXPORT_SYMBOL(scsi_scan_target);
1362 1385
1363static void scsi_scan_channel(struct Scsi_Host *shost, unsigned int channel, 1386static void scsi_scan_channel(struct Scsi_Host *shost, unsigned int channel,
@@ -1383,10 +1406,12 @@ static void scsi_scan_channel(struct Scsi_Host *shost, unsigned int channel,
1383 order_id = shost->max_id - id - 1; 1406 order_id = shost->max_id - id - 1;
1384 else 1407 else
1385 order_id = id; 1408 order_id = id;
1386 scsi_scan_target(&shost->shost_gendev, channel, order_id, lun, rescan); 1409 __scsi_scan_target(&shost->shost_gendev, channel,
1410 order_id, lun, rescan);
1387 } 1411 }
1388 else 1412 else
1389 scsi_scan_target(&shost->shost_gendev, channel, id, lun, rescan); 1413 __scsi_scan_target(&shost->shost_gendev, channel,
1414 id, lun, rescan);
1390} 1415}
1391 1416
1392int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel, 1417int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel,
@@ -1484,12 +1509,15 @@ void scsi_forget_host(struct Scsi_Host *shost)
1484 */ 1509 */
1485struct scsi_device *scsi_get_host_dev(struct Scsi_Host *shost) 1510struct scsi_device *scsi_get_host_dev(struct Scsi_Host *shost)
1486{ 1511{
1487 struct scsi_device *sdev; 1512 struct scsi_device *sdev = NULL;
1488 struct scsi_target *starget; 1513 struct scsi_target *starget;
1489 1514
1515 down(&shost->scan_mutex);
1516 if (!scsi_host_scan_allowed(shost))
1517 goto out;
1490 starget = scsi_alloc_target(&shost->shost_gendev, 0, shost->this_id); 1518 starget = scsi_alloc_target(&shost->shost_gendev, 0, shost->this_id);
1491 if (!starget) 1519 if (!starget)
1492 return NULL; 1520 goto out;
1493 1521
1494 sdev = scsi_alloc_sdev(starget, 0, NULL); 1522 sdev = scsi_alloc_sdev(starget, 0, NULL);
1495 if (sdev) { 1523 if (sdev) {
@@ -1497,6 +1525,8 @@ struct scsi_device *scsi_get_host_dev(struct Scsi_Host *shost)
1497 sdev->borken = 0; 1525 sdev->borken = 0;
1498 } 1526 }
1499 put_device(&starget->dev); 1527 put_device(&starget->dev);
1528 out:
1529 up(&shost->scan_mutex);
1500 return sdev; 1530 return sdev;
1501} 1531}
1502EXPORT_SYMBOL(scsi_get_host_dev); 1532EXPORT_SYMBOL(scsi_get_host_dev);
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index dae59d1da07a..b8052d5206cc 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -653,7 +653,7 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
653 error = attr_add(&sdev->sdev_gendev, 653 error = attr_add(&sdev->sdev_gendev,
654 sdev->host->hostt->sdev_attrs[i]); 654 sdev->host->hostt->sdev_attrs[i]);
655 if (error) { 655 if (error) {
656 scsi_remove_device(sdev); 656 __scsi_remove_device(sdev);
657 goto out; 657 goto out;
658 } 658 }
659 } 659 }
@@ -667,7 +667,7 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
667 scsi_sysfs_sdev_attrs[i]); 667 scsi_sysfs_sdev_attrs[i]);
668 error = device_create_file(&sdev->sdev_gendev, attr); 668 error = device_create_file(&sdev->sdev_gendev, attr);
669 if (error) { 669 if (error) {
670 scsi_remove_device(sdev); 670 __scsi_remove_device(sdev);
671 goto out; 671 goto out;
672 } 672 }
673 } 673 }
@@ -687,17 +687,10 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
687 return error; 687 return error;
688} 688}
689 689
690/** 690void __scsi_remove_device(struct scsi_device *sdev)
691 * scsi_remove_device - unregister a device from the scsi bus
692 * @sdev: scsi_device to unregister
693 **/
694void scsi_remove_device(struct scsi_device *sdev)
695{ 691{
696 struct Scsi_Host *shost = sdev->host;
697
698 down(&shost->scan_mutex);
699 if (scsi_device_set_state(sdev, SDEV_CANCEL) != 0) 692 if (scsi_device_set_state(sdev, SDEV_CANCEL) != 0)
700 goto out; 693 return;
701 694
702 class_device_unregister(&sdev->sdev_classdev); 695 class_device_unregister(&sdev->sdev_classdev);
703 device_del(&sdev->sdev_gendev); 696 device_del(&sdev->sdev_gendev);
@@ -706,8 +699,17 @@ void scsi_remove_device(struct scsi_device *sdev)
706 sdev->host->hostt->slave_destroy(sdev); 699 sdev->host->hostt->slave_destroy(sdev);
707 transport_unregister_device(&sdev->sdev_gendev); 700 transport_unregister_device(&sdev->sdev_gendev);
708 put_device(&sdev->sdev_gendev); 701 put_device(&sdev->sdev_gendev);
709out: 702}
710 up(&shost->scan_mutex); 703
704/**
705 * scsi_remove_device - unregister a device from the scsi bus
706 * @sdev: scsi_device to unregister
707 **/
708void scsi_remove_device(struct scsi_device *sdev)
709{
710 down(&sdev->host->scan_mutex);
711 __scsi_remove_device(sdev);
712 up(&sdev->host->scan_mutex);
711} 713}
712EXPORT_SYMBOL(scsi_remove_device); 714EXPORT_SYMBOL(scsi_remove_device);
713 715
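The point of splitting out __scsi_remove_device() is lock ordering: scsi_remove_device() now takes shost->scan_mutex itself, so code that already holds the scan mutex (the attribute error paths in scsi_sysfs_add_sdev() above, or the failed scsi_device_get() case added to scsi_probe_and_add_lun()) must call __scsi_remove_device() directly. A hedged sketch of the rule; note that __scsi_remove_device() is midlayer-internal (declared in scsi_priv.h), so this pattern only applies inside drivers/scsi, and example_scan_failure() is a placeholder name.

#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include "scsi_priv.h"          /* for __scsi_remove_device() */

/* Illustrative only: a scan-path error case that already holds scan_mutex. */
static void example_scan_failure(struct scsi_device *sdev)
{
        struct Scsi_Host *shost = sdev->host;

        down(&shost->scan_mutex);
        /* ... device setup that failed ... */
        __scsi_remove_device(sdev);     /* scan_mutex already held, no re-lock */
        up(&shost->scan_mutex);
}

/* All other callers keep using scsi_remove_device(), which takes and
 * releases scan_mutex around __scsi_remove_device() itself. */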
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
new file mode 100644
index 000000000000..ff724bbe6611
--- /dev/null
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -0,0 +1,820 @@
1/*
2 * Copyright (C) 2005 Dell Inc.
3 * Released under GPL v2.
4 *
5 * Serial Attached SCSI (SAS) transport class.
6 *
7 * The SAS transport class contains common code to deal with SAS HBAs,
 8 * an approximate representation of SAS topologies in the driver model,
 9 * and various sysfs attributes to expose these topologies and management
10 * interfaces to userspace.
11 *
12 * In addition to the basic SCSI core objects this transport class
13 * introduces two additional intermediate objects: The SAS PHY
14 * as represented by struct sas_phy defines an "outgoing" PHY on
15 * a SAS HBA or Expander, and the SAS remote PHY represented by
16 * struct sas_rphy defines an "incoming" PHY on a SAS Expander or
 17 * end device. Note that this is purely a software concept; the
 18 * underlying hardware for a PHY and a remote PHY is exactly
 19 * the same.
20 *
21 * There is no concept of a SAS port in this code, users can see
22 * what PHYs form a wide port based on the port_identifier attribute,
23 * which is the same for all PHYs in a port.
24 */
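To make this model concrete, here is a hedged sketch of how an HBA driver might report one phy with a directly attached SSP end device, using only functions defined later in this file. The shost pointer, the SAS addresses and the protocol values are assumptions for illustration, not taken from a real LLDD.

#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_sas.h>

/* Illustrative only: publish one HBA phy and the end device behind it. */
static int example_report_phy(struct Scsi_Host *shost)
{
        struct sas_phy *phy;
        struct sas_rphy *rphy;
        int error;

        phy = sas_phy_alloc(&shost->shost_gendev, 0);   /* phy number 0 */
        if (!phy)
                return -ENOMEM;
        phy->identify.initiator_port_protocols = SAS_PROTOCOL_SSP;
        phy->identify.sas_address = 0x5000000000000001ULL;      /* assumed */
        phy->negotiated_linkrate = SAS_LINK_RATE_3_0_GBPS;

        error = sas_phy_add(phy);
        if (error) {
                sas_phy_free(phy);
                return error;
        }

        rphy = sas_rphy_alloc(phy);
        if (!rphy)
                return -ENOMEM;
        rphy->identify.device_type = SAS_END_DEVICE;
        rphy->identify.target_port_protocols = SAS_PROTOCOL_SSP;
        rphy->identify.sas_address = 0x5000000000000002ULL;     /* assumed */

        /* For SSP/STP/SATA end devices this also scans the new target. */
        return sas_rphy_add(rphy);
}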
25
26#include <linux/init.h>
27#include <linux/module.h>
28#include <linux/err.h>
29
30#include <scsi/scsi_device.h>
31#include <scsi/scsi_host.h>
32#include <scsi/scsi_transport.h>
33#include <scsi/scsi_transport_sas.h>
34
35
36#define SAS_HOST_ATTRS 0
37#define SAS_PORT_ATTRS 11
38#define SAS_RPORT_ATTRS 5
39
40struct sas_internal {
41 struct scsi_transport_template t;
42 struct sas_function_template *f;
43
44 struct class_device_attribute private_host_attrs[SAS_HOST_ATTRS];
45 struct class_device_attribute private_phy_attrs[SAS_PORT_ATTRS];
46 struct class_device_attribute private_rphy_attrs[SAS_RPORT_ATTRS];
47
48 struct transport_container phy_attr_cont;
49 struct transport_container rphy_attr_cont;
50
51 /*
52 * The array of null terminated pointers to attributes
53 * needed by scsi_sysfs.c
54 */
55 struct class_device_attribute *host_attrs[SAS_HOST_ATTRS + 1];
56 struct class_device_attribute *phy_attrs[SAS_PORT_ATTRS + 1];
57 struct class_device_attribute *rphy_attrs[SAS_RPORT_ATTRS + 1];
58};
59#define to_sas_internal(tmpl) container_of(tmpl, struct sas_internal, t)
60
61struct sas_host_attrs {
62 struct list_head rphy_list;
63 spinlock_t lock;
64 u32 next_target_id;
65};
66#define to_sas_host_attrs(host) ((struct sas_host_attrs *)(host)->shost_data)
67
68
69/*
70 * Hack to allow attributes of the same name in different objects.
71 */
72#define SAS_CLASS_DEVICE_ATTR(_prefix,_name,_mode,_show,_store) \
73 struct class_device_attribute class_device_attr_##_prefix##_##_name = \
74 __ATTR(_name,_mode,_show,_store)
75
76
77/*
78 * Pretty printing helpers
79 */
80
81#define sas_bitfield_name_match(title, table) \
82static ssize_t \
83get_sas_##title##_names(u32 table_key, char *buf) \
84{ \
85 char *prefix = ""; \
86 ssize_t len = 0; \
87 int i; \
88 \
89 for (i = 0; i < sizeof(table)/sizeof(table[0]); i++) { \
90 if (table[i].value & table_key) { \
91 len += sprintf(buf + len, "%s%s", \
92 prefix, table[i].name); \
93 prefix = ", "; \
94 } \
95 } \
96 len += sprintf(buf + len, "\n"); \
97 return len; \
98}
99
100#define sas_bitfield_name_search(title, table) \
101static ssize_t \
102get_sas_##title##_names(u32 table_key, char *buf) \
103{ \
104 ssize_t len = 0; \
105 int i; \
106 \
107 for (i = 0; i < sizeof(table)/sizeof(table[0]); i++) { \
108 if (table[i].value == table_key) { \
109 len += sprintf(buf + len, "%s", \
110 table[i].name); \
111 break; \
112 } \
113 } \
114 len += sprintf(buf + len, "\n"); \
115 return len; \
116}
117
118static struct {
119 u32 value;
120 char *name;
121} sas_device_type_names[] = {
122 { SAS_PHY_UNUSED, "unused" },
123 { SAS_END_DEVICE, "end device" },
124 { SAS_EDGE_EXPANDER_DEVICE, "edge expander" },
125 { SAS_FANOUT_EXPANDER_DEVICE, "fanout expander" },
126};
127sas_bitfield_name_search(device_type, sas_device_type_names)
128
129
130static struct {
131 u32 value;
132 char *name;
133} sas_protocol_names[] = {
134 { SAS_PROTOCOL_SATA, "sata" },
135 { SAS_PROTOCOL_SMP, "smp" },
136 { SAS_PROTOCOL_STP, "stp" },
137 { SAS_PROTOCOL_SSP, "ssp" },
138};
139sas_bitfield_name_match(protocol, sas_protocol_names)
140
141static struct {
142 u32 value;
143 char *name;
144} sas_linkspeed_names[] = {
145 { SAS_LINK_RATE_UNKNOWN, "Unknown" },
146 { SAS_PHY_DISABLED, "Phy disabled" },
147 { SAS_LINK_RATE_FAILED, "Link Rate failed" },
148 { SAS_SATA_SPINUP_HOLD, "Spin-up hold" },
149 { SAS_LINK_RATE_1_5_GBPS, "1.5 Gbit" },
150 { SAS_LINK_RATE_3_0_GBPS, "3.0 Gbit" },
151};
152sas_bitfield_name_search(linkspeed, sas_linkspeed_names)
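For reference, a hedged example of what the generated helpers produce, assuming the tables above: sas_bitfield_name_match() prints every matching bit, comma separated and in table order, while sas_bitfield_name_search() prints only the entry whose value equals the key.

static void example_name_helpers(void)
{
        char buf[64];

        /* generated by sas_bitfield_name_match(protocol, sas_protocol_names) */
        get_sas_protocol_names(SAS_PROTOCOL_STP | SAS_PROTOCOL_SSP, buf);
        /* buf now holds "stp, ssp\n" */

        /* generated by sas_bitfield_name_search(linkspeed, sas_linkspeed_names) */
        get_sas_linkspeed_names(SAS_LINK_RATE_3_0_GBPS, buf);
        /* buf now holds "3.0 Gbit\n" */
}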
153
154
155/*
156 * SAS host attributes
157 */
158
159static int sas_host_setup(struct transport_container *tc, struct device *dev,
160 struct class_device *cdev)
161{
162 struct Scsi_Host *shost = dev_to_shost(dev);
163 struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);
164
165 INIT_LIST_HEAD(&sas_host->rphy_list);
166 spin_lock_init(&sas_host->lock);
167 sas_host->next_target_id = 0;
168 return 0;
169}
170
171static DECLARE_TRANSPORT_CLASS(sas_host_class,
172 "sas_host", sas_host_setup, NULL, NULL);
173
174static int sas_host_match(struct attribute_container *cont,
175 struct device *dev)
176{
177 struct Scsi_Host *shost;
178 struct sas_internal *i;
179
180 if (!scsi_is_host_device(dev))
181 return 0;
182 shost = dev_to_shost(dev);
183
184 if (!shost->transportt)
185 return 0;
186 if (shost->transportt->host_attrs.ac.class !=
187 &sas_host_class.class)
188 return 0;
189
190 i = to_sas_internal(shost->transportt);
191 return &i->t.host_attrs.ac == cont;
192}
193
194static int do_sas_phy_delete(struct device *dev, void *data)
195{
196 if (scsi_is_sas_phy(dev))
197 sas_phy_delete(dev_to_phy(dev));
198 return 0;
199}
200
201/**
202 * sas_remove_host -- tear down a Scsi_Host's SAS data structures
203 * @shost: Scsi Host that is torn down
204 *
205 * Removes all SAS PHYs and remote PHYs for a given Scsi_Host.
206 * Must be called just before scsi_remove_host for SAS HBAs.
207 */
208void sas_remove_host(struct Scsi_Host *shost)
209{
210 device_for_each_child(&shost->shost_gendev, NULL, do_sas_phy_delete);
211}
212EXPORT_SYMBOL(sas_remove_host);
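A hedged sketch of the teardown ordering the comment above prescribes, as a SAS LLDD's remove path might look; struct example_hba and its shost member are assumptions used only for illustration.

#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_sas.h>

struct example_hba {                    /* assumed, illustration only */
        struct Scsi_Host *shost;
};

static void example_remove(struct example_hba *hba)
{
        sas_remove_host(hba->shost);    /* delete all phys/rphys first ... */
        scsi_remove_host(hba->shost);   /* ... then unregister the host */
        scsi_host_put(hba->shost);      /* drop the driver's reference */
}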
213
214
215/*
216 * SAS Port attributes
217 */
218
219#define sas_phy_show_simple(field, name, format_string, cast) \
220static ssize_t \
221show_sas_phy_##name(struct class_device *cdev, char *buf) \
222{ \
223 struct sas_phy *phy = transport_class_to_phy(cdev); \
224 \
225 return snprintf(buf, 20, format_string, cast phy->field); \
226}
227
228#define sas_phy_simple_attr(field, name, format_string, type) \
229 sas_phy_show_simple(field, name, format_string, (type)) \
230static CLASS_DEVICE_ATTR(name, S_IRUGO, show_sas_phy_##name, NULL)
231
232#define sas_phy_show_protocol(field, name) \
233static ssize_t \
234show_sas_phy_##name(struct class_device *cdev, char *buf) \
235{ \
236 struct sas_phy *phy = transport_class_to_phy(cdev); \
237 \
238 if (!phy->field) \
239 return snprintf(buf, 20, "none\n"); \
240 return get_sas_protocol_names(phy->field, buf); \
241}
242
243#define sas_phy_protocol_attr(field, name) \
244 sas_phy_show_protocol(field, name) \
245static CLASS_DEVICE_ATTR(name, S_IRUGO, show_sas_phy_##name, NULL)
246
247#define sas_phy_show_linkspeed(field) \
248static ssize_t \
249show_sas_phy_##field(struct class_device *cdev, char *buf) \
250{ \
251 struct sas_phy *phy = transport_class_to_phy(cdev); \
252 \
253 return get_sas_linkspeed_names(phy->field, buf); \
254}
255
256#define sas_phy_linkspeed_attr(field) \
257 sas_phy_show_linkspeed(field) \
258static CLASS_DEVICE_ATTR(field, S_IRUGO, show_sas_phy_##field, NULL)
259
260static ssize_t
261show_sas_device_type(struct class_device *cdev, char *buf)
262{
263 struct sas_phy *phy = transport_class_to_phy(cdev);
264
265 if (!phy->identify.device_type)
266 return snprintf(buf, 20, "none\n");
267 return get_sas_device_type_names(phy->identify.device_type, buf);
268}
269
270static CLASS_DEVICE_ATTR(device_type, S_IRUGO, show_sas_device_type, NULL);
271
272sas_phy_protocol_attr(identify.initiator_port_protocols,
273 initiator_port_protocols);
274sas_phy_protocol_attr(identify.target_port_protocols,
275 target_port_protocols);
276sas_phy_simple_attr(identify.sas_address, sas_address, "0x%016llx\n",
277 unsigned long long);
278sas_phy_simple_attr(identify.phy_identifier, phy_identifier, "%d\n", u8);
279sas_phy_simple_attr(port_identifier, port_identifier, "%d\n", u8);
280sas_phy_linkspeed_attr(negotiated_linkrate);
281sas_phy_linkspeed_attr(minimum_linkrate_hw);
282sas_phy_linkspeed_attr(minimum_linkrate);
283sas_phy_linkspeed_attr(maximum_linkrate_hw);
284sas_phy_linkspeed_attr(maximum_linkrate);
285
286
287static DECLARE_TRANSPORT_CLASS(sas_phy_class,
288 "sas_phy", NULL, NULL, NULL);
289
290static int sas_phy_match(struct attribute_container *cont, struct device *dev)
291{
292 struct Scsi_Host *shost;
293 struct sas_internal *i;
294
295 if (!scsi_is_sas_phy(dev))
296 return 0;
297 shost = dev_to_shost(dev->parent);
298
299 if (!shost->transportt)
300 return 0;
301 if (shost->transportt->host_attrs.ac.class !=
302 &sas_host_class.class)
303 return 0;
304
305 i = to_sas_internal(shost->transportt);
306 return &i->phy_attr_cont.ac == cont;
307}
308
309static void sas_phy_release(struct device *dev)
310{
311 struct sas_phy *phy = dev_to_phy(dev);
312
313 put_device(dev->parent);
314 kfree(phy);
315}
316
317/**
 318 * sas_phy_alloc -- allocate and initialize a SAS PHY structure
319 * @parent: Parent device
320 * @number: Port number
321 *
 322 * Allocates a SAS PHY structure. It will be added to the device tree
323 * below the device specified by @parent, which has to be either a Scsi_Host
324 * or sas_rphy.
325 *
326 * Returns:
327 * SAS PHY allocated or %NULL if the allocation failed.
328 */
329struct sas_phy *sas_phy_alloc(struct device *parent, int number)
330{
331 struct Scsi_Host *shost = dev_to_shost(parent);
332 struct sas_phy *phy;
333
334 phy = kmalloc(sizeof(*phy), GFP_KERNEL);
335 if (!phy)
336 return NULL;
337 memset(phy, 0, sizeof(*phy));
338
339 get_device(parent);
340
341 phy->number = number;
342
343 device_initialize(&phy->dev);
344 phy->dev.parent = get_device(parent);
345 phy->dev.release = sas_phy_release;
346 sprintf(phy->dev.bus_id, "phy-%d:%d", shost->host_no, number);
347
348 transport_setup_device(&phy->dev);
349
350 return phy;
351}
352EXPORT_SYMBOL(sas_phy_alloc);
353
354/**
 355 * sas_phy_add -- add a SAS PHY to the device hierarchy
356 * @phy: The PHY to be added
357 *
358 * Publishes a SAS PHY to the rest of the system.
359 */
360int sas_phy_add(struct sas_phy *phy)
361{
362 int error;
363
364 error = device_add(&phy->dev);
365 if (!error) {
366 transport_add_device(&phy->dev);
367 transport_configure_device(&phy->dev);
368 }
369
370 return error;
371}
372EXPORT_SYMBOL(sas_phy_add);
373
374/**
375 * sas_phy_free -- free a SAS PHY
376 * @phy: SAS PHY to free
377 *
378 * Frees the specified SAS PHY.
379 *
380 * Note:
381 * This function must only be called on a PHY that has not
 382 * successfully been added using sas_phy_add().
383 */
384void sas_phy_free(struct sas_phy *phy)
385{
386 transport_destroy_device(&phy->dev);
387 put_device(phy->dev.parent);
388 put_device(phy->dev.parent);
389 put_device(phy->dev.parent);
390 kfree(phy);
391}
392EXPORT_SYMBOL(sas_phy_free);
393
394/**
395 * sas_phy_delete -- remove SAS PHY
396 * @phy: SAS PHY to remove
397 *
398 * Removes the specified SAS PHY. If the SAS PHY has an
399 * associated remote PHY it is removed before.
400 */
401void
402sas_phy_delete(struct sas_phy *phy)
403{
404 struct device *dev = &phy->dev;
405
406 if (phy->rphy)
407 sas_rphy_delete(phy->rphy);
408
409 transport_remove_device(dev);
410 device_del(dev);
411 transport_destroy_device(dev);
412 put_device(dev->parent);
413}
414EXPORT_SYMBOL(sas_phy_delete);
415
416/**
417 * scsi_is_sas_phy -- check if a struct device represents a SAS PHY
418 * @dev: device to check
419 *
420 * Returns:
421 * %1 if the device represents a SAS PHY, %0 else
422 */
423int scsi_is_sas_phy(const struct device *dev)
424{
425 return dev->release == sas_phy_release;
426}
427EXPORT_SYMBOL(scsi_is_sas_phy);
428
429/*
430 * SAS remote PHY attributes.
431 */
432
433#define sas_rphy_show_simple(field, name, format_string, cast) \
434static ssize_t \
435show_sas_rphy_##name(struct class_device *cdev, char *buf) \
436{ \
437 struct sas_rphy *rphy = transport_class_to_rphy(cdev); \
438 \
439 return snprintf(buf, 20, format_string, cast rphy->field); \
440}
441
442#define sas_rphy_simple_attr(field, name, format_string, type) \
443 sas_rphy_show_simple(field, name, format_string, (type)) \
444static SAS_CLASS_DEVICE_ATTR(rphy, name, S_IRUGO, \
445 show_sas_rphy_##name, NULL)
446
447#define sas_rphy_show_protocol(field, name) \
448static ssize_t \
449show_sas_rphy_##name(struct class_device *cdev, char *buf) \
450{ \
451 struct sas_rphy *rphy = transport_class_to_rphy(cdev); \
452 \
453 if (!rphy->field) \
454 return snprintf(buf, 20, "none\n"); \
455 return get_sas_protocol_names(rphy->field, buf); \
456}
457
458#define sas_rphy_protocol_attr(field, name) \
459 sas_rphy_show_protocol(field, name) \
460static SAS_CLASS_DEVICE_ATTR(rphy, name, S_IRUGO, \
461 show_sas_rphy_##name, NULL)
462
463static ssize_t
464show_sas_rphy_device_type(struct class_device *cdev, char *buf)
465{
466 struct sas_rphy *rphy = transport_class_to_rphy(cdev);
467
468 if (!rphy->identify.device_type)
469 return snprintf(buf, 20, "none\n");
470 return get_sas_device_type_names(
471 rphy->identify.device_type, buf);
472}
473
474static SAS_CLASS_DEVICE_ATTR(rphy, device_type, S_IRUGO,
475 show_sas_rphy_device_type, NULL);
476
477sas_rphy_protocol_attr(identify.initiator_port_protocols,
478 initiator_port_protocols);
479sas_rphy_protocol_attr(identify.target_port_protocols, target_port_protocols);
480sas_rphy_simple_attr(identify.sas_address, sas_address, "0x%016llx\n",
481 unsigned long long);
482sas_rphy_simple_attr(identify.phy_identifier, phy_identifier, "%d\n", u8);
483
484static DECLARE_TRANSPORT_CLASS(sas_rphy_class,
485 "sas_rphy", NULL, NULL, NULL);
486
487static int sas_rphy_match(struct attribute_container *cont, struct device *dev)
488{
489 struct Scsi_Host *shost;
490 struct sas_internal *i;
491
492 if (!scsi_is_sas_rphy(dev))
493 return 0;
494 shost = dev_to_shost(dev->parent->parent);
495
496 if (!shost->transportt)
497 return 0;
498 if (shost->transportt->host_attrs.ac.class !=
499 &sas_host_class.class)
500 return 0;
501
502 i = to_sas_internal(shost->transportt);
503 return &i->rphy_attr_cont.ac == cont;
504}
505
506static void sas_rphy_release(struct device *dev)
507{
508 struct sas_rphy *rphy = dev_to_rphy(dev);
509
510 put_device(dev->parent);
511 kfree(rphy);
512}
513
514/**
 515 * sas_rphy_alloc -- allocate and initialize a SAS remote PHY structure
 516 * @parent: SAS PHY this remote PHY is connected to
 517 *
 518 * Allocates a SAS remote PHY structure, connected to @parent.
 519 *
 520 * Returns:
 521 * SAS remote PHY allocated or %NULL if the allocation failed.
522 */
523struct sas_rphy *sas_rphy_alloc(struct sas_phy *parent)
524{
525 struct Scsi_Host *shost = dev_to_shost(&parent->dev);
526 struct sas_rphy *rphy;
527
528 rphy = kmalloc(sizeof(*rphy), GFP_KERNEL);
529 if (!rphy) {
530 put_device(&parent->dev);
531 return NULL;
532 }
533 memset(rphy, 0, sizeof(*rphy));
534
535 device_initialize(&rphy->dev);
536 rphy->dev.parent = get_device(&parent->dev);
537 rphy->dev.release = sas_rphy_release;
538 sprintf(rphy->dev.bus_id, "rphy-%d:%d",
539 shost->host_no, parent->number);
540 transport_setup_device(&rphy->dev);
541
542 return rphy;
543}
544EXPORT_SYMBOL(sas_rphy_alloc);
545
546/**
 547 * sas_rphy_add -- add a SAS remote PHY to the device hierarchy
548 * @rphy: The remote PHY to be added
549 *
550 * Publishes a SAS remote PHY to the rest of the system.
551 */
552int sas_rphy_add(struct sas_rphy *rphy)
553{
554 struct sas_phy *parent = dev_to_phy(rphy->dev.parent);
555 struct Scsi_Host *shost = dev_to_shost(parent->dev.parent);
556 struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);
557 struct sas_identify *identify = &rphy->identify;
558 int error;
559
560 if (parent->rphy)
561 return -ENXIO;
562 parent->rphy = rphy;
563
564 error = device_add(&rphy->dev);
565 if (error)
566 return error;
567 transport_add_device(&rphy->dev);
568 transport_configure_device(&rphy->dev);
569
570 spin_lock(&sas_host->lock);
571 list_add_tail(&rphy->list, &sas_host->rphy_list);
572 if (identify->device_type == SAS_END_DEVICE &&
573 (identify->target_port_protocols &
574 (SAS_PROTOCOL_SSP|SAS_PROTOCOL_STP|SAS_PROTOCOL_SATA)))
575 rphy->scsi_target_id = sas_host->next_target_id++;
576 else
577 rphy->scsi_target_id = -1;
578 spin_unlock(&sas_host->lock);
579
580 if (rphy->scsi_target_id != -1) {
581 scsi_scan_target(&rphy->dev, parent->number,
582 rphy->scsi_target_id, ~0, 0);
583 }
584
585 return 0;
586}
587EXPORT_SYMBOL(sas_rphy_add);
588
589/**
590 * sas_rphy_free -- free a SAS remote PHY
 591 * @rphy: SAS remote PHY to free
592 *
593 * Frees the specified SAS remote PHY.
594 *
595 * Note:
596 * This function must only be called on a remote
 597 * PHY that has not successfully been added using
598 * sas_rphy_add().
599 */
600void sas_rphy_free(struct sas_rphy *rphy)
601{
602 struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent->parent);
603 struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);
604
605 spin_lock(&sas_host->lock);
606 list_del(&rphy->list);
607 spin_unlock(&sas_host->lock);
608
609 transport_destroy_device(&rphy->dev);
610 put_device(rphy->dev.parent);
611 put_device(rphy->dev.parent);
612 put_device(rphy->dev.parent);
613 kfree(rphy);
614}
615EXPORT_SYMBOL(sas_rphy_free);
616
617/**
618 * sas_rphy_delete -- remove SAS remote PHY
619 * @rphy: SAS remote PHY to remove
620 *
621 * Removes the specified SAS remote PHY.
622 */
623void
624sas_rphy_delete(struct sas_rphy *rphy)
625{
626 struct device *dev = &rphy->dev;
627 struct sas_phy *parent = dev_to_phy(dev->parent);
628 struct Scsi_Host *shost = dev_to_shost(parent->dev.parent);
629 struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);
630
631 transport_destroy_device(&rphy->dev);
632
633 scsi_remove_target(&rphy->dev);
634
635 spin_lock(&sas_host->lock);
636 list_del(&rphy->list);
637 spin_unlock(&sas_host->lock);
638
639 transport_remove_device(dev);
640 device_del(dev);
641 transport_destroy_device(dev);
642 put_device(&parent->dev);
643}
644EXPORT_SYMBOL(sas_rphy_delete);
645
646/**
647 * scsi_is_sas_rphy -- check if a struct device represents a SAS remote PHY
648 * @dev: device to check
649 *
650 * Returns:
651 * %1 if the device represents a SAS remote PHY, %0 else
652 */
653int scsi_is_sas_rphy(const struct device *dev)
654{
655 return dev->release == sas_rphy_release;
656}
657EXPORT_SYMBOL(scsi_is_sas_rphy);
658
659
660/*
661 * SCSI scan helper
662 */
663
664static struct device *sas_target_parent(struct Scsi_Host *shost,
665 int channel, uint id)
666{
667 struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);
668 struct sas_rphy *rphy;
669 struct device *dev = NULL;
670
671 spin_lock(&sas_host->lock);
672 list_for_each_entry(rphy, &sas_host->rphy_list, list) {
673 struct sas_phy *parent = dev_to_phy(rphy->dev.parent);
674 if (parent->number == channel &&
675 rphy->scsi_target_id == id)
676 dev = &rphy->dev;
677 }
678 spin_unlock(&sas_host->lock);
679
680 return dev;
681}
682
683
684/*
685 * Setup / Teardown code
686 */
687
688#define SETUP_RPORT_ATTRIBUTE(field) \
689 i->private_rphy_attrs[count] = class_device_attr_##field; \
690 i->private_rphy_attrs[count].attr.mode = S_IRUGO; \
691 i->private_rphy_attrs[count].store = NULL; \
692 i->rphy_attrs[count] = &i->private_rphy_attrs[count]; \
693 count++
694
695#define SETUP_PORT_ATTRIBUTE(field) \
696 i->private_phy_attrs[count] = class_device_attr_##field; \
697 i->private_phy_attrs[count].attr.mode = S_IRUGO; \
698 i->private_phy_attrs[count].store = NULL; \
699 i->phy_attrs[count] = &i->private_phy_attrs[count]; \
700 count++
701
702
703/**
704 * sas_attach_transport -- instantiate SAS transport template
705 * @ft: SAS transport class function template
706 */
707struct scsi_transport_template *
708sas_attach_transport(struct sas_function_template *ft)
709{
710 struct sas_internal *i;
711 int count;
712
713 i = kmalloc(sizeof(struct sas_internal), GFP_KERNEL);
714 if (!i)
715 return NULL;
716 memset(i, 0, sizeof(struct sas_internal));
717
718 i->t.target_parent = sas_target_parent;
719
720 i->t.host_attrs.ac.attrs = &i->host_attrs[0];
721 i->t.host_attrs.ac.class = &sas_host_class.class;
722 i->t.host_attrs.ac.match = sas_host_match;
723 transport_container_register(&i->t.host_attrs);
724 i->t.host_size = sizeof(struct sas_host_attrs);
725
726 i->phy_attr_cont.ac.class = &sas_phy_class.class;
727 i->phy_attr_cont.ac.attrs = &i->phy_attrs[0];
728 i->phy_attr_cont.ac.match = sas_phy_match;
729 transport_container_register(&i->phy_attr_cont);
730
731 i->rphy_attr_cont.ac.class = &sas_rphy_class.class;
732 i->rphy_attr_cont.ac.attrs = &i->rphy_attrs[0];
733 i->rphy_attr_cont.ac.match = sas_rphy_match;
734 transport_container_register(&i->rphy_attr_cont);
735
736 i->f = ft;
737
738 count = 0;
739 i->host_attrs[count] = NULL;
740
741 count = 0;
742 SETUP_PORT_ATTRIBUTE(initiator_port_protocols);
743 SETUP_PORT_ATTRIBUTE(target_port_protocols);
744 SETUP_PORT_ATTRIBUTE(device_type);
745 SETUP_PORT_ATTRIBUTE(sas_address);
746 SETUP_PORT_ATTRIBUTE(phy_identifier);
747 SETUP_PORT_ATTRIBUTE(port_identifier);
748 SETUP_PORT_ATTRIBUTE(negotiated_linkrate);
749 SETUP_PORT_ATTRIBUTE(minimum_linkrate_hw);
750 SETUP_PORT_ATTRIBUTE(minimum_linkrate);
751 SETUP_PORT_ATTRIBUTE(maximum_linkrate_hw);
752 SETUP_PORT_ATTRIBUTE(maximum_linkrate);
753 i->phy_attrs[count] = NULL;
754
755 count = 0;
756 SETUP_RPORT_ATTRIBUTE(rphy_initiator_port_protocols);
757 SETUP_RPORT_ATTRIBUTE(rphy_target_port_protocols);
758 SETUP_RPORT_ATTRIBUTE(rphy_device_type);
759 SETUP_RPORT_ATTRIBUTE(rphy_sas_address);
760 SETUP_RPORT_ATTRIBUTE(rphy_phy_identifier);
761 i->rphy_attrs[count] = NULL;
762
763 return &i->t;
764}
765EXPORT_SYMBOL(sas_attach_transport);
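A hedged sketch of how an LLDD would instantiate the template at module load and plug it into its hosts; the example_* names are assumptions. Since sas_function_template carries no methods yet in this version, an empty instance is enough.

#include <linux/init.h>
#include <linux/module.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_sas.h>

static struct sas_function_template example_sas_functions = { };
static struct scsi_transport_template *example_sas_template;

static int __init example_init(void)
{
        example_sas_template = sas_attach_transport(&example_sas_functions);
        if (!example_sas_template)
                return -ENOMEM;
        /* Each Scsi_Host the driver allocates then sets
         *      shost->transportt = example_sas_template;
         * before calling scsi_add_host(). */
        return 0;
}

static void __exit example_exit(void)
{
        sas_release_transport(example_sas_template);
}

module_init(example_init);
module_exit(example_exit);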
766
767/**
768 * sas_release_transport -- release SAS transport template instance
769 * @t: transport template instance
770 */
771void sas_release_transport(struct scsi_transport_template *t)
772{
773 struct sas_internal *i = to_sas_internal(t);
774
775 transport_container_unregister(&i->t.host_attrs);
776 transport_container_unregister(&i->phy_attr_cont);
777 transport_container_unregister(&i->rphy_attr_cont);
778
779 kfree(i);
780}
781EXPORT_SYMBOL(sas_release_transport);
782
783static __init int sas_transport_init(void)
784{
785 int error;
786
787 error = transport_class_register(&sas_host_class);
788 if (error)
789 goto out;
790 error = transport_class_register(&sas_phy_class);
791 if (error)
792 goto out_unregister_transport;
793 error = transport_class_register(&sas_rphy_class);
794 if (error)
795 goto out_unregister_phy;
796
797 return 0;
798
799 out_unregister_phy:
800 transport_class_unregister(&sas_phy_class);
801 out_unregister_transport:
802 transport_class_unregister(&sas_host_class);
803 out:
804 return error;
805
806}
807
808static void __exit sas_transport_exit(void)
809{
810 transport_class_unregister(&sas_host_class);
811 transport_class_unregister(&sas_phy_class);
812 transport_class_unregister(&sas_rphy_class);
813}
814
815MODULE_AUTHOR("Christoph Hellwig");
 816MODULE_DESCRIPTION("SAS Transport Attributes");
817MODULE_LICENSE("GPL");
818
819module_init(sas_transport_init);
820module_exit(sas_transport_exit);
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index b1b69d738d08..9ea4765d1d12 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -61,7 +61,7 @@ static int sg_version_num = 30533; /* 2 digits for each component */
61 61
62#ifdef CONFIG_SCSI_PROC_FS 62#ifdef CONFIG_SCSI_PROC_FS
63#include <linux/proc_fs.h> 63#include <linux/proc_fs.h>
64static char *sg_version_date = "20050901"; 64static char *sg_version_date = "20050908";
65 65
66static int sg_proc_init(void); 66static int sg_proc_init(void);
67static void sg_proc_cleanup(void); 67static void sg_proc_cleanup(void);
@@ -1299,7 +1299,7 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
1299 sg_rb_correct4mmap(rsv_schp, 1); /* do only once per fd lifetime */ 1299 sg_rb_correct4mmap(rsv_schp, 1); /* do only once per fd lifetime */
1300 sfp->mmap_called = 1; 1300 sfp->mmap_called = 1;
1301 } 1301 }
1302 vma->vm_flags |= (VM_RESERVED | VM_IO); 1302 vma->vm_flags |= VM_RESERVED;
1303 vma->vm_private_data = sfp; 1303 vma->vm_private_data = sfp;
1304 vma->vm_ops = &sg_mmap_vm_ops; 1304 vma->vm_ops = &sg_mmap_vm_ops;
1305 return 0; 1305 return 0;
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index da63722c0123..c0e4c67d836f 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -178,8 +178,8 @@ static inline struct scsi_target *scsi_target(struct scsi_device *sdev)
178 178
179extern struct scsi_device *__scsi_add_device(struct Scsi_Host *, 179extern struct scsi_device *__scsi_add_device(struct Scsi_Host *,
180 uint, uint, uint, void *hostdata); 180 uint, uint, uint, void *hostdata);
181#define scsi_add_device(host, channel, target, lun) \ 181extern int scsi_add_device(struct Scsi_Host *host, uint channel,
182 __scsi_add_device(host, channel, target, lun, NULL) 182 uint target, uint lun);
183extern void scsi_remove_device(struct scsi_device *); 183extern void scsi_remove_device(struct scsi_device *);
184extern int scsi_device_cancel(struct scsi_device *, int); 184extern int scsi_device_cancel(struct scsi_device *, int);
185 185
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
index 70ad16315a16..115db056dc6b 100644
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -439,4 +439,12 @@ int fc_remote_port_block(struct fc_rport *rport);
439void fc_remote_port_unblock(struct fc_rport *rport); 439void fc_remote_port_unblock(struct fc_rport *rport);
440int scsi_is_fc_rport(const struct device *); 440int scsi_is_fc_rport(const struct device *);
441 441
442static inline u64 wwn_to_u64(u8 *wwn)
443{
444 return (u64)wwn[0] << 56 | (u64)wwn[1] << 48 |
445 (u64)wwn[2] << 40 | (u64)wwn[3] << 32 |
446 (u64)wwn[4] << 24 | (u64)wwn[5] << 16 |
447 (u64)wwn[6] << 8 | (u64)wwn[7];
448}
449
442#endif /* SCSI_TRANSPORT_FC_H */ 450#endif /* SCSI_TRANSPORT_FC_H */
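The new wwn_to_u64() helper folds the 8-byte, big-endian WWN buffers FC drivers keep in their firmware structures into the u64 form used by the fc_host/fc_rport attributes. A hedged example with an illustrative byte pattern:

static u64 example_wwn(void)
{
        u8 wwpn[8] = { 0x50, 0x06, 0x0b, 0x00, 0x00, 0xc2, 0x62, 0x00 };

        /* wwn[0] becomes the most significant byte. */
        return wwn_to_u64(wwpn);        /* == 0x50060b0000c26200ULL */
}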
diff --git a/include/scsi/scsi_transport_sas.h b/include/scsi/scsi_transport_sas.h
new file mode 100644
index 000000000000..bc4aeb660dd3
--- /dev/null
+++ b/include/scsi/scsi_transport_sas.h
@@ -0,0 +1,100 @@
1#ifndef SCSI_TRANSPORT_SAS_H
2#define SCSI_TRANSPORT_SAS_H
3
4#include <linux/transport_class.h>
5#include <linux/types.h>
6
7struct scsi_transport_template;
8struct sas_rphy;
9
10
11enum sas_device_type {
12 SAS_PHY_UNUSED,
13 SAS_END_DEVICE,
14 SAS_EDGE_EXPANDER_DEVICE,
15 SAS_FANOUT_EXPANDER_DEVICE,
16};
17
18enum sas_protocol {
19 SAS_PROTOCOL_SATA = 0x01,
20 SAS_PROTOCOL_SMP = 0x02,
21 SAS_PROTOCOL_STP = 0x04,
22 SAS_PROTOCOL_SSP = 0x08,
23};
24
25enum sas_linkrate {
26 SAS_LINK_RATE_UNKNOWN,
27 SAS_PHY_DISABLED,
28 SAS_LINK_RATE_FAILED,
29 SAS_SATA_SPINUP_HOLD,
30 SAS_SATA_PORT_SELECTOR,
31 SAS_LINK_RATE_1_5_GBPS,
32 SAS_LINK_RATE_3_0_GBPS,
33 SAS_LINK_VIRTUAL,
34};
35
36struct sas_identify {
37 enum sas_device_type device_type;
38 enum sas_protocol initiator_port_protocols;
39 enum sas_protocol target_port_protocols;
40 u64 sas_address;
41 u8 phy_identifier;
42};
43
44/* The functions by which the transport class and the driver communicate */
45struct sas_function_template {
46};
47
48struct sas_phy {
49 struct device dev;
50 int number;
51 struct sas_identify identify;
52 enum sas_linkrate negotiated_linkrate;
53 enum sas_linkrate minimum_linkrate_hw;
54 enum sas_linkrate minimum_linkrate;
55 enum sas_linkrate maximum_linkrate_hw;
56 enum sas_linkrate maximum_linkrate;
57 u8 port_identifier;
58 struct sas_rphy *rphy;
59};
60
61#define dev_to_phy(d) \
62 container_of((d), struct sas_phy, dev)
63#define transport_class_to_phy(cdev) \
64 dev_to_phy((cdev)->dev)
65#define phy_to_shost(phy) \
66 dev_to_shost((phy)->dev.parent)
67
68struct sas_rphy {
69 struct device dev;
70 struct sas_identify identify;
71 struct list_head list;
72 u32 scsi_target_id;
73};
74
75#define dev_to_rphy(d) \
76 container_of((d), struct sas_rphy, dev)
77#define transport_class_to_rphy(cdev) \
78 dev_to_rphy((cdev)->dev)
79#define rphy_to_shost(rphy) \
80 dev_to_shost((rphy)->dev.parent)
81
82extern void sas_remove_host(struct Scsi_Host *);
83
84extern struct sas_phy *sas_phy_alloc(struct device *, int);
85extern void sas_phy_free(struct sas_phy *);
86extern int sas_phy_add(struct sas_phy *);
87extern void sas_phy_delete(struct sas_phy *);
88extern int scsi_is_sas_phy(const struct device *);
89
90extern struct sas_rphy *sas_rphy_alloc(struct sas_phy *);
91void sas_rphy_free(struct sas_rphy *);
92extern int sas_rphy_add(struct sas_rphy *);
93extern void sas_rphy_delete(struct sas_rphy *);
94extern int scsi_is_sas_rphy(const struct device *);
95
96extern struct scsi_transport_template *
97sas_attach_transport(struct sas_function_template *);
98extern void sas_release_transport(struct scsi_transport_template *);
99
100#endif /* SCSI_TRANSPORT_SAS_H */