author     Stefan Richter <stefanr@s5r6.in-berlin.de>   2009-06-05 10:26:18 -0400
committer  Stefan Richter <stefanr@s5r6.in-berlin.de>   2009-06-05 10:26:18 -0400
commit     e71d31da062095d8b0b02a26fb5e8879e8d3d0de (patch)
tree       6d45660ec489d9d58bad51a02d8d8877b534e81b /drivers/firewire/sbp2.c
parent     77c9a5daa9c4d9b37812c9c69c7bcbb3f9399c3c (diff)
firewire: rename source files
The source files of firewire-core, firewire-ohci, firewire-sbp2, i.e.
"drivers/firewire/fw-*.c"
are renamed to
"drivers/firewire/core-*.c",
"drivers/firewire/ohci.c",
"drivers/firewire/sbp2.c".
The old fw- prefix was redundant to the directory name. The new core-
prefix distinguishes the files according to which driver they belong to.
This change comes a little late, but still before further firewire
drivers are added as anticipated RSN.
Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>
Diffstat (limited to 'drivers/firewire/sbp2.c')
-rw-r--r--   drivers/firewire/sbp2.c   1651
1 files changed, 1651 insertions, 0 deletions

diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
new file mode 100644
index 000000000000..d41cb6e455bc
--- /dev/null
+++ b/drivers/firewire/sbp2.c
@@ -0,0 +1,1651 @@
1 | /* | ||
2 | * SBP2 driver (SCSI over IEEE1394) | ||
3 | * | ||
4 | * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software Foundation, | ||
18 | * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
19 | */ | ||
20 | |||
21 | /* | ||
22 | * The basic structure of this driver is based on the old storage driver, | ||
23 | * drivers/ieee1394/sbp2.c, originally written by | ||
24 | * James Goodwin <jamesg@filanet.com> | ||
25 | * with later contributions and ongoing maintenance from | ||
26 | * Ben Collins <bcollins@debian.org>, | ||
27 | * Stefan Richter <stefanr@s5r6.in-berlin.de> | ||
28 | * and many others. | ||
29 | */ | ||
30 | |||
31 | #include <linux/blkdev.h> | ||
32 | #include <linux/bug.h> | ||
33 | #include <linux/completion.h> | ||
34 | #include <linux/delay.h> | ||
35 | #include <linux/device.h> | ||
36 | #include <linux/dma-mapping.h> | ||
37 | #include <linux/firewire.h> | ||
38 | #include <linux/firewire-constants.h> | ||
39 | #include <linux/init.h> | ||
40 | #include <linux/jiffies.h> | ||
41 | #include <linux/kernel.h> | ||
42 | #include <linux/kref.h> | ||
43 | #include <linux/list.h> | ||
44 | #include <linux/mod_devicetable.h> | ||
45 | #include <linux/module.h> | ||
46 | #include <linux/moduleparam.h> | ||
47 | #include <linux/scatterlist.h> | ||
48 | #include <linux/slab.h> | ||
49 | #include <linux/spinlock.h> | ||
50 | #include <linux/string.h> | ||
51 | #include <linux/stringify.h> | ||
52 | #include <linux/workqueue.h> | ||
53 | |||
54 | #include <asm/byteorder.h> | ||
55 | #include <asm/system.h> | ||
56 | |||
57 | #include <scsi/scsi.h> | ||
58 | #include <scsi/scsi_cmnd.h> | ||
59 | #include <scsi/scsi_device.h> | ||
60 | #include <scsi/scsi_host.h> | ||
61 | |||
62 | /* | ||
63 | * So far only bridges from Oxford Semiconductor are known to support | ||
64 | * concurrent logins. Depending on firmware, four or two concurrent logins | ||
65 | * are possible on OXFW911 and newer Oxsemi bridges. | ||
66 | * | ||
67 | * Concurrent logins are useful together with cluster filesystems. | ||
68 | */ | ||
69 | static int sbp2_param_exclusive_login = 1; | ||
70 | module_param_named(exclusive_login, sbp2_param_exclusive_login, bool, 0644); | ||
71 | MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device " | ||
72 | "(default = Y, use N for concurrent initiators)"); | ||
73 | |||
74 | /* | ||
75 | * Flags for firmware oddities | ||
76 | * | ||
77 | * - 128kB max transfer | ||
78 | * Limit transfer size. Necessary for some old bridges. | ||
79 | * | ||
80 | * - 36 byte inquiry | ||
81 | * When scsi_mod probes the device, let the inquiry command look like that | ||
82 | * from MS Windows. | ||
83 | * | ||
84 | * - skip mode page 8 | ||
85 | * Suppress sending of mode_sense for mode page 8 if the device pretends to | ||
86 | * support the SCSI Primary Block commands instead of Reduced Block Commands. | ||
87 | * | ||
88 | * - fix capacity | ||
89 | * Tell sd_mod to correct the last sector number reported by read_capacity. | ||
90 | * Avoids access beyond actual disk limits on devices with an off-by-one bug. | ||
91 | * Don't use this with devices which don't have this bug. | ||
92 | * | ||
93 | * - delay inquiry | ||
94 | * Wait extra SBP2_INQUIRY_DELAY seconds after login before SCSI inquiry. | ||
95 | * | ||
96 | * - power condition | ||
97 | * Set the power condition field in the START STOP UNIT commands sent by | ||
98 | * sd_mod on suspend, resume, and shutdown (if manage_start_stop is on). | ||
99 | * Some disks need this to spin down or to resume properly. | ||
100 | * | ||
101 | * - override internal blacklist | ||
102 | * Instead of adding to the built-in blacklist, use only the workarounds | ||
103 | * specified in the module load parameter. | ||
104 | * Useful if a blacklist entry interfered with a non-broken device. | ||
105 | */ | ||
106 | #define SBP2_WORKAROUND_128K_MAX_TRANS 0x1 | ||
107 | #define SBP2_WORKAROUND_INQUIRY_36 0x2 | ||
108 | #define SBP2_WORKAROUND_MODE_SENSE_8 0x4 | ||
109 | #define SBP2_WORKAROUND_FIX_CAPACITY 0x8 | ||
110 | #define SBP2_WORKAROUND_DELAY_INQUIRY 0x10 | ||
111 | #define SBP2_INQUIRY_DELAY 12 | ||
112 | #define SBP2_WORKAROUND_POWER_CONDITION 0x20 | ||
113 | #define SBP2_WORKAROUND_OVERRIDE 0x100 | ||
114 | |||
115 | static int sbp2_param_workarounds; | ||
116 | module_param_named(workarounds, sbp2_param_workarounds, int, 0644); | ||
117 | MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0" | ||
118 | ", 128kB max transfer = " __stringify(SBP2_WORKAROUND_128K_MAX_TRANS) | ||
119 | ", 36 byte inquiry = " __stringify(SBP2_WORKAROUND_INQUIRY_36) | ||
120 | ", skip mode page 8 = " __stringify(SBP2_WORKAROUND_MODE_SENSE_8) | ||
121 | ", fix capacity = " __stringify(SBP2_WORKAROUND_FIX_CAPACITY) | ||
122 | ", delay inquiry = " __stringify(SBP2_WORKAROUND_DELAY_INQUIRY) | ||
123 | ", set power condition in start stop unit = " | ||
124 | __stringify(SBP2_WORKAROUND_POWER_CONDITION) | ||
125 | ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE) | ||
126 | ", or a combination)"); | ||
127 | |||
128 | /* I don't know why the SCSI stack doesn't define something like this... */ | ||
129 | typedef void (*scsi_done_fn_t)(struct scsi_cmnd *); | ||
130 | |||
131 | static const char sbp2_driver_name[] = "sbp2"; | ||
132 | |||
133 | /* | ||
134 | * We create one struct sbp2_logical_unit per SBP-2 Logical Unit Number Entry | ||
135 | * and one struct scsi_device per sbp2_logical_unit. | ||
136 | */ | ||
137 | struct sbp2_logical_unit { | ||
138 | struct sbp2_target *tgt; | ||
139 | struct list_head link; | ||
140 | struct fw_address_handler address_handler; | ||
141 | struct list_head orb_list; | ||
142 | |||
143 | u64 command_block_agent_address; | ||
144 | u16 lun; | ||
145 | int login_id; | ||
146 | |||
147 | /* | ||
148 | * The generation is updated once we've logged in or reconnected | ||
149 | * to the logical unit. Thus, I/O to the device will automatically | ||
150 | * fail and get retried if it happens in a window where the device | ||
151 | * is not ready, e.g. after a bus reset but before we reconnect. | ||
152 | */ | ||
153 | int generation; | ||
154 | int retries; | ||
155 | struct delayed_work work; | ||
156 | bool has_sdev; | ||
157 | bool blocked; | ||
158 | }; | ||
159 | |||
160 | /* | ||
161 | * We create one struct sbp2_target per IEEE 1212 Unit Directory | ||
162 | * and one struct Scsi_Host per sbp2_target. | ||
163 | */ | ||
164 | struct sbp2_target { | ||
165 | struct kref kref; | ||
166 | struct fw_unit *unit; | ||
167 | const char *bus_id; | ||
168 | struct list_head lu_list; | ||
169 | |||
170 | u64 management_agent_address; | ||
171 | u64 guid; | ||
172 | int directory_id; | ||
173 | int node_id; | ||
174 | int address_high; | ||
175 | unsigned int workarounds; | ||
176 | unsigned int mgt_orb_timeout; | ||
177 | unsigned int max_payload; | ||
178 | |||
179 | int dont_block; /* counter for each logical unit */ | ||
180 | int blocked; /* ditto */ | ||
181 | }; | ||
182 | |||
183 | /* Impossible login_id, to detect logout attempt before successful login */ | ||
184 | #define INVALID_LOGIN_ID 0x10000 | ||
185 | |||
186 | /* | ||
187 | * Per section 7.4.8 of the SBP-2 spec, a mgt_ORB_timeout value can be | ||
188 | * provided in the config rom. Most devices do provide a value, which | ||
189 | * we'll use for login management orbs, but with some sane limits. | ||
190 | */ | ||
191 | #define SBP2_MIN_LOGIN_ORB_TIMEOUT 5000U /* Timeout in ms */ | ||
192 | #define SBP2_MAX_LOGIN_ORB_TIMEOUT 40000U /* Timeout in ms */ | ||
193 | #define SBP2_ORB_TIMEOUT 2000U /* Timeout in ms */ | ||
194 | #define SBP2_ORB_NULL 0x80000000 | ||
195 | #define SBP2_RETRY_LIMIT 0xf /* 15 retries */ | ||
196 | #define SBP2_CYCLE_LIMIT (0xc8 << 12) /* 200 125us cycles */ | ||
197 | |||
198 | /* | ||
199 | * The default maximum s/g segment size of a FireWire controller is | ||
200 | * usually 0x10000, but SBP-2 only allows 0xffff. Since buffers have to | ||
201 | * be quadlet-aligned, we set the length limit to 0xffff & ~3. | ||
202 | */ | ||
203 | #define SBP2_MAX_SEG_SIZE 0xfffc | ||
204 | |||
205 | /* Unit directory keys */ | ||
206 | #define SBP2_CSR_UNIT_CHARACTERISTICS 0x3a | ||
207 | #define SBP2_CSR_FIRMWARE_REVISION 0x3c | ||
208 | #define SBP2_CSR_LOGICAL_UNIT_NUMBER 0x14 | ||
209 | #define SBP2_CSR_LOGICAL_UNIT_DIRECTORY 0xd4 | ||
210 | |||
211 | /* Management orb opcodes */ | ||
212 | #define SBP2_LOGIN_REQUEST 0x0 | ||
213 | #define SBP2_QUERY_LOGINS_REQUEST 0x1 | ||
214 | #define SBP2_RECONNECT_REQUEST 0x3 | ||
215 | #define SBP2_SET_PASSWORD_REQUEST 0x4 | ||
216 | #define SBP2_LOGOUT_REQUEST 0x7 | ||
217 | #define SBP2_ABORT_TASK_REQUEST 0xb | ||
218 | #define SBP2_ABORT_TASK_SET 0xc | ||
219 | #define SBP2_LOGICAL_UNIT_RESET 0xe | ||
220 | #define SBP2_TARGET_RESET_REQUEST 0xf | ||
221 | |||
222 | /* Offsets for command block agent registers */ | ||
223 | #define SBP2_AGENT_STATE 0x00 | ||
224 | #define SBP2_AGENT_RESET 0x04 | ||
225 | #define SBP2_ORB_POINTER 0x08 | ||
226 | #define SBP2_DOORBELL 0x10 | ||
227 | #define SBP2_UNSOLICITED_STATUS_ENABLE 0x14 | ||
228 | |||
229 | /* Status write response codes */ | ||
230 | #define SBP2_STATUS_REQUEST_COMPLETE 0x0 | ||
231 | #define SBP2_STATUS_TRANSPORT_FAILURE 0x1 | ||
232 | #define SBP2_STATUS_ILLEGAL_REQUEST 0x2 | ||
233 | #define SBP2_STATUS_VENDOR_DEPENDENT 0x3 | ||
234 | |||
235 | #define STATUS_GET_ORB_HIGH(v) ((v).status & 0xffff) | ||
236 | #define STATUS_GET_SBP_STATUS(v) (((v).status >> 16) & 0xff) | ||
237 | #define STATUS_GET_LEN(v) (((v).status >> 24) & 0x07) | ||
238 | #define STATUS_GET_DEAD(v) (((v).status >> 27) & 0x01) | ||
239 | #define STATUS_GET_RESPONSE(v) (((v).status >> 28) & 0x03) | ||
240 | #define STATUS_GET_SOURCE(v) (((v).status >> 30) & 0x03) | ||
241 | #define STATUS_GET_ORB_LOW(v) ((v).orb_low) | ||
242 | #define STATUS_GET_DATA(v) ((v).data) | ||
243 | |||
244 | struct sbp2_status { | ||
245 | u32 status; | ||
246 | u32 orb_low; | ||
247 | u8 data[24]; | ||
248 | }; | ||
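/*
 * Worked example (hypothetical values): a status block whose first quadlet
 * is 0x06010000 and whose orb_low equals an ORB's request_bus decodes with
 * the macros above as
 *
 *   STATUS_GET_SOURCE()     == 0   (ORB-related status)
 *   STATUS_GET_RESPONSE()   == 0   (REQUEST COMPLETE)
 *   STATUS_GET_DEAD()       == 0   (fetch agent still alive)
 *   STATUS_GET_LEN()        == 6   (six quadlets of status)
 *   STATUS_GET_SBP_STATUS() == 1   (sbp_status field)
 *   STATUS_GET_ORB_HIGH()   == 0,  orb_low identifies the ORB
 *
 * which is exactly how sbp2_status_write() below matches a status write to
 * a pending ORB.
 */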
249 | |||
250 | struct sbp2_pointer { | ||
251 | __be32 high; | ||
252 | __be32 low; | ||
253 | }; | ||
254 | |||
255 | struct sbp2_orb { | ||
256 | struct fw_transaction t; | ||
257 | struct kref kref; | ||
258 | dma_addr_t request_bus; | ||
259 | int rcode; | ||
260 | struct sbp2_pointer pointer; | ||
261 | void (*callback)(struct sbp2_orb * orb, struct sbp2_status * status); | ||
262 | struct list_head link; | ||
263 | }; | ||
264 | |||
265 | #define MANAGEMENT_ORB_LUN(v) ((v)) | ||
266 | #define MANAGEMENT_ORB_FUNCTION(v) ((v) << 16) | ||
267 | #define MANAGEMENT_ORB_RECONNECT(v) ((v) << 20) | ||
268 | #define MANAGEMENT_ORB_EXCLUSIVE(v) ((v) ? 1 << 28 : 0) | ||
269 | #define MANAGEMENT_ORB_REQUEST_FORMAT(v) ((v) << 29) | ||
270 | #define MANAGEMENT_ORB_NOTIFY ((1) << 31) | ||
271 | |||
272 | #define MANAGEMENT_ORB_RESPONSE_LENGTH(v) ((v)) | ||
273 | #define MANAGEMENT_ORB_PASSWORD_LENGTH(v) ((v) << 16) | ||
274 | |||
275 | struct sbp2_management_orb { | ||
276 | struct sbp2_orb base; | ||
277 | struct { | ||
278 | struct sbp2_pointer password; | ||
279 | struct sbp2_pointer response; | ||
280 | __be32 misc; | ||
281 | __be32 length; | ||
282 | struct sbp2_pointer status_fifo; | ||
283 | } request; | ||
284 | __be32 response[4]; | ||
285 | dma_addr_t response_bus; | ||
286 | struct completion done; | ||
287 | struct sbp2_status status; | ||
288 | }; | ||
289 | |||
290 | struct sbp2_login_response { | ||
291 | __be32 misc; | ||
292 | struct sbp2_pointer command_block_agent; | ||
293 | __be32 reconnect_hold; | ||
294 | }; | ||
295 | #define COMMAND_ORB_DATA_SIZE(v) ((v)) | ||
296 | #define COMMAND_ORB_PAGE_SIZE(v) ((v) << 16) | ||
297 | #define COMMAND_ORB_PAGE_TABLE_PRESENT ((1) << 19) | ||
298 | #define COMMAND_ORB_MAX_PAYLOAD(v) ((v) << 20) | ||
299 | #define COMMAND_ORB_SPEED(v) ((v) << 24) | ||
300 | #define COMMAND_ORB_DIRECTION ((1) << 27) | ||
301 | #define COMMAND_ORB_REQUEST_FORMAT(v) ((v) << 29) | ||
302 | #define COMMAND_ORB_NOTIFY ((1) << 31) | ||
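/*
 * Worked example (hypothetical values): for a read at speed code 2 (S400)
 * with max_payload 9 and a single-segment buffer, the misc word built in
 * sbp2_scsi_queuecommand()/sbp2_map_scatterlist() comes out as
 *
 *   COMMAND_ORB_NOTIFY           0x80000000
 * | COMMAND_ORB_DIRECTION        0x08000000   (device -> initiator)
 * | COMMAND_ORB_SPEED(2)         0x02000000
 * | COMMAND_ORB_MAX_PAYLOAD(9)   0x00900000
 * | COMMAND_ORB_DATA_SIZE(len)   the segment length in bytes
 *
 * i.e. 0x8a900000 | len, with COMMAND_ORB_PAGE_TABLE_PRESENT added only
 * when a page table is used.
 */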
303 | |||
304 | struct sbp2_command_orb { | ||
305 | struct sbp2_orb base; | ||
306 | struct { | ||
307 | struct sbp2_pointer next; | ||
308 | struct sbp2_pointer data_descriptor; | ||
309 | __be32 misc; | ||
310 | u8 command_block[12]; | ||
311 | } request; | ||
312 | struct scsi_cmnd *cmd; | ||
313 | scsi_done_fn_t done; | ||
314 | struct sbp2_logical_unit *lu; | ||
315 | |||
316 | struct sbp2_pointer page_table[SG_ALL] __attribute__((aligned(8))); | ||
317 | dma_addr_t page_table_bus; | ||
318 | }; | ||
319 | |||
320 | #define SBP2_ROM_VALUE_WILDCARD ~0 /* match all */ | ||
321 | #define SBP2_ROM_VALUE_MISSING 0xff000000 /* not present in the unit dir. */ | ||
322 | |||
323 | /* | ||
324 | * List of devices with known bugs. | ||
325 | * | ||
326 | * The firmware_revision field, masked with 0xffff00, is the best | ||
327 | * indicator for the type of bridge chip of a device. It yields a few | ||
328 | * false positives but this did not break correctly behaving devices | ||
329 | * so far. | ||
330 | */ | ||
331 | static const struct { | ||
332 | u32 firmware_revision; | ||
333 | u32 model; | ||
334 | unsigned int workarounds; | ||
335 | } sbp2_workarounds_table[] = { | ||
336 | /* DViCO Momobay CX-1 with TSB42AA9 bridge */ { | ||
337 | .firmware_revision = 0x002800, | ||
338 | .model = 0x001010, | ||
339 | .workarounds = SBP2_WORKAROUND_INQUIRY_36 | | ||
340 | SBP2_WORKAROUND_MODE_SENSE_8 | | ||
341 | SBP2_WORKAROUND_POWER_CONDITION, | ||
342 | }, | ||
343 | /* DViCO Momobay FX-3A with TSB42AA9A bridge */ { | ||
344 | .firmware_revision = 0x002800, | ||
345 | .model = 0x000000, | ||
346 | .workarounds = SBP2_WORKAROUND_DELAY_INQUIRY | | ||
347 | SBP2_WORKAROUND_POWER_CONDITION, | ||
348 | }, | ||
349 | /* Initio bridges, actually only needed for some older ones */ { | ||
350 | .firmware_revision = 0x000200, | ||
351 | .model = SBP2_ROM_VALUE_WILDCARD, | ||
352 | .workarounds = SBP2_WORKAROUND_INQUIRY_36, | ||
353 | }, | ||
354 | /* PL-3507 bridge with Prolific firmware */ { | ||
355 | .firmware_revision = 0x012800, | ||
356 | .model = SBP2_ROM_VALUE_WILDCARD, | ||
357 | .workarounds = SBP2_WORKAROUND_POWER_CONDITION, | ||
358 | }, | ||
359 | /* Symbios bridge */ { | ||
360 | .firmware_revision = 0xa0b800, | ||
361 | .model = SBP2_ROM_VALUE_WILDCARD, | ||
362 | .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS, | ||
363 | }, | ||
364 | /* Datafab MD2-FW2 with Symbios/LSILogic SYM13FW500 bridge */ { | ||
365 | .firmware_revision = 0x002600, | ||
366 | .model = SBP2_ROM_VALUE_WILDCARD, | ||
367 | .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS, | ||
368 | }, | ||
369 | /* | ||
370 | * iPod 2nd generation: needs 128k max transfer size workaround | ||
371 | * iPod 3rd generation: needs fix capacity workaround | ||
372 | */ | ||
373 | { | ||
374 | .firmware_revision = 0x0a2700, | ||
375 | .model = 0x000000, | ||
376 | .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS | | ||
377 | SBP2_WORKAROUND_FIX_CAPACITY, | ||
378 | }, | ||
379 | /* iPod 4th generation */ { | ||
380 | .firmware_revision = 0x0a2700, | ||
381 | .model = 0x000021, | ||
382 | .workarounds = SBP2_WORKAROUND_FIX_CAPACITY, | ||
383 | }, | ||
384 | /* iPod mini */ { | ||
385 | .firmware_revision = 0x0a2700, | ||
386 | .model = 0x000022, | ||
387 | .workarounds = SBP2_WORKAROUND_FIX_CAPACITY, | ||
388 | }, | ||
389 | /* iPod mini */ { | ||
390 | .firmware_revision = 0x0a2700, | ||
391 | .model = 0x000023, | ||
392 | .workarounds = SBP2_WORKAROUND_FIX_CAPACITY, | ||
393 | }, | ||
394 | /* iPod Photo */ { | ||
395 | .firmware_revision = 0x0a2700, | ||
396 | .model = 0x00007e, | ||
397 | .workarounds = SBP2_WORKAROUND_FIX_CAPACITY, | ||
398 | } | ||
399 | }; | ||
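/*
 * Matching sketch (hypothetical device): sbp2_init_workarounds() below masks
 * the unit directory's firmware_revision with 0xffffff00 before comparing.
 * A device reporting firmware_revision 0x0a2754 and model 0x000021 therefore
 * masks to 0x0a2700 and hits the "iPod 4th generation" entry, picking up
 * SBP2_WORKAROUND_FIX_CAPACITY.
 */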
400 | |||
401 | static void free_orb(struct kref *kref) | ||
402 | { | ||
403 | struct sbp2_orb *orb = container_of(kref, struct sbp2_orb, kref); | ||
404 | |||
405 | kfree(orb); | ||
406 | } | ||
407 | |||
408 | static void sbp2_status_write(struct fw_card *card, struct fw_request *request, | ||
409 | int tcode, int destination, int source, | ||
410 | int generation, int speed, | ||
411 | unsigned long long offset, | ||
412 | void *payload, size_t length, void *callback_data) | ||
413 | { | ||
414 | struct sbp2_logical_unit *lu = callback_data; | ||
415 | struct sbp2_orb *orb; | ||
416 | struct sbp2_status status; | ||
417 | size_t header_size; | ||
418 | unsigned long flags; | ||
419 | |||
420 | if (tcode != TCODE_WRITE_BLOCK_REQUEST || | ||
421 | length == 0 || length > sizeof(status)) { | ||
422 | fw_send_response(card, request, RCODE_TYPE_ERROR); | ||
423 | return; | ||
424 | } | ||
425 | |||
426 | header_size = min(length, 2 * sizeof(u32)); | ||
427 | fw_memcpy_from_be32(&status, payload, header_size); | ||
428 | if (length > header_size) | ||
429 | memcpy(status.data, payload + 8, length - header_size); | ||
430 | if (STATUS_GET_SOURCE(status) == 2 || STATUS_GET_SOURCE(status) == 3) { | ||
431 | fw_notify("non-orb related status write, not handled\n"); | ||
432 | fw_send_response(card, request, RCODE_COMPLETE); | ||
433 | return; | ||
434 | } | ||
435 | |||
436 | /* Lookup the orb corresponding to this status write. */ | ||
437 | spin_lock_irqsave(&card->lock, flags); | ||
438 | list_for_each_entry(orb, &lu->orb_list, link) { | ||
439 | if (STATUS_GET_ORB_HIGH(status) == 0 && | ||
440 | STATUS_GET_ORB_LOW(status) == orb->request_bus) { | ||
441 | orb->rcode = RCODE_COMPLETE; | ||
442 | list_del(&orb->link); | ||
443 | break; | ||
444 | } | ||
445 | } | ||
446 | spin_unlock_irqrestore(&card->lock, flags); | ||
447 | |||
448 | if (&orb->link != &lu->orb_list) | ||
449 | orb->callback(orb, &status); | ||
450 | else | ||
451 | fw_error("status write for unknown orb\n"); | ||
452 | |||
453 | kref_put(&orb->kref, free_orb); | ||
454 | |||
455 | fw_send_response(card, request, RCODE_COMPLETE); | ||
456 | } | ||
457 | |||
458 | static void complete_transaction(struct fw_card *card, int rcode, | ||
459 | void *payload, size_t length, void *data) | ||
460 | { | ||
461 | struct sbp2_orb *orb = data; | ||
462 | unsigned long flags; | ||
463 | |||
464 | /* | ||
465 | * This is a little tricky. We can get the status write for | ||
466 | * the orb before we get this callback. The status write | ||
467 | * handler above will assume the orb pointer transaction was | ||
468 | * successful and set the rcode to RCODE_COMPLETE for the orb. | ||
469 | * So this callback only sets the rcode if it hasn't already | ||
470 | * been set and only does the cleanup if the transaction | ||
471 | * failed and we didn't already get a status write. | ||
472 | */ | ||
473 | spin_lock_irqsave(&card->lock, flags); | ||
474 | |||
475 | if (orb->rcode == -1) | ||
476 | orb->rcode = rcode; | ||
477 | if (orb->rcode != RCODE_COMPLETE) { | ||
478 | list_del(&orb->link); | ||
479 | spin_unlock_irqrestore(&card->lock, flags); | ||
480 | orb->callback(orb, NULL); | ||
481 | } else { | ||
482 | spin_unlock_irqrestore(&card->lock, flags); | ||
483 | } | ||
484 | |||
485 | kref_put(&orb->kref, free_orb); | ||
486 | } | ||
487 | |||
488 | static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu, | ||
489 | int node_id, int generation, u64 offset) | ||
490 | { | ||
491 | struct fw_device *device = fw_device(lu->tgt->unit->device.parent); | ||
492 | unsigned long flags; | ||
493 | |||
494 | orb->pointer.high = 0; | ||
495 | orb->pointer.low = cpu_to_be32(orb->request_bus); | ||
496 | |||
497 | spin_lock_irqsave(&device->card->lock, flags); | ||
498 | list_add_tail(&orb->link, &lu->orb_list); | ||
499 | spin_unlock_irqrestore(&device->card->lock, flags); | ||
500 | |||
501 | /* Take a ref for the orb list and for the transaction callback. */ | ||
502 | kref_get(&orb->kref); | ||
503 | kref_get(&orb->kref); | ||
504 | |||
505 | fw_send_request(device->card, &orb->t, TCODE_WRITE_BLOCK_REQUEST, | ||
506 | node_id, generation, device->max_speed, offset, | ||
507 | &orb->pointer, sizeof(orb->pointer), | ||
508 | complete_transaction, orb); | ||
509 | } | ||
510 | |||
511 | static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu) | ||
512 | { | ||
513 | struct fw_device *device = fw_device(lu->tgt->unit->device.parent); | ||
514 | struct sbp2_orb *orb, *next; | ||
515 | struct list_head list; | ||
516 | unsigned long flags; | ||
517 | int retval = -ENOENT; | ||
518 | |||
519 | INIT_LIST_HEAD(&list); | ||
520 | spin_lock_irqsave(&device->card->lock, flags); | ||
521 | list_splice_init(&lu->orb_list, &list); | ||
522 | spin_unlock_irqrestore(&device->card->lock, flags); | ||
523 | |||
524 | list_for_each_entry_safe(orb, next, &list, link) { | ||
525 | retval = 0; | ||
526 | if (fw_cancel_transaction(device->card, &orb->t) == 0) | ||
527 | continue; | ||
528 | |||
529 | orb->rcode = RCODE_CANCELLED; | ||
530 | orb->callback(orb, NULL); | ||
531 | } | ||
532 | |||
533 | return retval; | ||
534 | } | ||
535 | |||
536 | static void complete_management_orb(struct sbp2_orb *base_orb, | ||
537 | struct sbp2_status *status) | ||
538 | { | ||
539 | struct sbp2_management_orb *orb = | ||
540 | container_of(base_orb, struct sbp2_management_orb, base); | ||
541 | |||
542 | if (status) | ||
543 | memcpy(&orb->status, status, sizeof(*status)); | ||
544 | complete(&orb->done); | ||
545 | } | ||
546 | |||
547 | static int sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id, | ||
548 | int generation, int function, | ||
549 | int lun_or_login_id, void *response) | ||
550 | { | ||
551 | struct fw_device *device = fw_device(lu->tgt->unit->device.parent); | ||
552 | struct sbp2_management_orb *orb; | ||
553 | unsigned int timeout; | ||
554 | int retval = -ENOMEM; | ||
555 | |||
556 | if (function == SBP2_LOGOUT_REQUEST && fw_device_is_shutdown(device)) | ||
557 | return 0; | ||
558 | |||
559 | orb = kzalloc(sizeof(*orb), GFP_ATOMIC); | ||
560 | if (orb == NULL) | ||
561 | return -ENOMEM; | ||
562 | |||
563 | kref_init(&orb->base.kref); | ||
564 | orb->response_bus = | ||
565 | dma_map_single(device->card->device, &orb->response, | ||
566 | sizeof(orb->response), DMA_FROM_DEVICE); | ||
567 | if (dma_mapping_error(device->card->device, orb->response_bus)) | ||
568 | goto fail_mapping_response; | ||
569 | |||
570 | orb->request.response.high = 0; | ||
571 | orb->request.response.low = cpu_to_be32(orb->response_bus); | ||
572 | |||
573 | orb->request.misc = cpu_to_be32( | ||
574 | MANAGEMENT_ORB_NOTIFY | | ||
575 | MANAGEMENT_ORB_FUNCTION(function) | | ||
576 | MANAGEMENT_ORB_LUN(lun_or_login_id)); | ||
577 | orb->request.length = cpu_to_be32( | ||
578 | MANAGEMENT_ORB_RESPONSE_LENGTH(sizeof(orb->response))); | ||
579 | |||
580 | orb->request.status_fifo.high = | ||
581 | cpu_to_be32(lu->address_handler.offset >> 32); | ||
582 | orb->request.status_fifo.low = | ||
583 | cpu_to_be32(lu->address_handler.offset); | ||
584 | |||
585 | if (function == SBP2_LOGIN_REQUEST) { | ||
586 | /* Ask for 2^2 == 4 seconds reconnect grace period */ | ||
587 | orb->request.misc |= cpu_to_be32( | ||
588 | MANAGEMENT_ORB_RECONNECT(2) | | ||
589 | MANAGEMENT_ORB_EXCLUSIVE(sbp2_param_exclusive_login)); | ||
590 | timeout = lu->tgt->mgt_orb_timeout; | ||
591 | } else { | ||
592 | timeout = SBP2_ORB_TIMEOUT; | ||
593 | } | ||
594 | |||
595 | init_completion(&orb->done); | ||
596 | orb->base.callback = complete_management_orb; | ||
597 | |||
598 | orb->base.request_bus = | ||
599 | dma_map_single(device->card->device, &orb->request, | ||
600 | sizeof(orb->request), DMA_TO_DEVICE); | ||
601 | if (dma_mapping_error(device->card->device, orb->base.request_bus)) | ||
602 | goto fail_mapping_request; | ||
603 | |||
604 | sbp2_send_orb(&orb->base, lu, node_id, generation, | ||
605 | lu->tgt->management_agent_address); | ||
606 | |||
607 | wait_for_completion_timeout(&orb->done, msecs_to_jiffies(timeout)); | ||
608 | |||
609 | retval = -EIO; | ||
610 | if (sbp2_cancel_orbs(lu) == 0) { | ||
611 | fw_error("%s: orb reply timed out, rcode=0x%02x\n", | ||
612 | lu->tgt->bus_id, orb->base.rcode); | ||
613 | goto out; | ||
614 | } | ||
615 | |||
616 | if (orb->base.rcode != RCODE_COMPLETE) { | ||
617 | fw_error("%s: management write failed, rcode 0x%02x\n", | ||
618 | lu->tgt->bus_id, orb->base.rcode); | ||
619 | goto out; | ||
620 | } | ||
621 | |||
622 | if (STATUS_GET_RESPONSE(orb->status) != 0 || | ||
623 | STATUS_GET_SBP_STATUS(orb->status) != 0) { | ||
624 | fw_error("%s: error status: %d:%d\n", lu->tgt->bus_id, | ||
625 | STATUS_GET_RESPONSE(orb->status), | ||
626 | STATUS_GET_SBP_STATUS(orb->status)); | ||
627 | goto out; | ||
628 | } | ||
629 | |||
630 | retval = 0; | ||
631 | out: | ||
632 | dma_unmap_single(device->card->device, orb->base.request_bus, | ||
633 | sizeof(orb->request), DMA_TO_DEVICE); | ||
634 | fail_mapping_request: | ||
635 | dma_unmap_single(device->card->device, orb->response_bus, | ||
636 | sizeof(orb->response), DMA_FROM_DEVICE); | ||
637 | fail_mapping_response: | ||
638 | if (response) | ||
639 | memcpy(response, orb->response, sizeof(orb->response)); | ||
640 | kref_put(&orb->base.kref, free_orb); | ||
641 | |||
642 | return retval; | ||
643 | } | ||
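/*
 * Worked example (illustrative): a login request for LUN 0 with exclusive
 * login enabled composes request.misc above as
 *
 *   MANAGEMENT_ORB_NOTIFY                         0x80000000
 * | MANAGEMENT_ORB_EXCLUSIVE(1)                   0x10000000
 * | MANAGEMENT_ORB_RECONNECT(2)                   0x00200000   (2^2 = 4 s)
 * | MANAGEMENT_ORB_FUNCTION(SBP2_LOGIN_REQUEST)   0x00000000
 * | MANAGEMENT_ORB_LUN(0)                         0x00000000
 *
 * i.e. 0x90200000, and waits up to tgt->mgt_orb_timeout for the status
 * write; non-login requests use the shorter SBP2_ORB_TIMEOUT instead.
 */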
644 | |||
645 | static void sbp2_agent_reset(struct sbp2_logical_unit *lu) | ||
646 | { | ||
647 | struct fw_device *device = fw_device(lu->tgt->unit->device.parent); | ||
648 | __be32 d = 0; | ||
649 | |||
650 | fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST, | ||
651 | lu->tgt->node_id, lu->generation, device->max_speed, | ||
652 | lu->command_block_agent_address + SBP2_AGENT_RESET, | ||
653 | &d, sizeof(d)); | ||
654 | } | ||
655 | |||
656 | static void complete_agent_reset_write_no_wait(struct fw_card *card, | ||
657 | int rcode, void *payload, size_t length, void *data) | ||
658 | { | ||
659 | kfree(data); | ||
660 | } | ||
661 | |||
662 | static void sbp2_agent_reset_no_wait(struct sbp2_logical_unit *lu) | ||
663 | { | ||
664 | struct fw_device *device = fw_device(lu->tgt->unit->device.parent); | ||
665 | struct fw_transaction *t; | ||
666 | static __be32 d; | ||
667 | |||
668 | t = kmalloc(sizeof(*t), GFP_ATOMIC); | ||
669 | if (t == NULL) | ||
670 | return; | ||
671 | |||
672 | fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST, | ||
673 | lu->tgt->node_id, lu->generation, device->max_speed, | ||
674 | lu->command_block_agent_address + SBP2_AGENT_RESET, | ||
675 | &d, sizeof(d), complete_agent_reset_write_no_wait, t); | ||
676 | } | ||
677 | |||
678 | static inline void sbp2_allow_block(struct sbp2_logical_unit *lu) | ||
679 | { | ||
680 | /* | ||
681 | * We may access dont_block without taking card->lock here: | ||
682 | * All callers of sbp2_allow_block() and all callers of sbp2_unblock() | ||
683 | * are currently serialized against each other. | ||
684 | * And a wrong result in sbp2_conditionally_block()'s access of | ||
685 | * dont_block is rather harmless, it simply misses its first chance. | ||
686 | */ | ||
687 | --lu->tgt->dont_block; | ||
688 | } | ||
689 | |||
690 | /* | ||
691 | * Blocks lu->tgt if all of the following conditions are met: | ||
692 | * - Login, INQUIRY, and high-level SCSI setup of all of the target's | ||
693 | * logical units have been finished (indicated by dont_block == 0). | ||
694 | * - lu->generation is stale. | ||
695 | * | ||
696 | * Note, scsi_block_requests() must be called while holding card->lock, | ||
697 | * otherwise it might foil sbp2_[conditionally_]unblock()'s attempt to | ||
698 | * unblock the target. | ||
699 | */ | ||
700 | static void sbp2_conditionally_block(struct sbp2_logical_unit *lu) | ||
701 | { | ||
702 | struct sbp2_target *tgt = lu->tgt; | ||
703 | struct fw_card *card = fw_device(tgt->unit->device.parent)->card; | ||
704 | struct Scsi_Host *shost = | ||
705 | container_of((void *)tgt, struct Scsi_Host, hostdata[0]); | ||
706 | unsigned long flags; | ||
707 | |||
708 | spin_lock_irqsave(&card->lock, flags); | ||
709 | if (!tgt->dont_block && !lu->blocked && | ||
710 | lu->generation != card->generation) { | ||
711 | lu->blocked = true; | ||
712 | if (++tgt->blocked == 1) | ||
713 | scsi_block_requests(shost); | ||
714 | } | ||
715 | spin_unlock_irqrestore(&card->lock, flags); | ||
716 | } | ||
717 | |||
718 | /* | ||
719 | * Unblocks lu->tgt as soon as all its logical units can be unblocked. | ||
720 | * Note, it is harmless to run scsi_unblock_requests() outside the | ||
721 | * card->lock protected section. On the other hand, running it inside | ||
722 | * the section might clash with shost->host_lock. | ||
723 | */ | ||
724 | static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu) | ||
725 | { | ||
726 | struct sbp2_target *tgt = lu->tgt; | ||
727 | struct fw_card *card = fw_device(tgt->unit->device.parent)->card; | ||
728 | struct Scsi_Host *shost = | ||
729 | container_of((void *)tgt, struct Scsi_Host, hostdata[0]); | ||
730 | unsigned long flags; | ||
731 | bool unblock = false; | ||
732 | |||
733 | spin_lock_irqsave(&card->lock, flags); | ||
734 | if (lu->blocked && lu->generation == card->generation) { | ||
735 | lu->blocked = false; | ||
736 | unblock = --tgt->blocked == 0; | ||
737 | } | ||
738 | spin_unlock_irqrestore(&card->lock, flags); | ||
739 | |||
740 | if (unblock) | ||
741 | scsi_unblock_requests(shost); | ||
742 | } | ||
743 | |||
744 | /* | ||
745 | * Prevents future blocking of tgt and unblocks it. | ||
746 | * Note, it is harmless to run scsi_unblock_requests() outside the | ||
747 | * card->lock protected section. On the other hand, running it inside | ||
748 | * the section might clash with shost->host_lock. | ||
749 | */ | ||
750 | static void sbp2_unblock(struct sbp2_target *tgt) | ||
751 | { | ||
752 | struct fw_card *card = fw_device(tgt->unit->device.parent)->card; | ||
753 | struct Scsi_Host *shost = | ||
754 | container_of((void *)tgt, struct Scsi_Host, hostdata[0]); | ||
755 | unsigned long flags; | ||
756 | |||
757 | spin_lock_irqsave(&card->lock, flags); | ||
758 | ++tgt->dont_block; | ||
759 | spin_unlock_irqrestore(&card->lock, flags); | ||
760 | |||
761 | scsi_unblock_requests(shost); | ||
762 | } | ||
763 | |||
764 | static int sbp2_lun2int(u16 lun) | ||
765 | { | ||
766 | struct scsi_lun eight_bytes_lun; | ||
767 | |||
768 | memset(&eight_bytes_lun, 0, sizeof(eight_bytes_lun)); | ||
769 | eight_bytes_lun.scsi_lun[0] = (lun >> 8) & 0xff; | ||
770 | eight_bytes_lun.scsi_lun[1] = lun & 0xff; | ||
771 | |||
772 | return scsilun_to_int(&eight_bytes_lun); | ||
773 | } | ||
774 | |||
775 | static void sbp2_release_target(struct kref *kref) | ||
776 | { | ||
777 | struct sbp2_target *tgt = container_of(kref, struct sbp2_target, kref); | ||
778 | struct sbp2_logical_unit *lu, *next; | ||
779 | struct Scsi_Host *shost = | ||
780 | container_of((void *)tgt, struct Scsi_Host, hostdata[0]); | ||
781 | struct scsi_device *sdev; | ||
782 | struct fw_device *device = fw_device(tgt->unit->device.parent); | ||
783 | |||
784 | /* prevent deadlocks */ | ||
785 | sbp2_unblock(tgt); | ||
786 | |||
787 | list_for_each_entry_safe(lu, next, &tgt->lu_list, link) { | ||
788 | sdev = scsi_device_lookup(shost, 0, 0, sbp2_lun2int(lu->lun)); | ||
789 | if (sdev) { | ||
790 | scsi_remove_device(sdev); | ||
791 | scsi_device_put(sdev); | ||
792 | } | ||
793 | if (lu->login_id != INVALID_LOGIN_ID) { | ||
794 | int generation, node_id; | ||
795 | /* | ||
796 | * tgt->node_id may be obsolete here if we failed | ||
797 | * during initial login or after a bus reset where | ||
798 | * the topology changed. | ||
799 | */ | ||
800 | generation = device->generation; | ||
801 | smp_rmb(); /* node_id vs. generation */ | ||
802 | node_id = device->node_id; | ||
803 | sbp2_send_management_orb(lu, node_id, generation, | ||
804 | SBP2_LOGOUT_REQUEST, | ||
805 | lu->login_id, NULL); | ||
806 | } | ||
807 | fw_core_remove_address_handler(&lu->address_handler); | ||
808 | list_del(&lu->link); | ||
809 | kfree(lu); | ||
810 | } | ||
811 | scsi_remove_host(shost); | ||
812 | fw_notify("released %s, target %d:0:0\n", tgt->bus_id, shost->host_no); | ||
813 | |||
814 | fw_unit_put(tgt->unit); | ||
815 | scsi_host_put(shost); | ||
816 | fw_device_put(device); | ||
817 | } | ||
818 | |||
819 | static struct workqueue_struct *sbp2_wq; | ||
820 | |||
821 | static void sbp2_target_put(struct sbp2_target *tgt) | ||
822 | { | ||
823 | kref_put(&tgt->kref, sbp2_release_target); | ||
824 | } | ||
825 | |||
826 | /* | ||
827 | * Always get the target's kref when scheduling work on one of its units. | ||
828 | * Each workqueue job is responsible to call sbp2_target_put() upon return. | ||
829 | */ | ||
830 | static void sbp2_queue_work(struct sbp2_logical_unit *lu, unsigned long delay) | ||
831 | { | ||
832 | kref_get(&lu->tgt->kref); | ||
833 | if (!queue_delayed_work(sbp2_wq, &lu->work, delay)) | ||
834 | sbp2_target_put(lu->tgt); | ||
835 | } | ||
836 | |||
837 | /* | ||
838 | * Write retransmit retry values into the BUSY_TIMEOUT register. | ||
839 | * - The single-phase retry protocol is supported by all SBP-2 devices, but the | ||
840 | * default retry_limit value is 0 (i.e. never retry transmission). We write a | ||
841 | * saner value after logging into the device. | ||
842 | * - The dual-phase retry protocol is optional to implement, and if not | ||
843 | * supported, writes to the dual-phase portion of the register will be | ||
844 | * ignored. We try to write the original 1394-1995 default here. | ||
845 | * - In the case of devices that are also SBP-3-compliant, all writes are | ||
846 | * ignored, as the register is read-only, but contains single-phase retry of | ||
847 | * 15, which is what we're trying to set for all SBP-2 devices anyway, so this | ||
848 | * write attempt is safe and yields more consistent behavior for all devices. | ||
849 | * | ||
850 | * See section 8.3.2.3.5 of the 1394-1995 spec, section 6.2 of the SBP-2 spec, | ||
851 | * and section 6.4 of the SBP-3 spec for further details. | ||
852 | */ | ||
853 | static void sbp2_set_busy_timeout(struct sbp2_logical_unit *lu) | ||
854 | { | ||
855 | struct fw_device *device = fw_device(lu->tgt->unit->device.parent); | ||
856 | __be32 d = cpu_to_be32(SBP2_CYCLE_LIMIT | SBP2_RETRY_LIMIT); | ||
857 | |||
858 | fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST, | ||
859 | lu->tgt->node_id, lu->generation, device->max_speed, | ||
860 | CSR_REGISTER_BASE + CSR_BUSY_TIMEOUT, | ||
861 | &d, sizeof(d)); | ||
862 | } | ||
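/*
 * Worked value: the quadlet written above is
 *
 *   SBP2_CYCLE_LIMIT | SBP2_RETRY_LIMIT == (0xc8 << 12) | 0xf == 0x000c800f
 *
 * i.e. a single-phase retry_limit of 15 and a dual-phase cycle limit of
 * 200 isochronous cycles (200 * 125 us = 25 ms).
 */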
863 | |||
864 | static void sbp2_reconnect(struct work_struct *work); | ||
865 | |||
866 | static void sbp2_login(struct work_struct *work) | ||
867 | { | ||
868 | struct sbp2_logical_unit *lu = | ||
869 | container_of(work, struct sbp2_logical_unit, work.work); | ||
870 | struct sbp2_target *tgt = lu->tgt; | ||
871 | struct fw_device *device = fw_device(tgt->unit->device.parent); | ||
872 | struct Scsi_Host *shost; | ||
873 | struct scsi_device *sdev; | ||
874 | struct sbp2_login_response response; | ||
875 | int generation, node_id, local_node_id; | ||
876 | |||
877 | if (fw_device_is_shutdown(device)) | ||
878 | goto out; | ||
879 | |||
880 | generation = device->generation; | ||
881 | smp_rmb(); /* node IDs must not be older than generation */ | ||
882 | node_id = device->node_id; | ||
883 | local_node_id = device->card->node_id; | ||
884 | |||
885 | /* If this is a re-login attempt, log out, or we might be rejected. */ | ||
886 | if (lu->has_sdev) | ||
887 | sbp2_send_management_orb(lu, device->node_id, generation, | ||
888 | SBP2_LOGOUT_REQUEST, lu->login_id, NULL); | ||
889 | |||
890 | if (sbp2_send_management_orb(lu, node_id, generation, | ||
891 | SBP2_LOGIN_REQUEST, lu->lun, &response) < 0) { | ||
892 | if (lu->retries++ < 5) { | ||
893 | sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5)); | ||
894 | } else { | ||
895 | fw_error("%s: failed to login to LUN %04x\n", | ||
896 | tgt->bus_id, lu->lun); | ||
897 | /* Let any waiting I/O fail from now on. */ | ||
898 | sbp2_unblock(lu->tgt); | ||
899 | } | ||
900 | goto out; | ||
901 | } | ||
902 | |||
903 | tgt->node_id = node_id; | ||
904 | tgt->address_high = local_node_id << 16; | ||
905 | smp_wmb(); /* node IDs must not be older than generation */ | ||
906 | lu->generation = generation; | ||
907 | |||
908 | lu->command_block_agent_address = | ||
909 | ((u64)(be32_to_cpu(response.command_block_agent.high) & 0xffff) | ||
910 | << 32) | be32_to_cpu(response.command_block_agent.low); | ||
911 | lu->login_id = be32_to_cpu(response.misc) & 0xffff; | ||
912 | |||
913 | fw_notify("%s: logged in to LUN %04x (%d retries)\n", | ||
914 | tgt->bus_id, lu->lun, lu->retries); | ||
915 | |||
916 | /* set appropriate retry limit(s) in BUSY_TIMEOUT register */ | ||
917 | sbp2_set_busy_timeout(lu); | ||
918 | |||
919 | PREPARE_DELAYED_WORK(&lu->work, sbp2_reconnect); | ||
920 | sbp2_agent_reset(lu); | ||
921 | |||
922 | /* This was a re-login. */ | ||
923 | if (lu->has_sdev) { | ||
924 | sbp2_cancel_orbs(lu); | ||
925 | sbp2_conditionally_unblock(lu); | ||
926 | goto out; | ||
927 | } | ||
928 | |||
929 | if (lu->tgt->workarounds & SBP2_WORKAROUND_DELAY_INQUIRY) | ||
930 | ssleep(SBP2_INQUIRY_DELAY); | ||
931 | |||
932 | shost = container_of((void *)tgt, struct Scsi_Host, hostdata[0]); | ||
933 | sdev = __scsi_add_device(shost, 0, 0, sbp2_lun2int(lu->lun), lu); | ||
934 | /* | ||
935 | * FIXME: We are unable to perform reconnects while in sbp2_login(). | ||
936 | * Therefore __scsi_add_device() will get into trouble if a bus reset | ||
937 | * happens in parallel. It will either fail or leave us with an | ||
938 | * unusable sdev. As a workaround we check for this and retry the | ||
939 | * whole login and SCSI probing. | ||
940 | */ | ||
941 | |||
942 | /* Reported error during __scsi_add_device() */ | ||
943 | if (IS_ERR(sdev)) | ||
944 | goto out_logout_login; | ||
945 | |||
946 | /* Unreported error during __scsi_add_device() */ | ||
947 | smp_rmb(); /* get current card generation */ | ||
948 | if (generation != device->card->generation) { | ||
949 | scsi_remove_device(sdev); | ||
950 | scsi_device_put(sdev); | ||
951 | goto out_logout_login; | ||
952 | } | ||
953 | |||
954 | /* No error during __scsi_add_device() */ | ||
955 | lu->has_sdev = true; | ||
956 | scsi_device_put(sdev); | ||
957 | sbp2_allow_block(lu); | ||
958 | goto out; | ||
959 | |||
960 | out_logout_login: | ||
961 | smp_rmb(); /* generation may have changed */ | ||
962 | generation = device->generation; | ||
963 | smp_rmb(); /* node_id must not be older than generation */ | ||
964 | |||
965 | sbp2_send_management_orb(lu, device->node_id, generation, | ||
966 | SBP2_LOGOUT_REQUEST, lu->login_id, NULL); | ||
967 | /* | ||
968 | * If a bus reset happened, sbp2_update will have requeued | ||
969 | * lu->work already. Reset the work from reconnect to login. | ||
970 | */ | ||
971 | PREPARE_DELAYED_WORK(&lu->work, sbp2_login); | ||
972 | out: | ||
973 | sbp2_target_put(tgt); | ||
974 | } | ||
975 | |||
976 | static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry) | ||
977 | { | ||
978 | struct sbp2_logical_unit *lu; | ||
979 | |||
980 | lu = kmalloc(sizeof(*lu), GFP_KERNEL); | ||
981 | if (!lu) | ||
982 | return -ENOMEM; | ||
983 | |||
984 | lu->address_handler.length = 0x100; | ||
985 | lu->address_handler.address_callback = sbp2_status_write; | ||
986 | lu->address_handler.callback_data = lu; | ||
987 | |||
988 | if (fw_core_add_address_handler(&lu->address_handler, | ||
989 | &fw_high_memory_region) < 0) { | ||
990 | kfree(lu); | ||
991 | return -ENOMEM; | ||
992 | } | ||
993 | |||
994 | lu->tgt = tgt; | ||
995 | lu->lun = lun_entry & 0xffff; | ||
996 | lu->login_id = INVALID_LOGIN_ID; | ||
997 | lu->retries = 0; | ||
998 | lu->has_sdev = false; | ||
999 | lu->blocked = false; | ||
1000 | ++tgt->dont_block; | ||
1001 | INIT_LIST_HEAD(&lu->orb_list); | ||
1002 | INIT_DELAYED_WORK(&lu->work, sbp2_login); | ||
1003 | |||
1004 | list_add_tail(&lu->link, &tgt->lu_list); | ||
1005 | return 0; | ||
1006 | } | ||
1007 | |||
1008 | static int sbp2_scan_logical_unit_dir(struct sbp2_target *tgt, u32 *directory) | ||
1009 | { | ||
1010 | struct fw_csr_iterator ci; | ||
1011 | int key, value; | ||
1012 | |||
1013 | fw_csr_iterator_init(&ci, directory); | ||
1014 | while (fw_csr_iterator_next(&ci, &key, &value)) | ||
1015 | if (key == SBP2_CSR_LOGICAL_UNIT_NUMBER && | ||
1016 | sbp2_add_logical_unit(tgt, value) < 0) | ||
1017 | return -ENOMEM; | ||
1018 | return 0; | ||
1019 | } | ||
1020 | |||
1021 | static int sbp2_scan_unit_dir(struct sbp2_target *tgt, u32 *directory, | ||
1022 | u32 *model, u32 *firmware_revision) | ||
1023 | { | ||
1024 | struct fw_csr_iterator ci; | ||
1025 | int key, value; | ||
1026 | unsigned int timeout; | ||
1027 | |||
1028 | fw_csr_iterator_init(&ci, directory); | ||
1029 | while (fw_csr_iterator_next(&ci, &key, &value)) { | ||
1030 | switch (key) { | ||
1031 | |||
1032 | case CSR_DEPENDENT_INFO | CSR_OFFSET: | ||
1033 | tgt->management_agent_address = | ||
1034 | CSR_REGISTER_BASE + 4 * value; | ||
1035 | break; | ||
1036 | |||
1037 | case CSR_DIRECTORY_ID: | ||
1038 | tgt->directory_id = value; | ||
1039 | break; | ||
1040 | |||
1041 | case CSR_MODEL: | ||
1042 | *model = value; | ||
1043 | break; | ||
1044 | |||
1045 | case SBP2_CSR_FIRMWARE_REVISION: | ||
1046 | *firmware_revision = value; | ||
1047 | break; | ||
1048 | |||
1049 | case SBP2_CSR_UNIT_CHARACTERISTICS: | ||
1050 | /* the timeout value is stored in 500ms units */ | ||
1051 | timeout = ((unsigned int) value >> 8 & 0xff) * 500; | ||
1052 | timeout = max(timeout, SBP2_MIN_LOGIN_ORB_TIMEOUT); | ||
1053 | tgt->mgt_orb_timeout = | ||
1054 | min(timeout, SBP2_MAX_LOGIN_ORB_TIMEOUT); | ||
1055 | |||
1056 | if (timeout > tgt->mgt_orb_timeout) | ||
1057 | fw_notify("%s: config rom contains %ds " | ||
1058 | "management ORB timeout, limiting " | ||
1059 | "to %ds\n", tgt->bus_id, | ||
1060 | timeout / 1000, | ||
1061 | tgt->mgt_orb_timeout / 1000); | ||
1062 | break; | ||
1063 | |||
1064 | case SBP2_CSR_LOGICAL_UNIT_NUMBER: | ||
1065 | if (sbp2_add_logical_unit(tgt, value) < 0) | ||
1066 | return -ENOMEM; | ||
1067 | break; | ||
1068 | |||
1069 | case SBP2_CSR_LOGICAL_UNIT_DIRECTORY: | ||
1070 | /* Adjust for the increment in the iterator */ | ||
1071 | if (sbp2_scan_logical_unit_dir(tgt, ci.p - 1 + value) < 0) | ||
1072 | return -ENOMEM; | ||
1073 | break; | ||
1074 | } | ||
1075 | } | ||
1076 | return 0; | ||
1077 | } | ||
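/*
 * Example of the clamping above (hypothetical config ROM values): a
 * unit_characteristics entry with an mgt_ORB_timeout field of 20 yields
 * 20 * 500 ms = 10 s and is used as-is; a field of 255 would yield 127.5 s
 * and gets limited to SBP2_MAX_LOGIN_ORB_TIMEOUT (40 s), while a very small
 * field is raised to at least SBP2_MIN_LOGIN_ORB_TIMEOUT (5 s).
 */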
1078 | |||
1079 | static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model, | ||
1080 | u32 firmware_revision) | ||
1081 | { | ||
1082 | int i; | ||
1083 | unsigned int w = sbp2_param_workarounds; | ||
1084 | |||
1085 | if (w) | ||
1086 | fw_notify("Please notify linux1394-devel@lists.sourceforge.net " | ||
1087 | "if you need the workarounds parameter for %s\n", | ||
1088 | tgt->bus_id); | ||
1089 | |||
1090 | if (w & SBP2_WORKAROUND_OVERRIDE) | ||
1091 | goto out; | ||
1092 | |||
1093 | for (i = 0; i < ARRAY_SIZE(sbp2_workarounds_table); i++) { | ||
1094 | |||
1095 | if (sbp2_workarounds_table[i].firmware_revision != | ||
1096 | (firmware_revision & 0xffffff00)) | ||
1097 | continue; | ||
1098 | |||
1099 | if (sbp2_workarounds_table[i].model != model && | ||
1100 | sbp2_workarounds_table[i].model != SBP2_ROM_VALUE_WILDCARD) | ||
1101 | continue; | ||
1102 | |||
1103 | w |= sbp2_workarounds_table[i].workarounds; | ||
1104 | break; | ||
1105 | } | ||
1106 | out: | ||
1107 | if (w) | ||
1108 | fw_notify("Workarounds for %s: 0x%x " | ||
1109 | "(firmware_revision 0x%06x, model_id 0x%06x)\n", | ||
1110 | tgt->bus_id, w, firmware_revision, model); | ||
1111 | tgt->workarounds = w; | ||
1112 | } | ||
1113 | |||
1114 | static struct scsi_host_template scsi_driver_template; | ||
1115 | |||
1116 | static int sbp2_probe(struct device *dev) | ||
1117 | { | ||
1118 | struct fw_unit *unit = fw_unit(dev); | ||
1119 | struct fw_device *device = fw_device(unit->device.parent); | ||
1120 | struct sbp2_target *tgt; | ||
1121 | struct sbp2_logical_unit *lu; | ||
1122 | struct Scsi_Host *shost; | ||
1123 | u32 model, firmware_revision; | ||
1124 | |||
1125 | if (dma_get_max_seg_size(device->card->device) > SBP2_MAX_SEG_SIZE) | ||
1126 | BUG_ON(dma_set_max_seg_size(device->card->device, | ||
1127 | SBP2_MAX_SEG_SIZE)); | ||
1128 | |||
1129 | shost = scsi_host_alloc(&scsi_driver_template, sizeof(*tgt)); | ||
1130 | if (shost == NULL) | ||
1131 | return -ENOMEM; | ||
1132 | |||
1133 | tgt = (struct sbp2_target *)shost->hostdata; | ||
1134 | unit->device.driver_data = tgt; | ||
1135 | tgt->unit = unit; | ||
1136 | kref_init(&tgt->kref); | ||
1137 | INIT_LIST_HEAD(&tgt->lu_list); | ||
1138 | tgt->bus_id = dev_name(&unit->device); | ||
1139 | tgt->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4]; | ||
1140 | |||
1141 | if (fw_device_enable_phys_dma(device) < 0) | ||
1142 | goto fail_shost_put; | ||
1143 | |||
1144 | if (scsi_add_host(shost, &unit->device) < 0) | ||
1145 | goto fail_shost_put; | ||
1146 | |||
1147 | fw_device_get(device); | ||
1148 | fw_unit_get(unit); | ||
1149 | |||
1150 | /* implicit directory ID */ | ||
1151 | tgt->directory_id = ((unit->directory - device->config_rom) * 4 | ||
1152 | + CSR_CONFIG_ROM) & 0xffffff; | ||
1153 | |||
1154 | firmware_revision = SBP2_ROM_VALUE_MISSING; | ||
1155 | model = SBP2_ROM_VALUE_MISSING; | ||
1156 | |||
1157 | if (sbp2_scan_unit_dir(tgt, unit->directory, &model, | ||
1158 | &firmware_revision) < 0) | ||
1159 | goto fail_tgt_put; | ||
1160 | |||
1161 | sbp2_init_workarounds(tgt, model, firmware_revision); | ||
1162 | |||
1163 | /* | ||
1164 | * At S100 we can do 512 bytes per packet, at S200 1024 bytes, | ||
1165 | * and so on up to 4096 bytes. The SBP-2 max_payload field | ||
1166 | * specifies the max payload size as 2 ^ (max_payload + 2), so | ||
1167 | * if we set this to max_speed + 7, we get the right value. | ||
1168 | */ | ||
1169 | tgt->max_payload = min(device->max_speed + 7, 10U); | ||
1170 | tgt->max_payload = min(tgt->max_payload, device->card->max_receive - 1); | ||
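/*
 * Worked example (assuming the usual speed codes, S100 == 0 .. S400 == 2):
 * an S400 device gives min(2 + 7, 10) == 9, i.e. 2^(9 + 2) == 2048-byte
 * payloads, unless the controller's max_receive forces a smaller value.
 */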
1171 | |||
1172 | /* Do the login in a workqueue so we can easily reschedule retries. */ | ||
1173 | list_for_each_entry(lu, &tgt->lu_list, link) | ||
1174 | sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5)); | ||
1175 | return 0; | ||
1176 | |||
1177 | fail_tgt_put: | ||
1178 | sbp2_target_put(tgt); | ||
1179 | return -ENOMEM; | ||
1180 | |||
1181 | fail_shost_put: | ||
1182 | scsi_host_put(shost); | ||
1183 | return -ENOMEM; | ||
1184 | } | ||
1185 | |||
1186 | static int sbp2_remove(struct device *dev) | ||
1187 | { | ||
1188 | struct fw_unit *unit = fw_unit(dev); | ||
1189 | struct sbp2_target *tgt = unit->device.driver_data; | ||
1190 | |||
1191 | sbp2_target_put(tgt); | ||
1192 | return 0; | ||
1193 | } | ||
1194 | |||
1195 | static void sbp2_reconnect(struct work_struct *work) | ||
1196 | { | ||
1197 | struct sbp2_logical_unit *lu = | ||
1198 | container_of(work, struct sbp2_logical_unit, work.work); | ||
1199 | struct sbp2_target *tgt = lu->tgt; | ||
1200 | struct fw_device *device = fw_device(tgt->unit->device.parent); | ||
1201 | int generation, node_id, local_node_id; | ||
1202 | |||
1203 | if (fw_device_is_shutdown(device)) | ||
1204 | goto out; | ||
1205 | |||
1206 | generation = device->generation; | ||
1207 | smp_rmb(); /* node IDs must not be older than generation */ | ||
1208 | node_id = device->node_id; | ||
1209 | local_node_id = device->card->node_id; | ||
1210 | |||
1211 | if (sbp2_send_management_orb(lu, node_id, generation, | ||
1212 | SBP2_RECONNECT_REQUEST, | ||
1213 | lu->login_id, NULL) < 0) { | ||
1214 | /* | ||
1215 | * If reconnect was impossible even though we are in the | ||
1216 | * current generation, fall back and try to log in again. | ||
1217 | * | ||
1218 | * We could check for "Function rejected" status, but | ||
1219 | * looking at the bus generation is simpler and more general. | ||
1220 | */ | ||
1221 | smp_rmb(); /* get current card generation */ | ||
1222 | if (generation == device->card->generation || | ||
1223 | lu->retries++ >= 5) { | ||
1224 | fw_error("%s: failed to reconnect\n", tgt->bus_id); | ||
1225 | lu->retries = 0; | ||
1226 | PREPARE_DELAYED_WORK(&lu->work, sbp2_login); | ||
1227 | } | ||
1228 | sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5)); | ||
1229 | goto out; | ||
1230 | } | ||
1231 | |||
1232 | tgt->node_id = node_id; | ||
1233 | tgt->address_high = local_node_id << 16; | ||
1234 | smp_wmb(); /* node IDs must not be older than generation */ | ||
1235 | lu->generation = generation; | ||
1236 | |||
1237 | fw_notify("%s: reconnected to LUN %04x (%d retries)\n", | ||
1238 | tgt->bus_id, lu->lun, lu->retries); | ||
1239 | |||
1240 | sbp2_agent_reset(lu); | ||
1241 | sbp2_cancel_orbs(lu); | ||
1242 | sbp2_conditionally_unblock(lu); | ||
1243 | out: | ||
1244 | sbp2_target_put(tgt); | ||
1245 | } | ||
1246 | |||
1247 | static void sbp2_update(struct fw_unit *unit) | ||
1248 | { | ||
1249 | struct sbp2_target *tgt = unit->device.driver_data; | ||
1250 | struct sbp2_logical_unit *lu; | ||
1251 | |||
1252 | fw_device_enable_phys_dma(fw_device(unit->device.parent)); | ||
1253 | |||
1254 | /* | ||
1255 | * Fw-core serializes sbp2_update() against sbp2_remove(). | ||
1256 | * Iteration over tgt->lu_list is therefore safe here. | ||
1257 | */ | ||
1258 | list_for_each_entry(lu, &tgt->lu_list, link) { | ||
1259 | sbp2_conditionally_block(lu); | ||
1260 | lu->retries = 0; | ||
1261 | sbp2_queue_work(lu, 0); | ||
1262 | } | ||
1263 | } | ||
1264 | |||
1265 | #define SBP2_UNIT_SPEC_ID_ENTRY 0x0000609e | ||
1266 | #define SBP2_SW_VERSION_ENTRY 0x00010483 | ||
1267 | |||
1268 | static const struct ieee1394_device_id sbp2_id_table[] = { | ||
1269 | { | ||
1270 | .match_flags = IEEE1394_MATCH_SPECIFIER_ID | | ||
1271 | IEEE1394_MATCH_VERSION, | ||
1272 | .specifier_id = SBP2_UNIT_SPEC_ID_ENTRY, | ||
1273 | .version = SBP2_SW_VERSION_ENTRY, | ||
1274 | }, | ||
1275 | { } | ||
1276 | }; | ||
1277 | |||
1278 | static struct fw_driver sbp2_driver = { | ||
1279 | .driver = { | ||
1280 | .owner = THIS_MODULE, | ||
1281 | .name = sbp2_driver_name, | ||
1282 | .bus = &fw_bus_type, | ||
1283 | .probe = sbp2_probe, | ||
1284 | .remove = sbp2_remove, | ||
1285 | }, | ||
1286 | .update = sbp2_update, | ||
1287 | .id_table = sbp2_id_table, | ||
1288 | }; | ||
1289 | |||
1290 | static void sbp2_unmap_scatterlist(struct device *card_device, | ||
1291 | struct sbp2_command_orb *orb) | ||
1292 | { | ||
1293 | if (scsi_sg_count(orb->cmd)) | ||
1294 | dma_unmap_sg(card_device, scsi_sglist(orb->cmd), | ||
1295 | scsi_sg_count(orb->cmd), | ||
1296 | orb->cmd->sc_data_direction); | ||
1297 | |||
1298 | if (orb->request.misc & cpu_to_be32(COMMAND_ORB_PAGE_TABLE_PRESENT)) | ||
1299 | dma_unmap_single(card_device, orb->page_table_bus, | ||
1300 | sizeof(orb->page_table), DMA_TO_DEVICE); | ||
1301 | } | ||
1302 | |||
1303 | static unsigned int sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data) | ||
1304 | { | ||
1305 | int sam_status; | ||
1306 | |||
1307 | sense_data[0] = 0x70; | ||
1308 | sense_data[1] = 0x0; | ||
1309 | sense_data[2] = sbp2_status[1]; | ||
1310 | sense_data[3] = sbp2_status[4]; | ||
1311 | sense_data[4] = sbp2_status[5]; | ||
1312 | sense_data[5] = sbp2_status[6]; | ||
1313 | sense_data[6] = sbp2_status[7]; | ||
1314 | sense_data[7] = 10; | ||
1315 | sense_data[8] = sbp2_status[8]; | ||
1316 | sense_data[9] = sbp2_status[9]; | ||
1317 | sense_data[10] = sbp2_status[10]; | ||
1318 | sense_data[11] = sbp2_status[11]; | ||
1319 | sense_data[12] = sbp2_status[2]; | ||
1320 | sense_data[13] = sbp2_status[3]; | ||
1321 | sense_data[14] = sbp2_status[12]; | ||
1322 | sense_data[15] = sbp2_status[13]; | ||
1323 | |||
1324 | sam_status = sbp2_status[0] & 0x3f; | ||
1325 | |||
1326 | switch (sam_status) { | ||
1327 | case SAM_STAT_GOOD: | ||
1328 | case SAM_STAT_CHECK_CONDITION: | ||
1329 | case SAM_STAT_CONDITION_MET: | ||
1330 | case SAM_STAT_BUSY: | ||
1331 | case SAM_STAT_RESERVATION_CONFLICT: | ||
1332 | case SAM_STAT_COMMAND_TERMINATED: | ||
1333 | return DID_OK << 16 | sam_status; | ||
1334 | |||
1335 | default: | ||
1336 | return DID_ERROR << 16; | ||
1337 | } | ||
1338 | } | ||
1339 | |||
1340 | static void complete_command_orb(struct sbp2_orb *base_orb, | ||
1341 | struct sbp2_status *status) | ||
1342 | { | ||
1343 | struct sbp2_command_orb *orb = | ||
1344 | container_of(base_orb, struct sbp2_command_orb, base); | ||
1345 | struct fw_device *device = fw_device(orb->lu->tgt->unit->device.parent); | ||
1346 | int result; | ||
1347 | |||
1348 | if (status != NULL) { | ||
1349 | if (STATUS_GET_DEAD(*status)) | ||
1350 | sbp2_agent_reset_no_wait(orb->lu); | ||
1351 | |||
1352 | switch (STATUS_GET_RESPONSE(*status)) { | ||
1353 | case SBP2_STATUS_REQUEST_COMPLETE: | ||
1354 | result = DID_OK << 16; | ||
1355 | break; | ||
1356 | case SBP2_STATUS_TRANSPORT_FAILURE: | ||
1357 | result = DID_BUS_BUSY << 16; | ||
1358 | break; | ||
1359 | case SBP2_STATUS_ILLEGAL_REQUEST: | ||
1360 | case SBP2_STATUS_VENDOR_DEPENDENT: | ||
1361 | default: | ||
1362 | result = DID_ERROR << 16; | ||
1363 | break; | ||
1364 | } | ||
1365 | |||
1366 | if (result == DID_OK << 16 && STATUS_GET_LEN(*status) > 1) | ||
1367 | result = sbp2_status_to_sense_data(STATUS_GET_DATA(*status), | ||
1368 | orb->cmd->sense_buffer); | ||
1369 | } else { | ||
1370 | /* | ||
1371 | * If the orb completes with status == NULL, something | ||
1372 | * went wrong, typically a bus reset happened mid-orb | ||
1373 | * or when sending the write (less likely). | ||
1374 | */ | ||
1375 | result = DID_BUS_BUSY << 16; | ||
1376 | sbp2_conditionally_block(orb->lu); | ||
1377 | } | ||
1378 | |||
1379 | dma_unmap_single(device->card->device, orb->base.request_bus, | ||
1380 | sizeof(orb->request), DMA_TO_DEVICE); | ||
1381 | sbp2_unmap_scatterlist(device->card->device, orb); | ||
1382 | |||
1383 | orb->cmd->result = result; | ||
1384 | orb->done(orb->cmd); | ||
1385 | } | ||
1386 | |||
1387 | static int sbp2_map_scatterlist(struct sbp2_command_orb *orb, | ||
1388 | struct fw_device *device, struct sbp2_logical_unit *lu) | ||
1389 | { | ||
1390 | struct scatterlist *sg = scsi_sglist(orb->cmd); | ||
1391 | int i, n; | ||
1392 | |||
1393 | n = dma_map_sg(device->card->device, sg, scsi_sg_count(orb->cmd), | ||
1394 | orb->cmd->sc_data_direction); | ||
1395 | if (n == 0) | ||
1396 | goto fail; | ||
1397 | |||
1398 | /* | ||
1399 | * Handle the special case where there is only one element in | ||
1400 | * the scatter list by converting it to an immediate block | ||
1401 | * request. This is also a workaround for broken devices such | ||
1402 | * as the second generation iPod which doesn't support page | ||
1403 | * tables. | ||
1404 | */ | ||
1405 | if (n == 1) { | ||
1406 | orb->request.data_descriptor.high = | ||
1407 | cpu_to_be32(lu->tgt->address_high); | ||
1408 | orb->request.data_descriptor.low = | ||
1409 | cpu_to_be32(sg_dma_address(sg)); | ||
1410 | orb->request.misc |= | ||
1411 | cpu_to_be32(COMMAND_ORB_DATA_SIZE(sg_dma_len(sg))); | ||
1412 | return 0; | ||
1413 | } | ||
1414 | |||
1415 | for_each_sg(sg, sg, n, i) { | ||
1416 | orb->page_table[i].high = cpu_to_be32(sg_dma_len(sg) << 16); | ||
1417 | orb->page_table[i].low = cpu_to_be32(sg_dma_address(sg)); | ||
1418 | } | ||
1419 | |||
1420 | orb->page_table_bus = | ||
1421 | dma_map_single(device->card->device, orb->page_table, | ||
1422 | sizeof(orb->page_table), DMA_TO_DEVICE); | ||
1423 | if (dma_mapping_error(device->card->device, orb->page_table_bus)) | ||
1424 | goto fail_page_table; | ||
1425 | |||
1426 | /* | ||
1427 | * The data_descriptor pointer is the one case where we need | ||
1428 | * to fill in the node ID part of the address. All other | ||
1429 | * pointers assume that the data referenced reside on the | ||
1430 | * initiator (i.e. us), but data_descriptor can refer to data | ||
1431 | * on other nodes so we need to put our ID in descriptor.high. | ||
1432 | */ | ||
1433 | orb->request.data_descriptor.high = cpu_to_be32(lu->tgt->address_high); | ||
1434 | orb->request.data_descriptor.low = cpu_to_be32(orb->page_table_bus); | ||
1435 | orb->request.misc |= cpu_to_be32(COMMAND_ORB_PAGE_TABLE_PRESENT | | ||
1436 | COMMAND_ORB_DATA_SIZE(n)); | ||
1437 | |||
1438 | return 0; | ||
1439 | |||
1440 | fail_page_table: | ||
1441 | dma_unmap_sg(device->card->device, scsi_sglist(orb->cmd), | ||
1442 | scsi_sg_count(orb->cmd), orb->cmd->sc_data_direction); | ||
1443 | fail: | ||
1444 | return -ENOMEM; | ||
1445 | } | ||
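
The for_each_sg() loop above builds an SBP-2 page table: each element is two big-endian quadlets, with the segment length in the upper 16 bits of the first quadlet and the low 32 address bits in the second. The low 16 bits of .high, which in the SBP-2 page-table format would carry the high part of the segment base address, are left zero by the driver; the high address bits are supplied once for the whole table through the ORB's data_descriptor.high (lu->tgt->address_high). A small userspace sketch of that packing, using htonl() as a stand-in for cpu_to_be32():

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>	/* htonl()/ntohl() stand in for cpu_to_be32() */

/* One page table element: two big-endian quadlets. */
struct sbp2_pte {
	uint32_t high;	/* segment_length:16 | segment_base_hi:16 */
	uint32_t low;	/* segment_base_lo:32 */
};

/* Pack a DMA segment the same way the loop above does. */
static void fill_pte(struct sbp2_pte *pte, uint32_t dma_len, uint32_t dma_addr_lo)
{
	pte->high = htonl(dma_len << 16);	/* base_hi stays zero */
	pte->low  = htonl(dma_addr_lo);
}

int main(void)
{
	struct sbp2_pte pte;

	/* 4 KiB segment at a made-up bus address. */
	fill_pte(&pte, 0x1000, 0x12340000);
	printf("high=%08x low=%08x\n",
	       (unsigned)ntohl(pte.high), (unsigned)ntohl(pte.low));
	return 0;
}

The single-element fast path earlier in the function skips the table entirely and points data_descriptor straight at the one segment, which also sidesteps devices (such as the second-generation iPod) that cannot parse page tables.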
1446 | |||
1447 | /* SCSI stack integration */ | ||
1448 | |||
1449 | static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done) | ||
1450 | { | ||
1451 | struct sbp2_logical_unit *lu = cmd->device->hostdata; | ||
1452 | struct fw_device *device = fw_device(lu->tgt->unit->device.parent); | ||
1453 | struct sbp2_command_orb *orb; | ||
1454 | int generation, retval = SCSI_MLQUEUE_HOST_BUSY; | ||
1455 | |||
1456 | /* | ||
1457 | * Bidirectional commands are not yet implemented, and an unknown | ||
1458 | * transfer direction is not handled. | ||
1457 | * Bidirectional commands are not yet implemented, and an unknown | ||
1458 | * transfer direction is not handled. | ||
1459 | */ | ||
1460 | if (cmd->sc_data_direction == DMA_BIDIRECTIONAL) { | ||
1461 | fw_error("Can't handle DMA_BIDIRECTIONAL, rejecting command\n"); | ||
1462 | cmd->result = DID_ERROR << 16; | ||
1463 | done(cmd); | ||
1464 | return 0; | ||
1465 | } | ||
1466 | |||
1467 | orb = kzalloc(sizeof(*orb), GFP_ATOMIC); | ||
1468 | if (orb == NULL) { | ||
1469 | fw_notify("failed to alloc orb\n"); | ||
1470 | return SCSI_MLQUEUE_HOST_BUSY; | ||
1471 | } | ||
1472 | |||
1473 | /* Initialize rcode to something not RCODE_COMPLETE. */ | ||
1474 | orb->base.rcode = -1; | ||
1475 | kref_init(&orb->base.kref); | ||
1476 | |||
1477 | orb->lu = lu; | ||
1478 | orb->done = done; | ||
1479 | orb->cmd = cmd; | ||
1480 | |||
1481 | orb->request.next.high = cpu_to_be32(SBP2_ORB_NULL); | ||
1482 | orb->request.misc = cpu_to_be32( | ||
1483 | COMMAND_ORB_MAX_PAYLOAD(lu->tgt->max_payload) | | ||
1484 | COMMAND_ORB_SPEED(device->max_speed) | | ||
1485 | COMMAND_ORB_NOTIFY); | ||
1486 | |||
1487 | if (cmd->sc_data_direction == DMA_FROM_DEVICE) | ||
1488 | orb->request.misc |= cpu_to_be32(COMMAND_ORB_DIRECTION); | ||
1489 | |||
1490 | generation = device->generation; | ||
1491 | smp_rmb(); /* sbp2_map_scatterlist looks at tgt->address_high */ | ||
1492 | |||
1493 | if (scsi_sg_count(cmd) && sbp2_map_scatterlist(orb, device, lu) < 0) | ||
1494 | goto out; | ||
1495 | |||
1496 | memcpy(orb->request.command_block, cmd->cmnd, cmd->cmd_len); | ||
1497 | |||
1498 | orb->base.callback = complete_command_orb; | ||
1499 | orb->base.request_bus = | ||
1500 | dma_map_single(device->card->device, &orb->request, | ||
1501 | sizeof(orb->request), DMA_TO_DEVICE); | ||
1502 | if (dma_mapping_error(device->card->device, orb->base.request_bus)) { | ||
1503 | sbp2_unmap_scatterlist(device->card->device, orb); | ||
1504 | goto out; | ||
1505 | } | ||
1506 | |||
1507 | sbp2_send_orb(&orb->base, lu, lu->tgt->node_id, generation, | ||
1508 | lu->command_block_agent_address + SBP2_ORB_POINTER); | ||
1509 | retval = 0; | ||
1510 | out: | ||
1511 | kref_put(&orb->base.kref, free_orb); | ||
1512 | return retval; | ||
1513 | } | ||
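
Note the reference counting that makes the unconditional kref_put() at out: safe: queuecommand owns one reference from kref_init(), the send path takes its own reference(s) while the ORB is in flight, and whoever drops the last reference frees it via free_orb(). The sketch below is a userspace analogue of that get/put-with-release pattern; the names are made up, and the kernel's kref API is what the driver actually uses.

#include <stdio.h>
#include <stdlib.h>

/* A userspace stand-in for the kernel's struct kref. */
struct ref {
	int count;
	void (*release)(struct ref *r);
};

static void ref_init(struct ref *r, void (*release)(struct ref *r))
{
	r->count = 1;
	r->release = release;
}

static void ref_get(struct ref *r) { r->count++; }

static void ref_put(struct ref *r)
{
	if (--r->count == 0)
		r->release(r);
}

struct orb {
	struct ref ref;		/* first member, so the cast below is valid */
	/* ... request, callback, ... */
};

static void release_orb(struct ref *r)
{
	free((struct orb *)r);	/* the kernel would use container_of() */
	printf("orb freed\n");
}

int main(void)
{
	struct orb *orb = calloc(1, sizeof(*orb));

	ref_init(&orb->ref, release_orb);	/* queuecommand's reference */
	ref_get(&orb->ref);			/* the send path's reference */

	ref_put(&orb->ref);	/* queuecommand's put at 'out:' */
	ref_put(&orb->ref);	/* completion path's put -> freed */
	return 0;
}

This is also why the error paths simply goto out: whether or not the ORB was ever sent, dropping queuecommand's reference is always correct.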
1514 | |||
1515 | static int sbp2_scsi_slave_alloc(struct scsi_device *sdev) | ||
1516 | { | ||
1517 | struct sbp2_logical_unit *lu = sdev->hostdata; | ||
1518 | |||
1519 | /* (Re-)Adding logical units via the SCSI stack is not supported. */ | ||
1520 | if (!lu) | ||
1521 | return -ENOSYS; | ||
1522 | |||
1523 | sdev->allow_restart = 1; | ||
1524 | |||
1525 | /* SBP-2 requires quadlet alignment of the data buffers. */ | ||
1526 | blk_queue_update_dma_alignment(sdev->request_queue, 4 - 1); | ||
1527 | |||
1528 | if (lu->tgt->workarounds & SBP2_WORKAROUND_INQUIRY_36) | ||
1529 | sdev->inquiry_len = 36; | ||
1530 | |||
1531 | return 0; | ||
1532 | } | ||
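
blk_queue_update_dma_alignment() takes an alignment mask, so the 4 - 1 (0x3) above enforces the quadlet (4-byte) buffer alignment SBP-2 requires. A trivial sketch of the test such a mask implies; this is an illustration, not the block layer's actual code:

#include <stdio.h>
#include <stdint.h>

/* Quadlet alignment: the low two address bits must be clear. */
static int is_quadlet_aligned(uintptr_t addr)
{
	return (addr & (4 - 1)) == 0;
}

int main(void)
{
	printf("0x1000 aligned: %d\n", is_quadlet_aligned(0x1000));	/* 1 */
	printf("0x1002 aligned: %d\n", is_quadlet_aligned(0x1002));	/* 0 */
	return 0;
}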
1533 | |||
1534 | static int sbp2_scsi_slave_configure(struct scsi_device *sdev) | ||
1535 | { | ||
1536 | struct sbp2_logical_unit *lu = sdev->hostdata; | ||
1537 | |||
1538 | sdev->use_10_for_rw = 1; | ||
1539 | |||
1540 | if (sbp2_param_exclusive_login) | ||
1541 | sdev->manage_start_stop = 1; | ||
1542 | |||
1543 | if (sdev->type == TYPE_ROM) | ||
1544 | sdev->use_10_for_ms = 1; | ||
1545 | |||
1546 | if (sdev->type == TYPE_DISK && | ||
1547 | lu->tgt->workarounds & SBP2_WORKAROUND_MODE_SENSE_8) | ||
1548 | sdev->skip_ms_page_8 = 1; | ||
1549 | |||
1550 | if (lu->tgt->workarounds & SBP2_WORKAROUND_FIX_CAPACITY) | ||
1551 | sdev->fix_capacity = 1; | ||
1552 | |||
1553 | if (lu->tgt->workarounds & SBP2_WORKAROUND_POWER_CONDITION) | ||
1554 | sdev->start_stop_pwr_cond = 1; | ||
1555 | |||
1556 | if (lu->tgt->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS) | ||
1557 | blk_queue_max_sectors(sdev->request_queue, 128 * 1024 / 512); | ||
1558 | |||
1559 | blk_queue_max_segment_size(sdev->request_queue, SBP2_MAX_SEG_SIZE); | ||
1560 | |||
1561 | return 0; | ||
1562 | } | ||
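
The workaround checks above are plain bit tests against a mask accumulated earlier in the driver, and the 128 KiB transfer cap is expressed in 512-byte sectors (128 * 1024 / 512 = 256). A small sketch with hypothetical flag values; the real SBP2_WORKAROUND_* constants are defined earlier in the file and may differ:

#include <stdio.h>

/* Hypothetical flag values, for illustration only. */
#define WA_FIX_CAPACITY    0x04
#define WA_128K_MAX_TRANS  0x40

int main(void)
{
	unsigned int workarounds = WA_FIX_CAPACITY | WA_128K_MAX_TRANS;

	if (workarounds & WA_FIX_CAPACITY)
		printf("apply the off-by-one capacity fix\n");

	if (workarounds & WA_128K_MAX_TRANS)
		/* max_sectors is counted in 512-byte sectors */
		printf("max transfer: %u sectors\n", 128 * 1024 / 512);

	return 0;
}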
1563 | |||
1564 | /* | ||
1565 | * Called by the SCSI stack when something has gone really wrong, | ||
1566 | * usually when a command has timed out. | ||
1567 | */ | ||
1568 | static int sbp2_scsi_abort(struct scsi_cmnd *cmd) | ||
1569 | { | ||
1570 | struct sbp2_logical_unit *lu = cmd->device->hostdata; | ||
1571 | |||
1572 | fw_notify("%s: sbp2_scsi_abort\n", lu->tgt->bus_id); | ||
1573 | sbp2_agent_reset(lu); | ||
1574 | sbp2_cancel_orbs(lu); | ||
1575 | |||
1576 | return SUCCESS; | ||
1577 | } | ||
1578 | |||
1579 | /* | ||
1580 | * Format of /sys/bus/scsi/devices/.../ieee1394_id: | ||
1581 | * u64 EUI-64 : u24 directory_ID : u16 LUN (all printed in hexadecimal) | ||
1582 | * | ||
1583 | * This is the concatenation of target port identifier and logical unit | ||
1584 | * identifier as per SAM-2...SAM-4 annex A. | ||
1585 | */ | ||
1586 | static ssize_t sbp2_sysfs_ieee1394_id_show(struct device *dev, | ||
1587 | struct device_attribute *attr, char *buf) | ||
1588 | { | ||
1589 | struct scsi_device *sdev = to_scsi_device(dev); | ||
1590 | struct sbp2_logical_unit *lu; | ||
1591 | |||
1592 | if (!sdev) | ||
1593 | return 0; | ||
1594 | |||
1595 | lu = sdev->hostdata; | ||
1596 | |||
1597 | return sprintf(buf, "%016llx:%06x:%04x\n", | ||
1598 | (unsigned long long)lu->tgt->guid, | ||
1599 | lu->tgt->directory_id, lu->lun); | ||
1600 | } | ||
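
The format string above produces a fixed-width hex triple: 16 digits of EUI-64, 6 of directory ID, 4 of LUN. A compilable sketch printing the same format with made-up values, to show what userspace reads from the sysfs attribute:

#include <stdio.h>

int main(void)
{
	/* Made-up example values, not a real device. */
	unsigned long long guid = 0x0001d20200abcdefULL;
	unsigned int directory_id = 0x000c10;
	unsigned int lun = 0x0000;

	/* Same format as sbp2_sysfs_ieee1394_id_show() uses. */
	printf("%016llx:%06x:%04x\n", guid, directory_id, lun);
	return 0;
}

This prints 0001d20200abcdef:000c10:0000, i.e. the concatenated target port identifier and logical unit identifier described in the comment.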
1601 | |||
1602 | static DEVICE_ATTR(ieee1394_id, S_IRUGO, sbp2_sysfs_ieee1394_id_show, NULL); | ||
1603 | |||
1604 | static struct device_attribute *sbp2_scsi_sysfs_attrs[] = { | ||
1605 | &dev_attr_ieee1394_id, | ||
1606 | NULL | ||
1607 | }; | ||
1608 | |||
1609 | static struct scsi_host_template scsi_driver_template = { | ||
1610 | .module = THIS_MODULE, | ||
1611 | .name = "SBP-2 IEEE-1394", | ||
1612 | .proc_name = sbp2_driver_name, | ||
1613 | .queuecommand = sbp2_scsi_queuecommand, | ||
1614 | .slave_alloc = sbp2_scsi_slave_alloc, | ||
1615 | .slave_configure = sbp2_scsi_slave_configure, | ||
1616 | .eh_abort_handler = sbp2_scsi_abort, | ||
1617 | .this_id = -1, | ||
1618 | .sg_tablesize = SG_ALL, | ||
1619 | .use_clustering = ENABLE_CLUSTERING, | ||
1620 | .cmd_per_lun = 1, | ||
1621 | .can_queue = 1, | ||
1622 | .sdev_attrs = sbp2_scsi_sysfs_attrs, | ||
1623 | }; | ||
1624 | |||
1625 | MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>"); | ||
1626 | MODULE_DESCRIPTION("SCSI over IEEE1394"); | ||
1627 | MODULE_LICENSE("GPL"); | ||
1628 | MODULE_DEVICE_TABLE(ieee1394, sbp2_id_table); | ||
1629 | |||
1630 | /* Provide a module alias so root-on-sbp2 initrds don't break. */ | ||
1631 | #ifndef CONFIG_IEEE1394_SBP2_MODULE | ||
1632 | MODULE_ALIAS("sbp2"); | ||
1633 | #endif | ||
1634 | |||
1635 | static int __init sbp2_init(void) | ||
1636 | { | ||
1637 | sbp2_wq = create_singlethread_workqueue(KBUILD_MODNAME); | ||
1638 | if (!sbp2_wq) | ||
1639 | return -ENOMEM; | ||
1640 | |||
1641 | return driver_register(&sbp2_driver.driver); | ||
1642 | } | ||
1643 | |||
1644 | static void __exit sbp2_cleanup(void) | ||
1645 | { | ||
1646 | driver_unregister(&sbp2_driver.driver); | ||
1647 | destroy_workqueue(sbp2_wq); | ||
1648 | } | ||
1649 | |||
1650 | module_init(sbp2_init); | ||
1651 | module_exit(sbp2_cleanup); | ||