author | Linus Torvalds <torvalds@linux-foundation.org> | 2009-06-16 17:29:46 -0400
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-06-16 17:29:46 -0400
commit | f83b1e616f2f68b56b09b2f5116591981fee0c1c (patch)
tree | d8c423502853b46ec82c58ee31b695552579f7da /drivers/firewire/sbp2.c
parent | 2f38d70fb4e97e7d00e12eaac45790cf6ebd7b22 (diff)
parent | 1e626fdcef61460dc75fe7377f38bb019722b848 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6:
firewire: core: use more outbound tlabels
firewire: core: don't update Broadcast_Channel if RFC 2734 conditions aren't met
firewire: core: prepare for non-core children of card devices
firewire: core: include linux/uaccess.h instead of asm/uaccess.h
firewire: add parent-of-unit accessor
firewire: rename source files
firewire: reorganize header files
firewire: clean up includes
firewire: ohci: access bus_seconds atomically
firewire: also use vendor ID in root directory for driver matches
firewire: share device ID table type with ieee1394
firewire: core: add sysfs attribute for easier udev rules
firewire: core: check for missing struct update at build time, not run time
firewire: core: improve check for local node
Diffstat (limited to 'drivers/firewire/sbp2.c')
-rw-r--r-- | drivers/firewire/sbp2.c | 1656 |
1 file changed, 1656 insertions(+), 0 deletions(-)
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
new file mode 100644
index 000000000000..24c45635376a
--- /dev/null
+++ b/drivers/firewire/sbp2.c
@@ -0,0 +1,1656 @@
1 | /* | ||
2 | * SBP2 driver (SCSI over IEEE1394) | ||
3 | * | ||
4 | * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software Foundation, | ||
18 | * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
19 | */ | ||
20 | |||
21 | /* | ||
22 | * The basic structure of this driver is based on the old storage driver, | ||
23 | * drivers/ieee1394/sbp2.c, originally written by | ||
24 | * James Goodwin <jamesg@filanet.com> | ||
25 | * with later contributions and ongoing maintenance from | ||
26 | * Ben Collins <bcollins@debian.org>, | ||
27 | * Stefan Richter <stefanr@s5r6.in-berlin.de> | ||
28 | * and many others. | ||
29 | */ | ||
30 | |||
31 | #include <linux/blkdev.h> | ||
32 | #include <linux/bug.h> | ||
33 | #include <linux/completion.h> | ||
34 | #include <linux/delay.h> | ||
35 | #include <linux/device.h> | ||
36 | #include <linux/dma-mapping.h> | ||
37 | #include <linux/firewire.h> | ||
38 | #include <linux/firewire-constants.h> | ||
39 | #include <linux/init.h> | ||
40 | #include <linux/jiffies.h> | ||
41 | #include <linux/kernel.h> | ||
42 | #include <linux/kref.h> | ||
43 | #include <linux/list.h> | ||
44 | #include <linux/mod_devicetable.h> | ||
45 | #include <linux/module.h> | ||
46 | #include <linux/moduleparam.h> | ||
47 | #include <linux/scatterlist.h> | ||
48 | #include <linux/slab.h> | ||
49 | #include <linux/spinlock.h> | ||
50 | #include <linux/string.h> | ||
51 | #include <linux/stringify.h> | ||
52 | #include <linux/workqueue.h> | ||
53 | |||
54 | #include <asm/byteorder.h> | ||
55 | #include <asm/system.h> | ||
56 | |||
57 | #include <scsi/scsi.h> | ||
58 | #include <scsi/scsi_cmnd.h> | ||
59 | #include <scsi/scsi_device.h> | ||
60 | #include <scsi/scsi_host.h> | ||
61 | |||
62 | /* | ||
63 | * So far only bridges from Oxford Semiconductor are known to support | ||
64 | * concurrent logins. Depending on firmware, four or two concurrent logins | ||
65 | * are possible on OXFW911 and newer Oxsemi bridges. | ||
66 | * | ||
67 | * Concurrent logins are useful together with cluster filesystems. | ||
68 | */ | ||
69 | static int sbp2_param_exclusive_login = 1; | ||
70 | module_param_named(exclusive_login, sbp2_param_exclusive_login, bool, 0644); | ||
71 | MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device " | ||
72 | "(default = Y, use N for concurrent initiators)"); | ||
73 | |||
74 | /* | ||
75 | * Flags for firmware oddities | ||
76 | * | ||
77 | * - 128kB max transfer | ||
78 | * Limit transfer size. Necessary for some old bridges. | ||
79 | * | ||
80 | * - 36 byte inquiry | ||
81 | * When scsi_mod probes the device, let the inquiry command look like that | ||
82 | * from MS Windows. | ||
83 | * | ||
84 | * - skip mode page 8 | ||
85 | * Suppress sending of mode_sense for mode page 8 if the device pretends to | ||
86 | * support the SCSI Primary Block commands instead of Reduced Block Commands. | ||
87 | * | ||
88 | * - fix capacity | ||
89 | * Tell sd_mod to correct the last sector number reported by read_capacity. | ||
90 | * Avoids access beyond actual disk limits on devices with an off-by-one bug. | ||
91 | * Don't use this with devices which don't have this bug. | ||
92 | * | ||
93 | * - delay inquiry | ||
94 | * Wait extra SBP2_INQUIRY_DELAY seconds after login before SCSI inquiry. | ||
95 | * | ||
96 | * - power condition | ||
97 | * Set the power condition field in the START STOP UNIT commands sent by | ||
98 | * sd_mod on suspend, resume, and shutdown (if manage_start_stop is on). | ||
99 | * Some disks need this to spin down or to resume properly. | ||
100 | * | ||
101 | * - override internal blacklist | ||
102 | * Instead of adding to the built-in blacklist, use only the workarounds | ||
103 | * specified in the module load parameter. | ||
104 | * Useful if a blacklist entry interfered with a non-broken device. | ||
105 | */ | ||
106 | #define SBP2_WORKAROUND_128K_MAX_TRANS 0x1 | ||
107 | #define SBP2_WORKAROUND_INQUIRY_36 0x2 | ||
108 | #define SBP2_WORKAROUND_MODE_SENSE_8 0x4 | ||
109 | #define SBP2_WORKAROUND_FIX_CAPACITY 0x8 | ||
110 | #define SBP2_WORKAROUND_DELAY_INQUIRY 0x10 | ||
111 | #define SBP2_INQUIRY_DELAY 12 | ||
112 | #define SBP2_WORKAROUND_POWER_CONDITION 0x20 | ||
113 | #define SBP2_WORKAROUND_OVERRIDE 0x100 | ||
114 | |||
115 | static int sbp2_param_workarounds; | ||
116 | module_param_named(workarounds, sbp2_param_workarounds, int, 0644); | ||
117 | MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0" | ||
118 | ", 128kB max transfer = " __stringify(SBP2_WORKAROUND_128K_MAX_TRANS) | ||
119 | ", 36 byte inquiry = " __stringify(SBP2_WORKAROUND_INQUIRY_36) | ||
120 | ", skip mode page 8 = " __stringify(SBP2_WORKAROUND_MODE_SENSE_8) | ||
121 | ", fix capacity = " __stringify(SBP2_WORKAROUND_FIX_CAPACITY) | ||
122 | ", delay inquiry = " __stringify(SBP2_WORKAROUND_DELAY_INQUIRY) | ||
123 | ", set power condition in start stop unit = " | ||
124 | __stringify(SBP2_WORKAROUND_POWER_CONDITION) | ||
125 | ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE) | ||
126 | ", or a combination)"); | ||
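/*
 * Illustration (not part of the original file): the parameter is a plain
 * bitwise OR of the flags above, so -- assuming the module is loaded under
 * its usual name firewire-sbp2 --
 *
 *	modprobe firewire-sbp2 workarounds=0x9
 *
 * combines "128kB max transfer" (0x1) with "fix capacity" (0x8).  OR-ing in
 * SBP2_WORKAROUND_OVERRIDE (0x100) would additionally disable the built-in
 * blacklist lookup.
 */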
127 | |||
128 | /* I don't know why the SCSI stack doesn't define something like this... */ | ||
129 | typedef void (*scsi_done_fn_t)(struct scsi_cmnd *); | ||
130 | |||
131 | static const char sbp2_driver_name[] = "sbp2"; | ||
132 | |||
133 | /* | ||
134 | * We create one struct sbp2_logical_unit per SBP-2 Logical Unit Number Entry | ||
135 | * and one struct scsi_device per sbp2_logical_unit. | ||
136 | */ | ||
137 | struct sbp2_logical_unit { | ||
138 | struct sbp2_target *tgt; | ||
139 | struct list_head link; | ||
140 | struct fw_address_handler address_handler; | ||
141 | struct list_head orb_list; | ||
142 | |||
143 | u64 command_block_agent_address; | ||
144 | u16 lun; | ||
145 | int login_id; | ||
146 | |||
147 | /* | ||
148 | * The generation is updated once we've logged in or reconnected | ||
149 | * to the logical unit. Thus, I/O to the device will automatically | ||
150 | * fail and get retried if it happens in a window where the device | ||
151 | * is not ready, e.g. after a bus reset but before we reconnect. | ||
152 | */ | ||
153 | int generation; | ||
154 | int retries; | ||
155 | struct delayed_work work; | ||
156 | bool has_sdev; | ||
157 | bool blocked; | ||
158 | }; | ||
159 | |||
160 | /* | ||
161 | * We create one struct sbp2_target per IEEE 1212 Unit Directory | ||
162 | * and one struct Scsi_Host per sbp2_target. | ||
163 | */ | ||
164 | struct sbp2_target { | ||
165 | struct kref kref; | ||
166 | struct fw_unit *unit; | ||
167 | const char *bus_id; | ||
168 | struct list_head lu_list; | ||
169 | |||
170 | u64 management_agent_address; | ||
171 | u64 guid; | ||
172 | int directory_id; | ||
173 | int node_id; | ||
174 | int address_high; | ||
175 | unsigned int workarounds; | ||
176 | unsigned int mgt_orb_timeout; | ||
177 | unsigned int max_payload; | ||
178 | |||
179 | int dont_block; /* counter for each logical unit */ | ||
180 | int blocked; /* ditto */ | ||
181 | }; | ||
182 | |||
183 | static struct fw_device *target_device(struct sbp2_target *tgt) | ||
184 | { | ||
185 | return fw_parent_device(tgt->unit); | ||
186 | } | ||
187 | |||
188 | /* Impossible login_id, to detect logout attempt before successful login */ | ||
189 | #define INVALID_LOGIN_ID 0x10000 | ||
190 | |||
191 | /* | ||
192 | * Per section 7.4.8 of the SBP-2 spec, a mgt_ORB_timeout value can be | ||
193 | * provided in the config rom. Most devices do provide a value, which | ||
194 | * we'll use for login management orbs, but with some sane limits. | ||
195 | */ | ||
196 | #define SBP2_MIN_LOGIN_ORB_TIMEOUT 5000U /* Timeout in ms */ | ||
197 | #define SBP2_MAX_LOGIN_ORB_TIMEOUT 40000U /* Timeout in ms */ | ||
198 | #define SBP2_ORB_TIMEOUT 2000U /* Timeout in ms */ | ||
199 | #define SBP2_ORB_NULL 0x80000000 | ||
200 | #define SBP2_RETRY_LIMIT 0xf /* 15 retries */ | ||
201 | #define SBP2_CYCLE_LIMIT (0xc8 << 12) /* 200 125us cycles */ | ||
202 | |||
203 | /* | ||
204 | * The default maximum s/g segment size of a FireWire controller is | ||
205 | * usually 0x10000, but SBP-2 only allows 0xffff. Since buffers have to | ||
206 | * be quadlet-aligned, we set the length limit to 0xffff & ~3. | ||
207 | */ | ||
208 | #define SBP2_MAX_SEG_SIZE 0xfffc | ||
209 | |||
210 | /* Unit directory keys */ | ||
211 | #define SBP2_CSR_UNIT_CHARACTERISTICS 0x3a | ||
212 | #define SBP2_CSR_FIRMWARE_REVISION 0x3c | ||
213 | #define SBP2_CSR_LOGICAL_UNIT_NUMBER 0x14 | ||
214 | #define SBP2_CSR_LOGICAL_UNIT_DIRECTORY 0xd4 | ||
215 | |||
216 | /* Management orb opcodes */ | ||
217 | #define SBP2_LOGIN_REQUEST 0x0 | ||
218 | #define SBP2_QUERY_LOGINS_REQUEST 0x1 | ||
219 | #define SBP2_RECONNECT_REQUEST 0x3 | ||
220 | #define SBP2_SET_PASSWORD_REQUEST 0x4 | ||
221 | #define SBP2_LOGOUT_REQUEST 0x7 | ||
222 | #define SBP2_ABORT_TASK_REQUEST 0xb | ||
223 | #define SBP2_ABORT_TASK_SET 0xc | ||
224 | #define SBP2_LOGICAL_UNIT_RESET 0xe | ||
225 | #define SBP2_TARGET_RESET_REQUEST 0xf | ||
226 | |||
227 | /* Offsets for command block agent registers */ | ||
228 | #define SBP2_AGENT_STATE 0x00 | ||
229 | #define SBP2_AGENT_RESET 0x04 | ||
230 | #define SBP2_ORB_POINTER 0x08 | ||
231 | #define SBP2_DOORBELL 0x10 | ||
232 | #define SBP2_UNSOLICITED_STATUS_ENABLE 0x14 | ||
233 | |||
234 | /* Status write response codes */ | ||
235 | #define SBP2_STATUS_REQUEST_COMPLETE 0x0 | ||
236 | #define SBP2_STATUS_TRANSPORT_FAILURE 0x1 | ||
237 | #define SBP2_STATUS_ILLEGAL_REQUEST 0x2 | ||
238 | #define SBP2_STATUS_VENDOR_DEPENDENT 0x3 | ||
239 | |||
240 | #define STATUS_GET_ORB_HIGH(v) ((v).status & 0xffff) | ||
241 | #define STATUS_GET_SBP_STATUS(v) (((v).status >> 16) & 0xff) | ||
242 | #define STATUS_GET_LEN(v) (((v).status >> 24) & 0x07) | ||
243 | #define STATUS_GET_DEAD(v) (((v).status >> 27) & 0x01) | ||
244 | #define STATUS_GET_RESPONSE(v) (((v).status >> 28) & 0x03) | ||
245 | #define STATUS_GET_SOURCE(v) (((v).status >> 30) & 0x03) | ||
246 | #define STATUS_GET_ORB_LOW(v) ((v).orb_low) | ||
247 | #define STATUS_GET_DATA(v) ((v).data) | ||
248 | |||
249 | struct sbp2_status { | ||
250 | u32 status; | ||
251 | u32 orb_low; | ||
252 | u8 data[24]; | ||
253 | }; | ||
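/*
 * Worked example (hypothetical status block, to read the macros above):
 * a first quadlet of 0x090b0000 decodes as
 *
 *	STATUS_GET_SOURCE     = 0      (ORB related)
 *	STATUS_GET_RESPONSE   = 0      (SBP2_STATUS_REQUEST_COMPLETE)
 *	STATUS_GET_DEAD       = 1      (fetch agent went dead)
 *	STATUS_GET_LEN        = 1
 *	STATUS_GET_SBP_STATUS = 0x0b
 *	STATUS_GET_ORB_HIGH   = 0x0000
 */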
254 | |||
255 | struct sbp2_pointer { | ||
256 | __be32 high; | ||
257 | __be32 low; | ||
258 | }; | ||
259 | |||
260 | struct sbp2_orb { | ||
261 | struct fw_transaction t; | ||
262 | struct kref kref; | ||
263 | dma_addr_t request_bus; | ||
264 | int rcode; | ||
265 | struct sbp2_pointer pointer; | ||
266 | void (*callback)(struct sbp2_orb * orb, struct sbp2_status * status); | ||
267 | struct list_head link; | ||
268 | }; | ||
269 | |||
270 | #define MANAGEMENT_ORB_LUN(v) ((v)) | ||
271 | #define MANAGEMENT_ORB_FUNCTION(v) ((v) << 16) | ||
272 | #define MANAGEMENT_ORB_RECONNECT(v) ((v) << 20) | ||
273 | #define MANAGEMENT_ORB_EXCLUSIVE(v) ((v) ? 1 << 28 : 0) | ||
274 | #define MANAGEMENT_ORB_REQUEST_FORMAT(v) ((v) << 29) | ||
275 | #define MANAGEMENT_ORB_NOTIFY ((1) << 31) | ||
276 | |||
277 | #define MANAGEMENT_ORB_RESPONSE_LENGTH(v) ((v)) | ||
278 | #define MANAGEMENT_ORB_PASSWORD_LENGTH(v) ((v) << 16) | ||
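/*
 * Illustration (mirrors what sbp2_send_management_orb() builds below):
 * the misc word of an exclusive login ORB for LUN 0 with a 2^2 s
 * reconnect hold is
 *
 *	MANAGEMENT_ORB_NOTIFY |
 *	MANAGEMENT_ORB_FUNCTION(SBP2_LOGIN_REQUEST) |
 *	MANAGEMENT_ORB_RECONNECT(2) |
 *	MANAGEMENT_ORB_EXCLUSIVE(1) |
 *	MANAGEMENT_ORB_LUN(0)
 *
 * i.e. 0x80000000 | 0 | 0x00200000 | 0x10000000 | 0 == 0x90200000,
 * stored big-endian with cpu_to_be32().
 */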
279 | |||
280 | struct sbp2_management_orb { | ||
281 | struct sbp2_orb base; | ||
282 | struct { | ||
283 | struct sbp2_pointer password; | ||
284 | struct sbp2_pointer response; | ||
285 | __be32 misc; | ||
286 | __be32 length; | ||
287 | struct sbp2_pointer status_fifo; | ||
288 | } request; | ||
289 | __be32 response[4]; | ||
290 | dma_addr_t response_bus; | ||
291 | struct completion done; | ||
292 | struct sbp2_status status; | ||
293 | }; | ||
294 | |||
295 | struct sbp2_login_response { | ||
296 | __be32 misc; | ||
297 | struct sbp2_pointer command_block_agent; | ||
298 | __be32 reconnect_hold; | ||
299 | }; | ||
300 | #define COMMAND_ORB_DATA_SIZE(v) ((v)) | ||
301 | #define COMMAND_ORB_PAGE_SIZE(v) ((v) << 16) | ||
302 | #define COMMAND_ORB_PAGE_TABLE_PRESENT ((1) << 19) | ||
303 | #define COMMAND_ORB_MAX_PAYLOAD(v) ((v) << 20) | ||
304 | #define COMMAND_ORB_SPEED(v) ((v) << 24) | ||
305 | #define COMMAND_ORB_DIRECTION ((1) << 27) | ||
306 | #define COMMAND_ORB_REQUEST_FORMAT(v) ((v) << 29) | ||
307 | #define COMMAND_ORB_NOTIFY ((1) << 31) | ||
308 | |||
309 | struct sbp2_command_orb { | ||
310 | struct sbp2_orb base; | ||
311 | struct { | ||
312 | struct sbp2_pointer next; | ||
313 | struct sbp2_pointer data_descriptor; | ||
314 | __be32 misc; | ||
315 | u8 command_block[12]; | ||
316 | } request; | ||
317 | struct scsi_cmnd *cmd; | ||
318 | scsi_done_fn_t done; | ||
319 | struct sbp2_logical_unit *lu; | ||
320 | |||
321 | struct sbp2_pointer page_table[SG_ALL] __attribute__((aligned(8))); | ||
322 | dma_addr_t page_table_bus; | ||
323 | }; | ||
324 | |||
325 | #define SBP2_ROM_VALUE_WILDCARD ~0 /* match all */ | ||
326 | #define SBP2_ROM_VALUE_MISSING 0xff000000 /* not present in the unit dir. */ | ||
327 | |||
328 | /* | ||
329 | * List of devices with known bugs. | ||
330 | * | ||
331 | * The firmware_revision field, masked with 0xffff00, is the best | ||
332 | * indicator for the type of bridge chip of a device. It yields a few | ||
333 | * false positives but has not broken correctly behaving devices ||
334 | * so far. | ||
335 | */ | ||
336 | static const struct { | ||
337 | u32 firmware_revision; | ||
338 | u32 model; | ||
339 | unsigned int workarounds; | ||
340 | } sbp2_workarounds_table[] = { | ||
341 | /* DViCO Momobay CX-1 with TSB42AA9 bridge */ { | ||
342 | .firmware_revision = 0x002800, | ||
343 | .model = 0x001010, | ||
344 | .workarounds = SBP2_WORKAROUND_INQUIRY_36 | | ||
345 | SBP2_WORKAROUND_MODE_SENSE_8 | | ||
346 | SBP2_WORKAROUND_POWER_CONDITION, | ||
347 | }, | ||
348 | /* DViCO Momobay FX-3A with TSB42AA9A bridge */ { | ||
349 | .firmware_revision = 0x002800, | ||
350 | .model = 0x000000, | ||
351 | .workarounds = SBP2_WORKAROUND_DELAY_INQUIRY | | ||
352 | SBP2_WORKAROUND_POWER_CONDITION, | ||
353 | }, | ||
354 | /* Initio bridges, actually only needed for some older ones */ { | ||
355 | .firmware_revision = 0x000200, | ||
356 | .model = SBP2_ROM_VALUE_WILDCARD, | ||
357 | .workarounds = SBP2_WORKAROUND_INQUIRY_36, | ||
358 | }, | ||
359 | /* PL-3507 bridge with Prolific firmware */ { | ||
360 | .firmware_revision = 0x012800, | ||
361 | .model = SBP2_ROM_VALUE_WILDCARD, | ||
362 | .workarounds = SBP2_WORKAROUND_POWER_CONDITION, | ||
363 | }, | ||
364 | /* Symbios bridge */ { | ||
365 | .firmware_revision = 0xa0b800, | ||
366 | .model = SBP2_ROM_VALUE_WILDCARD, | ||
367 | .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS, | ||
368 | }, | ||
369 | /* Datafab MD2-FW2 with Symbios/LSILogic SYM13FW500 bridge */ { | ||
370 | .firmware_revision = 0x002600, | ||
371 | .model = SBP2_ROM_VALUE_WILDCARD, | ||
372 | .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS, | ||
373 | }, | ||
374 | /* | ||
375 | * iPod 2nd generation: needs 128k max transfer size workaround | ||
376 | * iPod 3rd generation: needs fix capacity workaround | ||
377 | */ | ||
378 | { | ||
379 | .firmware_revision = 0x0a2700, | ||
380 | .model = 0x000000, | ||
381 | .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS | | ||
382 | SBP2_WORKAROUND_FIX_CAPACITY, | ||
383 | }, | ||
384 | /* iPod 4th generation */ { | ||
385 | .firmware_revision = 0x0a2700, | ||
386 | .model = 0x000021, | ||
387 | .workarounds = SBP2_WORKAROUND_FIX_CAPACITY, | ||
388 | }, | ||
389 | /* iPod mini */ { | ||
390 | .firmware_revision = 0x0a2700, | ||
391 | .model = 0x000022, | ||
392 | .workarounds = SBP2_WORKAROUND_FIX_CAPACITY, | ||
393 | }, | ||
394 | /* iPod mini */ { | ||
395 | .firmware_revision = 0x0a2700, | ||
396 | .model = 0x000023, | ||
397 | .workarounds = SBP2_WORKAROUND_FIX_CAPACITY, | ||
398 | }, | ||
399 | /* iPod Photo */ { | ||
400 | .firmware_revision = 0x0a2700, | ||
401 | .model = 0x00007e, | ||
402 | .workarounds = SBP2_WORKAROUND_FIX_CAPACITY, | ||
403 | } | ||
404 | }; | ||
405 | |||
406 | static void free_orb(struct kref *kref) | ||
407 | { | ||
408 | struct sbp2_orb *orb = container_of(kref, struct sbp2_orb, kref); | ||
409 | |||
410 | kfree(orb); | ||
411 | } | ||
412 | |||
413 | static void sbp2_status_write(struct fw_card *card, struct fw_request *request, | ||
414 | int tcode, int destination, int source, | ||
415 | int generation, int speed, | ||
416 | unsigned long long offset, | ||
417 | void *payload, size_t length, void *callback_data) | ||
418 | { | ||
419 | struct sbp2_logical_unit *lu = callback_data; | ||
420 | struct sbp2_orb *orb; | ||
421 | struct sbp2_status status; | ||
422 | size_t header_size; | ||
423 | unsigned long flags; | ||
424 | |||
425 | if (tcode != TCODE_WRITE_BLOCK_REQUEST || | ||
426 | length == 0 || length > sizeof(status)) { | ||
427 | fw_send_response(card, request, RCODE_TYPE_ERROR); | ||
428 | return; | ||
429 | } | ||
430 | |||
431 | header_size = min(length, 2 * sizeof(u32)); | ||
432 | fw_memcpy_from_be32(&status, payload, header_size); | ||
433 | if (length > header_size) | ||
434 | memcpy(status.data, payload + 8, length - header_size); | ||
435 | if (STATUS_GET_SOURCE(status) == 2 || STATUS_GET_SOURCE(status) == 3) { | ||
436 | fw_notify("non-orb related status write, not handled\n"); | ||
437 | fw_send_response(card, request, RCODE_COMPLETE); | ||
438 | return; | ||
439 | } | ||
440 | |||
441 | /* Lookup the orb corresponding to this status write. */ | ||
442 | spin_lock_irqsave(&card->lock, flags); | ||
443 | list_for_each_entry(orb, &lu->orb_list, link) { | ||
444 | if (STATUS_GET_ORB_HIGH(status) == 0 && | ||
445 | STATUS_GET_ORB_LOW(status) == orb->request_bus) { | ||
446 | orb->rcode = RCODE_COMPLETE; | ||
447 | list_del(&orb->link); | ||
448 | break; | ||
449 | } | ||
450 | } | ||
451 | spin_unlock_irqrestore(&card->lock, flags); | ||
452 | |||
453 | if (&orb->link != &lu->orb_list) | ||
454 | orb->callback(orb, &status); | ||
455 | else | ||
456 | fw_error("status write for unknown orb\n"); | ||
457 | |||
458 | kref_put(&orb->kref, free_orb); | ||
459 | |||
460 | fw_send_response(card, request, RCODE_COMPLETE); | ||
461 | } | ||
462 | |||
463 | static void complete_transaction(struct fw_card *card, int rcode, | ||
464 | void *payload, size_t length, void *data) | ||
465 | { | ||
466 | struct sbp2_orb *orb = data; | ||
467 | unsigned long flags; | ||
468 | |||
469 | /* | ||
470 | * This is a little tricky. We can get the status write for | ||
471 | * the orb before we get this callback. The status write | ||
472 | * handler above will assume the orb pointer transaction was | ||
473 | * successful and set the rcode to RCODE_COMPLETE for the orb. | ||
474 | * So this callback only sets the rcode if it hasn't already | ||
475 | * been set and only does the cleanup if the transaction | ||
476 | * failed and we didn't already get a status write. | ||
477 | */ | ||
478 | spin_lock_irqsave(&card->lock, flags); | ||
479 | |||
480 | if (orb->rcode == -1) | ||
481 | orb->rcode = rcode; | ||
482 | if (orb->rcode != RCODE_COMPLETE) { | ||
483 | list_del(&orb->link); | ||
484 | spin_unlock_irqrestore(&card->lock, flags); | ||
485 | orb->callback(orb, NULL); | ||
486 | } else { | ||
487 | spin_unlock_irqrestore(&card->lock, flags); | ||
488 | } | ||
489 | |||
490 | kref_put(&orb->kref, free_orb); | ||
491 | } | ||
492 | |||
493 | static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu, | ||
494 | int node_id, int generation, u64 offset) | ||
495 | { | ||
496 | struct fw_device *device = target_device(lu->tgt); | ||
497 | unsigned long flags; | ||
498 | |||
499 | orb->pointer.high = 0; | ||
500 | orb->pointer.low = cpu_to_be32(orb->request_bus); | ||
501 | |||
502 | spin_lock_irqsave(&device->card->lock, flags); | ||
503 | list_add_tail(&orb->link, &lu->orb_list); | ||
504 | spin_unlock_irqrestore(&device->card->lock, flags); | ||
505 | |||
506 | /* Take a ref for the orb list and for the transaction callback. */ | ||
507 | kref_get(&orb->kref); | ||
508 | kref_get(&orb->kref); | ||
509 | |||
510 | fw_send_request(device->card, &orb->t, TCODE_WRITE_BLOCK_REQUEST, | ||
511 | node_id, generation, device->max_speed, offset, | ||
512 | &orb->pointer, sizeof(orb->pointer), | ||
513 | complete_transaction, orb); | ||
514 | } | ||
515 | |||
516 | static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu) | ||
517 | { | ||
518 | struct fw_device *device = target_device(lu->tgt); | ||
519 | struct sbp2_orb *orb, *next; | ||
520 | struct list_head list; | ||
521 | unsigned long flags; | ||
522 | int retval = -ENOENT; | ||
523 | |||
524 | INIT_LIST_HEAD(&list); | ||
525 | spin_lock_irqsave(&device->card->lock, flags); | ||
526 | list_splice_init(&lu->orb_list, &list); | ||
527 | spin_unlock_irqrestore(&device->card->lock, flags); | ||
528 | |||
529 | list_for_each_entry_safe(orb, next, &list, link) { | ||
530 | retval = 0; | ||
531 | if (fw_cancel_transaction(device->card, &orb->t) == 0) | ||
532 | continue; | ||
533 | |||
534 | orb->rcode = RCODE_CANCELLED; | ||
535 | orb->callback(orb, NULL); | ||
536 | } | ||
537 | |||
538 | return retval; | ||
539 | } | ||
540 | |||
541 | static void complete_management_orb(struct sbp2_orb *base_orb, | ||
542 | struct sbp2_status *status) | ||
543 | { | ||
544 | struct sbp2_management_orb *orb = | ||
545 | container_of(base_orb, struct sbp2_management_orb, base); | ||
546 | |||
547 | if (status) | ||
548 | memcpy(&orb->status, status, sizeof(*status)); | ||
549 | complete(&orb->done); | ||
550 | } | ||
551 | |||
552 | static int sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id, | ||
553 | int generation, int function, | ||
554 | int lun_or_login_id, void *response) | ||
555 | { | ||
556 | struct fw_device *device = target_device(lu->tgt); | ||
557 | struct sbp2_management_orb *orb; | ||
558 | unsigned int timeout; | ||
559 | int retval = -ENOMEM; | ||
560 | |||
561 | if (function == SBP2_LOGOUT_REQUEST && fw_device_is_shutdown(device)) | ||
562 | return 0; | ||
563 | |||
564 | orb = kzalloc(sizeof(*orb), GFP_ATOMIC); | ||
565 | if (orb == NULL) | ||
566 | return -ENOMEM; | ||
567 | |||
568 | kref_init(&orb->base.kref); | ||
569 | orb->response_bus = | ||
570 | dma_map_single(device->card->device, &orb->response, | ||
571 | sizeof(orb->response), DMA_FROM_DEVICE); | ||
572 | if (dma_mapping_error(device->card->device, orb->response_bus)) | ||
573 | goto fail_mapping_response; | ||
574 | |||
575 | orb->request.response.high = 0; | ||
576 | orb->request.response.low = cpu_to_be32(orb->response_bus); | ||
577 | |||
578 | orb->request.misc = cpu_to_be32( | ||
579 | MANAGEMENT_ORB_NOTIFY | | ||
580 | MANAGEMENT_ORB_FUNCTION(function) | | ||
581 | MANAGEMENT_ORB_LUN(lun_or_login_id)); | ||
582 | orb->request.length = cpu_to_be32( | ||
583 | MANAGEMENT_ORB_RESPONSE_LENGTH(sizeof(orb->response))); | ||
584 | |||
585 | orb->request.status_fifo.high = | ||
586 | cpu_to_be32(lu->address_handler.offset >> 32); | ||
587 | orb->request.status_fifo.low = | ||
588 | cpu_to_be32(lu->address_handler.offset); | ||
589 | |||
590 | if (function == SBP2_LOGIN_REQUEST) { | ||
591 | /* Ask for 2^2 == 4 seconds reconnect grace period */ | ||
592 | orb->request.misc |= cpu_to_be32( | ||
593 | MANAGEMENT_ORB_RECONNECT(2) | | ||
594 | MANAGEMENT_ORB_EXCLUSIVE(sbp2_param_exclusive_login)); | ||
595 | timeout = lu->tgt->mgt_orb_timeout; | ||
596 | } else { | ||
597 | timeout = SBP2_ORB_TIMEOUT; | ||
598 | } | ||
599 | |||
600 | init_completion(&orb->done); | ||
601 | orb->base.callback = complete_management_orb; | ||
602 | |||
603 | orb->base.request_bus = | ||
604 | dma_map_single(device->card->device, &orb->request, | ||
605 | sizeof(orb->request), DMA_TO_DEVICE); | ||
606 | if (dma_mapping_error(device->card->device, orb->base.request_bus)) | ||
607 | goto fail_mapping_request; | ||
608 | |||
609 | sbp2_send_orb(&orb->base, lu, node_id, generation, | ||
610 | lu->tgt->management_agent_address); | ||
611 | |||
612 | wait_for_completion_timeout(&orb->done, msecs_to_jiffies(timeout)); | ||
613 | |||
614 | retval = -EIO; | ||
615 | if (sbp2_cancel_orbs(lu) == 0) { | ||
616 | fw_error("%s: orb reply timed out, rcode=0x%02x\n", | ||
617 | lu->tgt->bus_id, orb->base.rcode); | ||
618 | goto out; | ||
619 | } | ||
620 | |||
621 | if (orb->base.rcode != RCODE_COMPLETE) { | ||
622 | fw_error("%s: management write failed, rcode 0x%02x\n", | ||
623 | lu->tgt->bus_id, orb->base.rcode); | ||
624 | goto out; | ||
625 | } | ||
626 | |||
627 | if (STATUS_GET_RESPONSE(orb->status) != 0 || | ||
628 | STATUS_GET_SBP_STATUS(orb->status) != 0) { | ||
629 | fw_error("%s: error status: %d:%d\n", lu->tgt->bus_id, | ||
630 | STATUS_GET_RESPONSE(orb->status), | ||
631 | STATUS_GET_SBP_STATUS(orb->status)); | ||
632 | goto out; | ||
633 | } | ||
634 | |||
635 | retval = 0; | ||
636 | out: | ||
637 | dma_unmap_single(device->card->device, orb->base.request_bus, | ||
638 | sizeof(orb->request), DMA_TO_DEVICE); | ||
639 | fail_mapping_request: | ||
640 | dma_unmap_single(device->card->device, orb->response_bus, | ||
641 | sizeof(orb->response), DMA_FROM_DEVICE); | ||
642 | fail_mapping_response: | ||
643 | if (response) | ||
644 | memcpy(response, orb->response, sizeof(orb->response)); | ||
645 | kref_put(&orb->base.kref, free_orb); | ||
646 | |||
647 | return retval; | ||
648 | } | ||
649 | |||
650 | static void sbp2_agent_reset(struct sbp2_logical_unit *lu) | ||
651 | { | ||
652 | struct fw_device *device = target_device(lu->tgt); | ||
653 | __be32 d = 0; | ||
654 | |||
655 | fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST, | ||
656 | lu->tgt->node_id, lu->generation, device->max_speed, | ||
657 | lu->command_block_agent_address + SBP2_AGENT_RESET, | ||
658 | &d, sizeof(d)); | ||
659 | } | ||
660 | |||
661 | static void complete_agent_reset_write_no_wait(struct fw_card *card, | ||
662 | int rcode, void *payload, size_t length, void *data) | ||
663 | { | ||
664 | kfree(data); | ||
665 | } | ||
666 | |||
667 | static void sbp2_agent_reset_no_wait(struct sbp2_logical_unit *lu) | ||
668 | { | ||
669 | struct fw_device *device = target_device(lu->tgt); | ||
670 | struct fw_transaction *t; | ||
671 | static __be32 d; | ||
672 | |||
673 | t = kmalloc(sizeof(*t), GFP_ATOMIC); | ||
674 | if (t == NULL) | ||
675 | return; | ||
676 | |||
677 | fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST, | ||
678 | lu->tgt->node_id, lu->generation, device->max_speed, | ||
679 | lu->command_block_agent_address + SBP2_AGENT_RESET, | ||
680 | &d, sizeof(d), complete_agent_reset_write_no_wait, t); | ||
681 | } | ||
682 | |||
683 | static inline void sbp2_allow_block(struct sbp2_logical_unit *lu) | ||
684 | { | ||
685 | /* | ||
686 | * We may access dont_block without taking card->lock here: | ||
687 | * All callers of sbp2_allow_block() and all callers of sbp2_unblock() | ||
688 | * are currently serialized against each other. | ||
689 | * And a wrong result in sbp2_conditionally_block()'s access of | ||
690 | * dont_block is rather harmless; it simply misses its first chance. ||
691 | */ | ||
692 | --lu->tgt->dont_block; | ||
693 | } | ||
694 | |||
695 | /* | ||
696 | * Blocks lu->tgt if all of the following conditions are met: | ||
697 | * - Login, INQUIRY, and high-level SCSI setup of all of the target's | ||
698 | * logical units have been finished (indicated by dont_block == 0). | ||
699 | * - lu->generation is stale. | ||
700 | * | ||
701 | * Note, scsi_block_requests() must be called while holding card->lock, | ||
702 | * otherwise it might foil sbp2_[conditionally_]unblock()'s attempt to | ||
703 | * unblock the target. | ||
704 | */ | ||
705 | static void sbp2_conditionally_block(struct sbp2_logical_unit *lu) | ||
706 | { | ||
707 | struct sbp2_target *tgt = lu->tgt; | ||
708 | struct fw_card *card = target_device(tgt)->card; | ||
709 | struct Scsi_Host *shost = | ||
710 | container_of((void *)tgt, struct Scsi_Host, hostdata[0]); | ||
711 | unsigned long flags; | ||
712 | |||
713 | spin_lock_irqsave(&card->lock, flags); | ||
714 | if (!tgt->dont_block && !lu->blocked && | ||
715 | lu->generation != card->generation) { | ||
716 | lu->blocked = true; | ||
717 | if (++tgt->blocked == 1) | ||
718 | scsi_block_requests(shost); | ||
719 | } | ||
720 | spin_unlock_irqrestore(&card->lock, flags); | ||
721 | } | ||
722 | |||
723 | /* | ||
724 | * Unblocks lu->tgt as soon as all its logical units can be unblocked. | ||
725 | * Note, it is harmless to run scsi_unblock_requests() outside the | ||
726 | * card->lock protected section. On the other hand, running it inside | ||
727 | * the section might clash with shost->host_lock. | ||
728 | */ | ||
729 | static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu) | ||
730 | { | ||
731 | struct sbp2_target *tgt = lu->tgt; | ||
732 | struct fw_card *card = target_device(tgt)->card; | ||
733 | struct Scsi_Host *shost = | ||
734 | container_of((void *)tgt, struct Scsi_Host, hostdata[0]); | ||
735 | unsigned long flags; | ||
736 | bool unblock = false; | ||
737 | |||
738 | spin_lock_irqsave(&card->lock, flags); | ||
739 | if (lu->blocked && lu->generation == card->generation) { | ||
740 | lu->blocked = false; | ||
741 | unblock = --tgt->blocked == 0; | ||
742 | } | ||
743 | spin_unlock_irqrestore(&card->lock, flags); | ||
744 | |||
745 | if (unblock) | ||
746 | scsi_unblock_requests(shost); | ||
747 | } | ||
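/*
 * Illustration: with two logical units behind one bridge, tgt->blocked
 * counts 0 -> 1 -> 2 as each stale unit blocks itself after a bus reset.
 * scsi_block_requests() runs only on the 0 -> 1 transition above, and
 * scsi_unblock_requests() only once the count drops back to 0 here.
 */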
748 | |||
749 | /* | ||
750 | * Prevents future blocking of tgt and unblocks it. | ||
751 | * Note, it is harmless to run scsi_unblock_requests() outside the | ||
752 | * card->lock protected section. On the other hand, running it inside | ||
753 | * the section might clash with shost->host_lock. | ||
754 | */ | ||
755 | static void sbp2_unblock(struct sbp2_target *tgt) | ||
756 | { | ||
757 | struct fw_card *card = target_device(tgt)->card; | ||
758 | struct Scsi_Host *shost = | ||
759 | container_of((void *)tgt, struct Scsi_Host, hostdata[0]); | ||
760 | unsigned long flags; | ||
761 | |||
762 | spin_lock_irqsave(&card->lock, flags); | ||
763 | ++tgt->dont_block; | ||
764 | spin_unlock_irqrestore(&card->lock, flags); | ||
765 | |||
766 | scsi_unblock_requests(shost); | ||
767 | } | ||
768 | |||
769 | static int sbp2_lun2int(u16 lun) | ||
770 | { | ||
771 | struct scsi_lun eight_bytes_lun; | ||
772 | |||
773 | memset(&eight_bytes_lun, 0, sizeof(eight_bytes_lun)); | ||
774 | eight_bytes_lun.scsi_lun[0] = (lun >> 8) & 0xff; | ||
775 | eight_bytes_lun.scsi_lun[1] = lun & 0xff; | ||
776 | |||
777 | return scsilun_to_int(&eight_bytes_lun); | ||
778 | } | ||
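/*
 * Example: an SBP-2 LUN entry of 0x0123 is placed as scsi_lun[0] = 0x01,
 * scsi_lun[1] = 0x23; scsilun_to_int() then folds these first-level bytes
 * back into the integer 0x0123 used by the SCSI midlayer.
 */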
779 | |||
780 | static void sbp2_release_target(struct kref *kref) | ||
781 | { | ||
782 | struct sbp2_target *tgt = container_of(kref, struct sbp2_target, kref); | ||
783 | struct sbp2_logical_unit *lu, *next; | ||
784 | struct Scsi_Host *shost = | ||
785 | container_of((void *)tgt, struct Scsi_Host, hostdata[0]); | ||
786 | struct scsi_device *sdev; | ||
787 | struct fw_device *device = target_device(tgt); | ||
788 | |||
789 | /* prevent deadlocks */ | ||
790 | sbp2_unblock(tgt); | ||
791 | |||
792 | list_for_each_entry_safe(lu, next, &tgt->lu_list, link) { | ||
793 | sdev = scsi_device_lookup(shost, 0, 0, sbp2_lun2int(lu->lun)); | ||
794 | if (sdev) { | ||
795 | scsi_remove_device(sdev); | ||
796 | scsi_device_put(sdev); | ||
797 | } | ||
798 | if (lu->login_id != INVALID_LOGIN_ID) { | ||
799 | int generation, node_id; | ||
800 | /* | ||
801 | * tgt->node_id may be obsolete here if we failed | ||
802 | * during initial login or after a bus reset where | ||
803 | * the topology changed. | ||
804 | */ | ||
805 | generation = device->generation; | ||
806 | smp_rmb(); /* node_id vs. generation */ | ||
807 | node_id = device->node_id; | ||
808 | sbp2_send_management_orb(lu, node_id, generation, | ||
809 | SBP2_LOGOUT_REQUEST, | ||
810 | lu->login_id, NULL); | ||
811 | } | ||
812 | fw_core_remove_address_handler(&lu->address_handler); | ||
813 | list_del(&lu->link); | ||
814 | kfree(lu); | ||
815 | } | ||
816 | scsi_remove_host(shost); | ||
817 | fw_notify("released %s, target %d:0:0\n", tgt->bus_id, shost->host_no); | ||
818 | |||
819 | fw_unit_put(tgt->unit); | ||
820 | scsi_host_put(shost); | ||
821 | fw_device_put(device); | ||
822 | } | ||
823 | |||
824 | static struct workqueue_struct *sbp2_wq; | ||
825 | |||
826 | static void sbp2_target_put(struct sbp2_target *tgt) | ||
827 | { | ||
828 | kref_put(&tgt->kref, sbp2_release_target); | ||
829 | } | ||
830 | |||
831 | /* | ||
832 | * Always get the target's kref when scheduling work on one of its units. ||
833 | * Each workqueue job is responsible for calling sbp2_target_put() upon return. ||
834 | */ | ||
835 | static void sbp2_queue_work(struct sbp2_logical_unit *lu, unsigned long delay) | ||
836 | { | ||
837 | kref_get(&lu->tgt->kref); | ||
838 | if (!queue_delayed_work(sbp2_wq, &lu->work, delay)) | ||
839 | sbp2_target_put(lu->tgt); | ||
840 | } | ||
841 | |||
842 | /* | ||
843 | * Write retransmit retry values into the BUSY_TIMEOUT register. | ||
844 | * - The single-phase retry protocol is supported by all SBP-2 devices, but the | ||
845 | * default retry_limit value is 0 (i.e. never retry transmission). We write a | ||
846 | * saner value after logging into the device. | ||
847 | * - The dual-phase retry protocol is optional to implement, and if not | ||
848 | * supported, writes to the dual-phase portion of the register will be | ||
849 | * ignored. We try to write the original 1394-1995 default here. | ||
850 | * - In the case of devices that are also SBP-3-compliant, all writes are | ||
851 | * ignored, as the register is read-only, but contains single-phase retry of | ||
852 | * 15, which is what we're trying to set for all SBP-2 devices anyway, so this ||
853 | * write attempt is safe and yields more consistent behavior for all devices. | ||
854 | * | ||
855 | * See section 8.3.2.3.5 of the 1394-1995 spec, section 6.2 of the SBP-2 spec, | ||
856 | * and section 6.4 of the SBP-3 spec for further details. | ||
857 | */ | ||
858 | static void sbp2_set_busy_timeout(struct sbp2_logical_unit *lu) | ||
859 | { | ||
860 | struct fw_device *device = target_device(lu->tgt); | ||
861 | __be32 d = cpu_to_be32(SBP2_CYCLE_LIMIT | SBP2_RETRY_LIMIT); | ||
862 | |||
863 | fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST, | ||
864 | lu->tgt->node_id, lu->generation, device->max_speed, | ||
865 | CSR_REGISTER_BASE + CSR_BUSY_TIMEOUT, | ||
866 | &d, sizeof(d)); | ||
867 | } | ||
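/*
 * With the defines above, the quadlet written here is
 * (0xc8 << 12) | 0xf == 0x000c800f: a retry_limit of 15 in the low nibble
 * plus the 1394-1995 default cycle_limit of 200 placed at bit 12.
 */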
868 | |||
869 | static void sbp2_reconnect(struct work_struct *work); | ||
870 | |||
871 | static void sbp2_login(struct work_struct *work) | ||
872 | { | ||
873 | struct sbp2_logical_unit *lu = | ||
874 | container_of(work, struct sbp2_logical_unit, work.work); | ||
875 | struct sbp2_target *tgt = lu->tgt; | ||
876 | struct fw_device *device = target_device(tgt); | ||
877 | struct Scsi_Host *shost; | ||
878 | struct scsi_device *sdev; | ||
879 | struct sbp2_login_response response; | ||
880 | int generation, node_id, local_node_id; | ||
881 | |||
882 | if (fw_device_is_shutdown(device)) | ||
883 | goto out; | ||
884 | |||
885 | generation = device->generation; | ||
886 | smp_rmb(); /* node IDs must not be older than generation */ | ||
887 | node_id = device->node_id; | ||
888 | local_node_id = device->card->node_id; | ||
889 | |||
890 | /* If this is a re-login attempt, log out, or we might be rejected. */ | ||
891 | if (lu->has_sdev) | ||
892 | sbp2_send_management_orb(lu, device->node_id, generation, | ||
893 | SBP2_LOGOUT_REQUEST, lu->login_id, NULL); | ||
894 | |||
895 | if (sbp2_send_management_orb(lu, node_id, generation, | ||
896 | SBP2_LOGIN_REQUEST, lu->lun, &response) < 0) { | ||
897 | if (lu->retries++ < 5) { | ||
898 | sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5)); | ||
899 | } else { | ||
900 | fw_error("%s: failed to login to LUN %04x\n", | ||
901 | tgt->bus_id, lu->lun); | ||
902 | /* Let any waiting I/O fail from now on. */ | ||
903 | sbp2_unblock(lu->tgt); | ||
904 | } | ||
905 | goto out; | ||
906 | } | ||
907 | |||
908 | tgt->node_id = node_id; | ||
909 | tgt->address_high = local_node_id << 16; | ||
910 | smp_wmb(); /* node IDs must not be older than generation */ | ||
911 | lu->generation = generation; | ||
912 | |||
913 | lu->command_block_agent_address = | ||
914 | ((u64)(be32_to_cpu(response.command_block_agent.high) & 0xffff) | ||
915 | << 32) | be32_to_cpu(response.command_block_agent.low); | ||
916 | lu->login_id = be32_to_cpu(response.misc) & 0xffff; | ||
917 | |||
918 | fw_notify("%s: logged in to LUN %04x (%d retries)\n", | ||
919 | tgt->bus_id, lu->lun, lu->retries); | ||
920 | |||
921 | /* set appropriate retry limit(s) in BUSY_TIMEOUT register */ | ||
922 | sbp2_set_busy_timeout(lu); | ||
923 | |||
924 | PREPARE_DELAYED_WORK(&lu->work, sbp2_reconnect); | ||
925 | sbp2_agent_reset(lu); | ||
926 | |||
927 | /* This was a re-login. */ | ||
928 | if (lu->has_sdev) { | ||
929 | sbp2_cancel_orbs(lu); | ||
930 | sbp2_conditionally_unblock(lu); | ||
931 | goto out; | ||
932 | } | ||
933 | |||
934 | if (lu->tgt->workarounds & SBP2_WORKAROUND_DELAY_INQUIRY) | ||
935 | ssleep(SBP2_INQUIRY_DELAY); | ||
936 | |||
937 | shost = container_of((void *)tgt, struct Scsi_Host, hostdata[0]); | ||
938 | sdev = __scsi_add_device(shost, 0, 0, sbp2_lun2int(lu->lun), lu); | ||
939 | /* | ||
940 | * FIXME: We are unable to perform reconnects while in sbp2_login(). | ||
941 | * Therefore __scsi_add_device() will get into trouble if a bus reset | ||
942 | * happens in parallel. It will either fail or leave us with an | ||
943 | * unusable sdev. As a workaround we check for this and retry the | ||
944 | * whole login and SCSI probing. | ||
945 | */ | ||
946 | |||
947 | /* Reported error during __scsi_add_device() */ | ||
948 | if (IS_ERR(sdev)) | ||
949 | goto out_logout_login; | ||
950 | |||
951 | /* Unreported error during __scsi_add_device() */ | ||
952 | smp_rmb(); /* get current card generation */ | ||
953 | if (generation != device->card->generation) { | ||
954 | scsi_remove_device(sdev); | ||
955 | scsi_device_put(sdev); | ||
956 | goto out_logout_login; | ||
957 | } | ||
958 | |||
959 | /* No error during __scsi_add_device() */ | ||
960 | lu->has_sdev = true; | ||
961 | scsi_device_put(sdev); | ||
962 | sbp2_allow_block(lu); | ||
963 | goto out; | ||
964 | |||
965 | out_logout_login: | ||
966 | smp_rmb(); /* generation may have changed */ | ||
967 | generation = device->generation; | ||
968 | smp_rmb(); /* node_id must not be older than generation */ | ||
969 | |||
970 | sbp2_send_management_orb(lu, device->node_id, generation, | ||
971 | SBP2_LOGOUT_REQUEST, lu->login_id, NULL); | ||
972 | /* | ||
973 | * If a bus reset happened, sbp2_update will have requeued | ||
974 | * lu->work already. Reset the work from reconnect to login. | ||
975 | */ | ||
976 | PREPARE_DELAYED_WORK(&lu->work, sbp2_login); | ||
977 | out: | ||
978 | sbp2_target_put(tgt); | ||
979 | } | ||
980 | |||
981 | static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry) | ||
982 | { | ||
983 | struct sbp2_logical_unit *lu; | ||
984 | |||
985 | lu = kmalloc(sizeof(*lu), GFP_KERNEL); | ||
986 | if (!lu) | ||
987 | return -ENOMEM; | ||
988 | |||
989 | lu->address_handler.length = 0x100; | ||
990 | lu->address_handler.address_callback = sbp2_status_write; | ||
991 | lu->address_handler.callback_data = lu; | ||
992 | |||
993 | if (fw_core_add_address_handler(&lu->address_handler, | ||
994 | &fw_high_memory_region) < 0) { | ||
995 | kfree(lu); | ||
996 | return -ENOMEM; | ||
997 | } | ||
998 | |||
999 | lu->tgt = tgt; | ||
1000 | lu->lun = lun_entry & 0xffff; | ||
1001 | lu->login_id = INVALID_LOGIN_ID; | ||
1002 | lu->retries = 0; | ||
1003 | lu->has_sdev = false; | ||
1004 | lu->blocked = false; | ||
1005 | ++tgt->dont_block; | ||
1006 | INIT_LIST_HEAD(&lu->orb_list); | ||
1007 | INIT_DELAYED_WORK(&lu->work, sbp2_login); | ||
1008 | |||
1009 | list_add_tail(&lu->link, &tgt->lu_list); | ||
1010 | return 0; | ||
1011 | } | ||
1012 | |||
1013 | static int sbp2_scan_logical_unit_dir(struct sbp2_target *tgt, u32 *directory) | ||
1014 | { | ||
1015 | struct fw_csr_iterator ci; | ||
1016 | int key, value; | ||
1017 | |||
1018 | fw_csr_iterator_init(&ci, directory); | ||
1019 | while (fw_csr_iterator_next(&ci, &key, &value)) | ||
1020 | if (key == SBP2_CSR_LOGICAL_UNIT_NUMBER && | ||
1021 | sbp2_add_logical_unit(tgt, value) < 0) | ||
1022 | return -ENOMEM; | ||
1023 | return 0; | ||
1024 | } | ||
1025 | |||
1026 | static int sbp2_scan_unit_dir(struct sbp2_target *tgt, u32 *directory, | ||
1027 | u32 *model, u32 *firmware_revision) | ||
1028 | { | ||
1029 | struct fw_csr_iterator ci; | ||
1030 | int key, value; | ||
1031 | unsigned int timeout; | ||
1032 | |||
1033 | fw_csr_iterator_init(&ci, directory); | ||
1034 | while (fw_csr_iterator_next(&ci, &key, &value)) { | ||
1035 | switch (key) { | ||
1036 | |||
1037 | case CSR_DEPENDENT_INFO | CSR_OFFSET: | ||
1038 | tgt->management_agent_address = | ||
1039 | CSR_REGISTER_BASE + 4 * value; | ||
1040 | break; | ||
1041 | |||
1042 | case CSR_DIRECTORY_ID: | ||
1043 | tgt->directory_id = value; | ||
1044 | break; | ||
1045 | |||
1046 | case CSR_MODEL: | ||
1047 | *model = value; | ||
1048 | break; | ||
1049 | |||
1050 | case SBP2_CSR_FIRMWARE_REVISION: | ||
1051 | *firmware_revision = value; | ||
1052 | break; | ||
1053 | |||
1054 | case SBP2_CSR_UNIT_CHARACTERISTICS: | ||
1055 | /* the timeout value is stored in 500ms units */ | ||
1056 | timeout = ((unsigned int) value >> 8 & 0xff) * 500; | ||
1057 | timeout = max(timeout, SBP2_MIN_LOGIN_ORB_TIMEOUT); | ||
1058 | tgt->mgt_orb_timeout = | ||
1059 | min(timeout, SBP2_MAX_LOGIN_ORB_TIMEOUT); | ||
1060 | |||
1061 | if (timeout > tgt->mgt_orb_timeout) | ||
1062 | fw_notify("%s: config rom contains %ds " | ||
1063 | "management ORB timeout, limiting " | ||
1064 | "to %ds\n", tgt->bus_id, | ||
1065 | timeout / 1000, | ||
1066 | tgt->mgt_orb_timeout / 1000); | ||
1067 | break; | ||
1068 | |||
1069 | case SBP2_CSR_LOGICAL_UNIT_NUMBER: | ||
1070 | if (sbp2_add_logical_unit(tgt, value) < 0) | ||
1071 | return -ENOMEM; | ||
1072 | break; | ||
1073 | |||
1074 | case SBP2_CSR_LOGICAL_UNIT_DIRECTORY: | ||
1075 | /* Adjust for the increment in the iterator */ | ||
1076 | if (sbp2_scan_logical_unit_dir(tgt, ci.p - 1 + value) < 0) | ||
1077 | return -ENOMEM; | ||
1078 | break; | ||
1079 | } | ||
1080 | } | ||
1081 | return 0; | ||
1082 | } | ||
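/*
 * Worked example for the unit-characteristics handling above (hypothetical
 * config-rom values): a mgt_ORB_timeout field of 0x0e gives 14 * 500 =
 * 7000 ms and is kept as-is; 0x04 gives 2000 ms and is raised to
 * SBP2_MIN_LOGIN_ORB_TIMEOUT (5000 ms); 0xff gives 127500 ms, is capped to
 * SBP2_MAX_LOGIN_ORB_TIMEOUT (40000 ms), and triggers the fw_notify()
 * message.
 */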
1083 | |||
1084 | static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model, | ||
1085 | u32 firmware_revision) | ||
1086 | { | ||
1087 | int i; | ||
1088 | unsigned int w = sbp2_param_workarounds; | ||
1089 | |||
1090 | if (w) | ||
1091 | fw_notify("Please notify linux1394-devel@lists.sourceforge.net " | ||
1092 | "if you need the workarounds parameter for %s\n", | ||
1093 | tgt->bus_id); | ||
1094 | |||
1095 | if (w & SBP2_WORKAROUND_OVERRIDE) | ||
1096 | goto out; | ||
1097 | |||
1098 | for (i = 0; i < ARRAY_SIZE(sbp2_workarounds_table); i++) { | ||
1099 | |||
1100 | if (sbp2_workarounds_table[i].firmware_revision != | ||
1101 | (firmware_revision & 0xffffff00)) | ||
1102 | continue; | ||
1103 | |||
1104 | if (sbp2_workarounds_table[i].model != model && | ||
1105 | sbp2_workarounds_table[i].model != SBP2_ROM_VALUE_WILDCARD) | ||
1106 | continue; | ||
1107 | |||
1108 | w |= sbp2_workarounds_table[i].workarounds; | ||
1109 | break; | ||
1110 | } | ||
1111 | out: | ||
1112 | if (w) | ||
1113 | fw_notify("Workarounds for %s: 0x%x " | ||
1114 | "(firmware_revision 0x%06x, model_id 0x%06x)\n", | ||
1115 | tgt->bus_id, w, firmware_revision, model); | ||
1116 | tgt->workarounds = w; | ||
1117 | } | ||
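/*
 * Worked example (hypothetical ROM values): a unit reporting
 * firmware_revision 0x0a2745 and model 0x000021 is masked to 0x0a2700,
 * which together with the model selects the "iPod 4th generation" entry
 * above, so SBP2_WORKAROUND_FIX_CAPACITY ends up set in tgt->workarounds.
 */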
1118 | |||
1119 | static struct scsi_host_template scsi_driver_template; | ||
1120 | |||
1121 | static int sbp2_probe(struct device *dev) | ||
1122 | { | ||
1123 | struct fw_unit *unit = fw_unit(dev); | ||
1124 | struct fw_device *device = fw_parent_device(unit); | ||
1125 | struct sbp2_target *tgt; | ||
1126 | struct sbp2_logical_unit *lu; | ||
1127 | struct Scsi_Host *shost; | ||
1128 | u32 model, firmware_revision; | ||
1129 | |||
1130 | if (dma_get_max_seg_size(device->card->device) > SBP2_MAX_SEG_SIZE) | ||
1131 | BUG_ON(dma_set_max_seg_size(device->card->device, | ||
1132 | SBP2_MAX_SEG_SIZE)); | ||
1133 | |||
1134 | shost = scsi_host_alloc(&scsi_driver_template, sizeof(*tgt)); | ||
1135 | if (shost == NULL) | ||
1136 | return -ENOMEM; | ||
1137 | |||
1138 | tgt = (struct sbp2_target *)shost->hostdata; | ||
1139 | dev_set_drvdata(&unit->device, tgt); | ||
1140 | tgt->unit = unit; | ||
1141 | kref_init(&tgt->kref); | ||
1142 | INIT_LIST_HEAD(&tgt->lu_list); | ||
1143 | tgt->bus_id = dev_name(&unit->device); | ||
1144 | tgt->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4]; | ||
1145 | |||
1146 | if (fw_device_enable_phys_dma(device) < 0) | ||
1147 | goto fail_shost_put; | ||
1148 | |||
1149 | if (scsi_add_host(shost, &unit->device) < 0) | ||
1150 | goto fail_shost_put; | ||
1151 | |||
1152 | fw_device_get(device); | ||
1153 | fw_unit_get(unit); | ||
1154 | |||
1155 | /* implicit directory ID */ | ||
1156 | tgt->directory_id = ((unit->directory - device->config_rom) * 4 | ||
1157 | + CSR_CONFIG_ROM) & 0xffffff; | ||
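	/*
	 * For illustration: if the unit directory starts 66 quadlets into
	 * the config ROM (a hypothetical offset), the implicit directory ID
	 * is 66 * 4 + CSR_CONFIG_ROM (0x400) == 0x508.
	 */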
1158 | |||
1159 | firmware_revision = SBP2_ROM_VALUE_MISSING; | ||
1160 | model = SBP2_ROM_VALUE_MISSING; | ||
1161 | |||
1162 | if (sbp2_scan_unit_dir(tgt, unit->directory, &model, | ||
1163 | &firmware_revision) < 0) | ||
1164 | goto fail_tgt_put; | ||
1165 | |||
1166 | sbp2_init_workarounds(tgt, model, firmware_revision); | ||
1167 | |||
1168 | /* | ||
1169 | * At S100 we can do 512 bytes per packet, at S200 1024 bytes, | ||
1170 | * and so on up to 4096 bytes. The SBP-2 max_payload field | ||
1171 | * specifies the max payload size as 2 ^ (max_payload + 2), so | ||
1172 | * if we set this to max_speed + 7, we get the right value. | ||
1173 | */ | ||
1174 | tgt->max_payload = min(device->max_speed + 7, 10U); | ||
1175 | tgt->max_payload = min(tgt->max_payload, device->card->max_receive - 1); | ||
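	/*
	 * Example: an S400 device (max_speed == 2) yields min(2 + 7, 10U)
	 * == 9, i.e. 2^(9 + 2) == 2048-byte data payloads; only S800 and
	 * faster devices reach the cap of 10 (4096 bytes), and the second
	 * min() lowers the value further if the local controller's
	 * max_receive is small.
	 */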
1176 | |||
1177 | /* Do the login in a workqueue so we can easily reschedule retries. */ | ||
1178 | list_for_each_entry(lu, &tgt->lu_list, link) | ||
1179 | sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5)); | ||
1180 | return 0; | ||
1181 | |||
1182 | fail_tgt_put: | ||
1183 | sbp2_target_put(tgt); | ||
1184 | return -ENOMEM; | ||
1185 | |||
1186 | fail_shost_put: | ||
1187 | scsi_host_put(shost); | ||
1188 | return -ENOMEM; | ||
1189 | } | ||
1190 | |||
1191 | static int sbp2_remove(struct device *dev) | ||
1192 | { | ||
1193 | struct fw_unit *unit = fw_unit(dev); | ||
1194 | struct sbp2_target *tgt = dev_get_drvdata(&unit->device); | ||
1195 | |||
1196 | sbp2_target_put(tgt); | ||
1197 | return 0; | ||
1198 | } | ||
1199 | |||
1200 | static void sbp2_reconnect(struct work_struct *work) | ||
1201 | { | ||
1202 | struct sbp2_logical_unit *lu = | ||
1203 | container_of(work, struct sbp2_logical_unit, work.work); | ||
1204 | struct sbp2_target *tgt = lu->tgt; | ||
1205 | struct fw_device *device = target_device(tgt); | ||
1206 | int generation, node_id, local_node_id; | ||
1207 | |||
1208 | if (fw_device_is_shutdown(device)) | ||
1209 | goto out; | ||
1210 | |||
1211 | generation = device->generation; | ||
1212 | smp_rmb(); /* node IDs must not be older than generation */ | ||
1213 | node_id = device->node_id; | ||
1214 | local_node_id = device->card->node_id; | ||
1215 | |||
1216 | if (sbp2_send_management_orb(lu, node_id, generation, | ||
1217 | SBP2_RECONNECT_REQUEST, | ||
1218 | lu->login_id, NULL) < 0) { | ||
1219 | /* | ||
1220 | * If reconnect was impossible even though we are in the | ||
1221 | * current generation, fall back and try to log in again. | ||
1222 | * | ||
1223 | * We could check for "Function rejected" status, but | ||
1224 | * looking at the bus generation is simpler and more general. ||
1225 | */ | ||
1226 | smp_rmb(); /* get current card generation */ | ||
1227 | if (generation == device->card->generation || | ||
1228 | lu->retries++ >= 5) { | ||
1229 | fw_error("%s: failed to reconnect\n", tgt->bus_id); | ||
1230 | lu->retries = 0; | ||
1231 | PREPARE_DELAYED_WORK(&lu->work, sbp2_login); | ||
1232 | } | ||
1233 | sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5)); | ||
1234 | goto out; | ||
1235 | } | ||
1236 | |||
1237 | tgt->node_id = node_id; | ||
1238 | tgt->address_high = local_node_id << 16; | ||
1239 | smp_wmb(); /* node IDs must not be older than generation */ | ||
1240 | lu->generation = generation; | ||
1241 | |||
1242 | fw_notify("%s: reconnected to LUN %04x (%d retries)\n", | ||
1243 | tgt->bus_id, lu->lun, lu->retries); | ||
1244 | |||
1245 | sbp2_agent_reset(lu); | ||
1246 | sbp2_cancel_orbs(lu); | ||
1247 | sbp2_conditionally_unblock(lu); | ||
1248 | out: | ||
1249 | sbp2_target_put(tgt); | ||
1250 | } | ||
1251 | |||
1252 | static void sbp2_update(struct fw_unit *unit) | ||
1253 | { | ||
1254 | struct sbp2_target *tgt = dev_get_drvdata(&unit->device); | ||
1255 | struct sbp2_logical_unit *lu; | ||
1256 | |||
1257 | fw_device_enable_phys_dma(fw_parent_device(unit)); | ||
1258 | |||
1259 | /* | ||
1260 | * Fw-core serializes sbp2_update() against sbp2_remove(). | ||
1261 | * Iteration over tgt->lu_list is therefore safe here. | ||
1262 | */ | ||
1263 | list_for_each_entry(lu, &tgt->lu_list, link) { | ||
1264 | sbp2_conditionally_block(lu); | ||
1265 | lu->retries = 0; | ||
1266 | sbp2_queue_work(lu, 0); | ||
1267 | } | ||
1268 | } | ||
1269 | |||
1270 | #define SBP2_UNIT_SPEC_ID_ENTRY 0x0000609e | ||
1271 | #define SBP2_SW_VERSION_ENTRY 0x00010483 | ||
1272 | |||
1273 | static const struct ieee1394_device_id sbp2_id_table[] = { | ||
1274 | { | ||
1275 | .match_flags = IEEE1394_MATCH_SPECIFIER_ID | | ||
1276 | IEEE1394_MATCH_VERSION, | ||
1277 | .specifier_id = SBP2_UNIT_SPEC_ID_ENTRY, | ||
1278 | .version = SBP2_SW_VERSION_ENTRY, | ||
1279 | }, | ||
1280 | { } | ||
1281 | }; | ||
1282 | |||
1283 | static struct fw_driver sbp2_driver = { | ||
1284 | .driver = { | ||
1285 | .owner = THIS_MODULE, | ||
1286 | .name = sbp2_driver_name, | ||
1287 | .bus = &fw_bus_type, | ||
1288 | .probe = sbp2_probe, | ||
1289 | .remove = sbp2_remove, | ||
1290 | }, | ||
1291 | .update = sbp2_update, | ||
1292 | .id_table = sbp2_id_table, | ||
1293 | }; | ||
1294 | |||
1295 | static void sbp2_unmap_scatterlist(struct device *card_device, | ||
1296 | struct sbp2_command_orb *orb) | ||
1297 | { | ||
1298 | if (scsi_sg_count(orb->cmd)) | ||
1299 | dma_unmap_sg(card_device, scsi_sglist(orb->cmd), | ||
1300 | scsi_sg_count(orb->cmd), | ||
1301 | orb->cmd->sc_data_direction); | ||
1302 | |||
1303 | if (orb->request.misc & cpu_to_be32(COMMAND_ORB_PAGE_TABLE_PRESENT)) | ||
1304 | dma_unmap_single(card_device, orb->page_table_bus, | ||
1305 | sizeof(orb->page_table), DMA_TO_DEVICE); | ||
1306 | } | ||
1307 | |||
1308 | static unsigned int sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data) | ||
1309 | { | ||
1310 | int sam_status; | ||
1311 | |||
1312 | sense_data[0] = 0x70; | ||
1313 | sense_data[1] = 0x0; | ||
1314 | sense_data[2] = sbp2_status[1]; | ||
1315 | sense_data[3] = sbp2_status[4]; | ||
1316 | sense_data[4] = sbp2_status[5]; | ||
1317 | sense_data[5] = sbp2_status[6]; | ||
1318 | sense_data[6] = sbp2_status[7]; | ||
1319 | sense_data[7] = 10; | ||
1320 | sense_data[8] = sbp2_status[8]; | ||
1321 | sense_data[9] = sbp2_status[9]; | ||
1322 | sense_data[10] = sbp2_status[10]; | ||
1323 | sense_data[11] = sbp2_status[11]; | ||
1324 | sense_data[12] = sbp2_status[2]; | ||
1325 | sense_data[13] = sbp2_status[3]; | ||
1326 | sense_data[14] = sbp2_status[12]; | ||
1327 | sense_data[15] = sbp2_status[13]; | ||
1328 | |||
1329 | sam_status = sbp2_status[0] & 0x3f; | ||
1330 | |||
1331 | switch (sam_status) { | ||
1332 | case SAM_STAT_GOOD: | ||
1333 | case SAM_STAT_CHECK_CONDITION: | ||
1334 | case SAM_STAT_CONDITION_MET: | ||
1335 | case SAM_STAT_BUSY: | ||
1336 | case SAM_STAT_RESERVATION_CONFLICT: | ||
1337 | case SAM_STAT_COMMAND_TERMINATED: | ||
1338 | return DID_OK << 16 | sam_status; | ||
1339 | |||
1340 | default: | ||
1341 | return DID_ERROR << 16; | ||
1342 | } | ||
1343 | } | ||
1344 | |||
1345 | static void complete_command_orb(struct sbp2_orb *base_orb, | ||
1346 | struct sbp2_status *status) | ||
1347 | { | ||
1348 | struct sbp2_command_orb *orb = | ||
1349 | container_of(base_orb, struct sbp2_command_orb, base); | ||
1350 | struct fw_device *device = target_device(orb->lu->tgt); | ||
1351 | int result; | ||
1352 | |||
1353 | if (status != NULL) { | ||
1354 | if (STATUS_GET_DEAD(*status)) | ||
1355 | sbp2_agent_reset_no_wait(orb->lu); | ||
1356 | |||
1357 | switch (STATUS_GET_RESPONSE(*status)) { | ||
1358 | case SBP2_STATUS_REQUEST_COMPLETE: | ||
1359 | result = DID_OK << 16; | ||
1360 | break; | ||
1361 | case SBP2_STATUS_TRANSPORT_FAILURE: | ||
1362 | result = DID_BUS_BUSY << 16; | ||
1363 | break; | ||
1364 | case SBP2_STATUS_ILLEGAL_REQUEST: | ||
1365 | case SBP2_STATUS_VENDOR_DEPENDENT: | ||
1366 | default: | ||
1367 | result = DID_ERROR << 16; | ||
1368 | break; | ||
1369 | } | ||
1370 | |||
1371 | if (result == DID_OK << 16 && STATUS_GET_LEN(*status) > 1) | ||
1372 | result = sbp2_status_to_sense_data(STATUS_GET_DATA(*status), | ||
1373 | orb->cmd->sense_buffer); | ||
1374 | } else { | ||
1375 | /* | ||
1376 | * If the orb completes with status == NULL, something | ||
1377 | * went wrong, typically a bus reset happened mid-orb | ||
1378 | * or when sending the write (less likely). | ||
1379 | */ | ||
1380 | result = DID_BUS_BUSY << 16; | ||
1381 | sbp2_conditionally_block(orb->lu); | ||
1382 | } | ||
1383 | |||
1384 | dma_unmap_single(device->card->device, orb->base.request_bus, | ||
1385 | sizeof(orb->request), DMA_TO_DEVICE); | ||
1386 | sbp2_unmap_scatterlist(device->card->device, orb); | ||
1387 | |||
1388 | orb->cmd->result = result; | ||
1389 | orb->done(orb->cmd); | ||
1390 | } | ||
1391 | |||
1392 | static int sbp2_map_scatterlist(struct sbp2_command_orb *orb, | ||
1393 | struct fw_device *device, struct sbp2_logical_unit *lu) | ||
1394 | { | ||
1395 | struct scatterlist *sg = scsi_sglist(orb->cmd); | ||
1396 | int i, n; | ||
1397 | |||
1398 | n = dma_map_sg(device->card->device, sg, scsi_sg_count(orb->cmd), | ||
1399 | orb->cmd->sc_data_direction); | ||
1400 | if (n == 0) | ||
1401 | goto fail; | ||
1402 | |||
1403 | /* | ||
1404 | * Handle the special case where there is only one element in | ||
1405 | * the scatter list by converting it to an immediate block | ||
1406 | * request. This is also a workaround for broken devices such | ||
1407 | * as the second generation iPod which doesn't support page | ||
1408 | * tables. | ||
1409 | */ | ||
1410 | if (n == 1) { | ||
1411 | orb->request.data_descriptor.high = | ||
1412 | cpu_to_be32(lu->tgt->address_high); | ||
1413 | orb->request.data_descriptor.low = | ||
1414 | cpu_to_be32(sg_dma_address(sg)); | ||
1415 | orb->request.misc |= | ||
1416 | cpu_to_be32(COMMAND_ORB_DATA_SIZE(sg_dma_len(sg))); | ||
1417 | return 0; | ||
1418 | } | ||
1419 | |||
1420 | for_each_sg(sg, sg, n, i) { | ||
1421 | orb->page_table[i].high = cpu_to_be32(sg_dma_len(sg) << 16); | ||
1422 | orb->page_table[i].low = cpu_to_be32(sg_dma_address(sg)); | ||
1423 | } | ||
1424 | |||
1425 | orb->page_table_bus = | ||
1426 | dma_map_single(device->card->device, orb->page_table, | ||
1427 | sizeof(orb->page_table), DMA_TO_DEVICE); | ||
1428 | if (dma_mapping_error(device->card->device, orb->page_table_bus)) | ||
1429 | goto fail_page_table; | ||
1430 | |||
1431 | /* | ||
1432 | * The data_descriptor pointer is the one case where we need | ||
1433 | * to fill in the node ID part of the address. All other | ||
1434 | * pointers assume that the data referenced reside on the | ||
1435 | * initiator (i.e. us), but data_descriptor can refer to data | ||
1436 | * on other nodes so we need to put our ID in descriptor.high. | ||
1437 | */ | ||
1438 | orb->request.data_descriptor.high = cpu_to_be32(lu->tgt->address_high); | ||
1439 | orb->request.data_descriptor.low = cpu_to_be32(orb->page_table_bus); | ||
1440 | orb->request.misc |= cpu_to_be32(COMMAND_ORB_PAGE_TABLE_PRESENT | | ||
1441 | COMMAND_ORB_DATA_SIZE(n)); | ||
1442 | |||
1443 | return 0; | ||
1444 | |||
1445 | fail_page_table: | ||
1446 | dma_unmap_sg(device->card->device, scsi_sglist(orb->cmd), | ||
1447 | scsi_sg_count(orb->cmd), orb->cmd->sc_data_direction); | ||
1448 | fail: | ||
1449 | return -ENOMEM; | ||
1450 | } | ||
1451 | |||
1452 | /* SCSI stack integration */ | ||
1453 | |||
1454 | static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done) | ||
1455 | { | ||
1456 | struct sbp2_logical_unit *lu = cmd->device->hostdata; | ||
1457 | struct fw_device *device = target_device(lu->tgt); | ||
1458 | struct sbp2_command_orb *orb; | ||
1459 | int generation, retval = SCSI_MLQUEUE_HOST_BUSY; | ||
1460 | |||
1461 | /* | ||
1462 | * Bidirectional commands are not yet implemented, and unknown | ||
1463 | * transfer direction not handled. | ||
1464 | */ | ||
1465 | if (cmd->sc_data_direction == DMA_BIDIRECTIONAL) { | ||
1466 | fw_error("Can't handle DMA_BIDIRECTIONAL, rejecting command\n"); | ||
1467 | cmd->result = DID_ERROR << 16; | ||
1468 | done(cmd); | ||
1469 | return 0; | ||
1470 | } | ||
1471 | |||
1472 | orb = kzalloc(sizeof(*orb), GFP_ATOMIC); | ||
1473 | if (orb == NULL) { | ||
1474 | fw_notify("failed to alloc orb\n"); | ||
1475 | return SCSI_MLQUEUE_HOST_BUSY; | ||
1476 | } | ||
1477 | |||
1478 | /* Initialize rcode to something not RCODE_COMPLETE. */ | ||
1479 | orb->base.rcode = -1; | ||
1480 | kref_init(&orb->base.kref); | ||
1481 | |||
1482 | orb->lu = lu; | ||
1483 | orb->done = done; | ||
1484 | orb->cmd = cmd; | ||
1485 | |||
1486 | orb->request.next.high = cpu_to_be32(SBP2_ORB_NULL); | ||
1487 | orb->request.misc = cpu_to_be32( | ||
1488 | COMMAND_ORB_MAX_PAYLOAD(lu->tgt->max_payload) | | ||
1489 | COMMAND_ORB_SPEED(device->max_speed) | | ||
1490 | COMMAND_ORB_NOTIFY); | ||
1491 | |||
1492 | if (cmd->sc_data_direction == DMA_FROM_DEVICE) | ||
1493 | orb->request.misc |= cpu_to_be32(COMMAND_ORB_DIRECTION); | ||
1494 | |||
1495 | generation = device->generation; | ||
1496 | smp_rmb(); /* sbp2_map_scatterlist looks at tgt->address_high */ | ||
1497 | |||
1498 | if (scsi_sg_count(cmd) && sbp2_map_scatterlist(orb, device, lu) < 0) | ||
1499 | goto out; | ||
1500 | |||
1501 | memcpy(orb->request.command_block, cmd->cmnd, cmd->cmd_len); | ||
1502 | |||
1503 | orb->base.callback = complete_command_orb; | ||
1504 | orb->base.request_bus = | ||
1505 | dma_map_single(device->card->device, &orb->request, | ||
1506 | sizeof(orb->request), DMA_TO_DEVICE); | ||
1507 | if (dma_mapping_error(device->card->device, orb->base.request_bus)) { | ||
1508 | sbp2_unmap_scatterlist(device->card->device, orb); | ||
1509 | goto out; | ||
1510 | } | ||
1511 | |||
1512 | sbp2_send_orb(&orb->base, lu, lu->tgt->node_id, generation, | ||
1513 | lu->command_block_agent_address + SBP2_ORB_POINTER); | ||
1514 | retval = 0; | ||
1515 | out: | ||
1516 | kref_put(&orb->base.kref, free_orb); | ||
1517 | return retval; | ||
1518 | } | ||
1519 | |||
1520 | static int sbp2_scsi_slave_alloc(struct scsi_device *sdev) | ||
1521 | { | ||
1522 | struct sbp2_logical_unit *lu = sdev->hostdata; | ||
1523 | |||
1524 | /* (Re-)Adding logical units via the SCSI stack is not supported. */ | ||
1525 | if (!lu) | ||
1526 | return -ENOSYS; | ||
1527 | |||
1528 | sdev->allow_restart = 1; | ||
1529 | |||
1530 | /* SBP-2 requires quadlet alignment of the data buffers. */ | ||
1531 | blk_queue_update_dma_alignment(sdev->request_queue, 4 - 1); | ||
1532 | |||
1533 | if (lu->tgt->workarounds & SBP2_WORKAROUND_INQUIRY_36) | ||
1534 | sdev->inquiry_len = 36; | ||
1535 | |||
1536 | return 0; | ||
1537 | } | ||
1538 | |||
1539 | static int sbp2_scsi_slave_configure(struct scsi_device *sdev) | ||
1540 | { | ||
1541 | struct sbp2_logical_unit *lu = sdev->hostdata; | ||
1542 | |||
1543 | sdev->use_10_for_rw = 1; | ||
1544 | |||
1545 | if (sbp2_param_exclusive_login) | ||
1546 | sdev->manage_start_stop = 1; | ||
1547 | |||
1548 | if (sdev->type == TYPE_ROM) | ||
1549 | sdev->use_10_for_ms = 1; | ||
1550 | |||
1551 | if (sdev->type == TYPE_DISK && | ||
1552 | lu->tgt->workarounds & SBP2_WORKAROUND_MODE_SENSE_8) | ||
1553 | sdev->skip_ms_page_8 = 1; | ||
1554 | |||
1555 | if (lu->tgt->workarounds & SBP2_WORKAROUND_FIX_CAPACITY) | ||
1556 | sdev->fix_capacity = 1; | ||
1557 | |||
1558 | if (lu->tgt->workarounds & SBP2_WORKAROUND_POWER_CONDITION) | ||
1559 | sdev->start_stop_pwr_cond = 1; | ||
1560 | |||
1561 | if (lu->tgt->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS) | ||
1562 | blk_queue_max_sectors(sdev->request_queue, 128 * 1024 / 512); | ||
1563 | |||
1564 | blk_queue_max_segment_size(sdev->request_queue, SBP2_MAX_SEG_SIZE); | ||
1565 | |||
1566 | return 0; | ||
1567 | } | ||
1568 | |||
1569 | /* | ||
1570 | * Called by scsi stack when something has really gone wrong. Usually | ||
1571 | * called when a command has timed-out for some reason. | ||
1572 | */ | ||
1573 | static int sbp2_scsi_abort(struct scsi_cmnd *cmd) | ||
1574 | { | ||
1575 | struct sbp2_logical_unit *lu = cmd->device->hostdata; | ||
1576 | |||
1577 | fw_notify("%s: sbp2_scsi_abort\n", lu->tgt->bus_id); | ||
1578 | sbp2_agent_reset(lu); | ||
1579 | sbp2_cancel_orbs(lu); | ||
1580 | |||
1581 | return SUCCESS; | ||
1582 | } | ||
1583 | |||
1584 | /* | ||
1585 | * Format of /sys/bus/scsi/devices/.../ieee1394_id: | ||
1586 | * u64 EUI-64 : u24 directory_ID : u16 LUN (all printed in hexadecimal) | ||
1587 | * | ||
1588 | * This is the concatenation of target port identifier and logical unit | ||
1589 | * identifier as per SAM-2...SAM-4 annex A. | ||
1590 | */ | ||
1591 | static ssize_t sbp2_sysfs_ieee1394_id_show(struct device *dev, | ||
1592 | struct device_attribute *attr, char *buf) | ||
1593 | { | ||
1594 | struct scsi_device *sdev = to_scsi_device(dev); | ||
1595 | struct sbp2_logical_unit *lu; | ||
1596 | |||
1597 | if (!sdev) | ||
1598 | return 0; | ||
1599 | |||
1600 | lu = sdev->hostdata; | ||
1601 | |||
1602 | return sprintf(buf, "%016llx:%06x:%04x\n", | ||
1603 | (unsigned long long)lu->tgt->guid, | ||
1604 | lu->tgt->directory_id, lu->lun); | ||
1605 | } | ||
1606 | |||
1607 | static DEVICE_ATTR(ieee1394_id, S_IRUGO, sbp2_sysfs_ieee1394_id_show, NULL); | ||
1608 | |||
1609 | static struct device_attribute *sbp2_scsi_sysfs_attrs[] = { | ||
1610 | &dev_attr_ieee1394_id, | ||
1611 | NULL | ||
1612 | }; | ||
1613 | |||
1614 | static struct scsi_host_template scsi_driver_template = { | ||
1615 | .module = THIS_MODULE, | ||
1616 | .name = "SBP-2 IEEE-1394", | ||
1617 | .proc_name = sbp2_driver_name, | ||
1618 | .queuecommand = sbp2_scsi_queuecommand, | ||
1619 | .slave_alloc = sbp2_scsi_slave_alloc, | ||
1620 | .slave_configure = sbp2_scsi_slave_configure, | ||
1621 | .eh_abort_handler = sbp2_scsi_abort, | ||
1622 | .this_id = -1, | ||
1623 | .sg_tablesize = SG_ALL, | ||
1624 | .use_clustering = ENABLE_CLUSTERING, | ||
1625 | .cmd_per_lun = 1, | ||
1626 | .can_queue = 1, | ||
1627 | .sdev_attrs = sbp2_scsi_sysfs_attrs, | ||
1628 | }; | ||
1629 | |||
1630 | MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>"); | ||
1631 | MODULE_DESCRIPTION("SCSI over IEEE1394"); | ||
1632 | MODULE_LICENSE("GPL"); | ||
1633 | MODULE_DEVICE_TABLE(ieee1394, sbp2_id_table); | ||
1634 | |||
1635 | /* Provide a module alias so root-on-sbp2 initrds don't break. */ | ||
1636 | #ifndef CONFIG_IEEE1394_SBP2_MODULE | ||
1637 | MODULE_ALIAS("sbp2"); | ||
1638 | #endif | ||
1639 | |||
1640 | static int __init sbp2_init(void) | ||
1641 | { | ||
1642 | sbp2_wq = create_singlethread_workqueue(KBUILD_MODNAME); | ||
1643 | if (!sbp2_wq) | ||
1644 | return -ENOMEM; | ||
1645 | |||
1646 | return driver_register(&sbp2_driver.driver); | ||
1647 | } | ||
1648 | |||
1649 | static void __exit sbp2_cleanup(void) | ||
1650 | { | ||
1651 | driver_unregister(&sbp2_driver.driver); | ||
1652 | destroy_workqueue(sbp2_wq); | ||
1653 | } | ||
1654 | |||
1655 | module_init(sbp2_init); | ||
1656 | module_exit(sbp2_cleanup); | ||