author     Linus Torvalds <torvalds@ppc970.osdl.org>    2005-04-16 18:20:36 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>    2005-04-16 18:20:36 -0400
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/scsi/ibmvscsi
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'drivers/scsi/ibmvscsi')
-rw-r--r--   drivers/scsi/ibmvscsi/Makefile            5
-rw-r--r--   drivers/scsi/ibmvscsi/ibmvscsi.c       1473
-rw-r--r--   drivers/scsi/ibmvscsi/ibmvscsi.h        109
-rw-r--r--   drivers/scsi/ibmvscsi/iseries_vscsi.c   144
-rw-r--r--   drivers/scsi/ibmvscsi/rpa_vscsi.c       260
-rw-r--r--   drivers/scsi/ibmvscsi/srp.h             225
-rw-r--r--   drivers/scsi/ibmvscsi/viosrp.h          126
7 files changed, 2342 insertions, 0 deletions
diff --git a/drivers/scsi/ibmvscsi/Makefile b/drivers/scsi/ibmvscsi/Makefile
new file mode 100644
index 000000000000..4e247b6b8700
--- /dev/null
+++ b/drivers/scsi/ibmvscsi/Makefile
@@ -0,0 +1,5 @@
1 | obj-$(CONFIG_SCSI_IBMVSCSI) += ibmvscsic.o | ||
2 | |||
3 | ibmvscsic-y += ibmvscsi.o | ||
4 | ibmvscsic-$(CONFIG_PPC_ISERIES) += iseries_vscsi.o | ||
5 | ibmvscsic-$(CONFIG_PPC_PSERIES) += rpa_vscsi.o | ||
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
new file mode 100644
index 000000000000..e89f76e5dd53
--- /dev/null
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -0,0 +1,1473 @@
1 | /* ------------------------------------------------------------ | ||
2 | * ibmvscsi.c | ||
3 | * (C) Copyright IBM Corporation 1994, 2004 | ||
4 | * Authors: Colin DeVilbiss (devilbis@us.ibm.com) | ||
5 | * Santiago Leon (santil@us.ibm.com) | ||
6 | * Dave Boutcher (sleddog@us.ibm.com) | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 | ||
21 | * USA | ||
22 | * | ||
23 | * ------------------------------------------------------------ | ||
24 | * Emulation of a SCSI host adapter for Virtual I/O devices | ||
25 | * | ||
26 | * This driver supports the SCSI adapter implemented by the IBM | ||
27 | * Power5 firmware. That SCSI adapter is not a physical adapter, | ||
28 | * but allows Linux SCSI peripheral drivers to directly | ||
29 | * access devices in another logical partition on the physical system. | ||
30 | * | ||
31 | * The virtual adapter(s) are present in the open firmware device | ||
32 | * tree just like real adapters. | ||
33 | * | ||
34 | * One of the capabilities provided on these systems is the ability | ||
35 | * to DMA between partitions. The architecture states that for VSCSI, | ||
36 | * the server side is allowed to DMA to and from the client. The client | ||
37 | * is never trusted to DMA to or from the server directly. | ||
38 | * | ||
39 | * Messages are sent between partitions on a "Command/Response Queue" | ||
40 | * (CRQ), which is just a buffer of 16 byte entries in the receiver's | ||
41 | * memory. Senders cannot access the buffer directly, but send messages by | ||
42 | * making a hypervisor call and passing in the 16 bytes. The hypervisor | ||
43 | * puts the message in the next 16 byte space in round-robin fashion, | ||
44 | * turns on the high order bit of the message (the valid bit), and | ||
45 | * generates an interrupt to the receiver (if interrupts are turned on.) | ||
46 | * The receiver just turns off the valid bit when they have copied out | ||
47 | * the message. | ||
48 | * | ||
49 | * The VSCSI client builds a SCSI Remote Protocol (SRP) Information Unit | ||
50 | * (IU) (as defined in the T10 standard available at www.t10.org), gets | ||
51 | * a DMA address for the message, and sends it to the server as the | ||
52 | * payload of a CRQ message. The server DMAs the SRP IU and processes it, | ||
53 | * including doing any additional data transfers. When it is done, it | ||
54 | * DMAs the SRP response back to the same address as the request came from, | ||
55 | * and sends a CRQ message back to inform the client that the request has | ||
56 | * completed. | ||
57 | * | ||
58 | * Note that some of the underlying infrastructure is different between | ||
59 | * machines conforming to the "RS/6000 Platform Architecture" (RPA) and | ||
60 | * the older iSeries hypervisor models. To support both, some low level | ||
61 | * routines have been broken out into rpa_vscsi.c and iseries_vscsi.c. | ||
62 | * The Makefile should pick one, not two, not zero, of these. | ||
63 | * | ||
64 | * TODO: This is currently pretty tied to the IBM i/pSeries hypervisor | ||
65 | * interfaces. It would be really nice to abstract this above an RDMA | ||
66 | * layer. | ||
67 | */ | ||
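For orientation, the 16-byte CRQ entry that this file manipulates (crq->valid, crq->format, crq->timeout, crq->IU_length, crq->IU_data_ptr) and the way it is handed to the hypervisor can be sketched as follows. This is an editorial illustration only: the field widths are assumptions inferred from how the fields are used below, and the authoritative definition is struct viosrp_crq in viosrp.h.

/* Editorial sketch of a CRQ entry; field widths are assumed, see
 * viosrp.h for the real struct viosrp_crq.
 */
struct crq_entry_sketch {
	u8  valid;       /* 0x80 = payload, 0xC0 = init message, 0xFF = link closed */
	u8  format;      /* VIOSRP_SRP_FORMAT or VIOSRP_MAD_FORMAT */
	u8  reserved[2];
	u16 timeout;     /* seconds, set by init_event_struct() */
	u16 IU_length;   /* size of the SRP/MAD IU in the transfer area */
	u64 IU_data_ptr; /* outbound: DMA address of the IU; on responses it
	                  * carries the tag the client placed in the SRP IU */
};

/* Sending an entry is just "pass the 16 bytes to the hypervisor as two
 * u64s", which is what ibmvscsi_send_srp_event() does further down:
 *
 *	u64 *crq_as_u64 = (u64 *)&evt_struct->crq;
 *	ibmvscsi_send_crq(hostdata, crq_as_u64[0], crq_as_u64[1]);
 */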
68 | |||
69 | #include <linux/module.h> | ||
70 | #include <linux/moduleparam.h> | ||
71 | #include <linux/dma-mapping.h> | ||
72 | #include <linux/delay.h> | ||
73 | #include <asm/vio.h> | ||
74 | #include <scsi/scsi.h> | ||
75 | #include <scsi/scsi_cmnd.h> | ||
76 | #include <scsi/scsi_host.h> | ||
77 | #include <scsi/scsi_device.h> | ||
78 | #include "ibmvscsi.h" | ||
79 | |||
80 | /* The values below are somewhat arbitrary default values, but | ||
81 | * OS/400 will use 3 busses (disks, CDs, tapes, I think.) | ||
82 | * Note that there are 3 bits of channel value, 6 bits of id, and | ||
83 | * 5 bits of LUN. | ||
84 | */ | ||
85 | static int max_id = 64; | ||
86 | static int max_channel = 3; | ||
87 | static int init_timeout = 5; | ||
88 | static int max_requests = 50; | ||
89 | |||
90 | #define IBMVSCSI_VERSION "1.5.5" | ||
91 | |||
92 | MODULE_DESCRIPTION("IBM Virtual SCSI"); | ||
93 | MODULE_AUTHOR("Dave Boutcher"); | ||
94 | MODULE_LICENSE("GPL"); | ||
95 | MODULE_VERSION(IBMVSCSI_VERSION); | ||
96 | |||
97 | module_param_named(max_id, max_id, int, S_IRUGO | S_IWUSR); | ||
98 | MODULE_PARM_DESC(max_id, "Largest ID value for each channel"); | ||
99 | module_param_named(max_channel, max_channel, int, S_IRUGO | S_IWUSR); | ||
100 | MODULE_PARM_DESC(max_channel, "Largest channel value"); | ||
101 | module_param_named(init_timeout, init_timeout, int, S_IRUGO | S_IWUSR); | ||
102 | MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds"); | ||
103 | module_param_named(max_requests, max_requests, int, S_IRUGO | S_IWUSR); | ||
104 | MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter"); | ||
105 | |||
106 | /* ------------------------------------------------------------ | ||
107 | * Routines for the event pool and event structs | ||
108 | */ | ||
109 | /** | ||
110 | * initialize_event_pool: - Allocates and initializes the event pool for a host | ||
111 | * @pool: event_pool to be initialized | ||
112 | * @size: Number of events in pool | ||
113 | * @hostdata: ibmvscsi_host_data who owns the event pool | ||
114 | * | ||
115 | * Returns zero on success. | ||
116 | */ | ||
117 | static int initialize_event_pool(struct event_pool *pool, | ||
118 | int size, struct ibmvscsi_host_data *hostdata) | ||
119 | { | ||
120 | int i; | ||
121 | |||
122 | pool->size = size; | ||
123 | pool->next = 0; | ||
124 | pool->events = kmalloc(pool->size * sizeof(*pool->events), GFP_KERNEL); | ||
125 | if (!pool->events) | ||
126 | return -ENOMEM; | ||
127 | memset(pool->events, 0x00, pool->size * sizeof(*pool->events)); | ||
128 | |||
129 | pool->iu_storage = | ||
130 | dma_alloc_coherent(hostdata->dev, | ||
131 | pool->size * sizeof(*pool->iu_storage), | ||
132 | &pool->iu_token, 0); | ||
133 | if (!pool->iu_storage) { | ||
134 | kfree(pool->events); | ||
135 | return -ENOMEM; | ||
136 | } | ||
137 | |||
138 | for (i = 0; i < pool->size; ++i) { | ||
139 | struct srp_event_struct *evt = &pool->events[i]; | ||
140 | memset(&evt->crq, 0x00, sizeof(evt->crq)); | ||
141 | atomic_set(&evt->free, 1); | ||
142 | evt->crq.valid = 0x80; | ||
143 | evt->crq.IU_length = sizeof(*evt->xfer_iu); | ||
144 | evt->crq.IU_data_ptr = pool->iu_token + | ||
145 | sizeof(*evt->xfer_iu) * i; | ||
146 | evt->xfer_iu = pool->iu_storage + i; | ||
147 | evt->hostdata = hostdata; | ||
148 | } | ||
149 | |||
150 | return 0; | ||
151 | } | ||
152 | |||
153 | /** | ||
154 | * release_event_pool: - Frees memory of an event pool of a host | ||
155 | * @pool: event_pool to be released | ||
156 | * @hostdata: ibmvscsi_host_data who owns the event pool | ||
157 | * | ||
158 | * Returns zero on success. | ||
159 | */ | ||
160 | static void release_event_pool(struct event_pool *pool, | ||
161 | struct ibmvscsi_host_data *hostdata) | ||
162 | { | ||
163 | int i, in_use = 0; | ||
164 | for (i = 0; i < pool->size; ++i) | ||
165 | if (atomic_read(&pool->events[i].free) != 1) | ||
166 | ++in_use; | ||
167 | if (in_use) | ||
168 | printk(KERN_WARNING | ||
169 | "ibmvscsi: releasing event pool with %d " | ||
170 | "events still in use?\n", in_use); | ||
171 | kfree(pool->events); | ||
172 | dma_free_coherent(hostdata->dev, | ||
173 | pool->size * sizeof(*pool->iu_storage), | ||
174 | pool->iu_storage, pool->iu_token); | ||
175 | } | ||
176 | |||
177 | /** | ||
178 | * valid_event_struct: - Determines if event is valid. | ||
179 | * @pool: event_pool that contains the event | ||
180 | * @evt: srp_event_struct to be checked for validity | ||
181 | * | ||
182 | * Returns zero if event is invalid, one otherwise. | ||
183 | */ | ||
184 | static int valid_event_struct(struct event_pool *pool, | ||
185 | struct srp_event_struct *evt) | ||
186 | { | ||
187 | int index = evt - pool->events; | ||
188 | if (index < 0 || index >= pool->size) /* outside of bounds */ | ||
189 | return 0; | ||
190 | if (evt != pool->events + index) /* unaligned */ | ||
191 | return 0; | ||
192 | return 1; | ||
193 | } | ||
194 | |||
195 | /** | ||
196 | * free_event_struct: - Changes status of event to "free" | ||
197 | * @pool: event_pool that contains the event | ||
198 | * @evt: srp_event_struct to be modified | ||
199 | * | ||
200 | */ | ||
201 | static void free_event_struct(struct event_pool *pool, | ||
202 | struct srp_event_struct *evt) | ||
203 | { | ||
204 | if (!valid_event_struct(pool, evt)) { | ||
205 | printk(KERN_ERR | ||
206 | "ibmvscsi: Freeing invalid event_struct %p " | ||
207 | "(not in pool %p)\n", evt, pool->events); | ||
208 | return; | ||
209 | } | ||
210 | if (atomic_inc_return(&evt->free) != 1) { | ||
211 | printk(KERN_ERR | ||
212 | "ibmvscsi: Freeing event_struct %p " | ||
213 | "which is not in use!\n", evt); | ||
214 | return; | ||
215 | } | ||
216 | } | ||
217 | |||
218 | /** | ||
219 | * get_event_struct: - Gets the next free event in pool | ||
220 | * @pool: event_pool that contains the events to be searched | ||
221 | * | ||
222 | * Returns the next event in "free" state, and NULL if none are free. | ||
223 | * Note that no synchronization is done here, we assume the host_lock | ||
224 | * will synchronize things. | ||
225 | */ | ||
226 | static struct srp_event_struct *get_event_struct(struct event_pool *pool) | ||
227 | { | ||
228 | int i; | ||
229 | int poolsize = pool->size; | ||
230 | int offset = pool->next; | ||
231 | |||
232 | for (i = 0; i < poolsize; i++) { | ||
233 | offset = (offset + 1) % poolsize; | ||
234 | if (!atomic_dec_if_positive(&pool->events[offset].free)) { | ||
235 | pool->next = offset; | ||
236 | return &pool->events[offset]; | ||
237 | } | ||
238 | } | ||
239 | |||
240 | printk(KERN_ERR "ibmvscsi: found no event struct in pool!\n"); | ||
241 | return NULL; | ||
242 | } | ||
243 | |||
244 | /** | ||
245 | * init_event_struct: Initialize fields in an event struct that are always | ||
246 | * required. | ||
247 | * @evt: The event | ||
248 | * @done: Routine to call when the event is responded to | ||
249 | * @format: SRP or MAD format | ||
250 | * @timeout: timeout value set in the CRQ | ||
251 | */ | ||
252 | static void init_event_struct(struct srp_event_struct *evt_struct, | ||
253 | void (*done) (struct srp_event_struct *), | ||
254 | u8 format, | ||
255 | int timeout) | ||
256 | { | ||
257 | evt_struct->cmnd = NULL; | ||
258 | evt_struct->cmnd_done = NULL; | ||
259 | evt_struct->sync_srp = NULL; | ||
260 | evt_struct->crq.format = format; | ||
261 | evt_struct->crq.timeout = timeout; | ||
262 | evt_struct->done = done; | ||
263 | } | ||
264 | |||
265 | /* ------------------------------------------------------------ | ||
266 | * Routines for receiving SCSI responses from the hosting partition | ||
267 | */ | ||
268 | |||
269 | /** | ||
270 | * set_srp_direction: Set the fields in the srp_cmd related to data | ||
271 | * direction and number of buffers, based on the direction in | ||
272 | * the scsi_cmnd and the number of buffers | ||
273 | */ | ||
274 | static void set_srp_direction(struct scsi_cmnd *cmd, | ||
275 | struct srp_cmd *srp_cmd, | ||
276 | int numbuf) | ||
277 | { | ||
278 | if (numbuf == 0) | ||
279 | return; | ||
280 | |||
281 | if (numbuf == 1) { | ||
282 | if (cmd->sc_data_direction == DMA_TO_DEVICE) | ||
283 | srp_cmd->data_out_format = SRP_DIRECT_BUFFER; | ||
284 | else | ||
285 | srp_cmd->data_in_format = SRP_DIRECT_BUFFER; | ||
286 | } else { | ||
287 | if (cmd->sc_data_direction == DMA_TO_DEVICE) { | ||
288 | srp_cmd->data_out_format = SRP_INDIRECT_BUFFER; | ||
289 | srp_cmd->data_out_count = numbuf; | ||
290 | } else { | ||
291 | srp_cmd->data_in_format = SRP_INDIRECT_BUFFER; | ||
292 | srp_cmd->data_in_count = numbuf; | ||
293 | } | ||
294 | } | ||
295 | } | ||
296 | |||
297 | /** | ||
298 | * unmap_cmd_data: - Unmap the data pointed to by srp_cmd, based on the format | ||
299 | * @cmd: srp_cmd whose additional_data member will be unmapped | ||
300 | * @dev: device for which the memory is mapped | ||
301 | * | ||
302 | */ | ||
303 | static void unmap_cmd_data(struct srp_cmd *cmd, struct device *dev) | ||
304 | { | ||
305 | int i; | ||
306 | |||
307 | if ((cmd->data_out_format == SRP_NO_BUFFER) && | ||
308 | (cmd->data_in_format == SRP_NO_BUFFER)) | ||
309 | return; | ||
310 | else if ((cmd->data_out_format == SRP_DIRECT_BUFFER) || | ||
311 | (cmd->data_in_format == SRP_DIRECT_BUFFER)) { | ||
312 | struct memory_descriptor *data = | ||
313 | (struct memory_descriptor *)cmd->additional_data; | ||
314 | dma_unmap_single(dev, data->virtual_address, data->length, | ||
315 | DMA_BIDIRECTIONAL); | ||
316 | } else { | ||
317 | struct indirect_descriptor *indirect = | ||
318 | (struct indirect_descriptor *)cmd->additional_data; | ||
319 | int num_mapped = indirect->head.length / | ||
320 | sizeof(indirect->list[0]); | ||
321 | for (i = 0; i < num_mapped; ++i) { | ||
322 | struct memory_descriptor *data = &indirect->list[i]; | ||
323 | dma_unmap_single(dev, | ||
324 | data->virtual_address, | ||
325 | data->length, DMA_BIDIRECTIONAL); | ||
326 | } | ||
327 | } | ||
328 | } | ||
329 | |||
330 | /** | ||
331 | * map_sg_data: - Maps dma for a scatterlist and initializes descriptor fields | ||
332 | * @cmd: Scsi_Cmnd with the scatterlist | ||
333 | * @srp_cmd: srp_cmd that contains the memory descriptor | ||
334 | * @dev: device for which to map dma memory | ||
335 | * | ||
336 | * Called by map_data_for_srp_cmd() when building srp cmd from scsi cmd. | ||
337 | * Returns 1 on success. | ||
338 | */ | ||
339 | static int map_sg_data(struct scsi_cmnd *cmd, | ||
340 | struct srp_cmd *srp_cmd, struct device *dev) | ||
341 | { | ||
342 | |||
343 | int i, sg_mapped; | ||
344 | u64 total_length = 0; | ||
345 | struct scatterlist *sg = cmd->request_buffer; | ||
346 | struct memory_descriptor *data = | ||
347 | (struct memory_descriptor *)srp_cmd->additional_data; | ||
348 | struct indirect_descriptor *indirect = | ||
349 | (struct indirect_descriptor *)data; | ||
350 | |||
351 | sg_mapped = dma_map_sg(dev, sg, cmd->use_sg, DMA_BIDIRECTIONAL); | ||
352 | |||
353 | if (sg_mapped == 0) | ||
354 | return 0; | ||
355 | |||
356 | set_srp_direction(cmd, srp_cmd, sg_mapped); | ||
357 | |||
358 | /* special case; we can use a single direct descriptor */ | ||
359 | if (sg_mapped == 1) { | ||
360 | data->virtual_address = sg_dma_address(&sg[0]); | ||
361 | data->length = sg_dma_len(&sg[0]); | ||
362 | data->memory_handle = 0; | ||
363 | return 1; | ||
364 | } | ||
365 | |||
366 | if (sg_mapped > MAX_INDIRECT_BUFS) { | ||
367 | printk(KERN_ERR | ||
368 | "ibmvscsi: More than %d mapped sg entries, got %d\n", | ||
369 | MAX_INDIRECT_BUFS, sg_mapped); | ||
370 | return 0; | ||
371 | } | ||
372 | |||
373 | indirect->head.virtual_address = 0; | ||
374 | indirect->head.length = sg_mapped * sizeof(indirect->list[0]); | ||
375 | indirect->head.memory_handle = 0; | ||
376 | for (i = 0; i < sg_mapped; ++i) { | ||
377 | struct memory_descriptor *descr = &indirect->list[i]; | ||
378 | struct scatterlist *sg_entry = &sg[i]; | ||
379 | descr->virtual_address = sg_dma_address(sg_entry); | ||
380 | descr->length = sg_dma_len(sg_entry); | ||
381 | descr->memory_handle = 0; | ||
382 | total_length += sg_dma_len(sg_entry); | ||
383 | } | ||
384 | indirect->total_length = total_length; | ||
385 | |||
386 | return 1; | ||
387 | } | ||
388 | |||
389 | /** | ||
390 | * map_single_data: - Maps memory and initializes memory descriptor fields | ||
391 | * @cmd: struct scsi_cmnd with the memory to be mapped | ||
392 | * @srp_cmd: srp_cmd that contains the memory descriptor | ||
393 | * @dev: device for which to map dma memory | ||
394 | * | ||
395 | * Called by map_data_for_srp_cmd() when building srp cmd from scsi cmd. | ||
396 | * Returns 1 on success. | ||
397 | */ | ||
398 | static int map_single_data(struct scsi_cmnd *cmd, | ||
399 | struct srp_cmd *srp_cmd, struct device *dev) | ||
400 | { | ||
401 | struct memory_descriptor *data = | ||
402 | (struct memory_descriptor *)srp_cmd->additional_data; | ||
403 | |||
404 | data->virtual_address = | ||
405 | dma_map_single(dev, cmd->request_buffer, | ||
406 | cmd->request_bufflen, | ||
407 | DMA_BIDIRECTIONAL); | ||
408 | if (dma_mapping_error(data->virtual_address)) { | ||
409 | printk(KERN_ERR | ||
410 | "ibmvscsi: Unable to map request_buffer for command!\n"); | ||
411 | return 0; | ||
412 | } | ||
413 | data->length = cmd->request_bufflen; | ||
414 | data->memory_handle = 0; | ||
415 | |||
416 | set_srp_direction(cmd, srp_cmd, 1); | ||
417 | |||
418 | return 1; | ||
419 | } | ||
420 | |||
421 | /** | ||
422 | * map_data_for_srp_cmd: - Calls functions to map data for srp cmds | ||
423 | * @cmd: struct scsi_cmnd with the memory to be mapped | ||
424 | * @srp_cmd: srp_cmd that contains the memory descriptor | ||
425 | * @dev: dma device for which to map dma memory | ||
426 | * | ||
427 | * Called by scsi_cmd_to_srp_cmd() when converting scsi cmds to srp cmds | ||
428 | * Returns 1 on success. | ||
429 | */ | ||
430 | static int map_data_for_srp_cmd(struct scsi_cmnd *cmd, | ||
431 | struct srp_cmd *srp_cmd, struct device *dev) | ||
432 | { | ||
433 | switch (cmd->sc_data_direction) { | ||
434 | case DMA_FROM_DEVICE: | ||
435 | case DMA_TO_DEVICE: | ||
436 | break; | ||
437 | case DMA_NONE: | ||
438 | return 1; | ||
439 | case DMA_BIDIRECTIONAL: | ||
440 | printk(KERN_ERR | ||
441 | "ibmvscsi: Can't map DMA_BIDIRECTIONAL to read/write\n"); | ||
442 | return 0; | ||
443 | default: | ||
444 | printk(KERN_ERR | ||
445 | "ibmvscsi: Unknown data direction 0x%02x; can't map!\n", | ||
446 | cmd->sc_data_direction); | ||
447 | return 0; | ||
448 | } | ||
449 | |||
450 | if (!cmd->request_buffer) | ||
451 | return 1; | ||
452 | if (cmd->use_sg) | ||
453 | return map_sg_data(cmd, srp_cmd, dev); | ||
454 | return map_single_data(cmd, srp_cmd, dev); | ||
455 | } | ||
456 | |||
457 | /* ------------------------------------------------------------ | ||
458 | * Routines for sending and receiving SRPs | ||
459 | */ | ||
460 | /** | ||
461 | * ibmvscsi_send_srp_event: - Transforms event to u64 array and calls send_crq() | ||
462 | * @evt_struct: evt_struct to be sent | ||
463 | * @hostdata: ibmvscsi_host_data of host | ||
464 | * | ||
465 | * Returns the value returned from ibmvscsi_send_crq(). (Zero for success) | ||
466 | * Note that this routine assumes that host_lock is held for synchronization | ||
467 | */ | ||
468 | static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct, | ||
469 | struct ibmvscsi_host_data *hostdata) | ||
470 | { | ||
471 | struct scsi_cmnd *cmnd; | ||
472 | u64 *crq_as_u64 = (u64 *) &evt_struct->crq; | ||
473 | int rc; | ||
474 | |||
475 | /* If we have exhausted our request limit, just fail this request. | ||
476 | * Note that there are rare cases involving driver generated requests | ||
477 | * (such as task management requests) where the mid layer may think we | ||
478 | * can handle more requests (can_queue) when we actually can't | ||
479 | */ | ||
480 | if ((evt_struct->crq.format == VIOSRP_SRP_FORMAT) && | ||
481 | (atomic_dec_if_positive(&hostdata->request_limit) < 0)) { | ||
482 | /* See if the adapter is disabled */ | ||
483 | if (atomic_read(&hostdata->request_limit) < 0) | ||
484 | goto send_error; | ||
485 | |||
486 | printk(KERN_WARNING | ||
487 | "ibmvscsi: Warning, request_limit exceeded\n"); | ||
488 | unmap_cmd_data(&evt_struct->iu.srp.cmd, | ||
489 | hostdata->dev); | ||
490 | free_event_struct(&hostdata->pool, evt_struct); | ||
491 | return SCSI_MLQUEUE_HOST_BUSY; | ||
492 | } | ||
493 | |||
494 | /* Copy the IU into the transfer area */ | ||
495 | *evt_struct->xfer_iu = evt_struct->iu; | ||
496 | evt_struct->xfer_iu->srp.generic.tag = (u64)evt_struct; | ||
497 | |||
498 | /* Add this to the sent list. We need to do this | ||
499 | * before we actually send | ||
500 | * in case it comes back REALLY fast | ||
501 | */ | ||
502 | list_add_tail(&evt_struct->list, &hostdata->sent); | ||
503 | |||
504 | if ((rc = | ||
505 | ibmvscsi_send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) { | ||
506 | list_del(&evt_struct->list); | ||
507 | |||
508 | printk(KERN_ERR "ibmvscsi: failed to send event struct rc %d\n", | ||
509 | rc); | ||
510 | goto send_error; | ||
511 | } | ||
512 | |||
513 | return 0; | ||
514 | |||
515 | send_error: | ||
516 | unmap_cmd_data(&evt_struct->iu.srp.cmd, hostdata->dev); | ||
517 | |||
518 | if ((cmnd = evt_struct->cmnd) != NULL) { | ||
519 | cmnd->result = DID_ERROR << 16; | ||
520 | evt_struct->cmnd_done(cmnd); | ||
521 | } else if (evt_struct->done) | ||
522 | evt_struct->done(evt_struct); | ||
523 | |||
524 | free_event_struct(&hostdata->pool, evt_struct); | ||
525 | return 0; | ||
526 | } | ||
527 | |||
528 | /** | ||
529 | * handle_cmd_rsp: - Handle responses from commands | ||
530 | * @evt_struct: srp_event_struct to be handled | ||
531 | * | ||
532 | * Used as a callback when sending scsi cmds. | ||
533 | * Gets called by ibmvscsi_handle_crq() | ||
534 | */ | ||
535 | static void handle_cmd_rsp(struct srp_event_struct *evt_struct) | ||
536 | { | ||
537 | struct srp_rsp *rsp = &evt_struct->xfer_iu->srp.rsp; | ||
538 | struct scsi_cmnd *cmnd = evt_struct->cmnd; | ||
539 | |||
540 | if (unlikely(rsp->type != SRP_RSP_TYPE)) { | ||
541 | if (printk_ratelimit()) | ||
542 | printk(KERN_WARNING | ||
543 | "ibmvscsi: bad SRP RSP type %d\n", | ||
544 | rsp->type); | ||
545 | } | ||
546 | |||
547 | if (cmnd) { | ||
548 | cmnd->result = rsp->status; | ||
549 | if (((cmnd->result >> 1) & 0x1f) == CHECK_CONDITION) | ||
550 | memcpy(cmnd->sense_buffer, | ||
551 | rsp->sense_and_response_data, | ||
552 | rsp->sense_data_list_length); | ||
553 | unmap_cmd_data(&evt_struct->iu.srp.cmd, | ||
554 | evt_struct->hostdata->dev); | ||
555 | |||
556 | if (rsp->doover) | ||
557 | cmnd->resid = rsp->data_out_residual_count; | ||
558 | else if (rsp->diover) | ||
559 | cmnd->resid = rsp->data_in_residual_count; | ||
560 | } | ||
561 | |||
562 | if (evt_struct->cmnd_done) | ||
563 | evt_struct->cmnd_done(cmnd); | ||
564 | } | ||
565 | |||
566 | /** | ||
567 | * lun_from_dev: - Returns the lun of the scsi device | ||
568 | * @dev: struct scsi_device | ||
569 | * | ||
570 | */ | ||
571 | static inline u16 lun_from_dev(struct scsi_device *dev) | ||
572 | { | ||
573 | return (0x2 << 14) | (dev->id << 8) | (dev->channel << 5) | dev->lun; | ||
574 | } | ||
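A worked example of this encoding, using hypothetical device coordinates and the 3-bit channel / 6-bit id / 5-bit LUN split mentioned near the module parameters above:

/* Hypothetical values, for illustration only: channel = 1, id = 3, lun = 4.
 *
 *	(0x2 << 14) = 0x8000	addressing-method bits
 *	(3   <<  8) = 0x0300	target id
 *	(1   <<  5) = 0x0020	channel (bus)
 *	 4          = 0x0004	lun
 *	------------------------
 *	lun_from_dev() returns 0x8324
 *
 * ibmvscsi_queuecommand() then shifts this 16-bit value into the top two
 * bytes of the 64-bit SRP LUN field: srp_cmd->lun = ((u64) 0x8324) << 48;
 */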
575 | |||
576 | /** | ||
577 | * ibmvscsi_queuecommand: - The queuecommand function of the scsi template | ||
578 | * @cmd: struct scsi_cmnd to be executed | ||
579 | * @done: Callback function to be called when cmd is completed | ||
580 | */ | ||
581 | static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd, | ||
582 | void (*done) (struct scsi_cmnd *)) | ||
583 | { | ||
584 | struct srp_cmd *srp_cmd; | ||
585 | struct srp_event_struct *evt_struct; | ||
586 | struct ibmvscsi_host_data *hostdata = | ||
587 | (struct ibmvscsi_host_data *)&cmnd->device->host->hostdata; | ||
588 | u16 lun = lun_from_dev(cmnd->device); | ||
589 | |||
590 | evt_struct = get_event_struct(&hostdata->pool); | ||
591 | if (!evt_struct) | ||
592 | return SCSI_MLQUEUE_HOST_BUSY; | ||
593 | |||
594 | init_event_struct(evt_struct, | ||
595 | handle_cmd_rsp, | ||
596 | VIOSRP_SRP_FORMAT, | ||
597 | cmnd->timeout); | ||
598 | |||
599 | evt_struct->cmnd = cmnd; | ||
600 | evt_struct->cmnd_done = done; | ||
601 | |||
602 | /* Set up the actual SRP IU */ | ||
603 | srp_cmd = &evt_struct->iu.srp.cmd; | ||
604 | memset(srp_cmd, 0x00, sizeof(*srp_cmd)); | ||
605 | srp_cmd->type = SRP_CMD_TYPE; | ||
606 | memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(cmnd->cmnd)); | ||
607 | srp_cmd->lun = ((u64) lun) << 48; | ||
608 | |||
609 | if (!map_data_for_srp_cmd(cmnd, srp_cmd, hostdata->dev)) { | ||
610 | printk(KERN_ERR "ibmvscsi: couldn't convert cmd to srp_cmd\n"); | ||
611 | free_event_struct(&hostdata->pool, evt_struct); | ||
612 | return SCSI_MLQUEUE_HOST_BUSY; | ||
613 | } | ||
614 | |||
615 | /* Fix up dma address of the buffer itself */ | ||
616 | if ((srp_cmd->data_out_format == SRP_INDIRECT_BUFFER) || | ||
617 | (srp_cmd->data_in_format == SRP_INDIRECT_BUFFER)) { | ||
618 | struct indirect_descriptor *indirect = | ||
619 | (struct indirect_descriptor *)srp_cmd->additional_data; | ||
620 | indirect->head.virtual_address = evt_struct->crq.IU_data_ptr + | ||
621 | offsetof(struct srp_cmd, additional_data) + | ||
622 | offsetof(struct indirect_descriptor, list); | ||
623 | } | ||
624 | |||
625 | return ibmvscsi_send_srp_event(evt_struct, hostdata); | ||
626 | } | ||
627 | |||
628 | /* ------------------------------------------------------------ | ||
629 | * Routines for driver initialization | ||
630 | */ | ||
631 | /** | ||
632 | * adapter_info_rsp: - Handle response to MAD adapter info request | ||
633 | * @evt_struct: srp_event_struct with the response | ||
634 | * | ||
635 | * Used as a "done" callback by when sending adapter_info. Gets called | ||
636 | * by ibmvscsi_handle_crq() | ||
637 | */ | ||
638 | static void adapter_info_rsp(struct srp_event_struct *evt_struct) | ||
639 | { | ||
640 | struct ibmvscsi_host_data *hostdata = evt_struct->hostdata; | ||
641 | dma_unmap_single(hostdata->dev, | ||
642 | evt_struct->iu.mad.adapter_info.buffer, | ||
643 | evt_struct->iu.mad.adapter_info.common.length, | ||
644 | DMA_BIDIRECTIONAL); | ||
645 | |||
646 | if (evt_struct->xfer_iu->mad.adapter_info.common.status) { | ||
647 | printk("ibmvscsi: error %d getting adapter info\n", | ||
648 | evt_struct->xfer_iu->mad.adapter_info.common.status); | ||
649 | } else { | ||
650 | printk("ibmvscsi: host srp version: %s, " | ||
651 | "host partition %s (%d), OS %d, max io %u\n", | ||
652 | hostdata->madapter_info.srp_version, | ||
653 | hostdata->madapter_info.partition_name, | ||
654 | hostdata->madapter_info.partition_number, | ||
655 | hostdata->madapter_info.os_type, | ||
656 | hostdata->madapter_info.port_max_txu[0]); | ||
657 | |||
658 | if (hostdata->madapter_info.port_max_txu[0]) | ||
659 | hostdata->host->max_sectors = | ||
660 | hostdata->madapter_info.port_max_txu[0] >> 9; | ||
661 | } | ||
662 | } | ||
663 | |||
664 | /** | ||
665 | * send_mad_adapter_info: - Sends the mad adapter info request | ||
666 | * and stores the result so it can be retrieved with | ||
667 | * sysfs. We COULD consider causing a failure if the | ||
668 | * returned SRP version doesn't match ours. | ||
669 | * @hostdata: ibmvscsi_host_data of host | ||
670 | * | ||
671 | * Returns zero if successful. | ||
672 | */ | ||
673 | static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata) | ||
674 | { | ||
675 | struct viosrp_adapter_info *req; | ||
676 | struct srp_event_struct *evt_struct; | ||
677 | |||
678 | memset(&hostdata->madapter_info, 0x00, sizeof(hostdata->madapter_info)); | ||
679 | |||
680 | evt_struct = get_event_struct(&hostdata->pool); | ||
681 | if (!evt_struct) { | ||
682 | printk(KERN_ERR "ibmvscsi: couldn't allocate an event " | ||
683 | "for ADAPTER_INFO_REQ!\n"); | ||
684 | return; | ||
685 | } | ||
686 | |||
687 | init_event_struct(evt_struct, | ||
688 | adapter_info_rsp, | ||
689 | VIOSRP_MAD_FORMAT, | ||
690 | init_timeout * HZ); | ||
691 | |||
692 | req = &evt_struct->iu.mad.adapter_info; | ||
693 | memset(req, 0x00, sizeof(*req)); | ||
694 | |||
695 | req->common.type = VIOSRP_ADAPTER_INFO_TYPE; | ||
696 | req->common.length = sizeof(hostdata->madapter_info); | ||
697 | req->buffer = dma_map_single(hostdata->dev, | ||
698 | &hostdata->madapter_info, | ||
699 | sizeof(hostdata->madapter_info), | ||
700 | DMA_BIDIRECTIONAL); | ||
701 | |||
702 | if (dma_mapping_error(req->buffer)) { | ||
703 | printk(KERN_ERR | ||
704 | "ibmvscsi: Unable to map request_buffer " | ||
705 | "for adapter_info!\n"); | ||
706 | free_event_struct(&hostdata->pool, evt_struct); | ||
707 | return; | ||
708 | } | ||
709 | |||
710 | if (ibmvscsi_send_srp_event(evt_struct, hostdata)) | ||
711 | printk(KERN_ERR "ibmvscsi: couldn't send ADAPTER_INFO_REQ!\n"); | ||
712 | }; | ||
713 | |||
714 | /** | ||
715 | * login_rsp: - Handle response to SRP login request | ||
716 | * @evt_struct: srp_event_struct with the response | ||
717 | * | ||
718 | * Used as a "done" callback by when sending srp_login. Gets called | ||
719 | * by ibmvscsi_handle_crq() | ||
720 | */ | ||
721 | static void login_rsp(struct srp_event_struct *evt_struct) | ||
722 | { | ||
723 | struct ibmvscsi_host_data *hostdata = evt_struct->hostdata; | ||
724 | switch (evt_struct->xfer_iu->srp.generic.type) { | ||
725 | case SRP_LOGIN_RSP_TYPE: /* it worked! */ | ||
726 | break; | ||
727 | case SRP_LOGIN_REJ_TYPE: /* refused! */ | ||
728 | printk(KERN_INFO "ibmvscsi: SRP_LOGIN_REQ rejected\n"); | ||
729 | /* Login failed. */ | ||
730 | atomic_set(&hostdata->request_limit, -1); | ||
731 | return; | ||
732 | default: | ||
733 | printk(KERN_ERR | ||
734 | "ibmvscsi: Invalid login response typecode 0x%02x!\n", | ||
735 | evt_struct->xfer_iu->srp.generic.type); | ||
736 | /* Login failed. */ | ||
737 | atomic_set(&hostdata->request_limit, -1); | ||
738 | return; | ||
739 | } | ||
740 | |||
741 | printk(KERN_INFO "ibmvscsi: SRP_LOGIN succeeded\n"); | ||
742 | |||
743 | if (evt_struct->xfer_iu->srp.login_rsp.request_limit_delta > | ||
744 | (max_requests - 2)) | ||
745 | evt_struct->xfer_iu->srp.login_rsp.request_limit_delta = | ||
746 | max_requests - 2; | ||
747 | |||
748 | /* Now we know what the real request-limit is */ | ||
749 | atomic_set(&hostdata->request_limit, | ||
750 | evt_struct->xfer_iu->srp.login_rsp.request_limit_delta); | ||
751 | |||
752 | hostdata->host->can_queue = | ||
753 | evt_struct->xfer_iu->srp.login_rsp.request_limit_delta - 2; | ||
754 | |||
755 | if (hostdata->host->can_queue < 1) { | ||
756 | printk(KERN_ERR "ibmvscsi: Invalid request_limit_delta\n"); | ||
757 | return; | ||
758 | } | ||
759 | |||
760 | send_mad_adapter_info(hostdata); | ||
761 | return; | ||
762 | } | ||
763 | |||
764 | /** | ||
765 | * send_srp_login: - Sends the srp login | ||
766 | * @hostdata: ibmvscsi_host_data of host | ||
767 | * | ||
768 | * Returns zero if successful. | ||
769 | */ | ||
770 | static int send_srp_login(struct ibmvscsi_host_data *hostdata) | ||
771 | { | ||
772 | int rc; | ||
773 | unsigned long flags; | ||
774 | struct srp_login_req *login; | ||
775 | struct srp_event_struct *evt_struct = get_event_struct(&hostdata->pool); | ||
776 | if (!evt_struct) { | ||
777 | printk(KERN_ERR | ||
778 | "ibmvscsi: couldn't allocate an event for login req!\n"); | ||
779 | return FAILED; | ||
780 | } | ||
781 | |||
782 | init_event_struct(evt_struct, | ||
783 | login_rsp, | ||
784 | VIOSRP_SRP_FORMAT, | ||
785 | init_timeout * HZ); | ||
786 | |||
787 | login = &evt_struct->iu.srp.login_req; | ||
788 | login->type = SRP_LOGIN_REQ_TYPE; | ||
789 | login->max_requested_initiator_to_target_iulen = sizeof(union srp_iu); | ||
790 | login->required_buffer_formats = 0x0006; | ||
791 | |||
792 | /* Start out with a request limit of 1, since this is negotiated in | ||
793 | * the login request we are just sending | ||
794 | */ | ||
795 | atomic_set(&hostdata->request_limit, 1); | ||
796 | |||
797 | spin_lock_irqsave(hostdata->host->host_lock, flags); | ||
798 | rc = ibmvscsi_send_srp_event(evt_struct, hostdata); | ||
799 | spin_unlock_irqrestore(hostdata->host->host_lock, flags); | ||
800 | return rc; | ||
801 | }; | ||
802 | |||
803 | /** | ||
804 | * sync_completion: Signal that a synchronous command has completed | ||
805 | * Note that after returning from this call, the evt_struct is freed. | ||
806 | * The caller waiting on this completion shouldn't touch the evt_struct | ||
807 | * again. | ||
808 | */ | ||
809 | static void sync_completion(struct srp_event_struct *evt_struct) | ||
810 | { | ||
811 | /* copy the response back */ | ||
812 | if (evt_struct->sync_srp) | ||
813 | *evt_struct->sync_srp = *evt_struct->xfer_iu; | ||
814 | |||
815 | complete(&evt_struct->comp); | ||
816 | } | ||
817 | |||
818 | /** | ||
819 | * ibmvscsi_eh_abort_handler: Abort a command... from scsi host template | ||
820 | * send this over to the server and wait synchronously for the response | ||
821 | */ | ||
822 | static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd) | ||
823 | { | ||
824 | struct ibmvscsi_host_data *hostdata = | ||
825 | (struct ibmvscsi_host_data *)cmd->device->host->hostdata; | ||
826 | struct srp_tsk_mgmt *tsk_mgmt; | ||
827 | struct srp_event_struct *evt; | ||
828 | struct srp_event_struct *tmp_evt, *found_evt; | ||
829 | union viosrp_iu srp_rsp; | ||
830 | int rsp_rc; | ||
831 | u16 lun = lun_from_dev(cmd->device); | ||
832 | |||
833 | /* First, find this command in our sent list so we can figure | ||
834 | * out the correct tag | ||
835 | */ | ||
836 | found_evt = NULL; | ||
837 | list_for_each_entry(tmp_evt, &hostdata->sent, list) { | ||
838 | if (tmp_evt->cmnd == cmd) { | ||
839 | found_evt = tmp_evt; | ||
840 | break; | ||
841 | } | ||
842 | } | ||
843 | |||
844 | if (!found_evt) | ||
845 | return FAILED; | ||
846 | |||
847 | evt = get_event_struct(&hostdata->pool); | ||
848 | if (evt == NULL) { | ||
849 | printk(KERN_ERR "ibmvscsi: failed to allocate abort event\n"); | ||
850 | return FAILED; | ||
851 | } | ||
852 | |||
853 | init_event_struct(evt, | ||
854 | sync_completion, | ||
855 | VIOSRP_SRP_FORMAT, | ||
856 | init_timeout * HZ); | ||
857 | |||
858 | tsk_mgmt = &evt->iu.srp.tsk_mgmt; | ||
859 | |||
860 | /* Set up an abort SRP command */ | ||
861 | memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt)); | ||
862 | tsk_mgmt->type = SRP_TSK_MGMT_TYPE; | ||
863 | tsk_mgmt->lun = ((u64) lun) << 48; | ||
864 | tsk_mgmt->task_mgmt_flags = 0x01; /* ABORT TASK */ | ||
865 | tsk_mgmt->managed_task_tag = (u64) found_evt; | ||
866 | |||
867 | printk(KERN_INFO "ibmvscsi: aborting command. lun 0x%lx, tag 0x%lx\n", | ||
868 | tsk_mgmt->lun, tsk_mgmt->managed_task_tag); | ||
869 | |||
870 | evt->sync_srp = &srp_rsp; | ||
871 | init_completion(&evt->comp); | ||
872 | if (ibmvscsi_send_srp_event(evt, hostdata) != 0) { | ||
873 | printk(KERN_ERR "ibmvscsi: failed to send abort() event\n"); | ||
874 | return FAILED; | ||
875 | } | ||
876 | |||
877 | spin_unlock_irq(hostdata->host->host_lock); | ||
878 | wait_for_completion(&evt->comp); | ||
879 | spin_lock_irq(hostdata->host->host_lock); | ||
880 | |||
881 | /* make sure we got a good response */ | ||
882 | if (unlikely(srp_rsp.srp.generic.type != SRP_RSP_TYPE)) { | ||
883 | if (printk_ratelimit()) | ||
884 | printk(KERN_WARNING | ||
885 | "ibmvscsi: abort bad SRP RSP type %d\n", | ||
886 | srp_rsp.srp.generic.type); | ||
887 | return FAILED; | ||
888 | } | ||
889 | |||
890 | if (srp_rsp.srp.rsp.rspvalid) | ||
891 | rsp_rc = *((int *)srp_rsp.srp.rsp.sense_and_response_data); | ||
892 | else | ||
893 | rsp_rc = srp_rsp.srp.rsp.status; | ||
894 | |||
895 | if (rsp_rc) { | ||
896 | if (printk_ratelimit()) | ||
897 | printk(KERN_WARNING | ||
898 | "ibmvscsi: abort code %d for task tag 0x%lx\n", | ||
899 | rsp_rc, | ||
900 | tsk_mgmt->managed_task_tag); | ||
901 | return FAILED; | ||
902 | } | ||
903 | |||
904 | /* Because we dropped the spinlock above, it's possible | ||
905 | * the event is no longer in our list. Make sure it didn't | ||
906 | * complete while we were aborting | ||
907 | */ | ||
908 | found_evt = NULL; | ||
909 | list_for_each_entry(tmp_evt, &hostdata->sent, list) { | ||
910 | if (tmp_evt->cmnd == cmd) { | ||
911 | found_evt = tmp_evt; | ||
912 | break; | ||
913 | } | ||
914 | } | ||
915 | |||
916 | if (found_evt == NULL) { | ||
917 | printk(KERN_INFO | ||
918 | "ibmvscsi: aborted task tag 0x%lx completed\n", | ||
919 | tsk_mgmt->managed_task_tag); | ||
920 | return SUCCESS; | ||
921 | } | ||
922 | |||
923 | printk(KERN_INFO | ||
924 | "ibmvscsi: successfully aborted task tag 0x%lx\n", | ||
925 | tsk_mgmt->managed_task_tag); | ||
926 | |||
927 | cmd->result = (DID_ABORT << 16); | ||
928 | list_del(&found_evt->list); | ||
929 | unmap_cmd_data(&found_evt->iu.srp.cmd, found_evt->hostdata->dev); | ||
930 | free_event_struct(&found_evt->hostdata->pool, found_evt); | ||
931 | atomic_inc(&hostdata->request_limit); | ||
932 | return SUCCESS; | ||
933 | } | ||
934 | |||
935 | /** | ||
936 | * ibmvscsi_eh_device_reset_handler: Reset a single LUN...from scsi host | ||
937 | * template send this over to the server and wait synchronously for the | ||
938 | * response | ||
939 | */ | ||
940 | static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd) | ||
941 | { | ||
942 | struct ibmvscsi_host_data *hostdata = | ||
943 | (struct ibmvscsi_host_data *)cmd->device->host->hostdata; | ||
944 | |||
945 | struct srp_tsk_mgmt *tsk_mgmt; | ||
946 | struct srp_event_struct *evt; | ||
947 | struct srp_event_struct *tmp_evt, *pos; | ||
948 | union viosrp_iu srp_rsp; | ||
949 | int rsp_rc; | ||
950 | u16 lun = lun_from_dev(cmd->device); | ||
951 | |||
952 | evt = get_event_struct(&hostdata->pool); | ||
953 | if (evt == NULL) { | ||
954 | printk(KERN_ERR "ibmvscsi: failed to allocate reset event\n"); | ||
955 | return FAILED; | ||
956 | } | ||
957 | |||
958 | init_event_struct(evt, | ||
959 | sync_completion, | ||
960 | VIOSRP_SRP_FORMAT, | ||
961 | init_timeout * HZ); | ||
962 | |||
963 | tsk_mgmt = &evt->iu.srp.tsk_mgmt; | ||
964 | |||
965 | /* Set up a lun reset SRP command */ | ||
966 | memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt)); | ||
967 | tsk_mgmt->type = SRP_TSK_MGMT_TYPE; | ||
968 | tsk_mgmt->lun = ((u64) lun) << 48; | ||
969 | tsk_mgmt->task_mgmt_flags = 0x08; /* LUN RESET */ | ||
970 | |||
971 | printk(KERN_INFO "ibmvscsi: resetting device. lun 0x%lx\n", | ||
972 | tsk_mgmt->lun); | ||
973 | |||
974 | evt->sync_srp = &srp_rsp; | ||
975 | init_completion(&evt->comp); | ||
976 | if (ibmvscsi_send_srp_event(evt, hostdata) != 0) { | ||
977 | printk(KERN_ERR "ibmvscsi: failed to send reset event\n"); | ||
978 | return FAILED; | ||
979 | } | ||
980 | |||
981 | spin_unlock_irq(hostdata->host->host_lock); | ||
982 | wait_for_completion(&evt->comp); | ||
983 | spin_lock_irq(hostdata->host->host_lock); | ||
984 | |||
985 | /* make sure we got a good response */ | ||
986 | if (unlikely(srp_rsp.srp.generic.type != SRP_RSP_TYPE)) { | ||
987 | if (printk_ratelimit()) | ||
988 | printk(KERN_WARNING | ||
989 | "ibmvscsi: reset bad SRP RSP type %d\n", | ||
990 | srp_rsp.srp.generic.type); | ||
991 | return FAILED; | ||
992 | } | ||
993 | |||
994 | if (srp_rsp.srp.rsp.rspvalid) | ||
995 | rsp_rc = *((int *)srp_rsp.srp.rsp.sense_and_response_data); | ||
996 | else | ||
997 | rsp_rc = srp_rsp.srp.rsp.status; | ||
998 | |||
999 | if (rsp_rc) { | ||
1000 | if (printk_ratelimit()) | ||
1001 | printk(KERN_WARNING | ||
1002 | "ibmvscsi: reset code %d for task tag 0x%lx\n", | ||
1003 | rsp_rc, | ||
1004 | tsk_mgmt->managed_task_tag); | ||
1005 | return FAILED; | ||
1006 | } | ||
1007 | |||
1008 | /* We need to find all commands for this LUN that have not yet been | ||
1009 | * responded to, and fail them with DID_RESET | ||
1010 | */ | ||
1011 | list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) { | ||
1012 | if ((tmp_evt->cmnd) && (tmp_evt->cmnd->device == cmd->device)) { | ||
1013 | if (tmp_evt->cmnd) | ||
1014 | tmp_evt->cmnd->result = (DID_RESET << 16); | ||
1015 | list_del(&tmp_evt->list); | ||
1016 | unmap_cmd_data(&tmp_evt->iu.srp.cmd, tmp_evt->hostdata->dev); | ||
1017 | free_event_struct(&tmp_evt->hostdata->pool, | ||
1018 | tmp_evt); | ||
1019 | atomic_inc(&hostdata->request_limit); | ||
1020 | if (tmp_evt->cmnd_done) | ||
1021 | tmp_evt->cmnd_done(tmp_evt->cmnd); | ||
1022 | else if (tmp_evt->done) | ||
1023 | tmp_evt->done(tmp_evt); | ||
1024 | } | ||
1025 | } | ||
1026 | return SUCCESS; | ||
1027 | } | ||
1028 | |||
1029 | /** | ||
1030 | * purge_requests: Our virtual adapter just shut down. Purge any sent requests | ||
1031 | * @hostdata: the adapter | ||
1032 | */ | ||
1033 | static void purge_requests(struct ibmvscsi_host_data *hostdata) | ||
1034 | { | ||
1035 | struct srp_event_struct *tmp_evt, *pos; | ||
1036 | unsigned long flags; | ||
1037 | |||
1038 | spin_lock_irqsave(hostdata->host->host_lock, flags); | ||
1039 | list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) { | ||
1040 | list_del(&tmp_evt->list); | ||
1041 | if (tmp_evt->cmnd) { | ||
1042 | tmp_evt->cmnd->result = (DID_ERROR << 16); | ||
1043 | unmap_cmd_data(&tmp_evt->iu.srp.cmd, | ||
1044 | tmp_evt->hostdata->dev); | ||
1045 | if (tmp_evt->cmnd_done) | ||
1046 | tmp_evt->cmnd_done(tmp_evt->cmnd); | ||
1047 | } else { | ||
1048 | if (tmp_evt->done) { | ||
1049 | tmp_evt->done(tmp_evt); | ||
1050 | } | ||
1051 | } | ||
1052 | free_event_struct(&tmp_evt->hostdata->pool, tmp_evt); | ||
1053 | } | ||
1054 | spin_unlock_irqrestore(hostdata->host->host_lock, flags); | ||
1055 | } | ||
1056 | |||
1057 | /** | ||
1058 | * ibmvscsi_handle_crq: - Handles and frees received events in the CRQ | ||
1059 | * @crq: Command/Response queue | ||
1060 | * @hostdata: ibmvscsi_host_data of host | ||
1061 | * | ||
1062 | */ | ||
1063 | void ibmvscsi_handle_crq(struct viosrp_crq *crq, | ||
1064 | struct ibmvscsi_host_data *hostdata) | ||
1065 | { | ||
1066 | unsigned long flags; | ||
1067 | struct srp_event_struct *evt_struct = | ||
1068 | (struct srp_event_struct *)crq->IU_data_ptr; | ||
1069 | switch (crq->valid) { | ||
1070 | case 0xC0: /* initialization */ | ||
1071 | switch (crq->format) { | ||
1072 | case 0x01: /* Initialization message */ | ||
1073 | printk(KERN_INFO "ibmvscsi: partner initialized\n"); | ||
1074 | /* Send back a response */ | ||
1075 | if (ibmvscsi_send_crq(hostdata, | ||
1076 | 0xC002000000000000LL, 0) == 0) { | ||
1077 | /* Now login */ | ||
1078 | send_srp_login(hostdata); | ||
1079 | } else { | ||
1080 | printk(KERN_ERR | ||
1081 | "ibmvscsi: Unable to send init rsp\n"); | ||
1082 | } | ||
1083 | |||
1084 | break; | ||
1085 | case 0x02: /* Initialization response */ | ||
1086 | printk(KERN_INFO | ||
1087 | "ibmvscsi: partner initialization complete\n"); | ||
1088 | |||
1089 | /* Now login */ | ||
1090 | send_srp_login(hostdata); | ||
1091 | break; | ||
1092 | default: | ||
1093 | printk(KERN_ERR "ibmvscsi: unknown crq message type\n"); | ||
1094 | } | ||
1095 | return; | ||
1096 | case 0xFF: /* Hypervisor telling us the connection is closed */ | ||
1097 | printk(KERN_INFO "ibmvscsi: Virtual adapter failed!\n"); | ||
1098 | |||
1099 | atomic_set(&hostdata->request_limit, -1); | ||
1100 | purge_requests(hostdata); | ||
1101 | ibmvscsi_reset_crq_queue(&hostdata->queue, hostdata); | ||
1102 | return; | ||
1103 | case 0x80: /* real payload */ | ||
1104 | break; | ||
1105 | default: | ||
1106 | printk(KERN_ERR | ||
1107 | "ibmvscsi: got an invalid message type 0x%02x\n", | ||
1108 | crq->valid); | ||
1109 | return; | ||
1110 | } | ||
1111 | |||
1112 | /* The only kind of payload CRQs we should get are responses to | ||
1113 | * things we send. Make sure this response is to something we | ||
1114 | * actually sent | ||
1115 | */ | ||
1116 | if (!valid_event_struct(&hostdata->pool, evt_struct)) { | ||
1117 | printk(KERN_ERR | ||
1118 | "ibmvscsi: returned correlation_token 0x%p is invalid!\n", | ||
1119 | (void *)crq->IU_data_ptr); | ||
1120 | return; | ||
1121 | } | ||
1122 | |||
1123 | if (atomic_read(&evt_struct->free)) { | ||
1124 | printk(KERN_ERR | ||
1125 | "ibmvscsi: received duplicate correlation_token 0x%p!\n", | ||
1126 | (void *)crq->IU_data_ptr); | ||
1127 | return; | ||
1128 | } | ||
1129 | |||
1130 | if (crq->format == VIOSRP_SRP_FORMAT) | ||
1131 | atomic_add(evt_struct->xfer_iu->srp.rsp.request_limit_delta, | ||
1132 | &hostdata->request_limit); | ||
1133 | |||
1134 | if (evt_struct->done) | ||
1135 | evt_struct->done(evt_struct); | ||
1136 | else | ||
1137 | printk(KERN_ERR | ||
1138 | "ibmvscsi: returned done() is NULL; not running it!\n"); | ||
1139 | |||
1140 | /* | ||
1141 | * Lock the host_lock before messing with these structures, since we | ||
1142 | * are running in a task context | ||
1143 | */ | ||
1144 | spin_lock_irqsave(evt_struct->hostdata->host->host_lock, flags); | ||
1145 | list_del(&evt_struct->list); | ||
1146 | free_event_struct(&evt_struct->hostdata->pool, evt_struct); | ||
1147 | spin_unlock_irqrestore(evt_struct->hostdata->host->host_lock, flags); | ||
1148 | } | ||
1149 | |||
1150 | /** | ||
1151 | * ibmvscsi_do_host_config: Send the command to the server to get host | ||
1152 | * configuration data. The data is opaque to us. | ||
1153 | */ | ||
1154 | static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata, | ||
1155 | unsigned char *buffer, int length) | ||
1156 | { | ||
1157 | struct viosrp_host_config *host_config; | ||
1158 | struct srp_event_struct *evt_struct; | ||
1159 | int rc; | ||
1160 | |||
1161 | evt_struct = get_event_struct(&hostdata->pool); | ||
1162 | if (!evt_struct) { | ||
1163 | printk(KERN_ERR | ||
1164 | "ibmvscsi: could't allocate event for HOST_CONFIG!\n"); | ||
1165 | return -1; | ||
1166 | } | ||
1167 | |||
1168 | init_event_struct(evt_struct, | ||
1169 | sync_completion, | ||
1170 | VIOSRP_MAD_FORMAT, | ||
1171 | init_timeout * HZ); | ||
1172 | |||
1173 | host_config = &evt_struct->iu.mad.host_config; | ||
1174 | |||
1175 | /* Set up a lun reset SRP command */ | ||
1176 | memset(host_config, 0x00, sizeof(*host_config)); | ||
1177 | host_config->common.type = VIOSRP_HOST_CONFIG_TYPE; | ||
1178 | host_config->common.length = length; | ||
1179 | host_config->buffer = dma_map_single(hostdata->dev, buffer, length, | ||
1180 | DMA_BIDIRECTIONAL); | ||
1181 | |||
1182 | if (dma_mapping_error(host_config->buffer)) { | ||
1183 | printk(KERN_ERR | ||
1184 | "ibmvscsi: dma_mapping error " "getting host config\n"); | ||
1185 | free_event_struct(&hostdata->pool, evt_struct); | ||
1186 | return -1; | ||
1187 | } | ||
1188 | |||
1189 | init_completion(&evt_struct->comp); | ||
1190 | rc = ibmvscsi_send_srp_event(evt_struct, hostdata); | ||
1191 | if (rc == 0) { | ||
1192 | wait_for_completion(&evt_struct->comp); | ||
1193 | dma_unmap_single(hostdata->dev, host_config->buffer, | ||
1194 | length, DMA_BIDIRECTIONAL); | ||
1195 | } | ||
1196 | |||
1197 | return rc; | ||
1198 | } | ||
1199 | |||
1200 | /* ------------------------------------------------------------ | ||
1201 | * sysfs attributes | ||
1202 | */ | ||
1203 | static ssize_t show_host_srp_version(struct class_device *class_dev, char *buf) | ||
1204 | { | ||
1205 | struct Scsi_Host *shost = class_to_shost(class_dev); | ||
1206 | struct ibmvscsi_host_data *hostdata = | ||
1207 | (struct ibmvscsi_host_data *)shost->hostdata; | ||
1208 | int len; | ||
1209 | |||
1210 | len = snprintf(buf, PAGE_SIZE, "%s\n", | ||
1211 | hostdata->madapter_info.srp_version); | ||
1212 | return len; | ||
1213 | } | ||
1214 | |||
1215 | static struct class_device_attribute ibmvscsi_host_srp_version = { | ||
1216 | .attr = { | ||
1217 | .name = "srp_version", | ||
1218 | .mode = S_IRUGO, | ||
1219 | }, | ||
1220 | .show = show_host_srp_version, | ||
1221 | }; | ||
1222 | |||
1223 | static ssize_t show_host_partition_name(struct class_device *class_dev, | ||
1224 | char *buf) | ||
1225 | { | ||
1226 | struct Scsi_Host *shost = class_to_shost(class_dev); | ||
1227 | struct ibmvscsi_host_data *hostdata = | ||
1228 | (struct ibmvscsi_host_data *)shost->hostdata; | ||
1229 | int len; | ||
1230 | |||
1231 | len = snprintf(buf, PAGE_SIZE, "%s\n", | ||
1232 | hostdata->madapter_info.partition_name); | ||
1233 | return len; | ||
1234 | } | ||
1235 | |||
1236 | static struct class_device_attribute ibmvscsi_host_partition_name = { | ||
1237 | .attr = { | ||
1238 | .name = "partition_name", | ||
1239 | .mode = S_IRUGO, | ||
1240 | }, | ||
1241 | .show = show_host_partition_name, | ||
1242 | }; | ||
1243 | |||
1244 | static ssize_t show_host_partition_number(struct class_device *class_dev, | ||
1245 | char *buf) | ||
1246 | { | ||
1247 | struct Scsi_Host *shost = class_to_shost(class_dev); | ||
1248 | struct ibmvscsi_host_data *hostdata = | ||
1249 | (struct ibmvscsi_host_data *)shost->hostdata; | ||
1250 | int len; | ||
1251 | |||
1252 | len = snprintf(buf, PAGE_SIZE, "%d\n", | ||
1253 | hostdata->madapter_info.partition_number); | ||
1254 | return len; | ||
1255 | } | ||
1256 | |||
1257 | static struct class_device_attribute ibmvscsi_host_partition_number = { | ||
1258 | .attr = { | ||
1259 | .name = "partition_number", | ||
1260 | .mode = S_IRUGO, | ||
1261 | }, | ||
1262 | .show = show_host_partition_number, | ||
1263 | }; | ||
1264 | |||
1265 | static ssize_t show_host_mad_version(struct class_device *class_dev, char *buf) | ||
1266 | { | ||
1267 | struct Scsi_Host *shost = class_to_shost(class_dev); | ||
1268 | struct ibmvscsi_host_data *hostdata = | ||
1269 | (struct ibmvscsi_host_data *)shost->hostdata; | ||
1270 | int len; | ||
1271 | |||
1272 | len = snprintf(buf, PAGE_SIZE, "%d\n", | ||
1273 | hostdata->madapter_info.mad_version); | ||
1274 | return len; | ||
1275 | } | ||
1276 | |||
1277 | static struct class_device_attribute ibmvscsi_host_mad_version = { | ||
1278 | .attr = { | ||
1279 | .name = "mad_version", | ||
1280 | .mode = S_IRUGO, | ||
1281 | }, | ||
1282 | .show = show_host_mad_version, | ||
1283 | }; | ||
1284 | |||
1285 | static ssize_t show_host_os_type(struct class_device *class_dev, char *buf) | ||
1286 | { | ||
1287 | struct Scsi_Host *shost = class_to_shost(class_dev); | ||
1288 | struct ibmvscsi_host_data *hostdata = | ||
1289 | (struct ibmvscsi_host_data *)shost->hostdata; | ||
1290 | int len; | ||
1291 | |||
1292 | len = snprintf(buf, PAGE_SIZE, "%d\n", hostdata->madapter_info.os_type); | ||
1293 | return len; | ||
1294 | } | ||
1295 | |||
1296 | static struct class_device_attribute ibmvscsi_host_os_type = { | ||
1297 | .attr = { | ||
1298 | .name = "os_type", | ||
1299 | .mode = S_IRUGO, | ||
1300 | }, | ||
1301 | .show = show_host_os_type, | ||
1302 | }; | ||
1303 | |||
1304 | static ssize_t show_host_config(struct class_device *class_dev, char *buf) | ||
1305 | { | ||
1306 | struct Scsi_Host *shost = class_to_shost(class_dev); | ||
1307 | struct ibmvscsi_host_data *hostdata = | ||
1308 | (struct ibmvscsi_host_data *)shost->hostdata; | ||
1309 | |||
1310 | /* returns null-terminated host config data */ | ||
1311 | if (ibmvscsi_do_host_config(hostdata, buf, PAGE_SIZE) == 0) | ||
1312 | return strlen(buf); | ||
1313 | else | ||
1314 | return 0; | ||
1315 | } | ||
1316 | |||
1317 | static struct class_device_attribute ibmvscsi_host_config = { | ||
1318 | .attr = { | ||
1319 | .name = "config", | ||
1320 | .mode = S_IRUGO, | ||
1321 | }, | ||
1322 | .show = show_host_config, | ||
1323 | }; | ||
1324 | |||
1325 | static struct class_device_attribute *ibmvscsi_attrs[] = { | ||
1326 | &ibmvscsi_host_srp_version, | ||
1327 | &ibmvscsi_host_partition_name, | ||
1328 | &ibmvscsi_host_partition_number, | ||
1329 | &ibmvscsi_host_mad_version, | ||
1330 | &ibmvscsi_host_os_type, | ||
1331 | &ibmvscsi_host_config, | ||
1332 | NULL | ||
1333 | }; | ||
1334 | |||
1335 | /* ------------------------------------------------------------ | ||
1336 | * SCSI driver registration | ||
1337 | */ | ||
1338 | static struct scsi_host_template driver_template = { | ||
1339 | .module = THIS_MODULE, | ||
1340 | .name = "IBM POWER Virtual SCSI Adapter " IBMVSCSI_VERSION, | ||
1341 | .proc_name = "ibmvscsi", | ||
1342 | .queuecommand = ibmvscsi_queuecommand, | ||
1343 | .eh_abort_handler = ibmvscsi_eh_abort_handler, | ||
1344 | .eh_device_reset_handler = ibmvscsi_eh_device_reset_handler, | ||
1345 | .cmd_per_lun = 16, | ||
1346 | .can_queue = 1, /* Updated after SRP_LOGIN */ | ||
1347 | .this_id = -1, | ||
1348 | .sg_tablesize = MAX_INDIRECT_BUFS, | ||
1349 | .use_clustering = ENABLE_CLUSTERING, | ||
1350 | .shost_attrs = ibmvscsi_attrs, | ||
1351 | }; | ||
1352 | |||
1353 | /** | ||
1354 | * Called by bus code for each adapter | ||
1355 | */ | ||
1356 | static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id) | ||
1357 | { | ||
1358 | struct ibmvscsi_host_data *hostdata; | ||
1359 | struct Scsi_Host *host; | ||
1360 | struct device *dev = &vdev->dev; | ||
1361 | unsigned long wait_switch = 0; | ||
1362 | |||
1363 | vdev->dev.driver_data = NULL; | ||
1364 | |||
1365 | host = scsi_host_alloc(&driver_template, sizeof(*hostdata)); | ||
1366 | if (!host) { | ||
1367 | printk(KERN_ERR "ibmvscsi: couldn't allocate host data\n"); | ||
1368 | goto scsi_host_alloc_failed; | ||
1369 | } | ||
1370 | |||
1371 | hostdata = (struct ibmvscsi_host_data *)host->hostdata; | ||
1372 | memset(hostdata, 0x00, sizeof(*hostdata)); | ||
1373 | INIT_LIST_HEAD(&hostdata->sent); | ||
1374 | hostdata->host = host; | ||
1375 | hostdata->dev = dev; | ||
1376 | atomic_set(&hostdata->request_limit, -1); | ||
1377 | hostdata->host->max_sectors = 32 * 8; /* default max I/O 32 pages */ | ||
1378 | |||
1379 | if (ibmvscsi_init_crq_queue(&hostdata->queue, hostdata, | ||
1380 | max_requests) != 0) { | ||
1381 | printk(KERN_ERR "ibmvscsi: couldn't initialize crq\n"); | ||
1382 | goto init_crq_failed; | ||
1383 | } | ||
1384 | if (initialize_event_pool(&hostdata->pool, max_requests, hostdata) != 0) { | ||
1385 | printk(KERN_ERR "ibmvscsi: couldn't initialize event pool\n"); | ||
1386 | goto init_pool_failed; | ||
1387 | } | ||
1388 | |||
1389 | host->max_lun = 8; | ||
1390 | host->max_id = max_id; | ||
1391 | host->max_channel = max_channel; | ||
1392 | |||
1393 | if (scsi_add_host(hostdata->host, hostdata->dev)) | ||
1394 | goto add_host_failed; | ||
1395 | |||
1396 | /* Try to send an initialization message. Note that this is allowed | ||
1397 | * to fail if the other end is not active. In that case we don't | ||
1398 | * want to scan | ||
1399 | */ | ||
1400 | if (ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0) == 0) { | ||
1401 | /* | ||
1402 | * Wait around max init_timeout secs for the adapter to finish | ||
1403 | * initializing. When we are done initializing, we will have a | ||
1404 | * valid request_limit. We don't want Linux scanning before | ||
1405 | * we are ready. | ||
1406 | */ | ||
1407 | for (wait_switch = jiffies + (init_timeout * HZ); | ||
1408 | time_before(jiffies, wait_switch) && | ||
1409 | atomic_read(&hostdata->request_limit) < 2;) { | ||
1410 | |||
1411 | msleep(10); | ||
1412 | } | ||
1413 | |||
1414 | /* if we now have a valid request_limit, initiate a scan */ | ||
1415 | if (atomic_read(&hostdata->request_limit) > 0) | ||
1416 | scsi_scan_host(host); | ||
1417 | } | ||
1418 | |||
1419 | vdev->dev.driver_data = hostdata; | ||
1420 | return 0; | ||
1421 | |||
1422 | add_host_failed: | ||
1423 | release_event_pool(&hostdata->pool, hostdata); | ||
1424 | init_pool_failed: | ||
1425 | ibmvscsi_release_crq_queue(&hostdata->queue, hostdata, max_requests); | ||
1426 | init_crq_failed: | ||
1427 | scsi_host_put(host); | ||
1428 | scsi_host_alloc_failed: | ||
1429 | return -1; | ||
1430 | } | ||
1431 | |||
1432 | static int ibmvscsi_remove(struct vio_dev *vdev) | ||
1433 | { | ||
1434 | struct ibmvscsi_host_data *hostdata = vdev->dev.driver_data; | ||
1435 | release_event_pool(&hostdata->pool, hostdata); | ||
1436 | ibmvscsi_release_crq_queue(&hostdata->queue, hostdata, | ||
1437 | max_requests); | ||
1438 | |||
1439 | scsi_remove_host(hostdata->host); | ||
1440 | scsi_host_put(hostdata->host); | ||
1441 | |||
1442 | return 0; | ||
1443 | } | ||
1444 | |||
1445 | /** | ||
1446 | * ibmvscsi_device_table: Used by vio.c to match the devices in the device | ||
1447 | * tree that this driver supports. | ||
1448 | */ | ||
1449 | static struct vio_device_id ibmvscsi_device_table[] __devinitdata = { | ||
1450 | {"vscsi", "IBM,v-scsi"}, | ||
1451 | {0,} | ||
1452 | }; | ||
1453 | |||
1454 | MODULE_DEVICE_TABLE(vio, ibmvscsi_device_table); | ||
1455 | static struct vio_driver ibmvscsi_driver = { | ||
1456 | .name = "ibmvscsi", | ||
1457 | .id_table = ibmvscsi_device_table, | ||
1458 | .probe = ibmvscsi_probe, | ||
1459 | .remove = ibmvscsi_remove | ||
1460 | }; | ||
1461 | |||
1462 | int __init ibmvscsi_module_init(void) | ||
1463 | { | ||
1464 | return vio_register_driver(&ibmvscsi_driver); | ||
1465 | } | ||
1466 | |||
1467 | void __exit ibmvscsi_module_exit(void) | ||
1468 | { | ||
1469 | vio_unregister_driver(&ibmvscsi_driver); | ||
1470 | } | ||
1471 | |||
1472 | module_init(ibmvscsi_module_init); | ||
1473 | module_exit(ibmvscsi_module_exit); | ||
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.h b/drivers/scsi/ibmvscsi/ibmvscsi.h new file mode 100644 index 000000000000..1030b703c30e --- /dev/null +++ b/drivers/scsi/ibmvscsi/ibmvscsi.h | |||
@@ -0,0 +1,109 @@ | |||
1 | /* ------------------------------------------------------------ | ||
2 | * ibmvscsi.h | ||
3 | * (C) Copyright IBM Corporation 1994, 2003 | ||
4 | * Authors: Colin DeVilbiss (devilbis@us.ibm.com) | ||
5 | * Santiago Leon (santil@us.ibm.com) | ||
6 | * Dave Boutcher (sleddog@us.ibm.com) | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 | ||
21 | * USA | ||
22 | * | ||
23 | * ------------------------------------------------------------ | ||
24 | * Emulation of a SCSI host adapter for Virtual I/O devices | ||
25 | * | ||
26 | * This driver allows the Linux SCSI peripheral drivers to directly | ||
27 | * access devices in the hosting partition, either on an iSeries | ||
28 | * hypervisor system or a converged hypervisor system. | ||
29 | */ | ||
30 | #ifndef IBMVSCSI_H | ||
31 | #define IBMVSCSI_H | ||
32 | #include <linux/types.h> | ||
33 | #include <linux/list.h> | ||
34 | #include <linux/completion.h> | ||
35 | #include <linux/interrupt.h> | ||
36 | #include "viosrp.h" | ||
37 | |||
38 | struct scsi_cmnd; | ||
39 | struct Scsi_Host; | ||
40 | |||
41 | /* Number of indirect bufs...the list of these has to fit in the | ||
42 | * additional data of the srp_cmd struct along with the indirect | ||
43 | * descriptor | ||
44 | */ | ||
45 | #define MAX_INDIRECT_BUFS 10 | ||
46 | |||
47 | /* ------------------------------------------------------------ | ||
48 | * Data Structures | ||
49 | */ | ||
50 | /* an RPA command/response transport queue */ | ||
51 | struct crq_queue { | ||
52 | struct viosrp_crq *msgs; | ||
53 | int size, cur; | ||
54 | dma_addr_t msg_token; | ||
55 | spinlock_t lock; | ||
56 | }; | ||
57 | |||
58 | /* a unit of work for the hosting partition */ | ||
59 | struct srp_event_struct { | ||
60 | union viosrp_iu *xfer_iu; | ||
61 | struct scsi_cmnd *cmnd; | ||
62 | struct list_head list; | ||
63 | void (*done) (struct srp_event_struct *); | ||
64 | struct viosrp_crq crq; | ||
65 | struct ibmvscsi_host_data *hostdata; | ||
66 | atomic_t free; | ||
67 | union viosrp_iu iu; | ||
68 | void (*cmnd_done) (struct scsi_cmnd *); | ||
69 | struct completion comp; | ||
70 | union viosrp_iu *sync_srp; | ||
71 | }; | ||
72 | |||
73 | /* a pool of event structs for use */ | ||
74 | struct event_pool { | ||
75 | struct srp_event_struct *events; | ||
76 | u32 size; | ||
77 | int next; | ||
78 | union viosrp_iu *iu_storage; | ||
79 | dma_addr_t iu_token; | ||
80 | }; | ||
81 | |||
82 | /* all driver data associated with a host adapter */ | ||
83 | struct ibmvscsi_host_data { | ||
84 | atomic_t request_limit; | ||
85 | struct device *dev; | ||
86 | struct event_pool pool; | ||
87 | struct crq_queue queue; | ||
88 | struct tasklet_struct srp_task; | ||
89 | struct list_head sent; | ||
90 | struct Scsi_Host *host; | ||
91 | struct mad_adapter_info_data madapter_info; | ||
92 | }; | ||
93 | |||
94 | /* routines for managing a command/response queue */ | ||
95 | int ibmvscsi_init_crq_queue(struct crq_queue *queue, | ||
96 | struct ibmvscsi_host_data *hostdata, | ||
97 | int max_requests); | ||
98 | void ibmvscsi_release_crq_queue(struct crq_queue *queue, | ||
99 | struct ibmvscsi_host_data *hostdata, | ||
100 | int max_requests); | ||
101 | void ibmvscsi_reset_crq_queue(struct crq_queue *queue, | ||
102 | struct ibmvscsi_host_data *hostdata); | ||
103 | |||
104 | void ibmvscsi_handle_crq(struct viosrp_crq *crq, | ||
105 | struct ibmvscsi_host_data *hostdata); | ||
106 | int ibmvscsi_send_crq(struct ibmvscsi_host_data *hostdata, | ||
107 | u64 word1, u64 word2); | ||
108 | |||
109 | #endif /* IBMVSCSI_H */ | ||
diff --git a/drivers/scsi/ibmvscsi/iseries_vscsi.c b/drivers/scsi/ibmvscsi/iseries_vscsi.c new file mode 100644 index 000000000000..e9202f2a8276 --- /dev/null +++ b/drivers/scsi/ibmvscsi/iseries_vscsi.c | |||
@@ -0,0 +1,144 @@ | |||
1 | /* ------------------------------------------------------------ | ||
2 | * iSeries_vscsi.c | ||
3 | * (C) Copyright IBM Corporation 1994, 2003 | ||
4 | * Authors: Colin DeVilbiss (devilbis@us.ibm.com) | ||
5 | * Santiago Leon (santil@us.ibm.com) | ||
6 | * Dave Boutcher (sleddog@us.ibm.com) | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 | ||
21 | * USA | ||
22 | * | ||
23 | * ------------------------------------------------------------ | ||
24 | * iSeries-specific functions of the SCSI host adapter for Virtual I/O devices | ||
25 | * | ||
26 | * This driver allows the Linux SCSI peripheral drivers to directly | ||
27 | * access devices in the hosting partition, either on an iSeries | ||
28 | * hypervisor system or a converged hypervisor system. | ||
29 | */ | ||
30 | |||
31 | #include <asm/iSeries/vio.h> | ||
32 | #include <asm/iSeries/HvLpEvent.h> | ||
33 | #include <asm/iSeries/HvTypes.h> | ||
34 | #include <asm/iSeries/HvLpConfig.h> | ||
35 | #include <asm/vio.h> | ||
36 | #include <linux/device.h> | ||
37 | #include "ibmvscsi.h" | ||
38 | |||
39 | /* global variables */ | ||
40 | static struct ibmvscsi_host_data *single_host_data; | ||
41 | |||
42 | /* ------------------------------------------------------------ | ||
43 | * Routines for direct interpartition interaction | ||
44 | */ | ||
45 | struct srp_lp_event { | ||
46 | struct HvLpEvent lpevt; /* 0x00-0x17 */ | ||
47 | u32 reserved1; /* 0x18-0x1B; unused */ | ||
48 | u16 version; /* 0x1C-0x1D; unused */ | ||
49 | u16 subtype_rc; /* 0x1E-0x1F; unused */ | ||
50 | struct viosrp_crq crq; /* 0x20-0x3F */ | ||
51 | }; | ||
52 | |||
53 | /** | ||
54 | * ibmvscsi_handle_event: - standard interface for handling logical partition events | ||
55 | */ | ||
56 | static void ibmvscsi_handle_event(struct HvLpEvent *lpevt) | ||
57 | { | ||
58 | struct srp_lp_event *evt = (struct srp_lp_event *)lpevt; | ||
59 | |||
60 | if (!evt) { | ||
61 | printk(KERN_ERR "ibmvscsi: received null event\n"); | ||
62 | return; | ||
63 | } | ||
64 | |||
65 | if (single_host_data == NULL) { | ||
66 | printk(KERN_ERR | ||
67 | "ibmvscsi: received event, no adapter present\n"); | ||
68 | return; | ||
69 | } | ||
70 | |||
71 | ibmvscsi_handle_crq(&evt->crq, single_host_data); | ||
72 | } | ||
73 | |||
74 | /* ------------------------------------------------------------ | ||
75 | * Routines for driver initialization | ||
76 | */ | ||
77 | int ibmvscsi_init_crq_queue(struct crq_queue *queue, | ||
78 | struct ibmvscsi_host_data *hostdata, | ||
79 | int max_requests) | ||
80 | { | ||
81 | int rc; | ||
82 | |||
83 | single_host_data = hostdata; | ||
84 | rc = viopath_open(viopath_hostLp, viomajorsubtype_scsi, 0); | ||
85 | if (rc < 0) { | ||
86 | printk(KERN_ERR "ibmvscsi: viopath_open failed with rc %d\n", | ||
87 | rc); | ||
88 | goto viopath_open_failed; | ||
89 | } | ||
90 | |||
91 | rc = vio_setHandler(viomajorsubtype_scsi, ibmvscsi_handle_event); | ||
92 | if (rc < 0) { | ||
93 | printk(KERN_ERR "ibmvscsi: vio_setHandler failed with rc %d\n", | ||
94 | rc); | ||
95 | goto vio_setHandler_failed; | ||
96 | } | ||
97 | return 0; | ||
98 | |||
99 | vio_setHandler_failed: | ||
100 | viopath_close(viopath_hostLp, viomajorsubtype_scsi, max_requests); | ||
101 | viopath_open_failed: | ||
102 | return -1; | ||
103 | } | ||
104 | |||
105 | void ibmvscsi_release_crq_queue(struct crq_queue *queue, | ||
106 | struct ibmvscsi_host_data *hostdata, | ||
107 | int max_requests) | ||
108 | { | ||
109 | vio_clearHandler(viomajorsubtype_scsi); | ||
110 | viopath_close(viopath_hostLp, viomajorsubtype_scsi, max_requests); | ||
111 | } | ||
112 | |||
113 | /** | ||
114 | * ibmvscsi_reset_crq_queue: - resets a crq after a failure | ||
115 | * @queue: crq_queue to initialize and register | ||
116 | * @hostdata: ibmvscsi_host_data of host | ||
117 | * | ||
118 | * no-op for iSeries | ||
119 | */ | ||
120 | void ibmvscsi_reset_crq_queue(struct crq_queue *queue, | ||
121 | struct ibmvscsi_host_data *hostdata) | ||
122 | { | ||
123 | } | ||
124 | |||
125 | /** | ||
126 | * ibmvscsi_send_crq: - Send a CRQ | ||
127 | * @hostdata: the adapter | ||
128 | * @word1: the first 64 bits of the data | ||
129 | * @word2: the second 64 bits of the data | ||
130 | */ | ||
131 | int ibmvscsi_send_crq(struct ibmvscsi_host_data *hostdata, u64 word1, u64 word2) | ||
132 | { | ||
133 | single_host_data = hostdata; | ||
134 | return HvCallEvent_signalLpEventFast(viopath_hostLp, | ||
135 | HvLpEvent_Type_VirtualIo, | ||
136 | viomajorsubtype_scsi, | ||
137 | HvLpEvent_AckInd_NoAck, | ||
138 | HvLpEvent_AckType_ImmediateAck, | ||
139 | viopath_sourceinst(viopath_hostLp), | ||
140 | viopath_targetinst(viopath_hostLp), | ||
141 | 0, | ||
142 | VIOVERSION << 16, word1, word2, 0, | ||
143 | 0); | ||
144 | } | ||
diff --git a/drivers/scsi/ibmvscsi/rpa_vscsi.c b/drivers/scsi/ibmvscsi/rpa_vscsi.c new file mode 100644 index 000000000000..50cb909f314f --- /dev/null +++ b/drivers/scsi/ibmvscsi/rpa_vscsi.c | |||
@@ -0,0 +1,260 @@ | |||
1 | /* ------------------------------------------------------------ | ||
2 | * rpa_vscsi.c | ||
3 | * (C) Copyright IBM Corporation 1994, 2003 | ||
4 | * Authors: Colin DeVilbiss (devilbis@us.ibm.com) | ||
5 | * Santiago Leon (santil@us.ibm.com) | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 | ||
20 | * USA | ||
21 | * | ||
22 | * ------------------------------------------------------------ | ||
23 | * RPA-specific functions of the SCSI host adapter for Virtual I/O devices | ||
24 | * | ||
25 | * This driver allows the Linux SCSI peripheral drivers to directly | ||
26 | * access devices in the hosting partition, either on an iSeries | ||
27 | * hypervisor system or a converged hypervisor system. | ||
28 | */ | ||
29 | |||
30 | #include <asm/vio.h> | ||
31 | #include <asm/iommu.h> | ||
32 | #include <asm/hvcall.h> | ||
33 | #include <linux/dma-mapping.h> | ||
34 | #include <linux/interrupt.h> | ||
35 | #include "ibmvscsi.h" | ||
36 | |||
37 | /* ------------------------------------------------------------ | ||
38 | * Routines for managing the command/response queue | ||
39 | */ | ||
40 | /** | ||
41 | * ibmvscsi_handle_event: - Interrupt handler for crq events | ||
42 | * @irq: number of irq to handle, not used | ||
43 | * @dev_instance: ibmvscsi_host_data of host that received interrupt | ||
44 | * @regs: pt_regs with registers | ||
45 | * | ||
46 | * Disables interrupts and schedules srp_task | ||
47 | * Always returns IRQ_HANDLED | ||
48 | */ | ||
49 | static irqreturn_t ibmvscsi_handle_event(int irq, | ||
50 | void *dev_instance, | ||
51 | struct pt_regs *regs) | ||
52 | { | ||
53 | struct ibmvscsi_host_data *hostdata = | ||
54 | (struct ibmvscsi_host_data *)dev_instance; | ||
55 | vio_disable_interrupts(to_vio_dev(hostdata->dev)); | ||
56 | tasklet_schedule(&hostdata->srp_task); | ||
57 | return IRQ_HANDLED; | ||
58 | } | ||
59 | |||
60 | /** | ||
61 | * ibmvscsi_release_crq_queue: - Deallocates data and unregisters CRQ | ||
62 | * @queue: crq_queue to initialize and register | ||
63 | * @hostdata: ibmvscsi_host_data of host | ||
64 | * | ||
65 | * Frees irq, deallocates a page for messages, unmaps dma, and unregisters | ||
66 | * the crq with the hypervisor. | ||
67 | */ | ||
68 | void ibmvscsi_release_crq_queue(struct crq_queue *queue, | ||
69 | struct ibmvscsi_host_data *hostdata, | ||
70 | int max_requests) | ||
71 | { | ||
72 | long rc; | ||
73 | struct vio_dev *vdev = to_vio_dev(hostdata->dev); | ||
74 | free_irq(vdev->irq, (void *)hostdata); | ||
75 | tasklet_kill(&hostdata->srp_task); | ||
76 | do { | ||
77 | rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); | ||
78 | } while ((rc == H_Busy) || (H_isLongBusy(rc))); | ||
79 | dma_unmap_single(hostdata->dev, | ||
80 | queue->msg_token, | ||
81 | queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL); | ||
82 | free_page((unsigned long)queue->msgs); | ||
83 | } | ||
84 | |||
85 | /** | ||
86 | * crq_queue_next_crq: - Returns the next entry in message queue | ||
87 | * @queue: crq_queue to use | ||
88 | * | ||
89 | * Returns pointer to next entry in queue, or NULL if there are no new | ||
90 | * entries in the CRQ. | ||
91 | */ | ||
92 | static struct viosrp_crq *crq_queue_next_crq(struct crq_queue *queue) | ||
93 | { | ||
94 | struct viosrp_crq *crq; | ||
95 | unsigned long flags; | ||
96 | |||
97 | spin_lock_irqsave(&queue->lock, flags); | ||
98 | crq = &queue->msgs[queue->cur]; | ||
99 | if (crq->valid & 0x80) { | ||
100 | if (++queue->cur == queue->size) | ||
101 | queue->cur = 0; | ||
102 | } else | ||
103 | crq = NULL; | ||
104 | spin_unlock_irqrestore(&queue->lock, flags); | ||
105 | |||
106 | return crq; | ||
107 | } | ||
108 | |||
109 | /** | ||
110 | * ibmvscsi_send_crq: - Send a CRQ | ||
111 | * @hostdata: the adapter | ||
112 | * @word1: the first 64 bits of the data | ||
113 | * @word2: the second 64 bits of the data | ||
114 | */ | ||
115 | int ibmvscsi_send_crq(struct ibmvscsi_host_data *hostdata, u64 word1, u64 word2) | ||
116 | { | ||
117 | struct vio_dev *vdev = to_vio_dev(hostdata->dev); | ||
118 | |||
119 | return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2); | ||
120 | } | ||
121 | |||
122 | /** | ||
123 | * ibmvscsi_task: - Process srps asynchronously | ||
124 | * @data: ibmvscsi_host_data of host | ||
125 | */ | ||
126 | static void ibmvscsi_task(void *data) | ||
127 | { | ||
128 | struct ibmvscsi_host_data *hostdata = (struct ibmvscsi_host_data *)data; | ||
129 | struct vio_dev *vdev = to_vio_dev(hostdata->dev); | ||
130 | struct viosrp_crq *crq; | ||
131 | int done = 0; | ||
132 | |||
133 | while (!done) { | ||
134 | /* Pull all the valid messages off the CRQ */ | ||
135 | while ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) { | ||
136 | ibmvscsi_handle_crq(crq, hostdata); | ||
137 | crq->valid = 0x00; | ||
138 | } | ||
139 | |||
140 | vio_enable_interrupts(vdev); | ||
141 | if ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) { | ||
142 | vio_disable_interrupts(vdev); | ||
143 | ibmvscsi_handle_crq(crq, hostdata); | ||
144 | crq->valid = 0x00; | ||
145 | } else { | ||
146 | done = 1; | ||
147 | } | ||
148 | } | ||
149 | } | ||
150 | |||
151 | /** | ||
152 | * ibmvscsi_init_crq_queue: - Initializes and registers CRQ with hypervisor | ||
153 | * @queue: crq_queue to initialize and register | ||
154 | * @hostdata: ibmvscsi_host_data of host | ||
155 | * | ||
156 | * Allocates a page for messages, maps it for dma, and registers | ||
157 | * the crq with the hypervisor. | ||
158 | * Returns zero on success. | ||
159 | */ | ||
160 | int ibmvscsi_init_crq_queue(struct crq_queue *queue, | ||
161 | struct ibmvscsi_host_data *hostdata, | ||
162 | int max_requests) | ||
163 | { | ||
164 | int rc; | ||
165 | struct vio_dev *vdev = to_vio_dev(hostdata->dev); | ||
166 | |||
167 | queue->msgs = (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL); | ||
168 | |||
169 | if (!queue->msgs) | ||
170 | goto malloc_failed; | ||
171 | queue->size = PAGE_SIZE / sizeof(*queue->msgs); | ||
172 | |||
173 | queue->msg_token = dma_map_single(hostdata->dev, queue->msgs, | ||
174 | queue->size * sizeof(*queue->msgs), | ||
175 | DMA_BIDIRECTIONAL); | ||
176 | |||
177 | if (dma_mapping_error(queue->msg_token)) | ||
178 | goto map_failed; | ||
179 | |||
180 | rc = plpar_hcall_norets(H_REG_CRQ, | ||
181 | vdev->unit_address, | ||
182 | queue->msg_token, PAGE_SIZE); | ||
183 | if (rc == 2) { | ||
184 | /* Adapter is good, but other end is not ready */ | ||
185 | printk(KERN_WARNING "ibmvscsi: Partner adapter not ready\n"); | ||
186 | } else if (rc != 0) { | ||
187 | printk(KERN_WARNING "ibmvscsi: Error %d opening adapter\n", rc); | ||
188 | goto reg_crq_failed; | ||
189 | } | ||
190 | |||
191 | if (request_irq(vdev->irq, | ||
192 | ibmvscsi_handle_event, | ||
193 | 0, "ibmvscsi", (void *)hostdata) != 0) { | ||
194 | printk(KERN_ERR "ibmvscsi: couldn't register irq 0x%x\n", | ||
195 | vdev->irq); | ||
196 | goto req_irq_failed; | ||
197 | } | ||
198 | |||
199 | rc = vio_enable_interrupts(vdev); | ||
200 | if (rc != 0) { | ||
201 | printk(KERN_ERR "ibmvscsi: Error %d enabling interrupts!!!\n", | ||
202 | rc); | ||
203 | goto req_irq_failed; | ||
204 | } | ||
205 | |||
206 | queue->cur = 0; | ||
207 | spin_lock_init(&queue->lock); | ||
208 | |||
209 | tasklet_init(&hostdata->srp_task, (void *)ibmvscsi_task, | ||
210 | (unsigned long)hostdata); | ||
211 | |||
212 | return 0; | ||
213 | |||
214 | req_irq_failed: | ||
215 | do { | ||
216 | rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); | ||
217 | } while ((rc == H_Busy) || (H_isLongBusy(rc))); | ||
218 | reg_crq_failed: | ||
219 | dma_unmap_single(hostdata->dev, | ||
220 | queue->msg_token, | ||
221 | queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL); | ||
222 | map_failed: | ||
223 | free_page((unsigned long)queue->msgs); | ||
224 | malloc_failed: | ||
225 | return -1; | ||
226 | } | ||
227 | |||
228 | /** | ||
229 | * ibmvscsi_reset_crq_queue: - resets a crq after a failure | ||
230 | * @queue: crq_queue to initialize and register | ||
231 | * @hostdata: ibmvscsi_host_data of host | ||
232 | * | ||
233 | */ | ||
234 | void ibmvscsi_reset_crq_queue(struct crq_queue *queue, | ||
235 | struct ibmvscsi_host_data *hostdata) | ||
236 | { | ||
237 | int rc; | ||
238 | struct vio_dev *vdev = to_vio_dev(hostdata->dev); | ||
239 | |||
240 | /* Close the CRQ */ | ||
241 | do { | ||
242 | rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); | ||
243 | } while ((rc == H_Busy) || (H_isLongBusy(rc))); | ||
244 | |||
245 | /* Clean out the queue */ | ||
246 | memset(queue->msgs, 0x00, PAGE_SIZE); | ||
247 | queue->cur = 0; | ||
248 | |||
249 | /* And re-open it again */ | ||
250 | rc = plpar_hcall_norets(H_REG_CRQ, | ||
251 | vdev->unit_address, | ||
252 | queue->msg_token, PAGE_SIZE); | ||
253 | if (rc == 2) { | ||
254 | /* Adapter is good, but other end is not ready */ | ||
255 | printk(KERN_WARNING "ibmvscsi: Partner adapter not ready\n"); | ||
256 | } else if (rc != 0) { | ||
257 | printk(KERN_WARNING | ||
258 | "ibmvscsi: couldn't register crq--rc 0x%x\n", rc); | ||
259 | } | ||
260 | } | ||
diff --git a/drivers/scsi/ibmvscsi/srp.h b/drivers/scsi/ibmvscsi/srp.h new file mode 100644 index 000000000000..e952c1cd9740 --- /dev/null +++ b/drivers/scsi/ibmvscsi/srp.h | |||
@@ -0,0 +1,225 @@ | |||
1 | /*****************************************************************************/ | ||
2 | /* srp.h -- SCSI RDMA Protocol definitions */ | ||
3 | /* */ | ||
4 | /* Written By: Colin Devilbis, IBM Corporation */ | ||
5 | /* */ | ||
6 | /* Copyright (C) 2003 IBM Corporation */ | ||
7 | /* */ | ||
8 | /* This program is free software; you can redistribute it and/or modify */ | ||
9 | /* it under the terms of the GNU General Public License as published by */ | ||
10 | /* the Free Software Foundation; either version 2 of the License, or */ | ||
11 | /* (at your option) any later version. */ | ||
12 | /* */ | ||
13 | /* This program is distributed in the hope that it will be useful, */ | ||
14 | /* but WITHOUT ANY WARRANTY; without even the implied warranty of */ | ||
15 | /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */ | ||
16 | /* GNU General Public License for more details. */ | ||
17 | /* */ | ||
18 | /* You should have received a copy of the GNU General Public License */ | ||
19 | /* along with this program; if not, write to the Free Software */ | ||
20 | /* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ | ||
21 | /* */ | ||
22 | /* */ | ||
23 | /* This file contains structures and definitions for the SCSI RDMA Protocol */ | ||
24 | /* (SRP) as defined in the T10 standard available at www.t10.org. This */ | ||
25 | /* file was based on the 16a version of the standard */ | ||
26 | /* */ | ||
27 | /*****************************************************************************/ | ||
28 | #ifndef SRP_H | ||
29 | #define SRP_H | ||
30 | |||
31 | #define PACKED __attribute__((packed)) | ||
32 | |||
33 | enum srp_types { | ||
34 | SRP_LOGIN_REQ_TYPE = 0x00, | ||
35 | SRP_LOGIN_RSP_TYPE = 0xC0, | ||
36 | SRP_LOGIN_REJ_TYPE = 0x80, | ||
37 | SRP_I_LOGOUT_TYPE = 0x03, | ||
38 | SRP_T_LOGOUT_TYPE = 0x80, | ||
39 | SRP_TSK_MGMT_TYPE = 0x01, | ||
40 | SRP_CMD_TYPE = 0x02, | ||
41 | SRP_RSP_TYPE = 0xC1, | ||
42 | SRP_CRED_REQ_TYPE = 0x81, | ||
43 | SRP_CRED_RSP_TYPE = 0x41, | ||
44 | SRP_AER_REQ_TYPE = 0x82, | ||
45 | SRP_AER_RSP_TYPE = 0x42 | ||
46 | }; | ||
47 | |||
48 | enum srp_descriptor_formats { | ||
49 | SRP_NO_BUFFER = 0x00, | ||
50 | SRP_DIRECT_BUFFER = 0x01, | ||
51 | SRP_INDIRECT_BUFFER = 0x02 | ||
52 | }; | ||
53 | |||
54 | struct memory_descriptor { | ||
55 | u64 virtual_address; | ||
56 | u32 memory_handle; | ||
57 | u32 length; | ||
58 | }; | ||
59 | |||
60 | struct indirect_descriptor { | ||
61 | struct memory_descriptor head; | ||
62 | u32 total_length; | ||
63 | struct memory_descriptor list[1] PACKED; | ||
64 | }; | ||
65 | |||
66 | struct srp_generic { | ||
67 | u8 type; | ||
68 | u8 reserved1[7]; | ||
69 | u64 tag; | ||
70 | }; | ||
71 | |||
72 | struct srp_login_req { | ||
73 | u8 type; | ||
74 | u8 reserved1[7]; | ||
75 | u64 tag; | ||
76 | u32 max_requested_initiator_to_target_iulen; | ||
77 | u32 reserved2; | ||
78 | u16 required_buffer_formats; | ||
79 | u8 reserved3:6; | ||
80 | u8 multi_channel_action:2; | ||
81 | u8 reserved4; | ||
82 | u32 reserved5; | ||
83 | u8 initiator_port_identifier[16]; | ||
84 | u8 target_port_identifier[16]; | ||
85 | }; | ||
86 | |||
87 | struct srp_login_rsp { | ||
88 | u8 type; | ||
89 | u8 reserved1[3]; | ||
90 | u32 request_limit_delta; | ||
91 | u64 tag; | ||
92 | u32 max_initiator_to_target_iulen; | ||
93 | u32 max_target_to_initiator_iulen; | ||
94 | u16 supported_buffer_formats; | ||
95 | u8 reserved2:6; | ||
96 | u8 multi_channel_result:2; | ||
97 | u8 reserved3; | ||
98 | u8 reserved4[24]; | ||
99 | }; | ||
100 | |||
101 | struct srp_login_rej { | ||
102 | u8 type; | ||
103 | u8 reserved1[3]; | ||
104 | u32 reason; | ||
105 | u64 tag; | ||
106 | u64 reserved2; | ||
107 | u16 supported_buffer_formats; | ||
108 | u8 reserved3[6]; | ||
109 | }; | ||
110 | |||
111 | struct srp_i_logout { | ||
112 | u8 type; | ||
113 | u8 reserved1[7]; | ||
114 | u64 tag; | ||
115 | }; | ||
116 | |||
117 | struct srp_t_logout { | ||
118 | u8 type; | ||
119 | u8 reserved1[3]; | ||
120 | u32 reason; | ||
121 | u64 tag; | ||
122 | }; | ||
123 | |||
124 | struct srp_tsk_mgmt { | ||
125 | u8 type; | ||
126 | u8 reserved1[7]; | ||
127 | u64 tag; | ||
128 | u32 reserved2; | ||
129 | u64 lun PACKED; | ||
130 | u8 reserved3; | ||
131 | u8 reserved4; | ||
132 | u8 task_mgmt_flags; | ||
133 | u8 reserved5; | ||
134 | u64 managed_task_tag; | ||
135 | u64 reserved6; | ||
136 | }; | ||
137 | |||
138 | struct srp_cmd { | ||
139 | u8 type; | ||
140 | u32 reserved1 PACKED; | ||
141 | u8 data_out_format:4; | ||
142 | u8 data_in_format:4; | ||
143 | u8 data_out_count; | ||
144 | u8 data_in_count; | ||
145 | u64 tag; | ||
146 | u32 reserved2; | ||
147 | u64 lun PACKED; | ||
148 | u8 reserved3; | ||
149 | u8 reserved4:5; | ||
150 | u8 task_attribute:3; | ||
151 | u8 reserved5; | ||
152 | u8 additional_cdb_len; | ||
153 | u8 cdb[16]; | ||
154 | u8 additional_data[0x100 - 0x30]; | ||
155 | }; | ||
156 | |||
157 | struct srp_rsp { | ||
158 | u8 type; | ||
159 | u8 reserved1[3]; | ||
160 | u32 request_limit_delta; | ||
161 | u64 tag; | ||
162 | u16 reserved2; | ||
163 | u8 reserved3:2; | ||
164 | u8 diunder:1; | ||
165 | u8 diover:1; | ||
166 | u8 dounder:1; | ||
167 | u8 doover:1; | ||
168 | u8 snsvalid:1; | ||
169 | u8 rspvalid:1; | ||
170 | u8 status; | ||
171 | u32 data_in_residual_count; | ||
172 | u32 data_out_residual_count; | ||
173 | u32 sense_data_list_length; | ||
174 | u32 response_data_list_length; | ||
175 | u8 sense_and_response_data[18]; | ||
176 | }; | ||
177 | |||
178 | struct srp_cred_req { | ||
179 | u8 type; | ||
180 | u8 reserved1[3]; | ||
181 | u32 request_limit_delta; | ||
182 | u64 tag; | ||
183 | }; | ||
184 | |||
185 | struct srp_cred_rsp { | ||
186 | u8 type; | ||
187 | u8 reserved1[7]; | ||
188 | u64 tag; | ||
189 | }; | ||
190 | |||
191 | struct srp_aer_req { | ||
192 | u8 type; | ||
193 | u8 reserved1[3]; | ||
194 | u32 request_limit_delta; | ||
195 | u64 tag; | ||
196 | u32 reserved2; | ||
197 | u64 lun; | ||
198 | u32 sense_data_list_length; | ||
199 | u32 reserved3; | ||
200 | u8 sense_data[20]; | ||
201 | }; | ||
202 | |||
203 | struct srp_aer_rsp { | ||
204 | u8 type; | ||
205 | u8 reserved1[7]; | ||
206 | u64 tag; | ||
207 | }; | ||
208 | |||
209 | union srp_iu { | ||
210 | struct srp_generic generic; | ||
211 | struct srp_login_req login_req; | ||
212 | struct srp_login_rsp login_rsp; | ||
213 | struct srp_login_rej login_rej; | ||
214 | struct srp_i_logout i_logout; | ||
215 | struct srp_t_logout t_logout; | ||
216 | struct srp_tsk_mgmt tsk_mgmt; | ||
217 | struct srp_cmd cmd; | ||
218 | struct srp_rsp rsp; | ||
219 | struct srp_cred_req cred_req; | ||
220 | struct srp_cred_rsp cred_rsp; | ||
221 | struct srp_aer_req aer_req; | ||
222 | struct srp_aer_rsp aer_rsp; | ||
223 | }; | ||
224 | |||
225 | #endif | ||
diff --git a/drivers/scsi/ibmvscsi/viosrp.h b/drivers/scsi/ibmvscsi/viosrp.h new file mode 100644 index 000000000000..6a6bba8a2f34 --- /dev/null +++ b/drivers/scsi/ibmvscsi/viosrp.h | |||
@@ -0,0 +1,126 @@ | |||
1 | /*****************************************************************************/ | ||
2 | /* viosrp.h -- SCSI RDMA Protocol (SRP) definitions for virtual I/O */ | ||
3 | /* */ | ||
4 | /* Written By: Colin Devilbis, IBM Corporation */ | ||
5 | /* */ | ||
6 | /* Copyright (C) 2003 IBM Corporation */ | ||
7 | /* */ | ||
8 | /* This program is free software; you can redistribute it and/or modify */ | ||
9 | /* it under the terms of the GNU General Public License as published by */ | ||
10 | /* the Free Software Foundation; either version 2 of the License, or */ | ||
11 | /* (at your option) any later version. */ | ||
12 | /* */ | ||
13 | /* This program is distributed in the hope that it will be useful, */ | ||
14 | /* but WITHOUT ANY WARRANTY; without even the implied warranty of */ | ||
15 | /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */ | ||
16 | /* GNU General Public License for more details. */ | ||
17 | /* */ | ||
18 | /* You should have received a copy of the GNU General Public License */ | ||
19 | /* along with this program; if not, write to the Free Software */ | ||
20 | /* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ | ||
21 | /* */ | ||
22 | /* */ | ||
23 | /* This file contains structures and definitions for IBM RPA (RS/6000 */ | ||
24 | /* platform architecture) implementation of the SRP (SCSI RDMA Protocol) */ | ||
25 | /* standard. SRP is used on IBM iSeries and pSeries platforms to send SCSI */ | ||
26 | /* commands between logical partitions. */ | ||
27 | /* */ | ||
28 | /* SRP Information Units (IUs) are sent on a "Command/Response Queue" (CRQ) */ | ||
29 | /* between partitions. The definitions in this file are architected, */ | ||
30 | /* and cannot be changed without breaking compatibility with other versions */ | ||
31 | /* of Linux and other operating systems (AIX, OS/400) that talk this protocol*/ | ||
32 | /* between logical partitions */ | ||
33 | /*****************************************************************************/ | ||
34 | #ifndef VIOSRP_H | ||
35 | #define VIOSRP_H | ||
36 | #include "srp.h" | ||
37 | |||
38 | enum viosrp_crq_formats { | ||
39 | VIOSRP_SRP_FORMAT = 0x01, | ||
40 | VIOSRP_MAD_FORMAT = 0x02, | ||
41 | VIOSRP_OS400_FORMAT = 0x03, | ||
42 | VIOSRP_AIX_FORMAT = 0x04, | ||
43 | VIOSRP_LINUX_FORMAT = 0x06, | ||
44 | VIOSRP_INLINE_FORMAT = 0x07 | ||
45 | }; | ||
46 | |||
47 | struct viosrp_crq { | ||
48 | u8 valid; /* used by RPA */ | ||
49 | u8 format; /* SCSI vs out-of-band */ | ||
50 | u8 reserved; | ||
51 | u8 status; /* non-scsi failure? (e.g. DMA failure) */ | ||
52 | u16 timeout; /* in seconds */ | ||
53 | u16 IU_length; /* in bytes */ | ||
54 | u64 IU_data_ptr; /* the TCE for transferring data */ | ||
55 | }; | ||
56 | |||
57 | /* MADs are Management requests above and beyond the IUs defined in the SRP | ||
58 | * standard. | ||
59 | */ | ||
60 | enum viosrp_mad_types { | ||
61 | VIOSRP_EMPTY_IU_TYPE = 0x01, | ||
62 | VIOSRP_ERROR_LOG_TYPE = 0x02, | ||
63 | VIOSRP_ADAPTER_INFO_TYPE = 0x03, | ||
64 | VIOSRP_HOST_CONFIG_TYPE = 0x04 | ||
65 | }; | ||
66 | |||
67 | /* | ||
68 | * Common MAD header | ||
69 | */ | ||
70 | struct mad_common { | ||
71 | u32 type; | ||
72 | u16 status; | ||
73 | u16 length; | ||
74 | u64 tag; | ||
75 | }; | ||
76 | |||
77 | /* | ||
78 | * All SRP (and MAD) requests normally flow from the | ||
79 | * client to the server. There is no way for the server to send | ||
80 | * an asynchronous message back to the client. The Empty IU is used | ||
81 | * to hang out a meaningless request to the server so that it can respond | ||
82 | * asynchronously with something like a SCSI AER. | ||
83 | */ | ||
84 | struct viosrp_empty_iu { | ||
85 | struct mad_common common; | ||
86 | u64 buffer; | ||
87 | u32 port; | ||
88 | }; | ||
89 | |||
90 | struct viosrp_error_log { | ||
91 | struct mad_common common; | ||
92 | u64 buffer; | ||
93 | }; | ||
94 | |||
95 | struct viosrp_adapter_info { | ||
96 | struct mad_common common; | ||
97 | u64 buffer; | ||
98 | }; | ||
99 | |||
100 | struct viosrp_host_config { | ||
101 | struct mad_common common; | ||
102 | u64 buffer; | ||
103 | }; | ||
104 | |||
105 | union mad_iu { | ||
106 | struct viosrp_empty_iu empty_iu; | ||
107 | struct viosrp_error_log error_log; | ||
108 | struct viosrp_adapter_info adapter_info; | ||
109 | struct viosrp_host_config host_config; | ||
110 | }; | ||
111 | |||
112 | union viosrp_iu { | ||
113 | union srp_iu srp; | ||
114 | union mad_iu mad; | ||
115 | }; | ||
116 | |||
117 | struct mad_adapter_info_data { | ||
118 | char srp_version[8]; | ||
119 | char partition_name[96]; | ||
120 | u32 partition_number; | ||
121 | u32 mad_version; | ||
122 | u32 os_type; | ||
123 | u32 port_max_txu[8]; /* per-port maximum transfer */ | ||
124 | }; | ||
125 | |||
126 | #endif | ||