author	Christoph Hellwig <hch@lst.de>	2014-04-15 06:26:50 -0400
committer	Christoph Hellwig <hch@lst.de>	2014-07-17 16:07:43 -0400
commit	074dc37a7b7b008ce1382ed4eec3f0008e541ba7 (patch)
tree	49765f24b16311d596c703a6b70d4211bf383b22
parent	91921e016a2199e7afe5933c94bd9f723d946598 (diff)
ibmvstgt: remove
The IBM virtual SCSI protocol has been obsoleted by ibmvfc, and there are
no reported users of the driver left.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
-rw-r--r--	drivers/scsi/Kconfig	14
-rw-r--r--	drivers/scsi/Makefile	1
-rw-r--r--	drivers/scsi/ibmvscsi/Makefile	1
-rw-r--r--	drivers/scsi/ibmvscsi/ibmvstgt.c	1001
4 files changed, 0 insertions, 1017 deletions
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index cfc6f39ce978..10b3cc885d9c 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -834,20 +834,6 @@ config SCSI_IBMVSCSI
 	  To compile this driver as a module, choose M here: the
 	  module will be called ibmvscsi.
 
-config SCSI_IBMVSCSIS
-	tristate "IBM Virtual SCSI Server support"
-	depends on PPC_PSERIES && SCSI_SRP && SCSI_SRP_TGT_ATTRS
-	help
-	  This is the SRP target driver for IBM pSeries virtual environments.
-
-	  The userspace component needed to initialize the driver and
-	  documentation can be found:
-
-	  http://stgt.berlios.de/
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called ibmvstgt.
-
 config SCSI_IBMVFC
 	tristate "IBM Virtual FC support"
 	depends on PPC_PSERIES && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index e172d4f8e02f..aacad2f077ea 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -129,7 +129,6 @@ obj-$(CONFIG_SCSI_NSP32) += nsp32.o
 obj-$(CONFIG_SCSI_IPR)		+= ipr.o
 obj-$(CONFIG_SCSI_SRP)		+= libsrp.o
 obj-$(CONFIG_SCSI_IBMVSCSI)	+= ibmvscsi/
-obj-$(CONFIG_SCSI_IBMVSCSIS)	+= ibmvscsi/
 obj-$(CONFIG_SCSI_IBMVFC)	+= ibmvscsi/
 obj-$(CONFIG_SCSI_HPTIOP)	+= hptiop.o
 obj-$(CONFIG_SCSI_STEX)		+= stex.o
diff --git a/drivers/scsi/ibmvscsi/Makefile b/drivers/scsi/ibmvscsi/Makefile
index cb150d1e5850..3840c64f2966 100644
--- a/drivers/scsi/ibmvscsi/Makefile
+++ b/drivers/scsi/ibmvscsi/Makefile
@@ -1,3 +1,2 @@
 obj-$(CONFIG_SCSI_IBMVSCSI)	+= ibmvscsi.o
-obj-$(CONFIG_SCSI_IBMVSCSIS)	+= ibmvstgt.o
 obj-$(CONFIG_SCSI_IBMVFC)	+= ibmvfc.o
diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c
deleted file mode 100644
index 56f8a861ed72..000000000000
--- a/drivers/scsi/ibmvscsi/ibmvstgt.c
+++ /dev/null
@@ -1,1001 +0,0 @@
1/*
2 * IBM eServer i/pSeries Virtual SCSI Target Driver
3 * Copyright (C) 2003-2005 Dave Boutcher (boutcher@us.ibm.com) IBM Corp.
4 * Santiago Leon (santil@us.ibm.com) IBM Corp.
5 * Linda Xie (lxie@us.ibm.com) IBM Corp.
6 *
7 * Copyright (C) 2005-2006 FUJITA Tomonori <tomof@acm.org>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23 */
24#include <linux/interrupt.h>
25#include <linux/module.h>
26#include <linux/slab.h>
27#include <scsi/scsi.h>
28#include <scsi/scsi_host.h>
29#include <scsi/scsi_transport_srp.h>
30#include <scsi/scsi_tgt.h>
31#include <scsi/libsrp.h>
32#include <asm/hvcall.h>
33#include <asm/iommu.h>
34#include <asm/prom.h>
35#include <asm/vio.h>
36
37#include "ibmvscsi.h"
38
39#define INITIAL_SRP_LIMIT 16
40#define DEFAULT_MAX_SECTORS 256
41
42#define TGT_NAME "ibmvstgt"
43
44/*
45 * Hypervisor calls.
46 */
47#define h_copy_rdma(l, sa, sb, da, db) \
48 plpar_hcall_norets(H_COPY_RDMA, l, sa, sb, da, db)
49#define h_send_crq(ua, l, h) \
50 plpar_hcall_norets(H_SEND_CRQ, ua, l, h)
51#define h_reg_crq(ua, tok, sz)\
52 plpar_hcall_norets(H_REG_CRQ, ua, tok, sz);
53#define h_free_crq(ua) \
54 plpar_hcall_norets(H_FREE_CRQ, ua);
55
56/* tmp - will replace with SCSI logging stuff */
57#define eprintk(fmt, args...) \
58do { \
59 printk("%s(%d) " fmt, __func__, __LINE__, ##args); \
60} while (0)
61/* #define dprintk eprintk */
62#define dprintk(fmt, args...)
63
64struct vio_port {
65 struct vio_dev *dma_dev;
66
67 struct crq_queue crq_queue;
68 struct work_struct crq_work;
69
70 unsigned long liobn;
71 unsigned long riobn;
72 struct srp_target *target;
73
74 struct srp_rport *rport;
75};
76
77static struct workqueue_struct *vtgtd;
78static struct scsi_transport_template *ibmvstgt_transport_template;
79
80/*
81 * These are fixed for the system and come from the Open Firmware device tree.
82 * We just store them here to save getting them every time.
83 */
84static char system_id[64] = "";
85static char partition_name[97] = "UNKNOWN";
86static unsigned int partition_number = -1;
87
88static struct vio_port *target_to_port(struct srp_target *target)
89{
90 return (struct vio_port *) target->ldata;
91}
92
93static inline union viosrp_iu *vio_iu(struct iu_entry *iue)
94{
95 return (union viosrp_iu *) (iue->sbuf->buf);
96}
97
98static int send_iu(struct iu_entry *iue, uint64_t length, uint8_t format)
99{
100 struct srp_target *target = iue->target;
101 struct vio_port *vport = target_to_port(target);
102 long rc, rc1;
103 union {
104 struct viosrp_crq cooked;
105 uint64_t raw[2];
106 } crq;
107
108 /* First copy the SRP */
109 rc = h_copy_rdma(length, vport->liobn, iue->sbuf->dma,
110 vport->riobn, iue->remote_token);
111
112 if (rc)
113 eprintk("Error %ld transferring data\n", rc);
114
115 crq.cooked.valid = 0x80;
116 crq.cooked.format = format;
117 crq.cooked.reserved = 0x00;
118 crq.cooked.timeout = 0x00;
119 crq.cooked.IU_length = length;
120 crq.cooked.IU_data_ptr = vio_iu(iue)->srp.rsp.tag;
121
122 if (rc == 0)
123 crq.cooked.status = 0x99; /* Just needs to be non-zero */
124 else
125 crq.cooked.status = 0x00;
126
127 rc1 = h_send_crq(vport->dma_dev->unit_address, crq.raw[0], crq.raw[1]);
128
129 if (rc1) {
130 eprintk("%ld sending response\n", rc1);
131 return rc1;
132 }
133
134 return rc;
135}
136
137#define SRP_RSP_SENSE_DATA_LEN 18
138
139static int send_rsp(struct iu_entry *iue, struct scsi_cmnd *sc,
140 unsigned char status, unsigned char asc)
141{
142 union viosrp_iu *iu = vio_iu(iue);
143 uint64_t tag = iu->srp.rsp.tag;
144
145 /* If the linked bit is on and status is good */
146 if (test_bit(V_LINKED, &iue->flags) && (status == NO_SENSE))
147 status = 0x10;
148
149 memset(iu, 0, sizeof(struct srp_rsp));
150 iu->srp.rsp.opcode = SRP_RSP;
151 iu->srp.rsp.req_lim_delta = 1;
152 iu->srp.rsp.tag = tag;
153
154 if (test_bit(V_DIOVER, &iue->flags))
155 iu->srp.rsp.flags |= SRP_RSP_FLAG_DIOVER;
156
157 iu->srp.rsp.data_in_res_cnt = 0;
158 iu->srp.rsp.data_out_res_cnt = 0;
159
160 iu->srp.rsp.flags &= ~SRP_RSP_FLAG_RSPVALID;
161
162 iu->srp.rsp.resp_data_len = 0;
163 iu->srp.rsp.status = status;
164 if (status) {
165 uint8_t *sense = iu->srp.rsp.data;
166
167 if (sc) {
168 iu->srp.rsp.flags |= SRP_RSP_FLAG_SNSVALID;
169 iu->srp.rsp.sense_data_len = SCSI_SENSE_BUFFERSIZE;
170 memcpy(sense, sc->sense_buffer, SCSI_SENSE_BUFFERSIZE);
171 } else {
172 iu->srp.rsp.status = SAM_STAT_CHECK_CONDITION;
173 iu->srp.rsp.flags |= SRP_RSP_FLAG_SNSVALID;
174 iu->srp.rsp.sense_data_len = SRP_RSP_SENSE_DATA_LEN;
175
176 /* Valid bit and 'current errors' */
177 sense[0] = (0x1 << 7 | 0x70);
178 /* Sense key */
179 sense[2] = status;
180 /* Additional sense length */
181 sense[7] = 0xa; /* 10 bytes */
182 /* Additional sense code */
183 sense[12] = asc;
184 }
185 }
186
187 send_iu(iue, sizeof(iu->srp.rsp) + SRP_RSP_SENSE_DATA_LEN,
188 VIOSRP_SRP_FORMAT);
189
190 return 0;
191}
192
193static void handle_cmd_queue(struct srp_target *target)
194{
195 struct Scsi_Host *shost = target->shost;
196 struct srp_rport *rport = target_to_port(target)->rport;
197 struct iu_entry *iue;
198 struct srp_cmd *cmd;
199 unsigned long flags;
200 int err;
201
202retry:
203 spin_lock_irqsave(&target->lock, flags);
204
205 list_for_each_entry(iue, &target->cmd_queue, ilist) {
206 if (!test_and_set_bit(V_FLYING, &iue->flags)) {
207 spin_unlock_irqrestore(&target->lock, flags);
208 cmd = iue->sbuf->buf;
209 err = srp_cmd_queue(shost, cmd, iue,
210 (unsigned long)rport, 0);
211 if (err) {
212 eprintk("cannot queue cmd %p %d\n", cmd, err);
213 srp_iu_put(iue);
214 }
215 goto retry;
216 }
217 }
218
219 spin_unlock_irqrestore(&target->lock, flags);
220}
221
222static int ibmvstgt_rdma(struct scsi_cmnd *sc, struct scatterlist *sg, int nsg,
223 struct srp_direct_buf *md, int nmd,
224 enum dma_data_direction dir, unsigned int rest)
225{
226 struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
227 struct srp_target *target = iue->target;
228 struct vio_port *vport = target_to_port(target);
229 dma_addr_t token;
230 long err;
231 unsigned int done = 0;
232 int i, sidx, soff;
233
234 sidx = soff = 0;
235 token = sg_dma_address(sg + sidx);
236
237 for (i = 0; i < nmd && rest; i++) {
238 unsigned int mdone, mlen;
239
240 mlen = min(rest, md[i].len);
241 for (mdone = 0; mlen;) {
242 int slen = min(sg_dma_len(sg + sidx) - soff, mlen);
243
244 if (dir == DMA_TO_DEVICE)
245 err = h_copy_rdma(slen,
246 vport->riobn,
247 md[i].va + mdone,
248 vport->liobn,
249 token + soff);
250 else
251 err = h_copy_rdma(slen,
252 vport->liobn,
253 token + soff,
254 vport->riobn,
255 md[i].va + mdone);
256
257 if (err != H_SUCCESS) {
258 eprintk("rdma error %d %d %ld\n", dir, slen, err);
259 return -EIO;
260 }
261
262 mlen -= slen;
263 mdone += slen;
264 soff += slen;
265 done += slen;
266
267 if (soff == sg_dma_len(sg + sidx)) {
268 sidx++;
269 soff = 0;
270 token = sg_dma_address(sg + sidx);
271
272 if (sidx > nsg) {
273 eprintk("out of sg %p %d %d\n",
274 iue, sidx, nsg);
275 return -EIO;
276 }
277 }
278 };
279
280 rest -= mlen;
281 }
282 return 0;
283}
284
285static int ibmvstgt_cmd_done(struct scsi_cmnd *sc,
286 void (*done)(struct scsi_cmnd *))
287{
288 unsigned long flags;
289 struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
290 struct srp_target *target = iue->target;
291 int err = 0;
292
293 dprintk("%p %p %x %u\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0],
294 scsi_sg_count(sc));
295
296 if (scsi_sg_count(sc))
297 err = srp_transfer_data(sc, &vio_iu(iue)->srp.cmd, ibmvstgt_rdma, 1, 1);
298
299 spin_lock_irqsave(&target->lock, flags);
300 list_del(&iue->ilist);
301 spin_unlock_irqrestore(&target->lock, flags);
302
303 if (err|| sc->result != SAM_STAT_GOOD) {
304 eprintk("operation failed %p %d %x\n",
305 iue, sc->result, vio_iu(iue)->srp.cmd.cdb[0]);
306 send_rsp(iue, sc, HARDWARE_ERROR, 0x00);
307 } else
308 send_rsp(iue, sc, NO_SENSE, 0x00);
309
310 done(sc);
311 srp_iu_put(iue);
312 return 0;
313}
314
315int send_adapter_info(struct iu_entry *iue,
316 dma_addr_t remote_buffer, uint16_t length)
317{
318 struct srp_target *target = iue->target;
319 struct vio_port *vport = target_to_port(target);
320 struct Scsi_Host *shost = target->shost;
321 dma_addr_t data_token;
322 struct mad_adapter_info_data *info;
323 int err;
324
325 info = dma_alloc_coherent(target->dev, sizeof(*info), &data_token,
326 GFP_KERNEL);
327 if (!info) {
328 eprintk("bad dma_alloc_coherent %p\n", target);
329 return 1;
330 }
331
332 /* Get remote info */
333 err = h_copy_rdma(sizeof(*info), vport->riobn, remote_buffer,
334 vport->liobn, data_token);
335 if (err == H_SUCCESS) {
336 dprintk("Client connect: %s (%d)\n",
337 info->partition_name, info->partition_number);
338 }
339
340 memset(info, 0, sizeof(*info));
341
342 strcpy(info->srp_version, "16.a");
343 strncpy(info->partition_name, partition_name,
344 sizeof(info->partition_name));
345 info->partition_number = partition_number;
346 info->mad_version = 1;
347 info->os_type = 2;
348 info->port_max_txu[0] = shost->hostt->max_sectors << 9;
349
350 /* Send our info to remote */
351 err = h_copy_rdma(sizeof(*info), vport->liobn, data_token,
352 vport->riobn, remote_buffer);
353
354 dma_free_coherent(target->dev, sizeof(*info), info, data_token);
355
356 if (err != H_SUCCESS) {
357 eprintk("Error sending adapter info %d\n", err);
358 return 1;
359 }
360
361 return 0;
362}
363
364static void process_login(struct iu_entry *iue)
365{
366 union viosrp_iu *iu = vio_iu(iue);
367 struct srp_login_rsp *rsp = &iu->srp.login_rsp;
368 uint64_t tag = iu->srp.rsp.tag;
369 struct Scsi_Host *shost = iue->target->shost;
370 struct srp_target *target = host_to_srp_target(shost);
371 struct vio_port *vport = target_to_port(target);
372 struct srp_rport_identifiers ids;
373
374 memset(&ids, 0, sizeof(ids));
375 sprintf(ids.port_id, "%x", vport->dma_dev->unit_address);
376 ids.roles = SRP_RPORT_ROLE_INITIATOR;
377 if (!vport->rport)
378 vport->rport = srp_rport_add(shost, &ids);
379
380 /* TODO handle case that requested size is wrong and
381 * buffer format is wrong
382 */
383 memset(iu, 0, sizeof(struct srp_login_rsp));
384 rsp->opcode = SRP_LOGIN_RSP;
385 rsp->req_lim_delta = INITIAL_SRP_LIMIT;
386 rsp->tag = tag;
387 rsp->max_it_iu_len = sizeof(union srp_iu);
388 rsp->max_ti_iu_len = sizeof(union srp_iu);
389 /* direct and indirect */
390 rsp->buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT;
391
392 send_iu(iue, sizeof(*rsp), VIOSRP_SRP_FORMAT);
393}
394
395static inline void queue_cmd(struct iu_entry *iue)
396{
397 struct srp_target *target = iue->target;
398 unsigned long flags;
399
400 spin_lock_irqsave(&target->lock, flags);
401 list_add_tail(&iue->ilist, &target->cmd_queue);
402 spin_unlock_irqrestore(&target->lock, flags);
403}
404
405static int process_tsk_mgmt(struct iu_entry *iue)
406{
407 union viosrp_iu *iu = vio_iu(iue);
408 int fn;
409
410 dprintk("%p %u\n", iue, iu->srp.tsk_mgmt.tsk_mgmt_func);
411
412 switch (iu->srp.tsk_mgmt.tsk_mgmt_func) {
413 case SRP_TSK_ABORT_TASK:
414 fn = ABORT_TASK;
415 break;
416 case SRP_TSK_ABORT_TASK_SET:
417 fn = ABORT_TASK_SET;
418 break;
419 case SRP_TSK_CLEAR_TASK_SET:
420 fn = CLEAR_TASK_SET;
421 break;
422 case SRP_TSK_LUN_RESET:
423 fn = LOGICAL_UNIT_RESET;
424 break;
425 case SRP_TSK_CLEAR_ACA:
426 fn = CLEAR_ACA;
427 break;
428 default:
429 fn = 0;
430 }
431 if (fn)
432 scsi_tgt_tsk_mgmt_request(iue->target->shost,
433 (unsigned long)iue->target->shost,
434 fn,
435 iu->srp.tsk_mgmt.task_tag,
436 (struct scsi_lun *) &iu->srp.tsk_mgmt.lun,
437 iue);
438 else
439 send_rsp(iue, NULL, ILLEGAL_REQUEST, 0x20);
440
441 return !fn;
442}
443
444static int process_mad_iu(struct iu_entry *iue)
445{
446 union viosrp_iu *iu = vio_iu(iue);
447 struct viosrp_adapter_info *info;
448 struct viosrp_host_config *conf;
449
450 switch (iu->mad.empty_iu.common.type) {
451 case VIOSRP_EMPTY_IU_TYPE:
452 eprintk("%s\n", "Unsupported EMPTY MAD IU");
453 break;
454 case VIOSRP_ERROR_LOG_TYPE:
455 eprintk("%s\n", "Unsupported ERROR LOG MAD IU");
456 iu->mad.error_log.common.status = 1;
457 send_iu(iue, sizeof(iu->mad.error_log), VIOSRP_MAD_FORMAT);
458 break;
459 case VIOSRP_ADAPTER_INFO_TYPE:
460 info = &iu->mad.adapter_info;
461 info->common.status = send_adapter_info(iue, info->buffer,
462 info->common.length);
463 send_iu(iue, sizeof(*info), VIOSRP_MAD_FORMAT);
464 break;
465 case VIOSRP_HOST_CONFIG_TYPE:
466 conf = &iu->mad.host_config;
467 conf->common.status = 1;
468 send_iu(iue, sizeof(*conf), VIOSRP_MAD_FORMAT);
469 break;
470 default:
471 eprintk("Unknown type %u\n", iu->srp.rsp.opcode);
472 }
473
474 return 1;
475}
476
477static int process_srp_iu(struct iu_entry *iue)
478{
479 union viosrp_iu *iu = vio_iu(iue);
480 int done = 1;
481 u8 opcode = iu->srp.rsp.opcode;
482
483 switch (opcode) {
484 case SRP_LOGIN_REQ:
485 process_login(iue);
486 break;
487 case SRP_TSK_MGMT:
488 done = process_tsk_mgmt(iue);
489 break;
490 case SRP_CMD:
491 queue_cmd(iue);
492 done = 0;
493 break;
494 case SRP_LOGIN_RSP:
495 case SRP_I_LOGOUT:
496 case SRP_T_LOGOUT:
497 case SRP_RSP:
498 case SRP_CRED_REQ:
499 case SRP_CRED_RSP:
500 case SRP_AER_REQ:
501 case SRP_AER_RSP:
502 eprintk("Unsupported type %u\n", opcode);
503 break;
504 default:
505 eprintk("Unknown type %u\n", opcode);
506 }
507
508 return done;
509}
510
511static void process_iu(struct viosrp_crq *crq, struct srp_target *target)
512{
513 struct vio_port *vport = target_to_port(target);
514 struct iu_entry *iue;
515 long err;
516 int done = 1;
517
518 iue = srp_iu_get(target);
519 if (!iue) {
520 eprintk("Error getting IU from pool, %p\n", target);
521 return;
522 }
523
524 iue->remote_token = crq->IU_data_ptr;
525
526 err = h_copy_rdma(crq->IU_length, vport->riobn,
527 iue->remote_token, vport->liobn, iue->sbuf->dma);
528
529 if (err != H_SUCCESS) {
530 eprintk("%ld transferring data error %p\n", err, iue);
531 goto out;
532 }
533
534 if (crq->format == VIOSRP_MAD_FORMAT)
535 done = process_mad_iu(iue);
536 else
537 done = process_srp_iu(iue);
538out:
539 if (done)
540 srp_iu_put(iue);
541}
542
543static irqreturn_t ibmvstgt_interrupt(int dummy, void *data)
544{
545 struct srp_target *target = data;
546 struct vio_port *vport = target_to_port(target);
547
548 vio_disable_interrupts(vport->dma_dev);
549 queue_work(vtgtd, &vport->crq_work);
550
551 return IRQ_HANDLED;
552}
553
554static int crq_queue_create(struct crq_queue *queue, struct srp_target *target)
555{
556 int err;
557 struct vio_port *vport = target_to_port(target);
558
559 queue->msgs = (struct viosrp_crq *) get_zeroed_page(GFP_KERNEL);
560 if (!queue->msgs)
561 goto malloc_failed;
562 queue->size = PAGE_SIZE / sizeof(*queue->msgs);
563
564 queue->msg_token = dma_map_single(target->dev, queue->msgs,
565 queue->size * sizeof(*queue->msgs),
566 DMA_BIDIRECTIONAL);
567
568 if (dma_mapping_error(target->dev, queue->msg_token))
569 goto map_failed;
570
571 err = h_reg_crq(vport->dma_dev->unit_address, queue->msg_token,
572 PAGE_SIZE);
573
574 /* If the adapter was left active for some reason (like kexec)
575 * try freeing and re-registering
576 */
577 if (err == H_RESOURCE) {
578 do {
579 err = h_free_crq(vport->dma_dev->unit_address);
580 } while (err == H_BUSY || H_IS_LONG_BUSY(err));
581
582 err = h_reg_crq(vport->dma_dev->unit_address, queue->msg_token,
583 PAGE_SIZE);
584 }
585
586 if (err != H_SUCCESS && err != 2) {
587 eprintk("Error 0x%x opening virtual adapter\n", err);
588 goto reg_crq_failed;
589 }
590
591 err = request_irq(vport->dma_dev->irq, &ibmvstgt_interrupt,
592 0, "ibmvstgt", target);
593 if (err)
594 goto req_irq_failed;
595
596 vio_enable_interrupts(vport->dma_dev);
597
598 h_send_crq(vport->dma_dev->unit_address, 0xC001000000000000, 0);
599
600 queue->cur = 0;
601 spin_lock_init(&queue->lock);
602
603 return 0;
604
605req_irq_failed:
606 do {
607 err = h_free_crq(vport->dma_dev->unit_address);
608 } while (err == H_BUSY || H_IS_LONG_BUSY(err));
609
610reg_crq_failed:
611 dma_unmap_single(target->dev, queue->msg_token,
612 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
613map_failed:
614 free_page((unsigned long) queue->msgs);
615
616malloc_failed:
617 return -ENOMEM;
618}
619
620static void crq_queue_destroy(struct srp_target *target)
621{
622 struct vio_port *vport = target_to_port(target);
623 struct crq_queue *queue = &vport->crq_queue;
624 int err;
625
626 free_irq(vport->dma_dev->irq, target);
627 do {
628 err = h_free_crq(vport->dma_dev->unit_address);
629 } while (err == H_BUSY || H_IS_LONG_BUSY(err));
630
631 dma_unmap_single(target->dev, queue->msg_token,
632 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
633
634 free_page((unsigned long) queue->msgs);
635}
636
637static void process_crq(struct viosrp_crq *crq, struct srp_target *target)
638{
639 struct vio_port *vport = target_to_port(target);
640 dprintk("%x %x\n", crq->valid, crq->format);
641
642 switch (crq->valid) {
643 case 0xC0:
644 /* initialization */
645 switch (crq->format) {
646 case 0x01:
647 h_send_crq(vport->dma_dev->unit_address,
648 0xC002000000000000, 0);
649 break;
650 case 0x02:
651 break;
652 default:
653 eprintk("Unknown format %u\n", crq->format);
654 }
655 break;
656 case 0xFF:
657 /* transport event */
658 break;
659 case 0x80:
660 /* real payload */
661 switch (crq->format) {
662 case VIOSRP_SRP_FORMAT:
663 case VIOSRP_MAD_FORMAT:
664 process_iu(crq, target);
665 break;
666 case VIOSRP_OS400_FORMAT:
667 case VIOSRP_AIX_FORMAT:
668 case VIOSRP_LINUX_FORMAT:
669 case VIOSRP_INLINE_FORMAT:
670 eprintk("Unsupported format %u\n", crq->format);
671 break;
672 default:
673 eprintk("Unknown format %u\n", crq->format);
674 }
675 break;
676 default:
677 eprintk("unknown message type 0x%02x!?\n", crq->valid);
678 }
679}
680
681static inline struct viosrp_crq *next_crq(struct crq_queue *queue)
682{
683 struct viosrp_crq *crq;
684 unsigned long flags;
685
686 spin_lock_irqsave(&queue->lock, flags);
687 crq = &queue->msgs[queue->cur];
688 if (crq->valid & 0x80) {
689 if (++queue->cur == queue->size)
690 queue->cur = 0;
691 } else
692 crq = NULL;
693 spin_unlock_irqrestore(&queue->lock, flags);
694
695 return crq;
696}
697
698static void handle_crq(struct work_struct *work)
699{
700 struct vio_port *vport = container_of(work, struct vio_port, crq_work);
701 struct srp_target *target = vport->target;
702 struct viosrp_crq *crq;
703 int done = 0;
704
705 while (!done) {
706 while ((crq = next_crq(&vport->crq_queue)) != NULL) {
707 process_crq(crq, target);
708 crq->valid = 0x00;
709 }
710
711 vio_enable_interrupts(vport->dma_dev);
712
713 crq = next_crq(&vport->crq_queue);
714 if (crq) {
715 vio_disable_interrupts(vport->dma_dev);
716 process_crq(crq, target);
717 crq->valid = 0x00;
718 } else
719 done = 1;
720 }
721
722 handle_cmd_queue(target);
723}
724
725
726static int ibmvstgt_eh_abort_handler(struct scsi_cmnd *sc)
727{
728 unsigned long flags;
729 struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
730 struct srp_target *target = iue->target;
731
732 dprintk("%p %p %x\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0]);
733
734 spin_lock_irqsave(&target->lock, flags);
735 list_del(&iue->ilist);
736 spin_unlock_irqrestore(&target->lock, flags);
737
738 srp_iu_put(iue);
739
740 return 0;
741}
742
743static int ibmvstgt_tsk_mgmt_response(struct Scsi_Host *shost,
744 u64 itn_id, u64 mid, int result)
745{
746 struct iu_entry *iue = (struct iu_entry *) ((void *) mid);
747 union viosrp_iu *iu = vio_iu(iue);
748 unsigned char status, asc;
749
750 eprintk("%p %d\n", iue, result);
751 status = NO_SENSE;
752 asc = 0;
753
754 switch (iu->srp.tsk_mgmt.tsk_mgmt_func) {
755 case SRP_TSK_ABORT_TASK:
756 asc = 0x14;
757 if (result)
758 status = ABORTED_COMMAND;
759 break;
760 default:
761 break;
762 }
763
764 send_rsp(iue, NULL, status, asc);
765 srp_iu_put(iue);
766
767 return 0;
768}
769
770static int ibmvstgt_it_nexus_response(struct Scsi_Host *shost, u64 itn_id,
771 int result)
772{
773 struct srp_target *target = host_to_srp_target(shost);
774 struct vio_port *vport = target_to_port(target);
775
776 if (result) {
777 eprintk("%p %d\n", shost, result);
778 srp_rport_del(vport->rport);
779 vport->rport = NULL;
780 }
781 return 0;
782}
783
784static ssize_t system_id_show(struct device *dev,
785 struct device_attribute *attr, char *buf)
786{
787 return snprintf(buf, PAGE_SIZE, "%s\n", system_id);
788}
789
790static ssize_t partition_number_show(struct device *dev,
791 struct device_attribute *attr, char *buf)
792{
793 return snprintf(buf, PAGE_SIZE, "%x\n", partition_number);
794}
795
796static ssize_t unit_address_show(struct device *dev,
797 struct device_attribute *attr, char *buf)
798{
799 struct Scsi_Host *shost = class_to_shost(dev);
800 struct srp_target *target = host_to_srp_target(shost);
801 struct vio_port *vport = target_to_port(target);
802 return snprintf(buf, PAGE_SIZE, "%x\n", vport->dma_dev->unit_address);
803}
804
805static DEVICE_ATTR(system_id, S_IRUGO, system_id_show, NULL);
806static DEVICE_ATTR(partition_number, S_IRUGO, partition_number_show, NULL);
807static DEVICE_ATTR(unit_address, S_IRUGO, unit_address_show, NULL);
808
809static struct device_attribute *ibmvstgt_attrs[] = {
810 &dev_attr_system_id,
811 &dev_attr_partition_number,
812 &dev_attr_unit_address,
813 NULL,
814};
815
816static struct scsi_host_template ibmvstgt_sht = {
817 .name = TGT_NAME,
818 .module = THIS_MODULE,
819 .can_queue = INITIAL_SRP_LIMIT,
820 .sg_tablesize = SG_ALL,
821 .use_clustering = DISABLE_CLUSTERING,
822 .max_sectors = DEFAULT_MAX_SECTORS,
823 .transfer_response = ibmvstgt_cmd_done,
824 .eh_abort_handler = ibmvstgt_eh_abort_handler,
825 .shost_attrs = ibmvstgt_attrs,
826 .proc_name = TGT_NAME,
827 .supported_mode = MODE_TARGET,
828};
829
830static int ibmvstgt_probe(struct vio_dev *dev, const struct vio_device_id *id)
831{
832 struct Scsi_Host *shost;
833 struct srp_target *target;
834 struct vio_port *vport;
835 unsigned int *dma, dma_size;
836 int err = -ENOMEM;
837
838 vport = kzalloc(sizeof(struct vio_port), GFP_KERNEL);
839 if (!vport)
840 return err;
841 shost = scsi_host_alloc(&ibmvstgt_sht, sizeof(struct srp_target));
842 if (!shost)
843 goto free_vport;
844 shost->transportt = ibmvstgt_transport_template;
845
846 target = host_to_srp_target(shost);
847 target->shost = shost;
848 vport->dma_dev = dev;
849 target->ldata = vport;
850 vport->target = target;
851 err = srp_target_alloc(target, &dev->dev, INITIAL_SRP_LIMIT,
852 SRP_MAX_IU_LEN);
853 if (err)
854 goto put_host;
855
856 dma = (unsigned int *) vio_get_attribute(dev, "ibm,my-dma-window",
857 &dma_size);
858 if (!dma || dma_size != 40) {
859 eprintk("Couldn't get window property %d\n", dma_size);
860 err = -EIO;
861 goto free_srp_target;
862 }
863 vport->liobn = dma[0];
864 vport->riobn = dma[5];
865
866 INIT_WORK(&vport->crq_work, handle_crq);
867
868 err = scsi_add_host(shost, target->dev);
869 if (err)
870 goto free_srp_target;
871
872 err = scsi_tgt_alloc_queue(shost);
873 if (err)
874 goto remove_host;
875
876 err = crq_queue_create(&vport->crq_queue, target);
877 if (err)
878 goto free_queue;
879
880 return 0;
881free_queue:
882 scsi_tgt_free_queue(shost);
883remove_host:
884 scsi_remove_host(shost);
885free_srp_target:
886 srp_target_free(target);
887put_host:
888 scsi_host_put(shost);
889free_vport:
890 kfree(vport);
891 return err;
892}
893
894static int ibmvstgt_remove(struct vio_dev *dev)
895{
896 struct srp_target *target = dev_get_drvdata(&dev->dev);
897 struct Scsi_Host *shost = target->shost;
898 struct vio_port *vport = target->ldata;
899
900 crq_queue_destroy(target);
901 srp_remove_host(shost);
902 scsi_remove_host(shost);
903 scsi_tgt_free_queue(shost);
904 srp_target_free(target);
905 kfree(vport);
906 scsi_host_put(shost);
907 return 0;
908}
909
910static struct vio_device_id ibmvstgt_device_table[] = {
911 {"v-scsi-host", "IBM,v-scsi-host"},
912 {"",""}
913};
914
915MODULE_DEVICE_TABLE(vio, ibmvstgt_device_table);
916
917static struct vio_driver ibmvstgt_driver = {
918 .id_table = ibmvstgt_device_table,
919 .probe = ibmvstgt_probe,
920 .remove = ibmvstgt_remove,
921 .name = "ibmvscsis",
922};
923
924static int get_system_info(void)
925{
926 struct device_node *rootdn;
927 const char *id, *model, *name;
928 const unsigned int *num;
929
930 rootdn = of_find_node_by_path("/");
931 if (!rootdn)
932 return -ENOENT;
933
934 model = of_get_property(rootdn, "model", NULL);
935 id = of_get_property(rootdn, "system-id", NULL);
936 if (model && id)
937 snprintf(system_id, sizeof(system_id), "%s-%s", model, id);
938
939 name = of_get_property(rootdn, "ibm,partition-name", NULL);
940 if (name)
941 strncpy(partition_name, name, sizeof(partition_name));
942
943 num = of_get_property(rootdn, "ibm,partition-no", NULL);
944 if (num)
945 partition_number = *num;
946
947 of_node_put(rootdn);
948 return 0;
949}
950
951static struct srp_function_template ibmvstgt_transport_functions = {
952 .tsk_mgmt_response = ibmvstgt_tsk_mgmt_response,
953 .it_nexus_response = ibmvstgt_it_nexus_response,
954};
955
956static int __init ibmvstgt_init(void)
957{
958 int err = -ENOMEM;
959
960 printk("IBM eServer i/pSeries Virtual SCSI Target Driver\n");
961
962 ibmvstgt_transport_template =
963 srp_attach_transport(&ibmvstgt_transport_functions);
964 if (!ibmvstgt_transport_template)
965 return err;
966
967 vtgtd = create_workqueue("ibmvtgtd");
968 if (!vtgtd)
969 goto release_transport;
970
971 err = get_system_info();
972 if (err)
973 goto destroy_wq;
974
975 err = vio_register_driver(&ibmvstgt_driver);
976 if (err)
977 goto destroy_wq;
978
979 return 0;
980destroy_wq:
981 destroy_workqueue(vtgtd);
982release_transport:
983 srp_release_transport(ibmvstgt_transport_template);
984 return err;
985}
986
987static void __exit ibmvstgt_exit(void)
988{
989 printk("Unregister IBM virtual SCSI driver\n");
990
991 destroy_workqueue(vtgtd);
992 vio_unregister_driver(&ibmvstgt_driver);
993 srp_release_transport(ibmvstgt_transport_template);
994}
995
996MODULE_DESCRIPTION("IBM Virtual SCSI Target");
997MODULE_AUTHOR("Santiago Leon");
998MODULE_LICENSE("GPL");
999
1000module_init(ibmvstgt_init);
1001module_exit(ibmvstgt_exit);