path: root/drivers/scsi/ibmvscsi_tgt
author	Bryant G. Ly <bryantly@linux.vnet.ibm.com>	2016-06-28 18:05:35 -0400
committer	Nicholas Bellinger <nab@linux-iscsi.org>	2016-07-20 04:15:43 -0400
commit	88a678bbc34cecce36bf2c7a8af4cba38f9f77ce (patch)
tree	fac58c48609069efa14159d5749ca3586faa0cb2 /drivers/scsi/ibmvscsi_tgt
parent	410c29dfbfdf73d0d0b5d14a21868ab038eca703 (diff)
ibmvscsis: Initial commit of IBM VSCSI Tgt Driver
This driver is a pick up of the old IBM VIO scsi Target Driver
that was started by Nick and Fujita 2-4 years ago.

http://comments.gmane.org/gmane.linux.scsi/90119

The driver provides a virtual SCSI device on IBM Power Servers.

This patch contains the fifth version for an initial merge of the
tcm ibmvscsis driver. More information on this driver and config
can be found:

https://github.com/powervm/ibmvscsis/wiki/Configuration
http://www.linux-iscsi.org/wiki/IBM_vSCSI

(Drop extra libsrp review breakage + Fix kconfig typo - nab)

Signed-off-by: Steven Royer <seroyer@linux.vnet.ibm.com>
Signed-off-by: Tyrel Datwyler <tyreld@linux.vnet.ibm.com>
Signed-off-by: Michael Cyr <mikecyr@linux.vnet.ibm.com>
Signed-off-by: Bryant G. Ly <bryantly@linux.vnet.ibm.com>
Cc: FUJITA Tomonori <tomof@acm.org>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
Diffstat (limited to 'drivers/scsi/ibmvscsi_tgt')
-rw-r--r--	drivers/scsi/ibmvscsi_tgt/Makefile	3
-rw-r--r--	drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c	4087
-rw-r--r--	drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h	346
-rw-r--r--	drivers/scsi/ibmvscsi_tgt/libsrp.c	427
-rw-r--r--	drivers/scsi/ibmvscsi_tgt/libsrp.h	123
5 files changed, 4986 insertions, 0 deletions
diff --git a/drivers/scsi/ibmvscsi_tgt/Makefile b/drivers/scsi/ibmvscsi_tgt/Makefile
new file mode 100644
index 000000000000..0c060ce64cb0
--- /dev/null
+++ b/drivers/scsi/ibmvscsi_tgt/Makefile
@@ -0,0 +1,3 @@
1obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvscsis.o
2
3ibmvscsis-y := libsrp.o ibmvscsi_tgt.o
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
new file mode 100644
index 000000000000..b29fef9d0f27
--- /dev/null
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -0,0 +1,4087 @@
1/*******************************************************************************
2 * IBM Virtual SCSI Target Driver
3 * Copyright (C) 2003-2005 Dave Boutcher (boutcher@us.ibm.com) IBM Corp.
4 * Santiago Leon (santil@us.ibm.com) IBM Corp.
5 * Linda Xie (lxie@us.ibm.com) IBM Corp.
6 *
7 * Copyright (C) 2005-2011 FUJITA Tomonori <tomof@acm.org>
8 * Copyright (C) 2010 Nicholas A. Bellinger <nab@kernel.org>
9 *
10 * Authors: Bryant G. Ly <bryantly@linux.vnet.ibm.com>
11 * Authors: Michael Cyr <mikecyr@linux.vnet.ibm.com>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 ****************************************************************************/
24
25#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
26
27#include <linux/module.h>
28#include <linux/kernel.h>
29#include <linux/slab.h>
30#include <linux/types.h>
31#include <linux/list.h>
32#include <linux/string.h>
33
34#include <target/target_core_base.h>
35#include <target/target_core_fabric.h>
36
37#include <asm/hvcall.h>
38#include <asm/vio.h>
39
40#include <scsi/viosrp.h>
41
42#include "ibmvscsi_tgt.h"
43
44#define IBMVSCSIS_VERSION "v0.2"
45
46#define INITIAL_SRP_LIMIT 800
47#define DEFAULT_MAX_SECTORS 256
48
49static uint max_vdma_size = MAX_H_COPY_RDMA;
50
51static char system_id[SYS_ID_NAME_LEN] = "";
52static char partition_name[PARTITION_NAMELEN] = "UNKNOWN";
53static uint partition_number = -1;
54
55/* Adapter list and lock to control it */
56static DEFINE_SPINLOCK(ibmvscsis_dev_lock);
57static LIST_HEAD(ibmvscsis_dev_list);
58
59static long ibmvscsis_parse_command(struct scsi_info *vscsi,
60 struct viosrp_crq *crq);
61
62static void ibmvscsis_adapter_idle(struct scsi_info *vscsi);
63
64static void ibmvscsis_determine_resid(struct se_cmd *se_cmd,
65 struct srp_rsp *rsp)
66{
67 u32 residual_count = se_cmd->residual_count;
68
69 if (!residual_count)
70 return;
71
72 if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
73 if (se_cmd->data_direction == DMA_TO_DEVICE) {
74 /* residual data from an underflow write */
75 rsp->flags = SRP_RSP_FLAG_DOUNDER;
76 rsp->data_out_res_cnt = cpu_to_be32(residual_count);
77 } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
78 /* residual data from an underflow read */
79 rsp->flags = SRP_RSP_FLAG_DIUNDER;
80 rsp->data_in_res_cnt = cpu_to_be32(residual_count);
81 }
82 } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
83 if (se_cmd->data_direction == DMA_TO_DEVICE) {
84 /* residual data from an overflow write */
85 rsp->flags = SRP_RSP_FLAG_DOOVER;
86 rsp->data_out_res_cnt = cpu_to_be32(residual_count);
87 } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
88 /* residual data from an overflow read */
89 rsp->flags = SRP_RSP_FLAG_DIOVER;
90 rsp->data_in_res_cnt = cpu_to_be32(residual_count);
91 }
92 }
93}
94
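The function above pairs the transfer direction with under/overflow status to select one of four SRP residual flags. The standalone sketch below mirrors just that selection logic; the DEMO_* names and bit values are invented for illustration and are not the definitions from <scsi/srp.h>.

#include <stdio.h>

/* illustrative bit values only; the real ones live in <scsi/srp.h> */
#define DEMO_DOUNDER 0x1	/* write (data-out) underflow */
#define DEMO_DIUNDER 0x2	/* read (data-in) underflow */
#define DEMO_DOOVER  0x4	/* write (data-out) overflow */
#define DEMO_DIOVER  0x8	/* read (data-in) overflow */

enum demo_dir { DEMO_TO_DEVICE, DEMO_FROM_DEVICE };

static unsigned int demo_resid_flag(int underflow, enum demo_dir dir)
{
	if (underflow)
		return dir == DEMO_TO_DEVICE ? DEMO_DOUNDER : DEMO_DIUNDER;
	return dir == DEMO_TO_DEVICE ? DEMO_DOOVER : DEMO_DIOVER;
}

int main(void)
{
	/* a short read (underflow on data-in) selects the DIUNDER flag */
	printf("flags 0x%x\n", demo_resid_flag(1, DEMO_FROM_DEVICE));
	return 0;
}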
95/**
96 * connection_broken() - Determine if the connection to the client is good
97 * @vscsi: Pointer to our adapter structure
98 *
99 * This function attempts to send a ping MAD to the client. If the call to
100 * queue the request returns H_CLOSED then the connection has been broken
101 * and the function returns TRUE.
102 *
103 * EXECUTION ENVIRONMENT:
104 * Interrupt or Process environment
105 */
106static bool connection_broken(struct scsi_info *vscsi)
107{
108 struct viosrp_crq *crq;
109 u64 buffer[2] = { 0, 0 };
110 long h_return_code;
111 bool rc = false;
112
113 /* create a PING crq */
114 crq = (struct viosrp_crq *)&buffer;
115 crq->valid = VALID_CMD_RESP_EL;
116 crq->format = MESSAGE_IN_CRQ;
117 crq->status = PING;
118
119 h_return_code = h_send_crq(vscsi->dds.unit_id,
120 cpu_to_be64(buffer[MSG_HI]),
121 cpu_to_be64(buffer[MSG_LOW]));
122
123 pr_debug("connection_broken: rc %ld\n", h_return_code);
124
125 if (h_return_code == H_CLOSED)
126 rc = true;
127
128 return rc;
129}
130
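connection_broken() builds the 16-byte ping element directly inside a two-u64 buffer so the halves can be handed to h_send_crq as register arguments. The userspace sketch below models that overlay plus the cpu_to_be64 conversion on a little-endian build host; the struct layout and field values here approximate a CRQ element for the demo only and are not the viosrp definitions.

#include <stdint.h>
#include <stdio.h>

struct demo_crq {			/* rough 16-byte CRQ element shape */
	uint8_t valid;
	uint8_t format;
	uint8_t reserved;
	uint8_t status;
	uint8_t pad[12];
};

static uint64_t demo_cpu_to_be64(uint64_t x)	/* byte swap on LE hosts */
{
	uint64_t out = 0;
	int i;

	for (i = 0; i < 8; i++)
		out = (out << 8) | ((x >> (8 * i)) & 0xff);
	return out;
}

int main(void)
{
	uint64_t buffer[2] = { 0, 0 };
	struct demo_crq *crq = (struct demo_crq *)buffer;

	crq->valid = 0x80;	/* stand-in for VALID_CMD_RESP_EL */
	crq->format = 0x06;	/* stand-in for MESSAGE_IN_CRQ */
	crq->status = 0xf5;	/* stand-in for PING */

	/* h_send_crq(unit_id, msg_hi, msg_low) would take these values */
	printf("msg_hi 0x%016llx msg_low 0x%016llx\n",
	       (unsigned long long)demo_cpu_to_be64(buffer[0]),
	       (unsigned long long)demo_cpu_to_be64(buffer[1]));
	return 0;
}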
131/**
132 * ibmvscsis_unregister_command_q() - Helper Function-Unregister Command Queue
133 * @vscsi: Pointer to our adapter structure
134 *
 135 * This function calls h_free_crq, then frees the interrupt bit etc.
 136 * It must release the lock before doing so because of the time it can take
 137 * for h_free_crq in PHYP.
 138 * NOTE: the caller must make sure that state and/or flags will prevent
 139 * the interrupt handler from scheduling work.
 140 * NOTE: anyone calling this function may need to set the CRQ_CLOSED flag;
 141 * we can't do it here, because we don't have the lock
142 *
143 * EXECUTION ENVIRONMENT:
144 * Process level
145 */
146static long ibmvscsis_unregister_command_q(struct scsi_info *vscsi)
147{
148 long qrc;
149 long rc = ADAPT_SUCCESS;
150 int ticks = 0;
151
152 do {
153 qrc = h_free_crq(vscsi->dds.unit_id);
154 switch (qrc) {
155 case H_SUCCESS:
156 break;
157
158 case H_HARDWARE:
159 case H_PARAMETER:
160 dev_err(&vscsi->dev, "unregister_command_q: error from h_free_crq %ld\n",
161 qrc);
162 rc = ERROR;
163 break;
164
165 case H_BUSY:
166 case H_LONG_BUSY_ORDER_1_MSEC:
167 /* msleep not good for small values */
168 usleep_range(1000, 2000);
169 ticks += 1;
170 break;
171 case H_LONG_BUSY_ORDER_10_MSEC:
172 usleep_range(10000, 20000);
173 ticks += 10;
174 break;
175 case H_LONG_BUSY_ORDER_100_MSEC:
176 msleep(100);
177 ticks += 100;
178 break;
179 case H_LONG_BUSY_ORDER_1_SEC:
180 ssleep(1);
181 ticks += 1000;
182 break;
183 case H_LONG_BUSY_ORDER_10_SEC:
184 ssleep(10);
185 ticks += 10000;
186 break;
187 case H_LONG_BUSY_ORDER_100_SEC:
188 ssleep(100);
189 ticks += 100000;
190 break;
191 default:
192 dev_err(&vscsi->dev, "unregister_command_q: unknown error %ld from h_free_crq\n",
193 qrc);
194 rc = ERROR;
195 break;
196 }
197
198 /*
 199 * don't wait more than 300 seconds
200 * ticks are in milliseconds more or less
201 */
202 if (ticks > 300000 && qrc != H_SUCCESS) {
203 rc = ERROR;
204 dev_err(&vscsi->dev, "Excessive wait for h_free_crq\n");
205 }
206 } while (qrc != H_SUCCESS && rc == ADAPT_SUCCESS);
207
208 pr_debug("Freeing CRQ: phyp rc %ld, rc %ld\n", qrc, rc);
209
210 return rc;
211}
212
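The retry loop above translates each H_LONG_BUSY_* return code into the sleep the hypervisor asked for, accumulating an approximate millisecond count so the whole operation is capped near 300 seconds. A compressed userspace sketch of the same pattern, with an invented stub standing in for h_free_crq:

#include <stdio.h>
#include <unistd.h>

#define DEMO_H_SUCCESS 0
#define DEMO_H_BUSY    1
#define DEMO_MAX_WAIT_MS (300L * 1000)

static int demo_hcall(void)		/* invented stub for h_free_crq */
{
	static int calls;

	return ++calls < 3 ? DEMO_H_BUSY : DEMO_H_SUCCESS;
}

int main(void)
{
	long ticks = 0;			/* milliseconds waited, roughly */
	int rc;

	do {
		rc = demo_hcall();
		if (rc == DEMO_H_BUSY) {
			usleep(1000);	/* like H_LONG_BUSY_ORDER_1_MSEC */
			ticks += 1;
		}
		if (ticks > DEMO_MAX_WAIT_MS && rc != DEMO_H_SUCCESS) {
			fprintf(stderr, "excessive wait\n");
			return 1;
		}
	} while (rc != DEMO_H_SUCCESS);

	printf("freed after ~%ld ms of waiting\n", ticks);
	return 0;
}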
213/**
214 * ibmvscsis_delete_client_info() - Helper function to Delete Client Info
215 * @vscsi: Pointer to our adapter structure
216 * @client_closed: True if client closed its queue
217 *
218 * Deletes information specific to the client when the client goes away
219 *
220 * EXECUTION ENVIRONMENT:
221 * Interrupt or Process
222 */
223static void ibmvscsis_delete_client_info(struct scsi_info *vscsi,
224 bool client_closed)
225{
226 vscsi->client_cap = 0;
227
228 /*
229 * Some things we don't want to clear if we're closing the queue,
230 * because some clients don't resend the host handshake when they
231 * get a transport event.
232 */
233 if (client_closed)
234 vscsi->client_data.os_type = 0;
235}
236
237/**
238 * ibmvscsis_free_command_q() - Free Command Queue
239 * @vscsi: Pointer to our adapter structure
240 *
241 * This function calls unregister_command_q, then clears interrupts and
242 * any pending interrupt acknowledgments associated with the command q.
243 * It also clears memory if there is no error.
244 *
 245 * PHYP did not meet the PAPR architecture, so we must give up the
246 * lock. This causes a timing hole regarding state change. To close the
247 * hole this routine does accounting on any change that occurred during
248 * the time the lock is not held.
 249 * NOTE: must give up and then reacquire the interrupt lock; the caller must
 250 * make sure that state and/or flags will prevent the interrupt handler from
251 * scheduling work.
252 *
253 * EXECUTION ENVIRONMENT:
254 * Process level, interrupt lock is held
255 */
256static long ibmvscsis_free_command_q(struct scsi_info *vscsi)
257{
258 int bytes;
259 u32 flags_under_lock;
260 u16 state_under_lock;
261 long rc = ADAPT_SUCCESS;
262
263 if (!(vscsi->flags & CRQ_CLOSED)) {
264 vio_disable_interrupts(vscsi->dma_dev);
265
266 state_under_lock = vscsi->new_state;
267 flags_under_lock = vscsi->flags;
268 vscsi->phyp_acr_state = 0;
269 vscsi->phyp_acr_flags = 0;
270
271 spin_unlock_bh(&vscsi->intr_lock);
272 rc = ibmvscsis_unregister_command_q(vscsi);
273 spin_lock_bh(&vscsi->intr_lock);
274
275 if (state_under_lock != vscsi->new_state)
276 vscsi->phyp_acr_state = vscsi->new_state;
277
278 vscsi->phyp_acr_flags = ((~flags_under_lock) & vscsi->flags);
279
280 if (rc == ADAPT_SUCCESS) {
281 bytes = vscsi->cmd_q.size * PAGE_SIZE;
282 memset(vscsi->cmd_q.base_addr, 0, bytes);
283 vscsi->cmd_q.index = 0;
284 vscsi->flags |= CRQ_CLOSED;
285
286 ibmvscsis_delete_client_info(vscsi, false);
287 }
288
289 pr_debug("free_command_q: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
290 vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
291 vscsi->phyp_acr_state);
292 }
293 return rc;
294}
295
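The acr bookkeeping above works by snapshotting state and flags before the lock is dropped and diffing on reacquire, so changes made inside the unlocked window are recorded for replay rather than lost. A single-threaded toy model of that snapshot-and-diff pattern (plain ints stand in for the adapter fields; nothing here is driver API):

#include <stdio.h>

static unsigned int flags;		/* adapter flag word stand-in */

static void unlocked_window(void)
{
	/* another code path runs while the lock is dropped */
	flags |= 0x4;			/* some flag-bit analogue */
}

int main(void)
{
	unsigned int flags_under_lock, acr_flags;

	/* snapshot while still holding the (imaginary) lock */
	flags_under_lock = flags;

	/* lock dropped around the slow h_free_crq analogue */
	unlocked_window();

	/* lock reacquired: record what changed so it can be replayed */
	acr_flags = ~flags_under_lock & flags;
	printf("acr_flags 0x%x\n", acr_flags);	/* prints 0x4 */
	return 0;
}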
296/**
297 * ibmvscsis_cmd_q_dequeue() - Get valid Command element
298 * @mask: Mask to use in case index wraps
299 * @current_index: Current index into command queue
300 * @base_addr: Pointer to start of command queue
301 *
302 * Returns a pointer to a valid command element or NULL, if the command
303 * queue is empty
304 *
305 * EXECUTION ENVIRONMENT:
306 * Interrupt environment, interrupt lock held
307 */
308static struct viosrp_crq *ibmvscsis_cmd_q_dequeue(uint mask,
309 uint *current_index,
310 struct viosrp_crq *base_addr)
311{
312 struct viosrp_crq *ptr;
313
314 ptr = base_addr + *current_index;
315
316 if (ptr->valid) {
317 *current_index = (*current_index + 1) & mask;
318 dma_rmb();
319 } else {
320 ptr = NULL;
321 }
322
323 return ptr;
324}
325
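Because the CRQ size is a power of two, the dequeue above advances its cursor with a mask instead of a modulo, and emptiness is detected purely from the element's valid byte. A self-contained model of the same ring walk:

#include <stdio.h>

#define Q_SIZE 8			/* must be a power of two */
#define Q_MASK (Q_SIZE - 1)

struct slot { unsigned char valid; int payload; };

static struct slot *demo_dequeue(struct slot *base, unsigned int *index)
{
	struct slot *ptr = base + *index;

	if (!ptr->valid)
		return NULL;		/* queue empty at this position */
	*index = (*index + 1) & Q_MASK;	/* wraps 7 -> 0 via the mask */
	return ptr;
}

int main(void)
{
	struct slot q[Q_SIZE] = { { 1, 42 }, { 1, 43 } };
	unsigned int idx = 0;
	struct slot *s;

	while ((s = demo_dequeue(q, &idx))) {
		printf("payload %d\n", s->payload);
		s->valid = 0;		/* the INVALIDATE_CMD_RESP_EL step */
	}
	return 0;
}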
326/**
327 * ibmvscsis_send_init_message() - send initialize message to the client
328 * @vscsi: Pointer to our adapter structure
329 * @format: Which Init Message format to send
330 *
331 * EXECUTION ENVIRONMENT:
332 * Interrupt environment interrupt lock held
333 */
334static long ibmvscsis_send_init_message(struct scsi_info *vscsi, u8 format)
335{
336 struct viosrp_crq *crq;
337 u64 buffer[2] = { 0, 0 };
338 long rc;
339
340 crq = (struct viosrp_crq *)&buffer;
341 crq->valid = VALID_INIT_MSG;
342 crq->format = format;
343 rc = h_send_crq(vscsi->dds.unit_id, cpu_to_be64(buffer[MSG_HI]),
344 cpu_to_be64(buffer[MSG_LOW]));
345
346 return rc;
347}
348
349/**
350 * ibmvscsis_check_init_msg() - Check init message valid
351 * @vscsi: Pointer to our adapter structure
352 * @format: Pointer to return format of Init Message, if any.
353 * Set to UNUSED_FORMAT if no Init Message in queue.
354 *
 355 * Checks if an initialize message was queued by the initiator
356 * after the queue was created and before the interrupt was enabled.
357 *
358 * EXECUTION ENVIRONMENT:
359 * Process level only, interrupt lock held
360 */
361static long ibmvscsis_check_init_msg(struct scsi_info *vscsi, uint *format)
362{
363 struct viosrp_crq *crq;
364 long rc = ADAPT_SUCCESS;
365
366 crq = ibmvscsis_cmd_q_dequeue(vscsi->cmd_q.mask, &vscsi->cmd_q.index,
367 vscsi->cmd_q.base_addr);
368 if (!crq) {
369 *format = (uint)UNUSED_FORMAT;
370 } else if (crq->valid == VALID_INIT_MSG && crq->format == INIT_MSG) {
371 *format = (uint)INIT_MSG;
372 crq->valid = INVALIDATE_CMD_RESP_EL;
373 dma_rmb();
374
375 /*
376 * the caller has ensured no initialize message was
377 * sent after the queue was
 378 * created, so there should be no other message on the queue.
379 */
380 crq = ibmvscsis_cmd_q_dequeue(vscsi->cmd_q.mask,
381 &vscsi->cmd_q.index,
382 vscsi->cmd_q.base_addr);
383 if (crq) {
384 *format = (uint)(crq->format);
385 rc = ERROR;
386 crq->valid = INVALIDATE_CMD_RESP_EL;
387 dma_rmb();
388 }
389 } else {
390 *format = (uint)(crq->format);
391 rc = ERROR;
392 crq->valid = INVALIDATE_CMD_RESP_EL;
393 dma_rmb();
394 }
395
396 return rc;
397}
398
399/**
400 * ibmvscsis_establish_new_q() - Establish new CRQ queue
401 * @vscsi: Pointer to our adapter structure
402 * @new_state: New state being established after resetting the queue
403 *
404 * Must be called with interrupt lock held.
405 */
406static long ibmvscsis_establish_new_q(struct scsi_info *vscsi, uint new_state)
407{
408 long rc = ADAPT_SUCCESS;
409 uint format;
410
411 vscsi->flags &= PRESERVE_FLAG_FIELDS;
412 vscsi->rsp_q_timer.timer_pops = 0;
413 vscsi->debit = 0;
414 vscsi->credit = 0;
415
416 rc = vio_enable_interrupts(vscsi->dma_dev);
417 if (rc) {
418 pr_warn("reset_queue: failed to enable interrupts, rc %ld\n",
419 rc);
420 return rc;
421 }
422
423 rc = ibmvscsis_check_init_msg(vscsi, &format);
424 if (rc) {
425 dev_err(&vscsi->dev, "reset_queue: check_init_msg failed, rc %ld\n",
426 rc);
427 return rc;
428 }
429
430 if (format == UNUSED_FORMAT && new_state == WAIT_CONNECTION) {
431 rc = ibmvscsis_send_init_message(vscsi, INIT_MSG);
432 switch (rc) {
433 case H_SUCCESS:
434 case H_DROPPED:
435 case H_CLOSED:
436 rc = ADAPT_SUCCESS;
437 break;
438
439 case H_PARAMETER:
440 case H_HARDWARE:
441 break;
442
443 default:
444 vscsi->state = UNDEFINED;
445 rc = H_HARDWARE;
446 break;
447 }
448 }
449
450 return rc;
451}
452
453/**
454 * ibmvscsis_reset_queue() - Reset CRQ Queue
455 * @vscsi: Pointer to our adapter structure
456 * @new_state: New state to establish after resetting the queue
457 *
458 * This function calls h_free_q and then calls h_reg_q and does all
459 * of the bookkeeping to get us back to where we can communicate.
460 *
461 * Actually, we don't always call h_free_crq. A problem was discovered
462 * where one partition would close and reopen his queue, which would
463 * cause his partner to get a transport event, which would cause him to
464 * close and reopen his queue, which would cause the original partition
465 * to get a transport event, etc., etc. To prevent this, we don't
466 * actually close our queue if the client initiated the reset, (i.e.
467 * either we got a transport event or we have detected that the client's
468 * queue is gone)
469 *
470 * EXECUTION ENVIRONMENT:
471 * Process environment, called with interrupt lock held
472 */
473static void ibmvscsis_reset_queue(struct scsi_info *vscsi, uint new_state)
474{
475 int bytes;
476 long rc = ADAPT_SUCCESS;
477
478 pr_debug("reset_queue: flags 0x%x\n", vscsi->flags);
479
480 /* don't reset, the client did it for us */
481 if (vscsi->flags & (CLIENT_FAILED | TRANS_EVENT)) {
482 vscsi->flags &= PRESERVE_FLAG_FIELDS;
483 vscsi->rsp_q_timer.timer_pops = 0;
484 vscsi->debit = 0;
485 vscsi->credit = 0;
486 vscsi->state = new_state;
487 vio_enable_interrupts(vscsi->dma_dev);
488 } else {
489 rc = ibmvscsis_free_command_q(vscsi);
490 if (rc == ADAPT_SUCCESS) {
491 vscsi->state = new_state;
492
493 bytes = vscsi->cmd_q.size * PAGE_SIZE;
494 rc = h_reg_crq(vscsi->dds.unit_id,
495 vscsi->cmd_q.crq_token, bytes);
496 if (rc == H_CLOSED || rc == H_SUCCESS) {
497 rc = ibmvscsis_establish_new_q(vscsi,
498 new_state);
499 }
500
501 if (rc != ADAPT_SUCCESS) {
502 pr_debug("reset_queue: reg_crq rc %ld\n", rc);
503
504 vscsi->state = ERR_DISCONNECTED;
505 vscsi->flags |= RESPONSE_Q_DOWN;
506 ibmvscsis_free_command_q(vscsi);
507 }
508 } else {
509 vscsi->state = ERR_DISCONNECTED;
510 vscsi->flags |= RESPONSE_Q_DOWN;
511 }
512 }
513}
514
515/**
516 * ibmvscsis_free_cmd_resources() - Free command resources
517 * @vscsi: Pointer to our adapter structure
 518 * @cmd: Command which is no longer in use
519 *
520 * Must be called with interrupt lock held.
521 */
522static void ibmvscsis_free_cmd_resources(struct scsi_info *vscsi,
523 struct ibmvscsis_cmd *cmd)
524{
525 struct iu_entry *iue = cmd->iue;
526
527 switch (cmd->type) {
528 case TASK_MANAGEMENT:
529 case SCSI_CDB:
530 /*
531 * When the queue goes down this value is cleared, so it
532 * cannot be cleared in this general purpose function.
533 */
534 if (vscsi->debit)
535 vscsi->debit -= 1;
536 break;
537 case ADAPTER_MAD:
538 vscsi->flags &= ~PROCESSING_MAD;
539 break;
540 case UNSET_TYPE:
541 break;
542 default:
543 dev_err(&vscsi->dev, "free_cmd_resources unknown type %d\n",
544 cmd->type);
545 break;
546 }
547
548 cmd->iue = NULL;
549 list_add_tail(&cmd->list, &vscsi->free_cmd);
550 srp_iu_put(iue);
551
552 if (list_empty(&vscsi->active_q) && list_empty(&vscsi->schedule_q) &&
553 list_empty(&vscsi->waiting_rsp) && (vscsi->flags & WAIT_FOR_IDLE)) {
554 vscsi->flags &= ~WAIT_FOR_IDLE;
555 complete(&vscsi->wait_idle);
556 }
557}
558
559/**
560 * ibmvscsis_disconnect() - Helper function to disconnect
561 * @work: Pointer to work_struct, gives access to our adapter structure
562 *
563 * An error has occurred or the driver received a Transport event,
564 * and the driver is requesting that the command queue be de-registered
565 * in a safe manner. If there is no outstanding I/O then we can stop the
566 * queue. If we are restarting the queue it will be reflected in the
 567 * state of the adapter.
568 *
569 * EXECUTION ENVIRONMENT:
570 * Process environment
571 */
572static void ibmvscsis_disconnect(struct work_struct *work)
573{
574 struct scsi_info *vscsi = container_of(work, struct scsi_info,
575 proc_work);
576 u16 new_state;
577 bool wait_idle = false;
578 long rc = ADAPT_SUCCESS;
579
580 spin_lock_bh(&vscsi->intr_lock);
581 new_state = vscsi->new_state;
582 vscsi->new_state = 0;
583
584 pr_debug("disconnect: flags 0x%x, state 0x%hx\n", vscsi->flags,
585 vscsi->state);
586
587 /*
588 * check which state we are in and see if we
 589 * should transition to the new state
590 */
591 switch (vscsi->state) {
592 /* Should never be called while in this state. */
593 case NO_QUEUE:
594 /*
595 * Can never transition from this state;
 596 * ignore errors and logout.
597 */
598 case UNCONFIGURING:
599 break;
600
601 /* can transition from this state to UNCONFIGURING */
602 case ERR_DISCONNECT:
603 if (new_state == UNCONFIGURING)
604 vscsi->state = new_state;
605 break;
606
607 /*
 608 * Can transition from this state to unconfiguring
609 * or err disconnect.
610 */
611 case ERR_DISCONNECT_RECONNECT:
612 switch (new_state) {
613 case UNCONFIGURING:
614 case ERR_DISCONNECT:
615 vscsi->state = new_state;
616 break;
617
618 case WAIT_IDLE:
619 break;
620 default:
621 break;
622 }
623 break;
624
625 /* can transition from this state to UNCONFIGURING */
626 case ERR_DISCONNECTED:
627 if (new_state == UNCONFIGURING)
628 vscsi->state = new_state;
629 break;
630
631 /*
 632 * If this is a transition into an error state,
633 * a client is attempting to establish a connection
634 * and has violated the RPA protocol.
635 * There can be nothing pending on the adapter although
636 * there can be requests in the command queue.
637 */
638 case WAIT_ENABLED:
639 case PART_UP_WAIT_ENAB:
640 switch (new_state) {
641 case ERR_DISCONNECT:
642 vscsi->flags |= RESPONSE_Q_DOWN;
643 vscsi->state = new_state;
644 vscsi->flags &= ~(SCHEDULE_DISCONNECT |
645 DISCONNECT_SCHEDULED);
646 ibmvscsis_free_command_q(vscsi);
647 break;
648 case ERR_DISCONNECT_RECONNECT:
649 ibmvscsis_reset_queue(vscsi, WAIT_ENABLED);
650 break;
651
652 /* should never happen */
653 case WAIT_IDLE:
654 rc = ERROR;
655 dev_err(&vscsi->dev, "disconnect: invalid state %d for WAIT_IDLE\n",
656 vscsi->state);
657 break;
658 }
659 break;
660
661 case WAIT_IDLE:
662 switch (new_state) {
663 case ERR_DISCONNECT:
664 case ERR_DISCONNECT_RECONNECT:
665 vscsi->state = new_state;
666 break;
667 }
668 break;
669
670 /*
671 * Initiator has not done a successful srp login
 672 * or has done a successful srp logout (adapter was not
 673 * busy). In the first case there can be responses queued
 674 * waiting for space on the initiator's response queue (MAD).
 675 * In the second case the adapter is idle. Assume the worst case,
676 * i.e. the second case.
677 */
678 case WAIT_CONNECTION:
679 case CONNECTED:
680 case SRP_PROCESSING:
681 wait_idle = true;
682 vscsi->state = new_state;
683 break;
684
685 /* can transition from this state to UNCONFIGURING */
686 case UNDEFINED:
687 if (new_state == UNCONFIGURING)
688 vscsi->state = new_state;
689 break;
690 default:
691 break;
692 }
693
694 if (wait_idle) {
695 pr_debug("disconnect start wait, active %d, sched %d\n",
696 (int)list_empty(&vscsi->active_q),
697 (int)list_empty(&vscsi->schedule_q));
698 if (!list_empty(&vscsi->active_q) ||
699 !list_empty(&vscsi->schedule_q)) {
700 vscsi->flags |= WAIT_FOR_IDLE;
701 pr_debug("disconnect flags 0x%x\n", vscsi->flags);
702 /*
 703 * This routine cannot be called with the interrupt
704 * lock held.
705 */
706 spin_unlock_bh(&vscsi->intr_lock);
707 wait_for_completion(&vscsi->wait_idle);
708 spin_lock_bh(&vscsi->intr_lock);
709 }
710 pr_debug("disconnect stop wait\n");
711
712 ibmvscsis_adapter_idle(vscsi);
713 }
714
715 spin_unlock_bh(&vscsi->intr_lock);
716}
717
718/**
719 * ibmvscsis_post_disconnect() - Schedule the disconnect
720 * @vscsi: Pointer to our adapter structure
721 * @new_state: State to move to after disconnecting
722 * @flag_bits: Flags to turn on in adapter structure
723 *
724 * If it's already been scheduled, then see if we need to "upgrade"
725 * the new state (if the one passed in is more "severe" than the
726 * previous one).
727 *
728 * PRECONDITION:
729 * interrupt lock is held
730 */
731static void ibmvscsis_post_disconnect(struct scsi_info *vscsi, uint new_state,
732 uint flag_bits)
733{
734 uint state;
735
736 /* check the validity of the new state */
737 switch (new_state) {
738 case UNCONFIGURING:
739 case ERR_DISCONNECT:
740 case ERR_DISCONNECT_RECONNECT:
741 case WAIT_IDLE:
742 break;
743
744 default:
745 dev_err(&vscsi->dev, "post_disconnect: Invalid new state %d\n",
746 new_state);
747 return;
748 }
749
750 vscsi->flags |= flag_bits;
751
752 pr_debug("post_disconnect: new_state 0x%x, flag_bits 0x%x, vscsi->flags 0x%x, state %hx\n",
753 new_state, flag_bits, vscsi->flags, vscsi->state);
754
755 if (!(vscsi->flags & (DISCONNECT_SCHEDULED | SCHEDULE_DISCONNECT))) {
756 vscsi->flags |= SCHEDULE_DISCONNECT;
757 vscsi->new_state = new_state;
758
759 INIT_WORK(&vscsi->proc_work, ibmvscsis_disconnect);
760 (void)queue_work(vscsi->work_q, &vscsi->proc_work);
761 } else {
762 if (vscsi->new_state)
763 state = vscsi->new_state;
764 else
765 state = vscsi->state;
766
767 switch (state) {
768 case NO_QUEUE:
769 case UNCONFIGURING:
770 break;
771
772 case ERR_DISCONNECTED:
773 case ERR_DISCONNECT:
774 case UNDEFINED:
775 if (new_state == UNCONFIGURING)
776 vscsi->new_state = new_state;
777 break;
778
779 case ERR_DISCONNECT_RECONNECT:
780 switch (new_state) {
781 case UNCONFIGURING:
782 case ERR_DISCONNECT:
783 vscsi->new_state = new_state;
784 break;
785 default:
786 break;
787 }
788 break;
789
790 case WAIT_ENABLED:
791 case PART_UP_WAIT_ENAB:
792 case WAIT_IDLE:
793 case WAIT_CONNECTION:
794 case CONNECTED:
795 case SRP_PROCESSING:
796 vscsi->new_state = new_state;
797 break;
798
799 default:
800 break;
801 }
802 }
803
804 pr_debug("Leaving post_disconnect: flags 0x%x, new_state 0x%x\n",
805 vscsi->flags, vscsi->new_state);
806}
807
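The tail of post_disconnect implements a "most severe request wins" rule: once a disconnect is already scheduled, the pending new_state is only overwritten by a stricter one. The driver expresses this as the switch ladder above; the sketch below compresses the same idea into a numeric ordering, which is purely an illustration, not the driver's actual encoding:

#include <stdio.h>

/* illustrative severity ordering; the driver uses a switch ladder */
enum demo_state { DEMO_WAIT_IDLE = 1, DEMO_ERR_DISC_RECONNECT,
		  DEMO_ERR_DISCONNECT, DEMO_UNCONFIGURING };

static enum demo_state pending;

static void demo_post_disconnect(enum demo_state new_state)
{
	if (new_state > pending)	/* only upgrade, never downgrade */
		pending = new_state;
}

int main(void)
{
	demo_post_disconnect(DEMO_ERR_DISC_RECONNECT);
	demo_post_disconnect(DEMO_UNCONFIGURING);	/* upgrades */
	demo_post_disconnect(DEMO_WAIT_IDLE);		/* ignored */
	printf("pending %d\n", pending);		/* prints 4 */
	return 0;
}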
808/**
809 * ibmvscsis_trans_event() - Handle a Transport Event
810 * @vscsi: Pointer to our adapter structure
811 * @crq: Pointer to CRQ entry containing the Transport Event
812 *
813 * Do the logic to close the I_T nexus. This function may not
814 * behave to specification.
815 *
816 * EXECUTION ENVIRONMENT:
817 * Interrupt, interrupt lock held
818 */
819static long ibmvscsis_trans_event(struct scsi_info *vscsi,
820 struct viosrp_crq *crq)
821{
822 long rc = ADAPT_SUCCESS;
823
824 pr_debug("trans_event: format %d, flags 0x%x, state 0x%hx\n",
825 (int)crq->format, vscsi->flags, vscsi->state);
826
827 switch (crq->format) {
828 case MIGRATED:
829 case PARTNER_FAILED:
830 case PARTNER_DEREGISTER:
831 ibmvscsis_delete_client_info(vscsi, true);
832 break;
833
834 default:
835 rc = ERROR;
836 dev_err(&vscsi->dev, "trans_event: invalid format %d\n",
837 (uint)crq->format);
838 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT,
839 RESPONSE_Q_DOWN);
840 break;
841 }
842
843 if (rc == ADAPT_SUCCESS) {
844 switch (vscsi->state) {
845 case NO_QUEUE:
846 case ERR_DISCONNECTED:
847 case UNDEFINED:
848 break;
849
850 case UNCONFIGURING:
851 vscsi->flags |= (RESPONSE_Q_DOWN | TRANS_EVENT);
852 break;
853
854 case WAIT_ENABLED:
855 break;
856
857 case WAIT_CONNECTION:
858 break;
859
860 case CONNECTED:
861 ibmvscsis_post_disconnect(vscsi, WAIT_IDLE,
862 (RESPONSE_Q_DOWN |
863 TRANS_EVENT));
864 break;
865
866 case PART_UP_WAIT_ENAB:
867 vscsi->state = WAIT_ENABLED;
868 break;
869
870 case SRP_PROCESSING:
871 if ((vscsi->debit > 0) ||
872 !list_empty(&vscsi->schedule_q) ||
873 !list_empty(&vscsi->waiting_rsp) ||
874 !list_empty(&vscsi->active_q)) {
875 pr_debug("debit %d, sched %d, wait %d, active %d\n",
876 vscsi->debit,
877 (int)list_empty(&vscsi->schedule_q),
878 (int)list_empty(&vscsi->waiting_rsp),
879 (int)list_empty(&vscsi->active_q));
880 pr_warn("connection lost with outstanding work\n");
881 } else {
882 pr_debug("trans_event: SRP Processing, but no outstanding work\n");
883 }
884
885 ibmvscsis_post_disconnect(vscsi, WAIT_IDLE,
886 (RESPONSE_Q_DOWN |
887 TRANS_EVENT));
888 break;
889
890 case ERR_DISCONNECT:
891 case ERR_DISCONNECT_RECONNECT:
892 case WAIT_IDLE:
893 vscsi->flags |= (RESPONSE_Q_DOWN | TRANS_EVENT);
894 break;
895 }
896 }
897
898 rc = vscsi->flags & SCHEDULE_DISCONNECT;
899
900 pr_debug("Leaving trans_event: flags 0x%x, state 0x%hx, rc %ld\n",
901 vscsi->flags, vscsi->state, rc);
902
903 return rc;
904}
905
906/**
907 * ibmvscsis_poll_cmd_q() - Poll Command Queue
908 * @vscsi: Pointer to our adapter structure
909 *
910 * Called to handle command elements that may have arrived while
911 * interrupts were disabled.
912 *
913 * EXECUTION ENVIRONMENT:
914 * intr_lock must be held
915 */
916static void ibmvscsis_poll_cmd_q(struct scsi_info *vscsi)
917{
918 struct viosrp_crq *crq;
919 long rc;
920 bool ack = true;
921 volatile u8 valid;
922
 923 pr_debug("poll_cmd_q: flags 0x%x, state 0x%hx, q index %u\n",
924 vscsi->flags, vscsi->state, vscsi->cmd_q.index);
925
926 rc = vscsi->flags & SCHEDULE_DISCONNECT;
927 crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
928 valid = crq->valid;
929 dma_rmb();
930
931 while (valid) {
932poll_work:
933 vscsi->cmd_q.index =
934 (vscsi->cmd_q.index + 1) & vscsi->cmd_q.mask;
935
936 if (!rc) {
937 rc = ibmvscsis_parse_command(vscsi, crq);
938 } else {
939 if ((uint)crq->valid == VALID_TRANS_EVENT) {
940 /*
941 * must service the transport layer events even
 942 * in an error state, don't break out until all
943 * the consecutive transport events have been
944 * processed
945 */
946 rc = ibmvscsis_trans_event(vscsi, crq);
947 } else if (vscsi->flags & TRANS_EVENT) {
948 /*
 949 * if a transport event has occurred, leave
950 * everything but transport events on the queue
951 */
952 pr_debug("poll_cmd_q, ignoring\n");
953
954 /*
955 * need to decrement the queue index so we can
 956 * look at the element again
957 */
958 if (vscsi->cmd_q.index)
959 vscsi->cmd_q.index -= 1;
960 else
961 /*
 962 * index is at 0, it just wrapped;
 963 * have it index the last element in the q
964 */
965 vscsi->cmd_q.index = vscsi->cmd_q.mask;
966 break;
967 }
968 }
969
970 crq->valid = INVALIDATE_CMD_RESP_EL;
971
972 crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
973 valid = crq->valid;
974 dma_rmb();
975 }
976
977 if (!rc) {
978 if (ack) {
979 vio_enable_interrupts(vscsi->dma_dev);
980 ack = false;
981 pr_debug("poll_cmd_q, reenabling interrupts\n");
982 }
983 valid = crq->valid;
984 dma_rmb();
985 if (valid)
986 goto poll_work;
987 }
988
989 pr_debug("Leaving poll_cmd_q: rc %ld\n", rc);
990}
991
992/**
993 * ibmvscsis_free_cmd_qs() - Free elements in queue
994 * @vscsi: Pointer to our adapter structure
995 *
996 * Free all of the elements on all queues that are waiting for
997 * whatever reason.
998 *
999 * PRECONDITION:
1000 * Called with interrupt lock held
1001 */
1002static void ibmvscsis_free_cmd_qs(struct scsi_info *vscsi)
1003{
1004 struct ibmvscsis_cmd *cmd, *nxt;
1005
 1006 pr_debug("free_cmd_qs: waiting_rsp empty %d, timer started %d\n",
1007 (int)list_empty(&vscsi->waiting_rsp),
1008 vscsi->rsp_q_timer.started);
1009
1010 list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp, list) {
1011 list_del(&cmd->list);
1012 ibmvscsis_free_cmd_resources(vscsi, cmd);
1013 }
1014}
1015
1016/**
1017 * ibmvscsis_get_free_cmd() - Get free command from list
1018 * @vscsi: Pointer to our adapter structure
1019 *
1020 * Must be called with interrupt lock held.
1021 */
1022static struct ibmvscsis_cmd *ibmvscsis_get_free_cmd(struct scsi_info *vscsi)
1023{
1024 struct ibmvscsis_cmd *cmd = NULL;
1025 struct iu_entry *iue;
1026
1027 iue = srp_iu_get(&vscsi->target);
1028 if (iue) {
1029 cmd = list_first_entry_or_null(&vscsi->free_cmd,
1030 struct ibmvscsis_cmd, list);
1031 if (cmd) {
1032 list_del(&cmd->list);
1033 cmd->iue = iue;
1034 cmd->type = UNSET_TYPE;
1035 memset(&cmd->se_cmd, 0, sizeof(cmd->se_cmd));
1036 } else {
1037 srp_iu_put(iue);
1038 }
1039 }
1040
1041 return cmd;
1042}
1043
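ibmvscsis_get_free_cmd() pairs two allocations: an SRP IU first, then a command element, releasing the IU if the second allocation fails so neither pool leaks. A minimal model of that acquire-or-roll-back pattern (plain counters stand in for the real free lists):

#include <stdio.h>

static int free_ius = 2;		/* IU pool stand-in */
static int free_cmds = 1;		/* command pool stand-in */

static int demo_get_cmd(void)
{
	if (free_ius == 0)
		return 0;		/* no IU available: fail early */
	free_ius--;
	if (free_cmds == 0) {
		free_ius++;		/* roll back: the srp_iu_put() step */
		return 0;
	}
	free_cmds--;
	return 1;			/* both resources acquired */
}

int main(void)
{
	int i;

	for (i = 0; i < 3; i++)
		printf("attempt %d -> %d\n", i, demo_get_cmd());
	return 0;	/* succeeds once, then the command pool is empty */
}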
1044/**
1045 * ibmvscsis_adapter_idle() - Helper function to handle idle adapter
1046 * @vscsi: Pointer to our adapter structure
1047 *
1048 * This function is called when the adapter is idle when the driver
1049 * is attempting to clear an error condition.
1050 * The adapter is considered busy if any of its cmd queues
1051 * are non-empty. This function can be invoked
1052 * from the off level disconnect function.
1053 *
1054 * EXECUTION ENVIRONMENT:
1055 * Process environment called with interrupt lock held
1056 */
1057static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
1058{
1059 int free_qs = false;
1060
1061 pr_debug("adapter_idle: flags 0x%x, state 0x%hx\n", vscsi->flags,
1062 vscsi->state);
1063
1064 /* Only need to free qs if we're disconnecting from client */
1065 if (vscsi->state != WAIT_CONNECTION || vscsi->flags & TRANS_EVENT)
1066 free_qs = true;
1067
1068 switch (vscsi->state) {
1069 case ERR_DISCONNECT_RECONNECT:
1070 ibmvscsis_reset_queue(vscsi, WAIT_CONNECTION);
1071 pr_debug("adapter_idle, disc_rec: flags 0x%x\n", vscsi->flags);
1072 break;
1073
1074 case ERR_DISCONNECT:
1075 ibmvscsis_free_command_q(vscsi);
1076 vscsi->flags &= ~DISCONNECT_SCHEDULED;
1077 vscsi->flags |= RESPONSE_Q_DOWN;
1078 vscsi->state = ERR_DISCONNECTED;
1079 pr_debug("adapter_idle, disc: flags 0x%x, state 0x%hx\n",
1080 vscsi->flags, vscsi->state);
1081 break;
1082
1083 case WAIT_IDLE:
1084 vscsi->rsp_q_timer.timer_pops = 0;
1085 vscsi->debit = 0;
1086 vscsi->credit = 0;
1087 if (vscsi->flags & TRANS_EVENT) {
1088 vscsi->state = WAIT_CONNECTION;
1089 vscsi->flags &= PRESERVE_FLAG_FIELDS;
1090 } else {
1091 vscsi->state = CONNECTED;
1092 vscsi->flags &= ~DISCONNECT_SCHEDULED;
1093 }
1094
1095 pr_debug("adapter_idle, wait: flags 0x%x, state 0x%hx\n",
1096 vscsi->flags, vscsi->state);
1097 ibmvscsis_poll_cmd_q(vscsi);
1098 break;
1099
1100 case ERR_DISCONNECTED:
1101 vscsi->flags &= ~DISCONNECT_SCHEDULED;
1102 pr_debug("adapter_idle, disconnected: flags 0x%x, state 0x%hx\n",
1103 vscsi->flags, vscsi->state);
1104 break;
1105
1106 default:
1107 dev_err(&vscsi->dev, "adapter_idle: in invalid state %d\n",
1108 vscsi->state);
1109 break;
1110 }
1111
1112 if (free_qs)
1113 ibmvscsis_free_cmd_qs(vscsi);
1114
1115 /*
1116 * There is a timing window where we could lose a disconnect request.
1117 * The known path to this window occurs during the DISCONNECT_RECONNECT
1118 * case above: reset_queue calls free_command_q, which will release the
1119 * interrupt lock. During that time, a new post_disconnect call can be
1120 * made with a "more severe" state (DISCONNECT or UNCONFIGURING).
1121 * Because the DISCONNECT_SCHEDULED flag is already set, post_disconnect
1122 * will only set the new_state. Now free_command_q reacquires the intr
1123 * lock and clears the DISCONNECT_SCHEDULED flag (using PRESERVE_FLAG_
1124 * FIELDS), and the disconnect is lost. This is particularly bad when
1125 * the new disconnect was for UNCONFIGURING, since the unconfigure hangs
1126 * forever.
 1127 * The fix is that free command queue sets acr state and acr flags if there
 1128 * is a change while the lock is not held.
 1129 * Note: free command queue writes to this state; it clears it
 1130 * before releasing the lock. Different paths call free command
 1131 * queue at different times, so don't initialize these fields above.
1132 */
1133 if (vscsi->phyp_acr_state != 0) {
1134 /*
1135 * set any bits in flags that may have been cleared by
1136 * a call to free command queue in switch statement
1137 * or reset queue
1138 */
1139 vscsi->flags |= vscsi->phyp_acr_flags;
1140 ibmvscsis_post_disconnect(vscsi, vscsi->phyp_acr_state, 0);
1141 vscsi->phyp_acr_state = 0;
1142 vscsi->phyp_acr_flags = 0;
1143
1144 pr_debug("adapter_idle: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
1145 vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
1146 vscsi->phyp_acr_state);
1147 }
1148
1149 pr_debug("Leaving adapter_idle: flags 0x%x, state 0x%hx, new_state 0x%x\n",
1150 vscsi->flags, vscsi->state, vscsi->new_state);
1151}
1152
1153/**
1154 * ibmvscsis_copy_crq_packet() - Copy CRQ Packet
1155 * @vscsi: Pointer to our adapter structure
1156 * @cmd: Pointer to command element to use to process the request
1157 * @crq: Pointer to CRQ entry containing the request
1158 *
1159 * Copy the srp information unit from the hosted
1160 * partition using remote dma
1161 *
1162 * EXECUTION ENVIRONMENT:
1163 * Interrupt, interrupt lock held
1164 */
1165static long ibmvscsis_copy_crq_packet(struct scsi_info *vscsi,
1166 struct ibmvscsis_cmd *cmd,
1167 struct viosrp_crq *crq)
1168{
1169 struct iu_entry *iue = cmd->iue;
1170 long rc = 0;
1171 u16 len;
1172
1173 len = be16_to_cpu(crq->IU_length);
1174 if ((len > SRP_MAX_IU_LEN) || (len == 0)) {
1175 dev_err(&vscsi->dev, "copy_crq: Invalid len %d passed", len);
1176 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
1177 return SRP_VIOLATION;
1178 }
1179
1180 rc = h_copy_rdma(len, vscsi->dds.window[REMOTE].liobn,
1181 be64_to_cpu(crq->IU_data_ptr),
1182 vscsi->dds.window[LOCAL].liobn, iue->sbuf->dma);
1183
1184 switch (rc) {
1185 case H_SUCCESS:
1186 cmd->init_time = mftb();
1187 iue->remote_token = crq->IU_data_ptr;
1188 iue->iu_len = len;
1189 pr_debug("copy_crq: ioba 0x%llx, init_time 0x%llx\n",
1190 be64_to_cpu(crq->IU_data_ptr), cmd->init_time);
1191 break;
1192 case H_PERMISSION:
1193 if (connection_broken(vscsi))
1194 ibmvscsis_post_disconnect(vscsi,
1195 ERR_DISCONNECT_RECONNECT,
1196 (RESPONSE_Q_DOWN |
1197 CLIENT_FAILED));
1198 else
1199 ibmvscsis_post_disconnect(vscsi,
1200 ERR_DISCONNECT_RECONNECT, 0);
1201
1202 dev_err(&vscsi->dev, "copy_crq: h_copy_rdma failed, rc %ld\n",
1203 rc);
1204 break;
1205 case H_DEST_PARM:
1206 case H_SOURCE_PARM:
1207 default:
1208 dev_err(&vscsi->dev, "copy_crq: h_copy_rdma failed, rc %ld\n",
1209 rc);
1210 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
1211 break;
1212 }
1213
1214 return rc;
1215}
1216
1217/**
 1218 * ibmvscsis_adapter_info() - Service an Adapter Info MAnagement Data gram
 1219 * @vscsi: Pointer to our adapter structure
 1220 * @iue: Information Unit containing the Adapter Info MAD request
 1221 *
 1222 * EXECUTION ENVIRONMENT:
 1223 * Interrupt, adapter lock is held
1224 */
1225static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
1226 struct iu_entry *iue)
1227{
1228 struct viosrp_adapter_info *mad = &vio_iu(iue)->mad.adapter_info;
1229 struct mad_adapter_info_data *info;
1230 uint flag_bits = 0;
1231 dma_addr_t token;
1232 long rc;
1233
1234 mad->common.status = cpu_to_be16(VIOSRP_MAD_SUCCESS);
1235
1236 if (be16_to_cpu(mad->common.length) > sizeof(*info)) {
1237 mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
1238 return 0;
1239 }
1240
1241 info = dma_alloc_coherent(&vscsi->dma_dev->dev, sizeof(*info), &token,
1242 GFP_KERNEL);
1243 if (!info) {
1244 dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n",
1245 iue->target);
1246 mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
1247 return 0;
1248 }
1249
1250 /* Get remote info */
1251 rc = h_copy_rdma(be16_to_cpu(mad->common.length),
1252 vscsi->dds.window[REMOTE].liobn,
1253 be64_to_cpu(mad->buffer),
1254 vscsi->dds.window[LOCAL].liobn, token);
1255
1256 if (rc != H_SUCCESS) {
1257 if (rc == H_PERMISSION) {
1258 if (connection_broken(vscsi))
1259 flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
1260 }
1261 pr_warn("adapter_info: h_copy_rdma from client failed, rc %ld\n",
1262 rc);
1263 pr_debug("adapter_info: ioba 0x%llx, flags 0x%x, flag_bits 0x%x\n",
1264 be64_to_cpu(mad->buffer), vscsi->flags, flag_bits);
1265 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
1266 flag_bits);
1267 goto free_dma;
1268 }
1269
1270 /*
1271 * Copy client info, but ignore partition number, which we
1272 * already got from phyp - unless we failed to get it from
1273 * phyp (e.g. if we're running on a p5 system).
1274 */
1275 if (vscsi->client_data.partition_number == 0)
1276 vscsi->client_data.partition_number =
1277 be32_to_cpu(info->partition_number);
1278 strncpy(vscsi->client_data.srp_version, info->srp_version,
1279 sizeof(vscsi->client_data.srp_version));
1280 strncpy(vscsi->client_data.partition_name, info->partition_name,
1281 sizeof(vscsi->client_data.partition_name));
1282 vscsi->client_data.mad_version = be32_to_cpu(info->mad_version);
1283 vscsi->client_data.os_type = be32_to_cpu(info->os_type);
1284
1285 /* Copy our info */
1286 strncpy(info->srp_version, SRP_VERSION,
1287 sizeof(info->srp_version));
1288 strncpy(info->partition_name, vscsi->dds.partition_name,
1289 sizeof(info->partition_name));
1290 info->partition_number = cpu_to_be32(vscsi->dds.partition_num);
1291 info->mad_version = cpu_to_be32(MAD_VERSION_1);
1292 info->os_type = cpu_to_be32(LINUX);
1293 memset(&info->port_max_txu[0], 0, sizeof(info->port_max_txu));
1294 info->port_max_txu[0] = cpu_to_be32(128 * PAGE_SIZE);
1295
1296 dma_wmb();
1297 rc = h_copy_rdma(sizeof(*info), vscsi->dds.window[LOCAL].liobn,
1298 token, vscsi->dds.window[REMOTE].liobn,
1299 be64_to_cpu(mad->buffer));
1300 switch (rc) {
1301 case H_SUCCESS:
1302 break;
1303
1304 case H_SOURCE_PARM:
1305 case H_DEST_PARM:
1306 case H_PERMISSION:
1307 if (connection_broken(vscsi))
1308 flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
1309 default:
1310 dev_err(&vscsi->dev, "adapter_info: h_copy_rdma to client failed, rc %ld\n",
1311 rc);
1312 ibmvscsis_post_disconnect(vscsi,
1313 ERR_DISCONNECT_RECONNECT,
1314 flag_bits);
1315 break;
1316 }
1317
1318free_dma:
1319 dma_free_coherent(&vscsi->dma_dev->dev, sizeof(*info), info, token);
1320 pr_debug("Leaving adapter_info, rc %ld\n", rc);
1321
1322 return rc;
1323}
1324
1325/**
1326 * ibmvscsis_cap_mad() - Service a Capabilities MAnagement Data gram
1327 * @vscsi: Pointer to our adapter structure
1328 * @iue: Information Unit containing the Capabilities MAD request
1329 *
1330 * NOTE: if you return an error from this routine you must be
1331 * disconnecting or you will cause a hang
1332 *
1333 * EXECUTION ENVIRONMENT:
1334 * Interrupt called with adapter lock held
1335 */
1336static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
1337{
1338 struct viosrp_capabilities *mad = &vio_iu(iue)->mad.capabilities;
1339 struct capabilities *cap;
1340 struct mad_capability_common *common;
1341 dma_addr_t token;
1342 u16 olen, len, status, min_len, cap_len;
1343 u32 flag;
1344 uint flag_bits = 0;
1345 long rc = 0;
1346
1347 olen = be16_to_cpu(mad->common.length);
1348 /*
1349 * struct capabilities hardcodes a couple capabilities after the
1350 * header, but the capabilities can actually be in any order.
1351 */
1352 min_len = offsetof(struct capabilities, migration);
1353 if ((olen < min_len) || (olen > PAGE_SIZE)) {
1354 pr_warn("cap_mad: invalid len %d\n", olen);
1355 mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
1356 return 0;
1357 }
1358
1359 cap = dma_alloc_coherent(&vscsi->dma_dev->dev, olen, &token,
1360 GFP_KERNEL);
1361 if (!cap) {
1362 dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n",
1363 iue->target);
1364 mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
1365 return 0;
1366 }
1367 rc = h_copy_rdma(olen, vscsi->dds.window[REMOTE].liobn,
1368 be64_to_cpu(mad->buffer),
1369 vscsi->dds.window[LOCAL].liobn, token);
1370 if (rc == H_SUCCESS) {
1371 strncpy(cap->name, dev_name(&vscsi->dma_dev->dev),
1372 SRP_MAX_LOC_LEN);
1373
1374 len = olen - min_len;
1375 status = VIOSRP_MAD_SUCCESS;
1376 common = (struct mad_capability_common *)&cap->migration;
1377
1378 while ((len > 0) && (status == VIOSRP_MAD_SUCCESS) && !rc) {
1379 pr_debug("cap_mad: len left %hd, cap type %d, cap len %hd\n",
1380 len, be32_to_cpu(common->cap_type),
1381 be16_to_cpu(common->length));
1382
1383 cap_len = be16_to_cpu(common->length);
1384 if (cap_len > len) {
1385 dev_err(&vscsi->dev, "cap_mad: cap len mismatch with total len\n");
1386 status = VIOSRP_MAD_FAILED;
1387 break;
1388 }
1389
1390 if (cap_len == 0) {
1391 dev_err(&vscsi->dev, "cap_mad: cap len is 0\n");
1392 status = VIOSRP_MAD_FAILED;
1393 break;
1394 }
1395
1396 switch (common->cap_type) {
1397 default:
1398 pr_debug("cap_mad: unsupported capability\n");
1399 common->server_support = 0;
1400 flag = cpu_to_be32((u32)CAP_LIST_SUPPORTED);
1401 cap->flags &= ~flag;
1402 break;
1403 }
1404
1405 len = len - cap_len;
1406 common = (struct mad_capability_common *)
1407 ((char *)common + cap_len);
1408 }
1409
1410 mad->common.status = cpu_to_be16(status);
1411
1412 dma_wmb();
1413 rc = h_copy_rdma(olen, vscsi->dds.window[LOCAL].liobn, token,
1414 vscsi->dds.window[REMOTE].liobn,
1415 be64_to_cpu(mad->buffer));
1416
1417 if (rc != H_SUCCESS) {
1418 pr_debug("cap_mad: failed to copy to client, rc %ld\n",
1419 rc);
1420
1421 if (rc == H_PERMISSION) {
1422 if (connection_broken(vscsi))
1423 flag_bits = (RESPONSE_Q_DOWN |
1424 CLIENT_FAILED);
1425 }
1426
1427 pr_warn("cap_mad: error copying data to client, rc %ld\n",
1428 rc);
1429 ibmvscsis_post_disconnect(vscsi,
1430 ERR_DISCONNECT_RECONNECT,
1431 flag_bits);
1432 }
1433 }
1434
1435 dma_free_coherent(&vscsi->dma_dev->dev, olen, cap, token);
1436
1437 pr_debug("Leaving cap_mad, rc %ld, client_cap 0x%x\n",
1438 rc, vscsi->client_cap);
1439
1440 return rc;
1441}
1442
1443/**
1444 * ibmvscsis_process_mad() - Service a MAnagement Data gram
1445 * @vscsi: Pointer to our adapter structure
1446 * @iue: Information Unit containing the MAD request
1447 *
1448 * Must be called with interrupt lock held.
1449 */
1450static long ibmvscsis_process_mad(struct scsi_info *vscsi, struct iu_entry *iue)
1451{
1452 struct mad_common *mad = (struct mad_common *)&vio_iu(iue)->mad;
1453 struct viosrp_empty_iu *empty;
1454 long rc = ADAPT_SUCCESS;
1455
1456 switch (be32_to_cpu(mad->type)) {
1457 case VIOSRP_EMPTY_IU_TYPE:
1458 empty = &vio_iu(iue)->mad.empty_iu;
1459 vscsi->empty_iu_id = be64_to_cpu(empty->buffer);
1460 vscsi->empty_iu_tag = be64_to_cpu(empty->common.tag);
1461 mad->status = cpu_to_be16(VIOSRP_MAD_SUCCESS);
1462 break;
1463 case VIOSRP_ADAPTER_INFO_TYPE:
1464 rc = ibmvscsis_adapter_info(vscsi, iue);
1465 break;
1466 case VIOSRP_CAPABILITIES_TYPE:
1467 rc = ibmvscsis_cap_mad(vscsi, iue);
1468 break;
1469 case VIOSRP_ENABLE_FAST_FAIL:
1470 if (vscsi->state == CONNECTED) {
1471 vscsi->fast_fail = true;
1472 mad->status = cpu_to_be16(VIOSRP_MAD_SUCCESS);
1473 } else {
1474 pr_warn("fast fail mad sent after login\n");
1475 mad->status = cpu_to_be16(VIOSRP_MAD_FAILED);
1476 }
1477 break;
1478 default:
1479 mad->status = cpu_to_be16(VIOSRP_MAD_NOT_SUPPORTED);
1480 break;
1481 }
1482
1483 return rc;
1484}
1485
1486/**
1487 * srp_snd_msg_failed() - Handle an error when sending a response
1488 * @vscsi: Pointer to our adapter structure
1489 * @rc: The return code from the h_send_crq command
1490 *
1491 * Must be called with interrupt lock held.
1492 */
1493static void srp_snd_msg_failed(struct scsi_info *vscsi, long rc)
1494{
1495 ktime_t kt;
1496
1497 if (rc != H_DROPPED) {
1498 ibmvscsis_free_cmd_qs(vscsi);
1499
1500 if (rc == H_CLOSED)
1501 vscsi->flags |= CLIENT_FAILED;
1502
1503 /* don't flag the same problem multiple times */
1504 if (!(vscsi->flags & RESPONSE_Q_DOWN)) {
1505 vscsi->flags |= RESPONSE_Q_DOWN;
1506 if (!(vscsi->state & (ERR_DISCONNECT |
1507 ERR_DISCONNECT_RECONNECT |
1508 ERR_DISCONNECTED | UNDEFINED))) {
1509 dev_err(&vscsi->dev, "snd_msg_failed: setting RESPONSE_Q_DOWN, state 0x%hx, flags 0x%x, rc %ld\n",
1510 vscsi->state, vscsi->flags, rc);
1511 }
1512 ibmvscsis_post_disconnect(vscsi,
1513 ERR_DISCONNECT_RECONNECT, 0);
1514 }
1515 return;
1516 }
1517
1518 /*
1519 * The response queue is full.
1520 * If the server is processing SRP requests, i.e.
1521 * the client has successfully done an
1522 * SRP_LOGIN, then it will wait forever for room in
1523 * the queue. However if the system admin
1524 * is attempting to unconfigure the server then one
1525 * or more children will be in a state where
1526 * they are being removed. So if there is even one
1527 * child being removed then the driver assumes
1528 * the system admin is attempting to break the
1529 * connection with the client and MAX_TIMER_POPS
1530 * is honored.
1531 */
1532 if ((vscsi->rsp_q_timer.timer_pops < MAX_TIMER_POPS) ||
1533 (vscsi->state == SRP_PROCESSING)) {
1534 pr_debug("snd_msg_failed: response queue full, flags 0x%x, timer started %d, pops %d\n",
1535 vscsi->flags, (int)vscsi->rsp_q_timer.started,
1536 vscsi->rsp_q_timer.timer_pops);
1537
1538 /*
1539 * Check if the timer is running; if it
1540 * is not then start it up.
1541 */
1542 if (!vscsi->rsp_q_timer.started) {
1543 if (vscsi->rsp_q_timer.timer_pops <
1544 MAX_TIMER_POPS) {
1545 kt = ktime_set(0, WAIT_NANO_SECONDS);
1546 } else {
1547 /*
1548 * slide the timeslice if the maximum
1549 * timer pops have already happened
1550 */
1551 kt = ktime_set(WAIT_SECONDS, 0);
1552 }
1553
1554 vscsi->rsp_q_timer.started = true;
1555 hrtimer_start(&vscsi->rsp_q_timer.timer, kt,
1556 HRTIMER_MODE_REL);
1557 }
1558 } else {
1559 /*
1560 * TBD: Do we need to worry about this? Need to get
1561 * remove working.
1562 */
1563 /*
1564 * waited a long time and it appears the system admin
 1565 * is bringing this driver down
1566 */
1567 vscsi->flags |= RESPONSE_Q_DOWN;
1568 ibmvscsis_free_cmd_qs(vscsi);
1569 /*
1570 * if the driver is already attempting to disconnect
1571 * from the client and has already logged an error
1572 * trace this event but don't put it in the error log
1573 */
1574 if (!(vscsi->state & (ERR_DISCONNECT |
1575 ERR_DISCONNECT_RECONNECT |
1576 ERR_DISCONNECTED | UNDEFINED))) {
1577 dev_err(&vscsi->dev, "client crq full too long\n");
1578 ibmvscsis_post_disconnect(vscsi,
1579 ERR_DISCONNECT_RECONNECT,
1580 0);
1581 }
1582 }
1583}
1584
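srp_snd_msg_failed() arms the response-queue timer with a short period until MAX_TIMER_POPS quick retries have fired, then slides to whole-second periods. The helper below sketches just that period selection; the constants are stand-ins, not the driver's WAIT_NANO_SECONDS / WAIT_SECONDS values:

#include <stdio.h>

#define DEMO_MAX_TIMER_POPS 5		/* stand-in for MAX_TIMER_POPS */
#define DEMO_SHORT_WAIT_NS  200000000L	/* stand-in short period (200 ms) */
#define DEMO_LONG_WAIT_NS   1000000000L	/* stand-in long period (1 s) */

static long demo_next_wait_ns(int timer_pops)
{
	if (timer_pops < DEMO_MAX_TIMER_POPS)
		return DEMO_SHORT_WAIT_NS;	/* quick retry */
	return DEMO_LONG_WAIT_NS;		/* slide the timeslice */
}

int main(void)
{
	int pops;

	for (pops = 0; pops < 7; pops++)
		printf("pop %d -> wait %ld ns\n",
		       pops, demo_next_wait_ns(pops));
	return 0;
}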
1585/**
1586 * ibmvscsis_send_messages() - Send a Response
1587 * @vscsi: Pointer to our adapter structure
1588 *
1589 * Send a response, first checking the waiting queue. Responses are
1590 * sent in order they are received. If the response cannot be sent,
1591 * because the client queue is full, it stays on the waiting queue.
1592 *
1593 * PRECONDITION:
1594 * Called with interrupt lock held
1595 */
1596static void ibmvscsis_send_messages(struct scsi_info *vscsi)
1597{
1598 u64 msg_hi = 0;
 1599 /* note: do not attempt to access the IU_data_ptr with this pointer;
1600 * it is not valid
1601 */
1602 struct viosrp_crq *crq = (struct viosrp_crq *)&msg_hi;
1603 struct ibmvscsis_cmd *cmd, *nxt;
1604 struct iu_entry *iue;
1605 long rc = ADAPT_SUCCESS;
1606
1607 if (!(vscsi->flags & RESPONSE_Q_DOWN)) {
1608 list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp, list) {
1609 pr_debug("send_messages cmd %p\n", cmd);
1610
1611 iue = cmd->iue;
1612
1613 crq->valid = VALID_CMD_RESP_EL;
1614 crq->format = cmd->rsp.format;
1615
1616 if (cmd->flags & CMD_FAST_FAIL)
1617 crq->status = VIOSRP_ADAPTER_FAIL;
1618
1619 crq->IU_length = cpu_to_be16(cmd->rsp.len);
1620
1621 rc = h_send_crq(vscsi->dma_dev->unit_address,
1622 be64_to_cpu(msg_hi),
1623 be64_to_cpu(cmd->rsp.tag));
1624
1625 pr_debug("send_messages: tag 0x%llx, rc %ld\n",
1626 be64_to_cpu(cmd->rsp.tag), rc);
1627
1628 /* if all ok free up the command element resources */
1629 if (rc == H_SUCCESS) {
1630 /* some movement has occurred */
1631 vscsi->rsp_q_timer.timer_pops = 0;
1632 list_del(&cmd->list);
1633
1634 ibmvscsis_free_cmd_resources(vscsi, cmd);
1635 } else {
1636 srp_snd_msg_failed(vscsi, rc);
1637 break;
1638 }
1639 }
1640
1641 if (!rc) {
1642 /*
1643 * The timer could pop with the queue empty. If
1644 * this happens, rc will always indicate a
1645 * success; clear the pop count.
1646 */
1647 vscsi->rsp_q_timer.timer_pops = 0;
1648 }
1649 } else {
1650 ibmvscsis_free_cmd_qs(vscsi);
1651 }
1652}
1653
1654/* Called with intr lock held */
1655static void ibmvscsis_send_mad_resp(struct scsi_info *vscsi,
1656 struct ibmvscsis_cmd *cmd,
1657 struct viosrp_crq *crq)
1658{
1659 struct iu_entry *iue = cmd->iue;
1660 struct mad_common *mad = (struct mad_common *)&vio_iu(iue)->mad;
1661 uint flag_bits = 0;
1662 long rc;
1663
1664 dma_wmb();
1665 rc = h_copy_rdma(sizeof(struct mad_common),
1666 vscsi->dds.window[LOCAL].liobn, iue->sbuf->dma,
1667 vscsi->dds.window[REMOTE].liobn,
1668 be64_to_cpu(crq->IU_data_ptr));
1669 if (!rc) {
1670 cmd->rsp.format = VIOSRP_MAD_FORMAT;
1671 cmd->rsp.len = sizeof(struct mad_common);
1672 cmd->rsp.tag = mad->tag;
1673 list_add_tail(&cmd->list, &vscsi->waiting_rsp);
1674 ibmvscsis_send_messages(vscsi);
1675 } else {
1676 pr_debug("Error sending mad response, rc %ld\n", rc);
1677 if (rc == H_PERMISSION) {
1678 if (connection_broken(vscsi))
1679 flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
1680 }
1681 dev_err(&vscsi->dev, "mad: failed to copy to client, rc %ld\n",
1682 rc);
1683
1684 ibmvscsis_free_cmd_resources(vscsi, cmd);
1685 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
1686 flag_bits);
1687 }
1688}
1689
1690/**
1691 * ibmvscsis_mad() - Service a MAnagement Data gram.
1692 * @vscsi: Pointer to our adapter structure
1693 * @crq: Pointer to the CRQ entry containing the MAD request
1694 *
1695 * EXECUTION ENVIRONMENT:
1696 * Interrupt called with adapter lock held
1697 */
1698static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq)
1699{
1700 struct iu_entry *iue;
1701 struct ibmvscsis_cmd *cmd;
1702 struct mad_common *mad;
1703 long rc = ADAPT_SUCCESS;
1704
1705 switch (vscsi->state) {
1706 /*
1707 * We have not exchanged Init Msgs yet, so this MAD was sent
1708 * before the last Transport Event; client will not be
1709 * expecting a response.
1710 */
1711 case WAIT_CONNECTION:
1712 pr_debug("mad: in Wait Connection state, ignoring MAD, flags %d\n",
1713 vscsi->flags);
1714 return ADAPT_SUCCESS;
1715
1716 case SRP_PROCESSING:
1717 case CONNECTED:
1718 break;
1719
1720 /*
1721 * We should never get here while we're in these states.
1722 * Just log an error and get out.
1723 */
1724 case UNCONFIGURING:
1725 case WAIT_IDLE:
1726 case ERR_DISCONNECT:
1727 case ERR_DISCONNECT_RECONNECT:
1728 default:
1729 dev_err(&vscsi->dev, "mad: invalid adapter state %d for mad\n",
1730 vscsi->state);
1731 return ADAPT_SUCCESS;
1732 }
1733
1734 cmd = ibmvscsis_get_free_cmd(vscsi);
1735 if (!cmd) {
1736 dev_err(&vscsi->dev, "mad: failed to get cmd, debit %d\n",
1737 vscsi->debit);
1738 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
1739 return ERROR;
1740 }
1741 iue = cmd->iue;
1742 cmd->type = ADAPTER_MAD;
1743
1744 rc = ibmvscsis_copy_crq_packet(vscsi, cmd, crq);
1745 if (!rc) {
1746 mad = (struct mad_common *)&vio_iu(iue)->mad;
1747
1748 pr_debug("mad: type %d\n", be32_to_cpu(mad->type));
1749
1750 if (be16_to_cpu(mad->length) < 0) {
1751 dev_err(&vscsi->dev, "mad: length is < 0\n");
1752 ibmvscsis_post_disconnect(vscsi,
1753 ERR_DISCONNECT_RECONNECT, 0);
1754 rc = SRP_VIOLATION;
1755 } else {
1756 rc = ibmvscsis_process_mad(vscsi, iue);
1757 }
1758
1759 pr_debug("mad: status %hd, rc %ld\n", be16_to_cpu(mad->status),
1760 rc);
1761
1762 if (!rc)
1763 ibmvscsis_send_mad_resp(vscsi, cmd, crq);
1764 } else {
1765 ibmvscsis_free_cmd_resources(vscsi, cmd);
1766 }
1767
1768 pr_debug("Leaving mad, rc %ld\n", rc);
1769 return rc;
1770}
1771
1772/**
1773 * ibmvscsis_login_rsp() - Create/copy a login response notice to the client
1774 * @vscsi: Pointer to our adapter structure
1775 * @cmd: Pointer to the command for the SRP Login request
1776 *
1777 * EXECUTION ENVIRONMENT:
1778 * Interrupt, interrupt lock held
1779 */
1780static long ibmvscsis_login_rsp(struct scsi_info *vscsi,
1781 struct ibmvscsis_cmd *cmd)
1782{
1783 struct iu_entry *iue = cmd->iue;
1784 struct srp_login_rsp *rsp = &vio_iu(iue)->srp.login_rsp;
1785 struct format_code *fmt;
1786 uint flag_bits = 0;
1787 long rc = ADAPT_SUCCESS;
1788
1789 memset(rsp, 0, sizeof(struct srp_login_rsp));
1790
1791 rsp->opcode = SRP_LOGIN_RSP;
1792 rsp->req_lim_delta = cpu_to_be32(vscsi->request_limit);
1793 rsp->tag = cmd->rsp.tag;
1794 rsp->max_it_iu_len = cpu_to_be32(SRP_MAX_IU_LEN);
1795 rsp->max_ti_iu_len = cpu_to_be32(SRP_MAX_IU_LEN);
1796 fmt = (struct format_code *)&rsp->buf_fmt;
1797 fmt->buffers = SUPPORTED_FORMATS;
1798 vscsi->credit = 0;
1799
1800 cmd->rsp.len = sizeof(struct srp_login_rsp);
1801
1802 dma_wmb();
1803 rc = h_copy_rdma(cmd->rsp.len, vscsi->dds.window[LOCAL].liobn,
1804 iue->sbuf->dma, vscsi->dds.window[REMOTE].liobn,
1805 be64_to_cpu(iue->remote_token));
1806
1807 switch (rc) {
1808 case H_SUCCESS:
1809 break;
1810
1811 case H_PERMISSION:
1812 if (connection_broken(vscsi))
1813 flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED;
1814 dev_err(&vscsi->dev, "login_rsp: error copying to client, rc %ld\n",
1815 rc);
1816 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
1817 flag_bits);
1818 break;
1819 case H_SOURCE_PARM:
1820 case H_DEST_PARM:
1821 default:
1822 dev_err(&vscsi->dev, "login_rsp: error copying to client, rc %ld\n",
1823 rc);
1824 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
1825 break;
1826 }
1827
1828 return rc;
1829}
1830
1831/**
1832 * ibmvscsis_srp_login_rej() - Create/copy a login rejection notice to client
1833 * @vscsi: Pointer to our adapter structure
1834 * @cmd: Pointer to the command for the SRP Login request
1835 * @reason: The reason the SRP Login is being rejected, per SRP protocol
1836 *
1837 * EXECUTION ENVIRONMENT:
1838 * Interrupt, interrupt lock held
1839 */
1840static long ibmvscsis_srp_login_rej(struct scsi_info *vscsi,
1841 struct ibmvscsis_cmd *cmd, u32 reason)
1842{
1843 struct iu_entry *iue = cmd->iue;
1844 struct srp_login_rej *rej = &vio_iu(iue)->srp.login_rej;
1845 struct format_code *fmt;
1846 uint flag_bits = 0;
1847 long rc = ADAPT_SUCCESS;
1848
1849 memset(rej, 0, sizeof(*rej));
1850
1851 rej->opcode = SRP_LOGIN_REJ;
1852 rej->reason = cpu_to_be32(reason);
1853 rej->tag = cmd->rsp.tag;
1854 fmt = (struct format_code *)&rej->buf_fmt;
1855 fmt->buffers = SUPPORTED_FORMATS;
1856
1857 cmd->rsp.len = sizeof(*rej);
1858
1859 dma_wmb();
1860 rc = h_copy_rdma(cmd->rsp.len, vscsi->dds.window[LOCAL].liobn,
1861 iue->sbuf->dma, vscsi->dds.window[REMOTE].liobn,
1862 be64_to_cpu(iue->remote_token));
1863
1864 switch (rc) {
1865 case H_SUCCESS:
1866 break;
1867 case H_PERMISSION:
1868 if (connection_broken(vscsi))
1869 flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED;
1870 dev_err(&vscsi->dev, "login_rej: error copying to client, rc %ld\n",
1871 rc);
1872 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
1873 flag_bits);
1874 break;
1875 case H_SOURCE_PARM:
1876 case H_DEST_PARM:
1877 default:
1878 dev_err(&vscsi->dev, "login_rej: error copying to client, rc %ld\n",
1879 rc);
1880 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
1881 break;
1882 }
1883
1884 return rc;
1885}
1886
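/**
 * ibmvscsis_make_nexus() - Allocate an I_T nexus for a target port
 * @tport:	Pointer to the tport being logged in to
 *
 * Allocates an ibmvscsis_nexus and registers a TCM session for it via
 * target_alloc_session(); a no-op if the tport already has a nexus.
 */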
1887static int ibmvscsis_make_nexus(struct ibmvscsis_tport *tport)
1888{
1889 char *name = tport->tport_name;
1890 struct ibmvscsis_nexus *nexus;
1891 int rc;
1892
1893 if (tport->ibmv_nexus) {
1894 pr_debug("tport->ibmv_nexus already exists\n");
1895 return 0;
1896 }
1897
1898 nexus = kzalloc(sizeof(*nexus), GFP_KERNEL);
1899 if (!nexus) {
1900 pr_err("Unable to allocate struct ibmvscsis_nexus\n");
1901 return -ENOMEM;
1902 }
1903
1904 nexus->se_sess = target_alloc_session(&tport->se_tpg, 0, 0,
1905 TARGET_PROT_NORMAL, name, nexus,
1906 NULL);
1907 if (IS_ERR(nexus->se_sess)) {
1908 rc = PTR_ERR(nexus->se_sess);
1909 goto transport_init_fail;
1910 }
1911
1912 tport->ibmv_nexus = nexus;
1913
1914 return 0;
1915
1916transport_init_fail:
1917 kfree(nexus);
1918 return rc;
1919}
1920
1921static int ibmvscsis_drop_nexus(struct ibmvscsis_tport *tport)
1922{
1923 struct se_session *se_sess;
1924 struct ibmvscsis_nexus *nexus;
1925
1926 nexus = tport->ibmv_nexus;
1927 if (!nexus)
1928 return -ENODEV;
1929
1930 se_sess = nexus->se_sess;
1931 if (!se_sess)
1932 return -ENODEV;
1933
1934 /*
1935 * Release the SCSI I_T Nexus to the emulated ibmvscsis Target Port
1936 */
1937 transport_deregister_session(se_sess);
1938 tport->ibmv_nexus = NULL;
1939 kfree(nexus);
1940
1941 return 0;
1942}
1943
1944/**
1945 * ibmvscsis_srp_login() - Process an SRP Login Request
1946 * @vscsi: Pointer to our adapter structure
1947 * @cmd: Command element to use to process the SRP Login request
1948 * @crq: Pointer to CRQ entry containing the SRP Login request
1949 *
1950 * EXECUTION ENVIRONMENT:
1951 * Interrupt, called with interrupt lock held
1952 */
1953static long ibmvscsis_srp_login(struct scsi_info *vscsi,
1954 struct ibmvscsis_cmd *cmd,
1955 struct viosrp_crq *crq)
1956{
1957 struct iu_entry *iue = cmd->iue;
1958 struct srp_login_req *req = &vio_iu(iue)->srp.login_req;
1959 struct port_id {
1960 __be64 id_extension;
1961 __be64 io_guid;
1962 } *iport, *tport;
1963 struct format_code *fmt;
1964 u32 reason = 0x0;
1965 long rc = ADAPT_SUCCESS;
1966
1967 iport = (struct port_id *)req->initiator_port_id;
1968 tport = (struct port_id *)req->target_port_id;
1969 fmt = (struct format_code *)&req->req_buf_fmt;
1970 if (be32_to_cpu(req->req_it_iu_len) > SRP_MAX_IU_LEN)
1971 reason = SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE;
1972 else if (be32_to_cpu(req->req_it_iu_len) < 64)
1973 reason = SRP_LOGIN_REJ_UNABLE_ESTABLISH_CHANNEL;
1974 else if ((be64_to_cpu(iport->id_extension) > (MAX_NUM_PORTS - 1)) ||
1975 (be64_to_cpu(tport->id_extension) > (MAX_NUM_PORTS - 1)))
1976 reason = SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL;
1977 else if (req->req_flags & SRP_MULTICHAN_MULTI)
1978 reason = SRP_LOGIN_REJ_MULTI_CHANNEL_UNSUPPORTED;
1979 else if (fmt->buffers & (~SUPPORTED_FORMATS))
1980 reason = SRP_LOGIN_REJ_UNSUPPORTED_DESCRIPTOR_FMT;
1981 	else if ((fmt->buffers & SUPPORTED_FORMATS) == 0)
1982 reason = SRP_LOGIN_REJ_UNSUPPORTED_DESCRIPTOR_FMT;
1983
1984 if (vscsi->state == SRP_PROCESSING)
1985 reason = SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED;
1986
1987 rc = ibmvscsis_make_nexus(&vscsi->tport);
1988 if (rc)
1989 reason = SRP_LOGIN_REJ_UNABLE_ESTABLISH_CHANNEL;
1990
1991 cmd->rsp.format = VIOSRP_SRP_FORMAT;
1992 cmd->rsp.tag = req->tag;
1993
1994 pr_debug("srp_login: reason 0x%x\n", reason);
1995
1996 if (reason)
1997 rc = ibmvscsis_srp_login_rej(vscsi, cmd, reason);
1998 else
1999 rc = ibmvscsis_login_rsp(vscsi, cmd);
2000
2001 if (!rc) {
2002 if (!reason)
2003 vscsi->state = SRP_PROCESSING;
2004
2005 list_add_tail(&cmd->list, &vscsi->waiting_rsp);
2006 ibmvscsis_send_messages(vscsi);
2007 } else {
2008 ibmvscsis_free_cmd_resources(vscsi, cmd);
2009 }
2010
2011 pr_debug("Leaving srp_login, rc %ld\n", rc);
2012 return rc;
2013}
2014
2015/**
2016 * ibmvscsis_srp_i_logout() - Helper Function to close I_T Nexus
2017 * @vscsi: Pointer to our adapter structure
2018 * @cmd: Command element to use to process the Implicit Logout request
2019 * @crq: Pointer to CRQ entry containing the Implicit Logout request
2020 *
2021  * Close the I_T nexus. Note that this implementation may not
2022  * fully conform to the SRP specification.
2023 *
2024 * EXECUTION ENVIRONMENT:
2025 * Interrupt, interrupt lock held
2026 */
2027static long ibmvscsis_srp_i_logout(struct scsi_info *vscsi,
2028 struct ibmvscsis_cmd *cmd,
2029 struct viosrp_crq *crq)
2030{
2031 struct iu_entry *iue = cmd->iue;
2032 struct srp_i_logout *log_out = &vio_iu(iue)->srp.i_logout;
2033 long rc = ADAPT_SUCCESS;
2034
2035 if ((vscsi->debit > 0) || !list_empty(&vscsi->schedule_q) ||
2036 !list_empty(&vscsi->waiting_rsp)) {
2037 dev_err(&vscsi->dev, "i_logout: outstanding work\n");
2038 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
2039 } else {
2040 cmd->rsp.format = SRP_FORMAT;
2041 cmd->rsp.tag = log_out->tag;
2042 cmd->rsp.len = sizeof(struct mad_common);
2043 list_add_tail(&cmd->list, &vscsi->waiting_rsp);
2044 ibmvscsis_send_messages(vscsi);
2045
2046 ibmvscsis_post_disconnect(vscsi, WAIT_IDLE, 0);
2047 }
2048
2049 return rc;
2050}
2051
2052 /**
 * ibmvscsis_srp_cmd() - Parse an SRP IU from the CRQ and dispatch it
 * @vscsi:	Pointer to our adapter structure
 * @crq:	Pointer to the CRQ entry containing the SRP request
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt, interrupt lock held
 */
2053static void ibmvscsis_srp_cmd(struct scsi_info *vscsi, struct viosrp_crq *crq)
2054{
2055 struct ibmvscsis_cmd *cmd;
2056 struct iu_entry *iue;
2057 struct srp_cmd *srp;
2058 struct srp_tsk_mgmt *tsk;
2059 long rc;
2060
2061 if (vscsi->request_limit - vscsi->debit <= 0) {
2062 /* Client has exceeded request limit */
2063 dev_err(&vscsi->dev, "Client exceeded the request limit (%d), debit %d\n",
2064 vscsi->request_limit, vscsi->debit);
2065 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2066 return;
2067 }
2068
2069 cmd = ibmvscsis_get_free_cmd(vscsi);
2070 if (!cmd) {
2071 dev_err(&vscsi->dev, "srp_cmd failed to get cmd, debit %d\n",
2072 vscsi->debit);
2073 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2074 return;
2075 }
2076 iue = cmd->iue;
2077 srp = &vio_iu(iue)->srp.cmd;
2078
2079 rc = ibmvscsis_copy_crq_packet(vscsi, cmd, crq);
2080 if (rc) {
2081 ibmvscsis_free_cmd_resources(vscsi, cmd);
2082 return;
2083 }
2084
2085 if (vscsi->state == SRP_PROCESSING) {
2086 switch (srp->opcode) {
2087 case SRP_LOGIN_REQ:
2088 rc = ibmvscsis_srp_login(vscsi, cmd, crq);
2089 break;
2090
2091 case SRP_TSK_MGMT:
2092 tsk = &vio_iu(iue)->srp.tsk_mgmt;
2093 pr_debug("tsk_mgmt tag: %llu (0x%llx)\n", tsk->tag,
2094 tsk->tag);
2095 cmd->rsp.tag = tsk->tag;
2096 vscsi->debit += 1;
2097 cmd->type = TASK_MANAGEMENT;
2098 list_add_tail(&cmd->list, &vscsi->schedule_q);
2099 queue_work(vscsi->work_q, &cmd->work);
2100 break;
2101
2102 case SRP_CMD:
2103 pr_debug("srp_cmd tag: %llu (0x%llx)\n", srp->tag,
2104 srp->tag);
2105 cmd->rsp.tag = srp->tag;
2106 vscsi->debit += 1;
2107 cmd->type = SCSI_CDB;
2108 /*
2109 * We want to keep track of work waiting for
2110 * the workqueue.
2111 */
2112 list_add_tail(&cmd->list, &vscsi->schedule_q);
2113 queue_work(vscsi->work_q, &cmd->work);
2114 break;
2115
2116 case SRP_I_LOGOUT:
2117 rc = ibmvscsis_srp_i_logout(vscsi, cmd, crq);
2118 break;
2119
2120 case SRP_CRED_RSP:
2121 case SRP_AER_RSP:
2122 default:
2123 ibmvscsis_free_cmd_resources(vscsi, cmd);
2124 dev_err(&vscsi->dev, "invalid srp cmd, opcode %d\n",
2125 (uint)srp->opcode);
2126 ibmvscsis_post_disconnect(vscsi,
2127 ERR_DISCONNECT_RECONNECT, 0);
2128 break;
2129 }
2130 } else if (srp->opcode == SRP_LOGIN_REQ && vscsi->state == CONNECTED) {
2131 rc = ibmvscsis_srp_login(vscsi, cmd, crq);
2132 } else {
2133 ibmvscsis_free_cmd_resources(vscsi, cmd);
2134 dev_err(&vscsi->dev, "Invalid state %d to handle srp cmd\n",
2135 vscsi->state);
2136 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2137 }
2138}
2139
2140/**
2141 * ibmvscsis_ping_response() - Respond to a ping request
2142 * @vscsi: Pointer to our adapter structure
2143 *
2144 * Let the client know that the server is alive and waiting on
2145 * its native I/O stack.
2146 * If any type of error occurs from the call to queue a ping
2147 * response then the client is either not accepting or receiving
2148 * interrupts. Disconnect with an error.
2149 *
2150 * EXECUTION ENVIRONMENT:
2151 * Interrupt, interrupt lock held
2152 */
2153static long ibmvscsis_ping_response(struct scsi_info *vscsi)
2154{
2155 struct viosrp_crq *crq;
2156 u64 buffer[2] = { 0, 0 };
2157 long rc;
2158
2159 crq = (struct viosrp_crq *)&buffer;
2160 crq->valid = VALID_CMD_RESP_EL;
2161 crq->format = (u8)MESSAGE_IN_CRQ;
2162 crq->status = PING_RESPONSE;
2163
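	/*
	 * The 16-byte CRQ element is handed to the hypervisor as two
	 * 64-bit big-endian values: the high and low halves of buffer.
	 */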
2164 rc = h_send_crq(vscsi->dds.unit_id, cpu_to_be64(buffer[MSG_HI]),
2165 cpu_to_be64(buffer[MSG_LOW]));
2166
2167 switch (rc) {
2168 case H_SUCCESS:
2169 break;
2170 case H_CLOSED:
2171 		vscsi->flags |= CLIENT_FAILED;
		/* fall through */
2172 	case H_DROPPED:
2173 		vscsi->flags |= RESPONSE_Q_DOWN;
		/* fall through */
2174 	case H_REMOTE_PARM:
2175 dev_err(&vscsi->dev, "ping_response: h_send_crq failed, rc %ld\n",
2176 rc);
2177 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2178 break;
2179 default:
2180 dev_err(&vscsi->dev, "ping_response: h_send_crq returned unknown rc %ld\n",
2181 rc);
2182 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
2183 break;
2184 }
2185
2186 return rc;
2187}
2188
2189/**
2190 * ibmvscsis_handle_init_compl_msg() - Respond to an Init Complete Message
2191 * @vscsi: Pointer to our adapter structure
2192 *
2193 * Must be called with interrupt lock held.
2194 */
2195static long ibmvscsis_handle_init_compl_msg(struct scsi_info *vscsi)
2196{
2197 long rc = ADAPT_SUCCESS;
2198
2199 switch (vscsi->state) {
2200 case NO_QUEUE:
2201 case ERR_DISCONNECT:
2202 case ERR_DISCONNECT_RECONNECT:
2203 case ERR_DISCONNECTED:
2204 case UNCONFIGURING:
2205 case UNDEFINED:
2206 rc = ERROR;
2207 break;
2208
2209 case WAIT_CONNECTION:
2210 vscsi->state = CONNECTED;
2211 break;
2212
2213 case WAIT_IDLE:
2214 case SRP_PROCESSING:
2215 case CONNECTED:
2216 case WAIT_ENABLED:
2217 case PART_UP_WAIT_ENAB:
2218 default:
2219 rc = ERROR;
2220 dev_err(&vscsi->dev, "init_msg: invalid state %d to get init compl msg\n",
2221 vscsi->state);
2222 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2223 break;
2224 }
2225
2226 return rc;
2227}
2228
2229/**
2230 * ibmvscsis_handle_init_msg() - Respond to an Init Message
2231 * @vscsi: Pointer to our adapter structure
2232 *
2233 * Must be called with interrupt lock held.
2234 */
2235static long ibmvscsis_handle_init_msg(struct scsi_info *vscsi)
2236{
2237 long rc = ADAPT_SUCCESS;
2238
2239 switch (vscsi->state) {
2240 case WAIT_ENABLED:
2241 vscsi->state = PART_UP_WAIT_ENAB;
2242 break;
2243
2244 case WAIT_CONNECTION:
2245 rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG);
2246 switch (rc) {
2247 case H_SUCCESS:
2248 vscsi->state = CONNECTED;
2249 break;
2250
2251 case H_PARAMETER:
2252 dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
2253 rc);
2254 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
2255 break;
2256
2257 case H_DROPPED:
2258 dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
2259 rc);
2260 rc = ERROR;
2261 ibmvscsis_post_disconnect(vscsi,
2262 ERR_DISCONNECT_RECONNECT, 0);
2263 break;
2264
2265 case H_CLOSED:
2266 pr_warn("init_msg: failed to send, rc %ld\n", rc);
2267 rc = 0;
2268 break;
2269 }
2270 break;
2271
2272 case UNDEFINED:
2273 rc = ERROR;
2274 break;
2275
2276 case UNCONFIGURING:
2277 break;
2278
2279 case PART_UP_WAIT_ENAB:
2280 case CONNECTED:
2281 case SRP_PROCESSING:
2282 case WAIT_IDLE:
2283 case NO_QUEUE:
2284 case ERR_DISCONNECT:
2285 case ERR_DISCONNECT_RECONNECT:
2286 case ERR_DISCONNECTED:
2287 default:
2288 rc = ERROR;
2289 dev_err(&vscsi->dev, "init_msg: invalid state %d to get init msg\n",
2290 vscsi->state);
2291 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2292 break;
2293 }
2294
2295 return rc;
2296}
2297
2298/**
2299 * ibmvscsis_init_msg() - Respond to an init message
2300 * @vscsi: Pointer to our adapter structure
2301 * @crq: Pointer to CRQ element containing the Init Message
2302 *
2303 * EXECUTION ENVIRONMENT:
2304 * Interrupt, interrupt lock held
2305 */
2306static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq)
2307{
2308 long rc = ADAPT_SUCCESS;
2309
2310 pr_debug("init_msg: state 0x%hx\n", vscsi->state);
2311
2312 rc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO,
2313 (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0,
2314 0);
2315 if (rc == H_SUCCESS) {
2316 vscsi->client_data.partition_number =
2317 be64_to_cpu(*(u64 *)vscsi->map_buf);
2318 pr_debug("init_msg, part num %d\n",
2319 vscsi->client_data.partition_number);
2320 } else {
2321 pr_debug("init_msg h_vioctl rc %ld\n", rc);
2322 rc = ADAPT_SUCCESS;
2323 }
2324
2325 if (crq->format == INIT_MSG) {
2326 rc = ibmvscsis_handle_init_msg(vscsi);
2327 } else if (crq->format == INIT_COMPLETE_MSG) {
2328 rc = ibmvscsis_handle_init_compl_msg(vscsi);
2329 } else {
2330 rc = ERROR;
2331 dev_err(&vscsi->dev, "init_msg: invalid format %d\n",
2332 (uint)crq->format);
2333 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2334 }
2335
2336 return rc;
2337}
2338
2339/**
2340 * ibmvscsis_parse_command() - Parse an element taken from the cmd rsp queue.
2341 * @vscsi: Pointer to our adapter structure
2342 * @crq: Pointer to CRQ element containing the SRP request
2343 *
2344 * This function will return success if the command queue element is valid
2345  * and the SRP IU or MAD request it points to was also valid. Note that
2346  * success here does not mean that no error was returned to the client.
2347 *
2348 * EXECUTION ENVIRONMENT:
2349 * Interrupt, intr lock held
2350 */
2351static long ibmvscsis_parse_command(struct scsi_info *vscsi,
2352 struct viosrp_crq *crq)
2353{
2354 long rc = ADAPT_SUCCESS;
2355
2356 switch (crq->valid) {
2357 case VALID_CMD_RESP_EL:
2358 switch (crq->format) {
2359 case OS400_FORMAT:
2360 case AIX_FORMAT:
2361 case LINUX_FORMAT:
2362 case MAD_FORMAT:
2363 if (vscsi->flags & PROCESSING_MAD) {
2364 rc = ERROR;
2365 dev_err(&vscsi->dev, "parse_command: already processing mad\n");
2366 ibmvscsis_post_disconnect(vscsi,
2367 ERR_DISCONNECT_RECONNECT,
2368 0);
2369 } else {
2370 vscsi->flags |= PROCESSING_MAD;
2371 rc = ibmvscsis_mad(vscsi, crq);
2372 }
2373 break;
2374
2375 case SRP_FORMAT:
2376 ibmvscsis_srp_cmd(vscsi, crq);
2377 break;
2378
2379 case MESSAGE_IN_CRQ:
2380 if (crq->status == PING)
2381 ibmvscsis_ping_response(vscsi);
2382 break;
2383
2384 default:
2385 dev_err(&vscsi->dev, "parse_command: invalid format %d\n",
2386 (uint)crq->format);
2387 ibmvscsis_post_disconnect(vscsi,
2388 ERR_DISCONNECT_RECONNECT, 0);
2389 break;
2390 }
2391 break;
2392
2393 case VALID_TRANS_EVENT:
2394 rc = ibmvscsis_trans_event(vscsi, crq);
2395 break;
2396
2397 case VALID_INIT_MSG:
2398 rc = ibmvscsis_init_msg(vscsi, crq);
2399 break;
2400
2401 default:
2402 dev_err(&vscsi->dev, "parse_command: invalid valid field %d\n",
2403 (uint)crq->valid);
2404 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2405 break;
2406 }
2407
2408 /*
2409 * Return only what the interrupt handler cares
2410 	 * about; on most errors we keep right on trucking.
2411 */
2412 rc = vscsi->flags & SCHEDULE_DISCONNECT;
2413
2414 return rc;
2415}
2416
2417static int read_dma_window(struct scsi_info *vscsi)
2418{
2419 struct vio_dev *vdev = vscsi->dma_dev;
2420 const __be32 *dma_window;
2421 const __be32 *prop;
2422
2423 	/*
	 * TODO: Using of_parse_dma_window would be better, but it doesn't
	 * give a way to read multiple windows without already knowing the
	 * size of a window or the number of windows.
	 */
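	/*
	 * Illustrative layout, assuming one address cell and one size
	 * cell per window:
	 *	ibm,my-dma-window = <liobn0 addr0 size0 liobn1 addr1 size1>
	 * The first window is our local window, the second the client's.
	 */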
2427 dma_window = (const __be32 *)vio_get_attribute(vdev,
2428 "ibm,my-dma-window",
2429 NULL);
2430 if (!dma_window) {
2431 pr_err("Couldn't find ibm,my-dma-window property\n");
2432 		return -ENODEV;
2433 }
2434
2435 vscsi->dds.window[LOCAL].liobn = be32_to_cpu(*dma_window);
2436 dma_window++;
2437
2438 prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-address-cells",
2439 NULL);
2440 if (!prop) {
2441 pr_warn("Couldn't find ibm,#dma-address-cells property\n");
2442 dma_window++;
2443 } else {
2444 dma_window += be32_to_cpu(*prop);
2445 }
2446
2447 prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-size-cells",
2448 NULL);
2449 if (!prop) {
2450 pr_warn("Couldn't find ibm,#dma-size-cells property\n");
2451 dma_window++;
2452 } else {
2453 dma_window += be32_to_cpu(*prop);
2454 }
2455
2456 /* dma_window should point to the second window now */
2457 vscsi->dds.window[REMOTE].liobn = be32_to_cpu(*dma_window);
2458
2459 return 0;
2460}
2461
2462static struct ibmvscsis_tport *ibmvscsis_lookup_port(const char *name)
2463{
2464 struct ibmvscsis_tport *tport = NULL;
2465 struct vio_dev *vdev;
2466 struct scsi_info *vscsi;
2467
2468 spin_lock_bh(&ibmvscsis_dev_lock);
2469 list_for_each_entry(vscsi, &ibmvscsis_dev_list, list) {
2470 vdev = vscsi->dma_dev;
2471 if (!strcmp(dev_name(&vdev->dev), name)) {
2472 tport = &vscsi->tport;
2473 break;
2474 }
2475 }
2476 spin_unlock_bh(&ibmvscsis_dev_lock);
2477
2478 return tport;
2479}
2480
2481/**
2482 * ibmvscsis_parse_cmd() - Parse SRP Command
2483 * @vscsi: Pointer to our adapter structure
2484 * @cmd: Pointer to command element with SRP command
2485 *
2486 * Parse the srp command; if it is valid then submit it to tcm.
2487 * Note: The return code does not reflect the status of the SCSI CDB.
2488 *
2489 * EXECUTION ENVIRONMENT:
2490 * Process level
2491 */
2492static void ibmvscsis_parse_cmd(struct scsi_info *vscsi,
2493 struct ibmvscsis_cmd *cmd)
2494{
2495 struct iu_entry *iue = cmd->iue;
2496 struct srp_cmd *srp = (struct srp_cmd *)iue->sbuf->buf;
2497 struct ibmvscsis_nexus *nexus;
2498 u64 data_len = 0;
2499 enum dma_data_direction dir;
2500 int attr = 0;
2501 int rc = 0;
2502
2503 nexus = vscsi->tport.ibmv_nexus;
2504 /*
2505 * additional length in bytes. Note that the SRP spec says that
2506 * additional length is in 4-byte words, but technically the
2507 * additional length field is only the upper 6 bits of the byte.
2508 * The lower 2 bits are reserved. If the lower 2 bits are 0 (as
2509 * all reserved fields should be), then interpreting the byte as
2510 * an int will yield the length in bytes.
2511 */
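	/*
	 * Worked example: 16 bytes of additional CDB is 4 words, encoded
	 * as 4 << 2 = 0x10, which also reads as 16 when taken as a byte.
	 */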
2512 if (srp->add_cdb_len & 0x03) {
2513 dev_err(&vscsi->dev, "parse_cmd: reserved bits set in IU\n");
2514 spin_lock_bh(&vscsi->intr_lock);
2515 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2516 ibmvscsis_free_cmd_resources(vscsi, cmd);
2517 spin_unlock_bh(&vscsi->intr_lock);
2518 return;
2519 }
2520
2521 if (srp_get_desc_table(srp, &dir, &data_len)) {
2522 dev_err(&vscsi->dev, "0x%llx: parsing SRP descriptor table failed.\n",
2523 srp->tag);
2524 		goto fail;
2526 }
2527
2528 cmd->rsp.sol_not = srp->sol_not;
2529
2530 switch (srp->task_attr) {
2531 case SRP_SIMPLE_TASK:
2532 attr = TCM_SIMPLE_TAG;
2533 break;
2534 case SRP_ORDERED_TASK:
2535 attr = TCM_ORDERED_TAG;
2536 break;
2537 case SRP_HEAD_TASK:
2538 attr = TCM_HEAD_TAG;
2539 break;
2540 case SRP_ACA_TASK:
2541 attr = TCM_ACA_TAG;
2542 break;
2543 default:
2544 dev_err(&vscsi->dev, "Invalid task attribute %d\n",
2545 srp->task_attr);
2546 goto fail;
2547 }
2548
2549 cmd->se_cmd.tag = be64_to_cpu(srp->tag);
2550
2551 spin_lock_bh(&vscsi->intr_lock);
2552 list_add_tail(&cmd->list, &vscsi->active_q);
2553 spin_unlock_bh(&vscsi->intr_lock);
2554
2555 srp->lun.scsi_lun[0] &= 0x3f;
2556
2557 pr_debug("calling submit_cmd, se_cmd %p, lun 0x%llx, cdb 0x%x, attr:%d\n",
2558 &cmd->se_cmd, scsilun_to_int(&srp->lun), (int)srp->cdb[0],
2559 attr);
2560
2561 rc = target_submit_cmd(&cmd->se_cmd, nexus->se_sess, srp->cdb,
2562 cmd->sense_buf, scsilun_to_int(&srp->lun),
2563 data_len, attr, dir, 0);
2564 if (rc) {
2565 dev_err(&vscsi->dev, "target_submit_cmd failed, rc %d\n", rc);
2566 goto fail;
2567 }
2568 return;
2569
2570fail:
2571 spin_lock_bh(&vscsi->intr_lock);
2572 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2573 spin_unlock_bh(&vscsi->intr_lock);
2574}
2575
2576/**
2577 * ibmvscsis_parse_task() - Parse SRP Task Management Request
2578 * @vscsi: Pointer to our adapter structure
2579 * @cmd: Pointer to command element with SRP task management request
2580 *
2581 * Parse the srp task management request; if it is valid then submit it to tcm.
2582 * Note: The return code does not reflect the status of the task management
2583 * request.
2584 *
2585 * EXECUTION ENVIRONMENT:
2586  *	Process level
2587 */
2588static void ibmvscsis_parse_task(struct scsi_info *vscsi,
2589 struct ibmvscsis_cmd *cmd)
2590{
2591 struct iu_entry *iue = cmd->iue;
2592 struct srp_tsk_mgmt *srp_tsk = &vio_iu(iue)->srp.tsk_mgmt;
2593 int tcm_type;
2594 u64 tag_to_abort = 0;
2595 int rc = 0;
2596 struct ibmvscsis_nexus *nexus;
2597
2598 nexus = vscsi->tport.ibmv_nexus;
2599
2600 cmd->rsp.sol_not = srp_tsk->sol_not;
2601
2602 switch (srp_tsk->tsk_mgmt_func) {
2603 case SRP_TSK_ABORT_TASK:
2604 tcm_type = TMR_ABORT_TASK;
2605 tag_to_abort = be64_to_cpu(srp_tsk->task_tag);
2606 break;
2607 case SRP_TSK_ABORT_TASK_SET:
2608 tcm_type = TMR_ABORT_TASK_SET;
2609 break;
2610 case SRP_TSK_CLEAR_TASK_SET:
2611 tcm_type = TMR_CLEAR_TASK_SET;
2612 break;
2613 case SRP_TSK_LUN_RESET:
2614 tcm_type = TMR_LUN_RESET;
2615 break;
2616 case SRP_TSK_CLEAR_ACA:
2617 tcm_type = TMR_CLEAR_ACA;
2618 break;
2619 default:
2620 dev_err(&vscsi->dev, "unknown task mgmt func %d\n",
2621 srp_tsk->tsk_mgmt_func);
2622 cmd->se_cmd.se_tmr_req->response =
2623 TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
2624 rc = -1;
2625 break;
2626 }
2627
2628 if (!rc) {
2629 cmd->se_cmd.tag = be64_to_cpu(srp_tsk->tag);
2630
2631 spin_lock_bh(&vscsi->intr_lock);
2632 list_add_tail(&cmd->list, &vscsi->active_q);
2633 spin_unlock_bh(&vscsi->intr_lock);
2634
2635 srp_tsk->lun.scsi_lun[0] &= 0x3f;
2636
2637 pr_debug("calling submit_tmr, func %d\n",
2638 srp_tsk->tsk_mgmt_func);
2639 rc = target_submit_tmr(&cmd->se_cmd, nexus->se_sess, NULL,
2640 scsilun_to_int(&srp_tsk->lun), srp_tsk,
2641 tcm_type, GFP_KERNEL, tag_to_abort, 0);
2642 if (rc) {
2643 dev_err(&vscsi->dev, "target_submit_tmr failed, rc %d\n",
2644 rc);
2645 cmd->se_cmd.se_tmr_req->response =
2646 TMR_FUNCTION_REJECTED;
2647 }
2648 }
2649
2650 if (rc)
2651 transport_send_check_condition_and_sense(&cmd->se_cmd, 0, 0);
2652}
2653
2654static void ibmvscsis_scheduler(struct work_struct *work)
2655{
2656 struct ibmvscsis_cmd *cmd = container_of(work, struct ibmvscsis_cmd,
2657 work);
2658 struct scsi_info *vscsi = cmd->adapter;
2659
2660 spin_lock_bh(&vscsi->intr_lock);
2661
2662 /* Remove from schedule_q */
2663 list_del(&cmd->list);
2664
2665 /* Don't submit cmd if we're disconnecting */
2666 if (vscsi->flags & (SCHEDULE_DISCONNECT | DISCONNECT_SCHEDULED)) {
2667 ibmvscsis_free_cmd_resources(vscsi, cmd);
2668
2669 /* ibmvscsis_disconnect might be waiting for us */
2670 if (list_empty(&vscsi->active_q) &&
2671 list_empty(&vscsi->schedule_q) &&
2672 (vscsi->flags & WAIT_FOR_IDLE)) {
2673 vscsi->flags &= ~WAIT_FOR_IDLE;
2674 complete(&vscsi->wait_idle);
2675 }
2676
2677 spin_unlock_bh(&vscsi->intr_lock);
2678 return;
2679 }
2680
2681 spin_unlock_bh(&vscsi->intr_lock);
2682
2683 switch (cmd->type) {
2684 case SCSI_CDB:
2685 ibmvscsis_parse_cmd(vscsi, cmd);
2686 break;
2687 case TASK_MANAGEMENT:
2688 ibmvscsis_parse_task(vscsi, cmd);
2689 break;
2690 default:
2691 dev_err(&vscsi->dev, "scheduler, invalid cmd type %d\n",
2692 cmd->type);
2693 spin_lock_bh(&vscsi->intr_lock);
2694 ibmvscsis_free_cmd_resources(vscsi, cmd);
2695 spin_unlock_bh(&vscsi->intr_lock);
2696 break;
2697 }
2698}
2699
2700static int ibmvscsis_alloc_cmds(struct scsi_info *vscsi, int num)
2701{
2702 struct ibmvscsis_cmd *cmd;
2703 int i;
2704
2705 INIT_LIST_HEAD(&vscsi->free_cmd);
2706 vscsi->cmd_pool = kcalloc(num, sizeof(struct ibmvscsis_cmd),
2707 GFP_KERNEL);
2708 if (!vscsi->cmd_pool)
2709 return -ENOMEM;
2710
2711 for (i = 0, cmd = (struct ibmvscsis_cmd *)vscsi->cmd_pool; i < num;
2712 i++, cmd++) {
2713 cmd->adapter = vscsi;
2714 INIT_WORK(&cmd->work, ibmvscsis_scheduler);
2715 list_add_tail(&cmd->list, &vscsi->free_cmd);
2716 }
2717
2718 return 0;
2719}
2720
2721static void ibmvscsis_free_cmds(struct scsi_info *vscsi)
2722{
2723 kfree(vscsi->cmd_pool);
2724 vscsi->cmd_pool = NULL;
2725 INIT_LIST_HEAD(&vscsi->free_cmd);
2726}
2727
2728/**
2729 * ibmvscsis_service_wait_q() - Service Waiting Queue
2730 * @timer: Pointer to timer which has expired
2731 *
2732 * This routine is called when the timer pops to service the waiting
2733 * queue. Elements on the queue have completed, their responses have been
2734 * copied to the client, but the client's response queue was full so
2735  * the completion message could not be sent. The routine takes the
2736  * interrupt lock and calls ibmvscsis_send_messages().
2737 *
2738 * EXECUTION ENVIRONMENT:
2739 * called at interrupt level
2740 */
2741static enum hrtimer_restart ibmvscsis_service_wait_q(struct hrtimer *timer)
2742{
2743 struct timer_cb *p_timer = container_of(timer, struct timer_cb, timer);
2744 struct scsi_info *vscsi = container_of(p_timer, struct scsi_info,
2745 rsp_q_timer);
2746
2747 spin_lock_bh(&vscsi->intr_lock);
2748 p_timer->timer_pops += 1;
2749 p_timer->started = false;
2750 ibmvscsis_send_messages(vscsi);
2751 spin_unlock_bh(&vscsi->intr_lock);
2752
2753 return HRTIMER_NORESTART;
2754}
2755
2756static long ibmvscsis_alloctimer(struct scsi_info *vscsi)
2757{
2758 struct timer_cb *p_timer;
2759
2760 p_timer = &vscsi->rsp_q_timer;
2761 hrtimer_init(&p_timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2762
2763 p_timer->timer.function = ibmvscsis_service_wait_q;
2764 p_timer->started = false;
2765 p_timer->timer_pops = 0;
2766
2767 return ADAPT_SUCCESS;
2768}
2769
2770static void ibmvscsis_freetimer(struct scsi_info *vscsi)
2771{
2772 struct timer_cb *p_timer;
2773
2774 p_timer = &vscsi->rsp_q_timer;
2775
2776 (void)hrtimer_cancel(&p_timer->timer);
2777
2778 p_timer->started = false;
2779 p_timer->timer_pops = 0;
2780}
2781
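/*
 * Hard interrupt handler: mask further device interrupts and defer all
 * CRQ processing to the ibmvscsis_handle_crq() tasklet.
 */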
2782static irqreturn_t ibmvscsis_interrupt(int dummy, void *data)
2783{
2784 struct scsi_info *vscsi = data;
2785
2786 vio_disable_interrupts(vscsi->dma_dev);
2787 tasklet_schedule(&vscsi->work_task);
2788
2789 return IRQ_HANDLED;
2790}
2791
2792/**
2793 * ibmvscsis_check_q() - Helper function to Check Init Message Valid
2794 * @vscsi: Pointer to our adapter structure
2795 *
2796  * Checks if an initialize message was queued by the initiator
2797  * while the timing window was open. This function is called from
2798  * probe after the CRQ is created and interrupts are enabled.
2799  * It is only used by adapters that wait for some event before
2800  * completing the init handshake with the client. For ibmvscsis, this
2801  * event is the port being enabled.
2802 *
2803 * EXECUTION ENVIRONMENT:
2804 * Process level only, interrupt lock held
2805 */
2806static long ibmvscsis_check_q(struct scsi_info *vscsi)
2807{
2808 uint format;
2809 long rc;
2810
2811 rc = ibmvscsis_check_init_msg(vscsi, &format);
2812 if (rc)
2813 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2814 else if (format == UNUSED_FORMAT)
2815 vscsi->state = WAIT_ENABLED;
2816 else
2817 vscsi->state = PART_UP_WAIT_ENAB;
2818
2819 return rc;
2820}
2821
2822/**
2823 * ibmvscsis_enable_change_state() - Set new state based on enabled status
2824 * @vscsi: Pointer to our adapter structure
2825 *
2826 * This function determines our new state now that we are enabled. This
2827 * may involve sending an Init Complete message to the client.
2828 *
2829 * Must be called with interrupt lock held.
2830 */
2831static long ibmvscsis_enable_change_state(struct scsi_info *vscsi)
2832{
2833 long rc = ADAPT_SUCCESS;
2834
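	/*
	 * PART_UP_WAIT_ENAB may fall back to WAIT_ENABLED and retry, so
	 * the switch below can be re-entered via handle_state_change.
	 */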
2835handle_state_change:
2836 switch (vscsi->state) {
2837 case WAIT_ENABLED:
2838 rc = ibmvscsis_send_init_message(vscsi, INIT_MSG);
2839 switch (rc) {
2840 case H_SUCCESS:
2841 case H_DROPPED:
2842 case H_CLOSED:
2843 vscsi->state = WAIT_CONNECTION;
2844 rc = ADAPT_SUCCESS;
2845 break;
2846
2847 case H_PARAMETER:
2848 break;
2849
2850 case H_HARDWARE:
2851 break;
2852
2853 default:
2854 vscsi->state = UNDEFINED;
2855 rc = H_HARDWARE;
2856 break;
2857 }
2858 break;
2859 case PART_UP_WAIT_ENAB:
2860 rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG);
2861 switch (rc) {
2862 case H_SUCCESS:
2863 vscsi->state = CONNECTED;
2864 rc = ADAPT_SUCCESS;
2865 break;
2866
2867 case H_DROPPED:
2868 case H_CLOSED:
2869 vscsi->state = WAIT_ENABLED;
2870 goto handle_state_change;
2871
2872 case H_PARAMETER:
2873 break;
2874
2875 case H_HARDWARE:
2876 break;
2877
2878 default:
2879 rc = H_HARDWARE;
2880 break;
2881 }
2882 break;
2883
2884 case WAIT_CONNECTION:
2885 case WAIT_IDLE:
2886 case SRP_PROCESSING:
2887 case CONNECTED:
2888 rc = ADAPT_SUCCESS;
2889 break;
2890 /* should not be able to get here */
2891 case UNCONFIGURING:
2892 rc = ERROR;
2893 vscsi->state = UNDEFINED;
2894 break;
2895
2896 /* driver should never allow this to happen */
2897 case ERR_DISCONNECT:
2898 case ERR_DISCONNECT_RECONNECT:
2899 default:
2900 dev_err(&vscsi->dev, "in invalid state %d during enable_change_state\n",
2901 vscsi->state);
2902 rc = ADAPT_SUCCESS;
2903 break;
2904 }
2905
2906 return rc;
2907}
2908
2909/**
2910 * ibmvscsis_create_command_q() - Create Command Queue
2911 * @vscsi: Pointer to our adapter structure
2912 * @num_cmds: Currently unused. In the future, may be used to determine
2913 * the size of the CRQ.
2914 *
2915  * Allocates a page for the command queue, DMA-maps it, and registers
2916  * the CRQ with the hypervisor.
2917 *
2918 * EXECUTION ENVIRONMENT:
2919 * Process level only
2920 */
2921static long ibmvscsis_create_command_q(struct scsi_info *vscsi, int num_cmds)
2922{
2923 long rc = 0;
2924 int pages;
2925 struct vio_dev *vdev = vscsi->dma_dev;
2926
2927 /* We might support multiple pages in the future, but just 1 for now */
2928 pages = 1;
2929
2930 vscsi->cmd_q.size = pages;
2931
2932 vscsi->cmd_q.base_addr =
2933 (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL);
2934 if (!vscsi->cmd_q.base_addr)
2935 return -ENOMEM;
2936
2937 vscsi->cmd_q.mask = ((uint)pages * CRQ_PER_PAGE) - 1;
2938
2939 vscsi->cmd_q.crq_token = dma_map_single(&vdev->dev,
2940 vscsi->cmd_q.base_addr,
2941 PAGE_SIZE, DMA_BIDIRECTIONAL);
2942 if (dma_mapping_error(&vdev->dev, vscsi->cmd_q.crq_token)) {
2943 free_page((unsigned long)vscsi->cmd_q.base_addr);
2944 return -ENOMEM;
2945 }
2946
2947 rc = h_reg_crq(vscsi->dds.unit_id, vscsi->cmd_q.crq_token, PAGE_SIZE);
2948 if (rc) {
2949 if (rc == H_CLOSED) {
2950 vscsi->state = WAIT_ENABLED;
2951 rc = 0;
2952 } else {
2953 dma_unmap_single(&vdev->dev, vscsi->cmd_q.crq_token,
2954 PAGE_SIZE, DMA_BIDIRECTIONAL);
2955 free_page((unsigned long)vscsi->cmd_q.base_addr);
2956 rc = -ENODEV;
2957 }
2958 } else {
2959 vscsi->state = WAIT_ENABLED;
2960 }
2961
2962 return rc;
2963}
2964
2965/**
2966 * ibmvscsis_destroy_command_q - Destroy Command Queue
2967 * @vscsi: Pointer to our adapter structure
2968 *
2969 * Releases memory for command queue and unmaps mapped remote memory.
2970 *
2971 * EXECUTION ENVIRONMENT:
2972 * Process level only
2973 */
2974static void ibmvscsis_destroy_command_q(struct scsi_info *vscsi)
2975{
2976 dma_unmap_single(&vscsi->dma_dev->dev, vscsi->cmd_q.crq_token,
2977 PAGE_SIZE, DMA_BIDIRECTIONAL);
2978 free_page((unsigned long)vscsi->cmd_q.base_addr);
2979 vscsi->cmd_q.base_addr = NULL;
2980 vscsi->state = NO_QUEUE;
2981}
2982
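/**
 * ibmvscsis_fast_fail() - Re-map a hardware error when fast_fail is set
 * @vscsi:	Pointer to our adapter structure
 * @cmd:	Pointer to the command whose SCSI status is being built
 *
 * If fast_fail is enabled and a READ/WRITE failed with a HARDWARE ERROR
 * sense key while either all or none of the data transferred, report
 * NO SENSE and mark the command CMD_FAST_FAIL so the response is built
 * accordingly.
 */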
2983static u8 ibmvscsis_fast_fail(struct scsi_info *vscsi,
2984 struct ibmvscsis_cmd *cmd)
2985{
2986 struct iu_entry *iue = cmd->iue;
2987 struct se_cmd *se_cmd = &cmd->se_cmd;
2988 struct srp_cmd *srp = (struct srp_cmd *)iue->sbuf->buf;
2989 struct scsi_sense_hdr sshdr;
2990 u8 rc = se_cmd->scsi_status;
2991
2992 if (vscsi->fast_fail && (READ_CMD(srp->cdb) || WRITE_CMD(srp->cdb)))
2993 if (scsi_normalize_sense(se_cmd->sense_buffer,
2994 se_cmd->scsi_sense_length, &sshdr))
2995 if (sshdr.sense_key == HARDWARE_ERROR &&
2996 (se_cmd->residual_count == 0 ||
2997 se_cmd->residual_count == se_cmd->data_length)) {
2998 rc = NO_SENSE;
2999 cmd->flags |= CMD_FAST_FAIL;
3000 }
3001
3002 return rc;
3003}
3004
3005/**
3006 * srp_build_response() - Build an SRP response buffer
3007 * @vscsi: Pointer to our adapter structure
3008 * @cmd: Pointer to command for which to send the response
3009 * @len_p: Where to return the length of the IU response sent. This
3010 * is needed to construct the CRQ response.
3011 *
3012 * Build the SRP response buffer and copy it to the client's memory space.
3013 */
3014static long srp_build_response(struct scsi_info *vscsi,
3015 struct ibmvscsis_cmd *cmd, uint *len_p)
3016{
3017 struct iu_entry *iue = cmd->iue;
3018 struct se_cmd *se_cmd = &cmd->se_cmd;
3019 struct srp_rsp *rsp;
3020 uint len;
3021 u32 rsp_code;
3022 char *data;
3023 u32 *tsk_status;
3024 long rc = ADAPT_SUCCESS;
3025
3026 spin_lock_bh(&vscsi->intr_lock);
3027
3028 rsp = &vio_iu(iue)->srp.rsp;
3029 len = sizeof(*rsp);
3030 memset(rsp, 0, len);
3031 data = rsp->data;
3032
3033 rsp->opcode = SRP_RSP;
3034
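	/*
	 * req_lim_delta returns request-limit credits to the client; any
	 * accumulated credit is folded in and cleared once the copy to
	 * the client succeeds (H_SUCCESS below).
	 */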
3035 if (vscsi->credit > 0 && vscsi->state == SRP_PROCESSING)
3036 rsp->req_lim_delta = cpu_to_be32(vscsi->credit);
3037 else
3038 rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit);
3039 rsp->tag = cmd->rsp.tag;
3040 rsp->flags = 0;
3041
3042 if (cmd->type == SCSI_CDB) {
3043 rsp->status = ibmvscsis_fast_fail(vscsi, cmd);
3044 if (rsp->status) {
3045 pr_debug("build_resp: cmd %p, scsi status %d\n", cmd,
3046 (int)rsp->status);
3047 ibmvscsis_determine_resid(se_cmd, rsp);
3048 if (se_cmd->scsi_sense_length && se_cmd->sense_buffer) {
3049 rsp->sense_data_len =
3050 cpu_to_be32(se_cmd->scsi_sense_length);
3051 rsp->flags |= SRP_RSP_FLAG_SNSVALID;
3052 len += se_cmd->scsi_sense_length;
3053 memcpy(data, se_cmd->sense_buffer,
3054 se_cmd->scsi_sense_length);
3055 }
3056 rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
3057 UCSOLNT_RESP_SHIFT;
3058 } else if (cmd->flags & CMD_FAST_FAIL) {
3059 pr_debug("build_resp: cmd %p, fast fail\n", cmd);
3060 rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
3061 UCSOLNT_RESP_SHIFT;
3062 } else {
3063 rsp->sol_not = (cmd->rsp.sol_not & SCSOLNT) >>
3064 SCSOLNT_RESP_SHIFT;
3065 }
3066 } else {
3067 /* this is task management */
3068 rsp->status = 0;
3069 rsp->resp_data_len = cpu_to_be32(4);
3070 rsp->flags |= SRP_RSP_FLAG_RSPVALID;
3071
3072 switch (se_cmd->se_tmr_req->response) {
3073 case TMR_FUNCTION_COMPLETE:
3074 case TMR_TASK_DOES_NOT_EXIST:
3075 rsp_code = SRP_TASK_MANAGEMENT_FUNCTION_COMPLETE;
3076 rsp->sol_not = (cmd->rsp.sol_not & SCSOLNT) >>
3077 SCSOLNT_RESP_SHIFT;
3078 break;
3079 case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
3080 case TMR_LUN_DOES_NOT_EXIST:
3081 rsp_code = SRP_TASK_MANAGEMENT_FUNCTION_NOT_SUPPORTED;
3082 rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
3083 UCSOLNT_RESP_SHIFT;
3084 break;
3085 case TMR_FUNCTION_FAILED:
3086 case TMR_FUNCTION_REJECTED:
3087 default:
3088 rsp_code = SRP_TASK_MANAGEMENT_FUNCTION_FAILED;
3089 rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
3090 UCSOLNT_RESP_SHIFT;
3091 break;
3092 }
3093
3094 tsk_status = (u32 *)data;
3095 *tsk_status = cpu_to_be32(rsp_code);
3096 data = (char *)(tsk_status + 1);
3097 len += 4;
3098 }
3099
3100 dma_wmb();
3101 rc = h_copy_rdma(len, vscsi->dds.window[LOCAL].liobn, iue->sbuf->dma,
3102 vscsi->dds.window[REMOTE].liobn,
3103 be64_to_cpu(iue->remote_token));
3104
3105 switch (rc) {
3106 case H_SUCCESS:
3107 vscsi->credit = 0;
3108 *len_p = len;
3109 break;
3110 case H_PERMISSION:
3111 if (connection_broken(vscsi))
3112 vscsi->flags |= RESPONSE_Q_DOWN | CLIENT_FAILED;
3113
3114 dev_err(&vscsi->dev, "build_response: error copying to client, rc %ld, flags 0x%x, state 0x%hx\n",
3115 rc, vscsi->flags, vscsi->state);
3116 break;
3117 case H_SOURCE_PARM:
3118 case H_DEST_PARM:
3119 default:
3120 dev_err(&vscsi->dev, "build_response: error copying to client, rc %ld\n",
3121 rc);
3122 break;
3123 }
3124
3125 spin_unlock_bh(&vscsi->intr_lock);
3126
3127 return rc;
3128}
3129
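/**
 * ibmvscsis_rdma() - Move data between client descriptors and our sg list
 * @cmd:	Pointer to the command being serviced
 * @sg:	Server scatter/gather list backing the transfer
 * @nsg:	Number of entries in @sg
 * @md:	Client (SRP) direct memory descriptors
 * @nmd:	Number of entries in @md
 * @dir:	DMA_TO_DEVICE reads from the client, otherwise write to it
 * @bytes:	Total number of bytes to transfer
 *
 * Walks both lists in parallel, issuing h_copy_rdma for each chunk; a
 * chunk is bounded by the current descriptor, sg entry and max_vdma_size.
 */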
3130static int ibmvscsis_rdma(struct ibmvscsis_cmd *cmd, struct scatterlist *sg,
3131 int nsg, struct srp_direct_buf *md, int nmd,
3132 enum dma_data_direction dir, unsigned int bytes)
3133{
3134 struct iu_entry *iue = cmd->iue;
3135 struct srp_target *target = iue->target;
3136 struct scsi_info *vscsi = target->ldata;
3137 struct scatterlist *sgp;
3138 dma_addr_t client_ioba, server_ioba;
3139 ulong buf_len;
3140 ulong client_len, server_len;
3141 int md_idx;
3142 long tx_len;
3143 long rc = 0;
3144
3145 pr_debug("rdma: dir %d, bytes 0x%x\n", dir, bytes);
3146
3147 if (bytes == 0)
3148 return 0;
3149
3150 sgp = sg;
3151 client_len = 0;
3152 server_len = 0;
3153 md_idx = 0;
3154 tx_len = bytes;
3155
3156 do {
3157 if (client_len == 0) {
3158 if (md_idx >= nmd) {
3159 dev_err(&vscsi->dev, "rdma: ran out of client memory descriptors\n");
3160 rc = -EIO;
3161 break;
3162 }
3163 client_ioba = be64_to_cpu(md[md_idx].va);
3164 client_len = be32_to_cpu(md[md_idx].len);
3165 }
3166 if (server_len == 0) {
3167 if (!sgp) {
3168 dev_err(&vscsi->dev, "rdma: ran out of scatter/gather list\n");
3169 rc = -EIO;
3170 break;
3171 }
3172 server_ioba = sg_dma_address(sgp);
3173 server_len = sg_dma_len(sgp);
3174 }
3175
3176 buf_len = tx_len;
3177
3178 if (buf_len > client_len)
3179 buf_len = client_len;
3180
3181 if (buf_len > server_len)
3182 buf_len = server_len;
3183
3184 if (buf_len > max_vdma_size)
3185 buf_len = max_vdma_size;
3186
3187 if (dir == DMA_TO_DEVICE) {
3188 /* read from client */
3189 rc = h_copy_rdma(buf_len,
3190 vscsi->dds.window[REMOTE].liobn,
3191 client_ioba,
3192 vscsi->dds.window[LOCAL].liobn,
3193 server_ioba);
3194 } else {
3195 /* write to client */
3196 struct srp_cmd *srp = (struct srp_cmd *)iue->sbuf->buf;
3197
3198 if (!READ_CMD(srp->cdb))
3199 print_hex_dump_bytes(" data:", DUMP_PREFIX_NONE,
3200 sg_virt(sgp), buf_len);
3201 /* The h_copy_rdma will cause phyp, running in another
3202 * partition, to read memory, so we need to make sure
3203 * the data has been written out, hence these syncs.
3204 */
3205 /* ensure that everything is in memory */
3206 isync();
3207 /* ensure that memory has been made visible */
3208 dma_wmb();
3209 rc = h_copy_rdma(buf_len,
3210 vscsi->dds.window[LOCAL].liobn,
3211 server_ioba,
3212 vscsi->dds.window[REMOTE].liobn,
3213 client_ioba);
3214 }
3215 switch (rc) {
3216 case H_SUCCESS:
3217 break;
3218 case H_PERMISSION:
3219 case H_SOURCE_PARM:
3220 case H_DEST_PARM:
3221 if (connection_broken(vscsi)) {
3222 spin_lock_bh(&vscsi->intr_lock);
3223 vscsi->flags |=
3224 (RESPONSE_Q_DOWN | CLIENT_FAILED);
3225 spin_unlock_bh(&vscsi->intr_lock);
3226 }
3227 dev_err(&vscsi->dev, "rdma: h_copy_rdma failed, rc %ld\n",
3228 rc);
3229 break;
3230
3231 default:
3232 dev_err(&vscsi->dev, "rdma: unknown error %ld from h_copy_rdma\n",
3233 rc);
3234 break;
3235 }
3236
3237 if (!rc) {
3238 tx_len -= buf_len;
3239 if (tx_len) {
3240 client_len -= buf_len;
3241 if (client_len == 0)
3242 md_idx++;
3243 else
3244 client_ioba += buf_len;
3245
3246 server_len -= buf_len;
3247 if (server_len == 0)
3248 sgp = sg_next(sgp);
3249 else
3250 server_ioba += buf_len;
3251 } else {
3252 break;
3253 }
3254 }
3255 } while (!rc);
3256
3257 return rc;
3258}
3259
3260/**
3261 * ibmvscsis_handle_crq() - Handle CRQ
3262 * @data: Pointer to our adapter structure
3263 *
3264 * Read the command elements from the command queue and copy the payloads
3265 * associated with the command elements to local memory and execute the
3266 * SRP requests.
3267 *
3268  * Note: this is an edge-triggered interrupt. It cannot be shared.
3269 */
3270static void ibmvscsis_handle_crq(unsigned long data)
3271{
3272 struct scsi_info *vscsi = (struct scsi_info *)data;
3273 struct viosrp_crq *crq;
3274 long rc;
3275 bool ack = true;
3276 volatile u8 valid;
3277
3278 spin_lock_bh(&vscsi->intr_lock);
3279
3280 pr_debug("got interrupt\n");
3281
3282 /*
3283 * if we are in a path where we are waiting for all pending commands
3284 * to complete because we received a transport event and anything in
3285 * the command queue is for a new connection, do nothing
3286 */
3287 if (TARGET_STOP(vscsi)) {
3288 vio_enable_interrupts(vscsi->dma_dev);
3289
3290 pr_debug("handle_crq, don't process: flags 0x%x, state 0x%hx\n",
3291 vscsi->flags, vscsi->state);
3292 spin_unlock_bh(&vscsi->intr_lock);
3293 return;
3294 }
3295
3296 rc = vscsi->flags & SCHEDULE_DISCONNECT;
3297 crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
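	/*
	 * Read the valid byte before the rest of the element; dma_rmb()
	 * orders the payload reads after it.
	 */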
3298 valid = crq->valid;
3299 dma_rmb();
3300
3301 while (valid) {
3302 /*
3303 		 * These are edge-triggered interrupts. After dropping out of
3304 		 * the while loop, the code must check for work since an
3305 		 * interrupt could be lost and an element left on the queue,
3306 * hence the label.
3307 */
3308cmd_work:
3309 vscsi->cmd_q.index =
3310 (vscsi->cmd_q.index + 1) & vscsi->cmd_q.mask;
3311
3312 if (!rc) {
3313 rc = ibmvscsis_parse_command(vscsi, crq);
3314 } else {
3315 if ((uint)crq->valid == VALID_TRANS_EVENT) {
3316 /*
3317 * must service the transport layer events even
3318 				 * in an error state; don't break out until all
3319 * the consecutive transport events have been
3320 * processed
3321 */
3322 rc = ibmvscsis_trans_event(vscsi, crq);
3323 } else if (vscsi->flags & TRANS_EVENT) {
3324 /*
3325 				 * if a transport event has occurred, leave
3326 * everything but transport events on the queue
3327 */
3328 pr_debug("handle_crq, ignoring\n");
3329
3330 /*
3331 * need to decrement the queue index so we can
3332 				 * look at the element again
3333 */
3334 if (vscsi->cmd_q.index)
3335 vscsi->cmd_q.index -= 1;
3336 else
3337 /*
3338 					 * index is at 0, so it just wrapped;
3339 					 * have it index the last element in q
3340 */
3341 vscsi->cmd_q.index = vscsi->cmd_q.mask;
3342 break;
3343 }
3344 }
3345
3346 crq->valid = INVALIDATE_CMD_RESP_EL;
3347
3348 crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
3349 valid = crq->valid;
3350 dma_rmb();
3351 }
3352
3353 if (!rc) {
3354 if (ack) {
3355 vio_enable_interrupts(vscsi->dma_dev);
3356 ack = false;
3357 pr_debug("handle_crq, reenabling interrupts\n");
3358 }
3359 valid = crq->valid;
3360 dma_rmb();
3361 if (valid)
3362 goto cmd_work;
3363 } else {
3364 pr_debug("handle_crq, error: flags 0x%x, state 0x%hx, crq index 0x%x\n",
3365 vscsi->flags, vscsi->state, vscsi->cmd_q.index);
3366 }
3367
3368 pr_debug("Leaving handle_crq: schedule_q empty %d, flags 0x%x, state 0x%hx\n",
3369 (int)list_empty(&vscsi->schedule_q), vscsi->flags,
3370 vscsi->state);
3371
3372 spin_unlock_bh(&vscsi->intr_lock);
3373}
3374
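/**
 * ibmvscsis_probe() - Allocate and initialize one target adapter
 * @vdev:	The VIO device being probed
 * @id:	Matching entry in the vio_device_id table
 *
 * Sets up the adapter structure, DMA windows, command pool, timer, CRQ,
 * work queue and interrupt handler, leaving the adapter waiting for the
 * port to be enabled through configfs.
 */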
3375static int ibmvscsis_probe(struct vio_dev *vdev,
3376 const struct vio_device_id *id)
3377{
3378 struct scsi_info *vscsi;
3379 int rc = 0;
3380 long hrc = 0;
3381 char wq_name[24];
3382
3383 vscsi = kzalloc(sizeof(*vscsi), GFP_KERNEL);
3384 if (!vscsi) {
3385 rc = -ENOMEM;
3386 pr_err("probe: allocation of adapter failed\n");
3387 return rc;
3388 }
3389
3390 vscsi->dma_dev = vdev;
3391 vscsi->dev = vdev->dev;
3392 INIT_LIST_HEAD(&vscsi->schedule_q);
3393 INIT_LIST_HEAD(&vscsi->waiting_rsp);
3394 INIT_LIST_HEAD(&vscsi->active_q);
3395
3396 snprintf(vscsi->tport.tport_name, 256, "%s", dev_name(&vdev->dev));
3397
3398 pr_debug("probe tport_name: %s\n", vscsi->tport.tport_name);
3399
3400 rc = read_dma_window(vscsi);
3401 if (rc)
3402 goto free_adapter;
3403 pr_debug("Probe: liobn 0x%x, riobn 0x%x\n",
3404 vscsi->dds.window[LOCAL].liobn,
3405 vscsi->dds.window[REMOTE].liobn);
3406
3407 strcpy(vscsi->eye, "VSCSI ");
3408 strncat(vscsi->eye, vdev->name, MAX_EYE);
3409
3410 vscsi->dds.unit_id = vdev->unit_address;
3411
3412 spin_lock_bh(&ibmvscsis_dev_lock);
3413 list_add_tail(&vscsi->list, &ibmvscsis_dev_list);
3414 spin_unlock_bh(&ibmvscsis_dev_lock);
3415
3416 /*
3417 * TBD: How do we determine # of cmds to request? Do we know how
3418 * many "children" we have?
3419 */
3420 vscsi->request_limit = INITIAL_SRP_LIMIT;
3421 rc = srp_target_alloc(&vscsi->target, &vdev->dev, vscsi->request_limit,
3422 SRP_MAX_IU_LEN);
3423 if (rc)
3424 goto rem_list;
3425
3426 vscsi->target.ldata = vscsi;
3427
3428 rc = ibmvscsis_alloc_cmds(vscsi, vscsi->request_limit);
3429 if (rc) {
3430 dev_err(&vscsi->dev, "alloc_cmds failed, rc %d, num %d\n",
3431 rc, vscsi->request_limit);
3432 goto free_target;
3433 }
3434
3435 /*
3436 * Note: the lock is used in freeing timers, so must initialize
3437 * first so that ordering in case of error is correct.
3438 */
3439 spin_lock_init(&vscsi->intr_lock);
3440
3441 rc = ibmvscsis_alloctimer(vscsi);
3442 if (rc) {
3443 dev_err(&vscsi->dev, "probe: alloctimer failed, rc %d\n", rc);
3444 goto free_cmds;
3445 }
3446
3447 rc = ibmvscsis_create_command_q(vscsi, 256);
3448 if (rc) {
3449 dev_err(&vscsi->dev, "probe: create_command_q failed, rc %d\n",
3450 rc);
3451 goto free_timer;
3452 }
3453
3454 vscsi->map_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
3455 if (!vscsi->map_buf) {
3456 rc = -ENOMEM;
3457 dev_err(&vscsi->dev, "probe: allocating cmd buffer failed\n");
3458 goto destroy_queue;
3459 }
3460
3461 vscsi->map_ioba = dma_map_single(&vdev->dev, vscsi->map_buf, PAGE_SIZE,
3462 DMA_BIDIRECTIONAL);
3463 if (dma_mapping_error(&vdev->dev, vscsi->map_ioba)) {
3464 dev_err(&vscsi->dev, "probe: error mapping command buffer\n");
3465 		rc = -ENOMEM;
		goto free_buf;
3466 }
3467
3468 hrc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO,
3469 (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0,
3470 0);
3471 if (hrc == H_SUCCESS)
3472 vscsi->client_data.partition_number =
3473 be64_to_cpu(*(u64 *)vscsi->map_buf);
3474 /*
3475 * We expect the VIOCTL to fail if we're configured as "any
3476 * client can connect" and the client isn't activated yet.
3477 	 * We'll make the call again when the client sends an init msg.
3478 */
3479 pr_debug("probe hrc %ld, client partition num %d\n",
3480 hrc, vscsi->client_data.partition_number);
3481
3482 tasklet_init(&vscsi->work_task, ibmvscsis_handle_crq,
3483 (unsigned long)vscsi);
3484
3485 init_completion(&vscsi->wait_idle);
3486
3487 snprintf(wq_name, 24, "ibmvscsis%s", dev_name(&vdev->dev));
3488 vscsi->work_q = create_workqueue(wq_name);
3489 if (!vscsi->work_q) {
3490 rc = -ENOMEM;
3491 dev_err(&vscsi->dev, "create_workqueue failed\n");
3492 goto unmap_buf;
3493 }
3494
3495 rc = request_irq(vdev->irq, ibmvscsis_interrupt, 0, "ibmvscsis", vscsi);
3496 if (rc) {
3497 		dev_err(&vscsi->dev, "probe: request_irq failed, rc %d\n", rc);
3498 		rc = -EPERM;
3499 goto destroy_WQ;
3500 }
3501
3502 spin_lock_bh(&vscsi->intr_lock);
3503 	rc = vio_enable_interrupts(vdev);
3504 if (rc) {
3505 dev_err(&vscsi->dev, "enabling interrupts failed, rc %d\n", rc);
3506 rc = -ENODEV;
3507 spin_unlock_bh(&vscsi->intr_lock);
3508 goto free_irq;
3509 }
3510
3511 if (ibmvscsis_check_q(vscsi)) {
3512 rc = ERROR;
3513 dev_err(&vscsi->dev, "probe: check_q failed, rc %d\n", rc);
3514 spin_unlock_bh(&vscsi->intr_lock);
3515 goto disable_interrupt;
3516 }
3517 spin_unlock_bh(&vscsi->intr_lock);
3518
3519 dev_set_drvdata(&vdev->dev, vscsi);
3520
3521 return 0;
3522
3523disable_interrupt:
3524 vio_disable_interrupts(vdev);
3525free_irq:
3526 free_irq(vdev->irq, vscsi);
3527destroy_WQ:
3528 destroy_workqueue(vscsi->work_q);
3529unmap_buf:
3530 dma_unmap_single(&vdev->dev, vscsi->map_ioba, PAGE_SIZE,
3531 DMA_BIDIRECTIONAL);
3532free_buf:
3533 kfree(vscsi->map_buf);
3534destroy_queue:
3535 tasklet_kill(&vscsi->work_task);
3536 ibmvscsis_unregister_command_q(vscsi);
3537 ibmvscsis_destroy_command_q(vscsi);
3538free_timer:
3539 ibmvscsis_freetimer(vscsi);
3540free_cmds:
3541 ibmvscsis_free_cmds(vscsi);
3542free_target:
3543 srp_target_free(&vscsi->target);
3544rem_list:
3545 spin_lock_bh(&ibmvscsis_dev_lock);
3546 list_del(&vscsi->list);
3547 spin_unlock_bh(&ibmvscsis_dev_lock);
3548free_adapter:
3549 kfree(vscsi);
3550
3551 return rc;
3552}
3553
3554static int ibmvscsis_remove(struct vio_dev *vdev)
3555{
3556 struct scsi_info *vscsi = dev_get_drvdata(&vdev->dev);
3557
3558 pr_debug("remove (%s)\n", dev_name(&vscsi->dma_dev->dev));
3559
3560 /*
3561 * TBD: Need to handle if there are commands on the waiting_rsp q
3562 * Actually, can there still be cmds outstanding to tcm?
3563 */
3564
3565 vio_disable_interrupts(vdev);
3566 free_irq(vdev->irq, vscsi);
3567 destroy_workqueue(vscsi->work_q);
3568 dma_unmap_single(&vdev->dev, vscsi->map_ioba, PAGE_SIZE,
3569 DMA_BIDIRECTIONAL);
3570 kfree(vscsi->map_buf);
3571 tasklet_kill(&vscsi->work_task);
3572 ibmvscsis_unregister_command_q(vscsi);
3573 ibmvscsis_destroy_command_q(vscsi);
3574 ibmvscsis_freetimer(vscsi);
3575 ibmvscsis_free_cmds(vscsi);
3576 srp_target_free(&vscsi->target);
3577 spin_lock_bh(&ibmvscsis_dev_lock);
3578 list_del(&vscsi->list);
3579 spin_unlock_bh(&ibmvscsis_dev_lock);
3580 kfree(vscsi);
3581
3582 return 0;
3583}
3584
3585static ssize_t system_id_show(struct device *dev,
3586 struct device_attribute *attr, char *buf)
3587{
3588 return snprintf(buf, PAGE_SIZE, "%s\n", system_id);
3589}
3590
3591static ssize_t partition_number_show(struct device *dev,
3592 struct device_attribute *attr, char *buf)
3593{
3594 return snprintf(buf, PAGE_SIZE, "%x\n", partition_number);
3595}
3596
3597static ssize_t unit_address_show(struct device *dev,
3598 struct device_attribute *attr, char *buf)
3599{
3600 struct scsi_info *vscsi = container_of(dev, struct scsi_info, dev);
3601
3602 return snprintf(buf, PAGE_SIZE, "%x\n", vscsi->dma_dev->unit_address);
3603}
3604
3605static int ibmvscsis_get_system_info(void)
3606{
3607 struct device_node *rootdn, *vdevdn;
3608 const char *id, *model, *name;
3609 const uint *num;
3610
3611 rootdn = of_find_node_by_path("/");
3612 if (!rootdn)
3613 return -ENOENT;
3614
3615 model = of_get_property(rootdn, "model", NULL);
3616 id = of_get_property(rootdn, "system-id", NULL);
3617 if (model && id)
3618 snprintf(system_id, sizeof(system_id), "%s-%s", model, id);
3619
3620 name = of_get_property(rootdn, "ibm,partition-name", NULL);
3621 if (name)
3622 strncpy(partition_name, name, sizeof(partition_name));
3623
3624 num = of_get_property(rootdn, "ibm,partition-no", NULL);
3625 if (num)
3626 partition_number = *num;
3627
3628 of_node_put(rootdn);
3629
3630 vdevdn = of_find_node_by_path("/vdevice");
3631 if (vdevdn) {
3632 const uint *mvds;
3633
3634 mvds = of_get_property(vdevdn, "ibm,max-virtual-dma-size",
3635 NULL);
3636 if (mvds)
3637 max_vdma_size = *mvds;
3638 of_node_put(vdevdn);
3639 }
3640
3641 return 0;
3642}
3643
3644static char *ibmvscsis_get_fabric_name(void)
3645{
3646 return "ibmvscsis";
3647}
3648
3649static char *ibmvscsis_get_fabric_wwn(struct se_portal_group *se_tpg)
3650{
3651 struct ibmvscsis_tport *tport =
3652 container_of(se_tpg, struct ibmvscsis_tport, se_tpg);
3653
3654 return tport->tport_name;
3655}
3656
3657static u16 ibmvscsis_get_tag(struct se_portal_group *se_tpg)
3658{
3659 struct ibmvscsis_tport *tport =
3660 container_of(se_tpg, struct ibmvscsis_tport, se_tpg);
3661
3662 return tport->tport_tpgt;
3663}
3664
3665static u32 ibmvscsis_get_default_depth(struct se_portal_group *se_tpg)
3666{
3667 return 1;
3668}
3669
3670static int ibmvscsis_check_true(struct se_portal_group *se_tpg)
3671{
3672 return 1;
3673}
3674
3675static int ibmvscsis_check_false(struct se_portal_group *se_tpg)
3676{
3677 return 0;
3678}
3679
3680static u32 ibmvscsis_tpg_get_inst_index(struct se_portal_group *se_tpg)
3681{
3682 return 1;
3683}
3684
3685static int ibmvscsis_check_stop_free(struct se_cmd *se_cmd)
3686{
3687 return target_put_sess_cmd(se_cmd);
3688}
3689
3690static void ibmvscsis_release_cmd(struct se_cmd *se_cmd)
3691{
3692 struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
3693 se_cmd);
3694 struct scsi_info *vscsi = cmd->adapter;
3695
3696 pr_debug("release_cmd %p, flags %d\n", se_cmd, cmd->flags);
3697
3698 spin_lock_bh(&vscsi->intr_lock);
3699 /* Remove from active_q */
3700 list_del(&cmd->list);
3701 list_add_tail(&cmd->list, &vscsi->waiting_rsp);
3702 ibmvscsis_send_messages(vscsi);
3703 spin_unlock_bh(&vscsi->intr_lock);
3704}
3705
3706static u32 ibmvscsis_sess_get_index(struct se_session *se_sess)
3707{
3708 return 0;
3709}
3710
3711static int ibmvscsis_write_pending(struct se_cmd *se_cmd)
3712{
3713 struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
3714 se_cmd);
3715 struct iu_entry *iue = cmd->iue;
3716 int rc;
3717
3718 pr_debug("write_pending, se_cmd %p, length 0x%x\n",
3719 se_cmd, se_cmd->data_length);
3720
3721 rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma,
3722 1, 1);
3723 if (rc) {
3724 pr_err("srp_transfer_data() failed: %d\n", rc);
3725 return -EAGAIN;
3726 }
3727 /*
3728 * We now tell TCM to add this WRITE CDB directly into the TCM storage
3729 * object execution queue.
3730 */
3731 target_execute_cmd(se_cmd);
3732 return 0;
3733}
3734
3735static int ibmvscsis_write_pending_status(struct se_cmd *se_cmd)
3736{
3737 return 0;
3738}
3739
3740static void ibmvscsis_set_default_node_attrs(struct se_node_acl *nacl)
3741{
3742}
3743
3744static int ibmvscsis_get_cmd_state(struct se_cmd *se_cmd)
3745{
3746 return 0;
3747}
3748
3749static int ibmvscsis_queue_data_in(struct se_cmd *se_cmd)
3750{
3751 struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
3752 se_cmd);
3753 struct iu_entry *iue = cmd->iue;
3754 struct scsi_info *vscsi = cmd->adapter;
3755 char *sd;
3756 uint len = 0;
3757 int rc;
3758
3759 pr_debug("queue_data_in, se_cmd %p, length 0x%x\n",
3760 se_cmd, se_cmd->data_length);
3761
3762 rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma, 1,
3763 1);
3764 if (rc) {
3765 pr_err("srp_transfer_data failed: %d\n", rc);
3766 sd = se_cmd->sense_buffer;
3767 se_cmd->scsi_sense_length = 18;
3768 memset(se_cmd->sense_buffer, 0, se_cmd->scsi_sense_length);
3769 /* Logical Unit Communication Time-out asc/ascq = 0x0801 */
3770 scsi_build_sense_buffer(0, se_cmd->sense_buffer, MEDIUM_ERROR,
3771 0x08, 0x01);
3772 }
3773
3774 srp_build_response(vscsi, cmd, &len);
3775 cmd->rsp.format = SRP_FORMAT;
3776 cmd->rsp.len = len;
3777
3778 return 0;
3779}
3780
3781static int ibmvscsis_queue_status(struct se_cmd *se_cmd)
3782{
3783 struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
3784 se_cmd);
3785 struct scsi_info *vscsi = cmd->adapter;
3786 uint len;
3787
3788 pr_debug("queue_status %p\n", se_cmd);
3789
3790 srp_build_response(vscsi, cmd, &len);
3791 cmd->rsp.format = SRP_FORMAT;
3792 cmd->rsp.len = len;
3793
3794 return 0;
3795}
3796
3797static void ibmvscsis_queue_tm_rsp(struct se_cmd *se_cmd)
3798{
3799 struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
3800 se_cmd);
3801 struct scsi_info *vscsi = cmd->adapter;
3802 uint len;
3803
3804 pr_debug("queue_tm_rsp %p, status %d\n",
3805 se_cmd, (int)se_cmd->se_tmr_req->response);
3806
3807 srp_build_response(vscsi, cmd, &len);
3808 cmd->rsp.format = SRP_FORMAT;
3809 cmd->rsp.len = len;
3810}
3811
3812static void ibmvscsis_aborted_task(struct se_cmd *se_cmd)
3813{
3814 /* TBD: What (if anything) should we do here? */
3815 pr_debug("ibmvscsis_aborted_task %p\n", se_cmd);
3816}
3817
3818static struct se_wwn *ibmvscsis_make_tport(struct target_fabric_configfs *tf,
3819 struct config_group *group,
3820 const char *name)
3821{
3822 struct ibmvscsis_tport *tport;
3823
3824 tport = ibmvscsis_lookup_port(name);
3825 if (tport) {
3826 tport->tport_proto_id = SCSI_PROTOCOL_SRP;
3827 pr_debug("make_tport(%s), pointer:%p, tport_id:%x\n",
3828 name, tport, tport->tport_proto_id);
3829 return &tport->tport_wwn;
3830 }
3831
3832 return ERR_PTR(-EINVAL);
3833}
3834
3835static void ibmvscsis_drop_tport(struct se_wwn *wwn)
3836{
3837 struct ibmvscsis_tport *tport = container_of(wwn,
3838 struct ibmvscsis_tport,
3839 tport_wwn);
3840
3841 pr_debug("drop_tport(%s)\n",
3842 config_item_name(&tport->tport_wwn.wwn_group.cg_item));
3843}
3844
3845static struct se_portal_group *ibmvscsis_make_tpg(struct se_wwn *wwn,
3846 struct config_group *group,
3847 const char *name)
3848{
3849 struct ibmvscsis_tport *tport =
3850 container_of(wwn, struct ibmvscsis_tport, tport_wwn);
3851 int rc;
3852
3853 tport->releasing = false;
3854
3855 rc = core_tpg_register(&tport->tport_wwn, &tport->se_tpg,
3856 tport->tport_proto_id);
3857 if (rc)
3858 return ERR_PTR(rc);
3859
3860 return &tport->se_tpg;
3861}
3862
3863static void ibmvscsis_drop_tpg(struct se_portal_group *se_tpg)
3864{
3865 struct ibmvscsis_tport *tport = container_of(se_tpg,
3866 struct ibmvscsis_tport,
3867 se_tpg);
3868
3869 tport->releasing = true;
3870 tport->enabled = false;
3871
3872 /*
3873 * Release the virtual I_T Nexus for this ibmvscsis TPG
3874 */
3875 ibmvscsis_drop_nexus(tport);
3876 /*
3877 * Deregister the se_tpg from TCM..
3878 */
3879 core_tpg_deregister(se_tpg);
3880}
3881
3882static ssize_t ibmvscsis_wwn_version_show(struct config_item *item,
3883 char *page)
3884{
3885 return scnprintf(page, PAGE_SIZE, "%s\n", IBMVSCSIS_VERSION);
3886}
3887CONFIGFS_ATTR_RO(ibmvscsis_wwn_, version);
3888
3889static struct configfs_attribute *ibmvscsis_wwn_attrs[] = {
3890 &ibmvscsis_wwn_attr_version,
3891 NULL,
3892};
3893
3894static ssize_t ibmvscsis_tpg_enable_show(struct config_item *item,
3895 char *page)
3896{
3897 struct se_portal_group *se_tpg = to_tpg(item);
3898 struct ibmvscsis_tport *tport = container_of(se_tpg,
3899 struct ibmvscsis_tport,
3900 se_tpg);
3901
3902	return scnprintf(page, PAGE_SIZE, "%d\n", (tport->enabled) ? 1 : 0);
3903}
3904
3905static ssize_t ibmvscsis_tpg_enable_store(struct config_item *item,
3906 const char *page, size_t count)
3907{
3908 struct se_portal_group *se_tpg = to_tpg(item);
3909 struct ibmvscsis_tport *tport = container_of(se_tpg,
3910 struct ibmvscsis_tport,
3911 se_tpg);
3912 struct scsi_info *vscsi = container_of(tport, struct scsi_info, tport);
3913 unsigned long tmp;
3914 int rc;
3915 long lrc;
3916
3917 rc = kstrtoul(page, 0, &tmp);
3918 if (rc < 0) {
3919		pr_err("Unable to extract ibmvscsis_tpg_enable\n");
3920 return -EINVAL;
3921 }
3922
3923 if ((tmp != 0) && (tmp != 1)) {
3924		pr_err("Illegal value for ibmvscsis_tpg_enable\n");
3925 return -EINVAL;
3926 }
3927
3928 if (tmp) {
3929 tport->enabled = true;
3930 spin_lock_bh(&vscsi->intr_lock);
3931 lrc = ibmvscsis_enable_change_state(vscsi);
3932 if (lrc)
3933 pr_err("enable_change_state failed, rc %ld state %d\n",
3934 lrc, vscsi->state);
3935 spin_unlock_bh(&vscsi->intr_lock);
3936 } else {
3937 tport->enabled = false;
3938 }
3939
3940 pr_debug("tpg_enable_store, state %d\n", vscsi->state);
3941
3942 return count;
3943}
3944CONFIGFS_ATTR(ibmvscsis_tpg_, enable);
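/*
 * The "enable" attribute is the user-space switch for the adapter.
 * A minimal sketch of toggling it, assuming a hypothetical tport named
 * 30000004 and the conventional tpgt_1 group:
 *
 *	FILE *f = fopen("/sys/kernel/config/target/ibmvscsis"
 *			"/30000004/tpgt_1/enable", "w");
 *	if (f) {
 *		fputs("1\n", f);
 *		fclose(f);
 *	}
 *
 * The write lands in ibmvscsis_tpg_enable_store() above.
 */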
3945
3946static struct configfs_attribute *ibmvscsis_tpg_attrs[] = {
3947 &ibmvscsis_tpg_attr_enable,
3948 NULL,
3949};
3950
3951static const struct target_core_fabric_ops ibmvscsis_ops = {
3952 .module = THIS_MODULE,
3953 .name = "ibmvscsis",
3954 .get_fabric_name = ibmvscsis_get_fabric_name,
3955 .tpg_get_wwn = ibmvscsis_get_fabric_wwn,
3956 .tpg_get_tag = ibmvscsis_get_tag,
3957 .tpg_get_default_depth = ibmvscsis_get_default_depth,
3958 .tpg_check_demo_mode = ibmvscsis_check_true,
3959 .tpg_check_demo_mode_cache = ibmvscsis_check_true,
3960 .tpg_check_demo_mode_write_protect = ibmvscsis_check_false,
3961 .tpg_check_prod_mode_write_protect = ibmvscsis_check_false,
3962 .tpg_get_inst_index = ibmvscsis_tpg_get_inst_index,
3963 .check_stop_free = ibmvscsis_check_stop_free,
3964 .release_cmd = ibmvscsis_release_cmd,
3965 .sess_get_index = ibmvscsis_sess_get_index,
3966 .write_pending = ibmvscsis_write_pending,
3967 .write_pending_status = ibmvscsis_write_pending_status,
3968 .set_default_node_attributes = ibmvscsis_set_default_node_attrs,
3969 .get_cmd_state = ibmvscsis_get_cmd_state,
3970 .queue_data_in = ibmvscsis_queue_data_in,
3971 .queue_status = ibmvscsis_queue_status,
3972 .queue_tm_rsp = ibmvscsis_queue_tm_rsp,
3973 .aborted_task = ibmvscsis_aborted_task,
3974 /*
3975 * Setup function pointers for logic in target_core_fabric_configfs.c
3976 */
3977 .fabric_make_wwn = ibmvscsis_make_tport,
3978 .fabric_drop_wwn = ibmvscsis_drop_tport,
3979 .fabric_make_tpg = ibmvscsis_make_tpg,
3980 .fabric_drop_tpg = ibmvscsis_drop_tpg,
3981
3982 .tfc_wwn_attrs = ibmvscsis_wwn_attrs,
3983 .tfc_tpg_base_attrs = ibmvscsis_tpg_attrs,
3984};
3985
3986static void ibmvscsis_dev_release(struct device *dev) {}
3987
3988static struct class_attribute ibmvscsis_class_attrs[] = {
3989 __ATTR_NULL,
3990};
3991
3992static struct device_attribute dev_attr_system_id =
3993 __ATTR(system_id, S_IRUGO, system_id_show, NULL);
3994
3995static struct device_attribute dev_attr_partition_number =
3996 __ATTR(partition_number, S_IRUGO, partition_number_show, NULL);
3997
3998static struct device_attribute dev_attr_unit_address =
3999 __ATTR(unit_address, S_IRUGO, unit_address_show, NULL);
4000
4001static struct attribute *ibmvscsis_dev_attrs[] = {
4002 &dev_attr_system_id.attr,
4003 &dev_attr_partition_number.attr,
4004	&dev_attr_unit_address.attr,
4005	NULL,	/* sysfs attribute arrays must be NULL-terminated */
4006};
4006ATTRIBUTE_GROUPS(ibmvscsis_dev);
4007
4008static struct class ibmvscsis_class = {
4009 .name = "ibmvscsis",
4010 .dev_release = ibmvscsis_dev_release,
4011 .class_attrs = ibmvscsis_class_attrs,
4012 .dev_groups = ibmvscsis_dev_groups,
4013};
4014
4015static struct vio_device_id ibmvscsis_device_table[] = {
4016 { "v-scsi-host", "IBM,v-scsi-host" },
4017 { "", "" }
4018};
4019MODULE_DEVICE_TABLE(vio, ibmvscsis_device_table);
4020
4021static struct vio_driver ibmvscsis_driver = {
4022 .name = "ibmvscsis",
4023 .id_table = ibmvscsis_device_table,
4024 .probe = ibmvscsis_probe,
4025 .remove = ibmvscsis_remove,
4026};
4027
4028/*
4029 * ibmvscsis_init() - Kernel Module initialization
4030 *
4031 * Note: vio_register_driver() registers callback functions, and at least one
4032 * of those callback functions calls TCM - Linux IO Target Subsystem, thus
4033 * the SCSI Target template must be registered before vio_register_driver()
4034 * is called.
4035 */
4036static int __init ibmvscsis_init(void)
4037{
4038 int rc = 0;
4039
4040 rc = ibmvscsis_get_system_info();
4041 if (rc) {
4042 pr_err("rc %d from get_system_info\n", rc);
4043 goto out;
4044 }
4045
4046 rc = class_register(&ibmvscsis_class);
4047 if (rc) {
4048 pr_err("failed class register\n");
4049 goto out;
4050 }
4051
4052 rc = target_register_template(&ibmvscsis_ops);
4053 if (rc) {
4054 pr_err("rc %d from target_register_template\n", rc);
4055 goto unregister_class;
4056 }
4057
4058 rc = vio_register_driver(&ibmvscsis_driver);
4059 if (rc) {
4060 pr_err("rc %d from vio_register_driver\n", rc);
4061 goto unregister_target;
4062 }
4063
4064 return 0;
4065
4066unregister_target:
4067 target_unregister_template(&ibmvscsis_ops);
4068unregister_class:
4069 class_unregister(&ibmvscsis_class);
4070out:
4071 return rc;
4072}
4073
4074static void __exit ibmvscsis_exit(void)
4075{
4076 pr_info("Unregister IBM virtual SCSI host driver\n");
4077 vio_unregister_driver(&ibmvscsis_driver);
4078 target_unregister_template(&ibmvscsis_ops);
4079 class_unregister(&ibmvscsis_class);
4080}
4081
4082MODULE_DESCRIPTION("IBMVSCSIS fabric driver");
4083MODULE_AUTHOR("Bryant G. Ly and Michael Cyr");
4084MODULE_LICENSE("GPL");
4085MODULE_VERSION(IBMVSCSIS_VERSION);
4086module_init(ibmvscsis_init);
4087module_exit(ibmvscsis_exit);
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
new file mode 100644
index 000000000000..981a0c992b6c
--- /dev/null
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
@@ -0,0 +1,346 @@
1/*******************************************************************************
2 * IBM Virtual SCSI Target Driver
3 * Copyright (C) 2003-2005 Dave Boutcher (boutcher@us.ibm.com) IBM Corp.
4 * Santiago Leon (santil@us.ibm.com) IBM Corp.
5 * Linda Xie (lxie@us.ibm.com) IBM Corp.
6 *
7 * Copyright (C) 2005-2011 FUJITA Tomonori <tomof@acm.org>
8 * Copyright (C) 2010 Nicholas A. Bellinger <nab@kernel.org>
9 * Copyright (C) 2016 Bryant G. Ly <bryantly@linux.vnet.ibm.com> IBM Corp.
10 *
11 * Authors: Bryant G. Ly <bryantly@linux.vnet.ibm.com>
12 * Authors: Michael Cyr <mikecyr@linux.vnet.ibm.com>
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 ****************************************************************************/
25
26#ifndef __H_IBMVSCSI_TGT
27#define __H_IBMVSCSI_TGT
28
29#include "libsrp.h"
30
31#define SYS_ID_NAME_LEN 64
32#define PARTITION_NAMELEN 96
33#define IBMVSCSIS_NAMELEN 32
34
35#define MSG_HI 0
36#define MSG_LOW 1
37
38#define MAX_CMD_Q_PAGES 4
39#define CRQ_PER_PAGE (PAGE_SIZE / sizeof(struct viosrp_crq))
40/* in terms of number of elements */
41#define DEFAULT_CMD_Q_SIZE CRQ_PER_PAGE
42#define MAX_CMD_Q_SIZE (DEFAULT_CMD_Q_SIZE * MAX_CMD_Q_PAGES)
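/*
 * Worked example, assuming 4 KiB pages and a 16-byte struct viosrp_crq:
 * CRQ_PER_PAGE = 4096 / 16 = 256, so MAX_CMD_Q_SIZE = 256 * 4 = 1024
 * elements.
 */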
43
44#define SRP_VIOLATION 0x102 /* general error code */
45
46/*
47 * SRP buffer formats defined as of 16.a supported by this driver.
48 */
49#define SUPPORTED_FORMATS ((SRP_DATA_DESC_DIRECT << 1) | \
50 (SRP_DATA_DESC_INDIRECT << 1))
51
52#define SCSI_LUN_ADDR_METHOD_FLAT 1
53
54struct dma_window {
55 u32 liobn; /* Unique per vdevice */
56 u64 tce_base; /* Physical location of the TCE table */
57 u64 tce_size; /* Size of the TCE table in bytes */
58};
59
60struct target_dds {
61 u64 unit_id; /* 64 bit will force alignment */
62#define NUM_DMA_WINDOWS 2
63#define LOCAL 0
64#define REMOTE 1
65 struct dma_window window[NUM_DMA_WINDOWS];
66
67 /* root node property "ibm,partition-no" */
68 uint partition_num;
69 char partition_name[PARTITION_NAMELEN];
70};
71
72#define MAX_NUM_PORTS 1
73#define MAX_H_COPY_RDMA (128 * 1024)
74
75#define MAX_EYE 64
76
77/* Return codes */
78#define ADAPT_SUCCESS 0L
79/* choose error codes that do not conflict with PHYP */
80#define ERROR -40L
81
82struct format_code {
83 u8 reserved;
84 u8 buffers;
85};
86
87struct client_info {
88#define SRP_VERSION "16.a"
89 char srp_version[8];
90 /* root node property ibm,partition-name */
91 char partition_name[PARTITION_NAMELEN];
92 /* root node property ibm,partition-no */
93 u32 partition_number;
94 /* initially 1 */
95 u32 mad_version;
96 u32 os_type;
97};
98
99/*
100 * Changing this constant changes the number of seconds to wait before
101 * concluding that the client will never service its queue again.
102 */
103#define SECONDS_TO_CONSIDER_FAILED 30
104/*
105 * These constants set the polling period used to determine if the client
106 * has freed at least one element in the response queue.
107 */
108#define WAIT_SECONDS 1
109#define WAIT_NANO_SECONDS 5000
110#define MAX_TIMER_POPS ((1000000 / WAIT_NANO_SECONDS) * \
111 SECONDS_TO_CONSIDER_FAILED)
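/*
 * With the values above, MAX_TIMER_POPS = (1000000 / 5000) * 30 =
 * 200 * 30 = 6000 timer pops before the client is considered failed.
 */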
112/*
113 * general purpose timer control block
114 * which can be used for multiple functions
115 */
116struct timer_cb {
117 struct hrtimer timer;
118 /*
119	 * how long it has been since the client
120	 * serviced the queue; the variable is incremented
121	 * in the service_wait_q routine and cleared
122	 * when messages are sent
123 */
124 int timer_pops;
125 /* the timer is started */
126 bool started;
127};
128
129struct cmd_queue {
130 /* kva */
131 struct viosrp_crq *base_addr;
132 dma_addr_t crq_token;
133 /* used to maintain index */
134 uint mask;
135 /* current element */
136 uint index;
137 int size;
138};
139
140#define SCSOLNT_RESP_SHIFT 1
141#define UCSOLNT_RESP_SHIFT 2
142
143#define SCSOLNT BIT(SCSOLNT_RESP_SHIFT)
144#define UCSOLNT BIT(UCSOLNT_RESP_SHIFT)
145
146enum cmd_type {
147 SCSI_CDB = 0x01,
148 TASK_MANAGEMENT = 0x02,
149 /* MAD or addressed to port 0 */
150 ADAPTER_MAD = 0x04,
151 UNSET_TYPE = 0x08,
152};
153
154struct iu_rsp {
155 u8 format;
156 u8 sol_not;
157 u16 len;
158 /* tag is just to help client identify cmd, so don't translate be/le */
159 u64 tag;
160};
161
162struct ibmvscsis_cmd {
163 struct list_head list;
164 /* Used for TCM Core operations */
165 struct se_cmd se_cmd;
166 struct iu_entry *iue;
167 struct iu_rsp rsp;
168 struct work_struct work;
169 struct scsi_info *adapter;
170 /* Sense buffer that will be mapped into outgoing status */
171 unsigned char sense_buf[TRANSPORT_SENSE_BUFFER];
172 u64 init_time;
173#define CMD_FAST_FAIL BIT(0)
174 u32 flags;
175 char type;
176};
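/*
 * The embedded se_cmd is what lets the TCM callbacks in ibmvscsi_tgt.c
 * recover the driver command, e.g.:
 *
 *	cmd = container_of(se_cmd, struct ibmvscsis_cmd, se_cmd);
 */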
177
178struct ibmvscsis_nexus {
179 struct se_session *se_sess;
180};
181
182struct ibmvscsis_tport {
183 /* SCSI protocol the tport is providing */
184 u8 tport_proto_id;
185 /* ASCII formatted WWPN for SRP Target port */
186 char tport_name[IBMVSCSIS_NAMELEN];
187 /* Returned by ibmvscsis_make_tport() */
188 struct se_wwn tport_wwn;
189 /* Returned by ibmvscsis_make_tpg() */
190 struct se_portal_group se_tpg;
191 /* ibmvscsis port target portal group tag for TCM */
192 u16 tport_tpgt;
193 /* Pointer to TCM session for I_T Nexus */
194 struct ibmvscsis_nexus *ibmv_nexus;
195 bool enabled;
196 bool releasing;
197};
198
199struct scsi_info {
200 struct list_head list;
201 char eye[MAX_EYE];
202
203	/* commands waiting for space on the response queue */
204 struct list_head waiting_rsp;
205#define NO_QUEUE 0x00
206#define WAIT_ENABLED 0x01
207 /* driver has received an initialize command */
208#define PART_UP_WAIT_ENAB 0x02
209#define WAIT_CONNECTION 0x04
210 /* have established a connection */
211#define CONNECTED 0x08
212 /* at least one port is processing SRP IU */
213#define SRP_PROCESSING 0x10
214 /* remove request received */
215#define UNCONFIGURING 0x20
216 /* disconnect by letting adapter go idle, no error */
217#define WAIT_IDLE 0x40
218 /* disconnecting to clear an error */
219#define ERR_DISCONNECT 0x80
220 /* disconnect to clear error state, then come back up */
221#define ERR_DISCONNECT_RECONNECT 0x100
222 /* disconnected after clearing an error */
223#define ERR_DISCONNECTED 0x200
224	/* a series of errors has left the adapter in an undefined state */
225#define UNDEFINED 0x400
226 u16 state;
227 int fast_fail;
228 struct target_dds dds;
229 char *cmd_pool;
230 /* list of free commands */
231 struct list_head free_cmd;
232 /* command elements ready for scheduler */
233 struct list_head schedule_q;
234 /* commands sent to TCM */
235 struct list_head active_q;
236 caddr_t *map_buf;
237 /* ioba of map buffer */
238 dma_addr_t map_ioba;
239 /* allowable number of outstanding SRP requests */
240 int request_limit;
241 /* extra credit */
242 int credit;
243 /* outstanding transactions against credit limit */
244 int debit;
245
246 /* allow only one outstanding mad request */
247#define PROCESSING_MAD 0x00002
248 /* Waiting to go idle */
249#define WAIT_FOR_IDLE 0x00004
250 /* H_REG_CRQ called */
251#define CRQ_CLOSED 0x00010
252 /* detected that client has failed */
253#define CLIENT_FAILED 0x00040
254 /* detected that transport event occurred */
255#define TRANS_EVENT 0x00080
256 /* don't attempt to send anything to the client */
257#define RESPONSE_Q_DOWN 0x00100
258 /* request made to schedule disconnect handler */
259#define SCHEDULE_DISCONNECT 0x00400
260 /* disconnect handler is scheduled */
261#define DISCONNECT_SCHEDULED 0x00800
262 u32 flags;
263 /* adapter lock */
264 spinlock_t intr_lock;
265 /* information needed to manage command queue */
266 struct cmd_queue cmd_q;
267 /* used in hcall to copy response back into srp buffer */
268 u64 empty_iu_id;
269 /* used in crq, to tag what iu the response is for */
270 u64 empty_iu_tag;
271 uint new_state;
272 /* control block for the response queue timer */
273 struct timer_cb rsp_q_timer;
274 /* keep last client to enable proper accounting */
275 struct client_info client_data;
276 /* what can this client do */
277 u32 client_cap;
278 /*
279 * The following two fields capture state and flag changes that
280	 * can occur when the lock is given up. In the original design,
281	 * the lock was held during calls into phyp; however,
282	 * phyp did not meet the PAPR architecture, so this
283	 * is a workaround.
284 */
285 u16 phyp_acr_state;
286 u32 phyp_acr_flags;
287
288 struct workqueue_struct *work_q;
289 struct completion wait_idle;
290 struct device dev;
291 struct vio_dev *dma_dev;
292 struct srp_target target;
293 struct ibmvscsis_tport tport;
294 struct tasklet_struct work_task;
295 struct work_struct proc_work;
296};
297
298/*
299 * Provide a constant that allows software to detect that the adapter
300 * is disconnecting from the client, from any of several states.
301 */
302#define IS_DISCONNECTING (UNCONFIGURING | ERR_DISCONNECT_RECONNECT | \
303 ERR_DISCONNECT)
304
305/*
306 * Provide a constant for use in interrupt handling that lets the
307 * interrupt handler know that all requests should
308 * be thrown out.
309 */
310#define DONT_PROCESS_STATE (IS_DISCONNECTING | UNDEFINED | \
311 ERR_DISCONNECTED | WAIT_IDLE)
312
313/*
314 * If any of these flag bits are set then do not allow the interrupt
315 * handler to schedule the off level handler.
316 */
317#define BLOCK (DISCONNECT_SCHEDULED)
318
319/* State and transition events that stop the interrupt handler */
320#define TARGET_STOP(VSCSI) (long)(((VSCSI)->state & DONT_PROCESS_STATE) | \
321 ((VSCSI)->flags & BLOCK))
322
323/* flag bits that are not reset during disconnect */
324#define PRESERVE_FLAG_FIELDS 0
325
326#define vio_iu(IUE) ((union viosrp_iu *)((IUE)->sbuf->buf))
327
328#define READ_CMD(cdb) (((cdb)[0] & 0x1F) == 8)
329#define WRITE_CMD(cdb) (((cdb)[0] & 0x1F) == 0xA)
330
331#ifndef H_GET_PARTNER_INFO
332#define H_GET_PARTNER_INFO 0x0000000000000008LL
333#endif
334
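/*
 * Thin wrappers around the PAPR hypervisor calls this driver uses.
 * Note that h_vioctl() accepts u3 and u4 but, as defined here, does
 * not pass them through to plpar_hcall_norets().
 */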
335#define h_copy_rdma(l, sa, sb, da, db) \
336 plpar_hcall_norets(H_COPY_RDMA, l, sa, sb, da, db)
337#define h_vioctl(u, o, a, u1, u2, u3, u4) \
338 plpar_hcall_norets(H_VIOCTL, u, o, a, u1, u2)
339#define h_reg_crq(ua, tok, sz) \
340 plpar_hcall_norets(H_REG_CRQ, ua, tok, sz)
341#define h_free_crq(ua) \
342 plpar_hcall_norets(H_FREE_CRQ, ua)
343#define h_send_crq(ua, d1, d2) \
344 plpar_hcall_norets(H_SEND_CRQ, ua, d1, d2)
345
346#endif
diff --git a/drivers/scsi/ibmvscsi_tgt/libsrp.c b/drivers/scsi/ibmvscsi_tgt/libsrp.c
new file mode 100644
index 000000000000..5a4cc28ca5ff
--- /dev/null
+++ b/drivers/scsi/ibmvscsi_tgt/libsrp.c
@@ -0,0 +1,427 @@
1/*******************************************************************************
2 * SCSI RDMA Protocol lib functions
3 *
4 * Copyright (C) 2006 FUJITA Tomonori <tomof@acm.org>
5 * Copyright (C) 2016 Bryant G. Ly <bryantly@linux.vnet.ibm.com> IBM Corp.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 ***********************************************************************/
18
19#define pr_fmt(fmt) "libsrp: " fmt
20
21#include <linux/printk.h>
22#include <linux/err.h>
23#include <linux/slab.h>
24#include <linux/kfifo.h>
25#include <linux/scatterlist.h>
26#include <linux/dma-mapping.h>
27#include <linux/module.h>
28#include <scsi/srp.h>
29#include <target/target_core_base.h>
30#include "libsrp.h"
31#include "ibmvscsi_tgt.h"
32
33static int srp_iu_pool_alloc(struct srp_queue *q, size_t max,
34 struct srp_buf **ring)
35{
36 struct iu_entry *iue;
37 int i;
38
39 q->pool = kcalloc(max, sizeof(struct iu_entry *), GFP_KERNEL);
40 if (!q->pool)
41 return -ENOMEM;
42 q->items = kcalloc(max, sizeof(struct iu_entry), GFP_KERNEL);
43 if (!q->items)
44 goto free_pool;
45
46 spin_lock_init(&q->lock);
47 kfifo_init(&q->queue, (void *)q->pool, max * sizeof(void *));
48
49 for (i = 0, iue = q->items; i < max; i++) {
50 kfifo_in(&q->queue, (void *)&iue, sizeof(void *));
51 iue->sbuf = ring[i];
52 iue++;
53 }
54 return 0;
55
56free_pool:
57 kfree(q->pool);
58 return -ENOMEM;
59}
60
61static void srp_iu_pool_free(struct srp_queue *q)
62{
63 kfree(q->items);
64 kfree(q->pool);
65}
66
67static struct srp_buf **srp_ring_alloc(struct device *dev,
68 size_t max, size_t size)
69{
70 struct srp_buf **ring;
71 int i;
72
73 ring = kcalloc(max, sizeof(struct srp_buf *), GFP_KERNEL);
74 if (!ring)
75 return NULL;
76
77 for (i = 0; i < max; i++) {
78 ring[i] = kzalloc(sizeof(*ring[i]), GFP_KERNEL);
79 if (!ring[i])
80 goto out;
81 ring[i]->buf = dma_alloc_coherent(dev, size, &ring[i]->dma,
82 GFP_KERNEL);
83 if (!ring[i]->buf)
84 goto out;
85 }
86 return ring;
87
88out:
89 for (i = 0; i < max && ring[i]; i++) {
90 if (ring[i]->buf) {
91 dma_free_coherent(dev, size, ring[i]->buf,
92 ring[i]->dma);
93 }
94 kfree(ring[i]);
95 }
96 kfree(ring);
97
98 return NULL;
99}
100
101static void srp_ring_free(struct device *dev, struct srp_buf **ring,
102 size_t max, size_t size)
103{
104 int i;
105
106 for (i = 0; i < max; i++) {
107 dma_free_coherent(dev, size, ring[i]->buf, ring[i]->dma);
108 kfree(ring[i]);
109 }
110 kfree(ring);
111}
112
113int srp_target_alloc(struct srp_target *target, struct device *dev,
114 size_t nr, size_t iu_size)
115{
116 int err;
117
118 spin_lock_init(&target->lock);
119
120 target->dev = dev;
121
122 target->srp_iu_size = iu_size;
123 target->rx_ring_size = nr;
124 target->rx_ring = srp_ring_alloc(target->dev, nr, iu_size);
125 if (!target->rx_ring)
126 return -ENOMEM;
127 err = srp_iu_pool_alloc(&target->iu_queue, nr, target->rx_ring);
128 if (err)
129 goto free_ring;
130
131 dev_set_drvdata(target->dev, target);
132 return 0;
133
134free_ring:
135 srp_ring_free(target->dev, target->rx_ring, nr, iu_size);
136 return -ENOMEM;
137}
138
139void srp_target_free(struct srp_target *target)
140{
141 dev_set_drvdata(target->dev, NULL);
142 srp_ring_free(target->dev, target->rx_ring, target->rx_ring_size,
143 target->srp_iu_size);
144 srp_iu_pool_free(&target->iu_queue);
145}
146
147struct iu_entry *srp_iu_get(struct srp_target *target)
148{
149 struct iu_entry *iue = NULL;
150
151 if (kfifo_out_locked(&target->iu_queue.queue, (void *)&iue,
152 sizeof(void *),
153 &target->iu_queue.lock) != sizeof(void *)) {
154 WARN_ONCE(1, "unexpected fifo state");
155 return NULL;
156 }
157 if (!iue)
158 return iue;
159 iue->target = target;
160 iue->flags = 0;
161 return iue;
162}
163
164void srp_iu_put(struct iu_entry *iue)
165{
166 kfifo_in_locked(&iue->target->iu_queue.queue, (void *)&iue,
167 sizeof(void *), &iue->target->iu_queue.lock);
168}
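/*
 * Usage sketch for the IU free list above (illustrative): the adapter
 * claims an entry with srp_iu_get(target), works on the SRP IU that
 * lives in iue->sbuf->buf, and hands the entry back with srp_iu_put().
 */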
169
170static int srp_direct_data(struct ibmvscsis_cmd *cmd, struct srp_direct_buf *md,
171 enum dma_data_direction dir, srp_rdma_t rdma_io,
172 int dma_map, int ext_desc)
173{
174 struct iu_entry *iue = NULL;
175 struct scatterlist *sg = NULL;
176 int err, nsg = 0, len;
177
178 if (dma_map) {
179 iue = cmd->iue;
180 sg = cmd->se_cmd.t_data_sg;
181 nsg = dma_map_sg(iue->target->dev, sg, cmd->se_cmd.t_data_nents,
182 DMA_BIDIRECTIONAL);
183 if (!nsg) {
184			pr_err("failed to map %p %d\n", iue,
185 cmd->se_cmd.t_data_nents);
186 return 0;
187 }
188 len = min(cmd->se_cmd.data_length, be32_to_cpu(md->len));
189 } else {
190 len = be32_to_cpu(md->len);
191 }
192
193 err = rdma_io(cmd, sg, nsg, md, 1, dir, len);
194
195 if (dma_map)
196 dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL);
197
198 return err;
199}
200
201static int srp_indirect_data(struct ibmvscsis_cmd *cmd, struct srp_cmd *srp_cmd,
202 struct srp_indirect_buf *id,
203 enum dma_data_direction dir, srp_rdma_t rdma_io,
204 int dma_map, int ext_desc)
205{
206 struct iu_entry *iue = NULL;
207 struct srp_direct_buf *md = NULL;
208 struct scatterlist dummy, *sg = NULL;
209 dma_addr_t token = 0;
210 int err = 0;
211 int nmd, nsg = 0, len;
212
213 if (dma_map || ext_desc) {
214 iue = cmd->iue;
215 sg = cmd->se_cmd.t_data_sg;
216 }
217
218 nmd = be32_to_cpu(id->table_desc.len) / sizeof(struct srp_direct_buf);
219
220 if ((dir == DMA_FROM_DEVICE && nmd == srp_cmd->data_in_desc_cnt) ||
221 (dir == DMA_TO_DEVICE && nmd == srp_cmd->data_out_desc_cnt)) {
222 md = &id->desc_list[0];
223 goto rdma;
224 }
225
226 if (ext_desc && dma_map) {
227 md = dma_alloc_coherent(iue->target->dev,
228 be32_to_cpu(id->table_desc.len),
229 &token, GFP_KERNEL);
230 if (!md) {
231 pr_err("Can't get dma memory %u\n",
232 be32_to_cpu(id->table_desc.len));
233 return -ENOMEM;
234 }
235
236 sg_init_one(&dummy, md, be32_to_cpu(id->table_desc.len));
237 sg_dma_address(&dummy) = token;
238 sg_dma_len(&dummy) = be32_to_cpu(id->table_desc.len);
239 err = rdma_io(cmd, &dummy, 1, &id->table_desc, 1, DMA_TO_DEVICE,
240 be32_to_cpu(id->table_desc.len));
241 if (err) {
242 pr_err("Error copying indirect table %d\n", err);
243 goto free_mem;
244 }
245 } else {
246		pr_err("This command uses an external indirect buffer\n");
247 return -EINVAL;
248 }
249
250rdma:
251 if (dma_map) {
252 nsg = dma_map_sg(iue->target->dev, sg, cmd->se_cmd.t_data_nents,
253 DMA_BIDIRECTIONAL);
254 if (!nsg) {
255			pr_err("failed to map %p %d\n", iue,
256 cmd->se_cmd.t_data_nents);
257 err = -EIO;
258 goto free_mem;
259 }
260 len = min(cmd->se_cmd.data_length, be32_to_cpu(id->len));
261 } else {
262 len = be32_to_cpu(id->len);
263 }
264
265 err = rdma_io(cmd, sg, nsg, md, nmd, dir, len);
266
267 if (dma_map)
268 dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL);
269
270free_mem:
271 if (token && dma_map) {
272 dma_free_coherent(iue->target->dev,
273 be32_to_cpu(id->table_desc.len), md, token);
274 }
275 return err;
276}
277
278static int data_out_desc_size(struct srp_cmd *cmd)
279{
280 int size = 0;
281 u8 fmt = cmd->buf_fmt >> 4;
282
283 switch (fmt) {
284 case SRP_NO_DATA_DESC:
285 break;
286 case SRP_DATA_DESC_DIRECT:
287 size = sizeof(struct srp_direct_buf);
288 break;
289 case SRP_DATA_DESC_INDIRECT:
290 size = sizeof(struct srp_indirect_buf) +
291 sizeof(struct srp_direct_buf) * cmd->data_out_desc_cnt;
292 break;
293 default:
294		pr_err("client error: invalid data_out_format %x\n", fmt);
295 break;
296 }
297 return size;
298}
299
300/*
301 * TODO: this can be called multiple times for a single command if it
302 * has very long data.
303 */
304int srp_transfer_data(struct ibmvscsis_cmd *cmd, struct srp_cmd *srp_cmd,
305 srp_rdma_t rdma_io, int dma_map, int ext_desc)
306{
307 struct srp_direct_buf *md;
308 struct srp_indirect_buf *id;
309 enum dma_data_direction dir;
310 int offset, err = 0;
311 u8 format;
312
313 if (!cmd->se_cmd.t_data_nents)
314 return 0;
315
316 offset = srp_cmd->add_cdb_len & ~3;
317
318 dir = srp_cmd_direction(srp_cmd);
319 if (dir == DMA_FROM_DEVICE)
320 offset += data_out_desc_size(srp_cmd);
321
322 if (dir == DMA_TO_DEVICE)
323 format = srp_cmd->buf_fmt >> 4;
324 else
325 format = srp_cmd->buf_fmt & ((1U << 4) - 1);
326
327 switch (format) {
328 case SRP_NO_DATA_DESC:
329 break;
330 case SRP_DATA_DESC_DIRECT:
331 md = (struct srp_direct_buf *)(srp_cmd->add_data + offset);
332 err = srp_direct_data(cmd, md, dir, rdma_io, dma_map, ext_desc);
333 break;
334 case SRP_DATA_DESC_INDIRECT:
335 id = (struct srp_indirect_buf *)(srp_cmd->add_data + offset);
336 err = srp_indirect_data(cmd, srp_cmd, id, dir, rdma_io, dma_map,
337 ext_desc);
338 break;
339 default:
340 pr_err("Unknown format %d %x\n", dir, format);
341 err = -EINVAL;
342 }
343
344 return err;
345}
346
347u64 srp_data_length(struct srp_cmd *cmd, enum dma_data_direction dir)
348{
349 struct srp_direct_buf *md;
350 struct srp_indirect_buf *id;
351 u64 len = 0;
352 uint offset = cmd->add_cdb_len & ~3;
353 u8 fmt;
354
355 if (dir == DMA_TO_DEVICE) {
356 fmt = cmd->buf_fmt >> 4;
357 } else {
358 fmt = cmd->buf_fmt & ((1U << 4) - 1);
359 offset += data_out_desc_size(cmd);
360 }
361
362 switch (fmt) {
363 case SRP_NO_DATA_DESC:
364 break;
365 case SRP_DATA_DESC_DIRECT:
366 md = (struct srp_direct_buf *)(cmd->add_data + offset);
367 len = be32_to_cpu(md->len);
368 break;
369 case SRP_DATA_DESC_INDIRECT:
370 id = (struct srp_indirect_buf *)(cmd->add_data + offset);
371 len = be32_to_cpu(id->len);
372 break;
373 default:
374 pr_err("invalid data format %x\n", fmt);
375 break;
376 }
377 return len;
378}
379
380int srp_get_desc_table(struct srp_cmd *srp_cmd, enum dma_data_direction *dir,
381 u64 *data_len)
382{
383 struct srp_indirect_buf *idb;
384 struct srp_direct_buf *db;
385 uint add_cdb_offset;
386 int rc;
387
388 /*
389 * The pointer computations below will only be compiled correctly
390 * if srp_cmd::add_data is declared as s8*, u8*, s8[] or u8[], so check
391 * whether srp_cmd::add_data has been declared as a byte pointer.
392 */
393 BUILD_BUG_ON(!__same_type(srp_cmd->add_data[0], (s8)0)
394 && !__same_type(srp_cmd->add_data[0], (u8)0));
395
396 BUG_ON(!dir);
397 BUG_ON(!data_len);
398
399 rc = 0;
400 *data_len = 0;
401
402 *dir = DMA_NONE;
403
404 if (srp_cmd->buf_fmt & 0xf)
405 *dir = DMA_FROM_DEVICE;
406 else if (srp_cmd->buf_fmt >> 4)
407 *dir = DMA_TO_DEVICE;
408
409 add_cdb_offset = srp_cmd->add_cdb_len & ~3;
410 if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) ||
411 ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) {
412 db = (struct srp_direct_buf *)(srp_cmd->add_data
413 + add_cdb_offset);
414 *data_len = be32_to_cpu(db->len);
415 } else if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_INDIRECT) ||
416 ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_INDIRECT)) {
417 idb = (struct srp_indirect_buf *)(srp_cmd->add_data
418 + add_cdb_offset);
419
420 *data_len = be32_to_cpu(idb->len);
421 }
422 return rc;
423}
424
425MODULE_DESCRIPTION("SCSI RDMA Protocol lib functions");
426MODULE_AUTHOR("FUJITA Tomonori");
427MODULE_LICENSE("GPL");
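/*
 * Companion note for srp_transfer_data() and srp_data_length(): buf_fmt
 * packs the data-out descriptor format into its high nibble and the
 * data-in format into its low nibble (0 = none, 1 = direct,
 * 2 = indirect, per scsi/srp.h).  For illustration:
 *
 *	unsigned char out = buf_fmt >> 4;	data-out (write) format
 *	unsigned char in  = buf_fmt & 0x0f;	data-in (read) format
 *
 * so buf_fmt == 0x10 describes a direct data-out buffer and no data-in,
 * matching srp_cmd_direction() in libsrp.h returning DMA_TO_DEVICE.
 */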
diff --git a/drivers/scsi/ibmvscsi_tgt/libsrp.h b/drivers/scsi/ibmvscsi_tgt/libsrp.h
new file mode 100644
index 000000000000..4696f331453e
--- /dev/null
+++ b/drivers/scsi/ibmvscsi_tgt/libsrp.h
@@ -0,0 +1,123 @@
1#ifndef __LIBSRP_H__
2#define __LIBSRP_H__
3
4#include <linux/list.h>
5#include <linux/kfifo.h>
6#include <scsi/srp.h>
7
8enum srp_valid {
9 INVALIDATE_CMD_RESP_EL = 0,
10 VALID_CMD_RESP_EL = 0x80,
11 VALID_INIT_MSG = 0xC0,
12 VALID_TRANS_EVENT = 0xFF
13};
14
15enum srp_format {
16 SRP_FORMAT = 1,
17 MAD_FORMAT = 2,
18 OS400_FORMAT = 3,
19 AIX_FORMAT = 4,
20 LINUX_FORMAT = 5,
21 MESSAGE_IN_CRQ = 6
22};
23
24enum srp_init_msg {
25 INIT_MSG = 1,
26 INIT_COMPLETE_MSG = 2
27};
28
29enum srp_trans_event {
30 UNUSED_FORMAT = 0,
31 PARTNER_FAILED = 1,
32 PARTNER_DEREGISTER = 2,
33 MIGRATED = 6
34};
35
36enum srp_status {
37 HEADER_DESCRIPTOR = 0xF1,
38 PING = 0xF5,
39 PING_RESPONSE = 0xF6
40};
41
42enum srp_mad_version {
43 MAD_VERSION_1 = 1
44};
45
46enum srp_os_type {
47 OS400 = 1,
48 LINUX = 2,
49 AIX = 3,
50 OFW = 4
51};
52
53enum srp_task_attributes {
54 SRP_SIMPLE_TASK = 0,
55 SRP_HEAD_TASK = 1,
56 SRP_ORDERED_TASK = 2,
57 SRP_ACA_TASK = 4
58};
59
60enum {
61 SRP_TASK_MANAGEMENT_FUNCTION_COMPLETE = 0,
62 SRP_REQUEST_FIELDS_INVALID = 2,
63 SRP_TASK_MANAGEMENT_FUNCTION_NOT_SUPPORTED = 4,
64 SRP_TASK_MANAGEMENT_FUNCTION_FAILED = 5
65};
66
67struct srp_buf {
68 dma_addr_t dma;
69 void *buf;
70};
71
72struct srp_queue {
73 void *pool;
74 void *items;
75 struct kfifo queue;
76 spinlock_t lock;
77};
78
79struct srp_target {
80 struct device *dev;
81
82 spinlock_t lock;
83 struct list_head cmd_queue;
84
85 size_t srp_iu_size;
86 struct srp_queue iu_queue;
87 size_t rx_ring_size;
88 struct srp_buf **rx_ring;
89
90 void *ldata;
91};
92
93struct iu_entry {
94 struct srp_target *target;
95
96 struct list_head ilist;
97 dma_addr_t remote_token;
98 unsigned long flags;
99
100 struct srp_buf *sbuf;
101 u16 iu_len;
102};
103
104struct ibmvscsis_cmd;
105
106typedef int (srp_rdma_t)(struct ibmvscsis_cmd *, struct scatterlist *, int,
107 struct srp_direct_buf *, int,
108 enum dma_data_direction, unsigned int);
109int srp_target_alloc(struct srp_target *, struct device *, size_t, size_t);
110void srp_target_free(struct srp_target *);
111struct iu_entry *srp_iu_get(struct srp_target *);
112void srp_iu_put(struct iu_entry *);
113int srp_transfer_data(struct ibmvscsis_cmd *, struct srp_cmd *,
114 srp_rdma_t, int, int);
115u64 srp_data_length(struct srp_cmd *cmd, enum dma_data_direction dir);
116int srp_get_desc_table(struct srp_cmd *srp_cmd, enum dma_data_direction *dir,
117 u64 *data_len);
118static inline int srp_cmd_direction(struct srp_cmd *cmd)
119{
120 return (cmd->buf_fmt >> 4) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
121}
122
123#endif