author		Linus Torvalds <torvalds@linux-foundation.org>	2010-08-04 18:15:15 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-08-04 18:15:15 -0400
commit		03da30986793385af57eeca3296253c887b742e6 (patch)
tree		9c46dbe51c9d0856990649dd917ab45474b7be87 /drivers/s390
parent		6ba74014c1ab0e37af7de6f64b4eccbbae3cb9e7 (diff)
parent		339f4f4eab80caa6cf0d39fb057ad6ddb84ba91e (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (276 commits)
  [SCSI] zfcp: Trigger logging in the FCP channel on qdio error conditions
  [SCSI] zfcp: Introduce experimental support for DIF/DIX
  [SCSI] zfcp: Enable data division support for FCP devices
  [SCSI] zfcp: Prevent access on uninitialized memory.
  [SCSI] zfcp: Post events through FC transport class
  [SCSI] zfcp: Cleanup QDIO attachment and improve processing.
  [SCSI] zfcp: Cleanup function parameters for sbal value.
  [SCSI] zfcp: Use correct width for timer_interval field
  [SCSI] zfcp: Remove SCSI device when removing unit
  [SCSI] zfcp: Use memdup_user and kstrdup
  [SCSI] zfcp: Fix retry after failed "open port" erp action
  [SCSI] zfcp: Fail erp after timeout
  [SCSI] zfcp: Use forced_reopen in terminate_rport_io callback
  [SCSI] zfcp: Register SCSI devices after successful fc_remote_port_add
  [SCSI] zfcp: Do not try "forced close" when port is already closed
  [SCSI] zfcp: Do not unblock rport from REOPEN_PORT_FORCED
  [SCSI] sd: add support for runtime PM
  [SCSI] implement runtime Power Management
  [SCSI] convert to the new PM framework
  [SCSI] Unify SAM_ and SAM_STAT_ macros
  ...
Diffstat (limited to 'drivers/s390')
-rw-r--r--  drivers/s390/cio/qdio_setup.c     2
-rw-r--r--  drivers/s390/scsi/zfcp_aux.c     10
-rw-r--r--  drivers/s390/scsi/zfcp_cfdc.c    12
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.c      5
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.h      1
-rw-r--r--  drivers/s390/scsi/zfcp_def.h      5
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c     24
-rw-r--r--  drivers/s390/scsi/zfcp_ext.h     11
-rw-r--r--  drivers/s390/scsi/zfcp_fc.c      54
-rw-r--r--  drivers/s390/scsi/zfcp_fc.h      27
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c    169
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.h     34
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.c   206
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.h    95
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c   103
-rw-r--r--  drivers/s390/scsi/zfcp_sysfs.c   12
16 files changed, 510 insertions, 260 deletions
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 6326b67c45d2..34c7e4046df4 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -368,6 +368,8 @@ static void setup_qib(struct qdio_irq *irq_ptr,
 	if (qebsm_possible())
 		irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM;
 
+	irq_ptr->qib.rflags |= init_data->qib_rflags;
+
 	irq_ptr->qib.qfmt = init_data->q_format;
 	if (init_data->no_input_qs)
 		irq_ptr->qib.isliba =
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index e331df2122f7..96fa1f536394 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -98,13 +98,11 @@ static void __init zfcp_init_device_setup(char *devstr)
 	u64 wwpn, lun;
 
 	/* duplicate devstr and keep the original for sysfs presentation*/
-	str_saved = kmalloc(strlen(devstr) + 1, GFP_KERNEL);
+	str_saved = kstrdup(devstr, GFP_KERNEL);
 	str = str_saved;
 	if (!str)
 		return;
 
-	strcpy(str, devstr);
-
 	token = strsep(&str, ",");
 	if (!token || strlen(token) >= ZFCP_BUS_ID_SIZE)
 		goto err_out;
@@ -314,7 +312,7 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
 	}
 	retval = -EINVAL;
 
-	INIT_WORK(&unit->scsi_work, zfcp_scsi_scan);
+	INIT_WORK(&unit->scsi_work, zfcp_scsi_scan_work);
 
 	spin_lock_init(&unit->latencies.lock);
 	unit->latencies.write.channel.min = 0xFFFFFFFF;
@@ -526,6 +524,10 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
 	rwlock_init(&adapter->port_list_lock);
 	INIT_LIST_HEAD(&adapter->port_list);
 
+	INIT_LIST_HEAD(&adapter->events.list);
+	INIT_WORK(&adapter->events.work, zfcp_fc_post_event);
+	spin_lock_init(&adapter->events.list_lock);
+
 	init_waitqueue_head(&adapter->erp_ready_wq);
 	init_waitqueue_head(&adapter->erp_done_wqh);
 
diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c
index 1a2db0a35737..fcbd2b756da4 100644
--- a/drivers/s390/scsi/zfcp_cfdc.c
+++ b/drivers/s390/scsi/zfcp_cfdc.c
@@ -189,18 +189,12 @@ static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command,
 	if (!fsf_cfdc)
 		return -ENOMEM;
 
-	data = kmalloc(sizeof(struct zfcp_cfdc_data), GFP_KERNEL);
-	if (!data) {
-		retval = -ENOMEM;
+	data = memdup_user(data_user, sizeof(*data_user));
+	if (IS_ERR(data)) {
+		retval = PTR_ERR(data);
 		goto no_mem_sense;
 	}
 
-	retval = copy_from_user(data, data_user, sizeof(*data));
-	if (retval) {
-		retval = -EFAULT;
-		goto free_buffer;
-	}
-
 	if (data->signature != 0xCFDCACDF) {
 		retval = -EINVAL;
 		goto free_buffer;
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 075852f6968c..a86117b0d6e1 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -155,6 +155,8 @@ void _zfcp_dbf_hba_fsf_response(const char *tag2, int level,
 		if (scsi_cmnd) {
 			response->u.fcp.cmnd = (unsigned long)scsi_cmnd;
 			response->u.fcp.serial = scsi_cmnd->serial_number;
+			response->u.fcp.data_dir =
+				qtcb->bottom.io.data_direction;
 		}
 		break;
 
@@ -326,6 +328,7 @@ static void zfcp_dbf_hba_view_response(char **p,
 	case FSF_QTCB_FCP_CMND:
 		if (r->fsf_req_status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT)
 			break;
+		zfcp_dbf_out(p, "data_direction", "0x%04x", r->u.fcp.data_dir);
 		zfcp_dbf_out(p, "scsi_cmnd", "0x%0Lx", r->u.fcp.cmnd);
 		zfcp_dbf_out(p, "scsi_serial", "0x%016Lx", r->u.fcp.serial);
 		*p += sprintf(*p, "\n");
@@ -1005,7 +1008,7 @@ int zfcp_dbf_adapter_register(struct zfcp_adapter *adapter)
 	char dbf_name[DEBUG_MAX_NAME_LEN];
 	struct zfcp_dbf *dbf;
 
-	dbf = kmalloc(sizeof(struct zfcp_dbf), GFP_KERNEL);
+	dbf = kzalloc(sizeof(struct zfcp_dbf), GFP_KERNEL);
 	if (!dbf)
 		return -ENOMEM;
 
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index 457e046f2d28..2bcc3403126a 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -111,6 +111,7 @@ struct zfcp_dbf_hba_record_response {
 		struct {
 			u64 cmnd;
 			u64 serial;
+			u32 data_dir;
 		} fcp;
 		struct {
 			u64 wwpn;
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 9fa1b064893e..e1c6b6e05a75 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -37,6 +37,7 @@
 #include <asm/ebcdic.h>
 #include <asm/sysinfo.h>
 #include "zfcp_fsf.h"
+#include "zfcp_fc.h"
 #include "zfcp_qdio.h"
 
 struct zfcp_reqlist;
@@ -72,10 +73,12 @@ struct zfcp_reqlist;
 
 /* adapter status */
 #define ZFCP_STATUS_ADAPTER_QDIOUP		0x00000002
+#define ZFCP_STATUS_ADAPTER_SIOSL_ISSUED	0x00000004
 #define ZFCP_STATUS_ADAPTER_XCONFIG_OK		0x00000008
 #define ZFCP_STATUS_ADAPTER_HOST_CON_INIT	0x00000010
 #define ZFCP_STATUS_ADAPTER_ERP_PENDING		0x00000100
 #define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED	0x00000200
+#define ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED	0x00000400
 
 /* remote port status */
 #define ZFCP_STATUS_PORT_PHYS_OPEN		0x00000001
@@ -190,6 +193,7 @@ struct zfcp_adapter {
 	struct service_level	service_level;
 	struct workqueue_struct	*work_queue;
 	struct device_dma_parameters dma_parms;
+	struct zfcp_fc_events events;
 };
 
 struct zfcp_port {
@@ -212,6 +216,7 @@ struct zfcp_port {
 	struct work_struct	test_link_work;
 	struct work_struct	rport_work;
 	enum { RPORT_NONE, RPORT_ADD, RPORT_DEL }  rport_task;
+	unsigned int		starget_id;
 };
 
 struct zfcp_unit {
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index fd068bc1bd0a..160b432c907f 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -141,9 +141,13 @@ static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter,
 		if (!(p_status & ZFCP_STATUS_COMMON_UNBLOCKED))
 			need = ZFCP_ERP_ACTION_REOPEN_PORT;
 		/* fall through */
-	case ZFCP_ERP_ACTION_REOPEN_PORT:
 	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
 		p_status = atomic_read(&port->status);
+		if (!(p_status & ZFCP_STATUS_COMMON_OPEN))
+			need = ZFCP_ERP_ACTION_REOPEN_PORT;
+		/* fall through */
+	case ZFCP_ERP_ACTION_REOPEN_PORT:
+		p_status = atomic_read(&port->status);
 		if (p_status & ZFCP_STATUS_COMMON_ERP_INUSE)
 			return 0;
 		a_status = atomic_read(&adapter->status);
@@ -893,8 +897,7 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
 		}
 		if (port->d_id && !(p_status & ZFCP_STATUS_COMMON_NOESC)) {
 			port->d_id = 0;
-			_zfcp_erp_port_reopen(port, 0, "erpsoc1", NULL);
-			return ZFCP_ERP_EXIT;
+			return ZFCP_ERP_FAILED;
 		}
 		/* fall through otherwise */
 	}
@@ -1188,19 +1191,14 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
 
 	switch (act->action) {
 	case ZFCP_ERP_ACTION_REOPEN_UNIT:
-		if ((result == ZFCP_ERP_SUCCEEDED) && !unit->device) {
-			get_device(&unit->dev);
-			if (scsi_queue_work(unit->port->adapter->scsi_host,
-					    &unit->scsi_work) <= 0)
-				put_device(&unit->dev);
-		}
 		put_device(&unit->dev);
 		break;
 
-	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
 	case ZFCP_ERP_ACTION_REOPEN_PORT:
 		if (result == ZFCP_ERP_SUCCEEDED)
 			zfcp_scsi_schedule_rport_register(port);
+		/* fall through */
+	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
 		put_device(&port->dev);
 		break;
 
@@ -1247,6 +1245,11 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
 		goto unlock;
 	}
 
+	if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) {
+		retval = ZFCP_ERP_FAILED;
+		goto check_target;
+	}
+
 	zfcp_erp_action_to_running(erp_action);
 
 	/* no lock to allow for blocking operations */
@@ -1279,6 +1282,7 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
 		goto unlock;
 	}
 
+check_target:
 	retval = zfcp_erp_strategy_check_target(erp_action, retval);
 	zfcp_erp_action_dequeue(erp_action);
 	retval = zfcp_erp_strategy_statechange(erp_action, retval);
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 48a8f93b72f5..3b93239c6f69 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -96,6 +96,9 @@ extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, char *,
 extern void zfcp_erp_timeout_handler(unsigned long);
 
 /* zfcp_fc.c */
+extern void zfcp_fc_enqueue_event(struct zfcp_adapter *,
+				  enum fc_host_event_code event_code, u32);
+extern void zfcp_fc_post_event(struct work_struct *);
 extern void zfcp_fc_scan_ports(struct work_struct *);
 extern void zfcp_fc_incoming_els(struct zfcp_fsf_req *);
 extern void zfcp_fc_port_did_lookup(struct work_struct *);
@@ -146,9 +149,10 @@ extern void zfcp_qdio_destroy(struct zfcp_qdio *);
 extern int zfcp_qdio_sbal_get(struct zfcp_qdio *);
 extern int zfcp_qdio_send(struct zfcp_qdio *, struct zfcp_qdio_req *);
 extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *, struct zfcp_qdio_req *,
-				   struct scatterlist *, int);
+				   struct scatterlist *);
 extern int zfcp_qdio_open(struct zfcp_qdio *);
 extern void zfcp_qdio_close(struct zfcp_qdio *);
+extern void zfcp_qdio_siosl(struct zfcp_adapter *);
 
 /* zfcp_scsi.c */
 extern struct zfcp_data zfcp_data;
@@ -159,7 +163,10 @@ extern void zfcp_scsi_rport_work(struct work_struct *);
 extern void zfcp_scsi_schedule_rport_register(struct zfcp_port *);
 extern void zfcp_scsi_schedule_rport_block(struct zfcp_port *);
 extern void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *);
-extern void zfcp_scsi_scan(struct work_struct *);
+extern void zfcp_scsi_scan(struct zfcp_unit *);
+extern void zfcp_scsi_scan_work(struct work_struct *);
+extern void zfcp_scsi_set_prot(struct zfcp_adapter *);
+extern void zfcp_scsi_dif_sense_error(struct scsi_cmnd *, int);
 
 /* zfcp_sysfs.c */
 extern struct attribute_group zfcp_sysfs_unit_attrs;
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 6f8ab43a4856..6f3ed2b9a349 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -23,6 +23,58 @@ static u32 zfcp_fc_rscn_range_mask[] = {
 	[ELS_ADDR_FMT_FAB] = 0x000000,
 };
 
+/**
+ * zfcp_fc_post_event - post event to userspace via fc_transport
+ * @work: work struct with enqueued events
+ */
+void zfcp_fc_post_event(struct work_struct *work)
+{
+	struct zfcp_fc_event *event = NULL, *tmp = NULL;
+	LIST_HEAD(tmp_lh);
+	struct zfcp_fc_events *events = container_of(work,
+					struct zfcp_fc_events, work);
+	struct zfcp_adapter *adapter = container_of(events, struct zfcp_adapter,
+					events);
+
+	spin_lock_bh(&events->list_lock);
+	list_splice_init(&events->list, &tmp_lh);
+	spin_unlock_bh(&events->list_lock);
+
+	list_for_each_entry_safe(event, tmp, &tmp_lh, list) {
+		fc_host_post_event(adapter->scsi_host, fc_get_event_number(),
+				   event->code, event->data);
+		list_del(&event->list);
+		kfree(event);
+	}
+
+}
+
+/**
+ * zfcp_fc_enqueue_event - safely enqueue FC HBA API event from irq context
+ * @adapter: The adapter where to enqueue the event
+ * @event_code: The event code (as defined in fc_host_event_code in
+ *		scsi_transport_fc.h)
+ * @event_data: The event data (e.g. n_port page in case of els)
+ */
+void zfcp_fc_enqueue_event(struct zfcp_adapter *adapter,
+			   enum fc_host_event_code event_code, u32 event_data)
+{
+	struct zfcp_fc_event *event;
+
+	event = kmalloc(sizeof(struct zfcp_fc_event), GFP_ATOMIC);
+	if (!event)
+		return;
+
+	event->code = event_code;
+	event->data = event_data;
+
+	spin_lock(&adapter->events.list_lock);
+	list_add_tail(&event->list, &adapter->events.list);
+	spin_unlock(&adapter->events.list_lock);
+
+	queue_work(adapter->work_queue, &adapter->events.work);
+}
+
 static int zfcp_fc_wka_port_get(struct zfcp_fc_wka_port *wka_port)
 {
 	if (mutex_lock_interruptible(&wka_port->mutex))
@@ -148,6 +200,8 @@ static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
 		afmt = page->rscn_page_flags & ELS_RSCN_ADDR_FMT_MASK;
 		_zfcp_fc_incoming_rscn(fsf_req, zfcp_fc_rscn_range_mask[afmt],
 				       page);
+		zfcp_fc_enqueue_event(fsf_req->adapter, FCH_EVT_RSCN,
+				      *(u32 *)page);
 	}
 	queue_work(fsf_req->adapter->work_queue, &fsf_req->adapter->scan_work);
 }
diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h
index 0747b087390d..938d50360166 100644
--- a/drivers/s390/scsi/zfcp_fc.h
+++ b/drivers/s390/scsi/zfcp_fc.h
@@ -30,6 +30,30 @@
 #define ZFCP_FC_CTELS_TMO	(2 * FC_DEF_R_A_TOV / 1000)
 
 /**
+ * struct zfcp_fc_event - FC HBAAPI event for internal queueing from irq context
+ * @code: Event code
+ * @data: Event data
+ * @list: list_head for zfcp_fc_events list
+ */
+struct zfcp_fc_event {
+	enum fc_host_event_code code;
+	u32 data;
+	struct list_head list;
+};
+
+/**
+ * struct zfcp_fc_events - Infrastructure for posting FC events from irq context
+ * @list: List for queueing of events from irq context to workqueue
+ * @list_lock: Lock for event list
+ * @work: work_struct for forwarding events in workqueue
+*/
+struct zfcp_fc_events {
+	struct list_head list;
+	spinlock_t list_lock;
+	struct work_struct work;
+};
+
+/**
  * struct zfcp_fc_gid_pn_req - container for ct header plus gid_pn request
  * @ct_hdr: FC GS common transport header
  * @gid_pn: GID_PN request
@@ -196,6 +220,9 @@ void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi)
 	memcpy(fcp->fc_cdb, scsi->cmnd, scsi->cmd_len);
 
 	fcp->fc_dl = scsi_bufflen(scsi);
+
+	if (scsi_get_prot_type(scsi) == SCSI_PROT_DIF_TYPE1)
+		fcp->fc_dl += fcp->fc_dl / scsi->device->sector_size * 8;
 }
 
 /**
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 71663fb77310..9d1d7d1842ce 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -21,6 +21,7 @@
 static void zfcp_fsf_request_timeout_handler(unsigned long data)
 {
 	struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
+	zfcp_qdio_siosl(adapter);
 	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
 				"fsrth_1", NULL);
 }
@@ -274,6 +275,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
 		break;
 	case FSF_STATUS_READ_LINK_DOWN:
 		zfcp_fsf_status_read_link_down(req);
+		zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKDOWN, 0);
 		break;
 	case FSF_STATUS_READ_LINK_UP:
 		dev_info(&adapter->ccw_device->dev,
@@ -286,6 +288,8 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
 					ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
 					ZFCP_STATUS_COMMON_ERP_FAILED,
 					"fssrh_2", req);
+		zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKUP, 0);
+
 		break;
 	case FSF_STATUS_READ_NOTIFICATION_LOST:
 		if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_ACT_UPDATED)
@@ -323,6 +327,7 @@ static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
 		dev_err(&req->adapter->ccw_device->dev,
 			"The FCP adapter reported a problem "
 			"that cannot be recovered\n");
+		zfcp_qdio_siosl(req->adapter);
 		zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1", req);
 		break;
 	}
@@ -413,6 +418,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
 		dev_err(&adapter->ccw_device->dev,
 			"0x%x is not a valid transfer protocol status\n",
 			qtcb->prefix.prot_status);
+		zfcp_qdio_siosl(adapter);
 		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9", req);
 	}
 	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -495,7 +501,7 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
 	fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
 
 	adapter->hydra_version = bottom->adapter_type;
-	adapter->timer_ticks = bottom->timer_interval;
+	adapter->timer_ticks = bottom->timer_interval & ZFCP_FSF_TIMER_INT_MASK;
 	adapter->stat_read_buf_num = max(bottom->status_read_buf_num,
 					 (u16)FSF_STATUS_READS_RECOM);
 
@@ -523,6 +529,8 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
 		return -EIO;
 	}
 
+	zfcp_scsi_set_prot(adapter);
+
 	return 0;
 }
 
@@ -732,7 +740,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
 
 	zfcp_reqlist_add(adapter->req_list, req);
 
-	req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q.count);
+	req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free);
 	req->issued = get_clock();
 	if (zfcp_qdio_send(qdio, &req->qdio_req)) {
 		del_timer(&req->timer);
@@ -959,8 +967,7 @@ static void zfcp_fsf_setup_ct_els_unchained(struct zfcp_qdio *qdio,
 
 static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
 				       struct scatterlist *sg_req,
-				       struct scatterlist *sg_resp,
-				       int max_sbals)
+				       struct scatterlist *sg_resp)
 {
 	struct zfcp_adapter *adapter = req->adapter;
 	u32 feat = adapter->adapter_features;
@@ -983,18 +990,19 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
 		return 0;
 	}
 
-	bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req,
-					sg_req, max_sbals);
+	bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req, sg_req);
 	if (bytes <= 0)
 		return -EIO;
+	zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);
 	req->qtcb->bottom.support.req_buf_length = bytes;
 	zfcp_qdio_skip_to_last_sbale(&req->qdio_req);
 
 	bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req,
-					sg_resp, max_sbals);
+					sg_resp);
 	req->qtcb->bottom.support.resp_buf_length = bytes;
 	if (bytes <= 0)
 		return -EIO;
+	zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);
 
 	return 0;
 }
@@ -1002,11 +1010,11 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
 static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
 				 struct scatterlist *sg_req,
 				 struct scatterlist *sg_resp,
-				 int max_sbals, unsigned int timeout)
+				 unsigned int timeout)
 {
 	int ret;
 
-	ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp, max_sbals);
+	ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp);
 	if (ret)
 		return ret;
 
@@ -1046,8 +1054,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
 	}
 
 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
-	ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp,
-				    ZFCP_FSF_MAX_SBALS_PER_REQ, timeout);
+	ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp, timeout);
 	if (ret)
 		goto failed_send;
 
@@ -1143,7 +1150,10 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
 	}
 
 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
-	ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, 2, timeout);
+
+	zfcp_qdio_sbal_limit(qdio, &req->qdio_req, 2);
+
+	ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, timeout);
 
 	if (ret)
 		goto failed_send;
@@ -2025,7 +2035,7 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
 	blktrc.magic = ZFCP_BLK_DRV_DATA_MAGIC;
 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
 		blktrc.flags |= ZFCP_BLK_REQ_ERROR;
-	blktrc.inb_usage = req->qdio_req.qdio_inb_usage;
+	blktrc.inb_usage = 0;
 	blktrc.outb_usage = req->qdio_req.qdio_outb_usage;
 
 	if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA &&
@@ -2035,9 +2045,13 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
 		blktrc.fabric_lat = lat_in->fabric_lat * ticks;
 
 		switch (req->qtcb->bottom.io.data_direction) {
+		case FSF_DATADIR_DIF_READ_STRIP:
+		case FSF_DATADIR_DIF_READ_CONVERT:
 		case FSF_DATADIR_READ:
 			lat = &unit->latencies.read;
 			break;
+		case FSF_DATADIR_DIF_WRITE_INSERT:
+		case FSF_DATADIR_DIF_WRITE_CONVERT:
 		case FSF_DATADIR_WRITE:
 			lat = &unit->latencies.write;
 			break;
@@ -2078,6 +2092,21 @@ static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req)
 		goto skip_fsfstatus;
 	}
 
+	switch (req->qtcb->header.fsf_status) {
+	case FSF_INCONSISTENT_PROT_DATA:
+	case FSF_INVALID_PROT_PARM:
+		set_host_byte(scpnt, DID_ERROR);
+		goto skip_fsfstatus;
+	case FSF_BLOCK_GUARD_CHECK_FAILURE:
+		zfcp_scsi_dif_sense_error(scpnt, 0x1);
+		goto skip_fsfstatus;
+	case FSF_APP_TAG_CHECK_FAILURE:
+		zfcp_scsi_dif_sense_error(scpnt, 0x2);
+		goto skip_fsfstatus;
+	case FSF_REF_TAG_CHECK_FAILURE:
+		zfcp_scsi_dif_sense_error(scpnt, 0x3);
+		goto skip_fsfstatus;
+	}
 	fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp;
 	zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt);
 
@@ -2187,6 +2216,44 @@ skip_fsfstatus:
 	}
 }
 
+static int zfcp_fsf_set_data_dir(struct scsi_cmnd *scsi_cmnd, u32 *data_dir)
+{
+	switch (scsi_get_prot_op(scsi_cmnd)) {
+	case SCSI_PROT_NORMAL:
+		switch (scsi_cmnd->sc_data_direction) {
+		case DMA_NONE:
+			*data_dir = FSF_DATADIR_CMND;
+			break;
+		case DMA_FROM_DEVICE:
+			*data_dir = FSF_DATADIR_READ;
+			break;
+		case DMA_TO_DEVICE:
+			*data_dir = FSF_DATADIR_WRITE;
+			break;
+		case DMA_BIDIRECTIONAL:
+			return -EINVAL;
+		}
+		break;
+
+	case SCSI_PROT_READ_STRIP:
+		*data_dir = FSF_DATADIR_DIF_READ_STRIP;
+		break;
+	case SCSI_PROT_WRITE_INSERT:
+		*data_dir = FSF_DATADIR_DIF_WRITE_INSERT;
+		break;
+	case SCSI_PROT_READ_PASS:
+		*data_dir = FSF_DATADIR_DIF_READ_CONVERT;
+		break;
+	case SCSI_PROT_WRITE_PASS:
+		*data_dir = FSF_DATADIR_DIF_WRITE_CONVERT;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 /**
  * zfcp_fsf_send_fcp_command_task - initiate an FCP command (for a SCSI command)
  * @unit: unit where command is sent to
@@ -2198,16 +2265,17 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
 	struct zfcp_fsf_req *req;
 	struct fcp_cmnd *fcp_cmnd;
 	unsigned int sbtype = SBAL_FLAGS0_TYPE_READ;
-	int real_bytes, retval = -EIO;
+	int real_bytes, retval = -EIO, dix_bytes = 0;
 	struct zfcp_adapter *adapter = unit->port->adapter;
 	struct zfcp_qdio *qdio = adapter->qdio;
+	struct fsf_qtcb_bottom_io *io;
 
 	if (unlikely(!(atomic_read(&unit->status) &
 		       ZFCP_STATUS_COMMON_UNBLOCKED)))
 		return -EBUSY;
 
 	spin_lock(&qdio->req_q_lock);
-	if (atomic_read(&qdio->req_q.count) <= 0) {
+	if (atomic_read(&qdio->req_q_free) <= 0) {
 		atomic_inc(&qdio->req_q_full);
 		goto out;
 	}
@@ -2223,56 +2291,45 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
 		goto out;
 	}
 
+	scsi_cmnd->host_scribble = (unsigned char *) req->req_id;
+
+	io = &req->qtcb->bottom.io;
 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
 	req->unit = unit;
 	req->data = scsi_cmnd;
 	req->handler = zfcp_fsf_send_fcp_command_handler;
 	req->qtcb->header.lun_handle = unit->handle;
 	req->qtcb->header.port_handle = unit->port->handle;
-	req->qtcb->bottom.io.service_class = FSF_CLASS_3;
-	req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;
+	io->service_class = FSF_CLASS_3;
+	io->fcp_cmnd_length = FCP_CMND_LEN;
 
-	scsi_cmnd->host_scribble = (unsigned char *) req->req_id;
-
-	/*
-	 * set depending on data direction:
-	 *      data direction bits in SBALE (SB Type)
-	 *      data direction bits in QTCB
-	 */
-	switch (scsi_cmnd->sc_data_direction) {
-	case DMA_NONE:
-		req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
-		break;
-	case DMA_FROM_DEVICE:
-		req->qtcb->bottom.io.data_direction = FSF_DATADIR_READ;
-		break;
-	case DMA_TO_DEVICE:
-		req->qtcb->bottom.io.data_direction = FSF_DATADIR_WRITE;
-		break;
-	case DMA_BIDIRECTIONAL:
-		goto failed_scsi_cmnd;
+	if (scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) {
+		io->data_block_length = scsi_cmnd->device->sector_size;
+		io->ref_tag_value = scsi_get_lba(scsi_cmnd) & 0xFFFFFFFF;
 	}
 
+	zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction);
+
 	get_device(&unit->dev);
 
 	fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
 	zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd);
 
+	if (scsi_prot_sg_count(scsi_cmnd)) {
+		zfcp_qdio_set_data_div(qdio, &req->qdio_req,
+				       scsi_prot_sg_count(scsi_cmnd));
+		dix_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
+						    scsi_prot_sglist(scsi_cmnd));
+		io->prot_data_length = dix_bytes;
+	}
+
 	real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
-					     scsi_sglist(scsi_cmnd),
-					     ZFCP_FSF_MAX_SBALS_PER_REQ);
-	if (unlikely(real_bytes < 0)) {
-		if (req->qdio_req.sbal_number >= ZFCP_FSF_MAX_SBALS_PER_REQ) {
-			dev_err(&adapter->ccw_device->dev,
-				"Oversize data package, unit 0x%016Lx "
-				"on port 0x%016Lx closed\n",
-				(unsigned long long)unit->fcp_lun,
-				(unsigned long long)unit->port->wwpn);
-			zfcp_erp_unit_shutdown(unit, 0, "fssfct1", req);
-			retval = -EINVAL;
-		}
+					     scsi_sglist(scsi_cmnd));
+
+	if (unlikely(real_bytes < 0) || unlikely(dix_bytes < 0))
 		goto failed_scsi_cmnd;
-	}
+
+	zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);
 
 	retval = zfcp_fsf_req_send(req);
 	if (unlikely(retval))
@@ -2391,13 +2448,13 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
 	bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE;
 	bottom->option = fsf_cfdc->option;
 
-	bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
-					fsf_cfdc->sg,
-					ZFCP_FSF_MAX_SBALS_PER_REQ);
+	bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, fsf_cfdc->sg);
+
 	if (bytes != ZFCP_CFDC_MAX_SIZE) {
 		zfcp_fsf_req_free(req);
 		goto out;
 	}
+	zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);
 
 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
 	retval = zfcp_fsf_req_send(req);
@@ -2419,7 +2476,7 @@ out:
 void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
 {
 	struct zfcp_adapter *adapter = qdio->adapter;
-	struct qdio_buffer *sbal = qdio->resp_q.sbal[sbal_idx];
+	struct qdio_buffer *sbal = qdio->res_q[sbal_idx];
 	struct qdio_buffer_element *sbale;
 	struct zfcp_fsf_req *fsf_req;
 	unsigned long req_id;
@@ -2431,17 +2488,17 @@ void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
 	req_id = (unsigned long) sbale->addr;
 	fsf_req = zfcp_reqlist_find_rm(adapter->req_list, req_id);
 
-	if (!fsf_req)
+	if (!fsf_req) {
 		/*
 		 * Unknown request means that we have potentially memory
 		 * corruption and must stop the machine immediately.
 		 */
+		zfcp_qdio_siosl(adapter);
 		panic("error: unknown req_id (%lx) on adapter %s.\n",
 		      req_id, dev_name(&adapter->ccw_device->dev));
+	}
 
 	fsf_req->qdio_req.sbal_response = sbal_idx;
-	fsf_req->qdio_req.qdio_inb_usage =
-		atomic_read(&qdio->resp_q.count);
 	zfcp_fsf_req_complete(fsf_req);
 
 	if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY))
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index 519083fd6e89..db8c85382dca 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -80,11 +80,15 @@
 #define FSF_REQUEST_SIZE_TOO_LARGE		0x00000061
 #define FSF_RESPONSE_SIZE_TOO_LARGE		0x00000062
 #define FSF_SBAL_MISMATCH			0x00000063
+#define FSF_INCONSISTENT_PROT_DATA		0x00000070
+#define FSF_INVALID_PROT_PARM			0x00000071
+#define FSF_BLOCK_GUARD_CHECK_FAILURE		0x00000081
+#define FSF_APP_TAG_CHECK_FAILURE		0x00000082
+#define FSF_REF_TAG_CHECK_FAILURE		0x00000083
 #define FSF_ADAPTER_STATUS_AVAILABLE		0x000000AD
 #define FSF_UNKNOWN_COMMAND			0x000000E2
 #define FSF_UNKNOWN_OP_SUBTYPE			0x000000E3
 #define FSF_INVALID_COMMAND_OPTION		0x000000E5
-/* #define FSF_ERROR				0x000000FF */
 
 #define FSF_PROT_STATUS_QUAL_SIZE		16
 #define FSF_STATUS_QUALIFIER_SIZE		16
@@ -147,18 +151,17 @@
 #define FSF_DATADIR_WRITE			0x00000001
 #define FSF_DATADIR_READ			0x00000002
 #define FSF_DATADIR_CMND			0x00000004
+#define FSF_DATADIR_DIF_WRITE_INSERT		0x00000009
+#define FSF_DATADIR_DIF_READ_STRIP		0x0000000a
+#define FSF_DATADIR_DIF_WRITE_CONVERT		0x0000000b
+#define FSF_DATADIR_DIF_READ_CONVERT		0X0000000c
+
+/* data protection control flags */
+#define FSF_APP_TAG_CHECK_ENABLE		0x10
 
 /* fc service class */
 #define FSF_CLASS_3				0x00000003
 
-/* SBAL chaining */
-#define ZFCP_FSF_MAX_SBALS_PER_REQ		36
-
-/* max. number of (data buffer) SBALEs in largest SBAL chain
- * request ID + QTCB in SBALE 0 + 1 of first SBAL in chain */
-#define ZFCP_FSF_MAX_SBALES_PER_REQ	\
-	(ZFCP_FSF_MAX_SBALS_PER_REQ * ZFCP_QDIO_MAX_SBALES_PER_SBAL - 2)
-
 /* logging space behind QTCB */
 #define FSF_QTCB_LOG_SIZE			1024
 
@@ -170,6 +173,8 @@
 #define FSF_FEATURE_ELS_CT_CHAINED_SBALS	0x00000020
 #define FSF_FEATURE_UPDATE_ALERT		0x00000100
 #define FSF_FEATURE_MEASUREMENT_DATA		0x00000200
+#define FSF_FEATURE_DIF_PROT_TYPE1		0x00010000
+#define FSF_FEATURE_DIX_PROT_TCPIP		0x00020000
 
 /* host connection features */
 #define FSF_FEATURE_NPIV_MODE			0x00000001
@@ -324,9 +329,14 @@ struct fsf_qtcb_header {
 struct fsf_qtcb_bottom_io {
 	u32 data_direction;
 	u32 service_class;
-	u8 res1[8];
+	u8 res1;
+	u8 data_prot_flags;
+	u16 app_tag_value;
+	u32 ref_tag_value;
 	u32 fcp_cmnd_length;
-	u8 res2[12];
+	u32 data_block_length;
+	u32 prot_data_length;
+	u8 res2[4];
 	u8 fcp_cmnd[FSF_FCP_CMND_SIZE];
 	u8 fcp_rsp[FSF_FCP_RSP_SIZE];
 	u8 res3[64];
@@ -352,6 +362,8 @@ struct fsf_qtcb_bottom_support {
 	u8 els[256];
 } __attribute__ ((packed));
 
+#define ZFCP_FSF_TIMER_INT_MASK	0x3FFF
+
 struct fsf_qtcb_bottom_config {
 	u32 lic_version;
 	u32 feature_selection;
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 6fa5e0453176..b2635759721c 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -30,12 +30,15 @@ static int zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbal)
 	return 0;
 }
 
-static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id)
+static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id,
+				    unsigned int qdio_err)
 {
 	struct zfcp_adapter *adapter = qdio->adapter;
 
 	dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n");
 
+	if (qdio_err & QDIO_ERROR_SLSB_STATE)
+		zfcp_qdio_siosl(adapter);
 	zfcp_erp_adapter_reopen(adapter,
 				ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
 				ZFCP_STATUS_COMMON_ERP_FAILED, id, NULL);
@@ -55,72 +58,47 @@ static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
 static inline void zfcp_qdio_account(struct zfcp_qdio *qdio)
 {
 	unsigned long long now, span;
-	int free, used;
+	int used;
 
 	spin_lock(&qdio->stat_lock);
 	now = get_clock_monotonic();
 	span = (now - qdio->req_q_time) >> 12;
-	free = atomic_read(&qdio->req_q.count);
-	used = QDIO_MAX_BUFFERS_PER_Q - free;
+	used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free);
 	qdio->req_q_util += used * span;
 	qdio->req_q_time = now;
 	spin_unlock(&qdio->stat_lock);
 }
 
 static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
-			      int queue_no, int first, int count,
+			      int queue_no, int idx, int count,
 			      unsigned long parm)
 {
 	struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
-	struct zfcp_qdio_queue *queue = &qdio->req_q;
 
 	if (unlikely(qdio_err)) {
-		zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, first,
-				  count);
-		zfcp_qdio_handler_error(qdio, "qdireq1");
+		zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, idx, count);
+		zfcp_qdio_handler_error(qdio, "qdireq1", qdio_err);
 		return;
 	}
 
 	/* cleanup all SBALs being program-owned now */
-	zfcp_qdio_zero_sbals(queue->sbal, first, count);
+	zfcp_qdio_zero_sbals(qdio->req_q, idx, count);
 
 	zfcp_qdio_account(qdio);
-	atomic_add(count, &queue->count);
+	atomic_add(count, &qdio->req_q_free);
 	wake_up(&qdio->req_q_wq);
 }
 
-static void zfcp_qdio_resp_put_back(struct zfcp_qdio *qdio, int processed)
-{
-	struct zfcp_qdio_queue *queue = &qdio->resp_q;
-	struct ccw_device *cdev = qdio->adapter->ccw_device;
-	u8 count, start = queue->first;
-	unsigned int retval;
-
-	count = atomic_read(&queue->count) + processed;
-
-	retval = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, start, count);
-
-	if (unlikely(retval)) {
-		atomic_set(&queue->count, count);
-		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdrpb_1", NULL);
-	} else {
-		queue->first += count;
-		queue->first %= QDIO_MAX_BUFFERS_PER_Q;
-		atomic_set(&queue->count, 0);
-	}
-}
-
 static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
-			       int queue_no, int first, int count,
+			       int queue_no, int idx, int count,
 			       unsigned long parm)
 {
 	struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
 	int sbal_idx, sbal_no;
 
 	if (unlikely(qdio_err)) {
-		zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, first,
-				  count);
-		zfcp_qdio_handler_error(qdio, "qdires1");
+		zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, idx, count);
+		zfcp_qdio_handler_error(qdio, "qdires1", qdio_err);
 		return;
 	}
 
@@ -129,25 +107,16 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
 	 * returned by QDIO layer
 	 */
 	for (sbal_no = 0; sbal_no < count; sbal_no++) {
-		sbal_idx = (first + sbal_no) % QDIO_MAX_BUFFERS_PER_Q;
+		sbal_idx = (idx + sbal_no) % QDIO_MAX_BUFFERS_PER_Q;
 		/* go through all SBALEs of SBAL */
 		zfcp_fsf_reqid_check(qdio, sbal_idx);
 	}
 
 	/*
-	 * put range of SBALs back to response queue
-	 * (including SBALs which have already been free before)
+	 * put SBALs back to response queue
 	 */
-	zfcp_qdio_resp_put_back(qdio, count);
-}
-
-static void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio,
-				 struct zfcp_qdio_req *q_req, int max_sbals)
-{
-	int count = atomic_read(&qdio->req_q.count);
-	count = min(count, max_sbals);
-	q_req->sbal_limit = (q_req->sbal_first + count - 1)
-			% QDIO_MAX_BUFFERS_PER_Q;
+	if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, idx, count))
+		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2", NULL);
 }
 
 static struct qdio_buffer_element *
@@ -173,6 +142,7 @@ zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
 
 	/* keep this requests number of SBALs up-to-date */
 	q_req->sbal_number++;
+	BUG_ON(q_req->sbal_number > ZFCP_QDIO_MAX_SBALS_PER_REQ);
 
 	/* start at first SBALE of new SBAL */
 	q_req->sbale_curr = 0;
@@ -193,17 +163,6 @@ zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
 	return zfcp_qdio_sbale_curr(qdio, q_req);
 }
 
-static void zfcp_qdio_undo_sbals(struct zfcp_qdio *qdio,
-				 struct zfcp_qdio_req *q_req)
-{
-	struct qdio_buffer **sbal = qdio->req_q.sbal;
-	int first = q_req->sbal_first;
-	int last = q_req->sbal_last;
-	int count = (last - first + QDIO_MAX_BUFFERS_PER_Q) %
-		QDIO_MAX_BUFFERS_PER_Q + 1;
-	zfcp_qdio_zero_sbals(sbal, first, count);
-}
-
 /**
  * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list
  * @qdio: pointer to struct zfcp_qdio
@@ -213,14 +172,11 @@ static void zfcp_qdio_undo_sbals(struct zfcp_qdio *qdio,
  * Returns: number of bytes, or error (negativ)
  */
 int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
-			    struct scatterlist *sg, int max_sbals)
+			    struct scatterlist *sg)
 {
 	struct qdio_buffer_element *sbale;
 	int bytes = 0;
 
-	/* figure out last allowed SBAL */
-	zfcp_qdio_sbal_limit(qdio, q_req, max_sbals);
-
 	/* set storage-block type for this request */
 	sbale = zfcp_qdio_sbale_req(qdio, q_req);
 	sbale->flags |= q_req->sbtype;
@@ -229,7 +185,8 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
 		sbale = zfcp_qdio_sbale_next(qdio, q_req);
 		if (!sbale) {
 			atomic_inc(&qdio->req_q_full);
-			zfcp_qdio_undo_sbals(qdio, q_req);
+			zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
+					     q_req->sbal_number);
 			return -EINVAL;
 		}
 
@@ -239,19 +196,13 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
 		bytes += sg->length;
 	}
 
-	/* assume that no other SBALEs are to follow in the same SBAL */
-	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
-	sbale->flags |= SBAL_FLAGS_LAST_ENTRY;
-
 	return bytes;
 }
 
 static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
 {
-	struct zfcp_qdio_queue *req_q = &qdio->req_q;
-
 	spin_lock_bh(&qdio->req_q_lock);
-	if (atomic_read(&req_q->count) ||
+	if (atomic_read(&qdio->req_q_free) ||
 	    !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
 		return 1;
 	spin_unlock_bh(&qdio->req_q_lock);
@@ -300,25 +251,25 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
  */
 int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
 {
-	struct zfcp_qdio_queue *req_q = &qdio->req_q;
-	int first = q_req->sbal_first;
-	int count = q_req->sbal_number;
 	int retval;
-	unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
+	u8 sbal_number = q_req->sbal_number;
 
 	zfcp_qdio_account(qdio);
 
-	retval = do_QDIO(qdio->adapter->ccw_device, qdio_flags, 0, first,
-			 count);
+	retval = do_QDIO(qdio->adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0,
+			 q_req->sbal_first, sbal_number);
+
 	if (unlikely(retval)) {
-		zfcp_qdio_zero_sbals(req_q->sbal, first, count);
+		zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
+				     sbal_number);
 		return retval;
 	}
 
 	/* account for transferred buffers */
-	atomic_sub(count, &req_q->count);
-	req_q->first += count;
-	req_q->first %= QDIO_MAX_BUFFERS_PER_Q;
+	atomic_sub(sbal_number, &qdio->req_q_free);
+	qdio->req_q_idx += sbal_number;
+	qdio->req_q_idx %= QDIO_MAX_BUFFERS_PER_Q;
+
 	return 0;
 }
 
@@ -331,6 +282,7 @@ static void zfcp_qdio_setup_init_data(struct qdio_initialize *id,
 	id->q_format = QDIO_ZFCP_QFMT;
 	memcpy(id->adapter_name, dev_name(&id->cdev->dev), 8);
 	ASCEBC(id->adapter_name, 8);
+	id->qib_rflags = QIB_RFLAGS_ENABLE_DATA_DIV;
 	id->qib_param_field_format = 0;
 	id->qib_param_field = NULL;
 	id->input_slib_elements = NULL;
@@ -340,10 +292,10 @@ static void zfcp_qdio_setup_init_data(struct qdio_initialize *id,
 	id->input_handler = zfcp_qdio_int_resp;
 	id->output_handler = zfcp_qdio_int_req;
 	id->int_parm = (unsigned long) qdio;
-	id->input_sbal_addr_array = (void **) (qdio->resp_q.sbal);
-	id->output_sbal_addr_array = (void **) (qdio->req_q.sbal);
-
+	id->input_sbal_addr_array = (void **) (qdio->res_q);
+	id->output_sbal_addr_array = (void **) (qdio->req_q);
 }
+
 /**
  * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data
  * @adapter: pointer to struct zfcp_adapter
@@ -354,8 +306,8 @@ static int zfcp_qdio_allocate(struct zfcp_qdio *qdio)
 {
 	struct qdio_initialize init_data;
 
-	if (zfcp_qdio_buffers_enqueue(qdio->req_q.sbal) ||
-	    zfcp_qdio_buffers_enqueue(qdio->resp_q.sbal))
+	if (zfcp_qdio_buffers_enqueue(qdio->req_q) ||
+	    zfcp_qdio_buffers_enqueue(qdio->res_q))
 		return -ENOMEM;
 
 	zfcp_qdio_setup_init_data(&init_data, qdio);
@@ -369,34 +321,30 @@ static int zfcp_qdio_allocate(struct zfcp_qdio *qdio)
  */
 void zfcp_qdio_close(struct zfcp_qdio *qdio)
 {
-	struct zfcp_qdio_queue *req_q;
-	int first, count;
+	struct zfcp_adapter *adapter = qdio->adapter;
+	int idx, count;
 
-	if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
+	if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
 		return;
 
 	/* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
-	req_q = &qdio->req_q;
 	spin_lock_bh(&qdio->req_q_lock);
-	atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);
+	atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
 	spin_unlock_bh(&qdio->req_q_lock);
 
 	wake_up(&qdio->req_q_wq);
 
-	qdio_shutdown(qdio->adapter->ccw_device,
-		      QDIO_FLAG_CLEANUP_USING_CLEAR);
+	qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);
 
 	/* cleanup used outbound sbals */
-	count = atomic_read(&req_q->count);
+	count = atomic_read(&qdio->req_q_free);
 	if (count < QDIO_MAX_BUFFERS_PER_Q) {
-		first = (req_q->first + count) % QDIO_MAX_BUFFERS_PER_Q;
+		idx = (qdio->req_q_idx + count) % QDIO_MAX_BUFFERS_PER_Q;
 		count = QDIO_MAX_BUFFERS_PER_Q - count;
-		zfcp_qdio_zero_sbals(req_q->sbal, first, count);
+		zfcp_qdio_zero_sbals(qdio->req_q, idx, count);
 	}
-	req_q->first = 0;
-	atomic_set(&req_q->count, 0);
-	qdio->resp_q.first = 0;
-	atomic_set(&qdio->resp_q.count, 0);
+	qdio->req_q_idx = 0;
+	atomic_set(&qdio->req_q_free, 0);
 }
 
 /**
@@ -408,34 +356,45 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
408{ 356{
409 struct qdio_buffer_element *sbale; 357 struct qdio_buffer_element *sbale;
410 struct qdio_initialize init_data; 358 struct qdio_initialize init_data;
411 struct ccw_device *cdev = qdio->adapter->ccw_device; 359 struct zfcp_adapter *adapter = qdio->adapter;
360 struct ccw_device *cdev = adapter->ccw_device;
361 struct qdio_ssqd_desc ssqd;
412 int cc; 362 int cc;
413 363
414 if (atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP) 364 if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
415 return -EIO; 365 return -EIO;
416 366
367 atomic_clear_mask(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
368 &qdio->adapter->status);
369
417 zfcp_qdio_setup_init_data(&init_data, qdio); 370 zfcp_qdio_setup_init_data(&init_data, qdio);
418 371
419 if (qdio_establish(&init_data)) 372 if (qdio_establish(&init_data))
420 goto failed_establish; 373 goto failed_establish;
421 374
375 if (qdio_get_ssqd_desc(init_data.cdev, &ssqd))
376 goto failed_qdio;
377
378 if (ssqd.qdioac2 & CHSC_AC2_DATA_DIV_ENABLED)
379 atomic_set_mask(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED,
380 &qdio->adapter->status);
381
422 if (qdio_activate(cdev)) 382 if (qdio_activate(cdev))
423 goto failed_qdio; 383 goto failed_qdio;
424 384
425 for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) { 385 for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
426 sbale = &(qdio->resp_q.sbal[cc]->element[0]); 386 sbale = &(qdio->res_q[cc]->element[0]);
427 sbale->length = 0; 387 sbale->length = 0;
428 sbale->flags = SBAL_FLAGS_LAST_ENTRY; 388 sbale->flags = SBAL_FLAGS_LAST_ENTRY;
429 sbale->addr = NULL; 389 sbale->addr = NULL;
430 } 390 }
431 391
432 if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, 392 if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q))
433 QDIO_MAX_BUFFERS_PER_Q))
434 goto failed_qdio; 393 goto failed_qdio;
435 394
 436 /* set index of first available SBALS / number of available SBALS */ 395 /* set index of first available SBALS / number of available SBALS */
437 qdio->req_q.first = 0; 396 qdio->req_q_idx = 0;
438 atomic_set(&qdio->req_q.count, QDIO_MAX_BUFFERS_PER_Q); 397 atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
439 398
440 return 0; 399 return 0;
441 400
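The reworked zfcp_qdio_open() requests data division via qib_rflags at establish time and then reads the SSQD descriptor back to check whether the channel actually granted it before setting ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED. A hedged user-space analogue of that request-then-verify pattern follows; all names and bit values are stand-ins, not the real qdio interface.

#include <stdio.h>

#define REQ_DATA_DIV	0x01	/* stands in for QIB_RFLAGS_ENABLE_DATA_DIV */
#define AC2_DATA_DIV_ON	0x02	/* stands in for CHSC_AC2_DATA_DIV_ENABLED */

/* fake establish step: may or may not grant the requested feature */
static unsigned int fake_establish(unsigned int requested_rflags)
{
	return (requested_rflags & REQ_DATA_DIV) ? AC2_DATA_DIV_ON : 0;
}

int main(void)
{
	unsigned int qdioac2 = fake_establish(REQ_DATA_DIV);

	if (qdioac2 & AC2_DATA_DIV_ON)
		printf("data division enabled by the channel\n");
	else
		printf("data division not available\n");
	return 0;
}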
@@ -449,7 +408,6 @@ failed_establish:
449 408
450void zfcp_qdio_destroy(struct zfcp_qdio *qdio) 409void zfcp_qdio_destroy(struct zfcp_qdio *qdio)
451{ 410{
452 struct qdio_buffer **sbal_req, **sbal_resp;
453 int p; 411 int p;
454 412
455 if (!qdio) 413 if (!qdio)
@@ -458,12 +416,9 @@ void zfcp_qdio_destroy(struct zfcp_qdio *qdio)
458 if (qdio->adapter->ccw_device) 416 if (qdio->adapter->ccw_device)
459 qdio_free(qdio->adapter->ccw_device); 417 qdio_free(qdio->adapter->ccw_device);
460 418
461 sbal_req = qdio->req_q.sbal;
462 sbal_resp = qdio->resp_q.sbal;
463
464 for (p = 0; p < QDIO_MAX_BUFFERS_PER_Q; p += QBUFF_PER_PAGE) { 419 for (p = 0; p < QDIO_MAX_BUFFERS_PER_Q; p += QBUFF_PER_PAGE) {
465 free_page((unsigned long) sbal_req[p]); 420 free_page((unsigned long) qdio->req_q[p]);
466 free_page((unsigned long) sbal_resp[p]); 421 free_page((unsigned long) qdio->res_q[p]);
467 } 422 }
468 423
469 kfree(qdio); 424 kfree(qdio);
@@ -491,3 +446,26 @@ int zfcp_qdio_setup(struct zfcp_adapter *adapter)
491 return 0; 446 return 0;
492} 447}
493 448
449/**
450 * zfcp_qdio_siosl - Trigger logging in FCP channel
451 * @adapter: The zfcp_adapter where to trigger logging
452 *
453 * Call the cio siosl function to trigger hardware logging. This
454 * wrapper function sets a flag to ensure hardware logging is only
455 * triggered once before going through qdio shutdown.
456 *
457 * The triggers are always run from qdio tasklet context, so no
458 * additional synchronization is necessary.
459 */
460void zfcp_qdio_siosl(struct zfcp_adapter *adapter)
461{
462 int rc;
463
464 if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_SIOSL_ISSUED)
465 return;
466
467 rc = ccw_device_siosl(adapter->ccw_device);
468 if (!rc)
469 atomic_set_mask(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
470 &adapter->status);
471}
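The comment above describes a trigger-once pattern: a status flag gates ccw_device_siosl() until zfcp_qdio_open() clears ZFCP_STATUS_ADAPTER_SIOSL_ISSUED again. A small user-space sketch of that pattern, with plain flags instead of the driver's atomic status mask:

#include <stdio.h>

#define STATUS_SIOSL_ISSUED 0x1

static unsigned int adapter_status;

/* stands in for ccw_device_siosl(); 0 means success */
static int trigger_hw_logging(void)
{
	return 0;
}

static void siosl_once(void)
{
	if (adapter_status & STATUS_SIOSL_ISSUED)
		return;				/* already triggered */
	if (!trigger_hw_logging())
		adapter_status |= STATUS_SIOSL_ISSUED;
}

int main(void)
{
	siosl_once();		/* triggers hardware logging */
	siosl_once();		/* no-op until the flag is cleared again */
	printf("status 0x%x\n", adapter_status);
	return 0;
}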
diff --git a/drivers/s390/scsi/zfcp_qdio.h b/drivers/s390/scsi/zfcp_qdio.h
index 138fba577b48..2297d8d3e947 100644
--- a/drivers/s390/scsi/zfcp_qdio.h
+++ b/drivers/s390/scsi/zfcp_qdio.h
@@ -19,22 +19,20 @@
19/* index of last SBALE (with respect to DMQ bug workaround) */ 19/* index of last SBALE (with respect to DMQ bug workaround) */
20#define ZFCP_QDIO_LAST_SBALE_PER_SBAL (ZFCP_QDIO_MAX_SBALES_PER_SBAL - 1) 20#define ZFCP_QDIO_LAST_SBALE_PER_SBAL (ZFCP_QDIO_MAX_SBALES_PER_SBAL - 1)
21 21
22/** 22/* Max SBALS for chaining */
23 * struct zfcp_qdio_queue - qdio queue buffer, zfcp index and free count 23#define ZFCP_QDIO_MAX_SBALS_PER_REQ 36
24 * @sbal: qdio buffers 24
25 * @first: index of next free buffer in queue 25/* max. number of (data buffer) SBALEs in largest SBAL chain
26 * @count: number of free buffers in queue 26 * request ID + QTCB in SBALE 0 + 1 of first SBAL in chain */
27 */ 27#define ZFCP_QDIO_MAX_SBALES_PER_REQ \
28struct zfcp_qdio_queue { 28 (ZFCP_QDIO_MAX_SBALS_PER_REQ * ZFCP_QDIO_MAX_SBALES_PER_SBAL - 2)
29 struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q];
30 u8 first;
31 atomic_t count;
32};
33 29
34/** 30/**
35 * struct zfcp_qdio - basic qdio data structure 31 * struct zfcp_qdio - basic qdio data structure
36 * @resp_q: response queue 32 * @res_q: response queue
37 * @req_q: request queue 33 * @req_q: request queue
34 * @req_q_idx: index of next free buffer
35 * @req_q_free: number of free buffers in queue
38 * @stat_lock: lock to protect req_q_util and req_q_time 36 * @stat_lock: lock to protect req_q_util and req_q_time
39 * @req_q_lock: lock to serialize access to request queue 37 * @req_q_lock: lock to serialize access to request queue
40 * @req_q_time: time of last fill level change 38 * @req_q_time: time of last fill level change
@@ -44,8 +42,10 @@ struct zfcp_qdio_queue {
44 * @adapter: adapter used in conjunction with this qdio structure 42 * @adapter: adapter used in conjunction with this qdio structure
45 */ 43 */
46struct zfcp_qdio { 44struct zfcp_qdio {
47 struct zfcp_qdio_queue resp_q; 45 struct qdio_buffer *res_q[QDIO_MAX_BUFFERS_PER_Q];
48 struct zfcp_qdio_queue req_q; 46 struct qdio_buffer *req_q[QDIO_MAX_BUFFERS_PER_Q];
47 u8 req_q_idx;
48 atomic_t req_q_free;
49 spinlock_t stat_lock; 49 spinlock_t stat_lock;
50 spinlock_t req_q_lock; 50 spinlock_t req_q_lock;
51 unsigned long long req_q_time; 51 unsigned long long req_q_time;
@@ -65,7 +65,6 @@ struct zfcp_qdio {
65 * @sbale_curr: current sbale at creation of this request 65 * @sbale_curr: current sbale at creation of this request
66 * @sbal_response: sbal used in interrupt 66 * @sbal_response: sbal used in interrupt
67 * @qdio_outb_usage: usage of outbound queue 67 * @qdio_outb_usage: usage of outbound queue
68 * @qdio_inb_usage: usage of inbound queue
69 */ 68 */
70struct zfcp_qdio_req { 69struct zfcp_qdio_req {
71 u32 sbtype; 70 u32 sbtype;
@@ -76,22 +75,9 @@ struct zfcp_qdio_req {
76 u8 sbale_curr; 75 u8 sbale_curr;
77 u8 sbal_response; 76 u8 sbal_response;
78 u16 qdio_outb_usage; 77 u16 qdio_outb_usage;
79 u16 qdio_inb_usage;
80}; 78};
81 79
82/** 80/**
83 * zfcp_qdio_sbale - return pointer to sbale in qdio queue
84 * @q: queue where to find sbal
85 * @sbal_idx: sbal index in queue
86 * @sbale_idx: sbale index in sbal
87 */
88static inline struct qdio_buffer_element *
89zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx)
90{
91 return &q->sbal[sbal_idx]->element[sbale_idx];
92}
93
94/**
95 * zfcp_qdio_sbale_req - return pointer to sbale on req_q for a request 81 * zfcp_qdio_sbale_req - return pointer to sbale on req_q for a request
96 * @qdio: pointer to struct zfcp_qdio 82 * @qdio: pointer to struct zfcp_qdio
 97 * @q_req: pointer to struct zfcp_qdio_req 83 * @q_req: pointer to struct zfcp_qdio_req
@@ -100,7 +86,7 @@ zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx)
100static inline struct qdio_buffer_element * 86static inline struct qdio_buffer_element *
101zfcp_qdio_sbale_req(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req) 87zfcp_qdio_sbale_req(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
102{ 88{
103 return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last, 0); 89 return &qdio->req_q[q_req->sbal_last]->element[0];
104} 90}
105 91
106/** 92/**
@@ -112,8 +98,7 @@ zfcp_qdio_sbale_req(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
112static inline struct qdio_buffer_element * 98static inline struct qdio_buffer_element *
113zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req) 99zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
114{ 100{
115 return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last, 101 return &qdio->req_q[q_req->sbal_last]->element[q_req->sbale_curr];
116 q_req->sbale_curr);
117} 102}
118 103
119/** 104/**
@@ -134,21 +119,25 @@ void zfcp_qdio_req_init(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
134 unsigned long req_id, u32 sbtype, void *data, u32 len) 119 unsigned long req_id, u32 sbtype, void *data, u32 len)
135{ 120{
136 struct qdio_buffer_element *sbale; 121 struct qdio_buffer_element *sbale;
122 int count = min(atomic_read(&qdio->req_q_free),
123 ZFCP_QDIO_MAX_SBALS_PER_REQ);
137 124
138 q_req->sbal_first = q_req->sbal_last = qdio->req_q.first; 125 q_req->sbal_first = q_req->sbal_last = qdio->req_q_idx;
139 q_req->sbal_number = 1; 126 q_req->sbal_number = 1;
140 q_req->sbtype = sbtype; 127 q_req->sbtype = sbtype;
128 q_req->sbale_curr = 1;
129 q_req->sbal_limit = (q_req->sbal_first + count - 1)
130 % QDIO_MAX_BUFFERS_PER_Q;
141 131
142 sbale = zfcp_qdio_sbale_req(qdio, q_req); 132 sbale = zfcp_qdio_sbale_req(qdio, q_req);
143 sbale->addr = (void *) req_id; 133 sbale->addr = (void *) req_id;
144 sbale->flags |= SBAL_FLAGS0_COMMAND; 134 sbale->flags = SBAL_FLAGS0_COMMAND | sbtype;
145 sbale->flags |= sbtype;
146 135
147 q_req->sbale_curr = 1; 136 if (unlikely(!data))
137 return;
148 sbale++; 138 sbale++;
149 sbale->addr = data; 139 sbale->addr = data;
150 if (likely(data)) 140 sbale->length = len;
151 sbale->length = len;
152} 141}
153 142
154/** 143/**
@@ -210,4 +199,36 @@ void zfcp_qdio_skip_to_last_sbale(struct zfcp_qdio_req *q_req)
210 q_req->sbale_curr = ZFCP_QDIO_LAST_SBALE_PER_SBAL; 199 q_req->sbale_curr = ZFCP_QDIO_LAST_SBALE_PER_SBAL;
211} 200}
212 201
202/**
203 * zfcp_qdio_sbal_limit - set the sbal limit for a request in q_req
204 * @qdio: pointer to struct zfcp_qdio
205 * @q_req: The current zfcp_qdio_req
206 * @max_sbals: maximum number of SBALs allowed
207 */
208static inline
209void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio,
210 struct zfcp_qdio_req *q_req, int max_sbals)
211{
212 int count = min(atomic_read(&qdio->req_q_free), max_sbals);
213
214 q_req->sbal_limit = (q_req->sbal_first + count - 1) %
215 QDIO_MAX_BUFFERS_PER_Q;
216}
217
218/**
219 * zfcp_qdio_set_data_div - set data division count
220 * @qdio: pointer to struct zfcp_qdio
221 * @q_req: The current zfcp_qdio_req
222 * @count: The data division count
223 */
224static inline
225void zfcp_qdio_set_data_div(struct zfcp_qdio *qdio,
226 struct zfcp_qdio_req *q_req, u32 count)
227{
228 struct qdio_buffer_element *sbale;
229
230 sbale = &qdio->req_q[q_req->sbal_first]->element[0];
231 sbale->length = count;
232}
233
213#endif /* ZFCP_QDIO_H */ 234#endif /* ZFCP_QDIO_H */
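The new inline helpers in this header cap a request at min(free SBALs, per-request maximum) and record the last usable SBAL index with ring wrap-around. A self-contained sketch of that limit computation; QDIO_MAX_BUFFERS_PER_Q is assumed to be 128, and the per-request cap of 36 comes from ZFCP_QDIO_MAX_SBALS_PER_REQ above.

#include <stdio.h>

#define QDIO_MAX_BUFFERS_PER_Q	128	/* assumed ring size */
#define MAX_SBALS_PER_REQ	36	/* ZFCP_QDIO_MAX_SBALS_PER_REQ */

static int min_int(int a, int b)
{
	return a < b ? a : b;
}

int main(void)
{
	int req_q_free = 10;		/* free SBALs right now */
	unsigned int sbal_first = 125;	/* first SBAL of this request */

	/* mirrors zfcp_qdio_sbal_limit()/zfcp_qdio_req_init() */
	int count = min_int(req_q_free, MAX_SBALS_PER_REQ);
	unsigned int sbal_limit = (sbal_first + count - 1)
				  % QDIO_MAX_BUFFERS_PER_Q;

	printf("request may span SBALs %u..%u (%d total)\n",
	       sbal_first, sbal_limit, count);
	return 0;
}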
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index be5d2c60453d..cb000c9833bb 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -12,6 +12,7 @@
12#include <linux/types.h> 12#include <linux/types.h>
13#include <linux/slab.h> 13#include <linux/slab.h>
14#include <scsi/fc/fc_fcp.h> 14#include <scsi/fc/fc_fcp.h>
15#include <scsi/scsi_eh.h>
15#include <asm/atomic.h> 16#include <asm/atomic.h>
16#include "zfcp_ext.h" 17#include "zfcp_ext.h"
17#include "zfcp_dbf.h" 18#include "zfcp_dbf.h"
@@ -22,6 +23,13 @@ static unsigned int default_depth = 32;
22module_param_named(queue_depth, default_depth, uint, 0600); 23module_param_named(queue_depth, default_depth, uint, 0600);
23MODULE_PARM_DESC(queue_depth, "Default queue depth for new SCSI devices"); 24MODULE_PARM_DESC(queue_depth, "Default queue depth for new SCSI devices");
24 25
26static bool enable_dif;
27
28#ifdef CONFIG_ZFCP_DIF
29module_param_named(dif, enable_dif, bool, 0600);
30MODULE_PARM_DESC(dif, "Enable DIF/DIX data integrity support");
31#endif
32
25static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth, 33static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth,
26 int reason) 34 int reason)
27{ 35{
@@ -506,8 +514,10 @@ static void zfcp_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
 506 * @rport: The FC rport where to terminate I/O 514 * @rport: The FC rport where to terminate I/O
507 * 515 *
508 * Abort all pending SCSI commands for a port by closing the 516 * Abort all pending SCSI commands for a port by closing the
509 * port. Using a reopen avoiding a conflict with a shutdown 517 * port. Using a reopen avoids a conflict with a shutdown
510 * overwriting a reopen. 518 * overwriting a reopen. The "forced" ensures that a disappeared port
519 * is not opened again as valid due to the cached plogi data in
520 * non-NPIV mode.
511 */ 521 */
512static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport) 522static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport)
513{ 523{
@@ -519,11 +529,25 @@ static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport)
519 port = zfcp_get_port_by_wwpn(adapter, rport->port_name); 529 port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
520 530
521 if (port) { 531 if (port) {
522 zfcp_erp_port_reopen(port, 0, "sctrpi1", NULL); 532 zfcp_erp_port_forced_reopen(port, 0, "sctrpi1", NULL);
523 put_device(&port->dev); 533 put_device(&port->dev);
524 } 534 }
525} 535}
526 536
537static void zfcp_scsi_queue_unit_register(struct zfcp_port *port)
538{
539 struct zfcp_unit *unit;
540
541 read_lock_irq(&port->unit_list_lock);
542 list_for_each_entry(unit, &port->unit_list, list) {
543 get_device(&unit->dev);
544 if (scsi_queue_work(port->adapter->scsi_host,
545 &unit->scsi_work) <= 0)
546 put_device(&unit->dev);
547 }
548 read_unlock_irq(&port->unit_list_lock);
549}
550
527static void zfcp_scsi_rport_register(struct zfcp_port *port) 551static void zfcp_scsi_rport_register(struct zfcp_port *port)
528{ 552{
529 struct fc_rport_identifiers ids; 553 struct fc_rport_identifiers ids;
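zfcp_scsi_queue_unit_register() above takes a reference on each unit before queueing its scsi_work and drops it immediately if scsi_queue_work() refuses the item (return value <= 0). A user-space analogue of that take-ref/hand-off/drop-on-failure pattern; the types and helpers are invented for illustration.

#include <stdio.h>

struct fake_unit {
	int refcount;
};

static void get_unit(struct fake_unit *u) { u->refcount++; }
static void put_unit(struct fake_unit *u) { u->refcount--; }

/* stands in for scsi_queue_work(); <= 0 means nothing was queued */
static int queue_scan_work(struct fake_unit *u, int queue_is_up)
{
	(void)u;
	return queue_is_up ? 1 : 0;
}

int main(void)
{
	struct fake_unit u = { .refcount = 1 };

	get_unit(&u);			/* reference travels with the work item */
	if (queue_scan_work(&u, 0) <= 0)
		put_unit(&u);		/* hand-off failed, drop it here */

	printf("refcount now %d\n", u.refcount);
	return 0;
}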
@@ -548,6 +572,9 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port)
548 rport->maxframe_size = port->maxframe_size; 572 rport->maxframe_size = port->maxframe_size;
549 rport->supported_classes = port->supported_classes; 573 rport->supported_classes = port->supported_classes;
550 port->rport = rport; 574 port->rport = rport;
575 port->starget_id = rport->scsi_target_id;
576
577 zfcp_scsi_queue_unit_register(port);
551} 578}
552 579
553static void zfcp_scsi_rport_block(struct zfcp_port *port) 580static void zfcp_scsi_rport_block(struct zfcp_port *port)
@@ -610,24 +637,74 @@ void zfcp_scsi_rport_work(struct work_struct *work)
610 put_device(&port->dev); 637 put_device(&port->dev);
611} 638}
612 639
613 640/**
614void zfcp_scsi_scan(struct work_struct *work) 641 * zfcp_scsi_scan - Register LUN with SCSI midlayer
642 * @unit: The LUN/unit to register
643 */
644void zfcp_scsi_scan(struct zfcp_unit *unit)
615{ 645{
616 struct zfcp_unit *unit = container_of(work, struct zfcp_unit, 646 struct fc_rport *rport = unit->port->rport;
617 scsi_work);
618 struct fc_rport *rport;
619
620 flush_work(&unit->port->rport_work);
621 rport = unit->port->rport;
622 647
623 if (rport && rport->port_state == FC_PORTSTATE_ONLINE) 648 if (rport && rport->port_state == FC_PORTSTATE_ONLINE)
624 scsi_scan_target(&rport->dev, 0, rport->scsi_target_id, 649 scsi_scan_target(&rport->dev, 0, rport->scsi_target_id,
625 scsilun_to_int((struct scsi_lun *) 650 scsilun_to_int((struct scsi_lun *)
626 &unit->fcp_lun), 0); 651 &unit->fcp_lun), 0);
652}
627 653
654void zfcp_scsi_scan_work(struct work_struct *work)
655{
656 struct zfcp_unit *unit = container_of(work, struct zfcp_unit,
657 scsi_work);
658
659 zfcp_scsi_scan(unit);
628 put_device(&unit->dev); 660 put_device(&unit->dev);
629} 661}
630 662
663/**
664 * zfcp_scsi_set_prot - Configure DIF/DIX support in scsi_host
665 * @adapter: The adapter where to configure DIF/DIX for the SCSI host
666 */
667void zfcp_scsi_set_prot(struct zfcp_adapter *adapter)
668{
669 unsigned int mask = 0;
670 unsigned int data_div;
671 struct Scsi_Host *shost = adapter->scsi_host;
672
673 data_div = atomic_read(&adapter->status) &
674 ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED;
675
676 if (enable_dif &&
677 adapter->adapter_features & FSF_FEATURE_DIF_PROT_TYPE1)
678 mask |= SHOST_DIF_TYPE1_PROTECTION;
679
680 if (enable_dif && data_div &&
681 adapter->adapter_features & FSF_FEATURE_DIX_PROT_TCPIP) {
682 mask |= SHOST_DIX_TYPE1_PROTECTION;
683 scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP);
684 shost->sg_tablesize = ZFCP_QDIO_MAX_SBALES_PER_REQ / 2;
685 shost->max_sectors = ZFCP_QDIO_MAX_SBALES_PER_REQ * 8 / 2;
686 }
687
688 scsi_host_set_prot(shost, mask);
689}
690
691/**
692 * zfcp_scsi_dif_sense_error - Report DIF/DIX error as driver sense error
693 * @scmd: The SCSI command to report the error for
694 * @ascq: The ASCQ to put in the sense buffer
695 *
696 * See the error handling in sd_done for the sense codes used here.
697 * Set DID_SOFT_ERROR to retry the request, if possible.
698 */
699void zfcp_scsi_dif_sense_error(struct scsi_cmnd *scmd, int ascq)
700{
701 scsi_build_sense_buffer(1, scmd->sense_buffer,
702 ILLEGAL_REQUEST, 0x10, ascq);
703 set_driver_byte(scmd, DRIVER_SENSE);
704 scmd->result |= SAM_STAT_CHECK_CONDITION;
705 set_host_byte(scmd, DID_SOFT_ERROR);
706}
707
631struct fc_function_template zfcp_transport_functions = { 708struct fc_function_template zfcp_transport_functions = {
632 .show_starget_port_id = 1, 709 .show_starget_port_id = 1,
633 .show_starget_port_name = 1, 710 .show_starget_port_name = 1,
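zfcp_scsi_set_prot() above enables DIF type 1 protection when the FSF feature bit is present, and additionally enables DIX with IP guard only when the adapter also reports active data division; in that case the scatter-gather limits are halved. A sketch of just that decision logic with stand-in flag values (the real SHOST_*/FSF_* constants live in the SCSI and zfcp headers):

#include <stdio.h>

#define FEAT_DIF_TYPE1	0x1	/* stands in for FSF_FEATURE_DIF_PROT_TYPE1 */
#define FEAT_DIX_TCPIP	0x2	/* stands in for FSF_FEATURE_DIX_PROT_TCPIP */
#define PROT_DIF_TYPE1	0x1	/* stands in for SHOST_DIF_TYPE1_PROTECTION */
#define PROT_DIX_TYPE1	0x2	/* stands in for SHOST_DIX_TYPE1_PROTECTION */

int main(void)
{
	int enable_dif = 1;	/* the new "dif" module parameter */
	int data_div = 1;	/* adapter reported data division active */
	unsigned int features = FEAT_DIF_TYPE1 | FEAT_DIX_TCPIP;
	unsigned int mask = 0;

	if (enable_dif && (features & FEAT_DIF_TYPE1))
		mask |= PROT_DIF_TYPE1;

	if (enable_dif && data_div && (features & FEAT_DIX_TCPIP))
		mask |= PROT_DIX_TYPE1;	/* DIX also halves sg_tablesize */

	printf("protection mask 0x%x\n", mask);
	return 0;
}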
@@ -677,11 +754,11 @@ struct zfcp_data zfcp_data = {
677 .eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler, 754 .eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler,
678 .can_queue = 4096, 755 .can_queue = 4096,
679 .this_id = -1, 756 .this_id = -1,
680 .sg_tablesize = ZFCP_FSF_MAX_SBALES_PER_REQ, 757 .sg_tablesize = ZFCP_QDIO_MAX_SBALES_PER_REQ,
681 .cmd_per_lun = 1, 758 .cmd_per_lun = 1,
682 .use_clustering = 1, 759 .use_clustering = 1,
683 .sdev_attrs = zfcp_sysfs_sdev_attrs, 760 .sdev_attrs = zfcp_sysfs_sdev_attrs,
684 .max_sectors = (ZFCP_FSF_MAX_SBALES_PER_REQ * 8), 761 .max_sectors = (ZFCP_QDIO_MAX_SBALES_PER_REQ * 8),
685 .dma_boundary = ZFCP_QDIO_SBALE_LEN - 1, 762 .dma_boundary = ZFCP_QDIO_SBALE_LEN - 1,
686 .shost_attrs = zfcp_sysfs_shost_attrs, 763 .shost_attrs = zfcp_sysfs_shost_attrs,
687 }, 764 },
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index f5f60698dc4c..b4561c86e230 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -275,7 +275,7 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
275 275
276 zfcp_erp_unit_reopen(unit, 0, "syuas_1", NULL); 276 zfcp_erp_unit_reopen(unit, 0, "syuas_1", NULL);
277 zfcp_erp_wait(unit->port->adapter); 277 zfcp_erp_wait(unit->port->adapter);
278 flush_work(&unit->scsi_work); 278 zfcp_scsi_scan(unit);
279out: 279out:
280 put_device(&port->dev); 280 put_device(&port->dev);
281 return retval ? retval : (ssize_t) count; 281 return retval ? retval : (ssize_t) count;
@@ -290,6 +290,7 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
290 struct zfcp_unit *unit; 290 struct zfcp_unit *unit;
291 u64 fcp_lun; 291 u64 fcp_lun;
292 int retval = -EINVAL; 292 int retval = -EINVAL;
293 struct scsi_device *sdev;
293 294
294 if (!(port && get_device(&port->dev))) 295 if (!(port && get_device(&port->dev)))
295 return -EBUSY; 296 return -EBUSY;
@@ -303,8 +304,13 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
303 else 304 else
304 retval = 0; 305 retval = 0;
305 306
306 /* wait for possible timeout during SCSI probe */ 307 sdev = scsi_device_lookup(port->adapter->scsi_host, 0,
307 flush_work(&unit->scsi_work); 308 port->starget_id,
309 scsilun_to_int((struct scsi_lun *)&fcp_lun));
310 if (sdev) {
311 scsi_remove_device(sdev);
312 scsi_device_put(sdev);
313 }
308 314
309 write_lock_irq(&port->unit_list_lock); 315 write_lock_irq(&port->unit_list_lock);
310 list_del(&unit->list); 316 list_del(&unit->list);