author      Linus Torvalds <torvalds@linux-foundation.org>   2009-09-14 20:53:36 -0400
committer   Linus Torvalds <torvalds@linux-foundation.org>   2009-09-14 20:53:36 -0400
commit      39695224bd84dc4be29abad93a0ec232a16fc519 (patch)
tree        2bfa5cb50788a4c8be9f2e9f4412e47a565f4508 /drivers/s390
parent      a9bbd210a44102cc50b30a5f3d111dbf5f2f9cd4 (diff)
parent      ea038f63ac52439e7816295fa6064fe95e6c1f51 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (209 commits)
[SCSI] fix oops during scsi scanning
[SCSI] libsrp: fix memory leak in srp_ring_free()
[SCSI] libiscsi, bnx2i: make bound ep check common
[SCSI] libiscsi: add completion function for drivers that do not need pdu processing
[SCSI] scsi_dh_rdac: changes for rdac debug logging
[SCSI] scsi_dh_rdac: changes to collect the rdac debug information during the initialization
[SCSI] scsi_dh_rdac: move the init code from rdac_activate to rdac_bus_attach
[SCSI] sg: fix oops in the error path in sg_build_indirect()
[SCSI] mptsas : Bump version to 3.04.12
[SCSI] mptsas : FW event thread and scsi mid layer deadlock in SYNCHRONIZE CACHE command
[SCSI] mptsas : Send DID_NO_CONNECT for pending IOs of removed device
[SCSI] mptsas : PAE Kernel more than 4 GB kernel panic
[SCSI] mptsas : NULL pointer on big endian systems causing Expander not to tear off
[SCSI] mptsas : Sanity check for phyinfo is added
[SCSI] scsi_dh_rdac: Add support for Sun StorageTek ST2500, ST2510 and ST2530
[SCSI] pmcraid: PMC-Sierra MaxRAID driver to support 6Gb/s SAS RAID controller
[SCSI] qla2xxx: Update version number to 8.03.01-k6.
[SCSI] qla2xxx: Properly delete rports attached to a vport.
[SCSI] qla2xxx: Correct various NPIV issues.
[SCSI] qla2xxx: Correct qla2x00_eh_wait_on_command() to wait correctly.
...
Diffstat (limited to 'drivers/s390')
-rw-r--r--  drivers/s390/scsi/zfcp_aux.c   | 288
-rw-r--r--  drivers/s390/scsi/zfcp_ccw.c   |  94
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.c   | 544
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.h   | 175
-rw-r--r--  drivers/s390/scsi/zfcp_def.h   | 183
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c   | 155
-rw-r--r--  drivers/s390/scsi/zfcp_ext.h   | 102
-rw-r--r--  drivers/s390/scsi/zfcp_fc.c    | 176
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c   | 635
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.h   |   3
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.c  | 369
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c  |  73
-rw-r--r--  drivers/s390/scsi/zfcp_sysfs.c |  34
13 files changed, 1492 insertions, 1339 deletions
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 2ccbd185a5fb..1be6bf7e8ce6 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -42,6 +42,12 @@ static char *init_device; | |||
42 | module_param_named(device, init_device, charp, 0400); | 42 | module_param_named(device, init_device, charp, 0400); |
43 | MODULE_PARM_DESC(device, "specify initial device"); | 43 | MODULE_PARM_DESC(device, "specify initial device"); |
44 | 44 | ||
45 | static struct kmem_cache *zfcp_cache_hw_align(const char *name, | ||
46 | unsigned long size) | ||
47 | { | ||
48 | return kmem_cache_create(name, size, roundup_pow_of_two(size), 0, NULL); | ||
49 | } | ||
50 | |||
45 | static int zfcp_reqlist_alloc(struct zfcp_adapter *adapter) | 51 | static int zfcp_reqlist_alloc(struct zfcp_adapter *adapter) |
46 | { | 52 | { |
47 | int idx; | 53 | int idx; |
@@ -78,7 +84,7 @@ static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun) | |||
78 | struct zfcp_port *port; | 84 | struct zfcp_port *port; |
79 | struct zfcp_unit *unit; | 85 | struct zfcp_unit *unit; |
80 | 86 | ||
81 | down(&zfcp_data.config_sema); | 87 | mutex_lock(&zfcp_data.config_mutex); |
82 | read_lock_irq(&zfcp_data.config_lock); | 88 | read_lock_irq(&zfcp_data.config_lock); |
83 | adapter = zfcp_get_adapter_by_busid(busid); | 89 | adapter = zfcp_get_adapter_by_busid(busid); |
84 | if (adapter) | 90 | if (adapter) |
@@ -93,31 +99,23 @@ static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun) | |||
93 | unit = zfcp_unit_enqueue(port, lun); | 99 | unit = zfcp_unit_enqueue(port, lun); |
94 | if (IS_ERR(unit)) | 100 | if (IS_ERR(unit)) |
95 | goto out_unit; | 101 | goto out_unit; |
96 | up(&zfcp_data.config_sema); | 102 | mutex_unlock(&zfcp_data.config_mutex); |
97 | ccw_device_set_online(adapter->ccw_device); | 103 | ccw_device_set_online(adapter->ccw_device); |
98 | 104 | ||
99 | zfcp_erp_wait(adapter); | 105 | zfcp_erp_wait(adapter); |
100 | flush_work(&unit->scsi_work); | 106 | flush_work(&unit->scsi_work); |
101 | 107 | ||
102 | down(&zfcp_data.config_sema); | 108 | mutex_lock(&zfcp_data.config_mutex); |
103 | zfcp_unit_put(unit); | 109 | zfcp_unit_put(unit); |
104 | out_unit: | 110 | out_unit: |
105 | zfcp_port_put(port); | 111 | zfcp_port_put(port); |
106 | out_port: | 112 | out_port: |
107 | zfcp_adapter_put(adapter); | 113 | zfcp_adapter_put(adapter); |
108 | out_adapter: | 114 | out_adapter: |
109 | up(&zfcp_data.config_sema); | 115 | mutex_unlock(&zfcp_data.config_mutex); |
110 | return; | 116 | return; |
111 | } | 117 | } |
112 | 118 | ||
113 | static struct kmem_cache *zfcp_cache_create(int size, char *name) | ||
114 | { | ||
115 | int align = 1; | ||
116 | while ((size - align) > 0) | ||
117 | align <<= 1; | ||
118 | return kmem_cache_create(name , size, align, 0, NULL); | ||
119 | } | ||
120 | |||
121 | static void __init zfcp_init_device_setup(char *devstr) | 119 | static void __init zfcp_init_device_setup(char *devstr) |
122 | { | 120 | { |
123 | char *token; | 121 | char *token; |
@@ -158,24 +156,27 @@ static int __init zfcp_module_init(void) | |||
158 | { | 156 | { |
159 | int retval = -ENOMEM; | 157 | int retval = -ENOMEM; |
160 | 158 | ||
161 | zfcp_data.fsf_req_qtcb_cache = zfcp_cache_create( | 159 | zfcp_data.gpn_ft_cache = zfcp_cache_hw_align("zfcp_gpn", |
162 | sizeof(struct zfcp_fsf_req_qtcb), "zfcp_fsf"); | 160 | sizeof(struct ct_iu_gpn_ft_req)); |
163 | if (!zfcp_data.fsf_req_qtcb_cache) | 161 | if (!zfcp_data.gpn_ft_cache) |
164 | goto out; | 162 | goto out; |
165 | 163 | ||
166 | zfcp_data.sr_buffer_cache = zfcp_cache_create( | 164 | zfcp_data.qtcb_cache = zfcp_cache_hw_align("zfcp_qtcb", |
167 | sizeof(struct fsf_status_read_buffer), "zfcp_sr"); | 165 | sizeof(struct fsf_qtcb)); |
166 | if (!zfcp_data.qtcb_cache) | ||
167 | goto out_qtcb_cache; | ||
168 | |||
169 | zfcp_data.sr_buffer_cache = zfcp_cache_hw_align("zfcp_sr", | ||
170 | sizeof(struct fsf_status_read_buffer)); | ||
168 | if (!zfcp_data.sr_buffer_cache) | 171 | if (!zfcp_data.sr_buffer_cache) |
169 | goto out_sr_cache; | 172 | goto out_sr_cache; |
170 | 173 | ||
171 | zfcp_data.gid_pn_cache = zfcp_cache_create( | 174 | zfcp_data.gid_pn_cache = zfcp_cache_hw_align("zfcp_gid", |
172 | sizeof(struct zfcp_gid_pn_data), "zfcp_gid"); | 175 | sizeof(struct zfcp_gid_pn_data)); |
173 | if (!zfcp_data.gid_pn_cache) | 176 | if (!zfcp_data.gid_pn_cache) |
174 | goto out_gid_cache; | 177 | goto out_gid_cache; |
175 | 178 | ||
176 | zfcp_data.work_queue = create_singlethread_workqueue("zfcp_wq"); | 179 | mutex_init(&zfcp_data.config_mutex); |
177 | |||
178 | sema_init(&zfcp_data.config_sema, 1); | ||
179 | rwlock_init(&zfcp_data.config_lock); | 180 | rwlock_init(&zfcp_data.config_lock); |
180 | 181 | ||
181 | zfcp_data.scsi_transport_template = | 182 | zfcp_data.scsi_transport_template = |
@@ -209,7 +210,9 @@ out_transport: | |||
209 | out_gid_cache: | 210 | out_gid_cache: |
210 | kmem_cache_destroy(zfcp_data.sr_buffer_cache); | 211 | kmem_cache_destroy(zfcp_data.sr_buffer_cache); |
211 | out_sr_cache: | 212 | out_sr_cache: |
212 | kmem_cache_destroy(zfcp_data.fsf_req_qtcb_cache); | 213 | kmem_cache_destroy(zfcp_data.qtcb_cache); |
214 | out_qtcb_cache: | ||
215 | kmem_cache_destroy(zfcp_data.gpn_ft_cache); | ||
213 | out: | 216 | out: |
214 | return retval; | 217 | return retval; |
215 | } | 218 | } |
@@ -263,7 +266,7 @@ static void zfcp_sysfs_unit_release(struct device *dev) | |||
263 | * @port: pointer to port where unit is added | 266 | * @port: pointer to port where unit is added |
264 | * @fcp_lun: FCP LUN of unit to be enqueued | 267 | * @fcp_lun: FCP LUN of unit to be enqueued |
265 | * Returns: pointer to enqueued unit on success, ERR_PTR on error | 268 | * Returns: pointer to enqueued unit on success, ERR_PTR on error |
266 | * Locks: config_sema must be held to serialize changes to the unit list | 269 | * Locks: config_mutex must be held to serialize changes to the unit list |
267 | * | 270 | * |
268 | * Sets up some unit internal structures and creates sysfs entry. | 271 | * Sets up some unit internal structures and creates sysfs entry. |
269 | */ | 272 | */ |
@@ -271,6 +274,13 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun) | |||
271 | { | 274 | { |
272 | struct zfcp_unit *unit; | 275 | struct zfcp_unit *unit; |
273 | 276 | ||
277 | read_lock_irq(&zfcp_data.config_lock); | ||
278 | if (zfcp_get_unit_by_lun(port, fcp_lun)) { | ||
279 | read_unlock_irq(&zfcp_data.config_lock); | ||
280 | return ERR_PTR(-EINVAL); | ||
281 | } | ||
282 | read_unlock_irq(&zfcp_data.config_lock); | ||
283 | |||
274 | unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL); | 284 | unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL); |
275 | if (!unit) | 285 | if (!unit) |
276 | return ERR_PTR(-ENOMEM); | 286 | return ERR_PTR(-ENOMEM); |
@@ -282,8 +292,11 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun) | |||
282 | unit->port = port; | 292 | unit->port = port; |
283 | unit->fcp_lun = fcp_lun; | 293 | unit->fcp_lun = fcp_lun; |
284 | 294 | ||
285 | dev_set_name(&unit->sysfs_device, "0x%016llx", | 295 | if (dev_set_name(&unit->sysfs_device, "0x%016llx", |
286 | (unsigned long long) fcp_lun); | 296 | (unsigned long long) fcp_lun)) { |
297 | kfree(unit); | ||
298 | return ERR_PTR(-ENOMEM); | ||
299 | } | ||
287 | unit->sysfs_device.parent = &port->sysfs_device; | 300 | unit->sysfs_device.parent = &port->sysfs_device; |
288 | unit->sysfs_device.release = zfcp_sysfs_unit_release; | 301 | unit->sysfs_device.release = zfcp_sysfs_unit_release; |
289 | dev_set_drvdata(&unit->sysfs_device, unit); | 302 | dev_set_drvdata(&unit->sysfs_device, unit); |
@@ -299,20 +312,15 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun) | |||
299 | unit->latencies.cmd.channel.min = 0xFFFFFFFF; | 312 | unit->latencies.cmd.channel.min = 0xFFFFFFFF; |
300 | unit->latencies.cmd.fabric.min = 0xFFFFFFFF; | 313 | unit->latencies.cmd.fabric.min = 0xFFFFFFFF; |
301 | 314 | ||
302 | read_lock_irq(&zfcp_data.config_lock); | 315 | if (device_register(&unit->sysfs_device)) { |
303 | if (zfcp_get_unit_by_lun(port, fcp_lun)) { | 316 | put_device(&unit->sysfs_device); |
304 | read_unlock_irq(&zfcp_data.config_lock); | 317 | return ERR_PTR(-EINVAL); |
305 | goto err_out_free; | ||
306 | } | 318 | } |
307 | read_unlock_irq(&zfcp_data.config_lock); | ||
308 | |||
309 | if (device_register(&unit->sysfs_device)) | ||
310 | goto err_out_free; | ||
311 | 319 | ||
312 | if (sysfs_create_group(&unit->sysfs_device.kobj, | 320 | if (sysfs_create_group(&unit->sysfs_device.kobj, |
313 | &zfcp_sysfs_unit_attrs)) { | 321 | &zfcp_sysfs_unit_attrs)) { |
314 | device_unregister(&unit->sysfs_device); | 322 | device_unregister(&unit->sysfs_device); |
315 | return ERR_PTR(-EIO); | 323 | return ERR_PTR(-EINVAL); |
316 | } | 324 | } |
317 | 325 | ||
318 | zfcp_unit_get(unit); | 326 | zfcp_unit_get(unit); |
@@ -327,10 +335,6 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun) | |||
327 | zfcp_port_get(port); | 335 | zfcp_port_get(port); |
328 | 336 | ||
329 | return unit; | 337 | return unit; |
330 | |||
331 | err_out_free: | ||
332 | kfree(unit); | ||
333 | return ERR_PTR(-EINVAL); | ||
334 | } | 338 | } |
335 | 339 | ||
336 | /** | 340 | /** |
@@ -353,37 +357,47 @@ void zfcp_unit_dequeue(struct zfcp_unit *unit) | |||
353 | 357 | ||
354 | static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter) | 358 | static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter) |
355 | { | 359 | { |
356 | /* must only be called with zfcp_data.config_sema taken */ | 360 | /* must only be called with zfcp_data.config_mutex taken */ |
357 | adapter->pool.fsf_req_erp = | 361 | adapter->pool.erp_req = |
358 | mempool_create_slab_pool(1, zfcp_data.fsf_req_qtcb_cache); | 362 | mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req)); |
359 | if (!adapter->pool.fsf_req_erp) | 363 | if (!adapter->pool.erp_req) |
364 | return -ENOMEM; | ||
365 | |||
366 | adapter->pool.gid_pn_req = | ||
367 | mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req)); | ||
368 | if (!adapter->pool.gid_pn_req) | ||
360 | return -ENOMEM; | 369 | return -ENOMEM; |
361 | 370 | ||
362 | adapter->pool.fsf_req_scsi = | 371 | adapter->pool.scsi_req = |
363 | mempool_create_slab_pool(1, zfcp_data.fsf_req_qtcb_cache); | 372 | mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req)); |
364 | if (!adapter->pool.fsf_req_scsi) | 373 | if (!adapter->pool.scsi_req) |
365 | return -ENOMEM; | 374 | return -ENOMEM; |
366 | 375 | ||
367 | adapter->pool.fsf_req_abort = | 376 | adapter->pool.scsi_abort = |
368 | mempool_create_slab_pool(1, zfcp_data.fsf_req_qtcb_cache); | 377 | mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req)); |
369 | if (!adapter->pool.fsf_req_abort) | 378 | if (!adapter->pool.scsi_abort) |
370 | return -ENOMEM; | 379 | return -ENOMEM; |
371 | 380 | ||
372 | adapter->pool.fsf_req_status_read = | 381 | adapter->pool.status_read_req = |
373 | mempool_create_kmalloc_pool(FSF_STATUS_READS_RECOM, | 382 | mempool_create_kmalloc_pool(FSF_STATUS_READS_RECOM, |
374 | sizeof(struct zfcp_fsf_req)); | 383 | sizeof(struct zfcp_fsf_req)); |
375 | if (!adapter->pool.fsf_req_status_read) | 384 | if (!adapter->pool.status_read_req) |
376 | return -ENOMEM; | 385 | return -ENOMEM; |
377 | 386 | ||
378 | adapter->pool.data_status_read = | 387 | adapter->pool.qtcb_pool = |
388 | mempool_create_slab_pool(4, zfcp_data.qtcb_cache); | ||
389 | if (!adapter->pool.qtcb_pool) | ||
390 | return -ENOMEM; | ||
391 | |||
392 | adapter->pool.status_read_data = | ||
379 | mempool_create_slab_pool(FSF_STATUS_READS_RECOM, | 393 | mempool_create_slab_pool(FSF_STATUS_READS_RECOM, |
380 | zfcp_data.sr_buffer_cache); | 394 | zfcp_data.sr_buffer_cache); |
381 | if (!adapter->pool.data_status_read) | 395 | if (!adapter->pool.status_read_data) |
382 | return -ENOMEM; | 396 | return -ENOMEM; |
383 | 397 | ||
384 | adapter->pool.data_gid_pn = | 398 | adapter->pool.gid_pn_data = |
385 | mempool_create_slab_pool(1, zfcp_data.gid_pn_cache); | 399 | mempool_create_slab_pool(1, zfcp_data.gid_pn_cache); |
386 | if (!adapter->pool.data_gid_pn) | 400 | if (!adapter->pool.gid_pn_data) |
387 | return -ENOMEM; | 401 | return -ENOMEM; |
388 | 402 | ||
389 | return 0; | 403 | return 0; |
@@ -391,19 +405,21 @@ static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter) | |||
391 | 405 | ||
392 | static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter) | 406 | static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter) |
393 | { | 407 | { |
394 | /* zfcp_data.config_sema must be held */ | 408 | /* zfcp_data.config_mutex must be held */ |
395 | if (adapter->pool.fsf_req_erp) | 409 | if (adapter->pool.erp_req) |
396 | mempool_destroy(adapter->pool.fsf_req_erp); | 410 | mempool_destroy(adapter->pool.erp_req); |
397 | if (adapter->pool.fsf_req_scsi) | 411 | if (adapter->pool.scsi_req) |
398 | mempool_destroy(adapter->pool.fsf_req_scsi); | 412 | mempool_destroy(adapter->pool.scsi_req); |
399 | if (adapter->pool.fsf_req_abort) | 413 | if (adapter->pool.scsi_abort) |
400 | mempool_destroy(adapter->pool.fsf_req_abort); | 414 | mempool_destroy(adapter->pool.scsi_abort); |
401 | if (adapter->pool.fsf_req_status_read) | 415 | if (adapter->pool.qtcb_pool) |
402 | mempool_destroy(adapter->pool.fsf_req_status_read); | 416 | mempool_destroy(adapter->pool.qtcb_pool); |
403 | if (adapter->pool.data_status_read) | 417 | if (adapter->pool.status_read_req) |
404 | mempool_destroy(adapter->pool.data_status_read); | 418 | mempool_destroy(adapter->pool.status_read_req); |
405 | if (adapter->pool.data_gid_pn) | 419 | if (adapter->pool.status_read_data) |
406 | mempool_destroy(adapter->pool.data_gid_pn); | 420 | mempool_destroy(adapter->pool.status_read_data); |
421 | if (adapter->pool.gid_pn_data) | ||
422 | mempool_destroy(adapter->pool.gid_pn_data); | ||
407 | } | 423 | } |
408 | 424 | ||
409 | /** | 425 | /** |
@@ -418,7 +434,7 @@ static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter) | |||
418 | int zfcp_status_read_refill(struct zfcp_adapter *adapter) | 434 | int zfcp_status_read_refill(struct zfcp_adapter *adapter) |
419 | { | 435 | { |
420 | while (atomic_read(&adapter->stat_miss) > 0) | 436 | while (atomic_read(&adapter->stat_miss) > 0) |
421 | if (zfcp_fsf_status_read(adapter)) { | 437 | if (zfcp_fsf_status_read(adapter->qdio)) { |
422 | if (atomic_read(&adapter->stat_miss) >= 16) { | 438 | if (atomic_read(&adapter->stat_miss) >= 16) { |
423 | zfcp_erp_adapter_reopen(adapter, 0, "axsref1", | 439 | zfcp_erp_adapter_reopen(adapter, 0, "axsref1", |
424 | NULL); | 440 | NULL); |
@@ -446,6 +462,27 @@ static void zfcp_print_sl(struct seq_file *m, struct service_level *sl) | |||
446 | adapter->fsf_lic_version); | 462 | adapter->fsf_lic_version); |
447 | } | 463 | } |
448 | 464 | ||
465 | static int zfcp_setup_adapter_work_queue(struct zfcp_adapter *adapter) | ||
466 | { | ||
467 | char name[TASK_COMM_LEN]; | ||
468 | |||
469 | snprintf(name, sizeof(name), "zfcp_q_%s", | ||
470 | dev_name(&adapter->ccw_device->dev)); | ||
471 | adapter->work_queue = create_singlethread_workqueue(name); | ||
472 | |||
473 | if (adapter->work_queue) | ||
474 | return 0; | ||
475 | return -ENOMEM; | ||
476 | } | ||
477 | |||
478 | static void zfcp_destroy_adapter_work_queue(struct zfcp_adapter *adapter) | ||
479 | { | ||
480 | if (adapter->work_queue) | ||
481 | destroy_workqueue(adapter->work_queue); | ||
482 | adapter->work_queue = NULL; | ||
483 | |||
484 | } | ||
485 | |||
449 | /** | 486 | /** |
450 | * zfcp_adapter_enqueue - enqueue a new adapter to the list | 487 | * zfcp_adapter_enqueue - enqueue a new adapter to the list |
451 | * @ccw_device: pointer to the struct cc_device | 488 | * @ccw_device: pointer to the struct cc_device |
@@ -455,7 +492,7 @@ static void zfcp_print_sl(struct seq_file *m, struct service_level *sl) | |||
455 | * Enqueues an adapter at the end of the adapter list in the driver data. | 492 | * Enqueues an adapter at the end of the adapter list in the driver data. |
456 | * All adapter internal structures are set up. | 493 | * All adapter internal structures are set up. |
457 | * Proc-fs entries are also created. | 494 | * Proc-fs entries are also created. |
458 | * locks: config_sema must be held to serialise changes to the adapter list | 495 | * locks: config_mutex must be held to serialize changes to the adapter list |
459 | */ | 496 | */ |
460 | int zfcp_adapter_enqueue(struct ccw_device *ccw_device) | 497 | int zfcp_adapter_enqueue(struct ccw_device *ccw_device) |
461 | { | 498 | { |
@@ -463,37 +500,37 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device) | |||
463 | 500 | ||
464 | /* | 501 | /* |
465 | * Note: It is safe to release the list_lock, as any list changes | 502 | * Note: It is safe to release the list_lock, as any list changes |
466 | * are protected by the config_sema, which must be held to get here | 503 | * are protected by the config_mutex, which must be held to get here |
467 | */ | 504 | */ |
468 | 505 | ||
469 | adapter = kzalloc(sizeof(struct zfcp_adapter), GFP_KERNEL); | 506 | adapter = kzalloc(sizeof(struct zfcp_adapter), GFP_KERNEL); |
470 | if (!adapter) | 507 | if (!adapter) |
471 | return -ENOMEM; | 508 | return -ENOMEM; |
472 | 509 | ||
473 | adapter->gs = kzalloc(sizeof(struct zfcp_wka_ports), GFP_KERNEL); | ||
474 | if (!adapter->gs) { | ||
475 | kfree(adapter); | ||
476 | return -ENOMEM; | ||
477 | } | ||
478 | |||
479 | ccw_device->handler = NULL; | 510 | ccw_device->handler = NULL; |
480 | adapter->ccw_device = ccw_device; | 511 | adapter->ccw_device = ccw_device; |
481 | atomic_set(&adapter->refcount, 0); | 512 | atomic_set(&adapter->refcount, 0); |
482 | 513 | ||
483 | if (zfcp_qdio_allocate(adapter)) | 514 | if (zfcp_qdio_setup(adapter)) |
484 | goto qdio_allocate_failed; | 515 | goto qdio_failed; |
485 | 516 | ||
486 | if (zfcp_allocate_low_mem_buffers(adapter)) | 517 | if (zfcp_allocate_low_mem_buffers(adapter)) |
487 | goto failed_low_mem_buffers; | 518 | goto low_mem_buffers_failed; |
488 | 519 | ||
489 | if (zfcp_reqlist_alloc(adapter)) | 520 | if (zfcp_reqlist_alloc(adapter)) |
490 | goto failed_low_mem_buffers; | 521 | goto low_mem_buffers_failed; |
491 | 522 | ||
492 | if (zfcp_adapter_debug_register(adapter)) | 523 | if (zfcp_dbf_adapter_register(adapter)) |
493 | goto debug_register_failed; | 524 | goto debug_register_failed; |
494 | 525 | ||
526 | if (zfcp_setup_adapter_work_queue(adapter)) | ||
527 | goto work_queue_failed; | ||
528 | |||
529 | if (zfcp_fc_gs_setup(adapter)) | ||
530 | goto generic_services_failed; | ||
531 | |||
495 | init_waitqueue_head(&adapter->remove_wq); | 532 | init_waitqueue_head(&adapter->remove_wq); |
496 | init_waitqueue_head(&adapter->erp_thread_wqh); | 533 | init_waitqueue_head(&adapter->erp_ready_wq); |
497 | init_waitqueue_head(&adapter->erp_done_wqh); | 534 | init_waitqueue_head(&adapter->erp_done_wqh); |
498 | 535 | ||
499 | INIT_LIST_HEAD(&adapter->port_list_head); | 536 | INIT_LIST_HEAD(&adapter->port_list_head); |
@@ -502,20 +539,14 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device) | |||
502 | 539 | ||
503 | spin_lock_init(&adapter->req_list_lock); | 540 | spin_lock_init(&adapter->req_list_lock); |
504 | 541 | ||
505 | spin_lock_init(&adapter->hba_dbf_lock); | ||
506 | spin_lock_init(&adapter->san_dbf_lock); | ||
507 | spin_lock_init(&adapter->scsi_dbf_lock); | ||
508 | spin_lock_init(&adapter->rec_dbf_lock); | ||
509 | spin_lock_init(&adapter->req_q_lock); | ||
510 | spin_lock_init(&adapter->qdio_stat_lock); | ||
511 | |||
512 | rwlock_init(&adapter->erp_lock); | 542 | rwlock_init(&adapter->erp_lock); |
513 | rwlock_init(&adapter->abort_lock); | 543 | rwlock_init(&adapter->abort_lock); |
514 | 544 | ||
515 | sema_init(&adapter->erp_ready_sem, 0); | 545 | if (zfcp_erp_thread_setup(adapter)) |
546 | goto erp_thread_failed; | ||
516 | 547 | ||
517 | INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler); | 548 | INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler); |
518 | INIT_WORK(&adapter->scan_work, _zfcp_scan_ports_later); | 549 | INIT_WORK(&adapter->scan_work, _zfcp_fc_scan_ports_later); |
519 | 550 | ||
520 | adapter->service_level.seq_print = zfcp_print_sl; | 551 | adapter->service_level.seq_print = zfcp_print_sl; |
521 | 552 | ||
@@ -529,20 +560,25 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device) | |||
529 | goto sysfs_failed; | 560 | goto sysfs_failed; |
530 | 561 | ||
531 | atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status); | 562 | atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status); |
532 | zfcp_fc_wka_ports_init(adapter); | ||
533 | 563 | ||
534 | if (!zfcp_adapter_scsi_register(adapter)) | 564 | if (!zfcp_adapter_scsi_register(adapter)) |
535 | return 0; | 565 | return 0; |
536 | 566 | ||
537 | sysfs_failed: | 567 | sysfs_failed: |
538 | zfcp_adapter_debug_unregister(adapter); | 568 | zfcp_erp_thread_kill(adapter); |
569 | erp_thread_failed: | ||
570 | zfcp_fc_gs_destroy(adapter); | ||
571 | generic_services_failed: | ||
572 | zfcp_destroy_adapter_work_queue(adapter); | ||
573 | work_queue_failed: | ||
574 | zfcp_dbf_adapter_unregister(adapter->dbf); | ||
539 | debug_register_failed: | 575 | debug_register_failed: |
540 | dev_set_drvdata(&ccw_device->dev, NULL); | 576 | dev_set_drvdata(&ccw_device->dev, NULL); |
541 | kfree(adapter->req_list); | 577 | kfree(adapter->req_list); |
542 | failed_low_mem_buffers: | 578 | low_mem_buffers_failed: |
543 | zfcp_free_low_mem_buffers(adapter); | 579 | zfcp_free_low_mem_buffers(adapter); |
544 | qdio_allocate_failed: | 580 | qdio_failed: |
545 | zfcp_qdio_free(adapter); | 581 | zfcp_qdio_destroy(adapter->qdio); |
546 | kfree(adapter); | 582 | kfree(adapter); |
547 | return -ENOMEM; | 583 | return -ENOMEM; |
548 | } | 584 | } |
@@ -559,6 +595,7 @@ void zfcp_adapter_dequeue(struct zfcp_adapter *adapter) | |||
559 | 595 | ||
560 | cancel_work_sync(&adapter->scan_work); | 596 | cancel_work_sync(&adapter->scan_work); |
561 | cancel_work_sync(&adapter->stat_work); | 597 | cancel_work_sync(&adapter->stat_work); |
598 | zfcp_fc_wka_ports_force_offline(adapter->gs); | ||
562 | zfcp_adapter_scsi_unregister(adapter); | 599 | zfcp_adapter_scsi_unregister(adapter); |
563 | sysfs_remove_group(&adapter->ccw_device->dev.kobj, | 600 | sysfs_remove_group(&adapter->ccw_device->dev.kobj, |
564 | &zfcp_sysfs_adapter_attrs); | 601 | &zfcp_sysfs_adapter_attrs); |
@@ -570,13 +607,15 @@ void zfcp_adapter_dequeue(struct zfcp_adapter *adapter) | |||
570 | if (!retval) | 607 | if (!retval) |
571 | return; | 608 | return; |
572 | 609 | ||
573 | zfcp_adapter_debug_unregister(adapter); | 610 | zfcp_fc_gs_destroy(adapter); |
574 | zfcp_qdio_free(adapter); | 611 | zfcp_erp_thread_kill(adapter); |
612 | zfcp_destroy_adapter_work_queue(adapter); | ||
613 | zfcp_dbf_adapter_unregister(adapter->dbf); | ||
575 | zfcp_free_low_mem_buffers(adapter); | 614 | zfcp_free_low_mem_buffers(adapter); |
615 | zfcp_qdio_destroy(adapter->qdio); | ||
576 | kfree(adapter->req_list); | 616 | kfree(adapter->req_list); |
577 | kfree(adapter->fc_stats); | 617 | kfree(adapter->fc_stats); |
578 | kfree(adapter->stats_reset_data); | 618 | kfree(adapter->stats_reset_data); |
579 | kfree(adapter->gs); | ||
580 | kfree(adapter); | 619 | kfree(adapter); |
581 | } | 620 | } |
582 | 621 | ||
@@ -592,7 +631,7 @@ static void zfcp_sysfs_port_release(struct device *dev) | |||
592 | * @status: initial status for the port | 631 | * @status: initial status for the port |
593 | * @d_id: destination id of the remote port to be enqueued | 632 | * @d_id: destination id of the remote port to be enqueued |
594 | * Returns: pointer to enqueued port on success, ERR_PTR on error | 633 | * Returns: pointer to enqueued port on success, ERR_PTR on error |
595 | * Locks: config_sema must be held to serialize changes to the port list | 634 | * Locks: config_mutex must be held to serialize changes to the port list |
596 | * | 635 | * |
597 | * All port internal structures are set up and the sysfs entry is generated. | 636 | * All port internal structures are set up and the sysfs entry is generated. |
598 | * d_id is used to enqueue ports with a well known address like the Directory | 637 | * d_id is used to enqueue ports with a well known address like the Directory |
@@ -602,7 +641,13 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn, | |||
602 | u32 status, u32 d_id) | 641 | u32 status, u32 d_id) |
603 | { | 642 | { |
604 | struct zfcp_port *port; | 643 | struct zfcp_port *port; |
605 | int retval; | 644 | |
645 | read_lock_irq(&zfcp_data.config_lock); | ||
646 | if (zfcp_get_port_by_wwpn(adapter, wwpn)) { | ||
647 | read_unlock_irq(&zfcp_data.config_lock); | ||
648 | return ERR_PTR(-EINVAL); | ||
649 | } | ||
650 | read_unlock_irq(&zfcp_data.config_lock); | ||
606 | 651 | ||
607 | port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL); | 652 | port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL); |
608 | if (!port) | 653 | if (!port) |
@@ -610,7 +655,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn, | |||
610 | 655 | ||
611 | init_waitqueue_head(&port->remove_wq); | 656 | init_waitqueue_head(&port->remove_wq); |
612 | INIT_LIST_HEAD(&port->unit_list_head); | 657 | INIT_LIST_HEAD(&port->unit_list_head); |
613 | INIT_WORK(&port->gid_pn_work, zfcp_erp_port_strategy_open_lookup); | 658 | INIT_WORK(&port->gid_pn_work, zfcp_fc_port_did_lookup); |
614 | INIT_WORK(&port->test_link_work, zfcp_fc_link_test_work); | 659 | INIT_WORK(&port->test_link_work, zfcp_fc_link_test_work); |
615 | INIT_WORK(&port->rport_work, zfcp_scsi_rport_work); | 660 | INIT_WORK(&port->rport_work, zfcp_scsi_rport_work); |
616 | 661 | ||
@@ -623,29 +668,24 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn, | |||
623 | atomic_set_mask(status | ZFCP_STATUS_COMMON_REMOVE, &port->status); | 668 | atomic_set_mask(status | ZFCP_STATUS_COMMON_REMOVE, &port->status); |
624 | atomic_set(&port->refcount, 0); | 669 | atomic_set(&port->refcount, 0); |
625 | 670 | ||
626 | dev_set_name(&port->sysfs_device, "0x%016llx", | 671 | if (dev_set_name(&port->sysfs_device, "0x%016llx", |
627 | (unsigned long long)wwpn); | 672 | (unsigned long long)wwpn)) { |
673 | kfree(port); | ||
674 | return ERR_PTR(-ENOMEM); | ||
675 | } | ||
628 | port->sysfs_device.parent = &adapter->ccw_device->dev; | 676 | port->sysfs_device.parent = &adapter->ccw_device->dev; |
629 | |||
630 | port->sysfs_device.release = zfcp_sysfs_port_release; | 677 | port->sysfs_device.release = zfcp_sysfs_port_release; |
631 | dev_set_drvdata(&port->sysfs_device, port); | 678 | dev_set_drvdata(&port->sysfs_device, port); |
632 | 679 | ||
633 | read_lock_irq(&zfcp_data.config_lock); | 680 | if (device_register(&port->sysfs_device)) { |
634 | if (zfcp_get_port_by_wwpn(adapter, wwpn)) { | 681 | put_device(&port->sysfs_device); |
635 | read_unlock_irq(&zfcp_data.config_lock); | 682 | return ERR_PTR(-EINVAL); |
636 | goto err_out_free; | ||
637 | } | 683 | } |
638 | read_unlock_irq(&zfcp_data.config_lock); | ||
639 | 684 | ||
640 | if (device_register(&port->sysfs_device)) | 685 | if (sysfs_create_group(&port->sysfs_device.kobj, |
641 | goto err_out_free; | 686 | &zfcp_sysfs_port_attrs)) { |
642 | |||
643 | retval = sysfs_create_group(&port->sysfs_device.kobj, | ||
644 | &zfcp_sysfs_port_attrs); | ||
645 | |||
646 | if (retval) { | ||
647 | device_unregister(&port->sysfs_device); | 687 | device_unregister(&port->sysfs_device); |
648 | goto err_out; | 688 | return ERR_PTR(-EINVAL); |
649 | } | 689 | } |
650 | 690 | ||
651 | zfcp_port_get(port); | 691 | zfcp_port_get(port); |
@@ -659,11 +699,6 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn, | |||
659 | 699 | ||
660 | zfcp_adapter_get(adapter); | 700 | zfcp_adapter_get(adapter); |
661 | return port; | 701 | return port; |
662 | |||
663 | err_out_free: | ||
664 | kfree(port); | ||
665 | err_out: | ||
666 | return ERR_PTR(-EINVAL); | ||
667 | } | 702 | } |
668 | 703 | ||
669 | /** | 704 | /** |
@@ -672,12 +707,11 @@ err_out: | |||
672 | */ | 707 | */ |
673 | void zfcp_port_dequeue(struct zfcp_port *port) | 708 | void zfcp_port_dequeue(struct zfcp_port *port) |
674 | { | 709 | { |
675 | wait_event(port->remove_wq, atomic_read(&port->refcount) == 0); | ||
676 | write_lock_irq(&zfcp_data.config_lock); | 710 | write_lock_irq(&zfcp_data.config_lock); |
677 | list_del(&port->list); | 711 | list_del(&port->list); |
678 | write_unlock_irq(&zfcp_data.config_lock); | 712 | write_unlock_irq(&zfcp_data.config_lock); |
679 | if (port->rport) | 713 | wait_event(port->remove_wq, atomic_read(&port->refcount) == 0); |
680 | port->rport->dd_data = NULL; | 714 | cancel_work_sync(&port->rport_work); /* usually not necessary */ |
681 | zfcp_adapter_put(port->adapter); | 715 | zfcp_adapter_put(port->adapter); |
682 | sysfs_remove_group(&port->sysfs_device.kobj, &zfcp_sysfs_port_attrs); | 716 | sysfs_remove_group(&port->sysfs_device.kobj, &zfcp_sysfs_port_attrs); |
683 | device_unregister(&port->sysfs_device); | 717 | device_unregister(&port->sysfs_device); |
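
Two patterns recur throughout the zfcp_aux.c hunks above: the driver-wide config_sema semaphore becomes a mutex, and the hand-rolled cache-alignment loop is replaced by a helper built on roundup_pow_of_two(). The following is a minimal sketch of both patterns using the stock Linux mutex and slab APIs; the names (cfg_mutex, cache_hw_align, config_update) are hypothetical stand-ins, not the driver's real symbols.

    #include <linux/mutex.h>
    #include <linux/slab.h>
    #include <linux/log2.h>

    /* Hypothetical stand-in for zfcp_data.config_mutex. */
    static DEFINE_MUTEX(cfg_mutex);

    /* Same idea as the new zfcp_cache_hw_align(): align objects to the next
     * power of two of their size instead of computing the alignment by hand. */
    static struct kmem_cache *cache_hw_align(const char *name, unsigned long size)
    {
            return kmem_cache_create(name, size, roundup_pow_of_two(size), 0, NULL);
    }

    /* Configuration changes that used to nest between down()/up() on the
     * semaphore now take the mutex instead. */
    static void config_update(void)
    {
            mutex_lock(&cfg_mutex);     /* formerly down(&zfcp_data.config_sema) */
            /* ... modify adapter/port/unit lists ... */
            mutex_unlock(&cfg_mutex);   /* formerly up(&zfcp_data.config_sema) */
    }

A mutex fits here because the config paths only run in process context and may sleep while holding it, so no semaphore counting behaviour is lost.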
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index d9da5c42ccbe..0c90f8e71605 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -18,12 +18,15 @@ static int zfcp_ccw_suspend(struct ccw_device *cdev) | |||
18 | { | 18 | { |
19 | struct zfcp_adapter *adapter = dev_get_drvdata(&cdev->dev); | 19 | struct zfcp_adapter *adapter = dev_get_drvdata(&cdev->dev); |
20 | 20 | ||
21 | down(&zfcp_data.config_sema); | 21 | if (!adapter) |
22 | return 0; | ||
23 | |||
24 | mutex_lock(&zfcp_data.config_mutex); | ||
22 | 25 | ||
23 | zfcp_erp_adapter_shutdown(adapter, 0, "ccsusp1", NULL); | 26 | zfcp_erp_adapter_shutdown(adapter, 0, "ccsusp1", NULL); |
24 | zfcp_erp_wait(adapter); | 27 | zfcp_erp_wait(adapter); |
25 | 28 | ||
26 | up(&zfcp_data.config_sema); | 29 | mutex_unlock(&zfcp_data.config_mutex); |
27 | 30 | ||
28 | return 0; | 31 | return 0; |
29 | } | 32 | } |
@@ -33,6 +36,9 @@ static int zfcp_ccw_activate(struct ccw_device *cdev) | |||
33 | { | 36 | { |
34 | struct zfcp_adapter *adapter = dev_get_drvdata(&cdev->dev); | 37 | struct zfcp_adapter *adapter = dev_get_drvdata(&cdev->dev); |
35 | 38 | ||
39 | if (!adapter) | ||
40 | return 0; | ||
41 | |||
36 | zfcp_erp_modify_adapter_status(adapter, "ccresu1", NULL, | 42 | zfcp_erp_modify_adapter_status(adapter, "ccresu1", NULL, |
37 | ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET); | 43 | ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET); |
38 | zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, | 44 | zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, |
@@ -63,25 +69,14 @@ int zfcp_ccw_priv_sch(struct zfcp_adapter *adapter) | |||
63 | * zfcp_ccw_probe - probe function of zfcp driver | 69 | * zfcp_ccw_probe - probe function of zfcp driver |
64 | * @ccw_device: pointer to belonging ccw device | 70 | * @ccw_device: pointer to belonging ccw device |
65 | * | 71 | * |
66 | * This function gets called by the common i/o layer and sets up the initial | 72 | * This function gets called by the common i/o layer for each FCP |
67 | * data structures for each fcp adapter, which was detected by the system. | 73 | * device found on the current system. This is only a stub to make cio |
68 | * Also the sysfs files for this adapter will be created by this function. | 74 | * work: To only allocate adapter resources for devices actually used, |
69 | * In addition the nameserver port will be added to the ports of the adapter | 75 | * the allocation is deferred to the first call to ccw_set_online. |
70 | * and its sysfs representation will be created too. | ||
71 | */ | 76 | */ |
72 | static int zfcp_ccw_probe(struct ccw_device *ccw_device) | 77 | static int zfcp_ccw_probe(struct ccw_device *ccw_device) |
73 | { | 78 | { |
74 | int retval = 0; | 79 | return 0; |
75 | |||
76 | down(&zfcp_data.config_sema); | ||
77 | if (zfcp_adapter_enqueue(ccw_device)) { | ||
78 | dev_err(&ccw_device->dev, | ||
79 | "Setting up data structures for the " | ||
80 | "FCP adapter failed\n"); | ||
81 | retval = -EINVAL; | ||
82 | } | ||
83 | up(&zfcp_data.config_sema); | ||
84 | return retval; | ||
85 | } | 80 | } |
86 | 81 | ||
87 | /** | 82 | /** |
@@ -102,8 +97,11 @@ static void zfcp_ccw_remove(struct ccw_device *ccw_device) | |||
102 | LIST_HEAD(port_remove_lh); | 97 | LIST_HEAD(port_remove_lh); |
103 | 98 | ||
104 | ccw_device_set_offline(ccw_device); | 99 | ccw_device_set_offline(ccw_device); |
105 | down(&zfcp_data.config_sema); | 100 | |
101 | mutex_lock(&zfcp_data.config_mutex); | ||
106 | adapter = dev_get_drvdata(&ccw_device->dev); | 102 | adapter = dev_get_drvdata(&ccw_device->dev); |
103 | if (!adapter) | ||
104 | goto out; | ||
107 | 105 | ||
108 | write_lock_irq(&zfcp_data.config_lock); | 106 | write_lock_irq(&zfcp_data.config_lock); |
109 | list_for_each_entry_safe(port, p, &adapter->port_list_head, list) { | 107 | list_for_each_entry_safe(port, p, &adapter->port_list_head, list) { |
@@ -129,29 +127,41 @@ static void zfcp_ccw_remove(struct ccw_device *ccw_device) | |||
129 | wait_event(adapter->remove_wq, atomic_read(&adapter->refcount) == 0); | 127 | wait_event(adapter->remove_wq, atomic_read(&adapter->refcount) == 0); |
130 | zfcp_adapter_dequeue(adapter); | 128 | zfcp_adapter_dequeue(adapter); |
131 | 129 | ||
132 | up(&zfcp_data.config_sema); | 130 | out: |
131 | mutex_unlock(&zfcp_data.config_mutex); | ||
133 | } | 132 | } |
134 | 133 | ||
135 | /** | 134 | /** |
136 | * zfcp_ccw_set_online - set_online function of zfcp driver | 135 | * zfcp_ccw_set_online - set_online function of zfcp driver |
137 | * @ccw_device: pointer to belonging ccw device | 136 | * @ccw_device: pointer to belonging ccw device |
138 | * | 137 | * |
139 | * This function gets called by the common i/o layer and sets an adapter | 138 | * This function gets called by the common i/o layer and sets an |
140 | * into state online. Setting an fcp device online means that it will be | 139 | * adapter into state online. The first call will allocate all |
141 | * registered with the SCSI stack, that the QDIO queues will be set up | 140 | * adapter resources that will be retained until the device is removed |
142 | * and that the adapter will be opened (asynchronously). | 141 | * via zfcp_ccw_remove. |
142 | * | ||
143 | * Setting an fcp device online means that it will be registered with | ||
144 | * the SCSI stack, that the QDIO queues will be set up and that the | ||
145 | * adapter will be opened. | ||
143 | */ | 146 | */ |
144 | static int zfcp_ccw_set_online(struct ccw_device *ccw_device) | 147 | static int zfcp_ccw_set_online(struct ccw_device *ccw_device) |
145 | { | 148 | { |
146 | struct zfcp_adapter *adapter; | 149 | struct zfcp_adapter *adapter; |
147 | int retval; | 150 | int ret = 0; |
148 | 151 | ||
149 | down(&zfcp_data.config_sema); | 152 | mutex_lock(&zfcp_data.config_mutex); |
150 | adapter = dev_get_drvdata(&ccw_device->dev); | 153 | adapter = dev_get_drvdata(&ccw_device->dev); |
151 | 154 | ||
152 | retval = zfcp_erp_thread_setup(adapter); | 155 | if (!adapter) { |
153 | if (retval) | 156 | ret = zfcp_adapter_enqueue(ccw_device); |
154 | goto out; | 157 | if (ret) { |
158 | dev_err(&ccw_device->dev, | ||
159 | "Setting up data structures for the " | ||
160 | "FCP adapter failed\n"); | ||
161 | goto out; | ||
162 | } | ||
163 | adapter = dev_get_drvdata(&ccw_device->dev); | ||
164 | } | ||
155 | 165 | ||
156 | /* initialize request counter */ | 166 | /* initialize request counter */ |
157 | BUG_ON(!zfcp_reqlist_isempty(adapter)); | 167 | BUG_ON(!zfcp_reqlist_isempty(adapter)); |
@@ -162,13 +172,11 @@ static int zfcp_ccw_set_online(struct ccw_device *ccw_device) | |||
162 | zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, | 172 | zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, |
163 | "ccsonl2", NULL); | 173 | "ccsonl2", NULL); |
164 | zfcp_erp_wait(adapter); | 174 | zfcp_erp_wait(adapter); |
165 | up(&zfcp_data.config_sema); | 175 | out: |
166 | flush_work(&adapter->scan_work); | 176 | mutex_unlock(&zfcp_data.config_mutex); |
167 | return 0; | 177 | if (!ret) |
168 | 178 | flush_work(&adapter->scan_work); | |
169 | out: | 179 | return ret; |
170 | up(&zfcp_data.config_sema); | ||
171 | return retval; | ||
172 | } | 180 | } |
173 | 181 | ||
174 | /** | 182 | /** |
@@ -182,12 +190,15 @@ static int zfcp_ccw_set_offline(struct ccw_device *ccw_device) | |||
182 | { | 190 | { |
183 | struct zfcp_adapter *adapter; | 191 | struct zfcp_adapter *adapter; |
184 | 192 | ||
185 | down(&zfcp_data.config_sema); | 193 | mutex_lock(&zfcp_data.config_mutex); |
186 | adapter = dev_get_drvdata(&ccw_device->dev); | 194 | adapter = dev_get_drvdata(&ccw_device->dev); |
195 | if (!adapter) | ||
196 | goto out; | ||
197 | |||
187 | zfcp_erp_adapter_shutdown(adapter, 0, "ccsoff1", NULL); | 198 | zfcp_erp_adapter_shutdown(adapter, 0, "ccsoff1", NULL); |
188 | zfcp_erp_wait(adapter); | 199 | zfcp_erp_wait(adapter); |
189 | zfcp_erp_thread_kill(adapter); | 200 | mutex_unlock(&zfcp_data.config_mutex); |
190 | up(&zfcp_data.config_sema); | 201 | out: |
191 | return 0; | 202 | return 0; |
192 | } | 203 | } |
193 | 204 | ||
@@ -240,11 +251,12 @@ static void zfcp_ccw_shutdown(struct ccw_device *cdev) | |||
240 | { | 251 | { |
241 | struct zfcp_adapter *adapter; | 252 | struct zfcp_adapter *adapter; |
242 | 253 | ||
243 | down(&zfcp_data.config_sema); | 254 | mutex_lock(&zfcp_data.config_mutex); |
244 | adapter = dev_get_drvdata(&cdev->dev); | 255 | adapter = dev_get_drvdata(&cdev->dev); |
245 | zfcp_erp_adapter_shutdown(adapter, 0, "ccshut1", NULL); | 256 | zfcp_erp_adapter_shutdown(adapter, 0, "ccshut1", NULL); |
246 | zfcp_erp_wait(adapter); | 257 | zfcp_erp_wait(adapter); |
247 | up(&zfcp_data.config_sema); | 258 | zfcp_erp_thread_kill(adapter); |
259 | mutex_unlock(&zfcp_data.config_mutex); | ||
248 | } | 260 | } |
249 | 261 | ||
250 | static struct ccw_driver zfcp_ccw_driver = { | 262 | static struct ccw_driver zfcp_ccw_driver = { |
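
The zfcp_ccw.c hunks above turn the ccw probe callback into a stub and defer all adapter allocation to the first set_online call, with every callback tolerating a NULL drvdata. A rough sketch of that deferred-allocation shape, assuming the s390 ccw bus API and using hypothetical my_* names for everything not shown in the patch:

    #include <linux/device.h>
    #include <asm/ccwdev.h>

    struct my_adapter;                               /* hypothetical per-device state   */
    int my_adapter_enqueue(struct ccw_device *cdev); /* allocates it and sets drvdata   */

    /* Probe only acknowledges the device; nothing is allocated yet. */
    static int my_ccw_probe(struct ccw_device *cdev)
    {
            return 0;
    }

    /* The first set_online call allocates the adapter; later calls reuse it. */
    static int my_ccw_set_online(struct ccw_device *cdev)
    {
            struct my_adapter *adapter = dev_get_drvdata(&cdev->dev);
            int ret = 0;

            if (!adapter) {
                    ret = my_adapter_enqueue(cdev);
                    if (ret)
                            return ret;
                    adapter = dev_get_drvdata(&cdev->dev);
            }
            /* ... register with SCSI, set up queues, open the adapter ... */
            return ret;
    }

This is why the suspend, set_offline, remove and shutdown callbacks in the patch all bail out early when drvdata is still NULL: the device may never have been set online.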
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index b99b87ce5a39..215b70749e95 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -3,7 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Debug traces for zfcp. | 4 | * Debug traces for zfcp. |
5 | * | 5 | * |
6 | * Copyright IBM Corporation 2002, 2008 | 6 | * Copyright IBM Corporation 2002, 2009 |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #define KMSG_COMPONENT "zfcp" | 9 | #define KMSG_COMPONENT "zfcp" |
@@ -11,6 +11,7 @@ | |||
11 | 11 | ||
12 | #include <linux/ctype.h> | 12 | #include <linux/ctype.h> |
13 | #include <asm/debug.h> | 13 | #include <asm/debug.h> |
14 | #include "zfcp_dbf.h" | ||
14 | #include "zfcp_ext.h" | 15 | #include "zfcp_ext.h" |
15 | 16 | ||
16 | static u32 dbfsize = 4; | 17 | static u32 dbfsize = 4; |
@@ -37,19 +38,6 @@ static void zfcp_dbf_hexdump(debug_info_t *dbf, void *to, int to_len, | |||
37 | } | 38 | } |
38 | } | 39 | } |
39 | 40 | ||
40 | /* FIXME: this duplicate this code in s390 debug feature */ | ||
41 | static void zfcp_dbf_timestamp(unsigned long long stck, struct timespec *time) | ||
42 | { | ||
43 | unsigned long long sec; | ||
44 | |||
45 | stck -= 0x8126d60e46000000LL - (0x3c26700LL * 1000000 * 4096); | ||
46 | sec = stck >> 12; | ||
47 | do_div(sec, 1000000); | ||
48 | time->tv_sec = sec; | ||
49 | stck -= (sec * 1000000) << 12; | ||
50 | time->tv_nsec = ((stck * 1000) >> 12); | ||
51 | } | ||
52 | |||
53 | static void zfcp_dbf_tag(char **p, const char *label, const char *tag) | 41 | static void zfcp_dbf_tag(char **p, const char *label, const char *tag) |
54 | { | 42 | { |
55 | int i; | 43 | int i; |
@@ -106,7 +94,7 @@ static int zfcp_dbf_view_header(debug_info_t *id, struct debug_view *view, | |||
106 | char *p = out_buf; | 94 | char *p = out_buf; |
107 | 95 | ||
108 | if (strncmp(dump->tag, "dump", ZFCP_DBF_TAG_SIZE) != 0) { | 96 | if (strncmp(dump->tag, "dump", ZFCP_DBF_TAG_SIZE) != 0) { |
109 | zfcp_dbf_timestamp(entry->id.stck, &t); | 97 | stck_to_timespec(entry->id.stck, &t); |
110 | zfcp_dbf_out(&p, "timestamp", "%011lu:%06lu", | 98 | zfcp_dbf_out(&p, "timestamp", "%011lu:%06lu", |
111 | t.tv_sec, t.tv_nsec); | 99 | t.tv_sec, t.tv_nsec); |
112 | zfcp_dbf_out(&p, "cpu", "%02i", entry->id.fields.cpuid); | 100 | zfcp_dbf_out(&p, "cpu", "%02i", entry->id.fields.cpuid); |
@@ -119,13 +107,10 @@ static int zfcp_dbf_view_header(debug_info_t *id, struct debug_view *view, | |||
119 | return p - out_buf; | 107 | return p - out_buf; |
120 | } | 108 | } |
121 | 109 | ||
122 | /** | 110 | void _zfcp_dbf_hba_fsf_response(const char *tag2, int level, |
123 | * zfcp_hba_dbf_event_fsf_response - trace event for request completion | 111 | struct zfcp_fsf_req *fsf_req, |
124 | * @fsf_req: request that has been completed | 112 | struct zfcp_dbf *dbf) |
125 | */ | ||
126 | void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req) | ||
127 | { | 113 | { |
128 | struct zfcp_adapter *adapter = fsf_req->adapter; | ||
129 | struct fsf_qtcb *qtcb = fsf_req->qtcb; | 114 | struct fsf_qtcb *qtcb = fsf_req->qtcb; |
130 | union fsf_prot_status_qual *prot_status_qual = | 115 | union fsf_prot_status_qual *prot_status_qual = |
131 | &qtcb->prefix.prot_status_qual; | 116 | &qtcb->prefix.prot_status_qual; |
@@ -134,33 +119,14 @@ void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req) | |||
134 | struct zfcp_port *port; | 119 | struct zfcp_port *port; |
135 | struct zfcp_unit *unit; | 120 | struct zfcp_unit *unit; |
136 | struct zfcp_send_els *send_els; | 121 | struct zfcp_send_els *send_els; |
137 | struct zfcp_hba_dbf_record *rec = &adapter->hba_dbf_buf; | 122 | struct zfcp_dbf_hba_record *rec = &dbf->hba_buf; |
138 | struct zfcp_hba_dbf_record_response *response = &rec->u.response; | 123 | struct zfcp_dbf_hba_record_response *response = &rec->u.response; |
139 | int level; | ||
140 | unsigned long flags; | 124 | unsigned long flags; |
141 | 125 | ||
142 | spin_lock_irqsave(&adapter->hba_dbf_lock, flags); | 126 | spin_lock_irqsave(&dbf->hba_lock, flags); |
143 | memset(rec, 0, sizeof(*rec)); | 127 | memset(rec, 0, sizeof(*rec)); |
144 | strncpy(rec->tag, "resp", ZFCP_DBF_TAG_SIZE); | 128 | strncpy(rec->tag, "resp", ZFCP_DBF_TAG_SIZE); |
145 | 129 | strncpy(rec->tag2, tag2, ZFCP_DBF_TAG_SIZE); | |
146 | if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) && | ||
147 | (qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) { | ||
148 | strncpy(rec->tag2, "perr", ZFCP_DBF_TAG_SIZE); | ||
149 | level = 1; | ||
150 | } else if (qtcb->header.fsf_status != FSF_GOOD) { | ||
151 | strncpy(rec->tag2, "ferr", ZFCP_DBF_TAG_SIZE); | ||
152 | level = 1; | ||
153 | } else if ((fsf_req->fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) || | ||
154 | (fsf_req->fsf_command == FSF_QTCB_OPEN_LUN)) { | ||
155 | strncpy(rec->tag2, "open", ZFCP_DBF_TAG_SIZE); | ||
156 | level = 4; | ||
157 | } else if (qtcb->header.log_length) { | ||
158 | strncpy(rec->tag2, "qtcb", ZFCP_DBF_TAG_SIZE); | ||
159 | level = 5; | ||
160 | } else { | ||
161 | strncpy(rec->tag2, "norm", ZFCP_DBF_TAG_SIZE); | ||
162 | level = 6; | ||
163 | } | ||
164 | 130 | ||
165 | response->fsf_command = fsf_req->fsf_command; | 131 | response->fsf_command = fsf_req->fsf_command; |
166 | response->fsf_reqid = fsf_req->req_id; | 132 | response->fsf_reqid = fsf_req->req_id; |
@@ -173,9 +139,9 @@ void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req) | |||
173 | memcpy(response->fsf_status_qual, | 139 | memcpy(response->fsf_status_qual, |
174 | fsf_status_qual, FSF_STATUS_QUALIFIER_SIZE); | 140 | fsf_status_qual, FSF_STATUS_QUALIFIER_SIZE); |
175 | response->fsf_req_status = fsf_req->status; | 141 | response->fsf_req_status = fsf_req->status; |
176 | response->sbal_first = fsf_req->sbal_first; | 142 | response->sbal_first = fsf_req->queue_req.sbal_first; |
177 | response->sbal_last = fsf_req->sbal_last; | 143 | response->sbal_last = fsf_req->queue_req.sbal_last; |
178 | response->sbal_response = fsf_req->sbal_response; | 144 | response->sbal_response = fsf_req->queue_req.sbal_response; |
179 | response->pool = fsf_req->pool != NULL; | 145 | response->pool = fsf_req->pool != NULL; |
180 | response->erp_action = (unsigned long)fsf_req->erp_action; | 146 | response->erp_action = (unsigned long)fsf_req->erp_action; |
181 | 147 | ||
@@ -224,7 +190,7 @@ void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req) | |||
224 | break; | 190 | break; |
225 | } | 191 | } |
226 | 192 | ||
227 | debug_event(adapter->hba_dbf, level, rec, sizeof(*rec)); | 193 | debug_event(dbf->hba, level, rec, sizeof(*rec)); |
228 | 194 | ||
229 | /* have fcp channel microcode fixed to use as little as possible */ | 195 | /* have fcp channel microcode fixed to use as little as possible */ |
230 | if (fsf_req->fsf_command != FSF_QTCB_FCP_CMND) { | 196 | if (fsf_req->fsf_command != FSF_QTCB_FCP_CMND) { |
@@ -232,31 +198,25 @@ void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req) | |||
232 | char *buf = (char *)qtcb + qtcb->header.log_start; | 198 | char *buf = (char *)qtcb + qtcb->header.log_start; |
233 | int len = qtcb->header.log_length; | 199 | int len = qtcb->header.log_length; |
234 | for (; len && !buf[len - 1]; len--); | 200 | for (; len && !buf[len - 1]; len--); |
235 | zfcp_dbf_hexdump(adapter->hba_dbf, rec, sizeof(*rec), level, | 201 | zfcp_dbf_hexdump(dbf->hba, rec, sizeof(*rec), level, buf, |
236 | buf, len); | 202 | len); |
237 | } | 203 | } |
238 | 204 | ||
239 | spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags); | 205 | spin_unlock_irqrestore(&dbf->hba_lock, flags); |
240 | } | 206 | } |
241 | 207 | ||
242 | /** | 208 | void _zfcp_dbf_hba_fsf_unsol(const char *tag, int level, struct zfcp_dbf *dbf, |
243 | * zfcp_hba_dbf_event_fsf_unsol - trace event for an unsolicited status buffer | 209 | struct fsf_status_read_buffer *status_buffer) |
244 | * @tag: tag indicating which kind of unsolicited status has been received | ||
245 | * @adapter: adapter that has issued the unsolicited status buffer | ||
246 | * @status_buffer: buffer containing payload of unsolicited status | ||
247 | */ | ||
248 | void zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter, | ||
249 | struct fsf_status_read_buffer *status_buffer) | ||
250 | { | 210 | { |
251 | struct zfcp_hba_dbf_record *rec = &adapter->hba_dbf_buf; | 211 | struct zfcp_dbf_hba_record *rec = &dbf->hba_buf; |
252 | unsigned long flags; | 212 | unsigned long flags; |
253 | 213 | ||
254 | spin_lock_irqsave(&adapter->hba_dbf_lock, flags); | 214 | spin_lock_irqsave(&dbf->hba_lock, flags); |
255 | memset(rec, 0, sizeof(*rec)); | 215 | memset(rec, 0, sizeof(*rec)); |
256 | strncpy(rec->tag, "stat", ZFCP_DBF_TAG_SIZE); | 216 | strncpy(rec->tag, "stat", ZFCP_DBF_TAG_SIZE); |
257 | strncpy(rec->tag2, tag, ZFCP_DBF_TAG_SIZE); | 217 | strncpy(rec->tag2, tag, ZFCP_DBF_TAG_SIZE); |
258 | 218 | ||
259 | rec->u.status.failed = atomic_read(&adapter->stat_miss); | 219 | rec->u.status.failed = atomic_read(&dbf->adapter->stat_miss); |
260 | if (status_buffer != NULL) { | 220 | if (status_buffer != NULL) { |
261 | rec->u.status.status_type = status_buffer->status_type; | 221 | rec->u.status.status_type = status_buffer->status_type; |
262 | rec->u.status.status_subtype = status_buffer->status_subtype; | 222 | rec->u.status.status_subtype = status_buffer->status_subtype; |
@@ -293,63 +253,61 @@ void zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter, | |||
293 | &status_buffer->payload, rec->u.status.payload_size); | 253 | &status_buffer->payload, rec->u.status.payload_size); |
294 | } | 254 | } |
295 | 255 | ||
296 | debug_event(adapter->hba_dbf, 2, rec, sizeof(*rec)); | 256 | debug_event(dbf->hba, level, rec, sizeof(*rec)); |
297 | spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags); | 257 | spin_unlock_irqrestore(&dbf->hba_lock, flags); |
298 | } | 258 | } |
299 | 259 | ||
300 | /** | 260 | /** |
301 | * zfcp_hba_dbf_event_qdio - trace event for QDIO related failure | 261 | * zfcp_dbf_hba_qdio - trace event for QDIO related failure |
302 | * @adapter: adapter affected by this QDIO related event | 262 | * @qdio: qdio structure affected by this QDIO related event |
303 | * @qdio_error: as passed by qdio module | 263 | * @qdio_error: as passed by qdio module |
304 | * @sbal_index: first buffer with error condition, as passed by qdio module | 264 | * @sbal_index: first buffer with error condition, as passed by qdio module |
305 | * @sbal_count: number of buffers affected, as passed by qdio module | 265 | * @sbal_count: number of buffers affected, as passed by qdio module |
306 | */ | 266 | */ |
307 | void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, | 267 | void zfcp_dbf_hba_qdio(struct zfcp_dbf *dbf, unsigned int qdio_error, |
308 | unsigned int qdio_error, int sbal_index, | 268 | int sbal_index, int sbal_count) |
309 | int sbal_count) | ||
310 | { | 269 | { |
311 | struct zfcp_hba_dbf_record *r = &adapter->hba_dbf_buf; | 270 | struct zfcp_dbf_hba_record *r = &dbf->hba_buf; |
312 | unsigned long flags; | 271 | unsigned long flags; |
313 | 272 | ||
314 | spin_lock_irqsave(&adapter->hba_dbf_lock, flags); | 273 | spin_lock_irqsave(&dbf->hba_lock, flags); |
315 | memset(r, 0, sizeof(*r)); | 274 | memset(r, 0, sizeof(*r)); |
316 | strncpy(r->tag, "qdio", ZFCP_DBF_TAG_SIZE); | 275 | strncpy(r->tag, "qdio", ZFCP_DBF_TAG_SIZE); |
317 | r->u.qdio.qdio_error = qdio_error; | 276 | r->u.qdio.qdio_error = qdio_error; |
318 | r->u.qdio.sbal_index = sbal_index; | 277 | r->u.qdio.sbal_index = sbal_index; |
319 | r->u.qdio.sbal_count = sbal_count; | 278 | r->u.qdio.sbal_count = sbal_count; |
320 | debug_event(adapter->hba_dbf, 0, r, sizeof(*r)); | 279 | debug_event(dbf->hba, 0, r, sizeof(*r)); |
321 | spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags); | 280 | spin_unlock_irqrestore(&dbf->hba_lock, flags); |
322 | } | 281 | } |
323 | 282 | ||
324 | /** | 283 | /** |
325 | * zfcp_hba_dbf_event_berr - trace event for bit error threshold | 284 | * zfcp_dbf_hba_berr - trace event for bit error threshold |
326 | * @adapter: adapter affected by this QDIO related event | 285 | * @dbf: dbf structure affected by this QDIO related event |
327 | * @req: fsf request | 286 | * @req: fsf request |
328 | */ | 287 | */ |
329 | void zfcp_hba_dbf_event_berr(struct zfcp_adapter *adapter, | 288 | void zfcp_dbf_hba_berr(struct zfcp_dbf *dbf, struct zfcp_fsf_req *req) |
330 | struct zfcp_fsf_req *req) | ||
331 | { | 289 | { |
332 | struct zfcp_hba_dbf_record *r = &adapter->hba_dbf_buf; | 290 | struct zfcp_dbf_hba_record *r = &dbf->hba_buf; |
333 | struct fsf_status_read_buffer *sr_buf = req->data; | 291 | struct fsf_status_read_buffer *sr_buf = req->data; |
334 | struct fsf_bit_error_payload *err = &sr_buf->payload.bit_error; | 292 | struct fsf_bit_error_payload *err = &sr_buf->payload.bit_error; |
335 | unsigned long flags; | 293 | unsigned long flags; |
336 | 294 | ||
337 | spin_lock_irqsave(&adapter->hba_dbf_lock, flags); | 295 | spin_lock_irqsave(&dbf->hba_lock, flags); |
338 | memset(r, 0, sizeof(*r)); | 296 | memset(r, 0, sizeof(*r)); |
339 | strncpy(r->tag, "berr", ZFCP_DBF_TAG_SIZE); | 297 | strncpy(r->tag, "berr", ZFCP_DBF_TAG_SIZE); |
340 | memcpy(&r->u.berr, err, sizeof(struct fsf_bit_error_payload)); | 298 | memcpy(&r->u.berr, err, sizeof(struct fsf_bit_error_payload)); |
341 | debug_event(adapter->hba_dbf, 0, r, sizeof(*r)); | 299 | debug_event(dbf->hba, 0, r, sizeof(*r)); |
342 | spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags); | 300 | spin_unlock_irqrestore(&dbf->hba_lock, flags); |
343 | } | 301 | } |
344 | static void zfcp_hba_dbf_view_response(char **p, | 302 | static void zfcp_dbf_hba_view_response(char **p, |
345 | struct zfcp_hba_dbf_record_response *r) | 303 | struct zfcp_dbf_hba_record_response *r) |
346 | { | 304 | { |
347 | struct timespec t; | 305 | struct timespec t; |
348 | 306 | ||
349 | zfcp_dbf_out(p, "fsf_command", "0x%08x", r->fsf_command); | 307 | zfcp_dbf_out(p, "fsf_command", "0x%08x", r->fsf_command); |
350 | zfcp_dbf_out(p, "fsf_reqid", "0x%0Lx", r->fsf_reqid); | 308 | zfcp_dbf_out(p, "fsf_reqid", "0x%0Lx", r->fsf_reqid); |
351 | zfcp_dbf_out(p, "fsf_seqno", "0x%08x", r->fsf_seqno); | 309 | zfcp_dbf_out(p, "fsf_seqno", "0x%08x", r->fsf_seqno); |
352 | zfcp_dbf_timestamp(r->fsf_issued, &t); | 310 | stck_to_timespec(r->fsf_issued, &t); |
353 | zfcp_dbf_out(p, "fsf_issued", "%011lu:%06lu", t.tv_sec, t.tv_nsec); | 311 | zfcp_dbf_out(p, "fsf_issued", "%011lu:%06lu", t.tv_sec, t.tv_nsec); |
354 | zfcp_dbf_out(p, "fsf_prot_status", "0x%08x", r->fsf_prot_status); | 312 | zfcp_dbf_out(p, "fsf_prot_status", "0x%08x", r->fsf_prot_status); |
355 | zfcp_dbf_out(p, "fsf_status", "0x%08x", r->fsf_status); | 313 | zfcp_dbf_out(p, "fsf_status", "0x%08x", r->fsf_status); |
@@ -403,8 +361,8 @@ static void zfcp_hba_dbf_view_response(char **p, | |||
403 | } | 361 | } |
404 | } | 362 | } |
405 | 363 | ||
406 | static void zfcp_hba_dbf_view_status(char **p, | 364 | static void zfcp_dbf_hba_view_status(char **p, |
407 | struct zfcp_hba_dbf_record_status *r) | 365 | struct zfcp_dbf_hba_record_status *r) |
408 | { | 366 | { |
409 | zfcp_dbf_out(p, "failed", "0x%02x", r->failed); | 367 | zfcp_dbf_out(p, "failed", "0x%02x", r->failed); |
410 | zfcp_dbf_out(p, "status_type", "0x%08x", r->status_type); | 368 | zfcp_dbf_out(p, "status_type", "0x%08x", r->status_type); |
@@ -416,14 +374,14 @@ static void zfcp_hba_dbf_view_status(char **p, | |||
416 | r->payload_size); | 374 | r->payload_size); |
417 | } | 375 | } |
418 | 376 | ||
419 | static void zfcp_hba_dbf_view_qdio(char **p, struct zfcp_hba_dbf_record_qdio *r) | 377 | static void zfcp_dbf_hba_view_qdio(char **p, struct zfcp_dbf_hba_record_qdio *r) |
420 | { | 378 | { |
421 | zfcp_dbf_out(p, "qdio_error", "0x%08x", r->qdio_error); | 379 | zfcp_dbf_out(p, "qdio_error", "0x%08x", r->qdio_error); |
422 | zfcp_dbf_out(p, "sbal_index", "0x%02x", r->sbal_index); | 380 | zfcp_dbf_out(p, "sbal_index", "0x%02x", r->sbal_index); |
423 | zfcp_dbf_out(p, "sbal_count", "0x%02x", r->sbal_count); | 381 | zfcp_dbf_out(p, "sbal_count", "0x%02x", r->sbal_count); |
424 | } | 382 | } |
425 | 383 | ||
426 | static void zfcp_hba_dbf_view_berr(char **p, struct fsf_bit_error_payload *r) | 384 | static void zfcp_dbf_hba_view_berr(char **p, struct fsf_bit_error_payload *r) |
427 | { | 385 | { |
428 | zfcp_dbf_out(p, "link_failures", "%d", r->link_failure_error_count); | 386 | zfcp_dbf_out(p, "link_failures", "%d", r->link_failure_error_count); |
429 | zfcp_dbf_out(p, "loss_of_sync_err", "%d", r->loss_of_sync_error_count); | 387 | zfcp_dbf_out(p, "loss_of_sync_err", "%d", r->loss_of_sync_error_count); |
@@ -447,10 +405,10 @@ static void zfcp_hba_dbf_view_berr(char **p, struct fsf_bit_error_payload *r) | |||
447 | r->current_transmit_b2b_credit); | 405 | r->current_transmit_b2b_credit); |
448 | } | 406 | } |
449 | 407 | ||
450 | static int zfcp_hba_dbf_view_format(debug_info_t *id, struct debug_view *view, | 408 | static int zfcp_dbf_hba_view_format(debug_info_t *id, struct debug_view *view, |
451 | char *out_buf, const char *in_buf) | 409 | char *out_buf, const char *in_buf) |
452 | { | 410 | { |
453 | struct zfcp_hba_dbf_record *r = (struct zfcp_hba_dbf_record *)in_buf; | 411 | struct zfcp_dbf_hba_record *r = (struct zfcp_dbf_hba_record *)in_buf; |
454 | char *p = out_buf; | 412 | char *p = out_buf; |
455 | 413 | ||
456 | if (strncmp(r->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0) | 414 | if (strncmp(r->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0) |
@@ -461,45 +419,42 @@ static int zfcp_hba_dbf_view_format(debug_info_t *id, struct debug_view *view, | |||
461 | zfcp_dbf_tag(&p, "tag2", r->tag2); | 419 | zfcp_dbf_tag(&p, "tag2", r->tag2); |
462 | 420 | ||
463 | if (strncmp(r->tag, "resp", ZFCP_DBF_TAG_SIZE) == 0) | 421 | if (strncmp(r->tag, "resp", ZFCP_DBF_TAG_SIZE) == 0) |
464 | zfcp_hba_dbf_view_response(&p, &r->u.response); | 422 | zfcp_dbf_hba_view_response(&p, &r->u.response); |
465 | else if (strncmp(r->tag, "stat", ZFCP_DBF_TAG_SIZE) == 0) | 423 | else if (strncmp(r->tag, "stat", ZFCP_DBF_TAG_SIZE) == 0) |
466 | zfcp_hba_dbf_view_status(&p, &r->u.status); | 424 | zfcp_dbf_hba_view_status(&p, &r->u.status); |
467 | else if (strncmp(r->tag, "qdio", ZFCP_DBF_TAG_SIZE) == 0) | 425 | else if (strncmp(r->tag, "qdio", ZFCP_DBF_TAG_SIZE) == 0) |
468 | zfcp_hba_dbf_view_qdio(&p, &r->u.qdio); | 426 | zfcp_dbf_hba_view_qdio(&p, &r->u.qdio); |
469 | else if (strncmp(r->tag, "berr", ZFCP_DBF_TAG_SIZE) == 0) | 427 | else if (strncmp(r->tag, "berr", ZFCP_DBF_TAG_SIZE) == 0) |
470 | zfcp_hba_dbf_view_berr(&p, &r->u.berr); | 428 | zfcp_dbf_hba_view_berr(&p, &r->u.berr); |
471 | 429 | ||
472 | if (strncmp(r->tag, "resp", ZFCP_DBF_TAG_SIZE) != 0) | 430 | if (strncmp(r->tag, "resp", ZFCP_DBF_TAG_SIZE) != 0) |
473 | p += sprintf(p, "\n"); | 431 | p += sprintf(p, "\n"); |
474 | return p - out_buf; | 432 | return p - out_buf; |
475 | } | 433 | } |
476 | 434 | ||
477 | static struct debug_view zfcp_hba_dbf_view = { | 435 | static struct debug_view zfcp_dbf_hba_view = { |
478 | "structured", | 436 | .name = "structured", |
479 | NULL, | 437 | .header_proc = zfcp_dbf_view_header, |
480 | &zfcp_dbf_view_header, | 438 | .format_proc = zfcp_dbf_hba_view_format, |
481 | &zfcp_hba_dbf_view_format, | ||
482 | NULL, | ||
483 | NULL | ||
484 | }; | 439 | }; |
485 | 440 | ||
486 | static const char *zfcp_rec_dbf_tags[] = { | 441 | static const char *zfcp_dbf_rec_tags[] = { |
487 | [ZFCP_REC_DBF_ID_THREAD] = "thread", | 442 | [ZFCP_REC_DBF_ID_THREAD] = "thread", |
488 | [ZFCP_REC_DBF_ID_TARGET] = "target", | 443 | [ZFCP_REC_DBF_ID_TARGET] = "target", |
489 | [ZFCP_REC_DBF_ID_TRIGGER] = "trigger", | 444 | [ZFCP_REC_DBF_ID_TRIGGER] = "trigger", |
490 | [ZFCP_REC_DBF_ID_ACTION] = "action", | 445 | [ZFCP_REC_DBF_ID_ACTION] = "action", |
491 | }; | 446 | }; |
492 | 447 | ||
493 | static int zfcp_rec_dbf_view_format(debug_info_t *id, struct debug_view *view, | 448 | static int zfcp_dbf_rec_view_format(debug_info_t *id, struct debug_view *view, |
494 | char *buf, const char *_rec) | 449 | char *buf, const char *_rec) |
495 | { | 450 | { |
496 | struct zfcp_rec_dbf_record *r = (struct zfcp_rec_dbf_record *)_rec; | 451 | struct zfcp_dbf_rec_record *r = (struct zfcp_dbf_rec_record *)_rec; |
497 | char *p = buf; | 452 | char *p = buf; |
498 | char hint[ZFCP_DBF_ID_SIZE + 1]; | 453 | char hint[ZFCP_DBF_ID_SIZE + 1]; |
499 | 454 | ||
500 | memcpy(hint, r->id2, ZFCP_DBF_ID_SIZE); | 455 | memcpy(hint, r->id2, ZFCP_DBF_ID_SIZE); |
501 | hint[ZFCP_DBF_ID_SIZE] = 0; | 456 | hint[ZFCP_DBF_ID_SIZE] = 0; |
502 | zfcp_dbf_outs(&p, "tag", zfcp_rec_dbf_tags[r->id]); | 457 | zfcp_dbf_outs(&p, "tag", zfcp_dbf_rec_tags[r->id]); |
503 | zfcp_dbf_outs(&p, "hint", hint); | 458 | zfcp_dbf_outs(&p, "hint", hint); |
504 | switch (r->id) { | 459 | switch (r->id) { |
505 | case ZFCP_REC_DBF_ID_THREAD: | 460 | case ZFCP_REC_DBF_ID_THREAD: |
@@ -537,24 +492,22 @@ static int zfcp_rec_dbf_view_format(debug_info_t *id, struct debug_view *view, | |||
537 | return p - buf; | 492 | return p - buf; |
538 | } | 493 | } |
539 | 494 | ||
540 | static struct debug_view zfcp_rec_dbf_view = { | 495 | static struct debug_view zfcp_dbf_rec_view = { |
541 | "structured", | 496 | .name = "structured", |
542 | NULL, | 497 | .header_proc = zfcp_dbf_view_header, |
543 | &zfcp_dbf_view_header, | 498 | .format_proc = zfcp_dbf_rec_view_format, |
544 | &zfcp_rec_dbf_view_format, | ||
545 | NULL, | ||
546 | NULL | ||
547 | }; | 499 | }; |
548 | 500 | ||
549 | /** | 501 | /** |
550 | * zfcp_rec_dbf_event_thread - trace event related to recovery thread operation | 502 | * zfcp_dbf_rec_thread - trace event related to recovery thread operation |
551 | * @id2: identifier for event | 503 | * @id2: identifier for event |
552 | * @adapter: adapter | 504 | * @dbf: reference to dbf structure |
553 | * This function assumes that the caller is holding erp_lock. | 505 | * This function assumes that the caller is holding erp_lock. |
554 | */ | 506 | */ |
555 | void zfcp_rec_dbf_event_thread(char *id2, struct zfcp_adapter *adapter) | 507 | void zfcp_dbf_rec_thread(char *id2, struct zfcp_dbf *dbf) |
556 | { | 508 | { |
557 | struct zfcp_rec_dbf_record *r = &adapter->rec_dbf_buf; | 509 | struct zfcp_adapter *adapter = dbf->adapter; |
510 | struct zfcp_dbf_rec_record *r = &dbf->rec_buf; | ||
558 | unsigned long flags = 0; | 511 | unsigned long flags = 0; |
559 | struct list_head *entry; | 512 | struct list_head *entry; |
560 | unsigned ready = 0, running = 0, total; | 513 | unsigned ready = 0, running = 0, total; |
@@ -565,41 +518,41 @@ void zfcp_rec_dbf_event_thread(char *id2, struct zfcp_adapter *adapter) | |||
565 | running++; | 518 | running++; |
566 | total = adapter->erp_total_count; | 519 | total = adapter->erp_total_count; |
567 | 520 | ||
568 | spin_lock_irqsave(&adapter->rec_dbf_lock, flags); | 521 | spin_lock_irqsave(&dbf->rec_lock, flags); |
569 | memset(r, 0, sizeof(*r)); | 522 | memset(r, 0, sizeof(*r)); |
570 | r->id = ZFCP_REC_DBF_ID_THREAD; | 523 | r->id = ZFCP_REC_DBF_ID_THREAD; |
571 | memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE); | 524 | memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE); |
572 | r->u.thread.total = total; | 525 | r->u.thread.total = total; |
573 | r->u.thread.ready = ready; | 526 | r->u.thread.ready = ready; |
574 | r->u.thread.running = running; | 527 | r->u.thread.running = running; |
575 | debug_event(adapter->rec_dbf, 6, r, sizeof(*r)); | 528 | debug_event(dbf->rec, 6, r, sizeof(*r)); |
576 | spin_unlock_irqrestore(&adapter->rec_dbf_lock, flags); | 529 | spin_unlock_irqrestore(&dbf->rec_lock, flags); |
577 | } | 530 | } |
578 | 531 | ||
579 | /** | 532 | /** |
580 | * zfcp_rec_dbf_event_thread - trace event related to recovery thread operation | 533 | * zfcp_dbf_rec_thread - trace event related to recovery thread operation |
581 | * @id2: identifier for event | 534 | * @id2: identifier for event |
582 | * @adapter: adapter | 535 | * @adapter: adapter |
583 | * This function assumes that the caller does not hold erp_lock. | 536 | * This function assumes that the caller does not hold erp_lock. |
584 | */ | 537 | */ |
585 | void zfcp_rec_dbf_event_thread_lock(char *id2, struct zfcp_adapter *adapter) | 538 | void zfcp_dbf_rec_thread_lock(char *id2, struct zfcp_dbf *dbf) |
586 | { | 539 | { |
540 | struct zfcp_adapter *adapter = dbf->adapter; | ||
587 | unsigned long flags; | 541 | unsigned long flags; |
588 | 542 | ||
589 | read_lock_irqsave(&adapter->erp_lock, flags); | 543 | read_lock_irqsave(&adapter->erp_lock, flags); |
590 | zfcp_rec_dbf_event_thread(id2, adapter); | 544 | zfcp_dbf_rec_thread(id2, dbf); |
591 | read_unlock_irqrestore(&adapter->erp_lock, flags); | 545 | read_unlock_irqrestore(&adapter->erp_lock, flags); |
592 | } | 546 | } |
593 | 547 | ||
594 | static void zfcp_rec_dbf_event_target(char *id2, void *ref, | 548 | static void zfcp_dbf_rec_target(char *id2, void *ref, struct zfcp_dbf *dbf, |
595 | struct zfcp_adapter *adapter, | 549 | atomic_t *status, atomic_t *erp_count, u64 wwpn, |
596 | atomic_t *status, atomic_t *erp_count, | 550 | u32 d_id, u64 fcp_lun) |
597 | u64 wwpn, u32 d_id, u64 fcp_lun) | ||
598 | { | 551 | { |
599 | struct zfcp_rec_dbf_record *r = &adapter->rec_dbf_buf; | 552 | struct zfcp_dbf_rec_record *r = &dbf->rec_buf; |
600 | unsigned long flags; | 553 | unsigned long flags; |
601 | 554 | ||
602 | spin_lock_irqsave(&adapter->rec_dbf_lock, flags); | 555 | spin_lock_irqsave(&dbf->rec_lock, flags); |
603 | memset(r, 0, sizeof(*r)); | 556 | memset(r, 0, sizeof(*r)); |
604 | r->id = ZFCP_REC_DBF_ID_TARGET; | 557 | r->id = ZFCP_REC_DBF_ID_TARGET; |
605 | memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE); | 558 | memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE); |
@@ -609,56 +562,57 @@ static void zfcp_rec_dbf_event_target(char *id2, void *ref, | |||
609 | r->u.target.d_id = d_id; | 562 | r->u.target.d_id = d_id; |
610 | r->u.target.fcp_lun = fcp_lun; | 563 | r->u.target.fcp_lun = fcp_lun; |
611 | r->u.target.erp_count = atomic_read(erp_count); | 564 | r->u.target.erp_count = atomic_read(erp_count); |
612 | debug_event(adapter->rec_dbf, 3, r, sizeof(*r)); | 565 | debug_event(dbf->rec, 3, r, sizeof(*r)); |
613 | spin_unlock_irqrestore(&adapter->rec_dbf_lock, flags); | 566 | spin_unlock_irqrestore(&dbf->rec_lock, flags); |
614 | } | 567 | } |
615 | 568 | ||
616 | /** | 569 | /** |
617 | * zfcp_rec_dbf_event_adapter - trace event for adapter state change | 570 | * zfcp_dbf_rec_adapter - trace event for adapter state change |
618 | * @id: identifier for trigger of state change | 571 | * @id: identifier for trigger of state change |
619 | * @ref: additional reference (e.g. request) | 572 | * @ref: additional reference (e.g. request) |
620 | * @adapter: adapter | 573 | * @dbf: reference to dbf structure |
621 | */ | 574 | */ |
622 | void zfcp_rec_dbf_event_adapter(char *id, void *ref, | 575 | void zfcp_dbf_rec_adapter(char *id, void *ref, struct zfcp_dbf *dbf) |
623 | struct zfcp_adapter *adapter) | ||
624 | { | 576 | { |
625 | zfcp_rec_dbf_event_target(id, ref, adapter, &adapter->status, | 577 | struct zfcp_adapter *adapter = dbf->adapter; |
578 | |||
579 | zfcp_dbf_rec_target(id, ref, dbf, &adapter->status, | ||
626 | &adapter->erp_counter, 0, 0, 0); | 580 | &adapter->erp_counter, 0, 0, 0); |
627 | } | 581 | } |
628 | 582 | ||
629 | /** | 583 | /** |
630 | * zfcp_rec_dbf_event_port - trace event for port state change | 584 | * zfcp_dbf_rec_port - trace event for port state change |
631 | * @id: identifier for trigger of state change | 585 | * @id: identifier for trigger of state change |
632 | * @ref: additional reference (e.g. request) | 586 | * @ref: additional reference (e.g. request) |
633 | * @port: port | 587 | * @port: port |
634 | */ | 588 | */ |
635 | void zfcp_rec_dbf_event_port(char *id, void *ref, struct zfcp_port *port) | 589 | void zfcp_dbf_rec_port(char *id, void *ref, struct zfcp_port *port) |
636 | { | 590 | { |
637 | struct zfcp_adapter *adapter = port->adapter; | 591 | struct zfcp_dbf *dbf = port->adapter->dbf; |
638 | 592 | ||
639 | zfcp_rec_dbf_event_target(id, ref, adapter, &port->status, | 593 | zfcp_dbf_rec_target(id, ref, dbf, &port->status, |
640 | &port->erp_counter, port->wwpn, port->d_id, | 594 | &port->erp_counter, port->wwpn, port->d_id, |
641 | 0); | 595 | 0); |
642 | } | 596 | } |
643 | 597 | ||
644 | /** | 598 | /** |
645 | * zfcp_rec_dbf_event_unit - trace event for unit state change | 599 | * zfcp_dbf_rec_unit - trace event for unit state change |
646 | * @id: identifier for trigger of state change | 600 | * @id: identifier for trigger of state change |
647 | * @ref: additional reference (e.g. request) | 601 | * @ref: additional reference (e.g. request) |
648 | * @unit: unit | 602 | * @unit: unit |
649 | */ | 603 | */ |
650 | void zfcp_rec_dbf_event_unit(char *id, void *ref, struct zfcp_unit *unit) | 604 | void zfcp_dbf_rec_unit(char *id, void *ref, struct zfcp_unit *unit) |
651 | { | 605 | { |
652 | struct zfcp_port *port = unit->port; | 606 | struct zfcp_port *port = unit->port; |
653 | struct zfcp_adapter *adapter = port->adapter; | 607 | struct zfcp_dbf *dbf = port->adapter->dbf; |
654 | 608 | ||
655 | zfcp_rec_dbf_event_target(id, ref, adapter, &unit->status, | 609 | zfcp_dbf_rec_target(id, ref, dbf, &unit->status, |
656 | &unit->erp_counter, port->wwpn, port->d_id, | 610 | &unit->erp_counter, port->wwpn, port->d_id, |
657 | unit->fcp_lun); | 611 | unit->fcp_lun); |
658 | } | 612 | } |
659 | 613 | ||
660 | /** | 614 | /** |
661 | * zfcp_rec_dbf_event_trigger - trace event for triggered error recovery | 615 | * zfcp_dbf_rec_trigger - trace event for triggered error recovery |
662 | * @id2: identifier for error recovery trigger | 616 | * @id2: identifier for error recovery trigger |
663 | * @ref: additional reference (e.g. request) | 617 | * @ref: additional reference (e.g. request) |
664 | * @want: originally requested error recovery action | 618 | * @want: originally requested error recovery action |
@@ -668,14 +622,15 @@ void zfcp_rec_dbf_event_unit(char *id, void *ref, struct zfcp_unit *unit) | |||
668 | * @port: port | 622 | * @port: port |
669 | * @unit: unit | 623 | * @unit: unit |
670 | */ | 624 | */ |
671 | void zfcp_rec_dbf_event_trigger(char *id2, void *ref, u8 want, u8 need, | 625 | void zfcp_dbf_rec_trigger(char *id2, void *ref, u8 want, u8 need, void *action, |
672 | void *action, struct zfcp_adapter *adapter, | 626 | struct zfcp_adapter *adapter, struct zfcp_port *port, |
673 | struct zfcp_port *port, struct zfcp_unit *unit) | 627 | struct zfcp_unit *unit) |
674 | { | 628 | { |
675 | struct zfcp_rec_dbf_record *r = &adapter->rec_dbf_buf; | 629 | struct zfcp_dbf *dbf = adapter->dbf; |
630 | struct zfcp_dbf_rec_record *r = &dbf->rec_buf; | ||
676 | unsigned long flags; | 631 | unsigned long flags; |
677 | 632 | ||
678 | spin_lock_irqsave(&adapter->rec_dbf_lock, flags); | 633 | spin_lock_irqsave(&dbf->rec_lock, flags); |
679 | memset(r, 0, sizeof(*r)); | 634 | memset(r, 0, sizeof(*r)); |
680 | r->id = ZFCP_REC_DBF_ID_TRIGGER; | 635 | r->id = ZFCP_REC_DBF_ID_TRIGGER; |
681 | memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE); | 636 | memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE); |
@@ -692,22 +647,22 @@ void zfcp_rec_dbf_event_trigger(char *id2, void *ref, u8 want, u8 need, | |||
692 | r->u.trigger.us = atomic_read(&unit->status); | 647 | r->u.trigger.us = atomic_read(&unit->status); |
693 | r->u.trigger.fcp_lun = unit->fcp_lun; | 648 | r->u.trigger.fcp_lun = unit->fcp_lun; |
694 | } | 649 | } |
695 | debug_event(adapter->rec_dbf, action ? 1 : 4, r, sizeof(*r)); | 650 | debug_event(dbf->rec, action ? 1 : 4, r, sizeof(*r)); |
696 | spin_unlock_irqrestore(&adapter->rec_dbf_lock, flags); | 651 | spin_unlock_irqrestore(&dbf->rec_lock, flags); |
697 | } | 652 | } |
698 | 653 | ||
699 | /** | 654 | /** |
700 | * zfcp_rec_dbf_event_action - trace event showing progress of recovery action | 655 | * zfcp_dbf_rec_action - trace event showing progress of recovery action |
701 | * @id2: identifier | 656 | * @id2: identifier |
702 | * @erp_action: error recovery action struct pointer | 657 | * @erp_action: error recovery action struct pointer |
703 | */ | 658 | */ |
704 | void zfcp_rec_dbf_event_action(char *id2, struct zfcp_erp_action *erp_action) | 659 | void zfcp_dbf_rec_action(char *id2, struct zfcp_erp_action *erp_action) |
705 | { | 660 | { |
706 | struct zfcp_adapter *adapter = erp_action->adapter; | 661 | struct zfcp_dbf *dbf = erp_action->adapter->dbf; |
707 | struct zfcp_rec_dbf_record *r = &adapter->rec_dbf_buf; | 662 | struct zfcp_dbf_rec_record *r = &dbf->rec_buf; |
708 | unsigned long flags; | 663 | unsigned long flags; |
709 | 664 | ||
710 | spin_lock_irqsave(&adapter->rec_dbf_lock, flags); | 665 | spin_lock_irqsave(&dbf->rec_lock, flags); |
711 | memset(r, 0, sizeof(*r)); | 666 | memset(r, 0, sizeof(*r)); |
712 | r->id = ZFCP_REC_DBF_ID_ACTION; | 667 | r->id = ZFCP_REC_DBF_ID_ACTION; |
713 | memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE); | 668 | memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE); |
@@ -715,26 +670,27 @@ void zfcp_rec_dbf_event_action(char *id2, struct zfcp_erp_action *erp_action) | |||
715 | r->u.action.status = erp_action->status; | 670 | r->u.action.status = erp_action->status; |
716 | r->u.action.step = erp_action->step; | 671 | r->u.action.step = erp_action->step; |
717 | r->u.action.fsf_req = (unsigned long)erp_action->fsf_req; | 672 | r->u.action.fsf_req = (unsigned long)erp_action->fsf_req; |
718 | debug_event(adapter->rec_dbf, 5, r, sizeof(*r)); | 673 | debug_event(dbf->rec, 5, r, sizeof(*r)); |
719 | spin_unlock_irqrestore(&adapter->rec_dbf_lock, flags); | 674 | spin_unlock_irqrestore(&dbf->rec_lock, flags); |
720 | } | 675 | } |
721 | 676 | ||
722 | /** | 677 | /** |
723 | * zfcp_san_dbf_event_ct_request - trace event for issued CT request | 678 | * zfcp_dbf_san_ct_request - trace event for issued CT request |
724 | * @fsf_req: request containing issued CT data | 679 | * @fsf_req: request containing issued CT data |
725 | */ | 680 | */ |
726 | void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req) | 681 | void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *fsf_req) |
727 | { | 682 | { |
728 | struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data; | 683 | struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data; |
729 | struct zfcp_wka_port *wka_port = ct->wka_port; | 684 | struct zfcp_wka_port *wka_port = ct->wka_port; |
730 | struct zfcp_adapter *adapter = wka_port->adapter; | 685 | struct zfcp_adapter *adapter = wka_port->adapter; |
686 | struct zfcp_dbf *dbf = adapter->dbf; | ||
731 | struct ct_hdr *hdr = sg_virt(ct->req); | 687 | struct ct_hdr *hdr = sg_virt(ct->req); |
732 | struct zfcp_san_dbf_record *r = &adapter->san_dbf_buf; | 688 | struct zfcp_dbf_san_record *r = &dbf->san_buf; |
733 | struct zfcp_san_dbf_record_ct_request *oct = &r->u.ct_req; | 689 | struct zfcp_dbf_san_record_ct_request *oct = &r->u.ct_req; |
734 | int level = 3; | 690 | int level = 3; |
735 | unsigned long flags; | 691 | unsigned long flags; |
736 | 692 | ||
737 | spin_lock_irqsave(&adapter->san_dbf_lock, flags); | 693 | spin_lock_irqsave(&dbf->san_lock, flags); |
738 | memset(r, 0, sizeof(*r)); | 694 | memset(r, 0, sizeof(*r)); |
739 | strncpy(r->tag, "octc", ZFCP_DBF_TAG_SIZE); | 695 | strncpy(r->tag, "octc", ZFCP_DBF_TAG_SIZE); |
740 | r->fsf_reqid = fsf_req->req_id; | 696 | r->fsf_reqid = fsf_req->req_id; |
@@ -749,28 +705,29 @@ void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req) | |||
749 | oct->max_res_size = hdr->max_res_size; | 705 | oct->max_res_size = hdr->max_res_size; |
750 | oct->len = min((int)ct->req->length - (int)sizeof(struct ct_hdr), | 706 | oct->len = min((int)ct->req->length - (int)sizeof(struct ct_hdr), |
751 | ZFCP_DBF_SAN_MAX_PAYLOAD); | 707 | ZFCP_DBF_SAN_MAX_PAYLOAD); |
752 | debug_event(adapter->san_dbf, level, r, sizeof(*r)); | 708 | debug_event(dbf->san, level, r, sizeof(*r)); |
753 | zfcp_dbf_hexdump(adapter->san_dbf, r, sizeof(*r), level, | 709 | zfcp_dbf_hexdump(dbf->san, r, sizeof(*r), level, |
754 | (void *)hdr + sizeof(struct ct_hdr), oct->len); | 710 | (void *)hdr + sizeof(struct ct_hdr), oct->len); |
755 | spin_unlock_irqrestore(&adapter->san_dbf_lock, flags); | 711 | spin_unlock_irqrestore(&dbf->san_lock, flags); |
756 | } | 712 | } |
757 | 713 | ||
758 | /** | 714 | /** |
759 | * zfcp_san_dbf_event_ct_response - trace event for completion of CT request | 715 | * zfcp_dbf_san_ct_response - trace event for completion of CT request |
760 | * @fsf_req: request containing CT response | 716 | * @fsf_req: request containing CT response |
761 | */ | 717 | */ |
762 | void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req) | 718 | void zfcp_dbf_san_ct_response(struct zfcp_fsf_req *fsf_req) |
763 | { | 719 | { |
764 | struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data; | 720 | struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data; |
765 | struct zfcp_wka_port *wka_port = ct->wka_port; | 721 | struct zfcp_wka_port *wka_port = ct->wka_port; |
766 | struct zfcp_adapter *adapter = wka_port->adapter; | 722 | struct zfcp_adapter *adapter = wka_port->adapter; |
767 | struct ct_hdr *hdr = sg_virt(ct->resp); | 723 | struct ct_hdr *hdr = sg_virt(ct->resp); |
768 | struct zfcp_san_dbf_record *r = &adapter->san_dbf_buf; | 724 | struct zfcp_dbf *dbf = adapter->dbf; |
769 | struct zfcp_san_dbf_record_ct_response *rct = &r->u.ct_resp; | 725 | struct zfcp_dbf_san_record *r = &dbf->san_buf; |
726 | struct zfcp_dbf_san_record_ct_response *rct = &r->u.ct_resp; | ||
770 | int level = 3; | 727 | int level = 3; |
771 | unsigned long flags; | 728 | unsigned long flags; |
772 | 729 | ||
773 | spin_lock_irqsave(&adapter->san_dbf_lock, flags); | 730 | spin_lock_irqsave(&dbf->san_lock, flags); |
774 | memset(r, 0, sizeof(*r)); | 731 | memset(r, 0, sizeof(*r)); |
775 | strncpy(r->tag, "rctc", ZFCP_DBF_TAG_SIZE); | 732 | strncpy(r->tag, "rctc", ZFCP_DBF_TAG_SIZE); |
776 | r->fsf_reqid = fsf_req->req_id; | 733 | r->fsf_reqid = fsf_req->req_id; |
@@ -785,22 +742,22 @@ void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req) | |||
785 | rct->max_res_size = hdr->max_res_size; | 742 | rct->max_res_size = hdr->max_res_size; |
786 | rct->len = min((int)ct->resp->length - (int)sizeof(struct ct_hdr), | 743 | rct->len = min((int)ct->resp->length - (int)sizeof(struct ct_hdr), |
787 | ZFCP_DBF_SAN_MAX_PAYLOAD); | 744 | ZFCP_DBF_SAN_MAX_PAYLOAD); |
788 | debug_event(adapter->san_dbf, level, r, sizeof(*r)); | 745 | debug_event(dbf->san, level, r, sizeof(*r)); |
789 | zfcp_dbf_hexdump(adapter->san_dbf, r, sizeof(*r), level, | 746 | zfcp_dbf_hexdump(dbf->san, r, sizeof(*r), level, |
790 | (void *)hdr + sizeof(struct ct_hdr), rct->len); | 747 | (void *)hdr + sizeof(struct ct_hdr), rct->len); |
791 | spin_unlock_irqrestore(&adapter->san_dbf_lock, flags); | 748 | spin_unlock_irqrestore(&dbf->san_lock, flags); |
792 | } | 749 | } |
793 | 750 | ||
794 | static void zfcp_san_dbf_event_els(const char *tag, int level, | 751 | static void zfcp_dbf_san_els(const char *tag, int level, |
795 | struct zfcp_fsf_req *fsf_req, u32 s_id, | 752 | struct zfcp_fsf_req *fsf_req, u32 s_id, u32 d_id, |
796 | u32 d_id, u8 ls_code, void *buffer, | 753 | u8 ls_code, void *buffer, int buflen) |
797 | int buflen) | ||
798 | { | 754 | { |
799 | struct zfcp_adapter *adapter = fsf_req->adapter; | 755 | struct zfcp_adapter *adapter = fsf_req->adapter; |
800 | struct zfcp_san_dbf_record *rec = &adapter->san_dbf_buf; | 756 | struct zfcp_dbf *dbf = adapter->dbf; |
757 | struct zfcp_dbf_san_record *rec = &dbf->san_buf; | ||
801 | unsigned long flags; | 758 | unsigned long flags; |
802 | 759 | ||
803 | spin_lock_irqsave(&adapter->san_dbf_lock, flags); | 760 | spin_lock_irqsave(&dbf->san_lock, flags); |
804 | memset(rec, 0, sizeof(*rec)); | 761 | memset(rec, 0, sizeof(*rec)); |
805 | strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE); | 762 | strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE); |
806 | rec->fsf_reqid = fsf_req->req_id; | 763 | rec->fsf_reqid = fsf_req->req_id; |
@@ -808,45 +765,45 @@ static void zfcp_san_dbf_event_els(const char *tag, int level, | |||
808 | rec->s_id = s_id; | 765 | rec->s_id = s_id; |
809 | rec->d_id = d_id; | 766 | rec->d_id = d_id; |
810 | rec->u.els.ls_code = ls_code; | 767 | rec->u.els.ls_code = ls_code; |
811 | debug_event(adapter->san_dbf, level, rec, sizeof(*rec)); | 768 | debug_event(dbf->san, level, rec, sizeof(*rec)); |
812 | zfcp_dbf_hexdump(adapter->san_dbf, rec, sizeof(*rec), level, | 769 | zfcp_dbf_hexdump(dbf->san, rec, sizeof(*rec), level, |
813 | buffer, min(buflen, ZFCP_DBF_SAN_MAX_PAYLOAD)); | 770 | buffer, min(buflen, ZFCP_DBF_SAN_MAX_PAYLOAD)); |
814 | spin_unlock_irqrestore(&adapter->san_dbf_lock, flags); | 771 | spin_unlock_irqrestore(&dbf->san_lock, flags); |
815 | } | 772 | } |
816 | 773 | ||
817 | /** | 774 | /** |
818 | * zfcp_san_dbf_event_els_request - trace event for issued ELS | 775 | * zfcp_dbf_san_els_request - trace event for issued ELS |
819 | * @fsf_req: request containing issued ELS | 776 | * @fsf_req: request containing issued ELS |
820 | */ | 777 | */ |
821 | void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *fsf_req) | 778 | void zfcp_dbf_san_els_request(struct zfcp_fsf_req *fsf_req) |
822 | { | 779 | { |
823 | struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data; | 780 | struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data; |
824 | 781 | ||
825 | zfcp_san_dbf_event_els("oels", 2, fsf_req, | 782 | zfcp_dbf_san_els("oels", 2, fsf_req, |
826 | fc_host_port_id(els->adapter->scsi_host), | 783 | fc_host_port_id(els->adapter->scsi_host), |
827 | els->d_id, *(u8 *) sg_virt(els->req), | 784 | els->d_id, *(u8 *) sg_virt(els->req), |
828 | sg_virt(els->req), els->req->length); | 785 | sg_virt(els->req), els->req->length); |
829 | } | 786 | } |
830 | 787 | ||
831 | /** | 788 | /** |
832 | * zfcp_san_dbf_event_els_response - trace event for completed ELS | 789 | * zfcp_dbf_san_els_response - trace event for completed ELS |
833 | * @fsf_req: request containing ELS response | 790 | * @fsf_req: request containing ELS response |
834 | */ | 791 | */ |
835 | void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *fsf_req) | 792 | void zfcp_dbf_san_els_response(struct zfcp_fsf_req *fsf_req) |
836 | { | 793 | { |
837 | struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data; | 794 | struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data; |
838 | 795 | ||
839 | zfcp_san_dbf_event_els("rels", 2, fsf_req, els->d_id, | 796 | zfcp_dbf_san_els("rels", 2, fsf_req, els->d_id, |
840 | fc_host_port_id(els->adapter->scsi_host), | 797 | fc_host_port_id(els->adapter->scsi_host), |
841 | *(u8 *)sg_virt(els->req), sg_virt(els->resp), | 798 | *(u8 *)sg_virt(els->req), sg_virt(els->resp), |
842 | els->resp->length); | 799 | els->resp->length); |
843 | } | 800 | } |
844 | 801 | ||
845 | /** | 802 | /** |
846 | * zfcp_san_dbf_event_incoming_els - trace event for incoming ELS | 803 | * zfcp_dbf_san_incoming_els - trace event for incoming ELS |
847 | * @fsf_req: request containing unsolicited status buffer with incoming ELS | 804 | * @fsf_req: request containing unsolicited status buffer with incoming ELS |
848 | */ | 805 | */ |
849 | void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *fsf_req) | 806 | void zfcp_dbf_san_incoming_els(struct zfcp_fsf_req *fsf_req) |
850 | { | 807 | { |
851 | struct zfcp_adapter *adapter = fsf_req->adapter; | 808 | struct zfcp_adapter *adapter = fsf_req->adapter; |
852 | struct fsf_status_read_buffer *buf = | 809 | struct fsf_status_read_buffer *buf = |
@@ -854,16 +811,16 @@ void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *fsf_req) | |||
854 | int length = (int)buf->length - | 811 | int length = (int)buf->length - |
855 | (int)((void *)&buf->payload - (void *)buf); | 812 | (int)((void *)&buf->payload - (void *)buf); |
856 | 813 | ||
857 | zfcp_san_dbf_event_els("iels", 1, fsf_req, buf->d_id, | 814 | zfcp_dbf_san_els("iels", 1, fsf_req, buf->d_id, |
858 | fc_host_port_id(adapter->scsi_host), | 815 | fc_host_port_id(adapter->scsi_host), |
859 | buf->payload.data[0], (void *)buf->payload.data, | 816 | buf->payload.data[0], (void *)buf->payload.data, |
860 | length); | 817 | length); |
861 | } | 818 | } |
862 | 819 | ||
863 | static int zfcp_san_dbf_view_format(debug_info_t *id, struct debug_view *view, | 820 | static int zfcp_dbf_san_view_format(debug_info_t *id, struct debug_view *view, |
864 | char *out_buf, const char *in_buf) | 821 | char *out_buf, const char *in_buf) |
865 | { | 822 | { |
866 | struct zfcp_san_dbf_record *r = (struct zfcp_san_dbf_record *)in_buf; | 823 | struct zfcp_dbf_san_record *r = (struct zfcp_dbf_san_record *)in_buf; |
867 | char *p = out_buf; | 824 | char *p = out_buf; |
868 | 825 | ||
869 | if (strncmp(r->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0) | 826 | if (strncmp(r->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0) |
@@ -876,7 +833,7 @@ static int zfcp_san_dbf_view_format(debug_info_t *id, struct debug_view *view, | |||
876 | zfcp_dbf_out(&p, "d_id", "0x%06x", r->d_id); | 833 | zfcp_dbf_out(&p, "d_id", "0x%06x", r->d_id); |
877 | 834 | ||
878 | if (strncmp(r->tag, "octc", ZFCP_DBF_TAG_SIZE) == 0) { | 835 | if (strncmp(r->tag, "octc", ZFCP_DBF_TAG_SIZE) == 0) { |
879 | struct zfcp_san_dbf_record_ct_request *ct = &r->u.ct_req; | 836 | struct zfcp_dbf_san_record_ct_request *ct = &r->u.ct_req; |
880 | zfcp_dbf_out(&p, "cmd_req_code", "0x%04x", ct->cmd_req_code); | 837 | zfcp_dbf_out(&p, "cmd_req_code", "0x%04x", ct->cmd_req_code); |
881 | zfcp_dbf_out(&p, "revision", "0x%02x", ct->revision); | 838 | zfcp_dbf_out(&p, "revision", "0x%02x", ct->revision); |
882 | zfcp_dbf_out(&p, "gs_type", "0x%02x", ct->gs_type); | 839 | zfcp_dbf_out(&p, "gs_type", "0x%02x", ct->gs_type); |
@@ -884,7 +841,7 @@ static int zfcp_san_dbf_view_format(debug_info_t *id, struct debug_view *view, | |||
884 | zfcp_dbf_out(&p, "options", "0x%02x", ct->options); | 841 | zfcp_dbf_out(&p, "options", "0x%02x", ct->options); |
885 | zfcp_dbf_out(&p, "max_res_size", "0x%04x", ct->max_res_size); | 842 | zfcp_dbf_out(&p, "max_res_size", "0x%04x", ct->max_res_size); |
886 | } else if (strncmp(r->tag, "rctc", ZFCP_DBF_TAG_SIZE) == 0) { | 843 | } else if (strncmp(r->tag, "rctc", ZFCP_DBF_TAG_SIZE) == 0) { |
887 | struct zfcp_san_dbf_record_ct_response *ct = &r->u.ct_resp; | 844 | struct zfcp_dbf_san_record_ct_response *ct = &r->u.ct_resp; |
888 | zfcp_dbf_out(&p, "cmd_rsp_code", "0x%04x", ct->cmd_rsp_code); | 845 | zfcp_dbf_out(&p, "cmd_rsp_code", "0x%04x", ct->cmd_rsp_code); |
889 | zfcp_dbf_out(&p, "revision", "0x%02x", ct->revision); | 846 | zfcp_dbf_out(&p, "revision", "0x%02x", ct->revision); |
890 | zfcp_dbf_out(&p, "reason_code", "0x%02x", ct->reason_code); | 847 | zfcp_dbf_out(&p, "reason_code", "0x%02x", ct->reason_code); |
@@ -894,35 +851,30 @@ static int zfcp_san_dbf_view_format(debug_info_t *id, struct debug_view *view, | |||
894 | } else if (strncmp(r->tag, "oels", ZFCP_DBF_TAG_SIZE) == 0 || | 851 | } else if (strncmp(r->tag, "oels", ZFCP_DBF_TAG_SIZE) == 0 || |
895 | strncmp(r->tag, "rels", ZFCP_DBF_TAG_SIZE) == 0 || | 852 | strncmp(r->tag, "rels", ZFCP_DBF_TAG_SIZE) == 0 || |
896 | strncmp(r->tag, "iels", ZFCP_DBF_TAG_SIZE) == 0) { | 853 | strncmp(r->tag, "iels", ZFCP_DBF_TAG_SIZE) == 0) { |
897 | struct zfcp_san_dbf_record_els *els = &r->u.els; | 854 | struct zfcp_dbf_san_record_els *els = &r->u.els; |
898 | zfcp_dbf_out(&p, "ls_code", "0x%02x", els->ls_code); | 855 | zfcp_dbf_out(&p, "ls_code", "0x%02x", els->ls_code); |
899 | } | 856 | } |
900 | return p - out_buf; | 857 | return p - out_buf; |
901 | } | 858 | } |
902 | 859 | ||
903 | static struct debug_view zfcp_san_dbf_view = { | 860 | static struct debug_view zfcp_dbf_san_view = { |
904 | "structured", | 861 | .name = "structured", |
905 | NULL, | 862 | .header_proc = zfcp_dbf_view_header, |
906 | &zfcp_dbf_view_header, | 863 | .format_proc = zfcp_dbf_san_view_format, |
907 | &zfcp_san_dbf_view_format, | ||
908 | NULL, | ||
909 | NULL | ||
910 | }; | 864 | }; |
911 | 865 | ||
912 | static void zfcp_scsi_dbf_event(const char *tag, const char *tag2, int level, | 866 | void _zfcp_dbf_scsi(const char *tag, const char *tag2, int level, |
913 | struct zfcp_adapter *adapter, | 867 | struct zfcp_dbf *dbf, struct scsi_cmnd *scsi_cmnd, |
914 | struct scsi_cmnd *scsi_cmnd, | 868 | struct zfcp_fsf_req *fsf_req, unsigned long old_req_id) |
915 | struct zfcp_fsf_req *fsf_req, | ||
916 | unsigned long old_req_id) | ||
917 | { | 869 | { |
918 | struct zfcp_scsi_dbf_record *rec = &adapter->scsi_dbf_buf; | 870 | struct zfcp_dbf_scsi_record *rec = &dbf->scsi_buf; |
919 | struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec; | 871 | struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec; |
920 | unsigned long flags; | 872 | unsigned long flags; |
921 | struct fcp_rsp_iu *fcp_rsp; | 873 | struct fcp_rsp_iu *fcp_rsp; |
922 | char *fcp_rsp_info = NULL, *fcp_sns_info = NULL; | 874 | char *fcp_rsp_info = NULL, *fcp_sns_info = NULL; |
923 | int offset = 0, buflen = 0; | 875 | int offset = 0, buflen = 0; |
924 | 876 | ||
925 | spin_lock_irqsave(&adapter->scsi_dbf_lock, flags); | 877 | spin_lock_irqsave(&dbf->scsi_lock, flags); |
926 | do { | 878 | do { |
927 | memset(rec, 0, sizeof(*rec)); | 879 | memset(rec, 0, sizeof(*rec)); |
928 | if (offset == 0) { | 880 | if (offset == 0) { |
@@ -976,68 +928,20 @@ static void zfcp_scsi_dbf_event(const char *tag, const char *tag2, int level, | |||
976 | dump->offset = offset; | 928 | dump->offset = offset; |
977 | dump->size = min(buflen - offset, | 929 | dump->size = min(buflen - offset, |
978 | (int)sizeof(struct | 930 | (int)sizeof(struct |
979 | zfcp_scsi_dbf_record) - | 931 | zfcp_dbf_scsi_record) - |
980 | (int)sizeof(struct zfcp_dbf_dump)); | 932 | (int)sizeof(struct zfcp_dbf_dump)); |
981 | memcpy(dump->data, fcp_sns_info + offset, dump->size); | 933 | memcpy(dump->data, fcp_sns_info + offset, dump->size); |
982 | offset += dump->size; | 934 | offset += dump->size; |
983 | } | 935 | } |
984 | debug_event(adapter->scsi_dbf, level, rec, sizeof(*rec)); | 936 | debug_event(dbf->scsi, level, rec, sizeof(*rec)); |
985 | } while (offset < buflen); | 937 | } while (offset < buflen); |
986 | spin_unlock_irqrestore(&adapter->scsi_dbf_lock, flags); | 938 | spin_unlock_irqrestore(&dbf->scsi_lock, flags); |
987 | } | ||
988 | |||
989 | /** | ||
990 | * zfcp_scsi_dbf_event_result - trace event for SCSI command completion | ||
991 | * @tag: tag indicating success or failure of SCSI command | ||
992 | * @level: trace level applicable for this event | ||
993 | * @adapter: adapter that has been used to issue the SCSI command | ||
994 | * @scsi_cmnd: SCSI command pointer | ||
995 | * @fsf_req: request used to issue SCSI command (might be NULL) | ||
996 | */ | ||
997 | void zfcp_scsi_dbf_event_result(const char *tag, int level, | ||
998 | struct zfcp_adapter *adapter, | ||
999 | struct scsi_cmnd *scsi_cmnd, | ||
1000 | struct zfcp_fsf_req *fsf_req) | ||
1001 | { | ||
1002 | zfcp_scsi_dbf_event("rslt", tag, level, adapter, scsi_cmnd, fsf_req, 0); | ||
1003 | } | 939 | } |
1004 | 940 | ||
1005 | /** | 941 | static int zfcp_dbf_scsi_view_format(debug_info_t *id, struct debug_view *view, |
1006 | * zfcp_scsi_dbf_event_abort - trace event for SCSI command abort | ||
1007 | * @tag: tag indicating success or failure of abort operation | ||
1008 | * @adapter: adapter that has been used to issue SCSI command to be aborted | ||
1009 | * @scsi_cmnd: SCSI command to be aborted | ||
1010 | * @new_fsf_req: request containing abort (might be NULL) | ||
1011 | * @old_req_id: identifier of request containing SCSI command to be aborted | ||
1012 | */ | ||
1013 | void zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter, | ||
1014 | struct scsi_cmnd *scsi_cmnd, | ||
1015 | struct zfcp_fsf_req *new_fsf_req, | ||
1016 | unsigned long old_req_id) | ||
1017 | { | ||
1018 | zfcp_scsi_dbf_event("abrt", tag, 1, adapter, scsi_cmnd, new_fsf_req, | ||
1019 | old_req_id); | ||
1020 | } | ||
1021 | |||
1022 | /** | ||
1023 | * zfcp_scsi_dbf_event_devreset - trace event for Logical Unit or Target Reset | ||
1024 | * @tag: tag indicating success or failure of reset operation | ||
1025 | * @flag: indicates type of reset (Target Reset, Logical Unit Reset) | ||
1026 | * @unit: unit that needs reset | ||
1027 | * @scsi_cmnd: SCSI command which caused this error recovery | ||
1028 | */ | ||
1029 | void zfcp_scsi_dbf_event_devreset(const char *tag, u8 flag, | ||
1030 | struct zfcp_unit *unit, | ||
1031 | struct scsi_cmnd *scsi_cmnd) | ||
1032 | { | ||
1033 | zfcp_scsi_dbf_event(flag == FCP_TARGET_RESET ? "trst" : "lrst", tag, 1, | ||
1034 | unit->port->adapter, scsi_cmnd, NULL, 0); | ||
1035 | } | ||
1036 | |||
1037 | static int zfcp_scsi_dbf_view_format(debug_info_t *id, struct debug_view *view, | ||
1038 | char *out_buf, const char *in_buf) | 942 | char *out_buf, const char *in_buf) |
1039 | { | 943 | { |
1040 | struct zfcp_scsi_dbf_record *r = (struct zfcp_scsi_dbf_record *)in_buf; | 944 | struct zfcp_dbf_scsi_record *r = (struct zfcp_dbf_scsi_record *)in_buf; |
1041 | struct timespec t; | 945 | struct timespec t; |
1042 | char *p = out_buf; | 946 | char *p = out_buf; |
1043 | 947 | ||
@@ -1059,7 +963,7 @@ static int zfcp_scsi_dbf_view_format(debug_info_t *id, struct debug_view *view, | |||
1059 | zfcp_dbf_out(&p, "old_fsf_reqid", "0x%0Lx", r->old_fsf_reqid); | 963 | zfcp_dbf_out(&p, "old_fsf_reqid", "0x%0Lx", r->old_fsf_reqid); |
1060 | zfcp_dbf_out(&p, "fsf_reqid", "0x%0Lx", r->fsf_reqid); | 964 | zfcp_dbf_out(&p, "fsf_reqid", "0x%0Lx", r->fsf_reqid); |
1061 | zfcp_dbf_out(&p, "fsf_seqno", "0x%08x", r->fsf_seqno); | 965 | zfcp_dbf_out(&p, "fsf_seqno", "0x%08x", r->fsf_seqno); |
1062 | zfcp_dbf_timestamp(r->fsf_issued, &t); | 966 | stck_to_timespec(r->fsf_issued, &t); |
1063 | zfcp_dbf_out(&p, "fsf_issued", "%011lu:%06lu", t.tv_sec, t.tv_nsec); | 967 | zfcp_dbf_out(&p, "fsf_issued", "%011lu:%06lu", t.tv_sec, t.tv_nsec); |
1064 | 968 | ||
1065 | if (strncmp(r->tag, "rslt", ZFCP_DBF_TAG_SIZE) == 0) { | 969 | if (strncmp(r->tag, "rslt", ZFCP_DBF_TAG_SIZE) == 0) { |
@@ -1078,84 +982,96 @@ static int zfcp_scsi_dbf_view_format(debug_info_t *id, struct debug_view *view, | |||
1078 | return p - out_buf; | 982 | return p - out_buf; |
1079 | } | 983 | } |
1080 | 984 | ||
1081 | static struct debug_view zfcp_scsi_dbf_view = { | 985 | static struct debug_view zfcp_dbf_scsi_view = { |
1082 | "structured", | 986 | .name = "structured", |
1083 | NULL, | 987 | .header_proc = zfcp_dbf_view_header, |
1084 | &zfcp_dbf_view_header, | 988 | .format_proc = zfcp_dbf_scsi_view_format, |
1085 | &zfcp_scsi_dbf_view_format, | ||
1086 | NULL, | ||
1087 | NULL | ||
1088 | }; | 989 | }; |
1089 | 990 | ||
991 | static debug_info_t *zfcp_dbf_reg(const char *name, int level, | ||
992 | struct debug_view *view, int size) | ||
993 | { | ||
994 | struct debug_info *d; | ||
995 | |||
996 | d = debug_register(name, dbfsize, level, size); | ||
997 | if (!d) | ||
998 | return NULL; | ||
999 | |||
1000 | debug_register_view(d, &debug_hex_ascii_view); | ||
1001 | debug_register_view(d, view); | ||
1002 | debug_set_level(d, level); | ||
1003 | |||
1004 | return d; | ||
1005 | } | ||
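The new zfcp_dbf_reg() helper folds the previously repeated debug_register() / debug_register_view() / debug_set_level() sequence into a single call per trace area. A minimal sketch of how one more area could be set up through it; the "hba2" name and the wrapper function are made up for illustration, and the sketch assumes it lives in zfcp_dbf.c, where the static zfcp_dbf_hba_view is visible:

static debug_info_t *example_register_extra_area(struct zfcp_adapter *adapter)
{
	char dbf_name[DEBUG_MAX_NAME_LEN];

	/* illustration only: reuses the existing HBA view and record layout */
	snprintf(dbf_name, sizeof(dbf_name), "zfcp_%s_hba2",
		 dev_name(&adapter->ccw_device->dev));
	return zfcp_dbf_reg(dbf_name, 3, &zfcp_dbf_hba_view,
			    sizeof(struct zfcp_dbf_hba_record));
}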
1006 | |||
1090 | /** | 1007 | /** |
1091 | * zfcp_adapter_debug_register - registers debug feature for an adapter | 1008 | * zfcp_adapter_debug_register - registers debug feature for an adapter |
1092 | * @adapter: pointer to adapter for which debug features should be registered | 1009 | * @adapter: pointer to adapter for which debug features should be registered |
1093 | * return: -ENOMEM on error, 0 otherwise | 1010 | * return: -ENOMEM on error, 0 otherwise |
1094 | */ | 1011 | */ |
1095 | int zfcp_adapter_debug_register(struct zfcp_adapter *adapter) | 1012 | int zfcp_dbf_adapter_register(struct zfcp_adapter *adapter) |
1096 | { | 1013 | { |
1097 | char dbf_name[DEBUG_MAX_NAME_LEN]; | 1014 | char dbf_name[DEBUG_MAX_NAME_LEN]; |
1015 | struct zfcp_dbf *dbf; | ||
1016 | |||
1017 | dbf = kmalloc(sizeof(struct zfcp_dbf), GFP_KERNEL); | ||
1018 | if (!dbf) | ||
1019 | return -ENOMEM; | ||
1020 | |||
1021 | dbf->adapter = adapter; | ||
1022 | |||
1023 | spin_lock_init(&dbf->hba_lock); | ||
1024 | spin_lock_init(&dbf->san_lock); | ||
1025 | spin_lock_init(&dbf->scsi_lock); | ||
1026 | spin_lock_init(&dbf->rec_lock); | ||
1098 | 1027 | ||
1099 | /* debug feature area which records recovery activity */ | 1028 | /* debug feature area which records recovery activity */ |
1100 | sprintf(dbf_name, "zfcp_%s_rec", dev_name(&adapter->ccw_device->dev)); | 1029 | sprintf(dbf_name, "zfcp_%s_rec", dev_name(&adapter->ccw_device->dev)); |
1101 | adapter->rec_dbf = debug_register(dbf_name, dbfsize, 1, | 1030 | dbf->rec = zfcp_dbf_reg(dbf_name, 3, &zfcp_dbf_rec_view, |
1102 | sizeof(struct zfcp_rec_dbf_record)); | 1031 | sizeof(struct zfcp_dbf_rec_record)); |
1103 | if (!adapter->rec_dbf) | 1032 | if (!dbf->rec) |
1104 | goto failed; | 1033 | goto err_out; |
1105 | debug_register_view(adapter->rec_dbf, &debug_hex_ascii_view); | ||
1106 | debug_register_view(adapter->rec_dbf, &zfcp_rec_dbf_view); | ||
1107 | debug_set_level(adapter->rec_dbf, 3); | ||
1108 | 1034 | ||
1109 | /* debug feature area which records HBA (FSF and QDIO) conditions */ | 1035 | /* debug feature area which records HBA (FSF and QDIO) conditions */ |
1110 | sprintf(dbf_name, "zfcp_%s_hba", dev_name(&adapter->ccw_device->dev)); | 1036 | sprintf(dbf_name, "zfcp_%s_hba", dev_name(&adapter->ccw_device->dev)); |
1111 | adapter->hba_dbf = debug_register(dbf_name, dbfsize, 1, | 1037 | dbf->hba = zfcp_dbf_reg(dbf_name, 3, &zfcp_dbf_hba_view, |
1112 | sizeof(struct zfcp_hba_dbf_record)); | 1038 | sizeof(struct zfcp_dbf_hba_record)); |
1113 | if (!adapter->hba_dbf) | 1039 | if (!dbf->hba) |
1114 | goto failed; | 1040 | goto err_out; |
1115 | debug_register_view(adapter->hba_dbf, &debug_hex_ascii_view); | ||
1116 | debug_register_view(adapter->hba_dbf, &zfcp_hba_dbf_view); | ||
1117 | debug_set_level(adapter->hba_dbf, 3); | ||
1118 | 1041 | ||
1119 | /* debug feature area which records SAN command failures and recovery */ | 1042 | /* debug feature area which records SAN command failures and recovery */ |
1120 | sprintf(dbf_name, "zfcp_%s_san", dev_name(&adapter->ccw_device->dev)); | 1043 | sprintf(dbf_name, "zfcp_%s_san", dev_name(&adapter->ccw_device->dev)); |
1121 | adapter->san_dbf = debug_register(dbf_name, dbfsize, 1, | 1044 | dbf->san = zfcp_dbf_reg(dbf_name, 6, &zfcp_dbf_san_view, |
1122 | sizeof(struct zfcp_san_dbf_record)); | 1045 | sizeof(struct zfcp_dbf_san_record)); |
1123 | if (!adapter->san_dbf) | 1046 | if (!dbf->san) |
1124 | goto failed; | 1047 | goto err_out; |
1125 | debug_register_view(adapter->san_dbf, &debug_hex_ascii_view); | ||
1126 | debug_register_view(adapter->san_dbf, &zfcp_san_dbf_view); | ||
1127 | debug_set_level(adapter->san_dbf, 6); | ||
1128 | 1048 | ||
1129 | /* debug feature area which records SCSI command failures and recovery */ | 1049 | /* debug feature area which records SCSI command failures and recovery */ |
1130 | sprintf(dbf_name, "zfcp_%s_scsi", dev_name(&adapter->ccw_device->dev)); | 1050 | sprintf(dbf_name, "zfcp_%s_scsi", dev_name(&adapter->ccw_device->dev)); |
1131 | adapter->scsi_dbf = debug_register(dbf_name, dbfsize, 1, | 1051 | dbf->scsi = zfcp_dbf_reg(dbf_name, 3, &zfcp_dbf_scsi_view, |
1132 | sizeof(struct zfcp_scsi_dbf_record)); | 1052 | sizeof(struct zfcp_dbf_scsi_record)); |
1133 | if (!adapter->scsi_dbf) | 1053 | if (!dbf->scsi) |
1134 | goto failed; | 1054 | goto err_out; |
1135 | debug_register_view(adapter->scsi_dbf, &debug_hex_ascii_view); | ||
1136 | debug_register_view(adapter->scsi_dbf, &zfcp_scsi_dbf_view); | ||
1137 | debug_set_level(adapter->scsi_dbf, 3); | ||
1138 | 1055 | ||
1056 | adapter->dbf = dbf; | ||
1139 | return 0; | 1057 | return 0; |
1140 | 1058 | ||
1141 | failed: | 1059 | err_out: |
1142 | zfcp_adapter_debug_unregister(adapter); | 1060 | zfcp_dbf_adapter_unregister(dbf); |
1143 | |||
1144 | return -ENOMEM; | 1061 | return -ENOMEM; |
1145 | } | 1062 | } |
1146 | 1063 | ||
1147 | /** | 1064 | /** |
1148 | * zfcp_adapter_debug_unregister - unregisters debug feature for an adapter | 1065 | * zfcp_adapter_debug_unregister - unregisters debug feature for an adapter |
1149 | * @adapter: pointer to adapter for which debug features should be unregistered | 1066 | * @dbf: pointer to dbf for which debug features should be unregistered |
1150 | */ | 1067 | */ |
1151 | void zfcp_adapter_debug_unregister(struct zfcp_adapter *adapter) | 1068 | void zfcp_dbf_adapter_unregister(struct zfcp_dbf *dbf) |
1152 | { | 1069 | { |
1153 | debug_unregister(adapter->scsi_dbf); | 1070 | debug_unregister(dbf->scsi); |
1154 | debug_unregister(adapter->san_dbf); | 1071 | debug_unregister(dbf->san); |
1155 | debug_unregister(adapter->hba_dbf); | 1072 | debug_unregister(dbf->hba); |
1156 | debug_unregister(adapter->rec_dbf); | 1073 | debug_unregister(dbf->rec); |
1157 | adapter->scsi_dbf = NULL; | 1074 | dbf->adapter->dbf = NULL; |
1158 | adapter->san_dbf = NULL; | 1075 | kfree(dbf); |
1159 | adapter->hba_dbf = NULL; | ||
1160 | adapter->rec_dbf = NULL; | ||
1161 | } | 1076 | } |
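Registration and unregistration bracket the adapter lifetime: zfcp_dbf_adapter_register() allocates the struct zfcp_dbf, wires it to the adapter and creates the four trace areas, while zfcp_dbf_adapter_unregister() tears the areas down, clears adapter->dbf and frees the object. A simplified sketch of the expected pairing, not taken from this patch:

static int example_adapter_trace_lifetime(struct zfcp_adapter *adapter)
{
	if (zfcp_dbf_adapter_register(adapter))
		return -ENOMEM;		/* no trace areas could be created */

	/* ... adapter operates; trace wrappers dereference adapter->dbf ... */

	zfcp_dbf_adapter_unregister(adapter->dbf);	/* frees dbf, clears adapter->dbf */
	return 0;
}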
1077 | |||
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h index a573f7344dd6..6b1461e8f847 100644 --- a/drivers/s390/scsi/zfcp_dbf.h +++ b/drivers/s390/scsi/zfcp_dbf.h | |||
@@ -2,7 +2,7 @@ | |||
2 | * This file is part of the zfcp device driver for | 2 | * This file is part of the zfcp device driver for |
3 | * FCP adapters for IBM System z9 and zSeries. | 3 | * FCP adapters for IBM System z9 and zSeries. |
4 | * | 4 | * |
5 | * Copyright IBM Corp. 2008, 2008 | 5 | * Copyright IBM Corp. 2008, 2009 |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License as published by | 8 | * it under the terms of the GNU General Public License as published by |
@@ -22,7 +22,9 @@ | |||
22 | #ifndef ZFCP_DBF_H | 22 | #ifndef ZFCP_DBF_H |
23 | #define ZFCP_DBF_H | 23 | #define ZFCP_DBF_H |
24 | 24 | ||
25 | #include "zfcp_ext.h" | ||
25 | #include "zfcp_fsf.h" | 26 | #include "zfcp_fsf.h" |
27 | #include "zfcp_def.h" | ||
26 | 28 | ||
27 | #define ZFCP_DBF_TAG_SIZE 4 | 29 | #define ZFCP_DBF_TAG_SIZE 4 |
28 | #define ZFCP_DBF_ID_SIZE 7 | 30 | #define ZFCP_DBF_ID_SIZE 7 |
@@ -35,13 +37,13 @@ struct zfcp_dbf_dump { | |||
35 | u8 data[]; /* dump data */ | 37 | u8 data[]; /* dump data */ |
36 | } __attribute__ ((packed)); | 38 | } __attribute__ ((packed)); |
37 | 39 | ||
38 | struct zfcp_rec_dbf_record_thread { | 40 | struct zfcp_dbf_rec_record_thread { |
39 | u32 total; | 41 | u32 total; |
40 | u32 ready; | 42 | u32 ready; |
41 | u32 running; | 43 | u32 running; |
42 | }; | 44 | }; |
43 | 45 | ||
44 | struct zfcp_rec_dbf_record_target { | 46 | struct zfcp_dbf_rec_record_target { |
45 | u64 ref; | 47 | u64 ref; |
46 | u32 status; | 48 | u32 status; |
47 | u32 d_id; | 49 | u32 d_id; |
@@ -50,7 +52,7 @@ struct zfcp_rec_dbf_record_target { | |||
50 | u32 erp_count; | 52 | u32 erp_count; |
51 | }; | 53 | }; |
52 | 54 | ||
53 | struct zfcp_rec_dbf_record_trigger { | 55 | struct zfcp_dbf_rec_record_trigger { |
54 | u8 want; | 56 | u8 want; |
55 | u8 need; | 57 | u8 need; |
56 | u32 as; | 58 | u32 as; |
@@ -62,21 +64,21 @@ struct zfcp_rec_dbf_record_trigger { | |||
62 | u64 fcp_lun; | 64 | u64 fcp_lun; |
63 | }; | 65 | }; |
64 | 66 | ||
65 | struct zfcp_rec_dbf_record_action { | 67 | struct zfcp_dbf_rec_record_action { |
66 | u32 status; | 68 | u32 status; |
67 | u32 step; | 69 | u32 step; |
68 | u64 action; | 70 | u64 action; |
69 | u64 fsf_req; | 71 | u64 fsf_req; |
70 | }; | 72 | }; |
71 | 73 | ||
72 | struct zfcp_rec_dbf_record { | 74 | struct zfcp_dbf_rec_record { |
73 | u8 id; | 75 | u8 id; |
74 | char id2[7]; | 76 | char id2[7]; |
75 | union { | 77 | union { |
76 | struct zfcp_rec_dbf_record_action action; | 78 | struct zfcp_dbf_rec_record_action action; |
77 | struct zfcp_rec_dbf_record_thread thread; | 79 | struct zfcp_dbf_rec_record_thread thread; |
78 | struct zfcp_rec_dbf_record_target target; | 80 | struct zfcp_dbf_rec_record_target target; |
79 | struct zfcp_rec_dbf_record_trigger trigger; | 81 | struct zfcp_dbf_rec_record_trigger trigger; |
80 | } u; | 82 | } u; |
81 | }; | 83 | }; |
82 | 84 | ||
@@ -87,7 +89,7 @@ enum { | |||
87 | ZFCP_REC_DBF_ID_TRIGGER, | 89 | ZFCP_REC_DBF_ID_TRIGGER, |
88 | }; | 90 | }; |
89 | 91 | ||
90 | struct zfcp_hba_dbf_record_response { | 92 | struct zfcp_dbf_hba_record_response { |
91 | u32 fsf_command; | 93 | u32 fsf_command; |
92 | u64 fsf_reqid; | 94 | u64 fsf_reqid; |
93 | u32 fsf_seqno; | 95 | u32 fsf_seqno; |
@@ -125,7 +127,7 @@ struct zfcp_hba_dbf_record_response { | |||
125 | } u; | 127 | } u; |
126 | } __attribute__ ((packed)); | 128 | } __attribute__ ((packed)); |
127 | 129 | ||
128 | struct zfcp_hba_dbf_record_status { | 130 | struct zfcp_dbf_hba_record_status { |
129 | u8 failed; | 131 | u8 failed; |
130 | u32 status_type; | 132 | u32 status_type; |
131 | u32 status_subtype; | 133 | u32 status_subtype; |
@@ -139,24 +141,24 @@ struct zfcp_hba_dbf_record_status { | |||
139 | u8 payload[ZFCP_DBF_UNSOL_PAYLOAD]; | 141 | u8 payload[ZFCP_DBF_UNSOL_PAYLOAD]; |
140 | } __attribute__ ((packed)); | 142 | } __attribute__ ((packed)); |
141 | 143 | ||
142 | struct zfcp_hba_dbf_record_qdio { | 144 | struct zfcp_dbf_hba_record_qdio { |
143 | u32 qdio_error; | 145 | u32 qdio_error; |
144 | u8 sbal_index; | 146 | u8 sbal_index; |
145 | u8 sbal_count; | 147 | u8 sbal_count; |
146 | } __attribute__ ((packed)); | 148 | } __attribute__ ((packed)); |
147 | 149 | ||
148 | struct zfcp_hba_dbf_record { | 150 | struct zfcp_dbf_hba_record { |
149 | u8 tag[ZFCP_DBF_TAG_SIZE]; | 151 | u8 tag[ZFCP_DBF_TAG_SIZE]; |
150 | u8 tag2[ZFCP_DBF_TAG_SIZE]; | 152 | u8 tag2[ZFCP_DBF_TAG_SIZE]; |
151 | union { | 153 | union { |
152 | struct zfcp_hba_dbf_record_response response; | 154 | struct zfcp_dbf_hba_record_response response; |
153 | struct zfcp_hba_dbf_record_status status; | 155 | struct zfcp_dbf_hba_record_status status; |
154 | struct zfcp_hba_dbf_record_qdio qdio; | 156 | struct zfcp_dbf_hba_record_qdio qdio; |
155 | struct fsf_bit_error_payload berr; | 157 | struct fsf_bit_error_payload berr; |
156 | } u; | 158 | } u; |
157 | } __attribute__ ((packed)); | 159 | } __attribute__ ((packed)); |
158 | 160 | ||
159 | struct zfcp_san_dbf_record_ct_request { | 161 | struct zfcp_dbf_san_record_ct_request { |
160 | u16 cmd_req_code; | 162 | u16 cmd_req_code; |
161 | u8 revision; | 163 | u8 revision; |
162 | u8 gs_type; | 164 | u8 gs_type; |
@@ -166,7 +168,7 @@ struct zfcp_san_dbf_record_ct_request { | |||
166 | u32 len; | 168 | u32 len; |
167 | } __attribute__ ((packed)); | 169 | } __attribute__ ((packed)); |
168 | 170 | ||
169 | struct zfcp_san_dbf_record_ct_response { | 171 | struct zfcp_dbf_san_record_ct_response { |
170 | u16 cmd_rsp_code; | 172 | u16 cmd_rsp_code; |
171 | u8 revision; | 173 | u8 revision; |
172 | u8 reason_code; | 174 | u8 reason_code; |
@@ -176,27 +178,27 @@ struct zfcp_san_dbf_record_ct_response { | |||
176 | u32 len; | 178 | u32 len; |
177 | } __attribute__ ((packed)); | 179 | } __attribute__ ((packed)); |
178 | 180 | ||
179 | struct zfcp_san_dbf_record_els { | 181 | struct zfcp_dbf_san_record_els { |
180 | u8 ls_code; | 182 | u8 ls_code; |
181 | u32 len; | 183 | u32 len; |
182 | } __attribute__ ((packed)); | 184 | } __attribute__ ((packed)); |
183 | 185 | ||
184 | struct zfcp_san_dbf_record { | 186 | struct zfcp_dbf_san_record { |
185 | u8 tag[ZFCP_DBF_TAG_SIZE]; | 187 | u8 tag[ZFCP_DBF_TAG_SIZE]; |
186 | u64 fsf_reqid; | 188 | u64 fsf_reqid; |
187 | u32 fsf_seqno; | 189 | u32 fsf_seqno; |
188 | u32 s_id; | 190 | u32 s_id; |
189 | u32 d_id; | 191 | u32 d_id; |
190 | union { | 192 | union { |
191 | struct zfcp_san_dbf_record_ct_request ct_req; | 193 | struct zfcp_dbf_san_record_ct_request ct_req; |
192 | struct zfcp_san_dbf_record_ct_response ct_resp; | 194 | struct zfcp_dbf_san_record_ct_response ct_resp; |
193 | struct zfcp_san_dbf_record_els els; | 195 | struct zfcp_dbf_san_record_els els; |
194 | } u; | 196 | } u; |
195 | #define ZFCP_DBF_SAN_MAX_PAYLOAD 1024 | 197 | #define ZFCP_DBF_SAN_MAX_PAYLOAD 1024 |
196 | u8 payload[32]; | 198 | u8 payload[32]; |
197 | } __attribute__ ((packed)); | 199 | } __attribute__ ((packed)); |
198 | 200 | ||
199 | struct zfcp_scsi_dbf_record { | 201 | struct zfcp_dbf_scsi_record { |
200 | u8 tag[ZFCP_DBF_TAG_SIZE]; | 202 | u8 tag[ZFCP_DBF_TAG_SIZE]; |
201 | u8 tag2[ZFCP_DBF_TAG_SIZE]; | 203 | u8 tag2[ZFCP_DBF_TAG_SIZE]; |
202 | u32 scsi_id; | 204 | u32 scsi_id; |
@@ -222,4 +224,127 @@ struct zfcp_scsi_dbf_record { | |||
222 | u8 sns_info[ZFCP_DBF_SCSI_FCP_SNS_INFO]; | 224 | u8 sns_info[ZFCP_DBF_SCSI_FCP_SNS_INFO]; |
223 | } __attribute__ ((packed)); | 225 | } __attribute__ ((packed)); |
224 | 226 | ||
227 | struct zfcp_dbf { | ||
228 | debug_info_t *rec; | ||
229 | debug_info_t *hba; | ||
230 | debug_info_t *san; | ||
231 | debug_info_t *scsi; | ||
232 | spinlock_t rec_lock; | ||
233 | spinlock_t hba_lock; | ||
234 | spinlock_t san_lock; | ||
235 | spinlock_t scsi_lock; | ||
236 | struct zfcp_dbf_rec_record rec_buf; | ||
237 | struct zfcp_dbf_hba_record hba_buf; | ||
238 | struct zfcp_dbf_san_record san_buf; | ||
239 | struct zfcp_dbf_scsi_record scsi_buf; | ||
240 | struct zfcp_adapter *adapter; | ||
241 | }; | ||
242 | |||
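With this structure, all per-adapter trace state lives in one object reached through adapter->dbf instead of in individual zfcp_adapter members. A hedged sketch of a call site under the new layout; the "example" trigger id is made up and not one the driver uses:

static void example_trace_port_event(struct zfcp_port *port)
{
	/* zfcp_dbf_rec_port() takes port->adapter->dbf->rec_lock and
	 * writes the record into the rec area internally */
	zfcp_dbf_rec_port("example", NULL, port);
}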
243 | static inline | ||
244 | void zfcp_dbf_hba_fsf_resp(const char *tag2, int level, | ||
245 | struct zfcp_fsf_req *req, struct zfcp_dbf *dbf) | ||
246 | { | ||
247 | if (level <= dbf->hba->level) | ||
248 | _zfcp_dbf_hba_fsf_response(tag2, level, req, dbf); | ||
249 | } | ||
250 | |||
251 | /** | ||
252 | * zfcp_dbf_hba_fsf_response - trace event for request completion | ||
253 | * @fsf_req: request that has been completed | ||
254 | */ | ||
255 | static inline void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req) | ||
256 | { | ||
257 | struct zfcp_dbf *dbf = req->adapter->dbf; | ||
258 | struct fsf_qtcb *qtcb = req->qtcb; | ||
259 | |||
260 | if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) && | ||
261 | (qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) { | ||
262 | zfcp_dbf_hba_fsf_resp("perr", 1, req, dbf); | ||
263 | |||
264 | } else if (qtcb->header.fsf_status != FSF_GOOD) { | ||
265 | zfcp_dbf_hba_fsf_resp("ferr", 1, req, dbf); | ||
266 | |||
267 | } else if ((req->fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) || | ||
268 | (req->fsf_command == FSF_QTCB_OPEN_LUN)) { | ||
269 | zfcp_dbf_hba_fsf_resp("open", 4, req, dbf); | ||
270 | |||
271 | } else if (qtcb->header.log_length) { | ||
272 | zfcp_dbf_hba_fsf_resp("qtcb", 5, req, dbf); | ||
273 | |||
274 | } else { | ||
275 | zfcp_dbf_hba_fsf_resp("norm", 6, req, dbf); | ||
276 | } | ||
277 | } | ||
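The inline wrapper compares the event level against the area's current level before any record is built, so routine completions ("norm", level 6) cost almost nothing at the default level of 3 set by zfcp_dbf_reg(). A small illustration, assuming the area level is changed at run time through the s390 debug feature interface:

static void example_level_gating(struct zfcp_fsf_req *req)
{
	struct zfcp_dbf *dbf = req->adapter->dbf;

	zfcp_dbf_hba_fsf_resp("norm", 6, req, dbf);	/* skipped at default level 3 */

	debug_set_level(dbf->hba, 6);			/* raise the hba area level */
	zfcp_dbf_hba_fsf_resp("norm", 6, req, dbf);	/* now recorded */
}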
278 | |||
279 | /** | ||
280 | * zfcp_dbf_hba_fsf_unsol - trace event for an unsolicited status buffer | ||
281 | * @tag: tag indicating which kind of unsolicited status has been received | ||
282 | * @dbf: reference to dbf structure | ||
283 | * @status_buffer: buffer containing payload of unsolicited status | ||
284 | */ | ||
285 | static inline | ||
286 | void zfcp_dbf_hba_fsf_unsol(const char *tag, struct zfcp_dbf *dbf, | ||
287 | struct fsf_status_read_buffer *buf) | ||
288 | { | ||
289 | int level = 2; | ||
290 | |||
291 | if (level <= dbf->hba->level) | ||
292 | _zfcp_dbf_hba_fsf_unsol(tag, level, dbf, buf); | ||
293 | } | ||
294 | |||
295 | static inline | ||
296 | void zfcp_dbf_scsi(const char *tag, const char *tag2, int level, | ||
297 | struct zfcp_dbf *dbf, struct scsi_cmnd *scmd, | ||
298 | struct zfcp_fsf_req *req, unsigned long old_id) | ||
299 | { | ||
300 | if (level <= dbf->scsi->level) | ||
301 | _zfcp_dbf_scsi(tag, tag2, level, dbf, scmd, req, old_id); | ||
302 | } | ||
303 | |||
304 | /** | ||
305 | * zfcp_dbf_scsi_result - trace event for SCSI command completion | ||
306 | * @tag: tag indicating success or failure of SCSI command | ||
307 | * @level: trace level applicable for this event | ||
308 | * @adapter: adapter that has been used to issue the SCSI command | ||
309 | * @scmd: SCSI command pointer | ||
310 | * @fsf_req: request used to issue SCSI command (might be NULL) | ||
311 | */ | ||
312 | static inline | ||
313 | void zfcp_dbf_scsi_result(const char *tag, int level, struct zfcp_dbf *dbf, | ||
314 | struct scsi_cmnd *scmd, struct zfcp_fsf_req *fsf_req) | ||
315 | { | ||
316 | zfcp_dbf_scsi("rslt", tag, level, dbf, scmd, fsf_req, 0); | ||
317 | } | ||
318 | |||
319 | /** | ||
320 | * zfcp_dbf_scsi_abort - trace event for SCSI command abort | ||
321 | * @tag: tag indicating success or failure of abort operation | ||
322 | * @adapter: adapter that has been used to issue SCSI command to be aborted | ||
323 | * @scmd: SCSI command to be aborted | ||
324 | * @new_req: request containing abort (might be NULL) | ||
325 | * @old_id: identifier of request containing SCSI command to be aborted | ||
326 | */ | ||
327 | static inline | ||
328 | void zfcp_dbf_scsi_abort(const char *tag, struct zfcp_dbf *dbf, | ||
329 | struct scsi_cmnd *scmd, struct zfcp_fsf_req *new_req, | ||
330 | unsigned long old_id) | ||
331 | { | ||
332 | zfcp_dbf_scsi("abrt", tag, 1, dbf, scmd, new_req, old_id); | ||
333 | } | ||
334 | |||
335 | /** | ||
336 | * zfcp_dbf_scsi_devreset - trace event for Logical Unit or Target Reset | ||
337 | * @tag: tag indicating success or failure of reset operation | ||
338 | * @flag: indicates type of reset (Target Reset, Logical Unit Reset) | ||
339 | * @unit: unit that needs reset | ||
340 | * @scsi_cmnd: SCSI command which caused this error recovery | ||
341 | */ | ||
342 | static inline | ||
343 | void zfcp_dbf_scsi_devreset(const char *tag, u8 flag, struct zfcp_unit *unit, | ||
344 | struct scsi_cmnd *scsi_cmnd) | ||
345 | { | ||
346 | zfcp_dbf_scsi(flag == FCP_TARGET_RESET ? "trst" : "lrst", tag, 1, | ||
347 | unit->port->adapter->dbf, scsi_cmnd, NULL, 0); | ||
348 | } | ||
349 | |||
225 | #endif /* ZFCP_DBF_H */ | 350 | #endif /* ZFCP_DBF_H */ |
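The inline wrappers above are the intended entry points for SCSI tracing. Hypothetical call sites (not part of this patch, and not compilable on their own since they depend on the zfcp headers) could look like the following; the tag strings and surrounding functions are invented, only the wrapper signatures are taken from the header:

/* Hypothetical call-site fragments for the wrappers declared above. */
static void example_cmd_done(struct zfcp_fsf_req *req, struct scsi_cmnd *scmd)
{
	/* routine completion: level 6, so it is filtered out unless the
	 * scsi debug area is set to maximum verbosity */
	zfcp_dbf_scsi_result("okay", 6, req->adapter->dbf, scmd, req);
}

static void example_abort_done(struct zfcp_fsf_req *abrt_req,
			       struct scsi_cmnd *scmd, unsigned long old_id)
{
	/* aborts always trace at level 1 inside the wrapper */
	zfcp_dbf_scsi_abort("okay", abrt_req->adapter->dbf, scmd,
			    abrt_req, old_id);
}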
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index 49d0532bca1c..7da2fad8f515 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h | |||
@@ -37,10 +37,8 @@ | |||
37 | #include <asm/debug.h> | 37 | #include <asm/debug.h> |
38 | #include <asm/ebcdic.h> | 38 | #include <asm/ebcdic.h> |
39 | #include <asm/sysinfo.h> | 39 | #include <asm/sysinfo.h> |
40 | #include "zfcp_dbf.h" | ||
41 | #include "zfcp_fsf.h" | 40 | #include "zfcp_fsf.h" |
42 | 41 | ||
43 | |||
44 | /********************* GENERAL DEFINES *********************************/ | 42 | /********************* GENERAL DEFINES *********************************/ |
45 | 43 | ||
46 | #define REQUEST_LIST_SIZE 128 | 44 | #define REQUEST_LIST_SIZE 128 |
@@ -75,9 +73,6 @@ | |||
75 | 73 | ||
76 | /*************** FIBRE CHANNEL PROTOCOL SPECIFIC DEFINES ********************/ | 74 | /*************** FIBRE CHANNEL PROTOCOL SPECIFIC DEFINES ********************/ |
77 | 75 | ||
78 | /* timeout for name-server lookup (in seconds) */ | ||
79 | #define ZFCP_NS_GID_PN_TIMEOUT 10 | ||
80 | |||
81 | /* task attribute values in FCP-2 FCP_CMND IU */ | 76 | /* task attribute values in FCP-2 FCP_CMND IU */ |
82 | #define SIMPLE_Q 0 | 77 | #define SIMPLE_Q 0 |
83 | #define HEAD_OF_Q 1 | 78 | #define HEAD_OF_Q 1 |
@@ -224,8 +219,6 @@ struct zfcp_ls_adisc { | |||
224 | #define ZFCP_STATUS_ADAPTER_QDIOUP 0x00000002 | 219 | #define ZFCP_STATUS_ADAPTER_QDIOUP 0x00000002 |
225 | #define ZFCP_STATUS_ADAPTER_XCONFIG_OK 0x00000008 | 220 | #define ZFCP_STATUS_ADAPTER_XCONFIG_OK 0x00000008 |
226 | #define ZFCP_STATUS_ADAPTER_HOST_CON_INIT 0x00000010 | 221 | #define ZFCP_STATUS_ADAPTER_HOST_CON_INIT 0x00000010 |
227 | #define ZFCP_STATUS_ADAPTER_ERP_THREAD_UP 0x00000020 | ||
228 | #define ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL 0x00000080 | ||
229 | #define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100 | 222 | #define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100 |
230 | #define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200 | 223 | #define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200 |
231 | 224 | ||
@@ -234,6 +227,7 @@ struct zfcp_ls_adisc { | |||
234 | 227 | ||
235 | /* remote port status */ | 228 | /* remote port status */ |
236 | #define ZFCP_STATUS_PORT_PHYS_OPEN 0x00000001 | 229 | #define ZFCP_STATUS_PORT_PHYS_OPEN 0x00000001 |
230 | #define ZFCP_STATUS_PORT_LINK_TEST 0x00000002 | ||
237 | 231 | ||
238 | /* well known address (WKA) port status*/ | 232 | /* well known address (WKA) port status*/ |
239 | enum zfcp_wka_status { | 233 | enum zfcp_wka_status { |
@@ -249,7 +243,6 @@ enum zfcp_wka_status { | |||
249 | 243 | ||
250 | /* FSF request status (this does not have a common part) */ | 244 | /* FSF request status (this does not have a common part) */ |
251 | #define ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT 0x00000002 | 245 | #define ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT 0x00000002 |
252 | #define ZFCP_STATUS_FSFREQ_COMPLETED 0x00000004 | ||
253 | #define ZFCP_STATUS_FSFREQ_ERROR 0x00000008 | 246 | #define ZFCP_STATUS_FSFREQ_ERROR 0x00000008 |
254 | #define ZFCP_STATUS_FSFREQ_CLEANUP 0x00000010 | 247 | #define ZFCP_STATUS_FSFREQ_CLEANUP 0x00000010 |
255 | #define ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED 0x00000040 | 248 | #define ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED 0x00000040 |
@@ -266,12 +259,14 @@ struct zfcp_fsf_req; | |||
266 | 259 | ||
267 | /* holds various memory pools of an adapter */ | 260 | /* holds various memory pools of an adapter */ |
268 | struct zfcp_adapter_mempool { | 261 | struct zfcp_adapter_mempool { |
269 | mempool_t *fsf_req_erp; | 262 | mempool_t *erp_req; |
270 | mempool_t *fsf_req_scsi; | 263 | mempool_t *gid_pn_req; |
271 | mempool_t *fsf_req_abort; | 264 | mempool_t *scsi_req; |
272 | mempool_t *fsf_req_status_read; | 265 | mempool_t *scsi_abort; |
273 | mempool_t *data_status_read; | 266 | mempool_t *status_read_req; |
274 | mempool_t *data_gid_pn; | 267 | mempool_t *status_read_data; |
268 | mempool_t *gid_pn_data; | ||
269 | mempool_t *qtcb_pool; | ||
275 | }; | 270 | }; |
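The renamed pools separate request memory from payload data (for example gid_pn_req versus gid_pn_data). A rough sketch of how one of these pools could be created and used, assuming a slab cache for the GID_PN payload is passed in; only the mempool calls are real APIs, the helper names are invented:

/* Sketch: one pre-allocated element guarantees forward progress for
 * error recovery even when GFP_ATOMIC allocations fail. */
static int example_setup_gid_pn_pool(struct zfcp_adapter *adapter,
				     struct kmem_cache *gid_pn_cache)
{
	adapter->pool.gid_pn_data = mempool_create_slab_pool(1, gid_pn_cache);
	return adapter->pool.gid_pn_data ? 0 : -ENOMEM;
}

static void example_use_gid_pn_pool(struct zfcp_adapter *adapter)
{
	struct zfcp_gid_pn_data *gid_pn;

	gid_pn = mempool_alloc(adapter->pool.gid_pn_data, GFP_ATOMIC);
	if (!gid_pn)
		return;
	/* ... build and send the GID_PN request ... */
	mempool_free(gid_pn, adapter->pool.gid_pn_data);
}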
276 | 271 | ||
277 | /* | 272 | /* |
@@ -305,6 +300,15 @@ struct ct_iu_gid_pn_resp { | |||
305 | u32 d_id; | 300 | u32 d_id; |
306 | } __attribute__ ((packed)); | 301 | } __attribute__ ((packed)); |
307 | 302 | ||
303 | struct ct_iu_gpn_ft_req { | ||
304 | struct ct_hdr header; | ||
305 | u8 flags; | ||
306 | u8 domain_id_scope; | ||
307 | u8 area_id_scope; | ||
308 | u8 fc4_type; | ||
309 | } __attribute__ ((packed)); | ||
310 | |||
311 | |||
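Moving ct_iu_gpn_ft_req into zfcp_def.h makes the GPN_FT request IU visible to other compilation units. A small userspace sketch of why the packed attribute matters for this wire format; struct ct_hdr is not part of this hunk, so a fixed-size placeholder is assumed here purely for the size arithmetic:

/* Userspace sketch: the packed layout of the GPN_FT request IU. */
#include <stdint.h>
#include <stdio.h>

struct ct_hdr { uint8_t bytes[16]; };		/* assumed placeholder size */

struct ct_iu_gpn_ft_req {
	struct ct_hdr header;
	uint8_t flags;
	uint8_t domain_id_scope;
	uint8_t area_id_scope;
	uint8_t fc4_type;			/* e.g. FCP is FC-4 type 0x08 */
} __attribute__ ((packed));

int main(void)
{
	/* packed: exactly header + 4 bytes, no compiler padding inserted */
	printf("GPN_FT request IU: %zu bytes\n",
	       sizeof(struct ct_iu_gpn_ft_req));
	return 0;
}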
308 | /** | 312 | /** |
309 | * struct zfcp_send_ct - used to pass parameters to function zfcp_fsf_send_ct | 313 | * struct zfcp_send_ct - used to pass parameters to function zfcp_fsf_send_ct |
310 | * @wka_port: port where the request is sent to | 314 | * @wka_port: port where the request is sent to |
@@ -312,7 +316,6 @@ struct ct_iu_gid_pn_resp { | |||
312 | * @resp: scatter-gather list for response | 316 | * @resp: scatter-gather list for response |
313 | * @handler: handler function (called for response to the request) | 317 | * @handler: handler function (called for response to the request) |
314 | * @handler_data: data passed to handler function | 318 | * @handler_data: data passed to handler function |
315 | * @timeout: FSF timeout for this request | ||
316 | * @completion: completion for synchronization purposes | 319 | * @completion: completion for synchronization purposes |
317 | * @status: used to pass error status to calling function | 320 | * @status: used to pass error status to calling function |
318 | */ | 321 | */ |
@@ -322,7 +325,6 @@ struct zfcp_send_ct { | |||
322 | struct scatterlist *resp; | 325 | struct scatterlist *resp; |
323 | void (*handler)(unsigned long); | 326 | void (*handler)(unsigned long); |
324 | unsigned long handler_data; | 327 | unsigned long handler_data; |
325 | int timeout; | ||
326 | struct completion *completion; | 328 | struct completion *completion; |
327 | int status; | 329 | int status; |
328 | }; | 330 | }; |
@@ -420,6 +422,29 @@ struct zfcp_latencies { | |||
420 | spinlock_t lock; | 422 | spinlock_t lock; |
421 | }; | 423 | }; |
422 | 424 | ||
425 | /** struct zfcp_qdio - basic QDIO data structure | ||
426 | * @resp_q: response queue | ||
427 | * @req_q: request queue | ||
428 | * @stat_lock: lock to protect req_q_util and req_q_time | ||
429 | * @req_q_lock: lock to serialize access to request queue | ||
430 | * @req_q_time: time of last fill level change | ||
431 | * @req_q_util: used for accounting | ||
432 | * @req_q_full: queue full incidents | ||
433 | * @req_q_wq: used to wait for SBAL availability | ||
434 | * @adapter: adapter used in conjunction with this QDIO structure | ||
435 | */ | ||
436 | struct zfcp_qdio { | ||
437 | struct zfcp_qdio_queue resp_q; | ||
438 | struct zfcp_qdio_queue req_q; | ||
439 | spinlock_t stat_lock; | ||
440 | spinlock_t req_q_lock; | ||
441 | unsigned long long req_q_time; | ||
442 | u64 req_q_util; | ||
443 | atomic_t req_q_full; | ||
444 | wait_queue_head_t req_q_wq; | ||
445 | struct zfcp_adapter *adapter; | ||
446 | }; | ||
447 | |||
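With all queue state gathered in struct zfcp_qdio, request-path helpers can take the qdio object instead of the whole adapter. An illustrative sketch (not from the patch) of queue-full handling using only the fields above; sbals_free() is an assumed helper standing in for whatever checks the request-queue fill level:

/* Illustrative only: wait for free SBALs, counting full-queue incidents. */
static int zfcp_qdio_wait_example(struct zfcp_qdio *qdio)
{
	if (sbals_free(&qdio->req_q))
		return 0;

	atomic_inc(&qdio->req_q_full);		/* queue-full accounting */
	return wait_event_interruptible(qdio->req_q_wq,
					sbals_free(&qdio->req_q));
}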
423 | struct zfcp_adapter { | 448 | struct zfcp_adapter { |
424 | atomic_t refcount; /* reference count */ | 449 | atomic_t refcount; /* reference count */ |
425 | wait_queue_head_t remove_wq; /* can be used to wait for | 450 | wait_queue_head_t remove_wq; /* can be used to wait for |
@@ -428,6 +453,7 @@ struct zfcp_adapter { | |||
428 | u64 peer_wwpn; /* P2P peer WWPN */ | 453 | u64 peer_wwpn; /* P2P peer WWPN */ |
429 | u32 peer_d_id; /* P2P peer D_ID */ | 454 | u32 peer_d_id; /* P2P peer D_ID */ |
430 | struct ccw_device *ccw_device; /* S/390 ccw device */ | 455 | struct ccw_device *ccw_device; /* S/390 ccw device */ |
456 | struct zfcp_qdio *qdio; | ||
431 | u32 hydra_version; /* Hydra version */ | 457 | u32 hydra_version; /* Hydra version */ |
432 | u32 fsf_lic_version; | 458 | u32 fsf_lic_version; |
433 | u32 adapter_features; /* FCP channel features */ | 459 | u32 adapter_features; /* FCP channel features */ |
@@ -439,15 +465,7 @@ struct zfcp_adapter { | |||
439 | unsigned long req_no; /* unique FSF req number */ | 465 | unsigned long req_no; /* unique FSF req number */ |
440 | struct list_head *req_list; /* list of pending reqs */ | 466 | struct list_head *req_list; /* list of pending reqs */ |
441 | spinlock_t req_list_lock; /* request list lock */ | 467 | spinlock_t req_list_lock; /* request list lock */ |
442 | struct zfcp_qdio_queue req_q; /* request queue */ | ||
443 | spinlock_t req_q_lock; /* for operations on queue */ | ||
444 | ktime_t req_q_time; /* time of last fill level change */ | ||
445 | u64 req_q_util; /* for accounting */ | ||
446 | spinlock_t qdio_stat_lock; | ||
447 | u32 fsf_req_seq_no; /* FSF cmnd seq number */ | 468 | u32 fsf_req_seq_no; /* FSF cmnd seq number */ |
448 | wait_queue_head_t request_wq; /* can be used to wait for | ||
449 | more avaliable SBALs */ | ||
450 | struct zfcp_qdio_queue resp_q; /* response queue */ | ||
451 | rwlock_t abort_lock; /* Protects against SCSI | 469 | rwlock_t abort_lock; /* Protects against SCSI |
452 | stack abort/command | 470 | stack abort/command |
453 | completion races */ | 471 | completion races */ |
@@ -456,10 +474,9 @@ struct zfcp_adapter { | |||
456 | atomic_t status; /* status of this adapter */ | 474 | atomic_t status; /* status of this adapter */ |
457 | struct list_head erp_ready_head; /* error recovery for this | 475 | struct list_head erp_ready_head; /* error recovery for this |
458 | adapter/devices */ | 476 | adapter/devices */ |
477 | wait_queue_head_t erp_ready_wq; | ||
459 | struct list_head erp_running_head; | 478 | struct list_head erp_running_head; |
460 | rwlock_t erp_lock; | 479 | rwlock_t erp_lock; |
461 | struct semaphore erp_ready_sem; | ||
462 | wait_queue_head_t erp_thread_wqh; | ||
463 | wait_queue_head_t erp_done_wqh; | 480 | wait_queue_head_t erp_done_wqh; |
464 | struct zfcp_erp_action erp_action; /* pending error recovery */ | 481 | struct zfcp_erp_action erp_action; /* pending error recovery */ |
465 | atomic_t erp_counter; | 482 | atomic_t erp_counter; |
@@ -467,27 +484,16 @@ struct zfcp_adapter { | |||
467 | actions */ | 484 | actions */ |
468 | u32 erp_low_mem_count; /* nr of erp actions waiting | 485 | u32 erp_low_mem_count; /* nr of erp actions waiting |
469 | for memory */ | 486 | for memory */ |
487 | struct task_struct *erp_thread; | ||
470 | struct zfcp_wka_ports *gs; /* generic services */ | 488 | struct zfcp_wka_ports *gs; /* generic services */ |
471 | debug_info_t *rec_dbf; | 489 | struct zfcp_dbf *dbf; /* debug traces */ |
472 | debug_info_t *hba_dbf; | ||
473 | debug_info_t *san_dbf; /* debug feature areas */ | ||
474 | debug_info_t *scsi_dbf; | ||
475 | spinlock_t rec_dbf_lock; | ||
476 | spinlock_t hba_dbf_lock; | ||
477 | spinlock_t san_dbf_lock; | ||
478 | spinlock_t scsi_dbf_lock; | ||
479 | struct zfcp_rec_dbf_record rec_dbf_buf; | ||
480 | struct zfcp_hba_dbf_record hba_dbf_buf; | ||
481 | struct zfcp_san_dbf_record san_dbf_buf; | ||
482 | struct zfcp_scsi_dbf_record scsi_dbf_buf; | ||
483 | struct zfcp_adapter_mempool pool; /* Adapter memory pools */ | 490 | struct zfcp_adapter_mempool pool; /* Adapter memory pools */ |
484 | struct qdio_initialize qdio_init_data; /* for qdio_establish */ | ||
485 | struct fc_host_statistics *fc_stats; | 491 | struct fc_host_statistics *fc_stats; |
486 | struct fsf_qtcb_bottom_port *stats_reset_data; | 492 | struct fsf_qtcb_bottom_port *stats_reset_data; |
487 | unsigned long stats_reset; | 493 | unsigned long stats_reset; |
488 | struct work_struct scan_work; | 494 | struct work_struct scan_work; |
489 | struct service_level service_level; | 495 | struct service_level service_level; |
490 | atomic_t qdio_outb_full; /* queue full incidents */ | 496 | struct workqueue_struct *work_queue; |
491 | }; | 497 | }; |
492 | 498 | ||
493 | struct zfcp_port { | 499 | struct zfcp_port { |
@@ -531,36 +537,64 @@ struct zfcp_unit { | |||
531 | struct work_struct scsi_work; | 537 | struct work_struct scsi_work; |
532 | }; | 538 | }; |
533 | 539 | ||
534 | /* FSF request */ | 540 | /** |
541 | * struct zfcp_queue_req - queue related values for a request | ||
542 | * @sbal_number: number of free SBALs | ||
543 | * @sbal_first: first SBAL for this request | ||
544 | * @sbal_last: last SBAL for this request | ||
545 | * @sbal_limit: last possible SBAL for this request | ||
546 | * @sbale_curr: current SBALE at creation of this request | ||
547 | * @sbal_response: SBAL used in interrupt | ||
548 | * @qdio_outb_usage: usage of outbound queue | ||
549 | * @qdio_inb_usage: usage of inbound queue | ||
550 | */ | ||
551 | struct zfcp_queue_req { | ||
552 | u8 sbal_number; | ||
553 | u8 sbal_first; | ||
554 | u8 sbal_last; | ||
555 | u8 sbal_limit; | ||
556 | u8 sbale_curr; | ||
557 | u8 sbal_response; | ||
558 | u16 qdio_outb_usage; | ||
559 | u16 qdio_inb_usage; | ||
560 | }; | ||
561 | |||
562 | /** | ||
563 | * struct zfcp_fsf_req - basic FSF request structure | ||
564 | * @list: list of FSF requests | ||
565 | * @req_id: unique request ID | ||
566 | * @adapter: adapter this request belongs to | ||
567 | * @queue_req: queue related values | ||
568 | * @completion: used to signal the completion of the request | ||
569 | * @status: status of the request | ||
570 | * @fsf_command: FSF command issued | ||
571 | * @qtcb: associated QTCB | ||
572 | * @seq_no: sequence number of this request | ||
573 | * @data: private data | ||
574 | * @timer: timer data of this request | ||
575 | * @erp_action: reference to erp action if request issued on behalf of ERP | ||
576 | * @pool: reference to memory pool if used for this request | ||
577 | * @issued: time when request was sent (STCK) | ||
578 | * @unit: reference to unit if this request is a SCSI request | ||
579 | * @handler: handler which should be called to process response | ||
580 | */ | ||
535 | struct zfcp_fsf_req { | 581 | struct zfcp_fsf_req { |
536 | struct list_head list; /* list of FSF requests */ | 582 | struct list_head list; |
537 | unsigned long req_id; /* unique request ID */ | 583 | unsigned long req_id; |
538 | struct zfcp_adapter *adapter; /* adapter request belongs to */ | 584 | struct zfcp_adapter *adapter; |
539 | u8 sbal_number; /* nr of SBALs free for use */ | 585 | struct zfcp_queue_req queue_req; |
540 | u8 sbal_first; /* first SBAL for this request */ | 586 | struct completion completion; |
541 | u8 sbal_last; /* last SBAL for this request */ | 587 | u32 status; |
542 | u8 sbal_limit; /* last possible SBAL for | 588 | u32 fsf_command; |
543 | this reuest */ | 589 | struct fsf_qtcb *qtcb; |
544 | u8 sbale_curr; /* current SBALE during creation | 590 | u32 seq_no; |
545 | of request */ | 591 | void *data; |
546 | u8 sbal_response; /* SBAL used in interrupt */ | 592 | struct timer_list timer; |
547 | wait_queue_head_t completion_wq; /* can be used by a routine | 593 | struct zfcp_erp_action *erp_action; |
548 | to wait for completion */ | 594 | mempool_t *pool; |
549 | u32 status; /* status of this request */ | 595 | unsigned long long issued; |
550 | u32 fsf_command; /* FSF Command copy */ | 596 | struct zfcp_unit *unit; |
551 | struct fsf_qtcb *qtcb; /* address of associated QTCB */ | ||
552 | u32 seq_no; /* Sequence number of request */ | ||
553 | void *data; /* private data of request */ | ||
554 | struct timer_list timer; /* used for erp or scsi er */ | ||
555 | struct zfcp_erp_action *erp_action; /* used if this request is | ||
556 | issued on behalf of erp */ | ||
557 | mempool_t *pool; /* used if request was alloacted | ||
558 | from emergency pool */ | ||
559 | unsigned long long issued; /* request sent time (STCK) */ | ||
560 | struct zfcp_unit *unit; | ||
561 | void (*handler)(struct zfcp_fsf_req *); | 597 | void (*handler)(struct zfcp_fsf_req *); |
562 | u16 qdio_outb_usage;/* usage of outbound queue */ | ||
563 | u16 qdio_inb_usage; /* usage of inbound queue */ | ||
564 | }; | 598 | }; |
565 | 599 | ||
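Replacing the old completion_wq wait queue with an embedded struct completion lets synchronous callers use the standard completion API. A sketch of that pattern under the new layout; the submit and response helpers are invented, only the struct fields and the completion calls come from the code above:

/* Sketch: synchronous FSF request using the embedded completion. */
static int example_sync_request(struct zfcp_fsf_req *req)
{
	init_completion(&req->completion);

	if (example_send(req))			/* assumed submit helper */
		return -EIO;

	wait_for_completion(&req->completion);	/* sleep until the response ran */
	return (req->status & ZFCP_STATUS_FSFREQ_ERROR) ? -EIO : 0;
}

/* response path: run the handler, then signal any synchronous waiter */
static void example_response_done(struct zfcp_fsf_req *req)
{
	if (req->handler)
		req->handler(req);
	complete(&req->completion);
}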
566 | /* driver data */ | 600 | /* driver data */ |
@@ -570,18 +604,11 @@ struct zfcp_data { | |||
570 | rwlock_t config_lock; /* serialises changes | 604 | rwlock_t config_lock; /* serialises changes |
571 | to adapter/port/unit | 605 | to adapter/port/unit |
572 | lists */ | 606 | lists */ |
573 | struct semaphore config_sema; /* serialises configuration | 607 | struct mutex config_mutex; |
574 | changes */ | 608 | struct kmem_cache *gpn_ft_cache; |
575 | struct kmem_cache *fsf_req_qtcb_cache; | 609 | struct kmem_cache *qtcb_cache; |
576 | struct kmem_cache *sr_buffer_cache; | 610 | struct kmem_cache *sr_buffer_cache; |
577 | struct kmem_cache *gid_pn_cache; | 611 | struct kmem_cache *gid_pn_cache; |
578 | struct workqueue_struct *work_queue; | ||
579 | }; | ||
580 | |||
581 | /* struct used by memory pools for fsf_requests */ | ||
582 | struct zfcp_fsf_req_qtcb { | ||
583 | struct zfcp_fsf_req fsf_req; | ||
584 | struct fsf_qtcb qtcb; | ||
585 | }; | 612 | }; |
586 | 613 | ||
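With config_sema replaced by config_mutex, configuration changes are serialized with the plain mutex API. A minimal sketch, assuming an invented helper for the actual change:

/* Sketch: serializing a configuration change on the new mutex. */
static int example_config_change(struct zfcp_adapter *adapter)
{
	int ret;

	mutex_lock(&zfcp_data.config_mutex);	/* was down(&config_sema) */
	ret = example_apply_change(adapter);	/* assumed helper */
	mutex_unlock(&zfcp_data.config_mutex);	/* was up(&config_sema) */
	return ret;
}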
587 | /********************** ZFCP SPECIFIC DEFINES ********************************/ | 614 | /********************** ZFCP SPECIFIC DEFINES ********************************/ |
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index c75d6f35cb5f..73d366ba31e5 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c | |||
@@ -9,6 +9,7 @@ | |||
9 | #define KMSG_COMPONENT "zfcp" | 9 | #define KMSG_COMPONENT "zfcp" |
10 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | 10 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt |
11 | 11 | ||
12 | #include <linux/kthread.h> | ||
12 | #include "zfcp_ext.h" | 13 | #include "zfcp_ext.h" |
13 | 14 | ||
14 | #define ZFCP_MAX_ERPS 3 | 15 | #define ZFCP_MAX_ERPS 3 |
@@ -26,7 +27,6 @@ enum zfcp_erp_steps { | |||
26 | ZFCP_ERP_STEP_FSF_XCONFIG = 0x0001, | 27 | ZFCP_ERP_STEP_FSF_XCONFIG = 0x0001, |
27 | ZFCP_ERP_STEP_PHYS_PORT_CLOSING = 0x0010, | 28 | ZFCP_ERP_STEP_PHYS_PORT_CLOSING = 0x0010, |
28 | ZFCP_ERP_STEP_PORT_CLOSING = 0x0100, | 29 | ZFCP_ERP_STEP_PORT_CLOSING = 0x0100, |
29 | ZFCP_ERP_STEP_NAMESERVER_LOOKUP = 0x0400, | ||
30 | ZFCP_ERP_STEP_PORT_OPENING = 0x0800, | 30 | ZFCP_ERP_STEP_PORT_OPENING = 0x0800, |
31 | ZFCP_ERP_STEP_UNIT_CLOSING = 0x1000, | 31 | ZFCP_ERP_STEP_UNIT_CLOSING = 0x1000, |
32 | ZFCP_ERP_STEP_UNIT_OPENING = 0x2000, | 32 | ZFCP_ERP_STEP_UNIT_OPENING = 0x2000, |
@@ -75,9 +75,9 @@ static void zfcp_erp_action_ready(struct zfcp_erp_action *act) | |||
75 | struct zfcp_adapter *adapter = act->adapter; | 75 | struct zfcp_adapter *adapter = act->adapter; |
76 | 76 | ||
77 | list_move(&act->list, &act->adapter->erp_ready_head); | 77 | list_move(&act->list, &act->adapter->erp_ready_head); |
78 | zfcp_rec_dbf_event_action("erardy1", act); | 78 | zfcp_dbf_rec_action("erardy1", act); |
79 | up(&adapter->erp_ready_sem); | 79 | wake_up(&adapter->erp_ready_wq); |
80 | zfcp_rec_dbf_event_thread("erardy2", adapter); | 80 | zfcp_dbf_rec_thread("erardy2", adapter->dbf); |
81 | } | 81 | } |
82 | 82 | ||
83 | static void zfcp_erp_action_dismiss(struct zfcp_erp_action *act) | 83 | static void zfcp_erp_action_dismiss(struct zfcp_erp_action *act) |
@@ -150,6 +150,9 @@ static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter, | |||
150 | a_status = atomic_read(&adapter->status); | 150 | a_status = atomic_read(&adapter->status); |
151 | if (a_status & ZFCP_STATUS_COMMON_ERP_INUSE) | 151 | if (a_status & ZFCP_STATUS_COMMON_ERP_INUSE) |
152 | return 0; | 152 | return 0; |
153 | if (!(a_status & ZFCP_STATUS_COMMON_RUNNING) && | ||
154 | !(a_status & ZFCP_STATUS_COMMON_OPEN)) | ||
155 | return 0; /* shutdown requested for closed adapter */ | ||
153 | } | 156 | } |
154 | 157 | ||
155 | return need; | 158 | return need; |
@@ -213,8 +216,7 @@ static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter, | |||
213 | int retval = 1, need; | 216 | int retval = 1, need; |
214 | struct zfcp_erp_action *act = NULL; | 217 | struct zfcp_erp_action *act = NULL; |
215 | 218 | ||
216 | if (!(atomic_read(&adapter->status) & | 219 | if (!adapter->erp_thread) |
217 | ZFCP_STATUS_ADAPTER_ERP_THREAD_UP)) | ||
218 | return -EIO; | 220 | return -EIO; |
219 | 221 | ||
220 | need = zfcp_erp_required_act(want, adapter, port, unit); | 222 | need = zfcp_erp_required_act(want, adapter, port, unit); |
@@ -227,12 +229,11 @@ static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter, | |||
227 | goto out; | 229 | goto out; |
228 | ++adapter->erp_total_count; | 230 | ++adapter->erp_total_count; |
229 | list_add_tail(&act->list, &adapter->erp_ready_head); | 231 | list_add_tail(&act->list, &adapter->erp_ready_head); |
230 | up(&adapter->erp_ready_sem); | 232 | wake_up(&adapter->erp_ready_wq); |
231 | zfcp_rec_dbf_event_thread("eracte1", adapter); | 233 | zfcp_dbf_rec_thread("eracte1", adapter->dbf); |
232 | retval = 0; | 234 | retval = 0; |
233 | out: | 235 | out: |
234 | zfcp_rec_dbf_event_trigger(id, ref, want, need, act, | 236 | zfcp_dbf_rec_trigger(id, ref, want, need, act, adapter, port, unit); |
235 | adapter, port, unit); | ||
236 | return retval; | 237 | return retval; |
237 | } | 238 | } |
238 | 239 | ||
@@ -443,28 +444,28 @@ static int status_change_clear(unsigned long mask, atomic_t *status) | |||
443 | static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter) | 444 | static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter) |
444 | { | 445 | { |
445 | if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status)) | 446 | if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status)) |
446 | zfcp_rec_dbf_event_adapter("eraubl1", NULL, adapter); | 447 | zfcp_dbf_rec_adapter("eraubl1", NULL, adapter->dbf); |
447 | atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status); | 448 | atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status); |
448 | } | 449 | } |
449 | 450 | ||
450 | static void zfcp_erp_port_unblock(struct zfcp_port *port) | 451 | static void zfcp_erp_port_unblock(struct zfcp_port *port) |
451 | { | 452 | { |
452 | if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status)) | 453 | if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status)) |
453 | zfcp_rec_dbf_event_port("erpubl1", NULL, port); | 454 | zfcp_dbf_rec_port("erpubl1", NULL, port); |
454 | atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status); | 455 | atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status); |
455 | } | 456 | } |
456 | 457 | ||
457 | static void zfcp_erp_unit_unblock(struct zfcp_unit *unit) | 458 | static void zfcp_erp_unit_unblock(struct zfcp_unit *unit) |
458 | { | 459 | { |
459 | if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status)) | 460 | if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status)) |
460 | zfcp_rec_dbf_event_unit("eruubl1", NULL, unit); | 461 | zfcp_dbf_rec_unit("eruubl1", NULL, unit); |
461 | atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status); | 462 | atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status); |
462 | } | 463 | } |
463 | 464 | ||
464 | static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action) | 465 | static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action) |
465 | { | 466 | { |
466 | list_move(&erp_action->list, &erp_action->adapter->erp_running_head); | 467 | list_move(&erp_action->list, &erp_action->adapter->erp_running_head); |
467 | zfcp_rec_dbf_event_action("erator1", erp_action); | 468 | zfcp_dbf_rec_action("erator1", erp_action); |
468 | } | 469 | } |
469 | 470 | ||
470 | static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act) | 471 | static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act) |
@@ -480,13 +481,12 @@ static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act) | |||
480 | if (act->status & (ZFCP_STATUS_ERP_DISMISSED | | 481 | if (act->status & (ZFCP_STATUS_ERP_DISMISSED | |
481 | ZFCP_STATUS_ERP_TIMEDOUT)) { | 482 | ZFCP_STATUS_ERP_TIMEDOUT)) { |
482 | act->fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED; | 483 | act->fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED; |
483 | zfcp_rec_dbf_event_action("erscf_1", act); | 484 | zfcp_dbf_rec_action("erscf_1", act); |
484 | act->fsf_req->erp_action = NULL; | 485 | act->fsf_req->erp_action = NULL; |
485 | } | 486 | } |
486 | if (act->status & ZFCP_STATUS_ERP_TIMEDOUT) | 487 | if (act->status & ZFCP_STATUS_ERP_TIMEDOUT) |
487 | zfcp_rec_dbf_event_action("erscf_2", act); | 488 | zfcp_dbf_rec_action("erscf_2", act); |
488 | if (act->fsf_req->status & (ZFCP_STATUS_FSFREQ_COMPLETED | | 489 | if (act->fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED) |
489 | ZFCP_STATUS_FSFREQ_DISMISSED)) | ||
490 | act->fsf_req = NULL; | 490 | act->fsf_req = NULL; |
491 | } else | 491 | } else |
492 | act->fsf_req = NULL; | 492 | act->fsf_req = NULL; |
@@ -604,9 +604,11 @@ static void zfcp_erp_wakeup(struct zfcp_adapter *adapter) | |||
604 | 604 | ||
605 | static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *act) | 605 | static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *act) |
606 | { | 606 | { |
607 | if (zfcp_qdio_open(act->adapter)) | 607 | struct zfcp_qdio *qdio = act->adapter->qdio; |
608 | |||
609 | if (zfcp_qdio_open(qdio)) | ||
608 | return ZFCP_ERP_FAILED; | 610 | return ZFCP_ERP_FAILED; |
609 | init_waitqueue_head(&act->adapter->request_wq); | 611 | init_waitqueue_head(&qdio->req_q_wq); |
610 | atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &act->adapter->status); | 612 | atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &act->adapter->status); |
611 | return ZFCP_ERP_SUCCEEDED; | 613 | return ZFCP_ERP_SUCCEEDED; |
612 | } | 614 | } |
@@ -641,9 +643,10 @@ static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action) | |||
641 | return ZFCP_ERP_FAILED; | 643 | return ZFCP_ERP_FAILED; |
642 | } | 644 | } |
643 | 645 | ||
644 | zfcp_rec_dbf_event_thread_lock("erasfx1", adapter); | 646 | zfcp_dbf_rec_thread_lock("erasfx1", adapter->dbf); |
645 | down(&adapter->erp_ready_sem); | 647 | wait_event(adapter->erp_ready_wq, |
646 | zfcp_rec_dbf_event_thread_lock("erasfx2", adapter); | 648 | !list_empty(&adapter->erp_ready_head)); |
649 | zfcp_dbf_rec_thread_lock("erasfx2", adapter->dbf); | ||
647 | if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) | 650 | if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) |
648 | break; | 651 | break; |
649 | 652 | ||
@@ -682,9 +685,10 @@ static int zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *act) | |||
682 | if (ret) | 685 | if (ret) |
683 | return ZFCP_ERP_FAILED; | 686 | return ZFCP_ERP_FAILED; |
684 | 687 | ||
685 | zfcp_rec_dbf_event_thread_lock("erasox1", adapter); | 688 | zfcp_dbf_rec_thread_lock("erasox1", adapter->dbf); |
686 | down(&adapter->erp_ready_sem); | 689 | wait_event(adapter->erp_ready_wq, |
687 | zfcp_rec_dbf_event_thread_lock("erasox2", adapter); | 690 | !list_empty(&adapter->erp_ready_head)); |
691 | zfcp_dbf_rec_thread_lock("erasox2", adapter->dbf); | ||
688 | if (act->status & ZFCP_STATUS_ERP_TIMEDOUT) | 692 | if (act->status & ZFCP_STATUS_ERP_TIMEDOUT) |
689 | return ZFCP_ERP_FAILED; | 693 | return ZFCP_ERP_FAILED; |
690 | 694 | ||
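Both hunks above replace down(&erp_ready_sem) with wait_event() on erp_ready_wq plus a predicate over the ready list, so every wakeup re-checks the actual state instead of consuming a semaphore count. A runnable userspace model of that idea, using a pthread condition variable in place of the kernel wait queue; all names are illustrative:

/* Userspace model: predicate re-checked on every wakeup, so spurious or
 * extra wakeups are harmless. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t ready = PTHREAD_COND_INITIALIZER;
static int ready_count;		/* stands in for !list_empty(&erp_ready_head) */

static void wait_for_work(void)
{
	pthread_mutex_lock(&lock);
	while (ready_count == 0)	/* predicate re-checked after wakeup */
		pthread_cond_wait(&ready, &lock);
	ready_count--;
	pthread_mutex_unlock(&lock);
}

static void post_work(void)
{
	pthread_mutex_lock(&lock);
	ready_count++;
	pthread_mutex_unlock(&lock);
	pthread_cond_signal(&ready);	/* like wake_up(&erp_ready_wq) */
}

int main(void)
{
	post_work();
	wait_for_work();
	puts("woken with work pending");
	return 0;
}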
@@ -711,10 +715,10 @@ static void zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *act) | |||
711 | struct zfcp_adapter *adapter = act->adapter; | 715 | struct zfcp_adapter *adapter = act->adapter; |
712 | 716 | ||
713 | /* close queues to ensure that buffers are not accessed by adapter */ | 717 | /* close queues to ensure that buffers are not accessed by adapter */ |
714 | zfcp_qdio_close(adapter); | 718 | zfcp_qdio_close(adapter->qdio); |
715 | zfcp_fsf_req_dismiss_all(adapter); | 719 | zfcp_fsf_req_dismiss_all(adapter); |
716 | adapter->fsf_req_seq_no = 0; | 720 | adapter->fsf_req_seq_no = 0; |
717 | zfcp_fc_wka_port_force_offline(&adapter->gs->ds); | 721 | zfcp_fc_wka_ports_force_offline(adapter->gs); |
718 | /* all ports and units are closed */ | 722 | /* all ports and units are closed */ |
719 | zfcp_erp_modify_adapter_status(adapter, "erascl1", NULL, | 723 | zfcp_erp_modify_adapter_status(adapter, "erascl1", NULL, |
720 | ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR); | 724 | ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR); |
@@ -841,27 +845,6 @@ static int zfcp_erp_open_ptp_port(struct zfcp_erp_action *act) | |||
841 | return zfcp_erp_port_strategy_open_port(act); | 845 | return zfcp_erp_port_strategy_open_port(act); |
842 | } | 846 | } |
843 | 847 | ||
844 | void zfcp_erp_port_strategy_open_lookup(struct work_struct *work) | ||
845 | { | ||
846 | int retval; | ||
847 | struct zfcp_port *port = container_of(work, struct zfcp_port, | ||
848 | gid_pn_work); | ||
849 | |||
850 | retval = zfcp_fc_ns_gid_pn(&port->erp_action); | ||
851 | if (!retval) { | ||
852 | port->erp_action.step = ZFCP_ERP_STEP_NAMESERVER_LOOKUP; | ||
853 | goto out; | ||
854 | } | ||
855 | if (retval == -ENOMEM) { | ||
856 | zfcp_erp_notify(&port->erp_action, ZFCP_STATUS_ERP_LOWMEM); | ||
857 | goto out; | ||
858 | } | ||
859 | /* all other error condtions */ | ||
860 | zfcp_erp_notify(&port->erp_action, 0); | ||
861 | out: | ||
862 | zfcp_port_put(port); | ||
863 | } | ||
864 | |||
865 | static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act) | 848 | static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act) |
866 | { | 849 | { |
867 | struct zfcp_adapter *adapter = act->adapter; | 850 | struct zfcp_adapter *adapter = act->adapter; |
@@ -876,15 +859,11 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act) | |||
876 | return zfcp_erp_open_ptp_port(act); | 859 | return zfcp_erp_open_ptp_port(act); |
877 | if (!port->d_id) { | 860 | if (!port->d_id) { |
878 | zfcp_port_get(port); | 861 | zfcp_port_get(port); |
879 | if (!queue_work(zfcp_data.work_queue, | 862 | if (!queue_work(adapter->work_queue, |
880 | &port->gid_pn_work)) | 863 | &port->gid_pn_work)) |
881 | zfcp_port_put(port); | 864 | zfcp_port_put(port); |
882 | return ZFCP_ERP_CONTINUES; | 865 | return ZFCP_ERP_EXIT; |
883 | } | 866 | } |
884 | /* fall through */ | ||
885 | case ZFCP_ERP_STEP_NAMESERVER_LOOKUP: | ||
886 | if (!port->d_id) | ||
887 | return ZFCP_ERP_FAILED; | ||
888 | return zfcp_erp_port_strategy_open_port(act); | 867 | return zfcp_erp_port_strategy_open_port(act); |
889 | 868 | ||
890 | case ZFCP_ERP_STEP_PORT_OPENING: | 869 | case ZFCP_ERP_STEP_PORT_OPENING: |
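The reference handling around queue_work() in the hunk above follows the usual pattern: take a port reference before queueing the d_id lookup, and drop it again if queue_work() reports the item was already pending. Isolated here for clarity; the surrounding function is invented, the calls match the code above:

/* Sketch: reference counting around queueing the GID_PN lookup work. */
static void example_trigger_did_lookup(struct zfcp_port *port)
{
	zfcp_port_get(port);
	if (!queue_work(port->adapter->work_queue, &port->gid_pn_work))
		zfcp_port_put(port);	/* already queued, drop the extra ref */
}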
@@ -1163,7 +1142,7 @@ static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action) | |||
1163 | } | 1142 | } |
1164 | 1143 | ||
1165 | list_del(&erp_action->list); | 1144 | list_del(&erp_action->list); |
1166 | zfcp_rec_dbf_event_action("eractd1", erp_action); | 1145 | zfcp_dbf_rec_action("eractd1", erp_action); |
1167 | 1146 | ||
1168 | switch (erp_action->action) { | 1147 | switch (erp_action->action) { |
1169 | case ZFCP_ERP_ACTION_REOPEN_UNIT: | 1148 | case ZFCP_ERP_ACTION_REOPEN_UNIT: |
@@ -1311,20 +1290,16 @@ static int zfcp_erp_thread(void *data) | |||
1311 | struct list_head *next; | 1290 | struct list_head *next; |
1312 | struct zfcp_erp_action *act; | 1291 | struct zfcp_erp_action *act; |
1313 | unsigned long flags; | 1292 | unsigned long flags; |
1314 | int ignore; | ||
1315 | |||
1316 | daemonize("zfcperp%s", dev_name(&adapter->ccw_device->dev)); | ||
1317 | /* Block all signals */ | ||
1318 | siginitsetinv(¤t->blocked, 0); | ||
1319 | atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status); | ||
1320 | wake_up(&adapter->erp_thread_wqh); | ||
1321 | 1293 | ||
1322 | while (!(atomic_read(&adapter->status) & | 1294 | for (;;) { |
1323 | ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL)) { | 1295 | zfcp_dbf_rec_thread_lock("erthrd1", adapter->dbf); |
1296 | wait_event_interruptible(adapter->erp_ready_wq, | ||
1297 | !list_empty(&adapter->erp_ready_head) || | ||
1298 | kthread_should_stop()); | ||
1299 | zfcp_dbf_rec_thread_lock("erthrd2", adapter->dbf); | ||
1324 | 1300 | ||
1325 | zfcp_rec_dbf_event_thread_lock("erthrd1", adapter); | 1301 | if (kthread_should_stop()) |
1326 | ignore = down_interruptible(&adapter->erp_ready_sem); | 1302 | break; |
1327 | zfcp_rec_dbf_event_thread_lock("erthrd2", adapter); | ||
1328 | 1303 | ||
1329 | write_lock_irqsave(&adapter->erp_lock, flags); | 1304 | write_lock_irqsave(&adapter->erp_lock, flags); |
1330 | next = adapter->erp_ready_head.next; | 1305 | next = adapter->erp_ready_head.next; |
@@ -1339,9 +1314,6 @@ static int zfcp_erp_thread(void *data) | |||
1339 | } | 1314 | } |
1340 | } | 1315 | } |
1341 | 1316 | ||
1342 | atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status); | ||
1343 | wake_up(&adapter->erp_thread_wqh); | ||
1344 | |||
1345 | return 0; | 1317 | return 0; |
1346 | } | 1318 | } |
1347 | 1319 | ||
@@ -1353,18 +1325,17 @@ static int zfcp_erp_thread(void *data) | |||
1353 | */ | 1325 | */ |
1354 | int zfcp_erp_thread_setup(struct zfcp_adapter *adapter) | 1326 | int zfcp_erp_thread_setup(struct zfcp_adapter *adapter) |
1355 | { | 1327 | { |
1356 | int retval; | 1328 | struct task_struct *thread; |
1357 | 1329 | ||
1358 | atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status); | 1330 | thread = kthread_run(zfcp_erp_thread, adapter, "zfcperp%s", |
1359 | retval = kernel_thread(zfcp_erp_thread, adapter, SIGCHLD); | 1331 | dev_name(&adapter->ccw_device->dev)); |
1360 | if (retval < 0) { | 1332 | if (IS_ERR(thread)) { |
1361 | dev_err(&adapter->ccw_device->dev, | 1333 | dev_err(&adapter->ccw_device->dev, |
1362 | "Creating an ERP thread for the FCP device failed.\n"); | 1334 | "Creating an ERP thread for the FCP device failed.\n"); |
1363 | return retval; | 1335 | return PTR_ERR(thread); |
1364 | } | 1336 | } |
1365 | wait_event(adapter->erp_thread_wqh, | 1337 | |
1366 | atomic_read(&adapter->status) & | 1338 | adapter->erp_thread = thread; |
1367 | ZFCP_STATUS_ADAPTER_ERP_THREAD_UP); | ||
1368 | return 0; | 1339 | return 0; |
1369 | } | 1340 | } |
1370 | 1341 | ||
@@ -1379,16 +1350,10 @@ int zfcp_erp_thread_setup(struct zfcp_adapter *adapter) | |||
1379 | */ | 1350 | */ |
1380 | void zfcp_erp_thread_kill(struct zfcp_adapter *adapter) | 1351 | void zfcp_erp_thread_kill(struct zfcp_adapter *adapter) |
1381 | { | 1352 | { |
1382 | atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL, &adapter->status); | 1353 | kthread_stop(adapter->erp_thread); |
1383 | up(&adapter->erp_ready_sem); | 1354 | adapter->erp_thread = NULL; |
1384 | zfcp_rec_dbf_event_thread_lock("erthrk1", adapter); | 1355 | WARN_ON(!list_empty(&adapter->erp_ready_head)); |
1385 | 1356 | WARN_ON(!list_empty(&adapter->erp_running_head)); | |
1386 | wait_event(adapter->erp_thread_wqh, | ||
1387 | !(atomic_read(&adapter->status) & | ||
1388 | ZFCP_STATUS_ADAPTER_ERP_THREAD_UP)); | ||
1389 | |||
1390 | atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL, | ||
1391 | &adapter->status); | ||
1392 | } | 1357 | } |
1393 | 1358 | ||
1394 | /** | 1359 | /** |
@@ -1456,11 +1421,11 @@ void zfcp_erp_modify_adapter_status(struct zfcp_adapter *adapter, char *id, | |||
1456 | 1421 | ||
1457 | if (set_or_clear == ZFCP_SET) { | 1422 | if (set_or_clear == ZFCP_SET) { |
1458 | if (status_change_set(mask, &adapter->status)) | 1423 | if (status_change_set(mask, &adapter->status)) |
1459 | zfcp_rec_dbf_event_adapter(id, ref, adapter); | 1424 | zfcp_dbf_rec_adapter(id, ref, adapter->dbf); |
1460 | atomic_set_mask(mask, &adapter->status); | 1425 | atomic_set_mask(mask, &adapter->status); |
1461 | } else { | 1426 | } else { |
1462 | if (status_change_clear(mask, &adapter->status)) | 1427 | if (status_change_clear(mask, &adapter->status)) |
1463 | zfcp_rec_dbf_event_adapter(id, ref, adapter); | 1428 | zfcp_dbf_rec_adapter(id, ref, adapter->dbf); |
1464 | atomic_clear_mask(mask, &adapter->status); | 1429 | atomic_clear_mask(mask, &adapter->status); |
1465 | if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) | 1430 | if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) |
1466 | atomic_set(&adapter->erp_counter, 0); | 1431 | atomic_set(&adapter->erp_counter, 0); |
@@ -1490,11 +1455,11 @@ void zfcp_erp_modify_port_status(struct zfcp_port *port, char *id, void *ref, | |||
1490 | 1455 | ||
1491 | if (set_or_clear == ZFCP_SET) { | 1456 | if (set_or_clear == ZFCP_SET) { |
1492 | if (status_change_set(mask, &port->status)) | 1457 | if (status_change_set(mask, &port->status)) |
1493 | zfcp_rec_dbf_event_port(id, ref, port); | 1458 | zfcp_dbf_rec_port(id, ref, port); |
1494 | atomic_set_mask(mask, &port->status); | 1459 | atomic_set_mask(mask, &port->status); |
1495 | } else { | 1460 | } else { |
1496 | if (status_change_clear(mask, &port->status)) | 1461 | if (status_change_clear(mask, &port->status)) |
1497 | zfcp_rec_dbf_event_port(id, ref, port); | 1462 | zfcp_dbf_rec_port(id, ref, port); |
1498 | atomic_clear_mask(mask, &port->status); | 1463 | atomic_clear_mask(mask, &port->status); |
1499 | if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) | 1464 | if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) |
1500 | atomic_set(&port->erp_counter, 0); | 1465 | atomic_set(&port->erp_counter, 0); |
@@ -1519,11 +1484,11 @@ void zfcp_erp_modify_unit_status(struct zfcp_unit *unit, char *id, void *ref, | |||
1519 | { | 1484 | { |
1520 | if (set_or_clear == ZFCP_SET) { | 1485 | if (set_or_clear == ZFCP_SET) { |
1521 | if (status_change_set(mask, &unit->status)) | 1486 | if (status_change_set(mask, &unit->status)) |
1522 | zfcp_rec_dbf_event_unit(id, ref, unit); | 1487 | zfcp_dbf_rec_unit(id, ref, unit); |
1523 | atomic_set_mask(mask, &unit->status); | 1488 | atomic_set_mask(mask, &unit->status); |
1524 | } else { | 1489 | } else { |
1525 | if (status_change_clear(mask, &unit->status)) | 1490 | if (status_change_clear(mask, &unit->status)) |
1526 | zfcp_rec_dbf_event_unit(id, ref, unit); | 1491 | zfcp_dbf_rec_unit(id, ref, unit); |
1527 | atomic_clear_mask(mask, &unit->status); | 1492 | atomic_clear_mask(mask, &unit->status); |
1528 | if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) { | 1493 | if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) { |
1529 | atomic_set(&unit->erp_counter, 0); | 1494 | atomic_set(&unit->erp_counter, 0); |
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index 3044c6010306..36935bc0818f 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h | |||
@@ -34,37 +34,31 @@ extern struct zfcp_adapter *zfcp_get_adapter_by_busid(char *); | |||
34 | extern struct miscdevice zfcp_cfdc_misc; | 34 | extern struct miscdevice zfcp_cfdc_misc; |
35 | 35 | ||
36 | /* zfcp_dbf.c */ | 36 | /* zfcp_dbf.c */ |
37 | extern int zfcp_adapter_debug_register(struct zfcp_adapter *); | 37 | extern int zfcp_dbf_adapter_register(struct zfcp_adapter *); |
38 | extern void zfcp_adapter_debug_unregister(struct zfcp_adapter *); | 38 | extern void zfcp_dbf_adapter_unregister(struct zfcp_dbf *); |
39 | extern void zfcp_rec_dbf_event_thread(char *, struct zfcp_adapter *); | 39 | extern void zfcp_dbf_rec_thread(char *, struct zfcp_dbf *); |
40 | extern void zfcp_rec_dbf_event_thread_lock(char *, struct zfcp_adapter *); | 40 | extern void zfcp_dbf_rec_thread_lock(char *, struct zfcp_dbf *); |
41 | extern void zfcp_rec_dbf_event_adapter(char *, void *, struct zfcp_adapter *); | 41 | extern void zfcp_dbf_rec_adapter(char *, void *, struct zfcp_dbf *); |
42 | extern void zfcp_rec_dbf_event_port(char *, void *, struct zfcp_port *); | 42 | extern void zfcp_dbf_rec_port(char *, void *, struct zfcp_port *); |
43 | extern void zfcp_rec_dbf_event_unit(char *, void *, struct zfcp_unit *); | 43 | extern void zfcp_dbf_rec_unit(char *, void *, struct zfcp_unit *); |
44 | extern void zfcp_rec_dbf_event_trigger(char *, void *, u8, u8, void *, | 44 | extern void zfcp_dbf_rec_trigger(char *, void *, u8, u8, void *, |
45 | struct zfcp_adapter *, | 45 | struct zfcp_adapter *, struct zfcp_port *, |
46 | struct zfcp_port *, struct zfcp_unit *); | 46 | struct zfcp_unit *); |
47 | extern void zfcp_rec_dbf_event_action(char *, struct zfcp_erp_action *); | 47 | extern void zfcp_dbf_rec_action(char *, struct zfcp_erp_action *); |
48 | extern void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *); | 48 | extern void _zfcp_dbf_hba_fsf_response(const char *, int, struct zfcp_fsf_req *, |
49 | extern void zfcp_hba_dbf_event_fsf_unsol(const char *, struct zfcp_adapter *, | 49 | struct zfcp_dbf *); |
50 | struct fsf_status_read_buffer *); | 50 | extern void _zfcp_dbf_hba_fsf_unsol(const char *, int level, struct zfcp_dbf *, |
51 | extern void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *, unsigned int, int, | 51 | struct fsf_status_read_buffer *); |
52 | int); | 52 | extern void zfcp_dbf_hba_qdio(struct zfcp_dbf *, unsigned int, int, int); |
53 | extern void zfcp_hba_dbf_event_berr(struct zfcp_adapter *, | 53 | extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *); |
54 | struct zfcp_fsf_req *); | 54 | extern void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *); |
55 | extern void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *); | 55 | extern void zfcp_dbf_san_ct_response(struct zfcp_fsf_req *); |
56 | extern void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *); | 56 | extern void zfcp_dbf_san_els_request(struct zfcp_fsf_req *); |
57 | extern void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *); | 57 | extern void zfcp_dbf_san_els_response(struct zfcp_fsf_req *); |
58 | extern void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *); | 58 | extern void zfcp_dbf_san_incoming_els(struct zfcp_fsf_req *); |
59 | extern void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *); | 59 | extern void _zfcp_dbf_scsi(const char *, const char *, int, struct zfcp_dbf *, |
60 | extern void zfcp_scsi_dbf_event_result(const char *, int, struct zfcp_adapter *, | 60 | struct scsi_cmnd *, struct zfcp_fsf_req *, |
61 | struct scsi_cmnd *, | 61 | unsigned long); |
62 | struct zfcp_fsf_req *); | ||
63 | extern void zfcp_scsi_dbf_event_abort(const char *, struct zfcp_adapter *, | ||
64 | struct scsi_cmnd *, struct zfcp_fsf_req *, | ||
65 | unsigned long); | ||
66 | extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *, | ||
67 | struct scsi_cmnd *); | ||
68 | 62 | ||
69 | /* zfcp_erp.c */ | 63 | /* zfcp_erp.c */ |
70 | extern void zfcp_erp_modify_adapter_status(struct zfcp_adapter *, char *, | 64 | extern void zfcp_erp_modify_adapter_status(struct zfcp_adapter *, char *, |
@@ -96,22 +90,20 @@ extern void zfcp_erp_unit_access_denied(struct zfcp_unit *, char *, void *); | |||
96 | extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, char *, | 90 | extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, char *, |
97 | void *); | 91 | void *); |
98 | extern void zfcp_erp_timeout_handler(unsigned long); | 92 | extern void zfcp_erp_timeout_handler(unsigned long); |
99 | extern void zfcp_erp_port_strategy_open_lookup(struct work_struct *); | ||
100 | 93 | ||
101 | /* zfcp_fc.c */ | 94 | /* zfcp_fc.c */ |
102 | extern int zfcp_scan_ports(struct zfcp_adapter *); | 95 | extern int zfcp_fc_scan_ports(struct zfcp_adapter *); |
103 | extern void _zfcp_scan_ports_later(struct work_struct *); | 96 | extern void _zfcp_fc_scan_ports_later(struct work_struct *); |
104 | extern void zfcp_fc_incoming_els(struct zfcp_fsf_req *); | 97 | extern void zfcp_fc_incoming_els(struct zfcp_fsf_req *); |
105 | extern int zfcp_fc_ns_gid_pn(struct zfcp_erp_action *); | 98 | extern void zfcp_fc_port_did_lookup(struct work_struct *); |
106 | extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fsf_plogi *); | 99 | extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fsf_plogi *); |
107 | extern void zfcp_test_link(struct zfcp_port *); | 100 | extern void zfcp_fc_test_link(struct zfcp_port *); |
108 | extern void zfcp_fc_link_test_work(struct work_struct *); | 101 | extern void zfcp_fc_link_test_work(struct work_struct *); |
109 | extern void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *); | 102 | extern void zfcp_fc_wka_ports_force_offline(struct zfcp_wka_ports *); |
110 | extern void zfcp_fc_wka_ports_init(struct zfcp_adapter *); | 103 | extern int zfcp_fc_gs_setup(struct zfcp_adapter *); |
104 | extern void zfcp_fc_gs_destroy(struct zfcp_adapter *); | ||
111 | extern int zfcp_fc_execute_els_fc_job(struct fc_bsg_job *); | 105 | extern int zfcp_fc_execute_els_fc_job(struct fc_bsg_job *); |
112 | extern int zfcp_fc_execute_ct_fc_job(struct fc_bsg_job *); | 106 | extern int zfcp_fc_execute_ct_fc_job(struct fc_bsg_job *); |
113 | extern void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *); | ||
114 | |||
115 | 107 | ||
116 | /* zfcp_fsf.c */ | 108 | /* zfcp_fsf.c */ |
117 | extern int zfcp_fsf_open_port(struct zfcp_erp_action *); | 109 | extern int zfcp_fsf_open_port(struct zfcp_erp_action *); |
@@ -122,37 +114,39 @@ extern int zfcp_fsf_close_physical_port(struct zfcp_erp_action *); | |||
122 | extern int zfcp_fsf_open_unit(struct zfcp_erp_action *); | 114 | extern int zfcp_fsf_open_unit(struct zfcp_erp_action *); |
123 | extern int zfcp_fsf_close_unit(struct zfcp_erp_action *); | 115 | extern int zfcp_fsf_close_unit(struct zfcp_erp_action *); |
124 | extern int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *); | 116 | extern int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *); |
125 | extern int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *, | 117 | extern int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *, |
126 | struct fsf_qtcb_bottom_config *); | 118 | struct fsf_qtcb_bottom_config *); |
127 | extern int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *); | 119 | extern int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *); |
128 | extern int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *, | 120 | extern int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *, |
129 | struct fsf_qtcb_bottom_port *); | 121 | struct fsf_qtcb_bottom_port *); |
130 | extern struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *, | 122 | extern struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *, |
131 | struct zfcp_fsf_cfdc *); | 123 | struct zfcp_fsf_cfdc *); |
132 | extern void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *); | 124 | extern void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *); |
133 | extern int zfcp_fsf_status_read(struct zfcp_adapter *); | 125 | extern int zfcp_fsf_status_read(struct zfcp_qdio *); |
134 | extern int zfcp_status_read_refill(struct zfcp_adapter *adapter); | 126 | extern int zfcp_status_read_refill(struct zfcp_adapter *adapter); |
135 | extern int zfcp_fsf_send_ct(struct zfcp_send_ct *, mempool_t *, | 127 | extern int zfcp_fsf_send_ct(struct zfcp_send_ct *, mempool_t *); |
136 | struct zfcp_erp_action *); | ||
137 | extern int zfcp_fsf_send_els(struct zfcp_send_els *); | 128 | extern int zfcp_fsf_send_els(struct zfcp_send_els *); |
138 | extern int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *, | 129 | extern int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *, |
139 | struct scsi_cmnd *); | 130 | struct scsi_cmnd *); |
140 | extern void zfcp_fsf_req_complete(struct zfcp_fsf_req *); | ||
141 | extern void zfcp_fsf_req_free(struct zfcp_fsf_req *); | 131 | extern void zfcp_fsf_req_free(struct zfcp_fsf_req *); |
142 | extern struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *, u8); | 132 | extern struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *, u8); |
143 | extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long, | 133 | extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long, |
144 | struct zfcp_unit *); | 134 | struct zfcp_unit *); |
135 | extern void zfcp_fsf_reqid_check(struct zfcp_qdio *, int); | ||
145 | 136 | ||
146 | /* zfcp_qdio.c */ | 137 | /* zfcp_qdio.c */ |
147 | extern int zfcp_qdio_allocate(struct zfcp_adapter *); | 138 | extern int zfcp_qdio_setup(struct zfcp_adapter *); |
148 | extern void zfcp_qdio_free(struct zfcp_adapter *); | 139 | extern void zfcp_qdio_destroy(struct zfcp_qdio *); |
149 | extern int zfcp_qdio_send(struct zfcp_fsf_req *); | 140 | extern int zfcp_qdio_send(struct zfcp_qdio *, struct zfcp_queue_req *); |
150 | extern struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_fsf_req *); | 141 | extern struct qdio_buffer_element |
151 | extern struct qdio_buffer_element *zfcp_qdio_sbale_curr(struct zfcp_fsf_req *); | 142 | *zfcp_qdio_sbale_req(struct zfcp_qdio *, struct zfcp_queue_req *); |
152 | extern int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *, unsigned long, | 143 | extern struct qdio_buffer_element |
144 | *zfcp_qdio_sbale_curr(struct zfcp_qdio *, struct zfcp_queue_req *); | ||
145 | extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *, | ||
146 | struct zfcp_queue_req *, unsigned long, | ||
153 | struct scatterlist *, int); | 147 | struct scatterlist *, int); |
154 | extern int zfcp_qdio_open(struct zfcp_adapter *); | 148 | extern int zfcp_qdio_open(struct zfcp_qdio *); |
155 | extern void zfcp_qdio_close(struct zfcp_adapter *); | 149 | extern void zfcp_qdio_close(struct zfcp_qdio *); |
156 | 150 | ||
157 | /* zfcp_scsi.c */ | 151 | /* zfcp_scsi.c */ |
158 | extern struct zfcp_data zfcp_data; | 152 | extern struct zfcp_data zfcp_data; |
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 47daebfa7e59..722f22de8753 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c | |||
@@ -25,14 +25,6 @@ static u32 rscn_range_mask[] = { | |||
25 | [RSCN_FABRIC_ADDRESS] = 0x000000, | 25 | [RSCN_FABRIC_ADDRESS] = 0x000000, |
26 | }; | 26 | }; |
27 | 27 | ||
28 | struct ct_iu_gpn_ft_req { | ||
29 | struct ct_hdr header; | ||
30 | u8 flags; | ||
31 | u8 domain_id_scope; | ||
32 | u8 area_id_scope; | ||
33 | u8 fc4_type; | ||
34 | } __attribute__ ((packed)); | ||
35 | |||
36 | struct gpn_ft_resp_acc { | 28 | struct gpn_ft_resp_acc { |
37 | u8 control; | 29 | u8 control; |
38 | u8 port_id[3]; | 30 | u8 port_id[3]; |
@@ -65,7 +57,7 @@ struct zfcp_fc_ns_handler_data { | |||
65 | unsigned long handler_data; | 57 | unsigned long handler_data; |
66 | }; | 58 | }; |
67 | 59 | ||
68 | static int zfcp_wka_port_get(struct zfcp_wka_port *wka_port) | 60 | static int zfcp_fc_wka_port_get(struct zfcp_wka_port *wka_port) |
69 | { | 61 | { |
70 | if (mutex_lock_interruptible(&wka_port->mutex)) | 62 | if (mutex_lock_interruptible(&wka_port->mutex)) |
71 | return -ERESTARTSYS; | 63 | return -ERESTARTSYS; |
@@ -90,7 +82,7 @@ static int zfcp_wka_port_get(struct zfcp_wka_port *wka_port) | |||
90 | return -EIO; | 82 | return -EIO; |
91 | } | 83 | } |
92 | 84 | ||
93 | static void zfcp_wka_port_offline(struct work_struct *work) | 85 | static void zfcp_fc_wka_port_offline(struct work_struct *work) |
94 | { | 86 | { |
95 | struct delayed_work *dw = to_delayed_work(work); | 87 | struct delayed_work *dw = to_delayed_work(work); |
96 | struct zfcp_wka_port *wka_port = | 88 | struct zfcp_wka_port *wka_port = |
@@ -110,7 +102,7 @@ out: | |||
110 | mutex_unlock(&wka_port->mutex); | 102 | mutex_unlock(&wka_port->mutex); |
111 | } | 103 | } |
112 | 104 | ||
113 | static void zfcp_wka_port_put(struct zfcp_wka_port *wka_port) | 105 | static void zfcp_fc_wka_port_put(struct zfcp_wka_port *wka_port) |
114 | { | 106 | { |
115 | if (atomic_dec_return(&wka_port->refcount) != 0) | 107 | if (atomic_dec_return(&wka_port->refcount) != 0) |
116 | return; | 108 | return; |
@@ -129,10 +121,10 @@ static void zfcp_fc_wka_port_init(struct zfcp_wka_port *wka_port, u32 d_id, | |||
129 | wka_port->status = ZFCP_WKA_PORT_OFFLINE; | 121 | wka_port->status = ZFCP_WKA_PORT_OFFLINE; |
130 | atomic_set(&wka_port->refcount, 0); | 122 | atomic_set(&wka_port->refcount, 0); |
131 | mutex_init(&wka_port->mutex); | 123 | mutex_init(&wka_port->mutex); |
132 | INIT_DELAYED_WORK(&wka_port->work, zfcp_wka_port_offline); | 124 | INIT_DELAYED_WORK(&wka_port->work, zfcp_fc_wka_port_offline); |
133 | } | 125 | } |
134 | 126 | ||
135 | void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *wka) | 127 | static void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *wka) |
136 | { | 128 | { |
137 | cancel_delayed_work_sync(&wka->work); | 129 | cancel_delayed_work_sync(&wka->work); |
138 | mutex_lock(&wka->mutex); | 130 | mutex_lock(&wka->mutex); |
@@ -140,15 +132,13 @@ void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *wka) | |||
140 | mutex_unlock(&wka->mutex); | 132 | mutex_unlock(&wka->mutex); |
141 | } | 133 | } |
142 | 134 | ||
143 | void zfcp_fc_wka_ports_init(struct zfcp_adapter *adapter) | 135 | void zfcp_fc_wka_ports_force_offline(struct zfcp_wka_ports *gs) |
144 | { | 136 | { |
145 | struct zfcp_wka_ports *gs = adapter->gs; | 137 | zfcp_fc_wka_port_force_offline(&gs->ms); |
146 | 138 | zfcp_fc_wka_port_force_offline(&gs->ts); | |
147 | zfcp_fc_wka_port_init(&gs->ms, FC_FID_MGMT_SERV, adapter); | 139 | zfcp_fc_wka_port_force_offline(&gs->ds); |
148 | zfcp_fc_wka_port_init(&gs->ts, FC_FID_TIME_SERV, adapter); | 140 | zfcp_fc_wka_port_force_offline(&gs->as); |
149 | zfcp_fc_wka_port_init(&gs->ds, FC_FID_DIR_SERV, adapter); | 141 | zfcp_fc_wka_port_force_offline(&gs->ks); |
150 | zfcp_fc_wka_port_init(&gs->as, FC_FID_ALIASES, adapter); | ||
151 | zfcp_fc_wka_port_init(&gs->ks, FC_FID_SEC_KEY, adapter); | ||
152 | } | 142 | } |
153 | 143 | ||
154 | static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range, | 144 | static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range, |
@@ -160,7 +150,7 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range, | |||
160 | read_lock_irqsave(&zfcp_data.config_lock, flags); | 150 | read_lock_irqsave(&zfcp_data.config_lock, flags); |
161 | list_for_each_entry(port, &fsf_req->adapter->port_list_head, list) { | 151 | list_for_each_entry(port, &fsf_req->adapter->port_list_head, list) { |
162 | if ((port->d_id & range) == (elem->nport_did & range)) | 152 | if ((port->d_id & range) == (elem->nport_did & range)) |
163 | zfcp_test_link(port); | 153 | zfcp_fc_test_link(port); |
164 | if (!port->d_id) | 154 | if (!port->d_id) |
165 | zfcp_erp_port_reopen(port, | 155 | zfcp_erp_port_reopen(port, |
166 | ZFCP_STATUS_COMMON_ERP_FAILED, | 156 | ZFCP_STATUS_COMMON_ERP_FAILED, |
@@ -241,7 +231,7 @@ void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req) | |||
241 | (struct fsf_status_read_buffer *) fsf_req->data; | 231 | (struct fsf_status_read_buffer *) fsf_req->data; |
242 | unsigned int els_type = status_buffer->payload.data[0]; | 232 | unsigned int els_type = status_buffer->payload.data[0]; |
243 | 233 | ||
244 | zfcp_san_dbf_event_incoming_els(fsf_req); | 234 | zfcp_dbf_san_incoming_els(fsf_req); |
245 | if (els_type == LS_PLOGI) | 235 | if (els_type == LS_PLOGI) |
246 | zfcp_fc_incoming_plogi(fsf_req); | 236 | zfcp_fc_incoming_plogi(fsf_req); |
247 | else if (els_type == LS_LOGO) | 237 | else if (els_type == LS_LOGO) |
@@ -281,19 +271,18 @@ static void zfcp_fc_ns_gid_pn_eval(unsigned long data) | |||
281 | port->d_id = ct_iu_resp->d_id & ZFCP_DID_MASK; | 271 | port->d_id = ct_iu_resp->d_id & ZFCP_DID_MASK; |
282 | } | 272 | } |
283 | 273 | ||
284 | int static zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *erp_action, | 274 | static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port, |
285 | struct zfcp_gid_pn_data *gid_pn) | 275 | struct zfcp_gid_pn_data *gid_pn) |
286 | { | 276 | { |
287 | struct zfcp_adapter *adapter = erp_action->adapter; | 277 | struct zfcp_adapter *adapter = port->adapter; |
288 | struct zfcp_fc_ns_handler_data compl_rec; | 278 | struct zfcp_fc_ns_handler_data compl_rec; |
289 | int ret; | 279 | int ret; |
290 | 280 | ||
291 | /* setup parameters for send generic command */ | 281 | /* setup parameters for send generic command */ |
292 | gid_pn->port = erp_action->port; | 282 | gid_pn->port = port; |
293 | gid_pn->ct.wka_port = &adapter->gs->ds; | 283 | gid_pn->ct.wka_port = &adapter->gs->ds; |
294 | gid_pn->ct.handler = zfcp_fc_ns_handler; | 284 | gid_pn->ct.handler = zfcp_fc_ns_handler; |
295 | gid_pn->ct.handler_data = (unsigned long) &compl_rec; | 285 | gid_pn->ct.handler_data = (unsigned long) &compl_rec; |
296 | gid_pn->ct.timeout = ZFCP_NS_GID_PN_TIMEOUT; | ||
297 | gid_pn->ct.req = &gid_pn->req; | 286 | gid_pn->ct.req = &gid_pn->req; |
298 | gid_pn->ct.resp = &gid_pn->resp; | 287 | gid_pn->ct.resp = &gid_pn->resp; |
299 | sg_init_one(&gid_pn->req, &gid_pn->ct_iu_req, | 288 | sg_init_one(&gid_pn->req, &gid_pn->ct_iu_req, |
@@ -308,13 +297,12 @@ int static zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *erp_action, | |||
308 | gid_pn->ct_iu_req.header.options = ZFCP_CT_SYNCHRONOUS; | 297 | gid_pn->ct_iu_req.header.options = ZFCP_CT_SYNCHRONOUS; |
309 | gid_pn->ct_iu_req.header.cmd_rsp_code = ZFCP_CT_GID_PN; | 298 | gid_pn->ct_iu_req.header.cmd_rsp_code = ZFCP_CT_GID_PN; |
310 | gid_pn->ct_iu_req.header.max_res_size = ZFCP_CT_SIZE_ONE_PAGE / 4; | 299 | gid_pn->ct_iu_req.header.max_res_size = ZFCP_CT_SIZE_ONE_PAGE / 4; |
311 | gid_pn->ct_iu_req.wwpn = erp_action->port->wwpn; | 300 | gid_pn->ct_iu_req.wwpn = port->wwpn; |
312 | 301 | ||
313 | init_completion(&compl_rec.done); | 302 | init_completion(&compl_rec.done); |
314 | compl_rec.handler = zfcp_fc_ns_gid_pn_eval; | 303 | compl_rec.handler = zfcp_fc_ns_gid_pn_eval; |
315 | compl_rec.handler_data = (unsigned long) gid_pn; | 304 | compl_rec.handler_data = (unsigned long) gid_pn; |
316 | ret = zfcp_fsf_send_ct(&gid_pn->ct, adapter->pool.fsf_req_erp, | 305 | ret = zfcp_fsf_send_ct(&gid_pn->ct, adapter->pool.gid_pn_req); |
317 | erp_action); | ||
318 | if (!ret) | 306 | if (!ret) |
319 | wait_for_completion(&compl_rec.done); | 307 | wait_for_completion(&compl_rec.done); |
320 | return ret; | 308 | return ret; |
@@ -322,33 +310,56 @@ int static zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *erp_action, | |||
322 | 310 | ||
323 | /** | 311 | /** |
324 | * zfcp_fc_ns_gid_pn_request - initiate GID_PN nameserver request | 312 | * zfcp_fc_ns_gid_pn_request - initiate GID_PN nameserver request |
325 | * @erp_action: pointer to zfcp_erp_action where GID_PN request is needed | 313 | * @port: port where GID_PN request is needed |
326 | * return: -ENOMEM on error, 0 otherwise | 314 | * return: -ENOMEM on error, 0 otherwise |
327 | */ | 315 | */ |
328 | int zfcp_fc_ns_gid_pn(struct zfcp_erp_action *erp_action) | 316 | static int zfcp_fc_ns_gid_pn(struct zfcp_port *port) |
329 | { | 317 | { |
330 | int ret; | 318 | int ret; |
331 | struct zfcp_gid_pn_data *gid_pn; | 319 | struct zfcp_gid_pn_data *gid_pn; |
332 | struct zfcp_adapter *adapter = erp_action->adapter; | 320 | struct zfcp_adapter *adapter = port->adapter; |
333 | 321 | ||
334 | gid_pn = mempool_alloc(adapter->pool.data_gid_pn, GFP_ATOMIC); | 322 | gid_pn = mempool_alloc(adapter->pool.gid_pn_data, GFP_ATOMIC); |
335 | if (!gid_pn) | 323 | if (!gid_pn) |
336 | return -ENOMEM; | 324 | return -ENOMEM; |
337 | 325 | ||
338 | memset(gid_pn, 0, sizeof(*gid_pn)); | 326 | memset(gid_pn, 0, sizeof(*gid_pn)); |
339 | 327 | ||
340 | ret = zfcp_wka_port_get(&adapter->gs->ds); | 328 | ret = zfcp_fc_wka_port_get(&adapter->gs->ds); |
341 | if (ret) | 329 | if (ret) |
342 | goto out; | 330 | goto out; |
343 | 331 | ||
344 | ret = zfcp_fc_ns_gid_pn_request(erp_action, gid_pn); | 332 | ret = zfcp_fc_ns_gid_pn_request(port, gid_pn); |
345 | 333 | ||
346 | zfcp_wka_port_put(&adapter->gs->ds); | 334 | zfcp_fc_wka_port_put(&adapter->gs->ds); |
347 | out: | 335 | out: |
348 | mempool_free(gid_pn, adapter->pool.data_gid_pn); | 336 | mempool_free(gid_pn, adapter->pool.gid_pn_data); |
349 | return ret; | 337 | return ret; |
350 | } | 338 | } |
351 | 339 | ||
340 | void zfcp_fc_port_did_lookup(struct work_struct *work) | ||
341 | { | ||
342 | int ret; | ||
343 | struct zfcp_port *port = container_of(work, struct zfcp_port, | ||
344 | gid_pn_work); | ||
345 | |||
346 | ret = zfcp_fc_ns_gid_pn(port); | ||
347 | if (ret) { | ||
348 | /* could not issue gid_pn for some reason */ | ||
349 | zfcp_erp_adapter_reopen(port->adapter, 0, "fcgpn_1", NULL); | ||
350 | goto out; | ||
351 | } | ||
352 | |||
353 | if (!port->d_id) { | ||
354 | zfcp_erp_port_failed(port, "fcgpn_2", NULL); | ||
355 | goto out; | ||
356 | } | ||
357 | |||
358 | zfcp_erp_port_reopen(port, 0, "fcgpn_3", NULL); | ||
359 | out: | ||
360 | zfcp_port_put(port); | ||
361 | } | ||
362 | |||
352 | /** | 363 | /** |
353 | * zfcp_fc_plogi_evaluate - evaluate PLOGI payload | 364 |
354 | * @port: zfcp_port structure | 365 | * @port: zfcp_port structure |
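The hunk above moves the GID_PN nameserver lookup off the ERP action and onto a per-port work item: zfcp_fc_port_did_lookup() issues the query, reopens the adapter if the request could not be sent, marks the port failed if no D_ID came back, reopens the port otherwise, and finally drops the port reference taken by the caller. A minimal sketch of that queue-work-with-a-reference pattern follows; every name in it (example_port, example_did_lookup, and system_wq as the target queue) is invented for illustration and is not part of the driver, which queues onto its own work queue.

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Illustrative only -- "example_port" is not a zfcp structure. */
struct example_port {
	struct kref ref;		/* keeps the port alive while work is pending */
	struct work_struct did_work;	/* deferred name-server (GID_PN) lookup */
	u32 d_id;
};

static void example_port_release(struct kref *ref)
{
	kfree(container_of(ref, struct example_port, ref));
}

static void example_did_lookup(struct work_struct *work)
{
	struct example_port *port = container_of(work, struct example_port,
						 did_work);

	/* ...send GID_PN here, store the returned D_ID in port->d_id,
	 * and trigger port recovery or mark the port failed... */

	kref_put(&port->ref, example_port_release);	/* drop the queueing reference */
}

static void example_port_init(struct example_port *port)
{
	kref_init(&port->ref);
	INIT_WORK(&port->did_work, example_did_lookup);
}

/* Caller side: take a reference before queueing so the port cannot go away. */
static void example_trigger_did_lookup(struct example_port *port)
{
	kref_get(&port->ref);
	if (!queue_work(system_wq, &port->did_work))
		kref_put(&port->ref, example_port_release);	/* was already queued */
}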
@@ -404,6 +415,7 @@ static void zfcp_fc_adisc_handler(unsigned long data) | |||
404 | /* port is good, unblock rport without going through erp */ | 415 | /* port is good, unblock rport without going through erp */ |
405 | zfcp_scsi_schedule_rport_register(port); | 416 | zfcp_scsi_schedule_rport_register(port); |
406 | out: | 417 | out: |
418 | atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status); | ||
407 | zfcp_port_put(port); | 419 | zfcp_port_put(port); |
408 | kfree(adisc); | 420 | kfree(adisc); |
409 | } | 421 | } |
@@ -450,28 +462,36 @@ void zfcp_fc_link_test_work(struct work_struct *work) | |||
450 | port->rport_task = RPORT_DEL; | 462 | port->rport_task = RPORT_DEL; |
451 | zfcp_scsi_rport_work(&port->rport_work); | 463 | zfcp_scsi_rport_work(&port->rport_work); |
452 | 464 | ||
465 | /* only issue one test command at one time per port */ | ||
466 | if (atomic_read(&port->status) & ZFCP_STATUS_PORT_LINK_TEST) | ||
467 | goto out; | ||
468 | |||
469 | atomic_set_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status); | ||
470 | |||
453 | retval = zfcp_fc_adisc(port); | 471 | retval = zfcp_fc_adisc(port); |
454 | if (retval == 0) | 472 | if (retval == 0) |
455 | return; | 473 | return; |
456 | 474 | ||
457 | /* send of ADISC was not possible */ | 475 | /* send of ADISC was not possible */ |
476 | atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status); | ||
458 | zfcp_erp_port_forced_reopen(port, 0, "fcltwk1", NULL); | 477 | zfcp_erp_port_forced_reopen(port, 0, "fcltwk1", NULL); |
459 | 478 | ||
479 | out: | ||
460 | zfcp_port_put(port); | 480 | zfcp_port_put(port); |
461 | } | 481 | } |
462 | 482 | ||
463 | /** | 483 | /** |
464 | * zfcp_test_link - lightweight link test procedure | 484 | * zfcp_fc_test_link - lightweight link test procedure |
465 | * @port: port to be tested | 485 | * @port: port to be tested |
466 | * | 486 | * |
467 | * Test status of a link to a remote port using the ELS command ADISC. | 487 | * Test status of a link to a remote port using the ELS command ADISC. |
468 | * If there is a problem with the remote port, error recovery steps | 488 | * If there is a problem with the remote port, error recovery steps |
469 | * will be triggered. | 489 | * will be triggered. |
470 | */ | 490 | */ |
471 | void zfcp_test_link(struct zfcp_port *port) | 491 | void zfcp_fc_test_link(struct zfcp_port *port) |
472 | { | 492 | { |
473 | zfcp_port_get(port); | 493 | zfcp_port_get(port); |
474 | if (!queue_work(zfcp_data.work_queue, &port->test_link_work)) | 494 | if (!queue_work(port->adapter->work_queue, &port->test_link_work)) |
475 | zfcp_port_put(port); | 495 | zfcp_port_put(port); |
476 | } | 496 | } |
477 | 497 | ||
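The added ZFCP_STATUS_PORT_LINK_TEST handling above ensures only one ADISC is in flight per port: the flag is set before the ELS is issued and cleared either in the ADISC handler or on the send-failure path. The same single-flight guard can be sketched with the kernel's atomic bit operations; the names below are made up for the example, and the driver itself uses atomic_set_mask/atomic_clear_mask on the port status word rather than a separate flags field.

#include <linux/bitops.h>
#include <linux/types.h>

#define EXAMPLE_LINK_TEST_PENDING	0	/* bit number in ->flags */

struct example_port_state {
	unsigned long flags;
};

/* Returns true when the caller won the right to issue a new link test. */
static bool example_link_test_start(struct example_port_state *st)
{
	return !test_and_set_bit(EXAMPLE_LINK_TEST_PENDING, &st->flags);
}

/* Called from the ADISC handler and from the send-failure path. */
static void example_link_test_done(struct example_port_state *st)
{
	clear_bit(EXAMPLE_LINK_TEST_PENDING, &st->flags);
}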
@@ -479,7 +499,7 @@ static void zfcp_free_sg_env(struct zfcp_gpn_ft *gpn_ft, int buf_num) | |||
479 | { | 499 | { |
480 | struct scatterlist *sg = &gpn_ft->sg_req; | 500 | struct scatterlist *sg = &gpn_ft->sg_req; |
481 | 501 | ||
482 | kfree(sg_virt(sg)); /* free request buffer */ | 502 | kmem_cache_free(zfcp_data.gpn_ft_cache, sg_virt(sg)); |
483 | zfcp_sg_free_table(gpn_ft->sg_resp, buf_num); | 503 | zfcp_sg_free_table(gpn_ft->sg_resp, buf_num); |
484 | 504 | ||
485 | kfree(gpn_ft); | 505 | kfree(gpn_ft); |
@@ -494,7 +514,7 @@ static struct zfcp_gpn_ft *zfcp_alloc_sg_env(int buf_num) | |||
494 | if (!gpn_ft) | 514 | if (!gpn_ft) |
495 | return NULL; | 515 | return NULL; |
496 | 516 | ||
497 | req = kzalloc(sizeof(struct ct_iu_gpn_ft_req), GFP_KERNEL); | 517 | req = kmem_cache_alloc(zfcp_data.gpn_ft_cache, GFP_KERNEL); |
498 | if (!req) { | 518 | if (!req) { |
499 | kfree(gpn_ft); | 519 | kfree(gpn_ft); |
500 | gpn_ft = NULL; | 520 | gpn_ft = NULL; |
@@ -511,9 +531,8 @@ out: | |||
511 | } | 531 | } |
512 | 532 | ||
513 | 533 | ||
514 | static int zfcp_scan_issue_gpn_ft(struct zfcp_gpn_ft *gpn_ft, | 534 | static int zfcp_fc_send_gpn_ft(struct zfcp_gpn_ft *gpn_ft, |
515 | struct zfcp_adapter *adapter, | 535 | struct zfcp_adapter *adapter, int max_bytes) |
516 | int max_bytes) | ||
517 | { | 536 | { |
518 | struct zfcp_send_ct *ct = &gpn_ft->ct; | 537 | struct zfcp_send_ct *ct = &gpn_ft->ct; |
519 | struct ct_iu_gpn_ft_req *req = sg_virt(&gpn_ft->sg_req); | 538 | struct ct_iu_gpn_ft_req *req = sg_virt(&gpn_ft->sg_req); |
@@ -536,19 +555,18 @@ static int zfcp_scan_issue_gpn_ft(struct zfcp_gpn_ft *gpn_ft, | |||
536 | ct->wka_port = &adapter->gs->ds; | 555 | ct->wka_port = &adapter->gs->ds; |
537 | ct->handler = zfcp_fc_ns_handler; | 556 | ct->handler = zfcp_fc_ns_handler; |
538 | ct->handler_data = (unsigned long)&compl_rec; | 557 | ct->handler_data = (unsigned long)&compl_rec; |
539 | ct->timeout = 10; | ||
540 | ct->req = &gpn_ft->sg_req; | 558 | ct->req = &gpn_ft->sg_req; |
541 | ct->resp = gpn_ft->sg_resp; | 559 | ct->resp = gpn_ft->sg_resp; |
542 | 560 | ||
543 | init_completion(&compl_rec.done); | 561 | init_completion(&compl_rec.done); |
544 | compl_rec.handler = NULL; | 562 | compl_rec.handler = NULL; |
545 | ret = zfcp_fsf_send_ct(ct, NULL, NULL); | 563 | ret = zfcp_fsf_send_ct(ct, NULL); |
546 | if (!ret) | 564 | if (!ret) |
547 | wait_for_completion(&compl_rec.done); | 565 | wait_for_completion(&compl_rec.done); |
548 | return ret; | 566 | return ret; |
549 | } | 567 | } |
550 | 568 | ||
551 | static void zfcp_validate_port(struct zfcp_port *port) | 569 | static void zfcp_fc_validate_port(struct zfcp_port *port) |
552 | { | 570 | { |
553 | struct zfcp_adapter *adapter = port->adapter; | 571 | struct zfcp_adapter *adapter = port->adapter; |
554 | 572 | ||
@@ -568,7 +586,7 @@ static void zfcp_validate_port(struct zfcp_port *port) | |||
568 | zfcp_port_dequeue(port); | 586 | zfcp_port_dequeue(port); |
569 | } | 587 | } |
570 | 588 | ||
571 | static int zfcp_scan_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft, int max_entries) | 589 | static int zfcp_fc_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft, int max_entries) |
572 | { | 590 | { |
573 | struct zfcp_send_ct *ct = &gpn_ft->ct; | 591 | struct zfcp_send_ct *ct = &gpn_ft->ct; |
574 | struct scatterlist *sg = gpn_ft->sg_resp; | 592 | struct scatterlist *sg = gpn_ft->sg_resp; |
@@ -595,7 +613,7 @@ static int zfcp_scan_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft, int max_entries) | |||
595 | return -E2BIG; | 613 | return -E2BIG; |
596 | } | 614 | } |
597 | 615 | ||
598 | down(&zfcp_data.config_sema); | 616 | mutex_lock(&zfcp_data.config_mutex); |
599 | 617 | ||
600 | /* first entry is the header */ | 618 | /* first entry is the header */ |
601 | for (x = 1; x < max_entries && !last; x++) { | 619 | for (x = 1; x < max_entries && !last; x++) { |
@@ -628,16 +646,16 @@ static int zfcp_scan_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft, int max_entries) | |||
628 | 646 | ||
629 | zfcp_erp_wait(adapter); | 647 | zfcp_erp_wait(adapter); |
630 | list_for_each_entry_safe(port, tmp, &adapter->port_list_head, list) | 648 | list_for_each_entry_safe(port, tmp, &adapter->port_list_head, list) |
631 | zfcp_validate_port(port); | 649 | zfcp_fc_validate_port(port); |
632 | up(&zfcp_data.config_sema); | 650 | mutex_unlock(&zfcp_data.config_mutex); |
633 | return ret; | 651 | return ret; |
634 | } | 652 | } |
635 | 653 | ||
636 | /** | 654 | /** |
637 | * zfcp_scan_ports - scan remote ports and attach new ports | 655 | * zfcp_fc_scan_ports - scan remote ports and attach new ports |
638 | * @adapter: pointer to struct zfcp_adapter | 656 | * @adapter: pointer to struct zfcp_adapter |
639 | */ | 657 | */ |
640 | int zfcp_scan_ports(struct zfcp_adapter *adapter) | 658 | int zfcp_fc_scan_ports(struct zfcp_adapter *adapter) |
641 | { | 659 | { |
642 | int ret, i; | 660 | int ret, i; |
643 | struct zfcp_gpn_ft *gpn_ft; | 661 | struct zfcp_gpn_ft *gpn_ft; |
@@ -652,7 +670,7 @@ int zfcp_scan_ports(struct zfcp_adapter *adapter) | |||
652 | fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV) | 670 | fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV) |
653 | return 0; | 671 | return 0; |
654 | 672 | ||
655 | ret = zfcp_wka_port_get(&adapter->gs->ds); | 673 | ret = zfcp_fc_wka_port_get(&adapter->gs->ds); |
656 | if (ret) | 674 | if (ret) |
657 | return ret; | 675 | return ret; |
658 | 676 | ||
@@ -663,9 +681,9 @@ int zfcp_scan_ports(struct zfcp_adapter *adapter) | |||
663 | } | 681 | } |
664 | 682 | ||
665 | for (i = 0; i < 3; i++) { | 683 | for (i = 0; i < 3; i++) { |
666 | ret = zfcp_scan_issue_gpn_ft(gpn_ft, adapter, max_bytes); | 684 | ret = zfcp_fc_send_gpn_ft(gpn_ft, adapter, max_bytes); |
667 | if (!ret) { | 685 | if (!ret) { |
668 | ret = zfcp_scan_eval_gpn_ft(gpn_ft, max_entries); | 686 | ret = zfcp_fc_eval_gpn_ft(gpn_ft, max_entries); |
669 | if (ret == -EAGAIN) | 687 | if (ret == -EAGAIN) |
670 | ssleep(1); | 688 | ssleep(1); |
671 | else | 689 | else |
@@ -674,14 +692,14 @@ int zfcp_scan_ports(struct zfcp_adapter *adapter) | |||
674 | } | 692 | } |
675 | zfcp_free_sg_env(gpn_ft, buf_num); | 693 | zfcp_free_sg_env(gpn_ft, buf_num); |
676 | out: | 694 | out: |
677 | zfcp_wka_port_put(&adapter->gs->ds); | 695 | zfcp_fc_wka_port_put(&adapter->gs->ds); |
678 | return ret; | 696 | return ret; |
679 | } | 697 | } |
680 | 698 | ||
681 | 699 | ||
682 | void _zfcp_scan_ports_later(struct work_struct *work) | 700 | void _zfcp_fc_scan_ports_later(struct work_struct *work) |
683 | { | 701 | { |
684 | zfcp_scan_ports(container_of(work, struct zfcp_adapter, scan_work)); | 702 | zfcp_fc_scan_ports(container_of(work, struct zfcp_adapter, scan_work)); |
685 | } | 703 | } |
686 | 704 | ||
687 | struct zfcp_els_fc_job { | 705 | struct zfcp_els_fc_job { |
@@ -732,7 +750,7 @@ int zfcp_fc_execute_els_fc_job(struct fc_bsg_job *job) | |||
732 | els_fc_job->els.adapter = adapter; | 750 | els_fc_job->els.adapter = adapter; |
733 | if (rport) { | 751 | if (rport) { |
734 | read_lock_irq(&zfcp_data.config_lock); | 752 | read_lock_irq(&zfcp_data.config_lock); |
735 | port = rport->dd_data; | 753 | port = zfcp_get_port_by_wwpn(adapter, rport->port_name); |
736 | if (port) | 754 | if (port) |
737 | els_fc_job->els.d_id = port->d_id; | 755 | els_fc_job->els.d_id = port->d_id; |
738 | read_unlock_irq(&zfcp_data.config_lock); | 756 | read_unlock_irq(&zfcp_data.config_lock); |
@@ -771,7 +789,7 @@ static void zfcp_fc_generic_ct_handler(unsigned long data) | |||
771 | job->state_flags = FC_RQST_STATE_DONE; | 789 | job->state_flags = FC_RQST_STATE_DONE; |
772 | job->job_done(job); | 790 | job->job_done(job); |
773 | 791 | ||
774 | zfcp_wka_port_put(ct_fc_job->ct.wka_port); | 792 | zfcp_fc_wka_port_put(ct_fc_job->ct.wka_port); |
775 | 793 | ||
776 | kfree(ct_fc_job); | 794 | kfree(ct_fc_job); |
777 | } | 795 | } |
@@ -817,7 +835,7 @@ int zfcp_fc_execute_ct_fc_job(struct fc_bsg_job *job) | |||
817 | return -EINVAL; /* no such service */ | 835 | return -EINVAL; /* no such service */ |
818 | } | 836 | } |
819 | 837 | ||
820 | ret = zfcp_wka_port_get(ct_fc_job->ct.wka_port); | 838 | ret = zfcp_fc_wka_port_get(ct_fc_job->ct.wka_port); |
821 | if (ret) { | 839 | if (ret) { |
822 | kfree(ct_fc_job); | 840 | kfree(ct_fc_job); |
823 | return ret; | 841 | return ret; |
@@ -825,16 +843,40 @@ int zfcp_fc_execute_ct_fc_job(struct fc_bsg_job *job) | |||
825 | 843 | ||
826 | ct_fc_job->ct.req = job->request_payload.sg_list; | 844 | ct_fc_job->ct.req = job->request_payload.sg_list; |
827 | ct_fc_job->ct.resp = job->reply_payload.sg_list; | 845 | ct_fc_job->ct.resp = job->reply_payload.sg_list; |
828 | ct_fc_job->ct.timeout = ZFCP_FSF_REQUEST_TIMEOUT; | ||
829 | ct_fc_job->ct.handler = zfcp_fc_generic_ct_handler; | 846 | ct_fc_job->ct.handler = zfcp_fc_generic_ct_handler; |
830 | ct_fc_job->ct.handler_data = (unsigned long) ct_fc_job; | 847 | ct_fc_job->ct.handler_data = (unsigned long) ct_fc_job; |
831 | ct_fc_job->ct.completion = NULL; | 848 | ct_fc_job->ct.completion = NULL; |
832 | ct_fc_job->job = job; | 849 | ct_fc_job->job = job; |
833 | 850 | ||
834 | ret = zfcp_fsf_send_ct(&ct_fc_job->ct, NULL, NULL); | 851 | ret = zfcp_fsf_send_ct(&ct_fc_job->ct, NULL); |
835 | if (ret) { | 852 | if (ret) { |
836 | kfree(ct_fc_job); | 853 | kfree(ct_fc_job); |
837 | zfcp_wka_port_put(ct_fc_job->ct.wka_port); | 854 | zfcp_fc_wka_port_put(ct_fc_job->ct.wka_port); |
838 | } | 855 | } |
839 | return ret; | 856 | return ret; |
840 | } | 857 | } |
858 | |||
859 | int zfcp_fc_gs_setup(struct zfcp_adapter *adapter) | ||
860 | { | ||
861 | struct zfcp_wka_ports *wka_ports; | ||
862 | |||
863 | wka_ports = kzalloc(sizeof(struct zfcp_wka_ports), GFP_KERNEL); | ||
864 | if (!wka_ports) | ||
865 | return -ENOMEM; | ||
866 | |||
867 | adapter->gs = wka_ports; | ||
868 | zfcp_fc_wka_port_init(&wka_ports->ms, FC_FID_MGMT_SERV, adapter); | ||
869 | zfcp_fc_wka_port_init(&wka_ports->ts, FC_FID_TIME_SERV, adapter); | ||
870 | zfcp_fc_wka_port_init(&wka_ports->ds, FC_FID_DIR_SERV, adapter); | ||
871 | zfcp_fc_wka_port_init(&wka_ports->as, FC_FID_ALIASES, adapter); | ||
872 | zfcp_fc_wka_port_init(&wka_ports->ks, FC_FID_SEC_KEY, adapter); | ||
873 | |||
874 | return 0; | ||
875 | } | ||
876 | |||
877 | void zfcp_fc_gs_destroy(struct zfcp_adapter *adapter) | ||
878 | { | ||
879 | kfree(adapter->gs); | ||
880 | adapter->gs = NULL; | ||
881 | } | ||
882 | |||
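The new zfcp_fc_gs_setup()/zfcp_fc_gs_destroy() pair above gives the well-known-address ports their own allocate-and-initialize and teardown step on the adapter. The shape of that pairing is sketched below with invented structure names; 0xFFFFFC is the standard FC directory-server address and appears here only as a stand-in value.

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Illustrative structures, not the zfcp definitions. */
struct example_wka_port {
	u32 d_id;	/* well-known address of the service */
};

struct example_services {
	struct example_wka_port ds;	/* directory service */
};

struct example_adapter {
	struct example_services *gs;
};

static int example_gs_setup(struct example_adapter *adapter)
{
	struct example_services *gs = kzalloc(sizeof(*gs), GFP_KERNEL);

	if (!gs)
		return -ENOMEM;

	gs->ds.d_id = 0xFFFFFC;	/* FC directory server, as a stand-in */
	adapter->gs = gs;
	return 0;
}

static void example_gs_destroy(struct example_adapter *adapter)
{
	kfree(adapter->gs);
	adapter->gs = NULL;	/* guard against use after teardown */
}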
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 47795fbf081f..f09c863dc6bd 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c | |||
@@ -11,9 +11,7 @@ | |||
11 | 11 | ||
12 | #include <linux/blktrace_api.h> | 12 | #include <linux/blktrace_api.h> |
13 | #include "zfcp_ext.h" | 13 | #include "zfcp_ext.h" |
14 | 14 | #include "zfcp_dbf.h" | |
15 | #define ZFCP_REQ_AUTO_CLEANUP 0x00000002 | ||
16 | #define ZFCP_REQ_NO_QTCB 0x00000008 | ||
17 | 15 | ||
18 | static void zfcp_fsf_request_timeout_handler(unsigned long data) | 16 | static void zfcp_fsf_request_timeout_handler(unsigned long data) |
19 | { | 17 | { |
@@ -111,43 +109,15 @@ static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req) | |||
111 | void zfcp_fsf_req_free(struct zfcp_fsf_req *req) | 109 | void zfcp_fsf_req_free(struct zfcp_fsf_req *req) |
112 | { | 110 | { |
113 | if (likely(req->pool)) { | 111 | if (likely(req->pool)) { |
112 | if (likely(req->qtcb)) | ||
113 | mempool_free(req->qtcb, req->adapter->pool.qtcb_pool); | ||
114 | mempool_free(req, req->pool); | 114 | mempool_free(req, req->pool); |
115 | return; | 115 | return; |
116 | } | 116 | } |
117 | 117 | ||
118 | if (req->qtcb) { | 118 | if (likely(req->qtcb)) |
119 | kmem_cache_free(zfcp_data.fsf_req_qtcb_cache, req); | 119 | kmem_cache_free(zfcp_data.qtcb_cache, req->qtcb); |
120 | return; | 120 | kfree(req); |
121 | } | ||
122 | } | ||
123 | |||
124 | /** | ||
125 | * zfcp_fsf_req_dismiss_all - dismiss all fsf requests | ||
126 | * @adapter: pointer to struct zfcp_adapter | ||
127 | * | ||
128 | * Never ever call this without shutting down the adapter first. | ||
129 | * Otherwise the adapter would continue using and corrupting s390 storage. | ||
130 | * Included BUG_ON() call to ensure this is done. | ||
131 | * ERP is supposed to be the only user of this function. | ||
132 | */ | ||
133 | void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter) | ||
134 | { | ||
135 | struct zfcp_fsf_req *req, *tmp; | ||
136 | unsigned long flags; | ||
137 | LIST_HEAD(remove_queue); | ||
138 | unsigned int i; | ||
139 | |||
140 | BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP); | ||
141 | spin_lock_irqsave(&adapter->req_list_lock, flags); | ||
142 | for (i = 0; i < REQUEST_LIST_SIZE; i++) | ||
143 | list_splice_init(&adapter->req_list[i], &remove_queue); | ||
144 | spin_unlock_irqrestore(&adapter->req_list_lock, flags); | ||
145 | |||
146 | list_for_each_entry_safe(req, tmp, &remove_queue, list) { | ||
147 | list_del(&req->list); | ||
148 | req->status |= ZFCP_STATUS_FSFREQ_DISMISSED; | ||
149 | zfcp_fsf_req_complete(req); | ||
150 | } | ||
151 | } | 121 | } |
152 | 122 | ||
153 | static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req) | 123 | static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req) |
@@ -278,13 +248,13 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req) | |||
278 | struct fsf_status_read_buffer *sr_buf = req->data; | 248 | struct fsf_status_read_buffer *sr_buf = req->data; |
279 | 249 | ||
280 | if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) { | 250 | if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) { |
281 | zfcp_hba_dbf_event_fsf_unsol("dism", adapter, sr_buf); | 251 | zfcp_dbf_hba_fsf_unsol("dism", adapter->dbf, sr_buf); |
282 | mempool_free(sr_buf, adapter->pool.data_status_read); | 252 | mempool_free(sr_buf, adapter->pool.status_read_data); |
283 | zfcp_fsf_req_free(req); | 253 | zfcp_fsf_req_free(req); |
284 | return; | 254 | return; |
285 | } | 255 | } |
286 | 256 | ||
287 | zfcp_hba_dbf_event_fsf_unsol("read", adapter, sr_buf); | 257 | zfcp_dbf_hba_fsf_unsol("read", adapter->dbf, sr_buf); |
288 | 258 | ||
289 | switch (sr_buf->status_type) { | 259 | switch (sr_buf->status_type) { |
290 | case FSF_STATUS_READ_PORT_CLOSED: | 260 | case FSF_STATUS_READ_PORT_CLOSED: |
@@ -299,7 +269,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req) | |||
299 | dev_warn(&adapter->ccw_device->dev, | 269 | dev_warn(&adapter->ccw_device->dev, |
300 | "The error threshold for checksum statistics " | 270 | "The error threshold for checksum statistics " |
301 | "has been exceeded\n"); | 271 | "has been exceeded\n"); |
302 | zfcp_hba_dbf_event_berr(adapter, req); | 272 | zfcp_dbf_hba_berr(adapter->dbf, req); |
303 | break; | 273 | break; |
304 | case FSF_STATUS_READ_LINK_DOWN: | 274 | case FSF_STATUS_READ_LINK_DOWN: |
305 | zfcp_fsf_status_read_link_down(req); | 275 | zfcp_fsf_status_read_link_down(req); |
@@ -331,11 +301,11 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req) | |||
331 | break; | 301 | break; |
332 | } | 302 | } |
333 | 303 | ||
334 | mempool_free(sr_buf, adapter->pool.data_status_read); | 304 | mempool_free(sr_buf, adapter->pool.status_read_data); |
335 | zfcp_fsf_req_free(req); | 305 | zfcp_fsf_req_free(req); |
336 | 306 | ||
337 | atomic_inc(&adapter->stat_miss); | 307 | atomic_inc(&adapter->stat_miss); |
338 | queue_work(zfcp_data.work_queue, &adapter->stat_work); | 308 | queue_work(adapter->work_queue, &adapter->stat_work); |
339 | } | 309 | } |
340 | 310 | ||
341 | static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req) | 311 | static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req) |
@@ -385,7 +355,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req) | |||
385 | struct fsf_qtcb *qtcb = req->qtcb; | 355 | struct fsf_qtcb *qtcb = req->qtcb; |
386 | union fsf_prot_status_qual *psq = &qtcb->prefix.prot_status_qual; | 356 | union fsf_prot_status_qual *psq = &qtcb->prefix.prot_status_qual; |
387 | 357 | ||
388 | zfcp_hba_dbf_event_fsf_response(req); | 358 | zfcp_dbf_hba_fsf_response(req); |
389 | 359 | ||
390 | if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) { | 360 | if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) { |
391 | req->status |= ZFCP_STATUS_FSFREQ_ERROR | | 361 | req->status |= ZFCP_STATUS_FSFREQ_ERROR | |
@@ -458,7 +428,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req) | |||
458 | * is called to process the completion status and trigger further | 428 | * is called to process the completion status and trigger further |
459 | * events related to the FSF request. | 429 | * events related to the FSF request. |
460 | */ | 430 | */ |
461 | void zfcp_fsf_req_complete(struct zfcp_fsf_req *req) | 431 | static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req) |
462 | { | 432 | { |
463 | if (unlikely(req->fsf_command == FSF_QTCB_UNSOLICITED_STATUS)) { | 433 | if (unlikely(req->fsf_command == FSF_QTCB_UNSOLICITED_STATUS)) { |
464 | zfcp_fsf_status_read_handler(req); | 434 | zfcp_fsf_status_read_handler(req); |
@@ -472,23 +442,40 @@ void zfcp_fsf_req_complete(struct zfcp_fsf_req *req) | |||
472 | 442 | ||
473 | if (req->erp_action) | 443 | if (req->erp_action) |
474 | zfcp_erp_notify(req->erp_action, 0); | 444 | zfcp_erp_notify(req->erp_action, 0); |
475 | req->status |= ZFCP_STATUS_FSFREQ_COMPLETED; | ||
476 | 445 | ||
477 | if (likely(req->status & ZFCP_STATUS_FSFREQ_CLEANUP)) | 446 | if (likely(req->status & ZFCP_STATUS_FSFREQ_CLEANUP)) |
478 | zfcp_fsf_req_free(req); | 447 | zfcp_fsf_req_free(req); |
479 | else | 448 | else |
480 | /* notify initiator waiting for the requests completion */ | 449 | complete(&req->completion); |
481 | /* | 450 | } |
482 | * FIXME: Race! We must not access fsf_req here as it might have been | 451 | |
483 | * cleaned up already due to the set ZFCP_STATUS_FSFREQ_COMPLETED | 452 | /** |
484 | * flag. It's an improbable case. But, we have the same paranoia for | 453 | * zfcp_fsf_req_dismiss_all - dismiss all fsf requests |
485 | * the cleanup flag already. | 454 | * @adapter: pointer to struct zfcp_adapter |
486 | * Might better be handled using complete()? | 455 | * |
487 | * (setting the flag and doing wakeup ought to be atomic | 456 | * Never ever call this without shutting down the adapter first. |
488 | * with regard to checking the flag as long as waitqueue is | 457 | * Otherwise the adapter would continue using and corrupting s390 storage. |
489 | * part of the to be released structure) | 458 | * Included BUG_ON() call to ensure this is done. |
490 | */ | 459 | * ERP is supposed to be the only user of this function. |
491 | wake_up(&req->completion_wq); | 460 | */ |
461 | void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter) | ||
462 | { | ||
463 | struct zfcp_fsf_req *req, *tmp; | ||
464 | unsigned long flags; | ||
465 | LIST_HEAD(remove_queue); | ||
466 | unsigned int i; | ||
467 | |||
468 | BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP); | ||
469 | spin_lock_irqsave(&adapter->req_list_lock, flags); | ||
470 | for (i = 0; i < REQUEST_LIST_SIZE; i++) | ||
471 | list_splice_init(&adapter->req_list[i], &remove_queue); | ||
472 | spin_unlock_irqrestore(&adapter->req_list_lock, flags); | ||
473 | |||
474 | list_for_each_entry_safe(req, tmp, &remove_queue, list) { | ||
475 | list_del(&req->list); | ||
476 | req->status |= ZFCP_STATUS_FSFREQ_DISMISSED; | ||
477 | zfcp_fsf_req_complete(req); | ||
478 | } | ||
492 | } | 479 | } |
493 | 480 | ||
494 | static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req) | 481 | static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req) |
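The removed FIXME above worried about the race between setting ZFCP_STATUS_FSFREQ_COMPLETED and waking the waiter on completion_wq; the rewrite sidesteps it by switching to a struct completion, where signalling and waiting are already synchronized. A minimal sketch of that pattern, with invented names, looks like this:

#include <linux/completion.h>

struct example_request {
	struct completion done;	/* signalled exactly once by the handler */
	int result;
};

static void example_request_init(struct example_request *req)
{
	req->result = 0;
	init_completion(&req->done);
}

/* Handler side: publish the result, then signal the waiter. */
static void example_request_finish(struct example_request *req, int result)
{
	req->result = result;
	complete(&req->done);
}

/* Submitter side: sleeps until the handler has called complete(). */
static int example_request_wait(struct example_request *req)
{
	wait_for_completion(&req->done);
	return req->result;
}

The synchronous callers further down, such as zfcp_fsf_exchange_config_data_sync(), use exactly this wait_for_completion() form in place of wait_event() on the completed-status flag.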
@@ -650,79 +637,77 @@ static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req) | |||
650 | } | 637 | } |
651 | } | 638 | } |
652 | 639 | ||
653 | static int zfcp_fsf_sbal_check(struct zfcp_adapter *adapter) | 640 | static int zfcp_fsf_sbal_check(struct zfcp_qdio *qdio) |
654 | { | 641 | { |
655 | struct zfcp_qdio_queue *req_q = &adapter->req_q; | 642 | struct zfcp_qdio_queue *req_q = &qdio->req_q; |
656 | 643 | ||
657 | spin_lock_bh(&adapter->req_q_lock); | 644 | spin_lock_bh(&qdio->req_q_lock); |
658 | if (atomic_read(&req_q->count)) | 645 | if (atomic_read(&req_q->count)) |
659 | return 1; | 646 | return 1; |
660 | spin_unlock_bh(&adapter->req_q_lock); | 647 | spin_unlock_bh(&qdio->req_q_lock); |
661 | return 0; | 648 | return 0; |
662 | } | 649 | } |
663 | 650 | ||
664 | static int zfcp_fsf_req_sbal_get(struct zfcp_adapter *adapter) | 651 | static int zfcp_fsf_req_sbal_get(struct zfcp_qdio *qdio) |
665 | { | 652 | { |
653 | struct zfcp_adapter *adapter = qdio->adapter; | ||
666 | long ret; | 654 | long ret; |
667 | 655 | ||
668 | spin_unlock_bh(&adapter->req_q_lock); | 656 | spin_unlock_bh(&qdio->req_q_lock); |
669 | ret = wait_event_interruptible_timeout(adapter->request_wq, | 657 | ret = wait_event_interruptible_timeout(qdio->req_q_wq, |
670 | zfcp_fsf_sbal_check(adapter), 5 * HZ); | 658 | zfcp_fsf_sbal_check(qdio), 5 * HZ); |
671 | if (ret > 0) | 659 | if (ret > 0) |
672 | return 0; | 660 | return 0; |
673 | if (!ret) { | 661 | if (!ret) { |
674 | atomic_inc(&adapter->qdio_outb_full); | 662 | atomic_inc(&qdio->req_q_full); |
675 | /* assume hanging outbound queue, try queue recovery */ | 663 | /* assume hanging outbound queue, try queue recovery */ |
676 | zfcp_erp_adapter_reopen(adapter, 0, "fsrsg_1", NULL); | 664 | zfcp_erp_adapter_reopen(adapter, 0, "fsrsg_1", NULL); |
677 | } | 665 | } |
678 | 666 | ||
679 | spin_lock_bh(&adapter->req_q_lock); | 667 | spin_lock_bh(&qdio->req_q_lock); |
680 | return -EIO; | 668 | return -EIO; |
681 | } | 669 | } |
682 | 670 | ||
683 | static struct zfcp_fsf_req *zfcp_fsf_alloc_noqtcb(mempool_t *pool) | 671 | static struct zfcp_fsf_req *zfcp_fsf_alloc(mempool_t *pool) |
684 | { | 672 | { |
685 | struct zfcp_fsf_req *req; | 673 | struct zfcp_fsf_req *req; |
686 | req = mempool_alloc(pool, GFP_ATOMIC); | 674 | |
687 | if (!req) | 675 | if (likely(pool)) |
676 | req = mempool_alloc(pool, GFP_ATOMIC); | ||
677 | else | ||
678 | req = kmalloc(sizeof(*req), GFP_ATOMIC); | ||
679 | |||
680 | if (unlikely(!req)) | ||
688 | return NULL; | 681 | return NULL; |
682 | |||
689 | memset(req, 0, sizeof(*req)); | 683 | memset(req, 0, sizeof(*req)); |
690 | req->pool = pool; | 684 | req->pool = pool; |
691 | return req; | 685 | return req; |
692 | } | 686 | } |
693 | 687 | ||
694 | static struct zfcp_fsf_req *zfcp_fsf_alloc_qtcb(mempool_t *pool) | 688 | static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool) |
695 | { | 689 | { |
696 | struct zfcp_fsf_req_qtcb *qtcb; | 690 | struct fsf_qtcb *qtcb; |
697 | 691 | ||
698 | if (likely(pool)) | 692 | if (likely(pool)) |
699 | qtcb = mempool_alloc(pool, GFP_ATOMIC); | 693 | qtcb = mempool_alloc(pool, GFP_ATOMIC); |
700 | else | 694 | else |
701 | qtcb = kmem_cache_alloc(zfcp_data.fsf_req_qtcb_cache, | 695 | qtcb = kmem_cache_alloc(zfcp_data.qtcb_cache, GFP_ATOMIC); |
702 | GFP_ATOMIC); | 696 | |
703 | if (unlikely(!qtcb)) | 697 | if (unlikely(!qtcb)) |
704 | return NULL; | 698 | return NULL; |
705 | 699 | ||
706 | memset(qtcb, 0, sizeof(*qtcb)); | 700 | memset(qtcb, 0, sizeof(*qtcb)); |
707 | qtcb->fsf_req.qtcb = &qtcb->qtcb; | 701 | return qtcb; |
708 | qtcb->fsf_req.pool = pool; | ||
709 | |||
710 | return &qtcb->fsf_req; | ||
711 | } | 702 | } |
712 | 703 | ||
713 | static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter, | 704 | static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio, |
714 | u32 fsf_cmd, int req_flags, | 705 | u32 fsf_cmd, mempool_t *pool) |
715 | mempool_t *pool) | ||
716 | { | 706 | { |
717 | struct qdio_buffer_element *sbale; | 707 | struct qdio_buffer_element *sbale; |
718 | 708 | struct zfcp_qdio_queue *req_q = &qdio->req_q; | |
719 | struct zfcp_fsf_req *req; | 709 | struct zfcp_adapter *adapter = qdio->adapter; |
720 | struct zfcp_qdio_queue *req_q = &adapter->req_q; | 710 | struct zfcp_fsf_req *req = zfcp_fsf_alloc(pool); |
721 | |||
722 | if (req_flags & ZFCP_REQ_NO_QTCB) | ||
723 | req = zfcp_fsf_alloc_noqtcb(pool); | ||
724 | else | ||
725 | req = zfcp_fsf_alloc_qtcb(pool); | ||
726 | 711 | ||
727 | if (unlikely(!req)) | 712 | if (unlikely(!req)) |
728 | return ERR_PTR(-ENOMEM); | 713 | return ERR_PTR(-ENOMEM); |
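In the rewritten allocation path above, zfcp_fsf_alloc() draws the request from a mempool when one is supplied and falls back to kmalloc() otherwise, while the QTCB now comes from its own cache or mempool instead of being embedded in the request structure. The pool-or-heap idiom itself can be sketched as below; the function names are examples only, not driver symbols.

#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Allocate a zeroed object, preferring the emergency pool when given one. */
static void *example_obj_alloc(mempool_t *pool, size_t size)
{
	void *obj;

	if (pool)
		obj = mempool_alloc(pool, GFP_ATOMIC);
	else
		obj = kmalloc(size, GFP_ATOMIC);

	if (obj)
		memset(obj, 0, size);
	return obj;
}

/* Free back to wherever the object came from. */
static void example_obj_free(mempool_t *pool, void *obj)
{
	if (pool)
		mempool_free(obj, pool);
	else
		kfree(obj);
}

Note that the mempool-backed path still zeroes the object explicitly, mirroring the memset() calls kept in zfcp_fsf_alloc() and zfcp_qtcb_alloc().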
@@ -732,22 +717,32 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter, | |||
732 | 717 | ||
733 | INIT_LIST_HEAD(&req->list); | 718 | INIT_LIST_HEAD(&req->list); |
734 | init_timer(&req->timer); | 719 | init_timer(&req->timer); |
735 | init_waitqueue_head(&req->completion_wq); | 720 | init_completion(&req->completion); |
736 | 721 | ||
737 | req->adapter = adapter; | 722 | req->adapter = adapter; |
738 | req->fsf_command = fsf_cmd; | 723 | req->fsf_command = fsf_cmd; |
739 | req->req_id = adapter->req_no; | 724 | req->req_id = adapter->req_no; |
740 | req->sbal_number = 1; | 725 | req->queue_req.sbal_number = 1; |
741 | req->sbal_first = req_q->first; | 726 | req->queue_req.sbal_first = req_q->first; |
742 | req->sbal_last = req_q->first; | 727 | req->queue_req.sbal_last = req_q->first; |
743 | req->sbale_curr = 1; | 728 | req->queue_req.sbale_curr = 1; |
744 | 729 | ||
745 | sbale = zfcp_qdio_sbale_req(req); | 730 | sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); |
746 | sbale[0].addr = (void *) req->req_id; | 731 | sbale[0].addr = (void *) req->req_id; |
747 | sbale[0].flags |= SBAL_FLAGS0_COMMAND; | 732 | sbale[0].flags |= SBAL_FLAGS0_COMMAND; |
748 | 733 | ||
749 | if (likely(req->qtcb)) { | 734 | if (likely(fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS)) { |
750 | req->qtcb->prefix.req_seq_no = req->adapter->fsf_req_seq_no; | 735 | if (likely(pool)) |
736 | req->qtcb = zfcp_qtcb_alloc(adapter->pool.qtcb_pool); | ||
737 | else | ||
738 | req->qtcb = zfcp_qtcb_alloc(NULL); | ||
739 | |||
740 | if (unlikely(!req->qtcb)) { | ||
741 | zfcp_fsf_req_free(req); | ||
742 | return ERR_PTR(-ENOMEM); | ||
743 | } | ||
744 | |||
745 | req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no; | ||
751 | req->qtcb->prefix.req_id = req->req_id; | 746 | req->qtcb->prefix.req_id = req->req_id; |
752 | req->qtcb->prefix.ulp_info = 26; | 747 | req->qtcb->prefix.ulp_info = 26; |
753 | req->qtcb->prefix.qtcb_type = fsf_qtcb_type[req->fsf_command]; | 748 | req->qtcb->prefix.qtcb_type = fsf_qtcb_type[req->fsf_command]; |
@@ -765,15 +760,13 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter, | |||
765 | return ERR_PTR(-EIO); | 760 | return ERR_PTR(-EIO); |
766 | } | 761 | } |
767 | 762 | ||
768 | if (likely(req_flags & ZFCP_REQ_AUTO_CLEANUP)) | ||
769 | req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; | ||
770 | |||
771 | return req; | 763 | return req; |
772 | } | 764 | } |
773 | 765 | ||
774 | static int zfcp_fsf_req_send(struct zfcp_fsf_req *req) | 766 | static int zfcp_fsf_req_send(struct zfcp_fsf_req *req) |
775 | { | 767 | { |
776 | struct zfcp_adapter *adapter = req->adapter; | 768 | struct zfcp_adapter *adapter = req->adapter; |
769 | struct zfcp_qdio *qdio = adapter->qdio; | ||
777 | unsigned long flags; | 770 | unsigned long flags; |
778 | int idx; | 771 | int idx; |
779 | int with_qtcb = (req->qtcb != NULL); | 772 | int with_qtcb = (req->qtcb != NULL); |
@@ -784,9 +777,9 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req) | |||
784 | list_add_tail(&req->list, &adapter->req_list[idx]); | 777 | list_add_tail(&req->list, &adapter->req_list[idx]); |
785 | spin_unlock_irqrestore(&adapter->req_list_lock, flags); | 778 | spin_unlock_irqrestore(&adapter->req_list_lock, flags); |
786 | 779 | ||
787 | req->qdio_outb_usage = atomic_read(&adapter->req_q.count); | 780 | req->queue_req.qdio_outb_usage = atomic_read(&qdio->req_q.count); |
788 | req->issued = get_clock(); | 781 | req->issued = get_clock(); |
789 | if (zfcp_qdio_send(req)) { | 782 | if (zfcp_qdio_send(qdio, &req->queue_req)) { |
790 | del_timer(&req->timer); | 783 | del_timer(&req->timer); |
791 | spin_lock_irqsave(&adapter->req_list_lock, flags); | 784 | spin_lock_irqsave(&adapter->req_list_lock, flags); |
792 | /* lookup request again, list might have changed */ | 785 | /* lookup request again, list might have changed */ |
@@ -811,38 +804,37 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req) | |||
811 | * @req_flags: request flags | 804 | * @req_flags: request flags |
812 | * Returns: 0 on success, ERROR otherwise | 805 | * Returns: 0 on success, ERROR otherwise |
813 | */ | 806 | */ |
814 | int zfcp_fsf_status_read(struct zfcp_adapter *adapter) | 807 | int zfcp_fsf_status_read(struct zfcp_qdio *qdio) |
815 | { | 808 | { |
809 | struct zfcp_adapter *adapter = qdio->adapter; | ||
816 | struct zfcp_fsf_req *req; | 810 | struct zfcp_fsf_req *req; |
817 | struct fsf_status_read_buffer *sr_buf; | 811 | struct fsf_status_read_buffer *sr_buf; |
818 | struct qdio_buffer_element *sbale; | 812 | struct qdio_buffer_element *sbale; |
819 | int retval = -EIO; | 813 | int retval = -EIO; |
820 | 814 | ||
821 | spin_lock_bh(&adapter->req_q_lock); | 815 | spin_lock_bh(&qdio->req_q_lock); |
822 | if (zfcp_fsf_req_sbal_get(adapter)) | 816 | if (zfcp_fsf_req_sbal_get(qdio)) |
823 | goto out; | 817 | goto out; |
824 | 818 | ||
825 | req = zfcp_fsf_req_create(adapter, FSF_QTCB_UNSOLICITED_STATUS, | 819 | req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS, |
826 | ZFCP_REQ_NO_QTCB, | 820 | adapter->pool.status_read_req); |
827 | adapter->pool.fsf_req_status_read); | ||
828 | if (IS_ERR(req)) { | 821 | if (IS_ERR(req)) { |
829 | retval = PTR_ERR(req); | 822 | retval = PTR_ERR(req); |
830 | goto out; | 823 | goto out; |
831 | } | 824 | } |
832 | 825 | ||
833 | sbale = zfcp_qdio_sbale_req(req); | 826 | sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); |
834 | sbale[0].flags |= SBAL_FLAGS0_TYPE_STATUS; | ||
835 | sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY; | 827 | sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY; |
836 | req->sbale_curr = 2; | 828 | req->queue_req.sbale_curr = 2; |
837 | 829 | ||
838 | sr_buf = mempool_alloc(adapter->pool.data_status_read, GFP_ATOMIC); | 830 | sr_buf = mempool_alloc(adapter->pool.status_read_data, GFP_ATOMIC); |
839 | if (!sr_buf) { | 831 | if (!sr_buf) { |
840 | retval = -ENOMEM; | 832 | retval = -ENOMEM; |
841 | goto failed_buf; | 833 | goto failed_buf; |
842 | } | 834 | } |
843 | memset(sr_buf, 0, sizeof(*sr_buf)); | 835 | memset(sr_buf, 0, sizeof(*sr_buf)); |
844 | req->data = sr_buf; | 836 | req->data = sr_buf; |
845 | sbale = zfcp_qdio_sbale_curr(req); | 837 | sbale = zfcp_qdio_sbale_curr(qdio, &req->queue_req); |
846 | sbale->addr = (void *) sr_buf; | 838 | sbale->addr = (void *) sr_buf; |
847 | sbale->length = sizeof(*sr_buf); | 839 | sbale->length = sizeof(*sr_buf); |
848 | 840 | ||
@@ -853,12 +845,12 @@ int zfcp_fsf_status_read(struct zfcp_adapter *adapter) | |||
853 | goto out; | 845 | goto out; |
854 | 846 | ||
855 | failed_req_send: | 847 | failed_req_send: |
856 | mempool_free(sr_buf, adapter->pool.data_status_read); | 848 | mempool_free(sr_buf, adapter->pool.status_read_data); |
857 | failed_buf: | 849 | failed_buf: |
858 | zfcp_fsf_req_free(req); | 850 | zfcp_fsf_req_free(req); |
859 | zfcp_hba_dbf_event_fsf_unsol("fail", adapter, NULL); | 851 | zfcp_dbf_hba_fsf_unsol("fail", adapter->dbf, NULL); |
860 | out: | 852 | out: |
861 | spin_unlock_bh(&adapter->req_q_lock); | 853 | spin_unlock_bh(&qdio->req_q_lock); |
862 | return retval; | 854 | return retval; |
863 | } | 855 | } |
864 | 856 | ||
@@ -900,7 +892,7 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req) | |||
900 | case FSF_ADAPTER_STATUS_AVAILABLE: | 892 | case FSF_ADAPTER_STATUS_AVAILABLE: |
901 | switch (fsq->word[0]) { | 893 | switch (fsq->word[0]) { |
902 | case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: | 894 | case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: |
903 | zfcp_test_link(unit->port); | 895 | zfcp_fc_test_link(unit->port); |
904 | /* fall through */ | 896 | /* fall through */ |
905 | case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: | 897 | case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: |
906 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 898 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
@@ -925,13 +917,13 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id, | |||
925 | { | 917 | { |
926 | struct qdio_buffer_element *sbale; | 918 | struct qdio_buffer_element *sbale; |
927 | struct zfcp_fsf_req *req = NULL; | 919 | struct zfcp_fsf_req *req = NULL; |
928 | struct zfcp_adapter *adapter = unit->port->adapter; | 920 | struct zfcp_qdio *qdio = unit->port->adapter->qdio; |
929 | 921 | ||
930 | spin_lock_bh(&adapter->req_q_lock); | 922 | spin_lock_bh(&qdio->req_q_lock); |
931 | if (zfcp_fsf_req_sbal_get(adapter)) | 923 | if (zfcp_fsf_req_sbal_get(qdio)) |
932 | goto out; | 924 | goto out; |
933 | req = zfcp_fsf_req_create(adapter, FSF_QTCB_ABORT_FCP_CMND, | 925 | req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND, |
934 | 0, adapter->pool.fsf_req_abort); | 926 | qdio->adapter->pool.scsi_abort); |
935 | if (IS_ERR(req)) { | 927 | if (IS_ERR(req)) { |
936 | req = NULL; | 928 | req = NULL; |
937 | goto out; | 929 | goto out; |
@@ -941,7 +933,7 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id, | |||
941 | ZFCP_STATUS_COMMON_UNBLOCKED))) | 933 | ZFCP_STATUS_COMMON_UNBLOCKED))) |
942 | goto out_error_free; | 934 | goto out_error_free; |
943 | 935 | ||
944 | sbale = zfcp_qdio_sbale_req(req); | 936 | sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); |
945 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; | 937 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; |
946 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; | 938 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; |
947 | 939 | ||
@@ -959,7 +951,7 @@ out_error_free: | |||
959 | zfcp_fsf_req_free(req); | 951 | zfcp_fsf_req_free(req); |
960 | req = NULL; | 952 | req = NULL; |
961 | out: | 953 | out: |
962 | spin_unlock_bh(&adapter->req_q_lock); | 954 | spin_unlock_bh(&qdio->req_q_lock); |
963 | return req; | 955 | return req; |
964 | } | 956 | } |
965 | 957 | ||
@@ -976,7 +968,7 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req) | |||
976 | 968 | ||
977 | switch (header->fsf_status) { | 969 | switch (header->fsf_status) { |
978 | case FSF_GOOD: | 970 | case FSF_GOOD: |
979 | zfcp_san_dbf_event_ct_response(req); | 971 | zfcp_dbf_san_ct_response(req); |
980 | send_ct->status = 0; | 972 | send_ct->status = 0; |
981 | break; | 973 | break; |
982 | case FSF_SERVICE_CLASS_NOT_SUPPORTED: | 974 | case FSF_SERVICE_CLASS_NOT_SUPPORTED: |
@@ -1035,8 +1027,10 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req, | |||
1035 | struct scatterlist *sg_resp, | 1027 | struct scatterlist *sg_resp, |
1036 | int max_sbals) | 1028 | int max_sbals) |
1037 | { | 1029 | { |
1038 | struct qdio_buffer_element *sbale = zfcp_qdio_sbale_req(req); | 1030 | struct zfcp_adapter *adapter = req->adapter; |
1039 | u32 feat = req->adapter->adapter_features; | 1031 | struct qdio_buffer_element *sbale = zfcp_qdio_sbale_req(adapter->qdio, |
1032 | &req->queue_req); | ||
1033 | u32 feat = adapter->adapter_features; | ||
1040 | int bytes; | 1034 | int bytes; |
1041 | 1035 | ||
1042 | if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS)) { | 1036 | if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS)) { |
@@ -1053,18 +1047,25 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req, | |||
1053 | return 0; | 1047 | return 0; |
1054 | } | 1048 | } |
1055 | 1049 | ||
1056 | bytes = zfcp_qdio_sbals_from_sg(req, SBAL_FLAGS0_TYPE_WRITE_READ, | 1050 | bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->queue_req, |
1051 | SBAL_FLAGS0_TYPE_WRITE_READ, | ||
1057 | sg_req, max_sbals); | 1052 | sg_req, max_sbals); |
1058 | if (bytes <= 0) | 1053 | if (bytes <= 0) |
1059 | return -EIO; | 1054 | return -EIO; |
1060 | req->qtcb->bottom.support.req_buf_length = bytes; | 1055 | req->qtcb->bottom.support.req_buf_length = bytes; |
1061 | req->sbale_curr = ZFCP_LAST_SBALE_PER_SBAL; | 1056 | req->queue_req.sbale_curr = ZFCP_LAST_SBALE_PER_SBAL; |
1062 | 1057 | ||
1063 | bytes = zfcp_qdio_sbals_from_sg(req, SBAL_FLAGS0_TYPE_WRITE_READ, | 1058 | bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->queue_req, |
1059 | SBAL_FLAGS0_TYPE_WRITE_READ, | ||
1064 | sg_resp, max_sbals); | 1060 | sg_resp, max_sbals); |
1065 | if (bytes <= 0) | 1061 | if (bytes <= 0) |
1066 | return -EIO; | 1062 | return -EIO; |
1063 | |||
1064 | /* common settings for ct/gs and els requests */ | ||
1067 | req->qtcb->bottom.support.resp_buf_length = bytes; | 1065 | req->qtcb->bottom.support.resp_buf_length = bytes; |
1066 | req->qtcb->bottom.support.service_class = FSF_CLASS_3; | ||
1067 | req->qtcb->bottom.support.timeout = 2 * R_A_TOV; | ||
1068 | zfcp_fsf_start_timer(req, 2 * R_A_TOV + 10); | ||
1068 | 1069 | ||
1069 | return 0; | 1070 | return 0; |
1070 | } | 1071 | } |
@@ -1073,27 +1074,26 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req, | |||
1073 | * zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS) | 1074 | * zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS) |
1074 | * @ct: pointer to struct zfcp_send_ct with data for request | 1075 | * @ct: pointer to struct zfcp_send_ct with data for request |
1075 | * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req | 1076 | * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req |
1076 | * @erp_action: if non-null the Generic Service request sent within ERP | ||
1077 | */ | 1077 | */ |
1078 | int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool, | 1078 | int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool) |
1079 | struct zfcp_erp_action *erp_action) | ||
1080 | { | 1079 | { |
1081 | struct zfcp_wka_port *wka_port = ct->wka_port; | 1080 | struct zfcp_wka_port *wka_port = ct->wka_port; |
1082 | struct zfcp_adapter *adapter = wka_port->adapter; | 1081 | struct zfcp_qdio *qdio = wka_port->adapter->qdio; |
1083 | struct zfcp_fsf_req *req; | 1082 | struct zfcp_fsf_req *req; |
1084 | int ret = -EIO; | 1083 | int ret = -EIO; |
1085 | 1084 | ||
1086 | spin_lock_bh(&adapter->req_q_lock); | 1085 | spin_lock_bh(&qdio->req_q_lock); |
1087 | if (zfcp_fsf_req_sbal_get(adapter)) | 1086 | if (zfcp_fsf_req_sbal_get(qdio)) |
1088 | goto out; | 1087 | goto out; |
1089 | 1088 | ||
1090 | req = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_GENERIC, | 1089 | req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC, pool); |
1091 | ZFCP_REQ_AUTO_CLEANUP, pool); | 1090 | |
1092 | if (IS_ERR(req)) { | 1091 | if (IS_ERR(req)) { |
1093 | ret = PTR_ERR(req); | 1092 | ret = PTR_ERR(req); |
1094 | goto out; | 1093 | goto out; |
1095 | } | 1094 | } |
1096 | 1095 | ||
1096 | req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; | ||
1097 | ret = zfcp_fsf_setup_ct_els_sbals(req, ct->req, ct->resp, | 1097 | ret = zfcp_fsf_setup_ct_els_sbals(req, ct->req, ct->resp, |
1098 | FSF_MAX_SBALS_PER_REQ); | 1098 | FSF_MAX_SBALS_PER_REQ); |
1099 | if (ret) | 1099 | if (ret) |
@@ -1101,18 +1101,9 @@ int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool, | |||
1101 | 1101 | ||
1102 | req->handler = zfcp_fsf_send_ct_handler; | 1102 | req->handler = zfcp_fsf_send_ct_handler; |
1103 | req->qtcb->header.port_handle = wka_port->handle; | 1103 | req->qtcb->header.port_handle = wka_port->handle; |
1104 | req->qtcb->bottom.support.service_class = FSF_CLASS_3; | ||
1105 | req->qtcb->bottom.support.timeout = ct->timeout; | ||
1106 | req->data = ct; | 1104 | req->data = ct; |
1107 | 1105 | ||
1108 | zfcp_san_dbf_event_ct_request(req); | 1106 | zfcp_dbf_san_ct_request(req); |
1109 | |||
1110 | if (erp_action) { | ||
1111 | erp_action->fsf_req = req; | ||
1112 | req->erp_action = erp_action; | ||
1113 | zfcp_fsf_start_erp_timer(req); | ||
1114 | } else | ||
1115 | zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); | ||
1116 | 1107 | ||
1117 | ret = zfcp_fsf_req_send(req); | 1108 | ret = zfcp_fsf_req_send(req); |
1118 | if (ret) | 1109 | if (ret) |
@@ -1122,10 +1113,8 @@ int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool, | |||
1122 | 1113 | ||
1123 | failed_send: | 1114 | failed_send: |
1124 | zfcp_fsf_req_free(req); | 1115 | zfcp_fsf_req_free(req); |
1125 | if (erp_action) | ||
1126 | erp_action->fsf_req = NULL; | ||
1127 | out: | 1116 | out: |
1128 | spin_unlock_bh(&adapter->req_q_lock); | 1117 | spin_unlock_bh(&qdio->req_q_lock); |
1129 | return ret; | 1118 | return ret; |
1130 | } | 1119 | } |
1131 | 1120 | ||
@@ -1142,7 +1131,7 @@ static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req) | |||
1142 | 1131 | ||
1143 | switch (header->fsf_status) { | 1132 | switch (header->fsf_status) { |
1144 | case FSF_GOOD: | 1133 | case FSF_GOOD: |
1145 | zfcp_san_dbf_event_els_response(req); | 1134 | zfcp_dbf_san_els_response(req); |
1146 | send_els->status = 0; | 1135 | send_els->status = 0; |
1147 | break; | 1136 | break; |
1148 | case FSF_SERVICE_CLASS_NOT_SUPPORTED: | 1137 | case FSF_SERVICE_CLASS_NOT_SUPPORTED: |
@@ -1152,7 +1141,7 @@ static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req) | |||
1152 | switch (header->fsf_status_qual.word[0]){ | 1141 | switch (header->fsf_status_qual.word[0]){ |
1153 | case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: | 1142 | case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: |
1154 | if (port && (send_els->ls_code != ZFCP_LS_ADISC)) | 1143 | if (port && (send_els->ls_code != ZFCP_LS_ADISC)) |
1155 | zfcp_test_link(port); | 1144 | zfcp_fc_test_link(port); |
1156 | /*fall through */ | 1145 | /*fall through */ |
1157 | case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: | 1146 | case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: |
1158 | case FSF_SQ_RETRY_IF_POSSIBLE: | 1147 | case FSF_SQ_RETRY_IF_POSSIBLE: |
@@ -1188,35 +1177,32 @@ skip_fsfstatus: | |||
1188 | int zfcp_fsf_send_els(struct zfcp_send_els *els) | 1177 | int zfcp_fsf_send_els(struct zfcp_send_els *els) |
1189 | { | 1178 | { |
1190 | struct zfcp_fsf_req *req; | 1179 | struct zfcp_fsf_req *req; |
1191 | struct zfcp_adapter *adapter = els->adapter; | 1180 | struct zfcp_qdio *qdio = els->adapter->qdio; |
1192 | struct fsf_qtcb_bottom_support *bottom; | ||
1193 | int ret = -EIO; | 1181 | int ret = -EIO; |
1194 | 1182 | ||
1195 | spin_lock_bh(&adapter->req_q_lock); | 1183 | spin_lock_bh(&qdio->req_q_lock); |
1196 | if (zfcp_fsf_req_sbal_get(adapter)) | 1184 | if (zfcp_fsf_req_sbal_get(qdio)) |
1197 | goto out; | 1185 | goto out; |
1198 | req = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_ELS, | 1186 | |
1199 | ZFCP_REQ_AUTO_CLEANUP, NULL); | 1187 | req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS, NULL); |
1188 | |||
1200 | if (IS_ERR(req)) { | 1189 | if (IS_ERR(req)) { |
1201 | ret = PTR_ERR(req); | 1190 | ret = PTR_ERR(req); |
1202 | goto out; | 1191 | goto out; |
1203 | } | 1192 | } |
1204 | 1193 | ||
1194 | req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; | ||
1205 | ret = zfcp_fsf_setup_ct_els_sbals(req, els->req, els->resp, 2); | 1195 | ret = zfcp_fsf_setup_ct_els_sbals(req, els->req, els->resp, 2); |
1206 | 1196 | ||
1207 | if (ret) | 1197 | if (ret) |
1208 | goto failed_send; | 1198 | goto failed_send; |
1209 | 1199 | ||
1210 | bottom = &req->qtcb->bottom.support; | 1200 | req->qtcb->bottom.support.d_id = els->d_id; |
1211 | req->handler = zfcp_fsf_send_els_handler; | 1201 | req->handler = zfcp_fsf_send_els_handler; |
1212 | bottom->d_id = els->d_id; | ||
1213 | bottom->service_class = FSF_CLASS_3; | ||
1214 | bottom->timeout = 2 * R_A_TOV; | ||
1215 | req->data = els; | 1202 | req->data = els; |
1216 | 1203 | ||
1217 | zfcp_san_dbf_event_els_request(req); | 1204 | zfcp_dbf_san_els_request(req); |
1218 | 1205 | ||
1219 | zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); | ||
1220 | ret = zfcp_fsf_req_send(req); | 1206 | ret = zfcp_fsf_req_send(req); |
1221 | if (ret) | 1207 | if (ret) |
1222 | goto failed_send; | 1208 | goto failed_send; |
@@ -1226,7 +1212,7 @@ int zfcp_fsf_send_els(struct zfcp_send_els *els) | |||
1226 | failed_send: | 1212 | failed_send: |
1227 | zfcp_fsf_req_free(req); | 1213 | zfcp_fsf_req_free(req); |
1228 | out: | 1214 | out: |
1229 | spin_unlock_bh(&adapter->req_q_lock); | 1215 | spin_unlock_bh(&qdio->req_q_lock); |
1230 | return ret; | 1216 | return ret; |
1231 | } | 1217 | } |
1232 | 1218 | ||
@@ -1234,22 +1220,23 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action) | |||
1234 | { | 1220 | { |
1235 | struct qdio_buffer_element *sbale; | 1221 | struct qdio_buffer_element *sbale; |
1236 | struct zfcp_fsf_req *req; | 1222 | struct zfcp_fsf_req *req; |
1237 | struct zfcp_adapter *adapter = erp_action->adapter; | 1223 | struct zfcp_qdio *qdio = erp_action->adapter->qdio; |
1238 | int retval = -EIO; | 1224 | int retval = -EIO; |
1239 | 1225 | ||
1240 | spin_lock_bh(&adapter->req_q_lock); | 1226 | spin_lock_bh(&qdio->req_q_lock); |
1241 | if (zfcp_fsf_req_sbal_get(adapter)) | 1227 | if (zfcp_fsf_req_sbal_get(qdio)) |
1242 | goto out; | 1228 | goto out; |
1243 | req = zfcp_fsf_req_create(adapter, | 1229 | |
1244 | FSF_QTCB_EXCHANGE_CONFIG_DATA, | 1230 | req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA, |
1245 | ZFCP_REQ_AUTO_CLEANUP, | 1231 | qdio->adapter->pool.erp_req); |
1246 | adapter->pool.fsf_req_erp); | 1232 | |
1247 | if (IS_ERR(req)) { | 1233 | if (IS_ERR(req)) { |
1248 | retval = PTR_ERR(req); | 1234 | retval = PTR_ERR(req); |
1249 | goto out; | 1235 | goto out; |
1250 | } | 1236 | } |
1251 | 1237 | ||
1252 | sbale = zfcp_qdio_sbale_req(req); | 1238 | req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; |
1239 | sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); | ||
1253 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; | 1240 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; |
1254 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; | 1241 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; |
1255 | 1242 | ||
@@ -1269,29 +1256,29 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action) | |||
1269 | erp_action->fsf_req = NULL; | 1256 | erp_action->fsf_req = NULL; |
1270 | } | 1257 | } |
1271 | out: | 1258 | out: |
1272 | spin_unlock_bh(&adapter->req_q_lock); | 1259 | spin_unlock_bh(&qdio->req_q_lock); |
1273 | return retval; | 1260 | return retval; |
1274 | } | 1261 | } |
1275 | 1262 | ||
1276 | int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter, | 1263 | int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio, |
1277 | struct fsf_qtcb_bottom_config *data) | 1264 | struct fsf_qtcb_bottom_config *data) |
1278 | { | 1265 | { |
1279 | struct qdio_buffer_element *sbale; | 1266 | struct qdio_buffer_element *sbale; |
1280 | struct zfcp_fsf_req *req = NULL; | 1267 | struct zfcp_fsf_req *req = NULL; |
1281 | int retval = -EIO; | 1268 | int retval = -EIO; |
1282 | 1269 | ||
1283 | spin_lock_bh(&adapter->req_q_lock); | 1270 | spin_lock_bh(&qdio->req_q_lock); |
1284 | if (zfcp_fsf_req_sbal_get(adapter)) | 1271 | if (zfcp_fsf_req_sbal_get(qdio)) |
1285 | goto out_unlock; | 1272 | goto out_unlock; |
1286 | 1273 | ||
1287 | req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_CONFIG_DATA, | 1274 | req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA, NULL); |
1288 | 0, NULL); | 1275 | |
1289 | if (IS_ERR(req)) { | 1276 | if (IS_ERR(req)) { |
1290 | retval = PTR_ERR(req); | 1277 | retval = PTR_ERR(req); |
1291 | goto out_unlock; | 1278 | goto out_unlock; |
1292 | } | 1279 | } |
1293 | 1280 | ||
1294 | sbale = zfcp_qdio_sbale_req(req); | 1281 | sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); |
1295 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; | 1282 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; |
1296 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; | 1283 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; |
1297 | req->handler = zfcp_fsf_exchange_config_data_handler; | 1284 | req->handler = zfcp_fsf_exchange_config_data_handler; |
@@ -1307,16 +1294,15 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter, | |||
1307 | 1294 | ||
1308 | zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); | 1295 | zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); |
1309 | retval = zfcp_fsf_req_send(req); | 1296 | retval = zfcp_fsf_req_send(req); |
1310 | spin_unlock_bh(&adapter->req_q_lock); | 1297 | spin_unlock_bh(&qdio->req_q_lock); |
1311 | if (!retval) | 1298 | if (!retval) |
1312 | wait_event(req->completion_wq, | 1299 | wait_for_completion(&req->completion); |
1313 | req->status & ZFCP_STATUS_FSFREQ_COMPLETED); | ||
1314 | 1300 | ||
1315 | zfcp_fsf_req_free(req); | 1301 | zfcp_fsf_req_free(req); |
1316 | return retval; | 1302 | return retval; |
1317 | 1303 | ||
1318 | out_unlock: | 1304 | out_unlock: |
1319 | spin_unlock_bh(&adapter->req_q_lock); | 1305 | spin_unlock_bh(&qdio->req_q_lock); |
1320 | return retval; | 1306 | return retval; |
1321 | } | 1307 | } |
1322 | 1308 | ||
@@ -1327,26 +1313,28 @@ out_unlock: | |||
1327 | */ | 1313 | */ |
1328 | int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action) | 1314 | int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action) |
1329 | { | 1315 | { |
1316 | struct zfcp_qdio *qdio = erp_action->adapter->qdio; | ||
1330 | struct qdio_buffer_element *sbale; | 1317 | struct qdio_buffer_element *sbale; |
1331 | struct zfcp_fsf_req *req; | 1318 | struct zfcp_fsf_req *req; |
1332 | struct zfcp_adapter *adapter = erp_action->adapter; | ||
1333 | int retval = -EIO; | 1319 | int retval = -EIO; |
1334 | 1320 | ||
1335 | if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) | 1321 | if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) |
1336 | return -EOPNOTSUPP; | 1322 | return -EOPNOTSUPP; |
1337 | 1323 | ||
1338 | spin_lock_bh(&adapter->req_q_lock); | 1324 | spin_lock_bh(&qdio->req_q_lock); |
1339 | if (zfcp_fsf_req_sbal_get(adapter)) | 1325 | if (zfcp_fsf_req_sbal_get(qdio)) |
1340 | goto out; | 1326 | goto out; |
1341 | req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA, | 1327 | |
1342 | ZFCP_REQ_AUTO_CLEANUP, | 1328 | req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA, |
1343 | adapter->pool.fsf_req_erp); | 1329 | qdio->adapter->pool.erp_req); |
1330 | |||
1344 | if (IS_ERR(req)) { | 1331 | if (IS_ERR(req)) { |
1345 | retval = PTR_ERR(req); | 1332 | retval = PTR_ERR(req); |
1346 | goto out; | 1333 | goto out; |
1347 | } | 1334 | } |
1348 | 1335 | ||
1349 | sbale = zfcp_qdio_sbale_req(req); | 1336 | req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; |
1337 | sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); | ||
1350 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; | 1338 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; |
1351 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; | 1339 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; |
1352 | 1340 | ||
@@ -1361,32 +1349,32 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action) | |||
1361 | erp_action->fsf_req = NULL; | 1349 | erp_action->fsf_req = NULL; |
1362 | } | 1350 | } |
1363 | out: | 1351 | out: |
1364 | spin_unlock_bh(&adapter->req_q_lock); | 1352 | spin_unlock_bh(&qdio->req_q_lock); |
1365 | return retval; | 1353 | return retval; |
1366 | } | 1354 | } |
1367 | 1355 | ||
1368 | /** | 1356 | /** |
1369 | * zfcp_fsf_exchange_port_data_sync - request information about local port | 1357 | * zfcp_fsf_exchange_port_data_sync - request information about local port |
1370 | * @adapter: pointer to struct zfcp_adapter | 1358 | * @qdio: pointer to struct zfcp_qdio |
1371 | * @data: pointer to struct fsf_qtcb_bottom_port | 1359 | * @data: pointer to struct fsf_qtcb_bottom_port |
1372 | * Returns: 0 on success, error otherwise | 1360 | * Returns: 0 on success, error otherwise |
1373 | */ | 1361 | */ |
1374 | int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *adapter, | 1362 | int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio, |
1375 | struct fsf_qtcb_bottom_port *data) | 1363 | struct fsf_qtcb_bottom_port *data) |
1376 | { | 1364 | { |
1377 | struct qdio_buffer_element *sbale; | 1365 | struct qdio_buffer_element *sbale; |
1378 | struct zfcp_fsf_req *req = NULL; | 1366 | struct zfcp_fsf_req *req = NULL; |
1379 | int retval = -EIO; | 1367 | int retval = -EIO; |
1380 | 1368 | ||
1381 | if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) | 1369 | if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) |
1382 | return -EOPNOTSUPP; | 1370 | return -EOPNOTSUPP; |
1383 | 1371 | ||
1384 | spin_lock_bh(&adapter->req_q_lock); | 1372 | spin_lock_bh(&qdio->req_q_lock); |
1385 | if (zfcp_fsf_req_sbal_get(adapter)) | 1373 | if (zfcp_fsf_req_sbal_get(qdio)) |
1386 | goto out_unlock; | 1374 | goto out_unlock; |
1387 | 1375 | ||
1388 | req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA, 0, | 1376 | req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA, NULL); |
1389 | NULL); | 1377 | |
1390 | if (IS_ERR(req)) { | 1378 | if (IS_ERR(req)) { |
1391 | retval = PTR_ERR(req); | 1379 | retval = PTR_ERR(req); |
1392 | goto out_unlock; | 1380 | goto out_unlock; |
@@ -1395,24 +1383,24 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *adapter, | |||
1395 | if (data) | 1383 | if (data) |
1396 | req->data = data; | 1384 | req->data = data; |
1397 | 1385 | ||
1398 | sbale = zfcp_qdio_sbale_req(req); | 1386 | sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); |
1399 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; | 1387 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; |
1400 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; | 1388 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; |
1401 | 1389 | ||
1402 | req->handler = zfcp_fsf_exchange_port_data_handler; | 1390 | req->handler = zfcp_fsf_exchange_port_data_handler; |
1403 | zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); | 1391 | zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); |
1404 | retval = zfcp_fsf_req_send(req); | 1392 | retval = zfcp_fsf_req_send(req); |
1405 | spin_unlock_bh(&adapter->req_q_lock); | 1393 | spin_unlock_bh(&qdio->req_q_lock); |
1406 | 1394 | ||
1407 | if (!retval) | 1395 | if (!retval) |
1408 | wait_event(req->completion_wq, | 1396 | wait_for_completion(&req->completion); |
1409 | req->status & ZFCP_STATUS_FSFREQ_COMPLETED); | 1397 | |
1410 | zfcp_fsf_req_free(req); | 1398 | zfcp_fsf_req_free(req); |
1411 | 1399 | ||
1412 | return retval; | 1400 | return retval; |
1413 | 1401 | ||
1414 | out_unlock: | 1402 | out_unlock: |
1415 | spin_unlock_bh(&adapter->req_q_lock); | 1403 | spin_unlock_bh(&qdio->req_q_lock); |
1416 | return retval; | 1404 | return retval; |
1417 | } | 1405 | } |
1418 | 1406 | ||
@@ -1498,25 +1486,25 @@ out: | |||
1498 | int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action) | 1486 | int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action) |
1499 | { | 1487 | { |
1500 | struct qdio_buffer_element *sbale; | 1488 | struct qdio_buffer_element *sbale; |
1501 | struct zfcp_adapter *adapter = erp_action->adapter; | 1489 | struct zfcp_qdio *qdio = erp_action->adapter->qdio; |
1502 | struct zfcp_fsf_req *req; | ||
1503 | struct zfcp_port *port = erp_action->port; | 1490 | struct zfcp_port *port = erp_action->port; |
1491 | struct zfcp_fsf_req *req; | ||
1504 | int retval = -EIO; | 1492 | int retval = -EIO; |
1505 | 1493 | ||
1506 | spin_lock_bh(&adapter->req_q_lock); | 1494 | spin_lock_bh(&qdio->req_q_lock); |
1507 | if (zfcp_fsf_req_sbal_get(adapter)) | 1495 | if (zfcp_fsf_req_sbal_get(qdio)) |
1508 | goto out; | 1496 | goto out; |
1509 | 1497 | ||
1510 | req = zfcp_fsf_req_create(adapter, | 1498 | req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID, |
1511 | FSF_QTCB_OPEN_PORT_WITH_DID, | 1499 | qdio->adapter->pool.erp_req); |
1512 | ZFCP_REQ_AUTO_CLEANUP, | 1500 | |
1513 | adapter->pool.fsf_req_erp); | ||
1514 | if (IS_ERR(req)) { | 1501 | if (IS_ERR(req)) { |
1515 | retval = PTR_ERR(req); | 1502 | retval = PTR_ERR(req); |
1516 | goto out; | 1503 | goto out; |
1517 | } | 1504 | } |
1518 | 1505 | ||
1519 | sbale = zfcp_qdio_sbale_req(req); | 1506 | req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; |
1507 | sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); | ||
1520 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; | 1508 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; |
1521 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; | 1509 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; |
1522 | 1510 | ||
@@ -1535,7 +1523,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action) | |||
1535 | zfcp_port_put(port); | 1523 | zfcp_port_put(port); |
1536 | } | 1524 | } |
1537 | out: | 1525 | out: |
1538 | spin_unlock_bh(&adapter->req_q_lock); | 1526 | spin_unlock_bh(&qdio->req_q_lock); |
1539 | return retval; | 1527 | return retval; |
1540 | } | 1528 | } |
1541 | 1529 | ||
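A pattern repeated throughout these hunks: the ZFCP_REQ_AUTO_CLEANUP flag is no longer passed to zfcp_fsf_req_create(); callers that want the request freed after its handler runs now mark the request themselves. Condensed from the open-port hunk above (error handling elided):

	/* before: cleanup behaviour requested via a creation flag */
	req = zfcp_fsf_req_create(adapter, FSF_QTCB_OPEN_PORT_WITH_DID,
				  ZFCP_REQ_AUTO_CLEANUP,
				  adapter->pool.fsf_req_erp);

	/* after: create against the qdio instance, then flag the request */
	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
				  qdio->adapter->pool.erp_req);
	if (!IS_ERR(req))
		req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;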
@@ -1569,23 +1557,24 @@ static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req) | |||
1569 | int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action) | 1557 | int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action) |
1570 | { | 1558 | { |
1571 | struct qdio_buffer_element *sbale; | 1559 | struct qdio_buffer_element *sbale; |
1572 | struct zfcp_adapter *adapter = erp_action->adapter; | 1560 | struct zfcp_qdio *qdio = erp_action->adapter->qdio; |
1573 | struct zfcp_fsf_req *req; | 1561 | struct zfcp_fsf_req *req; |
1574 | int retval = -EIO; | 1562 | int retval = -EIO; |
1575 | 1563 | ||
1576 | spin_lock_bh(&adapter->req_q_lock); | 1564 | spin_lock_bh(&qdio->req_q_lock); |
1577 | if (zfcp_fsf_req_sbal_get(adapter)) | 1565 | if (zfcp_fsf_req_sbal_get(qdio)) |
1578 | goto out; | 1566 | goto out; |
1579 | 1567 | ||
1580 | req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PORT, | 1568 | req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT, |
1581 | ZFCP_REQ_AUTO_CLEANUP, | 1569 | qdio->adapter->pool.erp_req); |
1582 | adapter->pool.fsf_req_erp); | 1570 | |
1583 | if (IS_ERR(req)) { | 1571 | if (IS_ERR(req)) { |
1584 | retval = PTR_ERR(req); | 1572 | retval = PTR_ERR(req); |
1585 | goto out; | 1573 | goto out; |
1586 | } | 1574 | } |
1587 | 1575 | ||
1588 | sbale = zfcp_qdio_sbale_req(req); | 1576 | req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; |
1577 | sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); | ||
1589 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; | 1578 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; |
1590 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; | 1579 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; |
1591 | 1580 | ||
@@ -1602,7 +1591,7 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action) | |||
1602 | erp_action->fsf_req = NULL; | 1591 | erp_action->fsf_req = NULL; |
1603 | } | 1592 | } |
1604 | out: | 1593 | out: |
1605 | spin_unlock_bh(&adapter->req_q_lock); | 1594 | spin_unlock_bh(&qdio->req_q_lock); |
1606 | return retval; | 1595 | return retval; |
1607 | } | 1596 | } |
1608 | 1597 | ||
@@ -1645,24 +1634,24 @@ out: | |||
1645 | int zfcp_fsf_open_wka_port(struct zfcp_wka_port *wka_port) | 1634 | int zfcp_fsf_open_wka_port(struct zfcp_wka_port *wka_port) |
1646 | { | 1635 | { |
1647 | struct qdio_buffer_element *sbale; | 1636 | struct qdio_buffer_element *sbale; |
1648 | struct zfcp_adapter *adapter = wka_port->adapter; | 1637 | struct zfcp_qdio *qdio = wka_port->adapter->qdio; |
1649 | struct zfcp_fsf_req *req; | 1638 | struct zfcp_fsf_req *req; |
1650 | int retval = -EIO; | 1639 | int retval = -EIO; |
1651 | 1640 | ||
1652 | spin_lock_bh(&adapter->req_q_lock); | 1641 | spin_lock_bh(&qdio->req_q_lock); |
1653 | if (zfcp_fsf_req_sbal_get(adapter)) | 1642 | if (zfcp_fsf_req_sbal_get(qdio)) |
1654 | goto out; | 1643 | goto out; |
1655 | 1644 | ||
1656 | req = zfcp_fsf_req_create(adapter, | 1645 | req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID, |
1657 | FSF_QTCB_OPEN_PORT_WITH_DID, | 1646 | qdio->adapter->pool.erp_req); |
1658 | ZFCP_REQ_AUTO_CLEANUP, | 1647 | |
1659 | adapter->pool.fsf_req_erp); | ||
1660 | if (unlikely(IS_ERR(req))) { | 1648 | if (unlikely(IS_ERR(req))) { |
1661 | retval = PTR_ERR(req); | 1649 | retval = PTR_ERR(req); |
1662 | goto out; | 1650 | goto out; |
1663 | } | 1651 | } |
1664 | 1652 | ||
1665 | sbale = zfcp_qdio_sbale_req(req); | 1653 | req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; |
1654 | sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); | ||
1666 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; | 1655 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; |
1667 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; | 1656 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; |
1668 | 1657 | ||
@@ -1675,7 +1664,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_wka_port *wka_port) | |||
1675 | if (retval) | 1664 | if (retval) |
1676 | zfcp_fsf_req_free(req); | 1665 | zfcp_fsf_req_free(req); |
1677 | out: | 1666 | out: |
1678 | spin_unlock_bh(&adapter->req_q_lock); | 1667 | spin_unlock_bh(&qdio->req_q_lock); |
1679 | return retval; | 1668 | return retval; |
1680 | } | 1669 | } |
1681 | 1670 | ||
@@ -1700,23 +1689,24 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req) | |||
1700 | int zfcp_fsf_close_wka_port(struct zfcp_wka_port *wka_port) | 1689 | int zfcp_fsf_close_wka_port(struct zfcp_wka_port *wka_port) |
1701 | { | 1690 | { |
1702 | struct qdio_buffer_element *sbale; | 1691 | struct qdio_buffer_element *sbale; |
1703 | struct zfcp_adapter *adapter = wka_port->adapter; | 1692 | struct zfcp_qdio *qdio = wka_port->adapter->qdio; |
1704 | struct zfcp_fsf_req *req; | 1693 | struct zfcp_fsf_req *req; |
1705 | int retval = -EIO; | 1694 | int retval = -EIO; |
1706 | 1695 | ||
1707 | spin_lock_bh(&adapter->req_q_lock); | 1696 | spin_lock_bh(&qdio->req_q_lock); |
1708 | if (zfcp_fsf_req_sbal_get(adapter)) | 1697 | if (zfcp_fsf_req_sbal_get(qdio)) |
1709 | goto out; | 1698 | goto out; |
1710 | 1699 | ||
1711 | req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PORT, | 1700 | req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT, |
1712 | ZFCP_REQ_AUTO_CLEANUP, | 1701 | qdio->adapter->pool.erp_req); |
1713 | adapter->pool.fsf_req_erp); | 1702 | |
1714 | if (unlikely(IS_ERR(req))) { | 1703 | if (unlikely(IS_ERR(req))) { |
1715 | retval = PTR_ERR(req); | 1704 | retval = PTR_ERR(req); |
1716 | goto out; | 1705 | goto out; |
1717 | } | 1706 | } |
1718 | 1707 | ||
1719 | sbale = zfcp_qdio_sbale_req(req); | 1708 | req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; |
1709 | sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); | ||
1720 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; | 1710 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; |
1721 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; | 1711 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; |
1722 | 1712 | ||
@@ -1729,7 +1719,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_wka_port *wka_port) | |||
1729 | if (retval) | 1719 | if (retval) |
1730 | zfcp_fsf_req_free(req); | 1720 | zfcp_fsf_req_free(req); |
1731 | out: | 1721 | out: |
1732 | spin_unlock_bh(&adapter->req_q_lock); | 1722 | spin_unlock_bh(&qdio->req_q_lock); |
1733 | return retval; | 1723 | return retval; |
1734 | } | 1724 | } |
1735 | 1725 | ||
@@ -1791,23 +1781,24 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req) | |||
1791 | int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action) | 1781 | int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action) |
1792 | { | 1782 | { |
1793 | struct qdio_buffer_element *sbale; | 1783 | struct qdio_buffer_element *sbale; |
1794 | struct zfcp_adapter *adapter = erp_action->adapter; | 1784 | struct zfcp_qdio *qdio = erp_action->adapter->qdio; |
1795 | struct zfcp_fsf_req *req; | 1785 | struct zfcp_fsf_req *req; |
1796 | int retval = -EIO; | 1786 | int retval = -EIO; |
1797 | 1787 | ||
1798 | spin_lock_bh(&adapter->req_q_lock); | 1788 | spin_lock_bh(&qdio->req_q_lock); |
1799 | if (zfcp_fsf_req_sbal_get(adapter)) | 1789 | if (zfcp_fsf_req_sbal_get(qdio)) |
1800 | goto out; | 1790 | goto out; |
1801 | 1791 | ||
1802 | req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PHYSICAL_PORT, | 1792 | req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT, |
1803 | ZFCP_REQ_AUTO_CLEANUP, | 1793 | qdio->adapter->pool.erp_req); |
1804 | adapter->pool.fsf_req_erp); | 1794 | |
1805 | if (IS_ERR(req)) { | 1795 | if (IS_ERR(req)) { |
1806 | retval = PTR_ERR(req); | 1796 | retval = PTR_ERR(req); |
1807 | goto out; | 1797 | goto out; |
1808 | } | 1798 | } |
1809 | 1799 | ||
1810 | sbale = zfcp_qdio_sbale_req(req); | 1800 | req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; |
1801 | sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); | ||
1811 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; | 1802 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; |
1812 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; | 1803 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; |
1813 | 1804 | ||
@@ -1824,7 +1815,7 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action) | |||
1824 | erp_action->fsf_req = NULL; | 1815 | erp_action->fsf_req = NULL; |
1825 | } | 1816 | } |
1826 | out: | 1817 | out: |
1827 | spin_unlock_bh(&adapter->req_q_lock); | 1818 | spin_unlock_bh(&qdio->req_q_lock); |
1828 | return retval; | 1819 | return retval; |
1829 | } | 1820 | } |
1830 | 1821 | ||
@@ -1895,7 +1886,7 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req) | |||
1895 | case FSF_ADAPTER_STATUS_AVAILABLE: | 1886 | case FSF_ADAPTER_STATUS_AVAILABLE: |
1896 | switch (header->fsf_status_qual.word[0]) { | 1887 | switch (header->fsf_status_qual.word[0]) { |
1897 | case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: | 1888 | case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: |
1898 | zfcp_test_link(unit->port); | 1889 | zfcp_fc_test_link(unit->port); |
1899 | /* fall through */ | 1890 | /* fall through */ |
1900 | case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: | 1891 | case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: |
1901 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 1892 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
@@ -1964,22 +1955,24 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action) | |||
1964 | { | 1955 | { |
1965 | struct qdio_buffer_element *sbale; | 1956 | struct qdio_buffer_element *sbale; |
1966 | struct zfcp_adapter *adapter = erp_action->adapter; | 1957 | struct zfcp_adapter *adapter = erp_action->adapter; |
1958 | struct zfcp_qdio *qdio = adapter->qdio; | ||
1967 | struct zfcp_fsf_req *req; | 1959 | struct zfcp_fsf_req *req; |
1968 | int retval = -EIO; | 1960 | int retval = -EIO; |
1969 | 1961 | ||
1970 | spin_lock_bh(&adapter->req_q_lock); | 1962 | spin_lock_bh(&qdio->req_q_lock); |
1971 | if (zfcp_fsf_req_sbal_get(adapter)) | 1963 | if (zfcp_fsf_req_sbal_get(qdio)) |
1972 | goto out; | 1964 | goto out; |
1973 | 1965 | ||
1974 | req = zfcp_fsf_req_create(adapter, FSF_QTCB_OPEN_LUN, | 1966 | req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN, |
1975 | ZFCP_REQ_AUTO_CLEANUP, | 1967 | adapter->pool.erp_req); |
1976 | adapter->pool.fsf_req_erp); | 1968 | |
1977 | if (IS_ERR(req)) { | 1969 | if (IS_ERR(req)) { |
1978 | retval = PTR_ERR(req); | 1970 | retval = PTR_ERR(req); |
1979 | goto out; | 1971 | goto out; |
1980 | } | 1972 | } |
1981 | 1973 | ||
1982 | sbale = zfcp_qdio_sbale_req(req); | 1974 | req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; |
1975 | sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); | ||
1983 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; | 1976 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; |
1984 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; | 1977 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; |
1985 | 1978 | ||
@@ -2000,7 +1993,7 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action) | |||
2000 | erp_action->fsf_req = NULL; | 1993 | erp_action->fsf_req = NULL; |
2001 | } | 1994 | } |
2002 | out: | 1995 | out: |
2003 | spin_unlock_bh(&adapter->req_q_lock); | 1996 | spin_unlock_bh(&qdio->req_q_lock); |
2004 | return retval; | 1997 | return retval; |
2005 | } | 1998 | } |
2006 | 1999 | ||
@@ -2028,7 +2021,7 @@ static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req) | |||
2028 | case FSF_ADAPTER_STATUS_AVAILABLE: | 2021 | case FSF_ADAPTER_STATUS_AVAILABLE: |
2029 | switch (req->qtcb->header.fsf_status_qual.word[0]) { | 2022 | switch (req->qtcb->header.fsf_status_qual.word[0]) { |
2030 | case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: | 2023 | case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: |
2031 | zfcp_test_link(unit->port); | 2024 | zfcp_fc_test_link(unit->port); |
2032 | /* fall through */ | 2025 | /* fall through */ |
2033 | case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: | 2026 | case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: |
2034 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 2027 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
@@ -2049,22 +2042,24 @@ static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req) | |||
2049 | int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action) | 2042 | int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action) |
2050 | { | 2043 | { |
2051 | struct qdio_buffer_element *sbale; | 2044 | struct qdio_buffer_element *sbale; |
2052 | struct zfcp_adapter *adapter = erp_action->adapter; | 2045 | struct zfcp_qdio *qdio = erp_action->adapter->qdio; |
2053 | struct zfcp_fsf_req *req; | 2046 | struct zfcp_fsf_req *req; |
2054 | int retval = -EIO; | 2047 | int retval = -EIO; |
2055 | 2048 | ||
2056 | spin_lock_bh(&adapter->req_q_lock); | 2049 | spin_lock_bh(&qdio->req_q_lock); |
2057 | if (zfcp_fsf_req_sbal_get(adapter)) | 2050 | if (zfcp_fsf_req_sbal_get(qdio)) |
2058 | goto out; | 2051 | goto out; |
2059 | req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_LUN, | 2052 | |
2060 | ZFCP_REQ_AUTO_CLEANUP, | 2053 | req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN, |
2061 | adapter->pool.fsf_req_erp); | 2054 | qdio->adapter->pool.erp_req); |
2055 | |||
2062 | if (IS_ERR(req)) { | 2056 | if (IS_ERR(req)) { |
2063 | retval = PTR_ERR(req); | 2057 | retval = PTR_ERR(req); |
2064 | goto out; | 2058 | goto out; |
2065 | } | 2059 | } |
2066 | 2060 | ||
2067 | sbale = zfcp_qdio_sbale_req(req); | 2061 | req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; |
2062 | sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); | ||
2068 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; | 2063 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; |
2069 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; | 2064 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; |
2070 | 2065 | ||
@@ -2082,7 +2077,7 @@ int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action) | |||
2082 | erp_action->fsf_req = NULL; | 2077 | erp_action->fsf_req = NULL; |
2083 | } | 2078 | } |
2084 | out: | 2079 | out: |
2085 | spin_unlock_bh(&adapter->req_q_lock); | 2080 | spin_unlock_bh(&qdio->req_q_lock); |
2086 | return retval; | 2081 | return retval; |
2087 | } | 2082 | } |
2088 | 2083 | ||
@@ -2141,8 +2136,8 @@ static void zfcp_fsf_trace_latency(struct zfcp_fsf_req *fsf_req) | |||
2141 | } | 2136 | } |
2142 | if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) | 2137 | if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) |
2143 | trace.flags |= ZFCP_BLK_REQ_ERROR; | 2138 | trace.flags |= ZFCP_BLK_REQ_ERROR; |
2144 | trace.inb_usage = fsf_req->qdio_inb_usage; | 2139 | trace.inb_usage = fsf_req->queue_req.qdio_inb_usage; |
2145 | trace.outb_usage = fsf_req->qdio_outb_usage; | 2140 | trace.outb_usage = fsf_req->queue_req.qdio_outb_usage; |
2146 | 2141 | ||
2147 | blk_add_driver_data(req->q, req, &trace, sizeof(trace)); | 2142 | blk_add_driver_data(req->q, req, &trace, sizeof(trace)); |
2148 | } | 2143 | } |
@@ -2215,11 +2210,11 @@ static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req) | |||
2215 | } | 2210 | } |
2216 | skip_fsfstatus: | 2211 | skip_fsfstatus: |
2217 | if (scpnt->result != 0) | 2212 | if (scpnt->result != 0) |
2218 | zfcp_scsi_dbf_event_result("erro", 3, req->adapter, scpnt, req); | 2213 | zfcp_dbf_scsi_result("erro", 3, req->adapter->dbf, scpnt, req); |
2219 | else if (scpnt->retries > 0) | 2214 | else if (scpnt->retries > 0) |
2220 | zfcp_scsi_dbf_event_result("retr", 4, req->adapter, scpnt, req); | 2215 | zfcp_dbf_scsi_result("retr", 4, req->adapter->dbf, scpnt, req); |
2221 | else | 2216 | else |
2222 | zfcp_scsi_dbf_event_result("norm", 6, req->adapter, scpnt, req); | 2217 | zfcp_dbf_scsi_result("norm", 6, req->adapter->dbf, scpnt, req); |
2223 | 2218 | ||
2224 | scpnt->host_scribble = NULL; | 2219 | scpnt->host_scribble = NULL; |
2225 | (scpnt->scsi_done) (scpnt); | 2220 | (scpnt->scsi_done) (scpnt); |
@@ -2309,7 +2304,7 @@ static void zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *req) | |||
2309 | case FSF_ADAPTER_STATUS_AVAILABLE: | 2304 | case FSF_ADAPTER_STATUS_AVAILABLE: |
2310 | if (header->fsf_status_qual.word[0] == | 2305 | if (header->fsf_status_qual.word[0] == |
2311 | FSF_SQ_INVOKE_LINK_TEST_PROCEDURE) | 2306 | FSF_SQ_INVOKE_LINK_TEST_PROCEDURE) |
2312 | zfcp_test_link(unit->port); | 2307 | zfcp_fc_test_link(unit->port); |
2313 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 2308 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
2314 | break; | 2309 | break; |
2315 | } | 2310 | } |
@@ -2350,24 +2345,27 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit, | |||
2350 | unsigned int sbtype = SBAL_FLAGS0_TYPE_READ; | 2345 | unsigned int sbtype = SBAL_FLAGS0_TYPE_READ; |
2351 | int real_bytes, retval = -EIO; | 2346 | int real_bytes, retval = -EIO; |
2352 | struct zfcp_adapter *adapter = unit->port->adapter; | 2347 | struct zfcp_adapter *adapter = unit->port->adapter; |
2348 | struct zfcp_qdio *qdio = adapter->qdio; | ||
2353 | 2349 | ||
2354 | if (unlikely(!(atomic_read(&unit->status) & | 2350 | if (unlikely(!(atomic_read(&unit->status) & |
2355 | ZFCP_STATUS_COMMON_UNBLOCKED))) | 2351 | ZFCP_STATUS_COMMON_UNBLOCKED))) |
2356 | return -EBUSY; | 2352 | return -EBUSY; |
2357 | 2353 | ||
2358 | spin_lock(&adapter->req_q_lock); | 2354 | spin_lock(&qdio->req_q_lock); |
2359 | if (atomic_read(&adapter->req_q.count) <= 0) { | 2355 | if (atomic_read(&qdio->req_q.count) <= 0) { |
2360 | atomic_inc(&adapter->qdio_outb_full); | 2356 | atomic_inc(&qdio->req_q_full); |
2361 | goto out; | 2357 | goto out; |
2362 | } | 2358 | } |
2363 | req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, | 2359 | |
2364 | ZFCP_REQ_AUTO_CLEANUP, | 2360 | req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND, |
2365 | adapter->pool.fsf_req_scsi); | 2361 | adapter->pool.scsi_req); |
2362 | |||
2366 | if (IS_ERR(req)) { | 2363 | if (IS_ERR(req)) { |
2367 | retval = PTR_ERR(req); | 2364 | retval = PTR_ERR(req); |
2368 | goto out; | 2365 | goto out; |
2369 | } | 2366 | } |
2370 | 2367 | ||
2368 | req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; | ||
2371 | zfcp_unit_get(unit); | 2369 | zfcp_unit_get(unit); |
2372 | req->unit = unit; | 2370 | req->unit = unit; |
2373 | req->data = scsi_cmnd; | 2371 | req->data = scsi_cmnd; |
@@ -2419,11 +2417,11 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit, | |||
2419 | req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) + | 2417 | req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) + |
2420 | fcp_cmnd_iu->add_fcp_cdb_length + sizeof(u32); | 2418 | fcp_cmnd_iu->add_fcp_cdb_length + sizeof(u32); |
2421 | 2419 | ||
2422 | real_bytes = zfcp_qdio_sbals_from_sg(req, sbtype, | 2420 | real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->queue_req, sbtype, |
2423 | scsi_sglist(scsi_cmnd), | 2421 | scsi_sglist(scsi_cmnd), |
2424 | FSF_MAX_SBALS_PER_REQ); | 2422 | FSF_MAX_SBALS_PER_REQ); |
2425 | if (unlikely(real_bytes < 0)) { | 2423 | if (unlikely(real_bytes < 0)) { |
2426 | if (req->sbal_number >= FSF_MAX_SBALS_PER_REQ) { | 2424 | if (req->queue_req.sbal_number >= FSF_MAX_SBALS_PER_REQ) { |
2427 | dev_err(&adapter->ccw_device->dev, | 2425 | dev_err(&adapter->ccw_device->dev, |
2428 | "Oversize data package, unit 0x%016Lx " | 2426 | "Oversize data package, unit 0x%016Lx " |
2429 | "on port 0x%016Lx closed\n", | 2427 | "on port 0x%016Lx closed\n", |
@@ -2448,7 +2446,7 @@ failed_scsi_cmnd: | |||
2448 | zfcp_fsf_req_free(req); | 2446 | zfcp_fsf_req_free(req); |
2449 | scsi_cmnd->host_scribble = NULL; | 2447 | scsi_cmnd->host_scribble = NULL; |
2450 | out: | 2448 | out: |
2451 | spin_unlock(&adapter->req_q_lock); | 2449 | spin_unlock(&qdio->req_q_lock); |
2452 | return retval; | 2450 | return retval; |
2453 | } | 2451 | } |
2454 | 2452 | ||
@@ -2463,17 +2461,19 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags) | |||
2463 | struct qdio_buffer_element *sbale; | 2461 | struct qdio_buffer_element *sbale; |
2464 | struct zfcp_fsf_req *req = NULL; | 2462 | struct zfcp_fsf_req *req = NULL; |
2465 | struct fcp_cmnd_iu *fcp_cmnd_iu; | 2463 | struct fcp_cmnd_iu *fcp_cmnd_iu; |
2466 | struct zfcp_adapter *adapter = unit->port->adapter; | 2464 | struct zfcp_qdio *qdio = unit->port->adapter->qdio; |
2467 | 2465 | ||
2468 | if (unlikely(!(atomic_read(&unit->status) & | 2466 | if (unlikely(!(atomic_read(&unit->status) & |
2469 | ZFCP_STATUS_COMMON_UNBLOCKED))) | 2467 | ZFCP_STATUS_COMMON_UNBLOCKED))) |
2470 | return NULL; | 2468 | return NULL; |
2471 | 2469 | ||
2472 | spin_lock_bh(&adapter->req_q_lock); | 2470 | spin_lock_bh(&qdio->req_q_lock); |
2473 | if (zfcp_fsf_req_sbal_get(adapter)) | 2471 | if (zfcp_fsf_req_sbal_get(qdio)) |
2474 | goto out; | 2472 | goto out; |
2475 | req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, 0, | 2473 | |
2476 | adapter->pool.fsf_req_scsi); | 2474 | req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND, |
2475 | qdio->adapter->pool.scsi_req); | ||
2476 | |||
2477 | if (IS_ERR(req)) { | 2477 | if (IS_ERR(req)) { |
2478 | req = NULL; | 2478 | req = NULL; |
2479 | goto out; | 2479 | goto out; |
@@ -2489,7 +2489,7 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags) | |||
2489 | req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) + | 2489 | req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) + |
2490 | sizeof(u32); | 2490 | sizeof(u32); |
2491 | 2491 | ||
2492 | sbale = zfcp_qdio_sbale_req(req); | 2492 | sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); |
2493 | sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE; | 2493 | sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE; |
2494 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; | 2494 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; |
2495 | 2495 | ||
@@ -2504,7 +2504,7 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags) | |||
2504 | zfcp_fsf_req_free(req); | 2504 | zfcp_fsf_req_free(req); |
2505 | req = NULL; | 2505 | req = NULL; |
2506 | out: | 2506 | out: |
2507 | spin_unlock_bh(&adapter->req_q_lock); | 2507 | spin_unlock_bh(&qdio->req_q_lock); |
2508 | return req; | 2508 | return req; |
2509 | } | 2509 | } |
2510 | 2510 | ||
@@ -2522,6 +2522,7 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter, | |||
2522 | struct zfcp_fsf_cfdc *fsf_cfdc) | 2522 | struct zfcp_fsf_cfdc *fsf_cfdc) |
2523 | { | 2523 | { |
2524 | struct qdio_buffer_element *sbale; | 2524 | struct qdio_buffer_element *sbale; |
2525 | struct zfcp_qdio *qdio = adapter->qdio; | ||
2525 | struct zfcp_fsf_req *req = NULL; | 2526 | struct zfcp_fsf_req *req = NULL; |
2526 | struct fsf_qtcb_bottom_support *bottom; | 2527 | struct fsf_qtcb_bottom_support *bottom; |
2527 | int direction, retval = -EIO, bytes; | 2528 | int direction, retval = -EIO, bytes; |
@@ -2540,11 +2541,11 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter, | |||
2540 | return ERR_PTR(-EINVAL); | 2541 | return ERR_PTR(-EINVAL); |
2541 | } | 2542 | } |
2542 | 2543 | ||
2543 | spin_lock_bh(&adapter->req_q_lock); | 2544 | spin_lock_bh(&qdio->req_q_lock); |
2544 | if (zfcp_fsf_req_sbal_get(adapter)) | 2545 | if (zfcp_fsf_req_sbal_get(qdio)) |
2545 | goto out; | 2546 | goto out; |
2546 | 2547 | ||
2547 | req = zfcp_fsf_req_create(adapter, fsf_cfdc->command, 0, NULL); | 2548 | req = zfcp_fsf_req_create(qdio, fsf_cfdc->command, NULL); |
2548 | if (IS_ERR(req)) { | 2549 | if (IS_ERR(req)) { |
2549 | retval = -EPERM; | 2550 | retval = -EPERM; |
2550 | goto out; | 2551 | goto out; |
@@ -2552,14 +2553,15 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter, | |||
2552 | 2553 | ||
2553 | req->handler = zfcp_fsf_control_file_handler; | 2554 | req->handler = zfcp_fsf_control_file_handler; |
2554 | 2555 | ||
2555 | sbale = zfcp_qdio_sbale_req(req); | 2556 | sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); |
2556 | sbale[0].flags |= direction; | 2557 | sbale[0].flags |= direction; |
2557 | 2558 | ||
2558 | bottom = &req->qtcb->bottom.support; | 2559 | bottom = &req->qtcb->bottom.support; |
2559 | bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE; | 2560 | bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE; |
2560 | bottom->option = fsf_cfdc->option; | 2561 | bottom->option = fsf_cfdc->option; |
2561 | 2562 | ||
2562 | bytes = zfcp_qdio_sbals_from_sg(req, direction, fsf_cfdc->sg, | 2563 | bytes = zfcp_qdio_sbals_from_sg(qdio, &req->queue_req, |
2564 | direction, fsf_cfdc->sg, | ||
2563 | FSF_MAX_SBALS_PER_REQ); | 2565 | FSF_MAX_SBALS_PER_REQ); |
2564 | if (bytes != ZFCP_CFDC_MAX_SIZE) { | 2566 | if (bytes != ZFCP_CFDC_MAX_SIZE) { |
2565 | zfcp_fsf_req_free(req); | 2567 | zfcp_fsf_req_free(req); |
@@ -2569,12 +2571,53 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter, | |||
2569 | zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); | 2571 | zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); |
2570 | retval = zfcp_fsf_req_send(req); | 2572 | retval = zfcp_fsf_req_send(req); |
2571 | out: | 2573 | out: |
2572 | spin_unlock_bh(&adapter->req_q_lock); | 2574 | spin_unlock_bh(&qdio->req_q_lock); |
2573 | 2575 | ||
2574 | if (!retval) { | 2576 | if (!retval) { |
2575 | wait_event(req->completion_wq, | 2577 | wait_for_completion(&req->completion); |
2576 | req->status & ZFCP_STATUS_FSFREQ_COMPLETED); | ||
2577 | return req; | 2578 | return req; |
2578 | } | 2579 | } |
2579 | return ERR_PTR(retval); | 2580 | return ERR_PTR(retval); |
2580 | } | 2581 | } |
2582 | |||
2583 | /** | ||
2584 | * zfcp_fsf_reqid_check - validate req_id contained in SBAL returned by QDIO | ||
2585 | * @qdio: pointer to struct zfcp_qdio | ||
2586 | * @sbal_idx: response queue index of SBAL to be processed | ||
2587 | */ | ||
2588 | void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx) | ||
2589 | { | ||
2590 | struct zfcp_adapter *adapter = qdio->adapter; | ||
2591 | struct qdio_buffer *sbal = qdio->resp_q.sbal[sbal_idx]; | ||
2592 | struct qdio_buffer_element *sbale; | ||
2593 | struct zfcp_fsf_req *fsf_req; | ||
2594 | unsigned long flags, req_id; | ||
2595 | int idx; | ||
2596 | |||
2597 | for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) { | ||
2598 | |||
2599 | sbale = &sbal->element[idx]; | ||
2600 | req_id = (unsigned long) sbale->addr; | ||
2601 | spin_lock_irqsave(&adapter->req_list_lock, flags); | ||
2602 | fsf_req = zfcp_reqlist_find(adapter, req_id); | ||
2603 | |||
2604 | if (!fsf_req) | ||
2605 | /* | ||
2606 | * Unknown request means that we have potentially memory | ||
2607 | * corruption and must stop the machine immediately. | ||
2608 | */ | ||
2609 | panic("error: unknown req_id (%lx) on adapter %s.\n", | ||
2610 | req_id, dev_name(&adapter->ccw_device->dev)); | ||
2611 | |||
2612 | list_del(&fsf_req->list); | ||
2613 | spin_unlock_irqrestore(&adapter->req_list_lock, flags); | ||
2614 | |||
2615 | fsf_req->queue_req.sbal_response = sbal_idx; | ||
2616 | fsf_req->queue_req.qdio_inb_usage = | ||
2617 | atomic_read(&qdio->resp_q.count); | ||
2618 | zfcp_fsf_req_complete(fsf_req); | ||
2619 | |||
2620 | if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY)) | ||
2621 | break; | ||
2622 | } | ||
2623 | } | ||
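zfcp_fsf_reqid_check() closes the loop on the request id that was planted in the SBALE address field when the request was queued: the id is recovered from the response SBALE and looked up in the adapter's request list, and an unknown id is treated as fatal memory corruption. Roughly, with the send side shown only as an assumption about where the id is stored:

	/* send path (sketch): the request id doubles as the SBALE "address" */
	sbale[0].addr = (void *) req->req_id;

	/* response path: recover the id and find the owning request */
	req_id = (unsigned long) sbale->addr;
	fsf_req = zfcp_reqlist_find(adapter, req_id);
	if (!fsf_req)
		panic("error: unknown req_id (%lx)\n", req_id);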
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index df7f232faba8..dcc7c1dbcf58 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -3,13 +3,14 @@ | |||
3 | * | 3 | * |
4 | * Interface to the FSF support functions. | 4 | * Interface to the FSF support functions. |
5 | * | 5 | * |
6 | * Copyright IBM Corporation 2002, 2008 | 6 | * Copyright IBM Corporation 2002, 2009 |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #ifndef FSF_H | 9 | #ifndef FSF_H |
10 | #define FSF_H | 10 | #define FSF_H |
11 | 11 | ||
12 | #include <linux/pfn.h> | 12 | #include <linux/pfn.h> |
13 | #include <linux/scatterlist.h> | ||
13 | 14 | ||
14 | #define FSF_QTCB_CURRENT_VERSION 0x00000001 | 15 | #define FSF_QTCB_CURRENT_VERSION 0x00000001 |
15 | 16 | ||
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index e0a215309df0..6c5228b627fc 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -3,7 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Setup and helper functions to access QDIO. | 4 | * Setup and helper functions to access QDIO. |
5 | * | 5 | * |
6 | * Copyright IBM Corporation 2002, 2008 | 6 | * Copyright IBM Corporation 2002, 2009 |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #define KMSG_COMPONENT "zfcp" | 9 | #define KMSG_COMPONENT "zfcp" |
@@ -34,29 +34,10 @@ zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx) | |||
34 | return &q->sbal[sbal_idx]->element[sbale_idx]; | 34 | return &q->sbal[sbal_idx]->element[sbale_idx]; |
35 | } | 35 | } |
36 | 36 | ||
37 | /** | 37 | static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id) |
38 | * zfcp_qdio_free - free memory used by request- and resposne queue | ||
39 | * @adapter: pointer to the zfcp_adapter structure | ||
40 | */ | ||
41 | void zfcp_qdio_free(struct zfcp_adapter *adapter) | ||
42 | { | 38 | { |
43 | struct qdio_buffer **sbal_req, **sbal_resp; | 39 | struct zfcp_adapter *adapter = qdio->adapter; |
44 | int p; | ||
45 | |||
46 | if (adapter->ccw_device) | ||
47 | qdio_free(adapter->ccw_device); | ||
48 | |||
49 | sbal_req = adapter->req_q.sbal; | ||
50 | sbal_resp = adapter->resp_q.sbal; | ||
51 | |||
52 | for (p = 0; p < QDIO_MAX_BUFFERS_PER_Q; p += QBUFF_PER_PAGE) { | ||
53 | free_page((unsigned long) sbal_req[p]); | ||
54 | free_page((unsigned long) sbal_resp[p]); | ||
55 | } | ||
56 | } | ||
57 | 40 | ||
58 | static void zfcp_qdio_handler_error(struct zfcp_adapter *adapter, char *id) | ||
59 | { | ||
60 | dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n"); | 41 | dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n"); |
61 | 42 | ||
62 | zfcp_erp_adapter_reopen(adapter, | 43 | zfcp_erp_adapter_reopen(adapter, |
@@ -75,72 +56,47 @@ static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt) | |||
75 | } | 56 | } |
76 | 57 | ||
77 | /* this needs to be called prior to updating the queue fill level */ | 58 | /* this needs to be called prior to updating the queue fill level */ |
78 | static void zfcp_qdio_account(struct zfcp_adapter *adapter) | 59 | static inline void zfcp_qdio_account(struct zfcp_qdio *qdio) |
79 | { | 60 | { |
80 | ktime_t now; | 61 | unsigned long long now, span; |
81 | s64 span; | ||
82 | int free, used; | 62 | int free, used; |
83 | 63 | ||
84 | spin_lock(&adapter->qdio_stat_lock); | 64 | spin_lock(&qdio->stat_lock); |
85 | now = ktime_get(); | 65 | now = get_clock_monotonic(); |
86 | span = ktime_us_delta(now, adapter->req_q_time); | 66 | span = (now - qdio->req_q_time) >> 12; |
87 | free = max(0, atomic_read(&adapter->req_q.count)); | 67 | free = atomic_read(&qdio->req_q.count); |
88 | used = QDIO_MAX_BUFFERS_PER_Q - free; | 68 | used = QDIO_MAX_BUFFERS_PER_Q - free; |
89 | adapter->req_q_util += used * span; | 69 | qdio->req_q_util += used * span; |
90 | adapter->req_q_time = now; | 70 | qdio->req_q_time = now; |
91 | spin_unlock(&adapter->qdio_stat_lock); | 71 | spin_unlock(&qdio->stat_lock); |
92 | } | 72 | } |
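The reworked accounting samples the s390 TOD clock through get_clock_monotonic() and shifts the delta right by 12 bits; since bit 51 of the TOD format advances once per microsecond, the shift turns the raw delta into microseconds, so req_q_util still accumulates used SBALs times elapsed time. In sketch form:

	/* TOD delta >> 12 == elapsed microseconds (bits 52-63 are sub-microsecond) */
	span = (now - qdio->req_q_time) >> 12;
	used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q.count);
	qdio->req_q_util += used * span;	/* SBAL-microseconds of queue occupancy */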
93 | 73 | ||
94 | static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err, | 74 | static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err, |
95 | int queue_no, int first, int count, | 75 | int queue_no, int first, int count, |
96 | unsigned long parm) | 76 | unsigned long parm) |
97 | { | 77 | { |
98 | struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm; | 78 | struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm; |
99 | struct zfcp_qdio_queue *queue = &adapter->req_q; | 79 | struct zfcp_qdio_queue *queue = &qdio->req_q; |
100 | 80 | ||
101 | if (unlikely(qdio_err)) { | 81 | if (unlikely(qdio_err)) { |
102 | zfcp_hba_dbf_event_qdio(adapter, qdio_err, first, count); | 82 | zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, first, |
103 | zfcp_qdio_handler_error(adapter, "qdireq1"); | 83 | count); |
84 | zfcp_qdio_handler_error(qdio, "qdireq1"); | ||
104 | return; | 85 | return; |
105 | } | 86 | } |
106 | 87 | ||
107 | /* cleanup all SBALs being program-owned now */ | 88 | /* cleanup all SBALs being program-owned now */ |
108 | zfcp_qdio_zero_sbals(queue->sbal, first, count); | 89 | zfcp_qdio_zero_sbals(queue->sbal, first, count); |
109 | 90 | ||
110 | zfcp_qdio_account(adapter); | 91 | zfcp_qdio_account(qdio); |
111 | atomic_add(count, &queue->count); | 92 | atomic_add(count, &queue->count); |
112 | wake_up(&adapter->request_wq); | 93 | wake_up(&qdio->req_q_wq); |
113 | } | ||
114 | |||
115 | static void zfcp_qdio_reqid_check(struct zfcp_adapter *adapter, | ||
116 | unsigned long req_id, int sbal_idx) | ||
117 | { | ||
118 | struct zfcp_fsf_req *fsf_req; | ||
119 | unsigned long flags; | ||
120 | |||
121 | spin_lock_irqsave(&adapter->req_list_lock, flags); | ||
122 | fsf_req = zfcp_reqlist_find(adapter, req_id); | ||
123 | |||
124 | if (!fsf_req) | ||
125 | /* | ||
126 | * Unknown request means that we have potentially memory | ||
127 | * corruption and must stop the machine immediatly. | ||
128 | */ | ||
129 | panic("error: unknown request id (%lx) on adapter %s.\n", | ||
130 | req_id, dev_name(&adapter->ccw_device->dev)); | ||
131 | |||
132 | zfcp_reqlist_remove(adapter, fsf_req); | ||
133 | spin_unlock_irqrestore(&adapter->req_list_lock, flags); | ||
134 | |||
135 | fsf_req->sbal_response = sbal_idx; | ||
136 | fsf_req->qdio_inb_usage = atomic_read(&adapter->resp_q.count); | ||
137 | zfcp_fsf_req_complete(fsf_req); | ||
138 | } | 94 | } |
139 | 95 | ||
140 | static void zfcp_qdio_resp_put_back(struct zfcp_adapter *adapter, int processed) | 96 | static void zfcp_qdio_resp_put_back(struct zfcp_qdio *qdio, int processed) |
141 | { | 97 | { |
142 | struct zfcp_qdio_queue *queue = &adapter->resp_q; | 98 | struct zfcp_qdio_queue *queue = &qdio->resp_q; |
143 | struct ccw_device *cdev = adapter->ccw_device; | 99 | struct ccw_device *cdev = qdio->adapter->ccw_device; |
144 | u8 count, start = queue->first; | 100 | u8 count, start = queue->first; |
145 | unsigned int retval; | 101 | unsigned int retval; |
146 | 102 | ||
@@ -162,14 +118,13 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err, | |||
162 | int queue_no, int first, int count, | 118 | int queue_no, int first, int count, |
163 | unsigned long parm) | 119 | unsigned long parm) |
164 | { | 120 | { |
165 | struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm; | 121 | struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm; |
166 | struct zfcp_qdio_queue *queue = &adapter->resp_q; | 122 | int sbal_idx, sbal_no; |
167 | struct qdio_buffer_element *sbale; | ||
168 | int sbal_idx, sbale_idx, sbal_no; | ||
169 | 123 | ||
170 | if (unlikely(qdio_err)) { | 124 | if (unlikely(qdio_err)) { |
171 | zfcp_hba_dbf_event_qdio(adapter, qdio_err, first, count); | 125 | zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, first, |
172 | zfcp_qdio_handler_error(adapter, "qdires1"); | 126 | count); |
127 | zfcp_qdio_handler_error(qdio, "qdires1"); | ||
173 | return; | 128 | return; |
174 | } | 129 | } |
175 | 130 | ||
@@ -179,39 +134,27 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err, | |||
179 | */ | 134 | */ |
180 | for (sbal_no = 0; sbal_no < count; sbal_no++) { | 135 | for (sbal_no = 0; sbal_no < count; sbal_no++) { |
181 | sbal_idx = (first + sbal_no) % QDIO_MAX_BUFFERS_PER_Q; | 136 | sbal_idx = (first + sbal_no) % QDIO_MAX_BUFFERS_PER_Q; |
182 | |||
183 | /* go through all SBALEs of SBAL */ | 137 | /* go through all SBALEs of SBAL */ |
184 | for (sbale_idx = 0; sbale_idx < QDIO_MAX_ELEMENTS_PER_BUFFER; | 138 | zfcp_fsf_reqid_check(qdio, sbal_idx); |
185 | sbale_idx++) { | ||
186 | sbale = zfcp_qdio_sbale(queue, sbal_idx, sbale_idx); | ||
187 | zfcp_qdio_reqid_check(adapter, | ||
188 | (unsigned long) sbale->addr, | ||
189 | sbal_idx); | ||
190 | if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY)) | ||
191 | break; | ||
192 | }; | ||
193 | |||
194 | if (unlikely(!(sbale->flags & SBAL_FLAGS_LAST_ENTRY))) | ||
195 | dev_warn(&adapter->ccw_device->dev, | ||
196 | "A QDIO protocol error occurred, " | ||
197 | "operations continue\n"); | ||
198 | } | 139 | } |
199 | 140 | ||
200 | /* | 141 | /* |
201 | * put range of SBALs back to response queue | 142 | * put range of SBALs back to response queue |
202 | * (including SBALs which have already been free before) | 143 | * (including SBALs which have already been free before) |
203 | */ | 144 | */ |
204 | zfcp_qdio_resp_put_back(adapter, count); | 145 | zfcp_qdio_resp_put_back(qdio, count); |
205 | } | 146 | } |
206 | 147 | ||
207 | /** | 148 | /** |
208 | * zfcp_qdio_sbale_req - return ptr to SBALE of req_q for a struct zfcp_fsf_req | 149 | * zfcp_qdio_sbale_req - return ptr to SBALE of req_q for a struct zfcp_fsf_req |
209 | * @fsf_req: pointer to struct fsf_req | 150 | * @qdio: pointer to struct zfcp_qdio |
151 | * @q_rec: pointer to struct zfcp_queue_rec | ||
210 | * Returns: pointer to qdio_buffer_element (SBALE) structure | 152 | * Returns: pointer to qdio_buffer_element (SBALE) structure |
211 | */ | 153 | */ |
212 | struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_fsf_req *req) | 154 | struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_qdio *qdio, |
155 | struct zfcp_queue_req *q_req) | ||
213 | { | 156 | { |
214 | return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last, 0); | 157 | return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last, 0); |
215 | } | 158 | } |
216 | 159 | ||
217 | /** | 160 | /** |
@@ -219,74 +162,80 @@ struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_fsf_req *req) | |||
219 | * @fsf_req: pointer to struct fsf_req | 162 | * @fsf_req: pointer to struct fsf_req |
220 | * Returns: pointer to qdio_buffer_element (SBALE) structure | 163 | * Returns: pointer to qdio_buffer_element (SBALE) structure |
221 | */ | 164 | */ |
222 | struct qdio_buffer_element *zfcp_qdio_sbale_curr(struct zfcp_fsf_req *req) | 165 | struct qdio_buffer_element *zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio, |
166 | struct zfcp_queue_req *q_req) | ||
223 | { | 167 | { |
224 | return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last, | 168 | return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last, |
225 | req->sbale_curr); | 169 | q_req->sbale_curr); |
226 | } | 170 | } |
227 | 171 | ||
228 | static void zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals) | 172 | static void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio, |
173 | struct zfcp_queue_req *q_req, int max_sbals) | ||
229 | { | 174 | { |
230 | int count = atomic_read(&fsf_req->adapter->req_q.count); | 175 | int count = atomic_read(&qdio->req_q.count); |
231 | count = min(count, max_sbals); | 176 | count = min(count, max_sbals); |
232 | fsf_req->sbal_limit = (fsf_req->sbal_first + count - 1) | 177 | q_req->sbal_limit = (q_req->sbal_first + count - 1) |
233 | % QDIO_MAX_BUFFERS_PER_Q; | 178 | % QDIO_MAX_BUFFERS_PER_Q; |
234 | } | 179 | } |
235 | 180 | ||
236 | static struct qdio_buffer_element * | 181 | static struct qdio_buffer_element * |
237 | zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) | 182 | zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req, |
183 | unsigned long sbtype) | ||
238 | { | 184 | { |
239 | struct qdio_buffer_element *sbale; | 185 | struct qdio_buffer_element *sbale; |
240 | 186 | ||
241 | /* set last entry flag in current SBALE of current SBAL */ | 187 | /* set last entry flag in current SBALE of current SBAL */ |
242 | sbale = zfcp_qdio_sbale_curr(fsf_req); | 188 | sbale = zfcp_qdio_sbale_curr(qdio, q_req); |
243 | sbale->flags |= SBAL_FLAGS_LAST_ENTRY; | 189 | sbale->flags |= SBAL_FLAGS_LAST_ENTRY; |
244 | 190 | ||
245 | /* don't exceed last allowed SBAL */ | 191 | /* don't exceed last allowed SBAL */ |
246 | if (fsf_req->sbal_last == fsf_req->sbal_limit) | 192 | if (q_req->sbal_last == q_req->sbal_limit) |
247 | return NULL; | 193 | return NULL; |
248 | 194 | ||
249 | /* set chaining flag in first SBALE of current SBAL */ | 195 | /* set chaining flag in first SBALE of current SBAL */ |
250 | sbale = zfcp_qdio_sbale_req(fsf_req); | 196 | sbale = zfcp_qdio_sbale_req(qdio, q_req); |
251 | sbale->flags |= SBAL_FLAGS0_MORE_SBALS; | 197 | sbale->flags |= SBAL_FLAGS0_MORE_SBALS; |
252 | 198 | ||
253 | /* calculate index of next SBAL */ | 199 | /* calculate index of next SBAL */ |
254 | fsf_req->sbal_last++; | 200 | q_req->sbal_last++; |
255 | fsf_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q; | 201 | q_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q; |
256 | 202 | ||
257 | /* keep this requests number of SBALs up-to-date */ | 203 | /* keep this requests number of SBALs up-to-date */ |
258 | fsf_req->sbal_number++; | 204 | q_req->sbal_number++; |
259 | 205 | ||
260 | /* start at first SBALE of new SBAL */ | 206 | /* start at first SBALE of new SBAL */ |
261 | fsf_req->sbale_curr = 0; | 207 | q_req->sbale_curr = 0; |
262 | 208 | ||
263 | /* set storage-block type for new SBAL */ | 209 | /* set storage-block type for new SBAL */ |
264 | sbale = zfcp_qdio_sbale_curr(fsf_req); | 210 | sbale = zfcp_qdio_sbale_curr(qdio, q_req); |
265 | sbale->flags |= sbtype; | 211 | sbale->flags |= sbtype; |
266 | 212 | ||
267 | return sbale; | 213 | return sbale; |
268 | } | 214 | } |
269 | 215 | ||
270 | static struct qdio_buffer_element * | 216 | static struct qdio_buffer_element * |
271 | zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) | 217 | zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req, |
218 | unsigned int sbtype) | ||
272 | { | 219 | { |
273 | if (fsf_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL) | 220 | if (q_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL) |
274 | return zfcp_qdio_sbal_chain(fsf_req, sbtype); | 221 | return zfcp_qdio_sbal_chain(qdio, q_req, sbtype); |
275 | fsf_req->sbale_curr++; | 222 | q_req->sbale_curr++; |
276 | return zfcp_qdio_sbale_curr(fsf_req); | 223 | return zfcp_qdio_sbale_curr(qdio, q_req); |
277 | } | 224 | } |
278 | 225 | ||
279 | static void zfcp_qdio_undo_sbals(struct zfcp_fsf_req *fsf_req) | 226 | static void zfcp_qdio_undo_sbals(struct zfcp_qdio *qdio, |
227 | struct zfcp_queue_req *q_req) | ||
280 | { | 228 | { |
281 | struct qdio_buffer **sbal = fsf_req->adapter->req_q.sbal; | 229 | struct qdio_buffer **sbal = qdio->req_q.sbal; |
282 | int first = fsf_req->sbal_first; | 230 | int first = q_req->sbal_first; |
283 | int last = fsf_req->sbal_last; | 231 | int last = q_req->sbal_last; |
284 | int count = (last - first + QDIO_MAX_BUFFERS_PER_Q) % | 232 | int count = (last - first + QDIO_MAX_BUFFERS_PER_Q) % |
285 | QDIO_MAX_BUFFERS_PER_Q + 1; | 233 | QDIO_MAX_BUFFERS_PER_Q + 1; |
286 | zfcp_qdio_zero_sbals(sbal, first, count); | 234 | zfcp_qdio_zero_sbals(sbal, first, count); |
287 | } | 235 | } |
288 | 236 | ||
289 | static int zfcp_qdio_fill_sbals(struct zfcp_fsf_req *fsf_req, | 237 | static int zfcp_qdio_fill_sbals(struct zfcp_qdio *qdio, |
238 | struct zfcp_queue_req *q_req, | ||
290 | unsigned int sbtype, void *start_addr, | 239 | unsigned int sbtype, void *start_addr, |
291 | unsigned int total_length) | 240 | unsigned int total_length) |
292 | { | 241 | { |
@@ -297,10 +246,10 @@ static int zfcp_qdio_fill_sbals(struct zfcp_fsf_req *fsf_req, | |||
297 | /* split segment up */ | 246 | /* split segment up */ |
298 | for (addr = start_addr, remaining = total_length; remaining > 0; | 247 | for (addr = start_addr, remaining = total_length; remaining > 0; |
299 | addr += length, remaining -= length) { | 248 | addr += length, remaining -= length) { |
300 | sbale = zfcp_qdio_sbale_next(fsf_req, sbtype); | 249 | sbale = zfcp_qdio_sbale_next(qdio, q_req, sbtype); |
301 | if (!sbale) { | 250 | if (!sbale) { |
302 | atomic_inc(&fsf_req->adapter->qdio_outb_full); | 251 | atomic_inc(&qdio->req_q_full); |
303 | zfcp_qdio_undo_sbals(fsf_req); | 252 | zfcp_qdio_undo_sbals(qdio, q_req); |
304 | return -EINVAL; | 253 | return -EINVAL; |
305 | } | 254 | } |
306 | 255 | ||
@@ -322,29 +271,31 @@ static int zfcp_qdio_fill_sbals(struct zfcp_fsf_req *fsf_req, | |||
322 | * @max_sbals: upper bound for number of SBALs to be used | 271 | * @max_sbals: upper bound for number of SBALs to be used |
323 | * Returns: number of bytes, or error (negativ) | 272 | * Returns: number of bytes, or error (negativ) |
324 | */ | 273 | */ |
325 | int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, | 274 | int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, |
326 | struct scatterlist *sg, int max_sbals) | 275 | struct zfcp_queue_req *q_req, |
276 | unsigned long sbtype, struct scatterlist *sg, | ||
277 | int max_sbals) | ||
327 | { | 278 | { |
328 | struct qdio_buffer_element *sbale; | 279 | struct qdio_buffer_element *sbale; |
329 | int retval, bytes = 0; | 280 | int retval, bytes = 0; |
330 | 281 | ||
331 | /* figure out last allowed SBAL */ | 282 | /* figure out last allowed SBAL */ |
332 | zfcp_qdio_sbal_limit(fsf_req, max_sbals); | 283 | zfcp_qdio_sbal_limit(qdio, q_req, max_sbals); |
333 | 284 | ||
334 | /* set storage-block type for this request */ | 285 | /* set storage-block type for this request */ |
335 | sbale = zfcp_qdio_sbale_req(fsf_req); | 286 | sbale = zfcp_qdio_sbale_req(qdio, q_req); |
336 | sbale->flags |= sbtype; | 287 | sbale->flags |= sbtype; |
337 | 288 | ||
338 | for (; sg; sg = sg_next(sg)) { | 289 | for (; sg; sg = sg_next(sg)) { |
339 | retval = zfcp_qdio_fill_sbals(fsf_req, sbtype, sg_virt(sg), | 290 | retval = zfcp_qdio_fill_sbals(qdio, q_req, sbtype, |
340 | sg->length); | 291 | sg_virt(sg), sg->length); |
341 | if (retval < 0) | 292 | if (retval < 0) |
342 | return retval; | 293 | return retval; |
343 | bytes += sg->length; | 294 | bytes += sg->length; |
344 | } | 295 | } |
345 | 296 | ||
346 | /* assume that no other SBALEs are to follow in the same SBAL */ | 297 | /* assume that no other SBALEs are to follow in the same SBAL */ |
347 | sbale = zfcp_qdio_sbale_curr(fsf_req); | 298 | sbale = zfcp_qdio_sbale_curr(qdio, q_req); |
348 | sbale->flags |= SBAL_FLAGS_LAST_ENTRY; | 299 | sbale->flags |= SBAL_FLAGS_LAST_ENTRY; |
349 | 300 | ||
350 | return bytes; | 301 | return bytes; |
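Callers of zfcp_qdio_sbals_from_sg() now pass the qdio instance plus the embedded queue_req and treat a negative return as "request did not fit into the allowed SBALs". A condensed version of the FCP command usage from earlier in this patch (error handling simplified):

	real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->queue_req,
					     SBAL_FLAGS0_TYPE_READ,
					     scsi_sglist(scsi_cmnd),
					     FSF_MAX_SBALS_PER_REQ);
	if (real_bytes < 0) {
		zfcp_fsf_req_free(req);		/* simplified; see the full hunk above */
		return -EIO;
	}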
@@ -352,21 +303,22 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, | |||
352 | 303 | ||
353 | /** | 304 | /** |
354 | * zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO | 305 | * zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO |
355 | * @fsf_req: pointer to struct zfcp_fsf_req | 306 | * @qdio: pointer to struct zfcp_qdio |
307 | * @q_req: pointer to struct zfcp_queue_req | ||
356 | * Returns: 0 on success, error otherwise | 308 | * Returns: 0 on success, error otherwise |
357 | */ | 309 | */ |
358 | int zfcp_qdio_send(struct zfcp_fsf_req *fsf_req) | 310 | int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req) |
359 | { | 311 | { |
360 | struct zfcp_adapter *adapter = fsf_req->adapter; | 312 | struct zfcp_qdio_queue *req_q = &qdio->req_q; |
361 | struct zfcp_qdio_queue *req_q = &adapter->req_q; | 313 | int first = q_req->sbal_first; |
362 | int first = fsf_req->sbal_first; | 314 | int count = q_req->sbal_number; |
363 | int count = fsf_req->sbal_number; | ||
364 | int retval; | 315 | int retval; |
365 | unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT; | 316 | unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT; |
366 | 317 | ||
367 | zfcp_qdio_account(adapter); | 318 | zfcp_qdio_account(qdio); |
368 | 319 | ||
369 | retval = do_QDIO(adapter->ccw_device, qdio_flags, 0, first, count); | 320 | retval = do_QDIO(qdio->adapter->ccw_device, qdio_flags, 0, first, |
321 | count); | ||
370 | if (unlikely(retval)) { | 322 | if (unlikely(retval)) { |
371 | zfcp_qdio_zero_sbals(req_q->sbal, first, count); | 323 | zfcp_qdio_zero_sbals(req_q->sbal, first, count); |
372 | return retval; | 324 | return retval; |
@@ -379,63 +331,69 @@ int zfcp_qdio_send(struct zfcp_fsf_req *fsf_req) | |||
379 | return 0; | 331 | return 0; |
380 | } | 332 | } |
381 | 333 | ||
334 | |||
335 | static void zfcp_qdio_setup_init_data(struct qdio_initialize *id, | ||
336 | struct zfcp_qdio *qdio) | ||
337 | { | ||
338 | |||
339 | id->cdev = qdio->adapter->ccw_device; | ||
340 | id->q_format = QDIO_ZFCP_QFMT; | ||
341 | memcpy(id->adapter_name, dev_name(&id->cdev->dev), 8); | ||
342 | ASCEBC(id->adapter_name, 8); | ||
343 | id->qib_param_field_format = 0; | ||
344 | id->qib_param_field = NULL; | ||
345 | id->input_slib_elements = NULL; | ||
346 | id->output_slib_elements = NULL; | ||
347 | id->no_input_qs = 1; | ||
348 | id->no_output_qs = 1; | ||
349 | id->input_handler = zfcp_qdio_int_resp; | ||
350 | id->output_handler = zfcp_qdio_int_req; | ||
351 | id->int_parm = (unsigned long) qdio; | ||
352 | id->flags = QDIO_INBOUND_0COPY_SBALS | | ||
353 | QDIO_OUTBOUND_0COPY_SBALS | QDIO_USE_OUTBOUND_PCIS; | ||
354 | id->input_sbal_addr_array = (void **) (qdio->resp_q.sbal); | ||
355 | id->output_sbal_addr_array = (void **) (qdio->req_q.sbal); | ||
356 | |||
357 | } | ||
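With the qdio_initialize setup factored into zfcp_qdio_setup_init_data(), both zfcp_qdio_allocate() and zfcp_qdio_open() (which declares its own struct qdio_initialize further down) fill a stack copy on demand instead of keeping one in the adapter structure. Usage as in the allocate path below, slightly condensed:

	struct qdio_initialize init_data;

	zfcp_qdio_setup_init_data(&init_data, qdio);
	return qdio_allocate(&init_data);	/* negative value propagated on failure */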
382 | /** | 358 | /** |
383 | * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data | 359 | * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data |
384 | * @adapter: pointer to struct zfcp_adapter | 360 | * @adapter: pointer to struct zfcp_adapter |
385 | * Returns: -ENOMEM on memory allocation error or return value from | 361 | * Returns: -ENOMEM on memory allocation error or return value from |
386 | * qdio_allocate | 362 | * qdio_allocate |
387 | */ | 363 | */ |
388 | int zfcp_qdio_allocate(struct zfcp_adapter *adapter) | 364 | static int zfcp_qdio_allocate(struct zfcp_qdio *qdio) |
389 | { | 365 | { |
390 | struct qdio_initialize *init_data; | 366 | struct qdio_initialize init_data; |
391 | 367 | ||
392 | if (zfcp_qdio_buffers_enqueue(adapter->req_q.sbal) || | 368 | if (zfcp_qdio_buffers_enqueue(qdio->req_q.sbal) || |
393 | zfcp_qdio_buffers_enqueue(adapter->resp_q.sbal)) | 369 | zfcp_qdio_buffers_enqueue(qdio->resp_q.sbal)) |
394 | return -ENOMEM; | 370 | return -ENOMEM; |
395 | 371 | ||
396 | init_data = &adapter->qdio_init_data; | 372 | zfcp_qdio_setup_init_data(&init_data, qdio); |
397 | 373 | ||
398 | init_data->cdev = adapter->ccw_device; | 374 | return qdio_allocate(&init_data); |
399 | init_data->q_format = QDIO_ZFCP_QFMT; | ||
400 | memcpy(init_data->adapter_name, dev_name(&adapter->ccw_device->dev), 8); | ||
401 | ASCEBC(init_data->adapter_name, 8); | ||
402 | init_data->qib_param_field_format = 0; | ||
403 | init_data->qib_param_field = NULL; | ||
404 | init_data->input_slib_elements = NULL; | ||
405 | init_data->output_slib_elements = NULL; | ||
406 | init_data->no_input_qs = 1; | ||
407 | init_data->no_output_qs = 1; | ||
408 | init_data->input_handler = zfcp_qdio_int_resp; | ||
409 | init_data->output_handler = zfcp_qdio_int_req; | ||
410 | init_data->int_parm = (unsigned long) adapter; | ||
411 | init_data->flags = QDIO_INBOUND_0COPY_SBALS | | ||
412 | QDIO_OUTBOUND_0COPY_SBALS | QDIO_USE_OUTBOUND_PCIS; | ||
413 | init_data->input_sbal_addr_array = | ||
414 | (void **) (adapter->resp_q.sbal); | ||
415 | init_data->output_sbal_addr_array = | ||
416 | (void **) (adapter->req_q.sbal); | ||
417 | |||
418 | return qdio_allocate(init_data); | ||
419 | } | 375 | } |
420 | 376 | ||
421 | /** | 377 | /** |
422 | * zfcp_qdio_close - close qdio queues for an adapter | 378 | * zfcp_qdio_close - close qdio queues for an adapter |
379 | * @qdio: pointer to struct zfcp_qdio | ||
423 | */ | 380 | */ |
424 | void zfcp_qdio_close(struct zfcp_adapter *adapter) | 381 | void zfcp_qdio_close(struct zfcp_qdio *qdio) |
425 | { | 382 | { |
426 | struct zfcp_qdio_queue *req_q; | 383 | struct zfcp_qdio_queue *req_q; |
427 | int first, count; | 384 | int first, count; |
428 | 385 | ||
429 | if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) | 386 | if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) |
430 | return; | 387 | return; |
431 | 388 | ||
432 | /* clear QDIOUP flag, so that do_QDIO is not called during qdio_shutdown */ | 389 | /* clear QDIOUP flag, so that do_QDIO is not called during qdio_shutdown */ |
433 | req_q = &adapter->req_q; | 390 | req_q = &qdio->req_q; |
434 | spin_lock_bh(&adapter->req_q_lock); | 391 | spin_lock_bh(&qdio->req_q_lock); |
435 | atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status); | 392 | atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status); |
436 | spin_unlock_bh(&adapter->req_q_lock); | 393 | spin_unlock_bh(&qdio->req_q_lock); |
437 | 394 | ||
438 | qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR); | 395 | qdio_shutdown(qdio->adapter->ccw_device, |
396 | QDIO_FLAG_CLEANUP_USING_CLEAR); | ||
439 | 397 | ||
440 | /* cleanup used outbound sbals */ | 398 | /* cleanup used outbound sbals */ |
441 | count = atomic_read(&req_q->count); | 399 | count = atomic_read(&req_q->count); |
@@ -446,50 +404,99 @@ void zfcp_qdio_close(struct zfcp_adapter *adapter) | |||
446 | } | 404 | } |
447 | req_q->first = 0; | 405 | req_q->first = 0; |
448 | atomic_set(&req_q->count, 0); | 406 | atomic_set(&req_q->count, 0); |
449 | adapter->resp_q.first = 0; | 407 | qdio->resp_q.first = 0; |
450 | atomic_set(&adapter->resp_q.count, 0); | 408 | atomic_set(&qdio->resp_q.count, 0); |
451 | } | 409 | } |
452 | 410 | ||
453 | /** | 411 | /** |
454 | * zfcp_qdio_open - prepare and initialize response queue | 412 | * zfcp_qdio_open - prepare and initialize response queue |
455 | * @adapter: pointer to struct zfcp_adapter | 413 | * @qdio: pointer to struct zfcp_qdio |
456 | * Returns: 0 on success, otherwise -EIO | 414 | * Returns: 0 on success, otherwise -EIO |
457 | */ | 415 | */ |
458 | int zfcp_qdio_open(struct zfcp_adapter *adapter) | 416 | int zfcp_qdio_open(struct zfcp_qdio *qdio) |
459 | { | 417 | { |
460 | struct qdio_buffer_element *sbale; | 418 | struct qdio_buffer_element *sbale; |
419 | struct qdio_initialize init_data; | ||
420 | struct ccw_device *cdev = qdio->adapter->ccw_device; | ||
461 | int cc; | 421 | int cc; |
462 | 422 | ||
463 | if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP) | 423 | if (atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP) |
464 | return -EIO; | 424 | return -EIO; |
465 | 425 | ||
466 | if (qdio_establish(&adapter->qdio_init_data)) | 426 | zfcp_qdio_setup_init_data(&init_data, qdio); |
427 | |||
428 | if (qdio_establish(&init_data)) | ||
467 | goto failed_establish; | 429 | goto failed_establish; |
468 | 430 | ||
469 | if (qdio_activate(adapter->ccw_device)) | 431 | if (qdio_activate(cdev)) |
470 | goto failed_qdio; | 432 | goto failed_qdio; |
471 | 433 | ||
472 | for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) { | 434 | for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) { |
473 | sbale = &(adapter->resp_q.sbal[cc]->element[0]); | 435 | sbale = &(qdio->resp_q.sbal[cc]->element[0]); |
474 | sbale->length = 0; | 436 | sbale->length = 0; |
475 | sbale->flags = SBAL_FLAGS_LAST_ENTRY; | 437 | sbale->flags = SBAL_FLAGS_LAST_ENTRY; |
476 | sbale->addr = NULL; | 438 | sbale->addr = NULL; |
477 | } | 439 | } |
478 | 440 | ||
479 | if (do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_INPUT, 0, 0, | 441 | if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, |
480 | QDIO_MAX_BUFFERS_PER_Q)) | 442 | QDIO_MAX_BUFFERS_PER_Q)) |
481 | goto failed_qdio; | 443 | goto failed_qdio; |
482 | 444 | ||
483 | /* set index of first available SBALS / number of available SBALS */ | 445 | /* set index of first available SBALS / number of available SBALS */ |
484 | adapter->req_q.first = 0; | 446 | qdio->req_q.first = 0; |
485 | atomic_set(&adapter->req_q.count, QDIO_MAX_BUFFERS_PER_Q); | 447 | atomic_set(&qdio->req_q.count, QDIO_MAX_BUFFERS_PER_Q); |
486 | 448 | ||
487 | return 0; | 449 | return 0; |
488 | 450 | ||
489 | failed_qdio: | 451 | failed_qdio: |
490 | qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR); | 452 | qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); |
491 | failed_establish: | 453 | failed_establish: |
492 | dev_err(&adapter->ccw_device->dev, | 454 | dev_err(&cdev->dev, |
493 | "Setting up the QDIO connection to the FCP adapter failed\n"); | 455 | "Setting up the QDIO connection to the FCP adapter failed\n"); |
494 | return -EIO; | 456 | return -EIO; |
495 | } | 457 | } |
458 | |||
459 | void zfcp_qdio_destroy(struct zfcp_qdio *qdio) | ||
460 | { | ||
461 | struct qdio_buffer **sbal_req, **sbal_resp; | ||
462 | int p; | ||
463 | |||
464 | if (!qdio) | ||
465 | return; | ||
466 | |||
467 | if (qdio->adapter->ccw_device) | ||
468 | qdio_free(qdio->adapter->ccw_device); | ||
469 | |||
470 | sbal_req = qdio->req_q.sbal; | ||
471 | sbal_resp = qdio->resp_q.sbal; | ||
472 | |||
473 | for (p = 0; p < QDIO_MAX_BUFFERS_PER_Q; p += QBUFF_PER_PAGE) { | ||
474 | free_page((unsigned long) sbal_req[p]); | ||
475 | free_page((unsigned long) sbal_resp[p]); | ||
476 | } | ||
477 | |||
478 | kfree(qdio); | ||
479 | } | ||
480 | |||
481 | int zfcp_qdio_setup(struct zfcp_adapter *adapter) | ||
482 | { | ||
483 | struct zfcp_qdio *qdio; | ||
484 | |||
485 | qdio = kzalloc(sizeof(struct zfcp_qdio), GFP_KERNEL); | ||
486 | if (!qdio) | ||
487 | return -ENOMEM; | ||
488 | |||
489 | qdio->adapter = adapter; | ||
490 | |||
491 | if (zfcp_qdio_allocate(qdio)) { | ||
492 | zfcp_qdio_destroy(qdio); | ||
493 | return -ENOMEM; | ||
494 | } | ||
495 | |||
496 | spin_lock_init(&qdio->req_q_lock); | ||
497 | spin_lock_init(&qdio->stat_lock); | ||
498 | |||
499 | adapter->qdio = qdio; | ||
500 | return 0; | ||
501 | } | ||
502 | |||
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 6925a1784682..3ff726afafc6 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c | |||
@@ -9,8 +9,9 @@ | |||
9 | #define KMSG_COMPONENT "zfcp" | 9 | #define KMSG_COMPONENT "zfcp" |
10 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | 10 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt |
11 | 11 | ||
12 | #include "zfcp_ext.h" | ||
13 | #include <asm/atomic.h> | 12 | #include <asm/atomic.h> |
13 | #include "zfcp_ext.h" | ||
14 | #include "zfcp_dbf.h" | ||
14 | 15 | ||
15 | static unsigned int default_depth = 32; | 16 | static unsigned int default_depth = 32; |
16 | module_param_named(queue_depth, default_depth, uint, 0600); | 17 | module_param_named(queue_depth, default_depth, uint, 0600); |
@@ -52,11 +53,11 @@ static int zfcp_scsi_slave_configure(struct scsi_device *sdp) | |||
52 | 53 | ||
53 | static void zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result) | 54 | static void zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result) |
54 | { | 55 | { |
56 | struct zfcp_adapter *adapter = | ||
57 | (struct zfcp_adapter *) scpnt->device->host->hostdata[0]; | ||
55 | set_host_byte(scpnt, result); | 58 | set_host_byte(scpnt, result); |
56 | if ((scpnt->device != NULL) && (scpnt->device->host != NULL)) | 59 | if ((scpnt->device != NULL) && (scpnt->device->host != NULL)) |
57 | zfcp_scsi_dbf_event_result("fail", 4, | 60 | zfcp_dbf_scsi_result("fail", 4, adapter->dbf, scpnt, NULL); |
58 | (struct zfcp_adapter*) scpnt->device->host->hostdata[0], | ||
59 | scpnt, NULL); | ||
60 | /* return directly */ | 61 | /* return directly */ |
61 | scpnt->scsi_done(scpnt); | 62 | scpnt->scsi_done(scpnt); |
62 | } | 63 | } |
@@ -92,7 +93,7 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt, | |||
92 | scsi_result = fc_remote_port_chkready(rport); | 93 | scsi_result = fc_remote_port_chkready(rport); |
93 | if (unlikely(scsi_result)) { | 94 | if (unlikely(scsi_result)) { |
94 | scpnt->result = scsi_result; | 95 | scpnt->result = scsi_result; |
95 | zfcp_scsi_dbf_event_result("fail", 4, adapter, scpnt, NULL); | 96 | zfcp_dbf_scsi_result("fail", 4, adapter->dbf, scpnt, NULL); |
96 | scpnt->scsi_done(scpnt); | 97 | scpnt->scsi_done(scpnt); |
97 | return 0; | 98 | return 0; |
98 | } | 99 | } |
@@ -180,8 +181,8 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) | |||
180 | spin_unlock(&adapter->req_list_lock); | 181 | spin_unlock(&adapter->req_list_lock); |
181 | if (!old_req) { | 182 | if (!old_req) { |
182 | write_unlock_irqrestore(&adapter->abort_lock, flags); | 183 | write_unlock_irqrestore(&adapter->abort_lock, flags); |
183 | zfcp_scsi_dbf_event_abort("lte1", adapter, scpnt, NULL, | 184 | zfcp_dbf_scsi_abort("lte1", adapter->dbf, scpnt, NULL, |
184 | old_reqid); | 185 | old_reqid); |
185 | return FAILED; /* completion could be in progress */ | 186 | return FAILED; /* completion could be in progress */ |
186 | } | 187 | } |
187 | old_req->data = NULL; | 188 | old_req->data = NULL; |
@@ -197,16 +198,15 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) | |||
197 | zfcp_erp_wait(adapter); | 198 | zfcp_erp_wait(adapter); |
198 | if (!(atomic_read(&adapter->status) & | 199 | if (!(atomic_read(&adapter->status) & |
199 | ZFCP_STATUS_COMMON_RUNNING)) { | 200 | ZFCP_STATUS_COMMON_RUNNING)) { |
200 | zfcp_scsi_dbf_event_abort("nres", adapter, scpnt, NULL, | 201 | zfcp_dbf_scsi_abort("nres", adapter->dbf, scpnt, NULL, |
201 | old_reqid); | 202 | old_reqid); |
202 | return SUCCESS; | 203 | return SUCCESS; |
203 | } | 204 | } |
204 | } | 205 | } |
205 | if (!abrt_req) | 206 | if (!abrt_req) |
206 | return FAILED; | 207 | return FAILED; |
207 | 208 | ||
208 | wait_event(abrt_req->completion_wq, | 209 | wait_for_completion(&abrt_req->completion); |
209 | abrt_req->status & ZFCP_STATUS_FSFREQ_COMPLETED); | ||
210 | 210 | ||
211 | if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED) | 211 | if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED) |
212 | dbf_tag = "okay"; | 212 | dbf_tag = "okay"; |
@@ -216,7 +216,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) | |||
216 | dbf_tag = "fail"; | 216 | dbf_tag = "fail"; |
217 | retval = FAILED; | 217 | retval = FAILED; |
218 | } | 218 | } |
219 | zfcp_scsi_dbf_event_abort(dbf_tag, adapter, scpnt, abrt_req, old_reqid); | 219 | zfcp_dbf_scsi_abort(dbf_tag, adapter->dbf, scpnt, abrt_req, old_reqid); |
220 | zfcp_fsf_req_free(abrt_req); | 220 | zfcp_fsf_req_free(abrt_req); |
221 | return retval; | 221 | return retval; |
222 | } | 222 | } |
@@ -225,7 +225,7 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags) | |||
225 | { | 225 | { |
226 | struct zfcp_unit *unit = scpnt->device->hostdata; | 226 | struct zfcp_unit *unit = scpnt->device->hostdata; |
227 | struct zfcp_adapter *adapter = unit->port->adapter; | 227 | struct zfcp_adapter *adapter = unit->port->adapter; |
228 | struct zfcp_fsf_req *fsf_req; | 228 | struct zfcp_fsf_req *fsf_req = NULL; |
229 | int retval = SUCCESS; | 229 | int retval = SUCCESS; |
230 | int retry = 3; | 230 | int retry = 3; |
231 | 231 | ||
@@ -237,25 +237,23 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags) | |||
237 | zfcp_erp_wait(adapter); | 237 | zfcp_erp_wait(adapter); |
238 | if (!(atomic_read(&adapter->status) & | 238 | if (!(atomic_read(&adapter->status) & |
239 | ZFCP_STATUS_COMMON_RUNNING)) { | 239 | ZFCP_STATUS_COMMON_RUNNING)) { |
240 | zfcp_scsi_dbf_event_devreset("nres", tm_flags, unit, | 240 | zfcp_dbf_scsi_devreset("nres", tm_flags, unit, scpnt); |
241 | scpnt); | ||
242 | return SUCCESS; | 241 | return SUCCESS; |
243 | } | 242 | } |
244 | } | 243 | } |
245 | if (!fsf_req) | 244 | if (!fsf_req) |
246 | return FAILED; | 245 | return FAILED; |
247 | 246 | ||
248 | wait_event(fsf_req->completion_wq, | 247 | wait_for_completion(&fsf_req->completion); |
249 | fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED); | ||
250 | 248 | ||
251 | if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) { | 249 | if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) { |
252 | zfcp_scsi_dbf_event_devreset("fail", tm_flags, unit, scpnt); | 250 | zfcp_dbf_scsi_devreset("fail", tm_flags, unit, scpnt); |
253 | retval = FAILED; | 251 | retval = FAILED; |
254 | } else if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP) { | 252 | } else if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP) { |
255 | zfcp_scsi_dbf_event_devreset("nsup", tm_flags, unit, scpnt); | 253 | zfcp_dbf_scsi_devreset("nsup", tm_flags, unit, scpnt); |
256 | retval = FAILED; | 254 | retval = FAILED; |
257 | } else | 255 | } else |
258 | zfcp_scsi_dbf_event_devreset("okay", tm_flags, unit, scpnt); | 256 | zfcp_dbf_scsi_devreset("okay", tm_flags, unit, scpnt); |
259 | 257 | ||
260 | zfcp_fsf_req_free(fsf_req); | 258 | zfcp_fsf_req_free(fsf_req); |
261 | return retval; | 259 | return retval; |
@@ -430,7 +428,7 @@ static struct fc_host_statistics *zfcp_get_fc_host_stats(struct Scsi_Host *host) | |||
430 | if (!data) | 428 | if (!data) |
431 | return NULL; | 429 | return NULL; |
432 | 430 | ||
433 | ret = zfcp_fsf_exchange_port_data_sync(adapter, data); | 431 | ret = zfcp_fsf_exchange_port_data_sync(adapter->qdio, data); |
434 | if (ret) { | 432 | if (ret) { |
435 | kfree(data); | 433 | kfree(data); |
436 | return NULL; | 434 | return NULL; |
@@ -459,7 +457,7 @@ static void zfcp_reset_fc_host_stats(struct Scsi_Host *shost) | |||
459 | if (!data) | 457 | if (!data) |
460 | return; | 458 | return; |
461 | 459 | ||
462 | ret = zfcp_fsf_exchange_port_data_sync(adapter, data); | 460 | ret = zfcp_fsf_exchange_port_data_sync(adapter->qdio, data); |
463 | if (ret) | 461 | if (ret) |
464 | kfree(data); | 462 | kfree(data); |
465 | else { | 463 | else { |
@@ -493,21 +491,6 @@ static void zfcp_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout) | |||
493 | } | 491 | } |
494 | 492 | ||
495 | /** | 493 | /** |
496 | * zfcp_scsi_dev_loss_tmo_callbk - Free any reference to rport | ||
497 | * @rport: The rport that is about to be deleted. | ||
498 | */ | ||
499 | static void zfcp_scsi_dev_loss_tmo_callbk(struct fc_rport *rport) | ||
500 | { | ||
501 | struct zfcp_port *port; | ||
502 | |||
503 | write_lock_irq(&zfcp_data.config_lock); | ||
504 | port = rport->dd_data; | ||
505 | if (port) | ||
506 | port->rport = NULL; | ||
507 | write_unlock_irq(&zfcp_data.config_lock); | ||
508 | } | ||
509 | |||
510 | /** | ||
511 | * zfcp_scsi_terminate_rport_io - Terminate all I/O on a rport | 494 | * zfcp_scsi_terminate_rport_io - Terminate all I/O on a rport |
512 | * @rport: The FC rport on which to terminate I/O | 495 | * @rport: The FC rport on which to terminate I/O |
513 | * | 496 | * |
@@ -518,9 +501,12 @@ static void zfcp_scsi_dev_loss_tmo_callbk(struct fc_rport *rport) | |||
518 | static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport) | 501 | static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport) |
519 | { | 502 | { |
520 | struct zfcp_port *port; | 503 | struct zfcp_port *port; |
504 | struct Scsi_Host *shost = rport_to_shost(rport); | ||
505 | struct zfcp_adapter *adapter = | ||
506 | (struct zfcp_adapter *)shost->hostdata[0]; | ||
521 | 507 | ||
522 | write_lock_irq(&zfcp_data.config_lock); | 508 | write_lock_irq(&zfcp_data.config_lock); |
523 | port = rport->dd_data; | 509 | port = zfcp_get_port_by_wwpn(adapter, rport->port_name); |
524 | if (port) | 510 | if (port) |
525 | zfcp_port_get(port); | 511 | zfcp_port_get(port); |
526 | write_unlock_irq(&zfcp_data.config_lock); | 512 | write_unlock_irq(&zfcp_data.config_lock); |
@@ -552,7 +538,6 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port) | |||
552 | return; | 538 | return; |
553 | } | 539 | } |
554 | 540 | ||
555 | rport->dd_data = port; | ||
556 | rport->maxframe_size = port->maxframe_size; | 541 | rport->maxframe_size = port->maxframe_size; |
557 | rport->supported_classes = port->supported_classes; | 542 | rport->supported_classes = port->supported_classes; |
558 | port->rport = rport; | 543 | port->rport = rport; |
@@ -573,7 +558,7 @@ void zfcp_scsi_schedule_rport_register(struct zfcp_port *port) | |||
573 | zfcp_port_get(port); | 558 | zfcp_port_get(port); |
574 | port->rport_task = RPORT_ADD; | 559 | port->rport_task = RPORT_ADD; |
575 | 560 | ||
576 | if (!queue_work(zfcp_data.work_queue, &port->rport_work)) | 561 | if (!queue_work(port->adapter->work_queue, &port->rport_work)) |
577 | zfcp_port_put(port); | 562 | zfcp_port_put(port); |
578 | } | 563 | } |
579 | 564 | ||
@@ -582,8 +567,11 @@ void zfcp_scsi_schedule_rport_block(struct zfcp_port *port) | |||
582 | zfcp_port_get(port); | 567 | zfcp_port_get(port); |
583 | port->rport_task = RPORT_DEL; | 568 | port->rport_task = RPORT_DEL; |
584 | 569 | ||
585 | if (!queue_work(zfcp_data.work_queue, &port->rport_work)) | 570 | if (port->rport && queue_work(port->adapter->work_queue, |
586 | zfcp_port_put(port); | 571 | &port->rport_work)) |
572 | return; | ||
573 | |||
574 | zfcp_port_put(port); | ||
587 | } | 575 | } |
588 | 576 | ||
589 | void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *adapter) | 577 | void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *adapter) |
@@ -662,7 +650,6 @@ struct fc_function_template zfcp_transport_functions = { | |||
662 | .reset_fc_host_stats = zfcp_reset_fc_host_stats, | 650 | .reset_fc_host_stats = zfcp_reset_fc_host_stats, |
663 | .set_rport_dev_loss_tmo = zfcp_set_rport_dev_loss_tmo, | 651 | .set_rport_dev_loss_tmo = zfcp_set_rport_dev_loss_tmo, |
664 | .get_host_port_state = zfcp_get_host_port_state, | 652 | .get_host_port_state = zfcp_get_host_port_state, |
665 | .dev_loss_tmo_callbk = zfcp_scsi_dev_loss_tmo_callbk, | ||
666 | .terminate_rport_io = zfcp_scsi_terminate_rport_io, | 653 | .terminate_rport_io = zfcp_scsi_terminate_rport_io, |
667 | .show_host_port_state = 1, | 654 | .show_host_port_state = 1, |
668 | .bsg_request = zfcp_execute_fc_job, | 655 | .bsg_request = zfcp_execute_fc_job, |
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c index 0fe5cce818cb..079a8cf518a3 100644 --- a/drivers/s390/scsi/zfcp_sysfs.c +++ b/drivers/s390/scsi/zfcp_sysfs.c | |||
@@ -88,7 +88,7 @@ static ssize_t zfcp_sysfs_##_feat##_failed_store(struct device *dev, \ | |||
88 | unsigned long val; \ | 88 | unsigned long val; \ |
89 | int retval = 0; \ | 89 | int retval = 0; \ |
90 | \ | 90 | \ |
91 | down(&zfcp_data.config_sema); \ | 91 | mutex_lock(&zfcp_data.config_mutex); \ |
92 | if (atomic_read(&_feat->status) & ZFCP_STATUS_COMMON_REMOVE) { \ | 92 | if (atomic_read(&_feat->status) & ZFCP_STATUS_COMMON_REMOVE) { \ |
93 | retval = -EBUSY; \ | 93 | retval = -EBUSY; \ |
94 | goto out; \ | 94 | goto out; \ |
@@ -105,7 +105,7 @@ static ssize_t zfcp_sysfs_##_feat##_failed_store(struct device *dev, \ | |||
105 | _reopen_id, NULL); \ | 105 | _reopen_id, NULL); \ |
106 | zfcp_erp_wait(_adapter); \ | 106 | zfcp_erp_wait(_adapter); \ |
107 | out: \ | 107 | out: \ |
108 | up(&zfcp_data.config_sema); \ | 108 | mutex_unlock(&zfcp_data.config_mutex); \ |
109 | return retval ? retval : (ssize_t) count; \ | 109 | return retval ? retval : (ssize_t) count; \ |
110 | } \ | 110 | } \ |
111 | static ZFCP_DEV_ATTR(_feat, failed, S_IWUSR | S_IRUGO, \ | 111 | static ZFCP_DEV_ATTR(_feat, failed, S_IWUSR | S_IRUGO, \ |
@@ -126,7 +126,7 @@ static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev, | |||
126 | if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_REMOVE) | 126 | if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_REMOVE) |
127 | return -EBUSY; | 127 | return -EBUSY; |
128 | 128 | ||
129 | ret = zfcp_scan_ports(adapter); | 129 | ret = zfcp_fc_scan_ports(adapter); |
130 | return ret ? ret : (ssize_t) count; | 130 | return ret ? ret : (ssize_t) count; |
131 | } | 131 | } |
132 | static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL, | 132 | static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL, |
@@ -142,7 +142,7 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev, | |||
142 | int retval = 0; | 142 | int retval = 0; |
143 | LIST_HEAD(port_remove_lh); | 143 | LIST_HEAD(port_remove_lh); |
144 | 144 | ||
145 | down(&zfcp_data.config_sema); | 145 | mutex_lock(&zfcp_data.config_mutex); |
146 | if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_REMOVE) { | 146 | if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_REMOVE) { |
147 | retval = -EBUSY; | 147 | retval = -EBUSY; |
148 | goto out; | 148 | goto out; |
@@ -173,7 +173,7 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev, | |||
173 | zfcp_port_put(port); | 173 | zfcp_port_put(port); |
174 | zfcp_port_dequeue(port); | 174 | zfcp_port_dequeue(port); |
175 | out: | 175 | out: |
176 | up(&zfcp_data.config_sema); | 176 | mutex_unlock(&zfcp_data.config_mutex); |
177 | return retval ? retval : (ssize_t) count; | 177 | return retval ? retval : (ssize_t) count; |
178 | } | 178 | } |
179 | static ZFCP_DEV_ATTR(adapter, port_remove, S_IWUSR, NULL, | 179 | static ZFCP_DEV_ATTR(adapter, port_remove, S_IWUSR, NULL, |
@@ -207,7 +207,7 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev, | |||
207 | u64 fcp_lun; | 207 | u64 fcp_lun; |
208 | int retval = -EINVAL; | 208 | int retval = -EINVAL; |
209 | 209 | ||
210 | down(&zfcp_data.config_sema); | 210 | mutex_lock(&zfcp_data.config_mutex); |
211 | if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE) { | 211 | if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE) { |
212 | retval = -EBUSY; | 212 | retval = -EBUSY; |
213 | goto out; | 213 | goto out; |
@@ -226,7 +226,7 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev, | |||
226 | zfcp_erp_wait(unit->port->adapter); | 226 | zfcp_erp_wait(unit->port->adapter); |
227 | zfcp_unit_put(unit); | 227 | zfcp_unit_put(unit); |
228 | out: | 228 | out: |
229 | up(&zfcp_data.config_sema); | 229 | mutex_unlock(&zfcp_data.config_mutex); |
230 | return retval ? retval : (ssize_t) count; | 230 | return retval ? retval : (ssize_t) count; |
231 | } | 231 | } |
232 | static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store); | 232 | static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store); |
@@ -241,7 +241,7 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev, | |||
241 | int retval = 0; | 241 | int retval = 0; |
242 | LIST_HEAD(unit_remove_lh); | 242 | LIST_HEAD(unit_remove_lh); |
243 | 243 | ||
244 | down(&zfcp_data.config_sema); | 244 | mutex_lock(&zfcp_data.config_mutex); |
245 | if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE) { | 245 | if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE) { |
246 | retval = -EBUSY; | 246 | retval = -EBUSY; |
247 | goto out; | 247 | goto out; |
@@ -282,7 +282,7 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev, | |||
282 | zfcp_unit_put(unit); | 282 | zfcp_unit_put(unit); |
283 | zfcp_unit_dequeue(unit); | 283 | zfcp_unit_dequeue(unit); |
284 | out: | 284 | out: |
285 | up(&zfcp_data.config_sema); | 285 | mutex_unlock(&zfcp_data.config_mutex); |
286 | return retval ? retval : (ssize_t) count; | 286 | return retval ? retval : (ssize_t) count; |
287 | } | 287 | } |
288 | static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store); | 288 | static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store); |
@@ -425,7 +425,7 @@ static ssize_t zfcp_sysfs_adapter_util_show(struct device *dev, | |||
425 | if (!qtcb_port) | 425 | if (!qtcb_port) |
426 | return -ENOMEM; | 426 | return -ENOMEM; |
427 | 427 | ||
428 | retval = zfcp_fsf_exchange_port_data_sync(adapter, qtcb_port); | 428 | retval = zfcp_fsf_exchange_port_data_sync(adapter->qdio, qtcb_port); |
429 | if (!retval) | 429 | if (!retval) |
430 | retval = sprintf(buf, "%u %u %u\n", qtcb_port->cp_util, | 430 | retval = sprintf(buf, "%u %u %u\n", qtcb_port->cp_util, |
431 | qtcb_port->cb_util, qtcb_port->a_util); | 431 | qtcb_port->cb_util, qtcb_port->a_util); |
@@ -451,7 +451,7 @@ static int zfcp_sysfs_adapter_ex_config(struct device *dev, | |||
451 | if (!qtcb_config) | 451 | if (!qtcb_config) |
452 | return -ENOMEM; | 452 | return -ENOMEM; |
453 | 453 | ||
454 | retval = zfcp_fsf_exchange_config_data_sync(adapter, qtcb_config); | 454 | retval = zfcp_fsf_exchange_config_data_sync(adapter->qdio, qtcb_config); |
455 | if (!retval) | 455 | if (!retval) |
456 | *stat_inf = qtcb_config->stat_info; | 456 | *stat_inf = qtcb_config->stat_info; |
457 | 457 | ||
@@ -492,15 +492,15 @@ static ssize_t zfcp_sysfs_adapter_q_full_show(struct device *dev, | |||
492 | char *buf) | 492 | char *buf) |
493 | { | 493 | { |
494 | struct Scsi_Host *scsi_host = class_to_shost(dev); | 494 | struct Scsi_Host *scsi_host = class_to_shost(dev); |
495 | struct zfcp_adapter *adapter = | 495 | struct zfcp_qdio *qdio = |
496 | (struct zfcp_adapter *) scsi_host->hostdata[0]; | 496 | ((struct zfcp_adapter *) scsi_host->hostdata[0])->qdio; |
497 | u64 util; | 497 | u64 util; |
498 | 498 | ||
499 | spin_lock_bh(&adapter->qdio_stat_lock); | 499 | spin_lock_bh(&qdio->stat_lock); |
500 | util = adapter->req_q_util; | 500 | util = qdio->req_q_util; |
501 | spin_unlock_bh(&adapter->qdio_stat_lock); | 501 | spin_unlock_bh(&qdio->stat_lock); |
502 | 502 | ||
503 | return sprintf(buf, "%d %llu\n", atomic_read(&adapter->qdio_outb_full), | 503 | return sprintf(buf, "%d %llu\n", atomic_read(&qdio->req_q_full), |
504 | (unsigned long long)util); | 504 | (unsigned long long)util); |
505 | } | 505 | } |
506 | static DEVICE_ATTR(queue_full, S_IRUGO, zfcp_sysfs_adapter_q_full_show, NULL); | 506 | static DEVICE_ATTR(queue_full, S_IRUGO, zfcp_sysfs_adapter_q_full_show, NULL); |