path: root/drivers/s390/scsi
author     Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-30 19:16:45 -0400
committer  Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-30 19:16:45 -0400
commit     ada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree       644b88f8a71896307d71438e9b3af49126ffb22b  /drivers/s390/scsi
parent     43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent     3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)
Merge branch 'wip-2.6.34' into old-private-master  (archived-private-master)
Diffstat (limited to 'drivers/s390/scsi')
-rw-r--r--  drivers/s390/scsi/zfcp_aux.c      445
-rw-r--r--  drivers/s390/scsi/zfcp_ccw.c      182
-rw-r--r--  drivers/s390/scsi/zfcp_cfdc.c      36
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.c      157
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.h       44
-rw-r--r--  drivers/s390/scsi/zfcp_def.h      437
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c      162
-rw-r--r--  drivers/s390/scsi/zfcp_ext.h       48
-rw-r--r--  drivers/s390/scsi/zfcp_fc.c       714
-rw-r--r--  drivers/s390/scsi/zfcp_fc.h       262
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c      520
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.h       53
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.c      53
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.h     109
-rw-r--r--  drivers/s390/scsi/zfcp_reqlist.h  183
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c     180
-rw-r--r--  drivers/s390/scsi/zfcp_sysfs.c    246
17 files changed, 1881 insertions, 1950 deletions
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 2889e5f2dfd3..1e6183a86ce5 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Module interface and handling of zfcp data structures. 4 * Module interface and handling of zfcp data structures.
5 * 5 *
6 * Copyright IBM Corporation 2002, 2009 6 * Copyright IBM Corporation 2002, 2010
7 */ 7 */
8 8
9/* 9/*
@@ -30,7 +30,10 @@
30 30
31#include <linux/miscdevice.h> 31#include <linux/miscdevice.h>
32#include <linux/seq_file.h> 32#include <linux/seq_file.h>
33#include <linux/slab.h>
33#include "zfcp_ext.h" 34#include "zfcp_ext.h"
35#include "zfcp_fc.h"
36#include "zfcp_reqlist.h"
34 37
35#define ZFCP_BUS_ID_SIZE 20 38#define ZFCP_BUS_ID_SIZE 20
36 39
@@ -48,80 +51,42 @@ static struct kmem_cache *zfcp_cache_hw_align(const char *name,
48 return kmem_cache_create(name, size, roundup_pow_of_two(size), 0, NULL); 51 return kmem_cache_create(name, size, roundup_pow_of_two(size), 0, NULL);
49} 52}
50 53
51static int zfcp_reqlist_alloc(struct zfcp_adapter *adapter)
52{
53 int idx;
54
55 adapter->req_list = kcalloc(REQUEST_LIST_SIZE, sizeof(struct list_head),
56 GFP_KERNEL);
57 if (!adapter->req_list)
58 return -ENOMEM;
59
60 for (idx = 0; idx < REQUEST_LIST_SIZE; idx++)
61 INIT_LIST_HEAD(&adapter->req_list[idx]);
62 return 0;
63}
64
65/**
66 * zfcp_reqlist_isempty - is the request list empty
67 * @adapter: pointer to struct zfcp_adapter
68 *
69 * Returns: true if list is empty, false otherwise
70 */
71int zfcp_reqlist_isempty(struct zfcp_adapter *adapter)
72{
73 unsigned int idx;
74
75 for (idx = 0; idx < REQUEST_LIST_SIZE; idx++)
76 if (!list_empty(&adapter->req_list[idx]))
77 return 0;
78 return 1;
79}
80
81static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun) 54static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun)
82{ 55{
83 struct ccw_device *ccwdev; 56 struct ccw_device *cdev;
84 struct zfcp_adapter *adapter; 57 struct zfcp_adapter *adapter;
85 struct zfcp_port *port; 58 struct zfcp_port *port;
86 struct zfcp_unit *unit; 59 struct zfcp_unit *unit;
87 60
88 ccwdev = get_ccwdev_by_busid(&zfcp_ccw_driver, busid); 61 cdev = get_ccwdev_by_busid(&zfcp_ccw_driver, busid);
89 if (!ccwdev) 62 if (!cdev)
90 return; 63 return;
91 64
92 if (ccw_device_set_online(ccwdev)) 65 if (ccw_device_set_online(cdev))
93 goto out_ccwdev; 66 goto out_ccw_device;
94 67
95 mutex_lock(&zfcp_data.config_mutex); 68 adapter = zfcp_ccw_adapter_by_cdev(cdev);
96 adapter = dev_get_drvdata(&ccwdev->dev);
97 if (!adapter) 69 if (!adapter)
98 goto out_unlock; 70 goto out_ccw_device;
99 zfcp_adapter_get(adapter);
100 71
101 port = zfcp_get_port_by_wwpn(adapter, wwpn); 72 port = zfcp_get_port_by_wwpn(adapter, wwpn);
102 if (!port) 73 if (!port)
103 goto out_port; 74 goto out_port;
104 75
105 zfcp_port_get(port);
106 unit = zfcp_unit_enqueue(port, lun); 76 unit = zfcp_unit_enqueue(port, lun);
107 if (IS_ERR(unit)) 77 if (IS_ERR(unit))
108 goto out_unit; 78 goto out_unit;
109 mutex_unlock(&zfcp_data.config_mutex);
110 79
111 zfcp_erp_unit_reopen(unit, 0, "auidc_1", NULL); 80 zfcp_erp_unit_reopen(unit, 0, "auidc_1", NULL);
112 zfcp_erp_wait(adapter); 81 zfcp_erp_wait(adapter);
113 flush_work(&unit->scsi_work); 82 flush_work(&unit->scsi_work);
114 83
115 mutex_lock(&zfcp_data.config_mutex);
116 zfcp_unit_put(unit);
117out_unit: 84out_unit:
118 zfcp_port_put(port); 85 put_device(&port->dev);
119out_port: 86out_port:
120 zfcp_adapter_put(adapter); 87 zfcp_ccw_adapter_put(adapter);
121out_unlock: 88out_ccw_device:
122 mutex_unlock(&zfcp_data.config_mutex); 89 put_device(&cdev->dev);
123out_ccwdev:
124 put_device(&ccwdev->dev);
125 return; 90 return;
126} 91}
127 92
@@ -167,7 +132,7 @@ static int __init zfcp_module_init(void)
167 int retval = -ENOMEM; 132 int retval = -ENOMEM;
168 133
169 zfcp_data.gpn_ft_cache = zfcp_cache_hw_align("zfcp_gpn", 134 zfcp_data.gpn_ft_cache = zfcp_cache_hw_align("zfcp_gpn",
170 sizeof(struct ct_iu_gpn_ft_req)); 135 sizeof(struct zfcp_fc_gpn_ft_req));
171 if (!zfcp_data.gpn_ft_cache) 136 if (!zfcp_data.gpn_ft_cache)
172 goto out; 137 goto out;
173 138
@@ -182,12 +147,14 @@ static int __init zfcp_module_init(void)
182 goto out_sr_cache; 147 goto out_sr_cache;
183 148
184 zfcp_data.gid_pn_cache = zfcp_cache_hw_align("zfcp_gid", 149 zfcp_data.gid_pn_cache = zfcp_cache_hw_align("zfcp_gid",
185 sizeof(struct zfcp_gid_pn_data)); 150 sizeof(struct zfcp_fc_gid_pn));
186 if (!zfcp_data.gid_pn_cache) 151 if (!zfcp_data.gid_pn_cache)
187 goto out_gid_cache; 152 goto out_gid_cache;
188 153
189 mutex_init(&zfcp_data.config_mutex); 154 zfcp_data.adisc_cache = zfcp_cache_hw_align("zfcp_adisc",
190 rwlock_init(&zfcp_data.config_lock); 155 sizeof(struct zfcp_fc_els_adisc));
156 if (!zfcp_data.adisc_cache)
157 goto out_adisc_cache;
191 158
192 zfcp_data.scsi_transport_template = 159 zfcp_data.scsi_transport_template =
193 fc_attach_transport(&zfcp_transport_functions); 160 fc_attach_transport(&zfcp_transport_functions);
@@ -200,7 +167,7 @@ static int __init zfcp_module_init(void)
200 goto out_misc; 167 goto out_misc;
201 } 168 }
202 169
203 retval = zfcp_ccw_register(); 170 retval = ccw_driver_register(&zfcp_ccw_driver);
204 if (retval) { 171 if (retval) {
205 pr_err("The zfcp device driver could not register with " 172 pr_err("The zfcp device driver could not register with "
206 "the common I/O layer\n"); 173 "the common I/O layer\n");
@@ -216,6 +183,8 @@ out_ccw_register:
216out_misc: 183out_misc:
217 fc_release_transport(zfcp_data.scsi_transport_template); 184 fc_release_transport(zfcp_data.scsi_transport_template);
218out_transport: 185out_transport:
186 kmem_cache_destroy(zfcp_data.adisc_cache);
187out_adisc_cache:
219 kmem_cache_destroy(zfcp_data.gid_pn_cache); 188 kmem_cache_destroy(zfcp_data.gid_pn_cache);
220out_gid_cache: 189out_gid_cache:
221 kmem_cache_destroy(zfcp_data.sr_buffer_cache); 190 kmem_cache_destroy(zfcp_data.sr_buffer_cache);
@@ -229,6 +198,20 @@ out:
229 198
230module_init(zfcp_module_init); 199module_init(zfcp_module_init);
231 200
201static void __exit zfcp_module_exit(void)
202{
203 ccw_driver_unregister(&zfcp_ccw_driver);
204 misc_deregister(&zfcp_cfdc_misc);
205 fc_release_transport(zfcp_data.scsi_transport_template);
206 kmem_cache_destroy(zfcp_data.adisc_cache);
207 kmem_cache_destroy(zfcp_data.gid_pn_cache);
208 kmem_cache_destroy(zfcp_data.sr_buffer_cache);
209 kmem_cache_destroy(zfcp_data.qtcb_cache);
210 kmem_cache_destroy(zfcp_data.gpn_ft_cache);
211}
212
213module_exit(zfcp_module_exit);
214
232/** 215/**
233 * zfcp_get_unit_by_lun - find unit in unit list of port by FCP LUN 216 * zfcp_get_unit_by_lun - find unit in unit list of port by FCP LUN
234 * @port: pointer to port to search for unit 217 * @port: pointer to port to search for unit
@@ -238,12 +221,18 @@ module_init(zfcp_module_init);
238 */ 221 */
239struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *port, u64 fcp_lun) 222struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *port, u64 fcp_lun)
240{ 223{
224 unsigned long flags;
241 struct zfcp_unit *unit; 225 struct zfcp_unit *unit;
242 226
243 list_for_each_entry(unit, &port->unit_list_head, list) 227 read_lock_irqsave(&port->unit_list_lock, flags);
244 if ((unit->fcp_lun == fcp_lun) && 228 list_for_each_entry(unit, &port->unit_list, list)
245 !(atomic_read(&unit->status) & ZFCP_STATUS_COMMON_REMOVE)) 229 if (unit->fcp_lun == fcp_lun) {
246 return unit; 230 if (!get_device(&unit->dev))
231 unit = NULL;
232 read_unlock_irqrestore(&port->unit_list_lock, flags);
233 return unit;
234 }
235 read_unlock_irqrestore(&port->unit_list_lock, flags);
247 return NULL; 236 return NULL;
248} 237}
249 238
@@ -257,18 +246,34 @@ struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *port, u64 fcp_lun)
257struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter, 246struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter,
258 u64 wwpn) 247 u64 wwpn)
259{ 248{
249 unsigned long flags;
260 struct zfcp_port *port; 250 struct zfcp_port *port;
261 251
262 list_for_each_entry(port, &adapter->port_list_head, list) 252 read_lock_irqsave(&adapter->port_list_lock, flags);
263 if ((port->wwpn == wwpn) && 253 list_for_each_entry(port, &adapter->port_list, list)
264 !(atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE)) 254 if (port->wwpn == wwpn) {
255 if (!get_device(&port->dev))
256 port = NULL;
257 read_unlock_irqrestore(&adapter->port_list_lock, flags);
265 return port; 258 return port;
259 }
260 read_unlock_irqrestore(&adapter->port_list_lock, flags);
266 return NULL; 261 return NULL;
267} 262}
268 263
269static void zfcp_sysfs_unit_release(struct device *dev) 264/**
265 * zfcp_unit_release - dequeue unit
266 * @dev: pointer to device
267 *
268 * waits until all work is done on unit and removes it then from the unit->list
269 * of the associated port.
270 */
271static void zfcp_unit_release(struct device *dev)
270{ 272{
271 kfree(container_of(dev, struct zfcp_unit, sysfs_device)); 273 struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
274
275 put_device(&unit->port->dev);
276 kfree(unit);
272} 277}
273 278
274/** 279/**
@@ -276,43 +281,40 @@ static void zfcp_sysfs_unit_release(struct device *dev)
276 * @port: pointer to port where unit is added 281 * @port: pointer to port where unit is added
277 * @fcp_lun: FCP LUN of unit to be enqueued 282 * @fcp_lun: FCP LUN of unit to be enqueued
278 * Returns: pointer to enqueued unit on success, ERR_PTR on error 283 * Returns: pointer to enqueued unit on success, ERR_PTR on error
279 * Locks: config_mutex must be held to serialize changes to the unit list
280 * 284 *
281 * Sets up some unit internal structures and creates sysfs entry. 285 * Sets up some unit internal structures and creates sysfs entry.
282 */ 286 */
283struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun) 287struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
284{ 288{
285 struct zfcp_unit *unit; 289 struct zfcp_unit *unit;
290 int retval = -ENOMEM;
291
292 get_device(&port->dev);
286 293
287 read_lock_irq(&zfcp_data.config_lock); 294 unit = zfcp_get_unit_by_lun(port, fcp_lun);
288 if (zfcp_get_unit_by_lun(port, fcp_lun)) { 295 if (unit) {
289 read_unlock_irq(&zfcp_data.config_lock); 296 put_device(&unit->dev);
290 return ERR_PTR(-EINVAL); 297 retval = -EEXIST;
298 goto err_out;
291 } 299 }
292 read_unlock_irq(&zfcp_data.config_lock);
293 300
294 unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL); 301 unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL);
295 if (!unit) 302 if (!unit)
296 return ERR_PTR(-ENOMEM); 303 goto err_out;
297
298 atomic_set(&unit->refcount, 0);
299 init_waitqueue_head(&unit->remove_wq);
300 INIT_WORK(&unit->scsi_work, zfcp_scsi_scan);
301 304
302 unit->port = port; 305 unit->port = port;
303 unit->fcp_lun = fcp_lun; 306 unit->fcp_lun = fcp_lun;
307 unit->dev.parent = &port->dev;
308 unit->dev.release = zfcp_unit_release;
304 309
305 if (dev_set_name(&unit->sysfs_device, "0x%016llx", 310 if (dev_set_name(&unit->dev, "0x%016llx",
306 (unsigned long long) fcp_lun)) { 311 (unsigned long long) fcp_lun)) {
307 kfree(unit); 312 kfree(unit);
308 return ERR_PTR(-ENOMEM); 313 goto err_out;
309 } 314 }
310 unit->sysfs_device.parent = &port->sysfs_device; 315 retval = -EINVAL;
311 unit->sysfs_device.release = zfcp_sysfs_unit_release;
312 dev_set_drvdata(&unit->sysfs_device, unit);
313 316
314 /* mark unit unusable as long as sysfs registration is not complete */ 317 INIT_WORK(&unit->scsi_work, zfcp_scsi_scan);
315 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);
316 318
317 spin_lock_init(&unit->latencies.lock); 319 spin_lock_init(&unit->latencies.lock);
318 unit->latencies.write.channel.min = 0xFFFFFFFF; 320 unit->latencies.write.channel.min = 0xFFFFFFFF;
@@ -322,52 +324,31 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
322 unit->latencies.cmd.channel.min = 0xFFFFFFFF; 324 unit->latencies.cmd.channel.min = 0xFFFFFFFF;
323 unit->latencies.cmd.fabric.min = 0xFFFFFFFF; 325 unit->latencies.cmd.fabric.min = 0xFFFFFFFF;
324 326
325 if (device_register(&unit->sysfs_device)) { 327 if (device_register(&unit->dev)) {
326 put_device(&unit->sysfs_device); 328 put_device(&unit->dev);
327 return ERR_PTR(-EINVAL); 329 goto err_out;
328 } 330 }
329 331
330 if (sysfs_create_group(&unit->sysfs_device.kobj, 332 if (sysfs_create_group(&unit->dev.kobj, &zfcp_sysfs_unit_attrs))
331 &zfcp_sysfs_unit_attrs)) { 333 goto err_out_put;
332 device_unregister(&unit->sysfs_device);
333 return ERR_PTR(-EINVAL);
334 }
335 334
336 zfcp_unit_get(unit); 335 write_lock_irq(&port->unit_list_lock);
336 list_add_tail(&unit->list, &port->unit_list);
337 write_unlock_irq(&port->unit_list_lock);
337 338
338 write_lock_irq(&zfcp_data.config_lock);
339 list_add_tail(&unit->list, &port->unit_list_head);
340 atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);
341 atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &unit->status); 339 atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &unit->status);
342 340
343 write_unlock_irq(&zfcp_data.config_lock);
344
345 zfcp_port_get(port);
346
347 return unit; 341 return unit;
348}
349 342
350/** 343err_out_put:
351 * zfcp_unit_dequeue - dequeue unit 344 device_unregister(&unit->dev);
352 * @unit: pointer to zfcp_unit 345err_out:
353 * 346 put_device(&port->dev);
354 * waits until all work is done on unit and removes it then from the unit->list 347 return ERR_PTR(retval);
355 * of the associated port.
356 */
357void zfcp_unit_dequeue(struct zfcp_unit *unit)
358{
359 wait_event(unit->remove_wq, atomic_read(&unit->refcount) == 0);
360 write_lock_irq(&zfcp_data.config_lock);
361 list_del(&unit->list);
362 write_unlock_irq(&zfcp_data.config_lock);
363 zfcp_port_put(unit->port);
364 sysfs_remove_group(&unit->sysfs_device.kobj, &zfcp_sysfs_unit_attrs);
365 device_unregister(&unit->sysfs_device);
366} 348}
367 349
368static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter) 350static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
369{ 351{
370 /* must only be called with zfcp_data.config_mutex taken */
371 adapter->pool.erp_req = 352 adapter->pool.erp_req =
372 mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req)); 353 mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req));
373 if (!adapter->pool.erp_req) 354 if (!adapter->pool.erp_req)
@@ -405,9 +386,9 @@ static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
405 if (!adapter->pool.status_read_data) 386 if (!adapter->pool.status_read_data)
406 return -ENOMEM; 387 return -ENOMEM;
407 388
408 adapter->pool.gid_pn_data = 389 adapter->pool.gid_pn =
409 mempool_create_slab_pool(1, zfcp_data.gid_pn_cache); 390 mempool_create_slab_pool(1, zfcp_data.gid_pn_cache);
410 if (!adapter->pool.gid_pn_data) 391 if (!adapter->pool.gid_pn)
411 return -ENOMEM; 392 return -ENOMEM;
412 393
413 return 0; 394 return 0;
@@ -415,7 +396,6 @@ static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
415 396
416static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter) 397static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
417{ 398{
418 /* zfcp_data.config_mutex must be held */
419 if (adapter->pool.erp_req) 399 if (adapter->pool.erp_req)
420 mempool_destroy(adapter->pool.erp_req); 400 mempool_destroy(adapter->pool.erp_req);
421 if (adapter->pool.scsi_req) 401 if (adapter->pool.scsi_req)
@@ -428,8 +408,8 @@ static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
428 mempool_destroy(adapter->pool.status_read_req); 408 mempool_destroy(adapter->pool.status_read_req);
429 if (adapter->pool.status_read_data) 409 if (adapter->pool.status_read_data)
430 mempool_destroy(adapter->pool.status_read_data); 410 mempool_destroy(adapter->pool.status_read_data);
431 if (adapter->pool.gid_pn_data) 411 if (adapter->pool.gid_pn)
432 mempool_destroy(adapter->pool.gid_pn_data); 412 mempool_destroy(adapter->pool.gid_pn);
433} 413}
434 414
435/** 415/**
@@ -497,139 +477,142 @@ static void zfcp_destroy_adapter_work_queue(struct zfcp_adapter *adapter)
497 * zfcp_adapter_enqueue - enqueue a new adapter to the list 477 * zfcp_adapter_enqueue - enqueue a new adapter to the list
498 * @ccw_device: pointer to the struct cc_device 478 * @ccw_device: pointer to the struct cc_device
499 * 479 *
500 * Returns: 0 if a new adapter was successfully enqueued 480 * Returns: struct zfcp_adapter*
501 * -ENOMEM if alloc failed
502 * Enqueues an adapter at the end of the adapter list in the driver data. 481 * Enqueues an adapter at the end of the adapter list in the driver data.
503 * All adapter internal structures are set up. 482 * All adapter internal structures are set up.
504 * Proc-fs entries are also created. 483 * Proc-fs entries are also created.
505 * locks: config_mutex must be held to serialize changes to the adapter list
506 */ 484 */
507int zfcp_adapter_enqueue(struct ccw_device *ccw_device) 485struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
508{ 486{
509 struct zfcp_adapter *adapter; 487 struct zfcp_adapter *adapter;
510 488
511 /* 489 if (!get_device(&ccw_device->dev))
512 * Note: It is safe to release the list_lock, as any list changes 490 return ERR_PTR(-ENODEV);
513 * are protected by the config_mutex, which must be held to get here
514 */
515 491
516 adapter = kzalloc(sizeof(struct zfcp_adapter), GFP_KERNEL); 492 adapter = kzalloc(sizeof(struct zfcp_adapter), GFP_KERNEL);
517 if (!adapter) 493 if (!adapter) {
518 return -ENOMEM; 494 put_device(&ccw_device->dev);
495 return ERR_PTR(-ENOMEM);
496 }
497
498 kref_init(&adapter->ref);
519 499
520 ccw_device->handler = NULL; 500 ccw_device->handler = NULL;
521 adapter->ccw_device = ccw_device; 501 adapter->ccw_device = ccw_device;
522 atomic_set(&adapter->refcount, 0); 502
503 INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler);
504 INIT_WORK(&adapter->scan_work, zfcp_fc_scan_ports);
523 505
524 if (zfcp_qdio_setup(adapter)) 506 if (zfcp_qdio_setup(adapter))
525 goto qdio_failed; 507 goto failed;
526 508
527 if (zfcp_allocate_low_mem_buffers(adapter)) 509 if (zfcp_allocate_low_mem_buffers(adapter))
528 goto low_mem_buffers_failed; 510 goto failed;
529 511
530 if (zfcp_reqlist_alloc(adapter)) 512 adapter->req_list = zfcp_reqlist_alloc();
531 goto low_mem_buffers_failed; 513 if (!adapter->req_list)
514 goto failed;
532 515
533 if (zfcp_dbf_adapter_register(adapter)) 516 if (zfcp_dbf_adapter_register(adapter))
534 goto debug_register_failed; 517 goto failed;
535 518
536 if (zfcp_setup_adapter_work_queue(adapter)) 519 if (zfcp_setup_adapter_work_queue(adapter))
537 goto work_queue_failed; 520 goto failed;
538 521
539 if (zfcp_fc_gs_setup(adapter)) 522 if (zfcp_fc_gs_setup(adapter))
540 goto generic_services_failed; 523 goto failed;
524
525 rwlock_init(&adapter->port_list_lock);
526 INIT_LIST_HEAD(&adapter->port_list);
541 527
542 init_waitqueue_head(&adapter->remove_wq);
543 init_waitqueue_head(&adapter->erp_ready_wq); 528 init_waitqueue_head(&adapter->erp_ready_wq);
544 init_waitqueue_head(&adapter->erp_done_wqh); 529 init_waitqueue_head(&adapter->erp_done_wqh);
545 530
546 INIT_LIST_HEAD(&adapter->port_list_head);
547 INIT_LIST_HEAD(&adapter->erp_ready_head); 531 INIT_LIST_HEAD(&adapter->erp_ready_head);
548 INIT_LIST_HEAD(&adapter->erp_running_head); 532 INIT_LIST_HEAD(&adapter->erp_running_head);
549 533
550 spin_lock_init(&adapter->req_list_lock);
551
552 rwlock_init(&adapter->erp_lock); 534 rwlock_init(&adapter->erp_lock);
553 rwlock_init(&adapter->abort_lock); 535 rwlock_init(&adapter->abort_lock);
554 536
555 if (zfcp_erp_thread_setup(adapter)) 537 if (zfcp_erp_thread_setup(adapter))
556 goto erp_thread_failed; 538 goto failed;
557
558 INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler);
559 INIT_WORK(&adapter->scan_work, _zfcp_fc_scan_ports_later);
560 539
561 adapter->service_level.seq_print = zfcp_print_sl; 540 adapter->service_level.seq_print = zfcp_print_sl;
562 541
563 /* mark adapter unusable as long as sysfs registration is not complete */
564 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
565
566 dev_set_drvdata(&ccw_device->dev, adapter); 542 dev_set_drvdata(&ccw_device->dev, adapter);
567 543
568 if (sysfs_create_group(&ccw_device->dev.kobj, 544 if (sysfs_create_group(&ccw_device->dev.kobj,
569 &zfcp_sysfs_adapter_attrs)) 545 &zfcp_sysfs_adapter_attrs))
570 goto sysfs_failed; 546 goto failed;
571
572 atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
573 547
574 if (!zfcp_adapter_scsi_register(adapter)) 548 if (!zfcp_adapter_scsi_register(adapter))
575 return 0; 549 return adapter;
576 550
577sysfs_failed: 551failed:
578 zfcp_erp_thread_kill(adapter); 552 zfcp_adapter_unregister(adapter);
579erp_thread_failed: 553 return ERR_PTR(-ENOMEM);
580 zfcp_fc_gs_destroy(adapter); 554}
581generic_services_failed: 555
556void zfcp_adapter_unregister(struct zfcp_adapter *adapter)
557{
558 struct ccw_device *cdev = adapter->ccw_device;
559
560 cancel_work_sync(&adapter->scan_work);
561 cancel_work_sync(&adapter->stat_work);
582 zfcp_destroy_adapter_work_queue(adapter); 562 zfcp_destroy_adapter_work_queue(adapter);
583work_queue_failed: 563
564 zfcp_fc_wka_ports_force_offline(adapter->gs);
565 zfcp_adapter_scsi_unregister(adapter);
566 sysfs_remove_group(&cdev->dev.kobj, &zfcp_sysfs_adapter_attrs);
567
568 zfcp_erp_thread_kill(adapter);
584 zfcp_dbf_adapter_unregister(adapter->dbf); 569 zfcp_dbf_adapter_unregister(adapter->dbf);
585debug_register_failed:
586 dev_set_drvdata(&ccw_device->dev, NULL);
587 kfree(adapter->req_list);
588low_mem_buffers_failed:
589 zfcp_free_low_mem_buffers(adapter);
590qdio_failed:
591 zfcp_qdio_destroy(adapter->qdio); 570 zfcp_qdio_destroy(adapter->qdio);
592 kfree(adapter); 571
593 return -ENOMEM; 572 zfcp_ccw_adapter_put(adapter); /* final put to release */
594} 573}
595 574
596/** 575/**
597 * zfcp_adapter_dequeue - remove the adapter from the resource list 576 * zfcp_adapter_release - remove the adapter from the resource list
598 * @adapter: pointer to struct zfcp_adapter which should be removed 577 * @ref: pointer to struct kref
599 * locks: adapter list write lock is assumed to be held by caller 578 * locks: adapter list write lock is assumed to be held by caller
600 */ 579 */
601void zfcp_adapter_dequeue(struct zfcp_adapter *adapter) 580void zfcp_adapter_release(struct kref *ref)
602{ 581{
603 int retval = 0; 582 struct zfcp_adapter *adapter = container_of(ref, struct zfcp_adapter,
604 unsigned long flags; 583 ref);
584 struct ccw_device *cdev = adapter->ccw_device;
605 585
606 cancel_work_sync(&adapter->stat_work);
607 zfcp_fc_wka_ports_force_offline(adapter->gs);
608 sysfs_remove_group(&adapter->ccw_device->dev.kobj,
609 &zfcp_sysfs_adapter_attrs);
610 dev_set_drvdata(&adapter->ccw_device->dev, NULL); 586 dev_set_drvdata(&adapter->ccw_device->dev, NULL);
611 /* sanity check: no pending FSF requests */
612 spin_lock_irqsave(&adapter->req_list_lock, flags);
613 retval = zfcp_reqlist_isempty(adapter);
614 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
615 if (!retval)
616 return;
617
618 zfcp_fc_gs_destroy(adapter); 587 zfcp_fc_gs_destroy(adapter);
619 zfcp_erp_thread_kill(adapter);
620 zfcp_destroy_adapter_work_queue(adapter);
621 zfcp_dbf_adapter_unregister(adapter->dbf);
622 zfcp_free_low_mem_buffers(adapter); 588 zfcp_free_low_mem_buffers(adapter);
623 zfcp_qdio_destroy(adapter->qdio);
624 kfree(adapter->req_list); 589 kfree(adapter->req_list);
625 kfree(adapter->fc_stats); 590 kfree(adapter->fc_stats);
626 kfree(adapter->stats_reset_data); 591 kfree(adapter->stats_reset_data);
627 kfree(adapter); 592 kfree(adapter);
593 put_device(&cdev->dev);
628} 594}
629 595
630static void zfcp_sysfs_port_release(struct device *dev) 596/**
597 * zfcp_device_unregister - remove port, unit from system
598 * @dev: reference to device which is to be removed
599 * @grp: related reference to attribute group
600 *
601 * Helper function to unregister port, unit from system
602 */
603void zfcp_device_unregister(struct device *dev,
604 const struct attribute_group *grp)
631{ 605{
632 kfree(container_of(dev, struct zfcp_port, sysfs_device)); 606 sysfs_remove_group(&dev->kobj, grp);
607 device_unregister(dev);
608}
609
610static void zfcp_port_release(struct device *dev)
611{
612 struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
613
614 zfcp_ccw_adapter_put(port->adapter);
615 kfree(port);
633} 616}
634 617
635/** 618/**
@@ -639,7 +622,6 @@ static void zfcp_sysfs_port_release(struct device *dev)
639 * @status: initial status for the port 622 * @status: initial status for the port
640 * @d_id: destination id of the remote port to be enqueued 623 * @d_id: destination id of the remote port to be enqueued
641 * Returns: pointer to enqueued port on success, ERR_PTR on error 624 * Returns: pointer to enqueued port on success, ERR_PTR on error
642 * Locks: config_mutex must be held to serialize changes to the port list
643 * 625 *
644 * All port internal structures are set up and the sysfs entry is generated. 626 * All port internal structures are set up and the sysfs entry is generated.
645 * d_id is used to enqueue ports with a well known address like the Directory 627 * d_id is used to enqueue ports with a well known address like the Directory
@@ -649,20 +631,24 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
649 u32 status, u32 d_id) 631 u32 status, u32 d_id)
650{ 632{
651 struct zfcp_port *port; 633 struct zfcp_port *port;
634 int retval = -ENOMEM;
635
636 kref_get(&adapter->ref);
652 637
653 read_lock_irq(&zfcp_data.config_lock); 638 port = zfcp_get_port_by_wwpn(adapter, wwpn);
654 if (zfcp_get_port_by_wwpn(adapter, wwpn)) { 639 if (port) {
655 read_unlock_irq(&zfcp_data.config_lock); 640 put_device(&port->dev);
656 return ERR_PTR(-EINVAL); 641 retval = -EEXIST;
642 goto err_out;
657 } 643 }
658 read_unlock_irq(&zfcp_data.config_lock);
659 644
660 port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL); 645 port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL);
661 if (!port) 646 if (!port)
662 return ERR_PTR(-ENOMEM); 647 goto err_out;
648
649 rwlock_init(&port->unit_list_lock);
650 INIT_LIST_HEAD(&port->unit_list);
663 651
664 init_waitqueue_head(&port->remove_wq);
665 INIT_LIST_HEAD(&port->unit_list_head);
666 INIT_WORK(&port->gid_pn_work, zfcp_fc_port_did_lookup); 652 INIT_WORK(&port->gid_pn_work, zfcp_fc_port_did_lookup);
667 INIT_WORK(&port->test_link_work, zfcp_fc_link_test_work); 653 INIT_WORK(&port->test_link_work, zfcp_fc_link_test_work);
668 INIT_WORK(&port->rport_work, zfcp_scsi_rport_work); 654 INIT_WORK(&port->rport_work, zfcp_scsi_rport_work);
@@ -671,58 +657,37 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
671 port->d_id = d_id; 657 port->d_id = d_id;
672 port->wwpn = wwpn; 658 port->wwpn = wwpn;
673 port->rport_task = RPORT_NONE; 659 port->rport_task = RPORT_NONE;
660 port->dev.parent = &adapter->ccw_device->dev;
661 port->dev.release = zfcp_port_release;
674 662
675 /* mark port unusable as long as sysfs registration is not complete */ 663 if (dev_set_name(&port->dev, "0x%016llx", (unsigned long long)wwpn)) {
676 atomic_set_mask(status | ZFCP_STATUS_COMMON_REMOVE, &port->status);
677 atomic_set(&port->refcount, 0);
678
679 if (dev_set_name(&port->sysfs_device, "0x%016llx",
680 (unsigned long long)wwpn)) {
681 kfree(port); 664 kfree(port);
682 return ERR_PTR(-ENOMEM); 665 goto err_out;
683 }
684 port->sysfs_device.parent = &adapter->ccw_device->dev;
685 port->sysfs_device.release = zfcp_sysfs_port_release;
686 dev_set_drvdata(&port->sysfs_device, port);
687
688 if (device_register(&port->sysfs_device)) {
689 put_device(&port->sysfs_device);
690 return ERR_PTR(-EINVAL);
691 } 666 }
667 retval = -EINVAL;
692 668
693 if (sysfs_create_group(&port->sysfs_device.kobj, 669 if (device_register(&port->dev)) {
694 &zfcp_sysfs_port_attrs)) { 670 put_device(&port->dev);
695 device_unregister(&port->sysfs_device); 671 goto err_out;
696 return ERR_PTR(-EINVAL);
697 } 672 }
698 673
699 zfcp_port_get(port); 674 if (sysfs_create_group(&port->dev.kobj,
675 &zfcp_sysfs_port_attrs))
676 goto err_out_put;
700 677
701 write_lock_irq(&zfcp_data.config_lock); 678 write_lock_irq(&adapter->port_list_lock);
702 list_add_tail(&port->list, &adapter->port_list_head); 679 list_add_tail(&port->list, &adapter->port_list);
703 atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status); 680 write_unlock_irq(&adapter->port_list_lock);
704 atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &port->status);
705 681
706 write_unlock_irq(&zfcp_data.config_lock); 682 atomic_set_mask(status | ZFCP_STATUS_COMMON_RUNNING, &port->status);
707 683
708 zfcp_adapter_get(adapter);
709 return port; 684 return port;
710}
711 685
712/** 686err_out_put:
713 * zfcp_port_dequeue - dequeues a port from the port list of the adapter 687 device_unregister(&port->dev);
714 * @port: pointer to struct zfcp_port which should be removed 688err_out:
715 */ 689 zfcp_ccw_adapter_put(adapter);
716void zfcp_port_dequeue(struct zfcp_port *port) 690 return ERR_PTR(retval);
717{
718 write_lock_irq(&zfcp_data.config_lock);
719 list_del(&port->list);
720 write_unlock_irq(&zfcp_data.config_lock);
721 wait_event(port->remove_wq, atomic_read(&port->refcount) == 0);
722 cancel_work_sync(&port->rport_work); /* usually not necessary */
723 zfcp_adapter_put(port->adapter);
724 sysfs_remove_group(&port->sysfs_device.kobj, &zfcp_sysfs_port_attrs);
725 device_unregister(&port->sysfs_device);
726} 691}
727 692
728/** 693/**
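
The zfcp_aux.c hunks above replace the global config_lock/config_mutex scheme with per-adapter and per-port list locks plus plain device reference counting: zfcp_get_port_by_wwpn() and zfcp_get_unit_by_lun() now take a get_device() reference under the list read lock before returning. A minimal caller-side sketch of that pattern follows (the helper name is hypothetical; only the lookup and the put_device() pairing are taken from the hunks above):

static void example_use_port(struct zfcp_adapter *adapter, u64 wwpn)
{
	struct zfcp_port *port;

	/* lookup returns with a get_device() reference on &port->dev */
	port = zfcp_get_port_by_wwpn(adapter, wwpn);
	if (!port)
		return;			/* no port with this WWPN */

	/* ... work with the port while the reference pins it ... */

	put_device(&port->dev);		/* balance the reference from the lookup */
}
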
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index e08339428ecf..ce1cc7a11fb4 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -3,38 +3,45 @@
3 * 3 *
4 * Registration and callback for the s390 common I/O layer. 4 * Registration and callback for the s390 common I/O layer.
5 * 5 *
6 * Copyright IBM Corporation 2002, 2009 6 * Copyright IBM Corporation 2002, 2010
7 */ 7 */
8 8
9#define KMSG_COMPONENT "zfcp" 9#define KMSG_COMPONENT "zfcp"
10#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
11 11
12#include "zfcp_ext.h" 12#include "zfcp_ext.h"
13#include "zfcp_reqlist.h"
13 14
14#define ZFCP_MODEL_PRIV 0x4 15#define ZFCP_MODEL_PRIV 0x4
15 16
16static int zfcp_ccw_suspend(struct ccw_device *cdev) 17static DEFINE_SPINLOCK(zfcp_ccw_adapter_ref_lock);
17 18
19struct zfcp_adapter *zfcp_ccw_adapter_by_cdev(struct ccw_device *cdev)
18{ 20{
19 struct zfcp_adapter *adapter = dev_get_drvdata(&cdev->dev); 21 struct zfcp_adapter *adapter;
20 22 unsigned long flags;
21 if (!adapter)
22 return 0;
23
24 mutex_lock(&zfcp_data.config_mutex);
25 23
26 zfcp_erp_adapter_shutdown(adapter, 0, "ccsusp1", NULL); 24 spin_lock_irqsave(&zfcp_ccw_adapter_ref_lock, flags);
27 zfcp_erp_wait(adapter); 25 adapter = dev_get_drvdata(&cdev->dev);
26 if (adapter)
27 kref_get(&adapter->ref);
28 spin_unlock_irqrestore(&zfcp_ccw_adapter_ref_lock, flags);
29 return adapter;
30}
28 31
29 mutex_unlock(&zfcp_data.config_mutex); 32void zfcp_ccw_adapter_put(struct zfcp_adapter *adapter)
33{
34 unsigned long flags;
30 35
31 return 0; 36 spin_lock_irqsave(&zfcp_ccw_adapter_ref_lock, flags);
37 kref_put(&adapter->ref, zfcp_adapter_release);
38 spin_unlock_irqrestore(&zfcp_ccw_adapter_ref_lock, flags);
32} 39}
33 40
34static int zfcp_ccw_activate(struct ccw_device *cdev) 41static int zfcp_ccw_activate(struct ccw_device *cdev)
35 42
36{ 43{
37 struct zfcp_adapter *adapter = dev_get_drvdata(&cdev->dev); 44 struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
38 45
39 if (!adapter) 46 if (!adapter)
40 return 0; 47 return 0;
@@ -46,6 +53,8 @@ static int zfcp_ccw_activate(struct ccw_device *cdev)
46 zfcp_erp_wait(adapter); 53 zfcp_erp_wait(adapter);
47 flush_work(&adapter->scan_work); 54 flush_work(&adapter->scan_work);
48 55
56 zfcp_ccw_adapter_put(adapter);
57
49 return 0; 58 return 0;
50} 59}
51 60
@@ -67,28 +76,28 @@ int zfcp_ccw_priv_sch(struct zfcp_adapter *adapter)
67 76
68/** 77/**
69 * zfcp_ccw_probe - probe function of zfcp driver 78 * zfcp_ccw_probe - probe function of zfcp driver
70 * @ccw_device: pointer to belonging ccw device 79 * @cdev: pointer to belonging ccw device
71 * 80 *
72 * This function gets called by the common i/o layer for each FCP 81 * This function gets called by the common i/o layer for each FCP
73 * device found on the current system. This is only a stub to make cio 82 * device found on the current system. This is only a stub to make cio
74 * work: To only allocate adapter resources for devices actually used, 83 * work: To only allocate adapter resources for devices actually used,
75 * the allocation is deferred to the first call to ccw_set_online. 84 * the allocation is deferred to the first call to ccw_set_online.
76 */ 85 */
77static int zfcp_ccw_probe(struct ccw_device *ccw_device) 86static int zfcp_ccw_probe(struct ccw_device *cdev)
78{ 87{
79 return 0; 88 return 0;
80} 89}
81 90
82/** 91/**
83 * zfcp_ccw_remove - remove function of zfcp driver 92 * zfcp_ccw_remove - remove function of zfcp driver
84 * @ccw_device: pointer to belonging ccw device 93 * @cdev: pointer to belonging ccw device
85 * 94 *
86 * This function gets called by the common i/o layer and removes an adapter 95 * This function gets called by the common i/o layer and removes an adapter
87 * from the system. Task of this function is to get rid of all units and 96 * from the system. Task of this function is to get rid of all units and
88 * ports that belong to this adapter. And in addition all resources of this 97 * ports that belong to this adapter. And in addition all resources of this
89 * adapter will be freed too. 98 * adapter will be freed too.
90 */ 99 */
91static void zfcp_ccw_remove(struct ccw_device *ccw_device) 100static void zfcp_ccw_remove(struct ccw_device *cdev)
92{ 101{
93 struct zfcp_adapter *adapter; 102 struct zfcp_adapter *adapter;
94 struct zfcp_port *port, *p; 103 struct zfcp_port *port, *p;
@@ -96,49 +105,35 @@ static void zfcp_ccw_remove(struct ccw_device *ccw_device)
96 LIST_HEAD(unit_remove_lh); 105 LIST_HEAD(unit_remove_lh);
97 LIST_HEAD(port_remove_lh); 106 LIST_HEAD(port_remove_lh);
98 107
99 ccw_device_set_offline(ccw_device); 108 ccw_device_set_offline(cdev);
100 109
101 mutex_lock(&zfcp_data.config_mutex); 110 adapter = zfcp_ccw_adapter_by_cdev(cdev);
102 adapter = dev_get_drvdata(&ccw_device->dev);
103 if (!adapter) 111 if (!adapter)
104 goto out; 112 return;
105 mutex_unlock(&zfcp_data.config_mutex);
106 113
107 cancel_work_sync(&adapter->scan_work); 114 write_lock_irq(&adapter->port_list_lock);
108 115 list_for_each_entry_safe(port, p, &adapter->port_list, list) {
109 mutex_lock(&zfcp_data.config_mutex); 116 write_lock(&port->unit_list_lock);
110 117 list_for_each_entry_safe(unit, u, &port->unit_list, list)
111 /* this also removes the scsi devices, so call it first */
112 zfcp_adapter_scsi_unregister(adapter);
113
114 write_lock_irq(&zfcp_data.config_lock);
115 list_for_each_entry_safe(port, p, &adapter->port_list_head, list) {
116 list_for_each_entry_safe(unit, u, &port->unit_list_head, list) {
117 list_move(&unit->list, &unit_remove_lh); 118 list_move(&unit->list, &unit_remove_lh);
118 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, 119 write_unlock(&port->unit_list_lock);
119 &unit->status);
120 }
121 list_move(&port->list, &port_remove_lh); 120 list_move(&port->list, &port_remove_lh);
122 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
123 } 121 }
124 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status); 122 write_unlock_irq(&adapter->port_list_lock);
125 write_unlock_irq(&zfcp_data.config_lock); 123 zfcp_ccw_adapter_put(adapter); /* put from zfcp_ccw_adapter_by_cdev */
126 124
127 list_for_each_entry_safe(port, p, &port_remove_lh, list) { 125 list_for_each_entry_safe(unit, u, &unit_remove_lh, list)
128 list_for_each_entry_safe(unit, u, &unit_remove_lh, list) 126 zfcp_device_unregister(&unit->dev, &zfcp_sysfs_unit_attrs);
129 zfcp_unit_dequeue(unit);
130 zfcp_port_dequeue(port);
131 }
132 wait_event(adapter->remove_wq, atomic_read(&adapter->refcount) == 0);
133 zfcp_adapter_dequeue(adapter);
134 127
135out: 128 list_for_each_entry_safe(port, p, &port_remove_lh, list)
136 mutex_unlock(&zfcp_data.config_mutex); 129 zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs);
130
131 zfcp_adapter_unregister(adapter);
137} 132}
138 133
139/** 134/**
140 * zfcp_ccw_set_online - set_online function of zfcp driver 135 * zfcp_ccw_set_online - set_online function of zfcp driver
141 * @ccw_device: pointer to belonging ccw device 136 * @cdev: pointer to belonging ccw device
142 * 137 *
143 * This function gets called by the common i/o layer and sets an 138 * This function gets called by the common i/o layer and sets an
144 * adapter into state online. The first call will allocate all 139 * adapter into state online. The first call will allocate all
@@ -149,27 +144,24 @@ out:
149 * the SCSI stack, that the QDIO queues will be set up and that the 144 * the SCSI stack, that the QDIO queues will be set up and that the
150 * adapter will be opened. 145 * adapter will be opened.
151 */ 146 */
152static int zfcp_ccw_set_online(struct ccw_device *ccw_device) 147static int zfcp_ccw_set_online(struct ccw_device *cdev)
153{ 148{
154 struct zfcp_adapter *adapter; 149 struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
155 int ret = 0;
156
157 mutex_lock(&zfcp_data.config_mutex);
158 adapter = dev_get_drvdata(&ccw_device->dev);
159 150
160 if (!adapter) { 151 if (!adapter) {
161 ret = zfcp_adapter_enqueue(ccw_device); 152 adapter = zfcp_adapter_enqueue(cdev);
162 if (ret) { 153
163 dev_err(&ccw_device->dev, 154 if (IS_ERR(adapter)) {
155 dev_err(&cdev->dev,
164 "Setting up data structures for the " 156 "Setting up data structures for the "
165 "FCP adapter failed\n"); 157 "FCP adapter failed\n");
166 goto out; 158 return PTR_ERR(adapter);
167 } 159 }
168 adapter = dev_get_drvdata(&ccw_device->dev); 160 kref_get(&adapter->ref);
169 } 161 }
170 162
171 /* initialize request counter */ 163 /* initialize request counter */
172 BUG_ON(!zfcp_reqlist_isempty(adapter)); 164 BUG_ON(!zfcp_reqlist_isempty(adapter->req_list));
173 adapter->req_no = 0; 165 adapter->req_no = 0;
174 166
175 zfcp_erp_modify_adapter_status(adapter, "ccsonl1", NULL, 167 zfcp_erp_modify_adapter_status(adapter, "ccsonl1", NULL,
@@ -177,58 +169,61 @@ static int zfcp_ccw_set_online(struct ccw_device *ccw_device)
177 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, 169 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
178 "ccsonl2", NULL); 170 "ccsonl2", NULL);
179 zfcp_erp_wait(adapter); 171 zfcp_erp_wait(adapter);
180out: 172
181 mutex_unlock(&zfcp_data.config_mutex); 173 flush_work(&adapter->scan_work);
182 if (!ret) 174
183 flush_work(&adapter->scan_work); 175 zfcp_ccw_adapter_put(adapter);
184 return ret; 176 return 0;
185} 177}
186 178
187/** 179/**
188 * zfcp_ccw_set_offline - set_offline function of zfcp driver 180 * zfcp_ccw_set_offline - set_offline function of zfcp driver
189 * @ccw_device: pointer to belonging ccw device 181 * @cdev: pointer to belonging ccw device
190 * 182 *
191 * This function gets called by the common i/o layer and sets an adapter 183 * This function gets called by the common i/o layer and sets an adapter
192 * into state offline. 184 * into state offline.
193 */ 185 */
194static int zfcp_ccw_set_offline(struct ccw_device *ccw_device) 186static int zfcp_ccw_set_offline(struct ccw_device *cdev)
195{ 187{
196 struct zfcp_adapter *adapter; 188 struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
189
190 if (!adapter)
191 return 0;
197 192
198 mutex_lock(&zfcp_data.config_mutex);
199 adapter = dev_get_drvdata(&ccw_device->dev);
200 zfcp_erp_adapter_shutdown(adapter, 0, "ccsoff1", NULL); 193 zfcp_erp_adapter_shutdown(adapter, 0, "ccsoff1", NULL);
201 zfcp_erp_wait(adapter); 194 zfcp_erp_wait(adapter);
202 mutex_unlock(&zfcp_data.config_mutex); 195
196 zfcp_ccw_adapter_put(adapter);
203 return 0; 197 return 0;
204} 198}
205 199
206/** 200/**
207 * zfcp_ccw_notify - ccw notify function 201 * zfcp_ccw_notify - ccw notify function
208 * @ccw_device: pointer to belonging ccw device 202 * @cdev: pointer to belonging ccw device
209 * @event: indicates if adapter was detached or attached 203 * @event: indicates if adapter was detached or attached
210 * 204 *
211 * This function gets called by the common i/o layer if an adapter has gone 205 * This function gets called by the common i/o layer if an adapter has gone
212 * or reappeared. 206 * or reappeared.
213 */ 207 */
214static int zfcp_ccw_notify(struct ccw_device *ccw_device, int event) 208static int zfcp_ccw_notify(struct ccw_device *cdev, int event)
215{ 209{
216 struct zfcp_adapter *adapter = dev_get_drvdata(&ccw_device->dev); 210 struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
211
212 if (!adapter)
213 return 1;
217 214
218 switch (event) { 215 switch (event) {
219 case CIO_GONE: 216 case CIO_GONE:
220 dev_warn(&adapter->ccw_device->dev, 217 dev_warn(&cdev->dev, "The FCP device has been detached\n");
221 "The FCP device has been detached\n");
222 zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti1", NULL); 218 zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti1", NULL);
223 break; 219 break;
224 case CIO_NO_PATH: 220 case CIO_NO_PATH:
225 dev_warn(&adapter->ccw_device->dev, 221 dev_warn(&cdev->dev,
226 "The CHPID for the FCP device is offline\n"); 222 "The CHPID for the FCP device is offline\n");
227 zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti2", NULL); 223 zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti2", NULL);
228 break; 224 break;
229 case CIO_OPER: 225 case CIO_OPER:
230 dev_info(&adapter->ccw_device->dev, 226 dev_info(&cdev->dev, "The FCP device is operational again\n");
231 "The FCP device is operational again\n");
232 zfcp_erp_modify_adapter_status(adapter, "ccnoti3", NULL, 227 zfcp_erp_modify_adapter_status(adapter, "ccnoti3", NULL,
233 ZFCP_STATUS_COMMON_RUNNING, 228 ZFCP_STATUS_COMMON_RUNNING,
234 ZFCP_SET); 229 ZFCP_SET);
@@ -236,11 +231,13 @@ static int zfcp_ccw_notify(struct ccw_device *ccw_device, int event)
236 "ccnoti4", NULL); 231 "ccnoti4", NULL);
237 break; 232 break;
238 case CIO_BOXED: 233 case CIO_BOXED:
239 dev_warn(&adapter->ccw_device->dev, "The FCP device " 234 dev_warn(&cdev->dev, "The FCP device did not respond within "
240 "did not respond within the specified time\n"); 235 "the specified time\n");
241 zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti5", NULL); 236 zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti5", NULL);
242 break; 237 break;
243 } 238 }
239
240 zfcp_ccw_adapter_put(adapter);
244 return 1; 241 return 1;
245} 242}
246 243
@@ -250,18 +247,16 @@ static int zfcp_ccw_notify(struct ccw_device *ccw_device, int event)
250 */ 247 */
251static void zfcp_ccw_shutdown(struct ccw_device *cdev) 248static void zfcp_ccw_shutdown(struct ccw_device *cdev)
252{ 249{
253 struct zfcp_adapter *adapter; 250 struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
254 251
255 mutex_lock(&zfcp_data.config_mutex);
256 adapter = dev_get_drvdata(&cdev->dev);
257 if (!adapter) 252 if (!adapter)
258 goto out; 253 return;
259 254
260 zfcp_erp_adapter_shutdown(adapter, 0, "ccshut1", NULL); 255 zfcp_erp_adapter_shutdown(adapter, 0, "ccshut1", NULL);
261 zfcp_erp_wait(adapter); 256 zfcp_erp_wait(adapter);
262 zfcp_erp_thread_kill(adapter); 257 zfcp_erp_thread_kill(adapter);
263out: 258
264 mutex_unlock(&zfcp_data.config_mutex); 259 zfcp_ccw_adapter_put(adapter);
265} 260}
266 261
267struct ccw_driver zfcp_ccw_driver = { 262struct ccw_driver zfcp_ccw_driver = {
@@ -274,18 +269,7 @@ struct ccw_driver zfcp_ccw_driver = {
274 .set_offline = zfcp_ccw_set_offline, 269 .set_offline = zfcp_ccw_set_offline,
275 .notify = zfcp_ccw_notify, 270 .notify = zfcp_ccw_notify,
276 .shutdown = zfcp_ccw_shutdown, 271 .shutdown = zfcp_ccw_shutdown,
277 .freeze = zfcp_ccw_suspend, 272 .freeze = zfcp_ccw_set_offline,
278 .thaw = zfcp_ccw_activate, 273 .thaw = zfcp_ccw_activate,
279 .restore = zfcp_ccw_activate, 274 .restore = zfcp_ccw_activate,
280}; 275};
281
282/**
283 * zfcp_ccw_register - ccw register function
284 *
285 * Registers the driver at the common i/o layer. This function will be called
286 * at module load time/system start.
287 */
288int __init zfcp_ccw_register(void)
289{
290 return ccw_driver_register(&zfcp_ccw_driver);
291}
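
With zfcp_ccw_register() gone and adapter lifetime now tied to a kref, every ccw callback in the rewritten zfcp_ccw.c follows the same get/put discipline: zfcp_ccw_adapter_by_cdev() takes the kref under zfcp_ccw_adapter_ref_lock (and may return NULL), and each successful lookup is balanced by zfcp_ccw_adapter_put(). A sketch of that discipline, using a hypothetical callback name:

static int example_ccw_callback(struct ccw_device *cdev)
{
	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);

	if (!adapter)
		return 0;		/* no adapter allocated for this device yet */

	/* ... act on the adapter, e.g. trigger ERP and wait for it ... */

	zfcp_ccw_adapter_put(adapter);	/* balance the kref_get in the lookup */
	return 0;
}
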
diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c
index ef681dfed0cc..25d9e0ae9c57 100644
--- a/drivers/s390/scsi/zfcp_cfdc.c
+++ b/drivers/s390/scsi/zfcp_cfdc.c
@@ -10,8 +10,10 @@
10#define KMSG_COMPONENT "zfcp" 10#define KMSG_COMPONENT "zfcp"
11#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 11#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
12 12
13#include <linux/slab.h>
13#include <linux/types.h> 14#include <linux/types.h>
14#include <linux/miscdevice.h> 15#include <linux/miscdevice.h>
16#include <asm/compat.h>
15#include <asm/ccwdev.h> 17#include <asm/ccwdev.h>
16#include "zfcp_def.h" 18#include "zfcp_def.h"
17#include "zfcp_ext.h" 19#include "zfcp_ext.h"
@@ -86,22 +88,17 @@ static int zfcp_cfdc_copy_to_user(void __user *user_buffer,
86static struct zfcp_adapter *zfcp_cfdc_get_adapter(u32 devno) 88static struct zfcp_adapter *zfcp_cfdc_get_adapter(u32 devno)
87{ 89{
88 char busid[9]; 90 char busid[9];
89 struct ccw_device *ccwdev; 91 struct ccw_device *cdev;
90 struct zfcp_adapter *adapter = NULL; 92 struct zfcp_adapter *adapter;
91 93
92 snprintf(busid, sizeof(busid), "0.0.%04x", devno); 94 snprintf(busid, sizeof(busid), "0.0.%04x", devno);
93 ccwdev = get_ccwdev_by_busid(&zfcp_ccw_driver, busid); 95 cdev = get_ccwdev_by_busid(&zfcp_ccw_driver, busid);
94 if (!ccwdev) 96 if (!cdev)
95 goto out; 97 return NULL;
96 98
97 adapter = dev_get_drvdata(&ccwdev->dev); 99 adapter = zfcp_ccw_adapter_by_cdev(cdev);
98 if (!adapter) 100
99 goto out_put; 101 put_device(&cdev->dev);
100
101 zfcp_adapter_get(adapter);
102out_put:
103 put_device(&ccwdev->dev);
104out:
105 return adapter; 102 return adapter;
106} 103}
107 104
@@ -168,7 +165,7 @@ static void zfcp_cfdc_req_to_sense(struct zfcp_cfdc_data *data,
168} 165}
169 166
170static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command, 167static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command,
171 unsigned long buffer) 168 unsigned long arg)
172{ 169{
173 struct zfcp_cfdc_data *data; 170 struct zfcp_cfdc_data *data;
174 struct zfcp_cfdc_data __user *data_user; 171 struct zfcp_cfdc_data __user *data_user;
@@ -180,7 +177,11 @@ static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command,
180 if (command != ZFCP_CFDC_IOC) 177 if (command != ZFCP_CFDC_IOC)
181 return -ENOTTY; 178 return -ENOTTY;
182 179
183 data_user = (void __user *) buffer; 180 if (is_compat_task())
181 data_user = compat_ptr(arg);
182 else
183 data_user = (void __user *)arg;
184
184 if (!data_user) 185 if (!data_user)
185 return -EINVAL; 186 return -EINVAL;
186 187
@@ -212,7 +213,6 @@ static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command,
212 retval = -ENXIO; 213 retval = -ENXIO;
213 goto free_buffer; 214 goto free_buffer;
214 } 215 }
215 zfcp_adapter_get(adapter);
216 216
217 retval = zfcp_cfdc_sg_setup(data->command, fsf_cfdc->sg, 217 retval = zfcp_cfdc_sg_setup(data->command, fsf_cfdc->sg,
218 data_user->control_file); 218 data_user->control_file);
@@ -245,7 +245,7 @@ static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command,
245 free_sg: 245 free_sg:
246 zfcp_sg_free_table(fsf_cfdc->sg, ZFCP_CFDC_PAGES); 246 zfcp_sg_free_table(fsf_cfdc->sg, ZFCP_CFDC_PAGES);
247 adapter_put: 247 adapter_put:
248 zfcp_adapter_put(adapter); 248 zfcp_ccw_adapter_put(adapter);
249 free_buffer: 249 free_buffer:
250 kfree(data); 250 kfree(data);
251 no_mem_sense: 251 no_mem_sense:
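
The zfcp_cfdc.c hunk above also makes the ioctl usable from 31-bit compat callers: the user pointer arrives as a 32-bit value in 'arg' and must be widened with compat_ptr() before it can be used as a __user pointer. Reduced to its core, the added branch looks like the following sketch (the wrapper function exists only for illustration):

#include <asm/compat.h>

static void __user *example_ioctl_user_ptr(unsigned long arg)
{
	if (is_compat_task())
		return compat_ptr(arg);		/* widen 31-bit user pointer */

	return (void __user *)arg;		/* native 64-bit caller */
}
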
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 215b70749e95..075852f6968c 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -10,9 +10,11 @@
10#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
11 11
12#include <linux/ctype.h> 12#include <linux/ctype.h>
13#include <linux/slab.h>
13#include <asm/debug.h> 14#include <asm/debug.h>
14#include "zfcp_dbf.h" 15#include "zfcp_dbf.h"
15#include "zfcp_ext.h" 16#include "zfcp_ext.h"
17#include "zfcp_fc.h"
16 18
17static u32 dbfsize = 4; 19static u32 dbfsize = 4;
18 20
@@ -139,9 +141,9 @@ void _zfcp_dbf_hba_fsf_response(const char *tag2, int level,
139 memcpy(response->fsf_status_qual, 141 memcpy(response->fsf_status_qual,
140 fsf_status_qual, FSF_STATUS_QUALIFIER_SIZE); 142 fsf_status_qual, FSF_STATUS_QUALIFIER_SIZE);
141 response->fsf_req_status = fsf_req->status; 143 response->fsf_req_status = fsf_req->status;
142 response->sbal_first = fsf_req->queue_req.sbal_first; 144 response->sbal_first = fsf_req->qdio_req.sbal_first;
143 response->sbal_last = fsf_req->queue_req.sbal_last; 145 response->sbal_last = fsf_req->qdio_req.sbal_last;
144 response->sbal_response = fsf_req->queue_req.sbal_response; 146 response->sbal_response = fsf_req->qdio_req.sbal_response;
145 response->pool = fsf_req->pool != NULL; 147 response->pool = fsf_req->pool != NULL;
146 response->erp_action = (unsigned long)fsf_req->erp_action; 148 response->erp_action = (unsigned long)fsf_req->erp_action;
147 149
@@ -177,8 +179,7 @@ void _zfcp_dbf_hba_fsf_response(const char *tag2, int level,
177 179
178 case FSF_QTCB_SEND_ELS: 180 case FSF_QTCB_SEND_ELS:
179 send_els = (struct zfcp_send_els *)fsf_req->data; 181 send_els = (struct zfcp_send_els *)fsf_req->data;
180 response->u.els.d_id = qtcb->bottom.support.d_id; 182 response->u.els.d_id = ntoh24(qtcb->bottom.support.d_id);
181 response->u.els.ls_code = send_els->ls_code >> 24;
182 break; 183 break;
183 184
184 case FSF_QTCB_ABORT_FCP_CMND: 185 case FSF_QTCB_ABORT_FCP_CMND:
@@ -327,7 +328,7 @@ static void zfcp_dbf_hba_view_response(char **p,
327 break; 328 break;
328 zfcp_dbf_out(p, "scsi_cmnd", "0x%0Lx", r->u.fcp.cmnd); 329 zfcp_dbf_out(p, "scsi_cmnd", "0x%0Lx", r->u.fcp.cmnd);
329 zfcp_dbf_out(p, "scsi_serial", "0x%016Lx", r->u.fcp.serial); 330 zfcp_dbf_out(p, "scsi_serial", "0x%016Lx", r->u.fcp.serial);
330 p += sprintf(*p, "\n"); 331 *p += sprintf(*p, "\n");
331 break; 332 break;
332 333
333 case FSF_QTCB_OPEN_PORT_WITH_DID: 334 case FSF_QTCB_OPEN_PORT_WITH_DID:
@@ -348,7 +349,6 @@ static void zfcp_dbf_hba_view_response(char **p,
348 349
349 case FSF_QTCB_SEND_ELS: 350 case FSF_QTCB_SEND_ELS:
350 zfcp_dbf_out(p, "d_id", "0x%06x", r->u.els.d_id); 351 zfcp_dbf_out(p, "d_id", "0x%06x", r->u.els.d_id);
351 zfcp_dbf_out(p, "ls_code", "0x%02x", r->u.els.ls_code);
352 break; 352 break;
353 353
354 case FSF_QTCB_ABORT_FCP_CMND: 354 case FSF_QTCB_ABORT_FCP_CMND:
@@ -577,7 +577,8 @@ void zfcp_dbf_rec_adapter(char *id, void *ref, struct zfcp_dbf *dbf)
577 struct zfcp_adapter *adapter = dbf->adapter; 577 struct zfcp_adapter *adapter = dbf->adapter;
578 578
579 zfcp_dbf_rec_target(id, ref, dbf, &adapter->status, 579 zfcp_dbf_rec_target(id, ref, dbf, &adapter->status,
580 &adapter->erp_counter, 0, 0, 0); 580 &adapter->erp_counter, 0, 0,
581 ZFCP_DBF_INVALID_LUN);
581} 582}
582 583
583/** 584/**
@@ -591,8 +592,8 @@ void zfcp_dbf_rec_port(char *id, void *ref, struct zfcp_port *port)
591 struct zfcp_dbf *dbf = port->adapter->dbf; 592 struct zfcp_dbf *dbf = port->adapter->dbf;
592 593
593 zfcp_dbf_rec_target(id, ref, dbf, &port->status, 594 zfcp_dbf_rec_target(id, ref, dbf, &port->status,
594 &port->erp_counter, port->wwpn, port->d_id, 595 &port->erp_counter, port->wwpn, port->d_id,
595 0); 596 ZFCP_DBF_INVALID_LUN);
596} 597}
597 598
598/** 599/**
@@ -643,10 +644,9 @@ void zfcp_dbf_rec_trigger(char *id2, void *ref, u8 want, u8 need, void *action,
643 r->u.trigger.ps = atomic_read(&port->status); 644 r->u.trigger.ps = atomic_read(&port->status);
644 r->u.trigger.wwpn = port->wwpn; 645 r->u.trigger.wwpn = port->wwpn;
645 } 646 }
646 if (unit) { 647 if (unit)
647 r->u.trigger.us = atomic_read(&unit->status); 648 r->u.trigger.us = atomic_read(&unit->status);
648 r->u.trigger.fcp_lun = unit->fcp_lun; 649 r->u.trigger.fcp_lun = unit ? unit->fcp_lun : ZFCP_DBF_INVALID_LUN;
649 }
650 debug_event(dbf->rec, action ? 1 : 4, r, sizeof(*r)); 650 debug_event(dbf->rec, action ? 1 : 4, r, sizeof(*r));
651 spin_unlock_irqrestore(&dbf->rec_lock, flags); 651 spin_unlock_irqrestore(&dbf->rec_lock, flags);
652} 652}
@@ -669,7 +669,7 @@ void zfcp_dbf_rec_action(char *id2, struct zfcp_erp_action *erp_action)
669 r->u.action.action = (unsigned long)erp_action; 669 r->u.action.action = (unsigned long)erp_action;
670 r->u.action.status = erp_action->status; 670 r->u.action.status = erp_action->status;
671 r->u.action.step = erp_action->step; 671 r->u.action.step = erp_action->step;
672 r->u.action.fsf_req = (unsigned long)erp_action->fsf_req; 672 r->u.action.fsf_req = erp_action->fsf_req_id;
673 debug_event(dbf->rec, 5, r, sizeof(*r)); 673 debug_event(dbf->rec, 5, r, sizeof(*r));
674 spin_unlock_irqrestore(&dbf->rec_lock, flags); 674 spin_unlock_irqrestore(&dbf->rec_lock, flags);
675} 675}
@@ -677,14 +677,14 @@ void zfcp_dbf_rec_action(char *id2, struct zfcp_erp_action *erp_action)
677/** 677/**
678 * zfcp_dbf_san_ct_request - trace event for issued CT request 678 * zfcp_dbf_san_ct_request - trace event for issued CT request
679 * @fsf_req: request containing issued CT data 679 * @fsf_req: request containing issued CT data
680 * @d_id: destination id where ct request is sent to
680 */ 681 */
681void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *fsf_req) 682void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *fsf_req, u32 d_id)
682{ 683{
683 struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data; 684 struct zfcp_fsf_ct_els *ct = (struct zfcp_fsf_ct_els *)fsf_req->data;
684 struct zfcp_wka_port *wka_port = ct->wka_port; 685 struct zfcp_adapter *adapter = fsf_req->adapter;
685 struct zfcp_adapter *adapter = wka_port->adapter;
686 struct zfcp_dbf *dbf = adapter->dbf; 686 struct zfcp_dbf *dbf = adapter->dbf;
687 struct ct_hdr *hdr = sg_virt(ct->req); 687 struct fc_ct_hdr *hdr = sg_virt(ct->req);
688 struct zfcp_dbf_san_record *r = &dbf->san_buf; 688 struct zfcp_dbf_san_record *r = &dbf->san_buf;
689 struct zfcp_dbf_san_record_ct_request *oct = &r->u.ct_req; 689 struct zfcp_dbf_san_record_ct_request *oct = &r->u.ct_req;
690 int level = 3; 690 int level = 3;
@@ -695,19 +695,18 @@ void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *fsf_req)
695 strncpy(r->tag, "octc", ZFCP_DBF_TAG_SIZE); 695 strncpy(r->tag, "octc", ZFCP_DBF_TAG_SIZE);
696 r->fsf_reqid = fsf_req->req_id; 696 r->fsf_reqid = fsf_req->req_id;
697 r->fsf_seqno = fsf_req->seq_no; 697 r->fsf_seqno = fsf_req->seq_no;
698 r->s_id = fc_host_port_id(adapter->scsi_host); 698 oct->d_id = d_id;
699 r->d_id = wka_port->d_id; 699 oct->cmd_req_code = hdr->ct_cmd;
700 oct->cmd_req_code = hdr->cmd_rsp_code; 700 oct->revision = hdr->ct_rev;
701 oct->revision = hdr->revision; 701 oct->gs_type = hdr->ct_fs_type;
702 oct->gs_type = hdr->gs_type; 702 oct->gs_subtype = hdr->ct_fs_subtype;
703 oct->gs_subtype = hdr->gs_subtype; 703 oct->options = hdr->ct_options;
704 oct->options = hdr->options; 704 oct->max_res_size = hdr->ct_mr_size;
705 oct->max_res_size = hdr->max_res_size; 705 oct->len = min((int)ct->req->length - (int)sizeof(struct fc_ct_hdr),
706 oct->len = min((int)ct->req->length - (int)sizeof(struct ct_hdr),
707 ZFCP_DBF_SAN_MAX_PAYLOAD); 706 ZFCP_DBF_SAN_MAX_PAYLOAD);
708 debug_event(dbf->san, level, r, sizeof(*r)); 707 debug_event(dbf->san, level, r, sizeof(*r));
709 zfcp_dbf_hexdump(dbf->san, r, sizeof(*r), level, 708 zfcp_dbf_hexdump(dbf->san, r, sizeof(*r), level,
710 (void *)hdr + sizeof(struct ct_hdr), oct->len); 709 (void *)hdr + sizeof(struct fc_ct_hdr), oct->len);
711 spin_unlock_irqrestore(&dbf->san_lock, flags); 710 spin_unlock_irqrestore(&dbf->san_lock, flags);
712} 711}
713 712
@@ -717,10 +716,9 @@ void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *fsf_req)
717 */ 716 */
718void zfcp_dbf_san_ct_response(struct zfcp_fsf_req *fsf_req) 717void zfcp_dbf_san_ct_response(struct zfcp_fsf_req *fsf_req)
719{ 718{
720 struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data; 719 struct zfcp_fsf_ct_els *ct = (struct zfcp_fsf_ct_els *)fsf_req->data;
721 struct zfcp_wka_port *wka_port = ct->wka_port; 720 struct zfcp_adapter *adapter = fsf_req->adapter;
722 struct zfcp_adapter *adapter = wka_port->adapter; 721 struct fc_ct_hdr *hdr = sg_virt(ct->resp);
723 struct ct_hdr *hdr = sg_virt(ct->resp);
724 struct zfcp_dbf *dbf = adapter->dbf; 722 struct zfcp_dbf *dbf = adapter->dbf;
725 struct zfcp_dbf_san_record *r = &dbf->san_buf; 723 struct zfcp_dbf_san_record *r = &dbf->san_buf;
726 struct zfcp_dbf_san_record_ct_response *rct = &r->u.ct_resp; 724 struct zfcp_dbf_san_record_ct_response *rct = &r->u.ct_resp;
@@ -732,25 +730,23 @@ void zfcp_dbf_san_ct_response(struct zfcp_fsf_req *fsf_req)
732 strncpy(r->tag, "rctc", ZFCP_DBF_TAG_SIZE); 730 strncpy(r->tag, "rctc", ZFCP_DBF_TAG_SIZE);
733 r->fsf_reqid = fsf_req->req_id; 731 r->fsf_reqid = fsf_req->req_id;
734 r->fsf_seqno = fsf_req->seq_no; 732 r->fsf_seqno = fsf_req->seq_no;
735 r->s_id = wka_port->d_id; 733 rct->cmd_rsp_code = hdr->ct_cmd;
736 r->d_id = fc_host_port_id(adapter->scsi_host); 734 rct->revision = hdr->ct_rev;
737 rct->cmd_rsp_code = hdr->cmd_rsp_code; 735 rct->reason_code = hdr->ct_reason;
738 rct->revision = hdr->revision; 736 rct->expl = hdr->ct_explan;
739 rct->reason_code = hdr->reason_code; 737 rct->vendor_unique = hdr->ct_vendor;
740 rct->expl = hdr->reason_code_expl; 738 rct->max_res_size = hdr->ct_mr_size;
741 rct->vendor_unique = hdr->vendor_unique; 739 rct->len = min((int)ct->resp->length - (int)sizeof(struct fc_ct_hdr),
742 rct->max_res_size = hdr->max_res_size;
743 rct->len = min((int)ct->resp->length - (int)sizeof(struct ct_hdr),
744 ZFCP_DBF_SAN_MAX_PAYLOAD); 740 ZFCP_DBF_SAN_MAX_PAYLOAD);
745 debug_event(dbf->san, level, r, sizeof(*r)); 741 debug_event(dbf->san, level, r, sizeof(*r));
746 zfcp_dbf_hexdump(dbf->san, r, sizeof(*r), level, 742 zfcp_dbf_hexdump(dbf->san, r, sizeof(*r), level,
747 (void *)hdr + sizeof(struct ct_hdr), rct->len); 743 (void *)hdr + sizeof(struct fc_ct_hdr), rct->len);
748 spin_unlock_irqrestore(&dbf->san_lock, flags); 744 spin_unlock_irqrestore(&dbf->san_lock, flags);
749} 745}
750 746
751static void zfcp_dbf_san_els(const char *tag, int level, 747static void zfcp_dbf_san_els(const char *tag, int level,
752 struct zfcp_fsf_req *fsf_req, u32 s_id, u32 d_id, 748 struct zfcp_fsf_req *fsf_req, u32 d_id,
753 u8 ls_code, void *buffer, int buflen) 749 void *buffer, int buflen)
754{ 750{
755 struct zfcp_adapter *adapter = fsf_req->adapter; 751 struct zfcp_adapter *adapter = fsf_req->adapter;
756 struct zfcp_dbf *dbf = adapter->dbf; 752 struct zfcp_dbf *dbf = adapter->dbf;
@@ -762,9 +758,7 @@ static void zfcp_dbf_san_els(const char *tag, int level,
762 strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE); 758 strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE);
763 rec->fsf_reqid = fsf_req->req_id; 759 rec->fsf_reqid = fsf_req->req_id;
764 rec->fsf_seqno = fsf_req->seq_no; 760 rec->fsf_seqno = fsf_req->seq_no;
765 rec->s_id = s_id; 761 rec->u.els.d_id = d_id;
766 rec->d_id = d_id;
767 rec->u.els.ls_code = ls_code;
768 debug_event(dbf->san, level, rec, sizeof(*rec)); 762 debug_event(dbf->san, level, rec, sizeof(*rec));
769 zfcp_dbf_hexdump(dbf->san, rec, sizeof(*rec), level, 763 zfcp_dbf_hexdump(dbf->san, rec, sizeof(*rec), level,
770 buffer, min(buflen, ZFCP_DBF_SAN_MAX_PAYLOAD)); 764 buffer, min(buflen, ZFCP_DBF_SAN_MAX_PAYLOAD));
@@ -777,12 +771,11 @@ static void zfcp_dbf_san_els(const char *tag, int level,
777 */ 771 */
778void zfcp_dbf_san_els_request(struct zfcp_fsf_req *fsf_req) 772void zfcp_dbf_san_els_request(struct zfcp_fsf_req *fsf_req)
779{ 773{
780 struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data; 774 struct zfcp_fsf_ct_els *els = (struct zfcp_fsf_ct_els *)fsf_req->data;
775 u32 d_id = ntoh24(fsf_req->qtcb->bottom.support.d_id);
781 776
782 zfcp_dbf_san_els("oels", 2, fsf_req, 777 zfcp_dbf_san_els("oels", 2, fsf_req, d_id,
783 fc_host_port_id(els->adapter->scsi_host), 778 sg_virt(els->req), els->req->length);
784 els->d_id, *(u8 *) sg_virt(els->req),
785 sg_virt(els->req), els->req->length);
786} 779}
787 780
788/** 781/**
@@ -791,12 +784,11 @@ void zfcp_dbf_san_els_request(struct zfcp_fsf_req *fsf_req)
791 */ 784 */
792void zfcp_dbf_san_els_response(struct zfcp_fsf_req *fsf_req) 785void zfcp_dbf_san_els_response(struct zfcp_fsf_req *fsf_req)
793{ 786{
794 struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data; 787 struct zfcp_fsf_ct_els *els = (struct zfcp_fsf_ct_els *)fsf_req->data;
788 u32 d_id = ntoh24(fsf_req->qtcb->bottom.support.d_id);
795 789
796 zfcp_dbf_san_els("rels", 2, fsf_req, els->d_id, 790 zfcp_dbf_san_els("rels", 2, fsf_req, d_id,
797 fc_host_port_id(els->adapter->scsi_host), 791 sg_virt(els->resp), els->resp->length);
798 *(u8 *)sg_virt(els->req), sg_virt(els->resp),
799 els->resp->length);
800} 792}
801 793
802/** 794/**
@@ -805,16 +797,13 @@ void zfcp_dbf_san_els_response(struct zfcp_fsf_req *fsf_req)
805 */ 797 */
806void zfcp_dbf_san_incoming_els(struct zfcp_fsf_req *fsf_req) 798void zfcp_dbf_san_incoming_els(struct zfcp_fsf_req *fsf_req)
807{ 799{
808 struct zfcp_adapter *adapter = fsf_req->adapter;
809 struct fsf_status_read_buffer *buf = 800 struct fsf_status_read_buffer *buf =
810 (struct fsf_status_read_buffer *)fsf_req->data; 801 (struct fsf_status_read_buffer *)fsf_req->data;
811 int length = (int)buf->length - 802 int length = (int)buf->length -
812 (int)((void *)&buf->payload - (void *)buf); 803 (int)((void *)&buf->payload - (void *)buf);
813 804
814 zfcp_dbf_san_els("iels", 1, fsf_req, buf->d_id, 805 zfcp_dbf_san_els("iels", 1, fsf_req, ntoh24(buf->d_id),
815 fc_host_port_id(adapter->scsi_host), 806 (void *)buf->payload.data, length);
816 buf->payload.data[0], (void *)buf->payload.data,
817 length);
818} 807}
819 808
820static int zfcp_dbf_san_view_format(debug_info_t *id, struct debug_view *view, 809static int zfcp_dbf_san_view_format(debug_info_t *id, struct debug_view *view,
@@ -829,11 +818,10 @@ static int zfcp_dbf_san_view_format(debug_info_t *id, struct debug_view *view,
829 zfcp_dbf_tag(&p, "tag", r->tag); 818 zfcp_dbf_tag(&p, "tag", r->tag);
830 zfcp_dbf_out(&p, "fsf_reqid", "0x%0Lx", r->fsf_reqid); 819 zfcp_dbf_out(&p, "fsf_reqid", "0x%0Lx", r->fsf_reqid);
831 zfcp_dbf_out(&p, "fsf_seqno", "0x%08x", r->fsf_seqno); 820 zfcp_dbf_out(&p, "fsf_seqno", "0x%08x", r->fsf_seqno);
832 zfcp_dbf_out(&p, "s_id", "0x%06x", r->s_id);
833 zfcp_dbf_out(&p, "d_id", "0x%06x", r->d_id);
834 821
835 if (strncmp(r->tag, "octc", ZFCP_DBF_TAG_SIZE) == 0) { 822 if (strncmp(r->tag, "octc", ZFCP_DBF_TAG_SIZE) == 0) {
836 struct zfcp_dbf_san_record_ct_request *ct = &r->u.ct_req; 823 struct zfcp_dbf_san_record_ct_request *ct = &r->u.ct_req;
824 zfcp_dbf_out(&p, "d_id", "0x%06x", ct->d_id);
837 zfcp_dbf_out(&p, "cmd_req_code", "0x%04x", ct->cmd_req_code); 825 zfcp_dbf_out(&p, "cmd_req_code", "0x%04x", ct->cmd_req_code);
838 zfcp_dbf_out(&p, "revision", "0x%02x", ct->revision); 826 zfcp_dbf_out(&p, "revision", "0x%02x", ct->revision);
839 zfcp_dbf_out(&p, "gs_type", "0x%02x", ct->gs_type); 827 zfcp_dbf_out(&p, "gs_type", "0x%02x", ct->gs_type);
@@ -852,7 +840,7 @@ static int zfcp_dbf_san_view_format(debug_info_t *id, struct debug_view *view,
852 strncmp(r->tag, "rels", ZFCP_DBF_TAG_SIZE) == 0 || 840 strncmp(r->tag, "rels", ZFCP_DBF_TAG_SIZE) == 0 ||
853 strncmp(r->tag, "iels", ZFCP_DBF_TAG_SIZE) == 0) { 841 strncmp(r->tag, "iels", ZFCP_DBF_TAG_SIZE) == 0) {
854 struct zfcp_dbf_san_record_els *els = &r->u.els; 842 struct zfcp_dbf_san_record_els *els = &r->u.els;
855 zfcp_dbf_out(&p, "ls_code", "0x%02x", els->ls_code); 843 zfcp_dbf_out(&p, "d_id", "0x%06x", els->d_id);
856 } 844 }
857 return p - out_buf; 845 return p - out_buf;
858} 846}
@@ -870,8 +858,9 @@ void _zfcp_dbf_scsi(const char *tag, const char *tag2, int level,
870 struct zfcp_dbf_scsi_record *rec = &dbf->scsi_buf; 858 struct zfcp_dbf_scsi_record *rec = &dbf->scsi_buf;
871 struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec; 859 struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec;
872 unsigned long flags; 860 unsigned long flags;
873 struct fcp_rsp_iu *fcp_rsp; 861 struct fcp_resp_with_ext *fcp_rsp;
874 char *fcp_rsp_info = NULL, *fcp_sns_info = NULL; 862 struct fcp_resp_rsp_info *fcp_rsp_info = NULL;
863 char *fcp_sns_info = NULL;
875 int offset = 0, buflen = 0; 864 int offset = 0, buflen = 0;
876 865
877 spin_lock_irqsave(&dbf->scsi_lock, flags); 866 spin_lock_irqsave(&dbf->scsi_lock, flags);
@@ -895,20 +884,22 @@ void _zfcp_dbf_scsi(const char *tag, const char *tag2, int level,
895 rec->scsi_allowed = scsi_cmnd->allowed; 884 rec->scsi_allowed = scsi_cmnd->allowed;
896 } 885 }
897 if (fsf_req != NULL) { 886 if (fsf_req != NULL) {
898 fcp_rsp = (struct fcp_rsp_iu *) 887 fcp_rsp = (struct fcp_resp_with_ext *)
899 &(fsf_req->qtcb->bottom.io.fcp_rsp); 888 &(fsf_req->qtcb->bottom.io.fcp_rsp);
900 fcp_rsp_info = (unsigned char *) &fcp_rsp[1]; 889 fcp_rsp_info = (struct fcp_resp_rsp_info *)
901 fcp_sns_info = 890 &fcp_rsp[1];
902 zfcp_get_fcp_sns_info_ptr(fcp_rsp); 891 fcp_sns_info = (char *) &fcp_rsp[1];
903 892 if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL)
904 rec->rsp_validity = fcp_rsp->validity.value; 893 fcp_sns_info += fcp_rsp->ext.fr_sns_len;
905 rec->rsp_scsi_status = fcp_rsp->scsi_status; 894
906 rec->rsp_resid = fcp_rsp->fcp_resid; 895 rec->rsp_validity = fcp_rsp->resp.fr_flags;
907 if (fcp_rsp->validity.bits.fcp_rsp_len_valid) 896 rec->rsp_scsi_status = fcp_rsp->resp.fr_status;
908 rec->rsp_code = *(fcp_rsp_info + 3); 897 rec->rsp_resid = fcp_rsp->ext.fr_resid;
909 if (fcp_rsp->validity.bits.fcp_sns_len_valid) { 898 if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL)
910 buflen = min((int)fcp_rsp->fcp_sns_len, 899 rec->rsp_code = fcp_rsp_info->rsp_code;
911 ZFCP_DBF_SCSI_MAX_FCP_SNS_INFO); 900 if (fcp_rsp->resp.fr_flags & FCP_SNS_LEN_VAL) {
901 buflen = min(fcp_rsp->ext.fr_sns_len,
902 (u32)ZFCP_DBF_SCSI_MAX_FCP_SNS_INFO);
912 rec->sns_info_len = buflen; 903 rec->sns_info_len = buflen;
913 memcpy(rec->sns_info, fcp_sns_info, 904 memcpy(rec->sns_info, fcp_sns_info,
914 min(buflen, 905 min(buflen,
@@ -1067,6 +1058,8 @@ err_out:
1067 */ 1058 */
1068void zfcp_dbf_adapter_unregister(struct zfcp_dbf *dbf) 1059void zfcp_dbf_adapter_unregister(struct zfcp_dbf *dbf)
1069{ 1060{
1061 if (!dbf)
1062 return;
1070 debug_unregister(dbf->scsi); 1063 debug_unregister(dbf->scsi);
1071 debug_unregister(dbf->san); 1064 debug_unregister(dbf->san);
1072 debug_unregister(dbf->hba); 1065 debug_unregister(dbf->hba);
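Several of the hunks above convert destination IDs with ntoh24(): the QTCB support bottom and the status read buffer carry the FC D_ID as three big-endian bytes, while the trace records store a u32. A standalone sketch of what that conversion does, using an illustrative helper name rather than the kernel's ntoh24() from the FC headers:

#include <stdint.h>
#include <stdio.h>

/* pack three big-endian bytes into the low 24 bits of a 32-bit value */
static uint32_t example_ntoh24(const uint8_t *p)
{
        return (p[0] << 16) | (p[1] << 8) | p[2];
}

int main(void)
{
        uint8_t d_id[3] = { 0x65, 0x01, 0x13 };          /* example N_Port ID */

        printf("d_id = 0x%06x\n", example_ntoh24(d_id)); /* prints 0x650113 */
        return 0;
}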
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index 6b1461e8f847..457e046f2d28 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -22,6 +22,7 @@
22#ifndef ZFCP_DBF_H 22#ifndef ZFCP_DBF_H
23#define ZFCP_DBF_H 23#define ZFCP_DBF_H
24 24
25#include <scsi/fc/fc_fcp.h>
25#include "zfcp_ext.h" 26#include "zfcp_ext.h"
26#include "zfcp_fsf.h" 27#include "zfcp_fsf.h"
27#include "zfcp_def.h" 28#include "zfcp_def.h"
@@ -29,6 +30,8 @@
29#define ZFCP_DBF_TAG_SIZE 4 30#define ZFCP_DBF_TAG_SIZE 4
30#define ZFCP_DBF_ID_SIZE 7 31#define ZFCP_DBF_ID_SIZE 7
31 32
33#define ZFCP_DBF_INVALID_LUN 0xFFFFFFFFFFFFFFFFull
34
32struct zfcp_dbf_dump { 35struct zfcp_dbf_dump {
33 u8 tag[ZFCP_DBF_TAG_SIZE]; 36 u8 tag[ZFCP_DBF_TAG_SIZE];
34 u32 total_size; /* size of total dump data */ 37 u32 total_size; /* size of total dump data */
@@ -122,7 +125,6 @@ struct zfcp_dbf_hba_record_response {
122 } unit; 125 } unit;
123 struct { 126 struct {
124 u32 d_id; 127 u32 d_id;
125 u8 ls_code;
126 } els; 128 } els;
127 } u; 129 } u;
128} __attribute__ ((packed)); 130} __attribute__ ((packed));
@@ -166,6 +168,7 @@ struct zfcp_dbf_san_record_ct_request {
166 u8 options; 168 u8 options;
167 u16 max_res_size; 169 u16 max_res_size;
168 u32 len; 170 u32 len;
171 u32 d_id;
169} __attribute__ ((packed)); 172} __attribute__ ((packed));
170 173
171struct zfcp_dbf_san_record_ct_response { 174struct zfcp_dbf_san_record_ct_response {
@@ -179,25 +182,22 @@ struct zfcp_dbf_san_record_ct_response {
179} __attribute__ ((packed)); 182} __attribute__ ((packed));
180 183
181struct zfcp_dbf_san_record_els { 184struct zfcp_dbf_san_record_els {
182 u8 ls_code; 185 u32 d_id;
183 u32 len;
184} __attribute__ ((packed)); 186} __attribute__ ((packed));
185 187
186struct zfcp_dbf_san_record { 188struct zfcp_dbf_san_record {
187 u8 tag[ZFCP_DBF_TAG_SIZE]; 189 u8 tag[ZFCP_DBF_TAG_SIZE];
188 u64 fsf_reqid; 190 u64 fsf_reqid;
189 u32 fsf_seqno; 191 u32 fsf_seqno;
190 u32 s_id;
191 u32 d_id;
192 union { 192 union {
193 struct zfcp_dbf_san_record_ct_request ct_req; 193 struct zfcp_dbf_san_record_ct_request ct_req;
194 struct zfcp_dbf_san_record_ct_response ct_resp; 194 struct zfcp_dbf_san_record_ct_response ct_resp;
195 struct zfcp_dbf_san_record_els els; 195 struct zfcp_dbf_san_record_els els;
196 } u; 196 } u;
197#define ZFCP_DBF_SAN_MAX_PAYLOAD 1024
198 u8 payload[32];
199} __attribute__ ((packed)); 197} __attribute__ ((packed));
200 198
199#define ZFCP_DBF_SAN_MAX_PAYLOAD 1024
200
201struct zfcp_dbf_scsi_record { 201struct zfcp_dbf_scsi_record {
202 u8 tag[ZFCP_DBF_TAG_SIZE]; 202 u8 tag[ZFCP_DBF_TAG_SIZE];
203 u8 tag2[ZFCP_DBF_TAG_SIZE]; 203 u8 tag2[ZFCP_DBF_TAG_SIZE];
@@ -303,17 +303,31 @@ void zfcp_dbf_scsi(const char *tag, const char *tag2, int level,
303 303
304/** 304/**
305 * zfcp_dbf_scsi_result - trace event for SCSI command completion 305 * zfcp_dbf_scsi_result - trace event for SCSI command completion
306 * @tag: tag indicating success or failure of SCSI command 306 * @dbf: adapter dbf trace
307 * @level: trace level applicable for this event 307 * @scmd: SCSI command pointer
308 * @adapter: adapter that has been used to issue the SCSI command 308 * @req: FSF request used to issue SCSI command
309 */
310static inline
311void zfcp_dbf_scsi_result(struct zfcp_dbf *dbf, struct scsi_cmnd *scmd,
312 struct zfcp_fsf_req *req)
313{
314 if (scmd->result != 0)
315 zfcp_dbf_scsi("rslt", "erro", 3, dbf, scmd, req, 0);
316 else if (scmd->retries > 0)
317 zfcp_dbf_scsi("rslt", "retr", 4, dbf, scmd, req, 0);
318 else
319 zfcp_dbf_scsi("rslt", "norm", 6, dbf, scmd, req, 0);
320}
321
322/**
323 * zfcp_dbf_scsi_fail_send - trace event for failure to send SCSI command
324 * @dbf: adapter dbf trace
309 * @scmd: SCSI command pointer 325 * @scmd: SCSI command pointer
310 * @fsf_req: request used to issue SCSI command (might be NULL)
311 */ 326 */
312static inline 327static inline
313void zfcp_dbf_scsi_result(const char *tag, int level, struct zfcp_dbf *dbf, 328void zfcp_dbf_scsi_fail_send(struct zfcp_dbf *dbf, struct scsi_cmnd *scmd)
314 struct scsi_cmnd *scmd, struct zfcp_fsf_req *fsf_req)
315{ 329{
316 zfcp_dbf_scsi("rslt", tag, level, dbf, scmd, fsf_req, 0); 330 zfcp_dbf_scsi("rslt", "fail", 4, dbf, scmd, NULL, 0);
317} 331}
318 332
319/** 333/**
@@ -343,7 +357,7 @@ static inline
343void zfcp_dbf_scsi_devreset(const char *tag, u8 flag, struct zfcp_unit *unit, 357void zfcp_dbf_scsi_devreset(const char *tag, u8 flag, struct zfcp_unit *unit,
344 struct scsi_cmnd *scsi_cmnd) 358 struct scsi_cmnd *scsi_cmnd)
345{ 359{
346 zfcp_dbf_scsi(flag == FCP_TARGET_RESET ? "trst" : "lrst", tag, 1, 360 zfcp_dbf_scsi(flag == FCP_TMF_TGT_RESET ? "trst" : "lrst", tag, 1,
347 unit->port->adapter->dbf, scsi_cmnd, NULL, 0); 361 unit->port->adapter->dbf, scsi_cmnd, NULL, 0);
348} 362}
349 363
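zfcp_dbf_scsi_result() no longer takes a caller-supplied tag and trace level; it derives the level from the command outcome, and the separately added zfcp_dbf_scsi_fail_send() covers commands that were never handed to the FCP channel. A hypothetical caller, only to show how the two inlines divide the work (the function and variable names are assumptions, not taken from this patch):

static void example_complete_scsi_cmnd(struct zfcp_adapter *adapter,
                                       struct scsi_cmnd *scmd,
                                       struct zfcp_fsf_req *req)
{
        if (req)
                zfcp_dbf_scsi_result(adapter->dbf, scmd, req); /* sent and completed */
        else
                zfcp_dbf_scsi_fail_send(adapter->dbf, scmd);   /* never sent */

        scmd->scsi_done(scmd);
}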
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 7da2fad8f515..7131c7db1f04 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -3,7 +3,7 @@
3 * 3 *
4 * Global definitions for the zfcp device driver. 4 * Global definitions for the zfcp device driver.
5 * 5 *
6 * Copyright IBM Corporation 2002, 2009 6 * Copyright IBM Corporation 2002, 2010
7 */ 7 */
8 8
9#ifndef ZFCP_DEF_H 9#ifndef ZFCP_DEF_H
@@ -33,15 +33,13 @@
33#include <scsi/scsi_transport_fc.h> 33#include <scsi/scsi_transport_fc.h>
34#include <scsi/scsi_bsg_fc.h> 34#include <scsi/scsi_bsg_fc.h>
35#include <asm/ccwdev.h> 35#include <asm/ccwdev.h>
36#include <asm/qdio.h>
37#include <asm/debug.h> 36#include <asm/debug.h>
38#include <asm/ebcdic.h> 37#include <asm/ebcdic.h>
39#include <asm/sysinfo.h> 38#include <asm/sysinfo.h>
40#include "zfcp_fsf.h" 39#include "zfcp_fsf.h"
40#include "zfcp_qdio.h"
41 41
42/********************* GENERAL DEFINES *********************************/ 42struct zfcp_reqlist;
43
44#define REQUEST_LIST_SIZE 128
45 43
46/********************* SCSI SPECIFIC DEFINES *********************************/ 44/********************* SCSI SPECIFIC DEFINES *********************************/
47#define ZFCP_SCSI_ER_TIMEOUT (10*HZ) 45#define ZFCP_SCSI_ER_TIMEOUT (10*HZ)
@@ -71,131 +69,6 @@
71/* timeout value for "default timer" for fsf requests */ 69/* timeout value for "default timer" for fsf requests */
72#define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ) 70#define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ)
73 71
74/*************** FIBRE CHANNEL PROTOCOL SPECIFIC DEFINES ********************/
75
76/* task attribute values in FCP-2 FCP_CMND IU */
77#define SIMPLE_Q 0
78#define HEAD_OF_Q 1
79#define ORDERED_Q 2
80#define ACA_Q 4
81#define UNTAGGED 5
82
83/* task management flags in FCP-2 FCP_CMND IU */
84#define FCP_CLEAR_ACA 0x40
85#define FCP_TARGET_RESET 0x20
86#define FCP_LOGICAL_UNIT_RESET 0x10
87#define FCP_CLEAR_TASK_SET 0x04
88#define FCP_ABORT_TASK_SET 0x02
89
90#define FCP_CDB_LENGTH 16
91
92#define ZFCP_DID_MASK 0x00FFFFFF
93
94/* FCP(-2) FCP_CMND IU */
95struct fcp_cmnd_iu {
96 u64 fcp_lun; /* FCP logical unit number */
97 u8 crn; /* command reference number */
98 u8 reserved0:5; /* reserved */
99 u8 task_attribute:3; /* task attribute */
100 u8 task_management_flags; /* task management flags */
101 u8 add_fcp_cdb_length:6; /* additional FCP_CDB length */
102 u8 rddata:1; /* read data */
103 u8 wddata:1; /* write data */
104 u8 fcp_cdb[FCP_CDB_LENGTH];
105} __attribute__((packed));
106
107/* FCP(-2) FCP_RSP IU */
108struct fcp_rsp_iu {
109 u8 reserved0[10];
110 union {
111 struct {
112 u8 reserved1:3;
113 u8 fcp_conf_req:1;
114 u8 fcp_resid_under:1;
115 u8 fcp_resid_over:1;
116 u8 fcp_sns_len_valid:1;
117 u8 fcp_rsp_len_valid:1;
118 } bits;
119 u8 value;
120 } validity;
121 u8 scsi_status;
122 u32 fcp_resid;
123 u32 fcp_sns_len;
124 u32 fcp_rsp_len;
125} __attribute__((packed));
126
127
128#define RSP_CODE_GOOD 0
129#define RSP_CODE_LENGTH_MISMATCH 1
130#define RSP_CODE_FIELD_INVALID 2
131#define RSP_CODE_RO_MISMATCH 3
132#define RSP_CODE_TASKMAN_UNSUPP 4
133#define RSP_CODE_TASKMAN_FAILED 5
134
135/* see fc-fs */
136#define LS_RSCN 0x61
137#define LS_LOGO 0x05
138#define LS_PLOGI 0x03
139
140struct fcp_rscn_head {
141 u8 command;
142 u8 page_length; /* always 0x04 */
143 u16 payload_len;
144} __attribute__((packed));
145
146struct fcp_rscn_element {
147 u8 reserved:2;
148 u8 event_qual:4;
149 u8 addr_format:2;
150 u32 nport_did:24;
151} __attribute__((packed));
152
153/* see fc-ph */
154struct fcp_logo {
155 u32 command;
156 u32 nport_did;
157 u64 nport_wwpn;
158} __attribute__((packed));
159
160/*
161 * FC-FS stuff
162 */
163#define R_A_TOV 10 /* seconds */
164
165#define ZFCP_LS_RLS 0x0f
166#define ZFCP_LS_ADISC 0x52
167#define ZFCP_LS_RPS 0x56
168#define ZFCP_LS_RSCN 0x61
169#define ZFCP_LS_RNID 0x78
170
171struct zfcp_ls_adisc {
172 u8 code;
173 u8 field[3];
174 u32 hard_nport_id;
175 u64 wwpn;
176 u64 wwnn;
177 u32 nport_id;
178} __attribute__ ((packed));
179
180/*
181 * FC-GS-2 stuff
182 */
183#define ZFCP_CT_REVISION 0x01
184#define ZFCP_CT_DIRECTORY_SERVICE 0xFC
185#define ZFCP_CT_NAME_SERVER 0x02
186#define ZFCP_CT_SYNCHRONOUS 0x00
187#define ZFCP_CT_SCSI_FCP 0x08
188#define ZFCP_CT_UNABLE_TO_PERFORM_CMD 0x09
189#define ZFCP_CT_GID_PN 0x0121
190#define ZFCP_CT_GPN_FT 0x0172
191#define ZFCP_CT_ACCEPT 0x8002
192#define ZFCP_CT_REJECT 0x8001
193
194/*
195 * FC-GS-4 stuff
196 */
197#define ZFCP_CT_TIMEOUT (3 * R_A_TOV)
198
199/*************** ADAPTER/PORT/UNIT AND FSF_REQ STATUS FLAGS ******************/ 72/*************** ADAPTER/PORT/UNIT AND FSF_REQ STATUS FLAGS ******************/
200 73
201/* 74/*
@@ -205,7 +78,6 @@ struct zfcp_ls_adisc {
205#define ZFCP_COMMON_FLAGS 0xfff00000 78#define ZFCP_COMMON_FLAGS 0xfff00000
206 79
207/* common status bits */ 80/* common status bits */
208#define ZFCP_STATUS_COMMON_REMOVE 0x80000000
209#define ZFCP_STATUS_COMMON_RUNNING 0x40000000 81#define ZFCP_STATUS_COMMON_RUNNING 0x40000000
210#define ZFCP_STATUS_COMMON_ERP_FAILED 0x20000000 82#define ZFCP_STATUS_COMMON_ERP_FAILED 0x20000000
211#define ZFCP_STATUS_COMMON_UNBLOCKED 0x10000000 83#define ZFCP_STATUS_COMMON_UNBLOCKED 0x10000000
@@ -222,21 +94,10 @@ struct zfcp_ls_adisc {
222#define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100 94#define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100
223#define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200 95#define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200
224 96
225/* FC-PH/FC-GS well-known address identifiers for generic services */
226#define ZFCP_DID_WKA 0xFFFFF0
227
228/* remote port status */ 97/* remote port status */
229#define ZFCP_STATUS_PORT_PHYS_OPEN 0x00000001 98#define ZFCP_STATUS_PORT_PHYS_OPEN 0x00000001
230#define ZFCP_STATUS_PORT_LINK_TEST 0x00000002 99#define ZFCP_STATUS_PORT_LINK_TEST 0x00000002
231 100
232/* well known address (WKA) port status*/
233enum zfcp_wka_status {
234 ZFCP_WKA_PORT_OFFLINE,
235 ZFCP_WKA_PORT_CLOSING,
236 ZFCP_WKA_PORT_OPENING,
237 ZFCP_WKA_PORT_ONLINE,
238};
239
240/* logical unit status */ 101/* logical unit status */
241#define ZFCP_STATUS_UNIT_SHARED 0x00000004 102#define ZFCP_STATUS_UNIT_SHARED 0x00000004
242#define ZFCP_STATUS_UNIT_READONLY 0x00000008 103#define ZFCP_STATUS_UNIT_READONLY 0x00000008
@@ -247,10 +108,7 @@ enum zfcp_wka_status {
247#define ZFCP_STATUS_FSFREQ_CLEANUP 0x00000010 108#define ZFCP_STATUS_FSFREQ_CLEANUP 0x00000010
248#define ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED 0x00000040 109#define ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED 0x00000040
249#define ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED 0x00000080 110#define ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED 0x00000080
250#define ZFCP_STATUS_FSFREQ_ABORTED 0x00000100
251#define ZFCP_STATUS_FSFREQ_TMFUNCFAILED 0x00000200 111#define ZFCP_STATUS_FSFREQ_TMFUNCFAILED 0x00000200
252#define ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP 0x00000400
253#define ZFCP_STATUS_FSFREQ_RETRY 0x00000800
254#define ZFCP_STATUS_FSFREQ_DISMISSED 0x00001000 112#define ZFCP_STATUS_FSFREQ_DISMISSED 0x00001000
255 113
256/************************* STRUCTURE DEFINITIONS *****************************/ 114/************************* STRUCTURE DEFINITIONS *****************************/
@@ -265,131 +123,10 @@ struct zfcp_adapter_mempool {
265 mempool_t *scsi_abort; 123 mempool_t *scsi_abort;
266 mempool_t *status_read_req; 124 mempool_t *status_read_req;
267 mempool_t *status_read_data; 125 mempool_t *status_read_data;
268 mempool_t *gid_pn_data; 126 mempool_t *gid_pn;
269 mempool_t *qtcb_pool; 127 mempool_t *qtcb_pool;
270}; 128};
271 129
272/*
273 * header for CT_IU
274 */
275struct ct_hdr {
276 u8 revision; // 0x01
277 u8 in_id[3]; // 0x00
278 u8 gs_type; // 0xFC Directory Service
279 u8 gs_subtype; // 0x02 Name Server
280 u8 options; // 0x00 single bidirectional exchange
281 u8 reserved0;
282 u16 cmd_rsp_code; // 0x0121 GID_PN, or 0x0100 GA_NXT
283 u16 max_res_size; // <= (4096 - 16) / 4
284 u8 reserved1;
285 u8 reason_code;
286 u8 reason_code_expl;
287 u8 vendor_unique;
288} __attribute__ ((packed));
289
290/* nameserver request CT_IU -- for requests where
291 * a port name is required */
292struct ct_iu_gid_pn_req {
293 struct ct_hdr header;
294 u64 wwpn;
295} __attribute__ ((packed));
296
297/* FS_ACC IU and data unit for GID_PN nameserver request */
298struct ct_iu_gid_pn_resp {
299 struct ct_hdr header;
300 u32 d_id;
301} __attribute__ ((packed));
302
303struct ct_iu_gpn_ft_req {
304 struct ct_hdr header;
305 u8 flags;
306 u8 domain_id_scope;
307 u8 area_id_scope;
308 u8 fc4_type;
309} __attribute__ ((packed));
310
311
312/**
313 * struct zfcp_send_ct - used to pass parameters to function zfcp_fsf_send_ct
314 * @wka_port: port where the request is sent to
315 * @req: scatter-gather list for request
316 * @resp: scatter-gather list for response
317 * @handler: handler function (called for response to the request)
318 * @handler_data: data passed to handler function
319 * @completion: completion for synchronization purposes
320 * @status: used to pass error status to calling function
321 */
322struct zfcp_send_ct {
323 struct zfcp_wka_port *wka_port;
324 struct scatterlist *req;
325 struct scatterlist *resp;
326 void (*handler)(unsigned long);
327 unsigned long handler_data;
328 struct completion *completion;
329 int status;
330};
331
332/* used for name server requests in error recovery */
333struct zfcp_gid_pn_data {
334 struct zfcp_send_ct ct;
335 struct scatterlist req;
336 struct scatterlist resp;
337 struct ct_iu_gid_pn_req ct_iu_req;
338 struct ct_iu_gid_pn_resp ct_iu_resp;
339 struct zfcp_port *port;
340};
341
342/**
343 * struct zfcp_send_els - used to pass parameters to function zfcp_fsf_send_els
344 * @adapter: adapter where request is sent from
345 * @port: port where ELS is destinated (port reference count has to be increased)
346 * @d_id: destiniation id of port where request is sent to
347 * @req: scatter-gather list for request
348 * @resp: scatter-gather list for response
349 * @handler: handler function (called for response to the request)
350 * @handler_data: data passed to handler function
351 * @completion: completion for synchronization purposes
352 * @ls_code: hex code of ELS command
353 * @status: used to pass error status to calling function
354 */
355struct zfcp_send_els {
356 struct zfcp_adapter *adapter;
357 struct zfcp_port *port;
358 u32 d_id;
359 struct scatterlist *req;
360 struct scatterlist *resp;
361 void (*handler)(unsigned long);
362 unsigned long handler_data;
363 struct completion *completion;
364 int ls_code;
365 int status;
366};
367
368struct zfcp_wka_port {
369 struct zfcp_adapter *adapter;
370 wait_queue_head_t completion_wq;
371 enum zfcp_wka_status status;
372 atomic_t refcount;
373 u32 d_id;
374 u32 handle;
375 struct mutex mutex;
376 struct delayed_work work;
377};
378
379struct zfcp_wka_ports {
380 struct zfcp_wka_port ms; /* management service */
381 struct zfcp_wka_port ts; /* time service */
382 struct zfcp_wka_port ds; /* directory service */
383 struct zfcp_wka_port as; /* alias service */
384 struct zfcp_wka_port ks; /* key distribution service */
385};
386
387struct zfcp_qdio_queue {
388 struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q];
389 u8 first; /* index of next free bfr in queue */
390 atomic_t count; /* number of free buffers in queue */
391};
392
393struct zfcp_erp_action { 130struct zfcp_erp_action {
394 struct list_head list; 131 struct list_head list;
395 int action; /* requested action code */ 132 int action; /* requested action code */
@@ -398,8 +135,7 @@ struct zfcp_erp_action {
398 struct zfcp_unit *unit; 135 struct zfcp_unit *unit;
399 u32 status; /* recovery status */ 136 u32 status; /* recovery status */
400 u32 step; /* active step of this erp action */ 137 u32 step; /* active step of this erp action */
401 struct zfcp_fsf_req *fsf_req; /* fsf request currently pending 138 unsigned long fsf_req_id;
402 for this action */
403 struct timer_list timer; 139 struct timer_list timer;
404}; 140};
405 141
@@ -422,33 +158,8 @@ struct zfcp_latencies {
422 spinlock_t lock; 158 spinlock_t lock;
423}; 159};
424 160
425/** struct zfcp_qdio - basic QDIO data structure
426 * @resp_q: response queue
427 * @req_q: request queue
428 * @stat_lock: lock to protect req_q_util and req_q_time
429 * @req_q_lock: lock to serialize access to request queue
430 * @req_q_time: time of last fill level change
431 * @req_q_util: used for accounting
432 * @req_q_full: queue full incidents
433 * @req_q_wq: used to wait for SBAL availability
434 * @adapter: adapter used in conjunction with this QDIO structure
435 */
436struct zfcp_qdio {
437 struct zfcp_qdio_queue resp_q;
438 struct zfcp_qdio_queue req_q;
439 spinlock_t stat_lock;
440 spinlock_t req_q_lock;
441 unsigned long long req_q_time;
442 u64 req_q_util;
443 atomic_t req_q_full;
444 wait_queue_head_t req_q_wq;
445 struct zfcp_adapter *adapter;
446};
447
448struct zfcp_adapter { 161struct zfcp_adapter {
449 atomic_t refcount; /* reference count */ 162 struct kref ref;
450 wait_queue_head_t remove_wq; /* can be used to wait for
451 refcount drop to zero */
452 u64 peer_wwnn; /* P2P peer WWNN */ 163 u64 peer_wwnn; /* P2P peer WWNN */
453 u64 peer_wwpn; /* P2P peer WWPN */ 164 u64 peer_wwpn; /* P2P peer WWPN */
454 u32 peer_d_id; /* P2P peer D_ID */ 165 u32 peer_d_id; /* P2P peer D_ID */
@@ -461,10 +172,10 @@ struct zfcp_adapter {
461 u32 hardware_version; /* of FCP channel */ 172 u32 hardware_version; /* of FCP channel */
462 u16 timer_ticks; /* time int for a tick */ 173 u16 timer_ticks; /* time int for a tick */
463 struct Scsi_Host *scsi_host; /* Pointer to mid-layer */ 174 struct Scsi_Host *scsi_host; /* Pointer to mid-layer */
464 struct list_head port_list_head; /* remote port list */ 175 struct list_head port_list; /* remote port list */
176 rwlock_t port_list_lock; /* port list lock */
465 unsigned long req_no; /* unique FSF req number */ 177 unsigned long req_no; /* unique FSF req number */
466 struct list_head *req_list; /* list of pending reqs */ 178 struct zfcp_reqlist *req_list;
467 spinlock_t req_list_lock; /* request list lock */
468 u32 fsf_req_seq_no; /* FSF cmnd seq number */ 179 u32 fsf_req_seq_no; /* FSF cmnd seq number */
469 rwlock_t abort_lock; /* Protects against SCSI 180 rwlock_t abort_lock; /* Protects against SCSI
470 stack abort/command 181 stack abort/command
@@ -485,7 +196,7 @@ struct zfcp_adapter {
485 u32 erp_low_mem_count; /* nr of erp actions waiting 196 u32 erp_low_mem_count; /* nr of erp actions waiting
486 for memory */ 197 for memory */
487 struct task_struct *erp_thread; 198 struct task_struct *erp_thread;
488 struct zfcp_wka_ports *gs; /* generic services */ 199 struct zfcp_fc_wka_ports *gs; /* generic services */
489 struct zfcp_dbf *dbf; /* debug traces */ 200 struct zfcp_dbf *dbf; /* debug traces */
490 struct zfcp_adapter_mempool pool; /* Adapter memory pools */ 201 struct zfcp_adapter_mempool pool; /* Adapter memory pools */
491 struct fc_host_statistics *fc_stats; 202 struct fc_host_statistics *fc_stats;
@@ -497,14 +208,12 @@ struct zfcp_adapter {
497}; 208};
498 209
499struct zfcp_port { 210struct zfcp_port {
500 struct device sysfs_device; /* sysfs device */ 211 struct device dev;
501 struct fc_rport *rport; /* rport of fc transport class */ 212 struct fc_rport *rport; /* rport of fc transport class */
502 struct list_head list; /* list of remote ports */ 213 struct list_head list; /* list of remote ports */
503 atomic_t refcount; /* reference count */
504 wait_queue_head_t remove_wq; /* can be used to wait for
505 refcount drop to zero */
506 struct zfcp_adapter *adapter; /* adapter used to access port */ 214 struct zfcp_adapter *adapter; /* adapter used to access port */
507 struct list_head unit_list_head; /* head of logical unit list */ 215 struct list_head unit_list; /* head of logical unit list */
216 rwlock_t unit_list_lock; /* unit list lock */
508 atomic_t status; /* status of this remote port */ 217 atomic_t status; /* status of this remote port */
509 u64 wwnn; /* WWNN if known */ 218 u64 wwnn; /* WWNN if known */
510 u64 wwpn; /* WWPN */ 219 u64 wwpn; /* WWPN */
@@ -521,11 +230,8 @@ struct zfcp_port {
521}; 230};
522 231
523struct zfcp_unit { 232struct zfcp_unit {
524 struct device sysfs_device; /* sysfs device */ 233 struct device dev;
525 struct list_head list; /* list of logical units */ 234 struct list_head list; /* list of logical units */
526 atomic_t refcount; /* reference count */
527 wait_queue_head_t remove_wq; /* can be used to wait for
528 refcount drop to zero */
529 struct zfcp_port *port; /* remote port of unit */ 235 struct zfcp_port *port; /* remote port of unit */
530 atomic_t status; /* status of this logical unit */ 236 atomic_t status; /* status of this logical unit */
531 u64 fcp_lun; /* own FCP_LUN */ 237 u64 fcp_lun; /* own FCP_LUN */
@@ -538,33 +244,11 @@ struct zfcp_unit {
538}; 244};
539 245
540/** 246/**
541 * struct zfcp_queue_req - queue related values for a request
542 * @sbal_number: number of free SBALs
543 * @sbal_first: first SBAL for this request
544 * @sbal_last: last SBAL for this request
545 * @sbal_limit: last possible SBAL for this request
546 * @sbale_curr: current SBALE at creation of this request
547 * @sbal_response: SBAL used in interrupt
548 * @qdio_outb_usage: usage of outbound queue
549 * @qdio_inb_usage: usage of inbound queue
550 */
551struct zfcp_queue_req {
552 u8 sbal_number;
553 u8 sbal_first;
554 u8 sbal_last;
555 u8 sbal_limit;
556 u8 sbale_curr;
557 u8 sbal_response;
558 u16 qdio_outb_usage;
559 u16 qdio_inb_usage;
560};
561
562/**
563 * struct zfcp_fsf_req - basic FSF request structure 247 * struct zfcp_fsf_req - basic FSF request structure
564 * @list: list of FSF requests 248 * @list: list of FSF requests
565 * @req_id: unique request ID 249 * @req_id: unique request ID
566 * @adapter: adapter this request belongs to 250 * @adapter: adapter this request belongs to
567 * @queue_req: queue related values 251 * @qdio_req: qdio queue related values
568 * @completion: used to signal the completion of the request 252 * @completion: used to signal the completion of the request
569 * @status: status of the request 253 * @status: status of the request
570 * @fsf_command: FSF command issued 254 * @fsf_command: FSF command issued
@@ -582,7 +266,7 @@ struct zfcp_fsf_req {
582 struct list_head list; 266 struct list_head list;
583 unsigned long req_id; 267 unsigned long req_id;
584 struct zfcp_adapter *adapter; 268 struct zfcp_adapter *adapter;
585 struct zfcp_queue_req queue_req; 269 struct zfcp_qdio_req qdio_req;
586 struct completion completion; 270 struct completion completion;
587 u32 status; 271 u32 status;
588 u32 fsf_command; 272 u32 fsf_command;
@@ -601,14 +285,11 @@ struct zfcp_fsf_req {
601struct zfcp_data { 285struct zfcp_data {
602 struct scsi_host_template scsi_host_template; 286 struct scsi_host_template scsi_host_template;
603 struct scsi_transport_template *scsi_transport_template; 287 struct scsi_transport_template *scsi_transport_template;
604 rwlock_t config_lock; /* serialises changes
605 to adapter/port/unit
606 lists */
607 struct mutex config_mutex;
608 struct kmem_cache *gpn_ft_cache; 288 struct kmem_cache *gpn_ft_cache;
609 struct kmem_cache *qtcb_cache; 289 struct kmem_cache *qtcb_cache;
610 struct kmem_cache *sr_buffer_cache; 290 struct kmem_cache *sr_buffer_cache;
611 struct kmem_cache *gid_pn_cache; 291 struct kmem_cache *gid_pn_cache;
292 struct kmem_cache *adisc_cache;
612}; 293};
613 294
614/********************** ZFCP SPECIFIC DEFINES ********************************/ 295/********************** ZFCP SPECIFIC DEFINES ********************************/
@@ -616,88 +297,4 @@ struct zfcp_data {
616#define ZFCP_SET 0x00000100 297#define ZFCP_SET 0x00000100
617#define ZFCP_CLEAR 0x00000200 298#define ZFCP_CLEAR 0x00000200
618 299
619/*
620 * Helper functions for request ID management.
621 */
622static inline int zfcp_reqlist_hash(unsigned long req_id)
623{
624 return req_id % REQUEST_LIST_SIZE;
625}
626
627static inline void zfcp_reqlist_remove(struct zfcp_adapter *adapter,
628 struct zfcp_fsf_req *fsf_req)
629{
630 list_del(&fsf_req->list);
631}
632
633static inline struct zfcp_fsf_req *
634zfcp_reqlist_find(struct zfcp_adapter *adapter, unsigned long req_id)
635{
636 struct zfcp_fsf_req *request;
637 unsigned int idx;
638
639 idx = zfcp_reqlist_hash(req_id);
640 list_for_each_entry(request, &adapter->req_list[idx], list)
641 if (request->req_id == req_id)
642 return request;
643 return NULL;
644}
645
646static inline struct zfcp_fsf_req *
647zfcp_reqlist_find_safe(struct zfcp_adapter *adapter, struct zfcp_fsf_req *req)
648{
649 struct zfcp_fsf_req *request;
650 unsigned int idx;
651
652 for (idx = 0; idx < REQUEST_LIST_SIZE; idx++) {
653 list_for_each_entry(request, &adapter->req_list[idx], list)
654 if (request == req)
655 return request;
656 }
657 return NULL;
658}
659
660/*
661 * functions needed for reference/usage counting
662 */
663
664static inline void
665zfcp_unit_get(struct zfcp_unit *unit)
666{
667 atomic_inc(&unit->refcount);
668}
669
670static inline void
671zfcp_unit_put(struct zfcp_unit *unit)
672{
673 if (atomic_dec_return(&unit->refcount) == 0)
674 wake_up(&unit->remove_wq);
675}
676
677static inline void
678zfcp_port_get(struct zfcp_port *port)
679{
680 atomic_inc(&port->refcount);
681}
682
683static inline void
684zfcp_port_put(struct zfcp_port *port)
685{
686 if (atomic_dec_return(&port->refcount) == 0)
687 wake_up(&port->remove_wq);
688}
689
690static inline void
691zfcp_adapter_get(struct zfcp_adapter *adapter)
692{
693 atomic_inc(&adapter->refcount);
694}
695
696static inline void
697zfcp_adapter_put(struct zfcp_adapter *adapter)
698{
699 if (atomic_dec_return(&adapter->refcount) == 0)
700 wake_up(&adapter->remove_wq);
701}
702
703#endif /* ZFCP_DEF_H */ 300#endif /* ZFCP_DEF_H */
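The removed helpers show what zfcp_def.h loses: the hand-rolled request hash table and the atomic refcount/remove_wq scheme for adapters, ports and units. Requests now go through the zfcp_reqlist abstraction, and lifetimes follow common kernel patterns instead — struct kref for the adapter, get_device()/put_device() on the embedded struct device for ports and units. A minimal sketch of the kref side, assuming the struct zfcp_adapter defined above; the release function body is an illustration, not this patch's zfcp_adapter_release():

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

/* runs once the last reference is dropped */
static void example_adapter_release(struct kref *ref)
{
        struct zfcp_adapter *adapter =
                container_of(ref, struct zfcp_adapter, ref);

        kfree(adapter);
}

static void example_adapter_use(struct zfcp_adapter *adapter)
{
        kref_get(&adapter->ref);        /* pin while doing blocking work */
        /* ... work that must not race with adapter removal ... */
        kref_put(&adapter->ref, example_adapter_release);
}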
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index f73e2180f333..0be5e7ea2828 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Error Recovery Procedures (ERP). 4 * Error Recovery Procedures (ERP).
5 * 5 *
6 * Copyright IBM Corporation 2002, 2009 6 * Copyright IBM Corporation 2002, 2010
7 */ 7 */
8 8
9#define KMSG_COMPONENT "zfcp" 9#define KMSG_COMPONENT "zfcp"
@@ -11,6 +11,7 @@
11 11
12#include <linux/kthread.h> 12#include <linux/kthread.h>
13#include "zfcp_ext.h" 13#include "zfcp_ext.h"
14#include "zfcp_reqlist.h"
14 15
15#define ZFCP_MAX_ERPS 3 16#define ZFCP_MAX_ERPS 3
16 17
@@ -99,9 +100,12 @@ static void zfcp_erp_action_dismiss_port(struct zfcp_port *port)
99 100
100 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE) 101 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
101 zfcp_erp_action_dismiss(&port->erp_action); 102 zfcp_erp_action_dismiss(&port->erp_action);
102 else 103 else {
103 list_for_each_entry(unit, &port->unit_list_head, list) 104 read_lock(&port->unit_list_lock);
104 zfcp_erp_action_dismiss_unit(unit); 105 list_for_each_entry(unit, &port->unit_list, list)
106 zfcp_erp_action_dismiss_unit(unit);
107 read_unlock(&port->unit_list_lock);
108 }
105} 109}
106 110
107static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter) 111static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
@@ -110,9 +114,12 @@ static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
110 114
111 if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_INUSE) 115 if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
112 zfcp_erp_action_dismiss(&adapter->erp_action); 116 zfcp_erp_action_dismiss(&adapter->erp_action);
113 else 117 else {
114 list_for_each_entry(port, &adapter->port_list_head, list) 118 read_lock(&adapter->port_list_lock);
119 list_for_each_entry(port, &adapter->port_list, list)
115 zfcp_erp_action_dismiss_port(port); 120 zfcp_erp_action_dismiss_port(port);
121 read_unlock(&adapter->port_list_lock);
122 }
116} 123}
117 124
118static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter, 125static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter,
@@ -168,7 +175,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need,
168 175
169 switch (need) { 176 switch (need) {
170 case ZFCP_ERP_ACTION_REOPEN_UNIT: 177 case ZFCP_ERP_ACTION_REOPEN_UNIT:
171 zfcp_unit_get(unit); 178 if (!get_device(&unit->dev))
179 return NULL;
172 atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status); 180 atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status);
173 erp_action = &unit->erp_action; 181 erp_action = &unit->erp_action;
174 if (!(atomic_read(&unit->status) & ZFCP_STATUS_COMMON_RUNNING)) 182 if (!(atomic_read(&unit->status) & ZFCP_STATUS_COMMON_RUNNING))
@@ -177,7 +185,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need,
177 185
178 case ZFCP_ERP_ACTION_REOPEN_PORT: 186 case ZFCP_ERP_ACTION_REOPEN_PORT:
179 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: 187 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
180 zfcp_port_get(port); 188 if (!get_device(&port->dev))
189 return NULL;
181 zfcp_erp_action_dismiss_port(port); 190 zfcp_erp_action_dismiss_port(port);
182 atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status); 191 atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
183 erp_action = &port->erp_action; 192 erp_action = &port->erp_action;
@@ -186,7 +195,7 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need,
186 break; 195 break;
187 196
188 case ZFCP_ERP_ACTION_REOPEN_ADAPTER: 197 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
189 zfcp_adapter_get(adapter); 198 kref_get(&adapter->ref);
190 zfcp_erp_action_dismiss_adapter(adapter); 199 zfcp_erp_action_dismiss_adapter(adapter);
191 atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status); 200 atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status);
192 erp_action = &adapter->erp_action; 201 erp_action = &adapter->erp_action;
@@ -264,11 +273,16 @@ void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear,
264{ 273{
265 unsigned long flags; 274 unsigned long flags;
266 275
267 read_lock_irqsave(&zfcp_data.config_lock, flags); 276 zfcp_erp_adapter_block(adapter, clear);
268 write_lock(&adapter->erp_lock); 277 zfcp_scsi_schedule_rports_block(adapter);
269 _zfcp_erp_adapter_reopen(adapter, clear, id, ref); 278
270 write_unlock(&adapter->erp_lock); 279 write_lock_irqsave(&adapter->erp_lock, flags);
271 read_unlock_irqrestore(&zfcp_data.config_lock, flags); 280 if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
281 zfcp_erp_adapter_failed(adapter, "erareo1", NULL);
282 else
283 zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter,
284 NULL, NULL, id, ref);
285 write_unlock_irqrestore(&adapter->erp_lock, flags);
272} 286}
273 287
274/** 288/**
@@ -345,11 +359,9 @@ void zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear, char *id,
345 unsigned long flags; 359 unsigned long flags;
346 struct zfcp_adapter *adapter = port->adapter; 360 struct zfcp_adapter *adapter = port->adapter;
347 361
348 read_lock_irqsave(&zfcp_data.config_lock, flags); 362 write_lock_irqsave(&adapter->erp_lock, flags);
349 write_lock(&adapter->erp_lock);
350 _zfcp_erp_port_forced_reopen(port, clear, id, ref); 363 _zfcp_erp_port_forced_reopen(port, clear, id, ref);
351 write_unlock(&adapter->erp_lock); 364 write_unlock_irqrestore(&adapter->erp_lock, flags);
352 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
353} 365}
354 366
355static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id, 367static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id,
@@ -377,15 +389,13 @@ static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id,
377 */ 389 */
378int zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id, void *ref) 390int zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id, void *ref)
379{ 391{
380 unsigned long flags;
381 int retval; 392 int retval;
393 unsigned long flags;
382 struct zfcp_adapter *adapter = port->adapter; 394 struct zfcp_adapter *adapter = port->adapter;
383 395
384 read_lock_irqsave(&zfcp_data.config_lock, flags); 396 write_lock_irqsave(&adapter->erp_lock, flags);
385 write_lock(&adapter->erp_lock);
386 retval = _zfcp_erp_port_reopen(port, clear, id, ref); 397 retval = _zfcp_erp_port_reopen(port, clear, id, ref);
387 write_unlock(&adapter->erp_lock); 398 write_unlock_irqrestore(&adapter->erp_lock, flags);
388 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
389 399
390 return retval; 400 return retval;
391} 401}
@@ -424,11 +434,9 @@ void zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear, char *id,
424 struct zfcp_port *port = unit->port; 434 struct zfcp_port *port = unit->port;
425 struct zfcp_adapter *adapter = port->adapter; 435 struct zfcp_adapter *adapter = port->adapter;
426 436
427 read_lock_irqsave(&zfcp_data.config_lock, flags); 437 write_lock_irqsave(&adapter->erp_lock, flags);
428 write_lock(&adapter->erp_lock);
429 _zfcp_erp_unit_reopen(unit, clear, id, ref); 438 _zfcp_erp_unit_reopen(unit, clear, id, ref);
430 write_unlock(&adapter->erp_lock); 439 write_unlock_irqrestore(&adapter->erp_lock, flags);
431 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
432} 440}
433 441
434static int status_change_set(unsigned long mask, atomic_t *status) 442static int status_change_set(unsigned long mask, atomic_t *status)
@@ -471,26 +479,27 @@ static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action)
471static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act) 479static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act)
472{ 480{
473 struct zfcp_adapter *adapter = act->adapter; 481 struct zfcp_adapter *adapter = act->adapter;
482 struct zfcp_fsf_req *req;
474 483
475 if (!act->fsf_req) 484 if (!act->fsf_req_id)
476 return; 485 return;
477 486
478 spin_lock(&adapter->req_list_lock); 487 spin_lock(&adapter->req_list->lock);
479 if (zfcp_reqlist_find_safe(adapter, act->fsf_req) && 488 req = _zfcp_reqlist_find(adapter->req_list, act->fsf_req_id);
480 act->fsf_req->erp_action == act) { 489 if (req && req->erp_action == act) {
481 if (act->status & (ZFCP_STATUS_ERP_DISMISSED | 490 if (act->status & (ZFCP_STATUS_ERP_DISMISSED |
482 ZFCP_STATUS_ERP_TIMEDOUT)) { 491 ZFCP_STATUS_ERP_TIMEDOUT)) {
483 act->fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED; 492 req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
484 zfcp_dbf_rec_action("erscf_1", act); 493 zfcp_dbf_rec_action("erscf_1", act);
485 act->fsf_req->erp_action = NULL; 494 req->erp_action = NULL;
486 } 495 }
487 if (act->status & ZFCP_STATUS_ERP_TIMEDOUT) 496 if (act->status & ZFCP_STATUS_ERP_TIMEDOUT)
488 zfcp_dbf_rec_action("erscf_2", act); 497 zfcp_dbf_rec_action("erscf_2", act);
489 if (act->fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED) 498 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED)
490 act->fsf_req = NULL; 499 act->fsf_req_id = 0;
491 } else 500 } else
492 act->fsf_req = NULL; 501 act->fsf_req_id = 0;
493 spin_unlock(&adapter->req_list_lock); 502 spin_unlock(&adapter->req_list->lock);
494} 503}
495 504
496/** 505/**
@@ -540,8 +549,10 @@ static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
540{ 549{
541 struct zfcp_port *port; 550 struct zfcp_port *port;
542 551
543 list_for_each_entry(port, &adapter->port_list_head, list) 552 read_lock(&adapter->port_list_lock);
553 list_for_each_entry(port, &adapter->port_list, list)
544 _zfcp_erp_port_reopen(port, clear, id, ref); 554 _zfcp_erp_port_reopen(port, clear, id, ref);
555 read_unlock(&adapter->port_list_lock);
545} 556}
546 557
547static void _zfcp_erp_unit_reopen_all(struct zfcp_port *port, int clear, 558static void _zfcp_erp_unit_reopen_all(struct zfcp_port *port, int clear,
@@ -549,8 +560,10 @@ static void _zfcp_erp_unit_reopen_all(struct zfcp_port *port, int clear,
549{ 560{
550 struct zfcp_unit *unit; 561 struct zfcp_unit *unit;
551 562
552 list_for_each_entry(unit, &port->unit_list_head, list) 563 read_lock(&port->unit_list_lock);
564 list_for_each_entry(unit, &port->unit_list, list)
553 _zfcp_erp_unit_reopen(unit, clear, id, ref); 565 _zfcp_erp_unit_reopen(unit, clear, id, ref);
566 read_unlock(&port->unit_list_lock);
554} 567}
555 568
556static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act) 569static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act)
@@ -590,16 +603,14 @@ static void zfcp_erp_wakeup(struct zfcp_adapter *adapter)
590{ 603{
591 unsigned long flags; 604 unsigned long flags;
592 605
593 read_lock_irqsave(&zfcp_data.config_lock, flags); 606 read_lock_irqsave(&adapter->erp_lock, flags);
594 read_lock(&adapter->erp_lock);
595 if (list_empty(&adapter->erp_ready_head) && 607 if (list_empty(&adapter->erp_ready_head) &&
596 list_empty(&adapter->erp_running_head)) { 608 list_empty(&adapter->erp_running_head)) {
597 atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, 609 atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING,
598 &adapter->status); 610 &adapter->status);
599 wake_up(&adapter->erp_done_wqh); 611 wake_up(&adapter->erp_done_wqh);
600 } 612 }
601 read_unlock(&adapter->erp_lock); 613 read_unlock_irqrestore(&adapter->erp_lock, flags);
602 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
603} 614}
604 615
605static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *act) 616static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *act)
@@ -1170,28 +1181,28 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
1170 switch (act->action) { 1181 switch (act->action) {
1171 case ZFCP_ERP_ACTION_REOPEN_UNIT: 1182 case ZFCP_ERP_ACTION_REOPEN_UNIT:
1172 if ((result == ZFCP_ERP_SUCCEEDED) && !unit->device) { 1183 if ((result == ZFCP_ERP_SUCCEEDED) && !unit->device) {
1173 zfcp_unit_get(unit); 1184 get_device(&unit->dev);
1174 if (scsi_queue_work(unit->port->adapter->scsi_host, 1185 if (scsi_queue_work(unit->port->adapter->scsi_host,
1175 &unit->scsi_work) <= 0) 1186 &unit->scsi_work) <= 0)
1176 zfcp_unit_put(unit); 1187 put_device(&unit->dev);
1177 } 1188 }
1178 zfcp_unit_put(unit); 1189 put_device(&unit->dev);
1179 break; 1190 break;
1180 1191
1181 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: 1192 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
1182 case ZFCP_ERP_ACTION_REOPEN_PORT: 1193 case ZFCP_ERP_ACTION_REOPEN_PORT:
1183 if (result == ZFCP_ERP_SUCCEEDED) 1194 if (result == ZFCP_ERP_SUCCEEDED)
1184 zfcp_scsi_schedule_rport_register(port); 1195 zfcp_scsi_schedule_rport_register(port);
1185 zfcp_port_put(port); 1196 put_device(&port->dev);
1186 break; 1197 break;
1187 1198
1188 case ZFCP_ERP_ACTION_REOPEN_ADAPTER: 1199 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
1189 if (result == ZFCP_ERP_SUCCEEDED) { 1200 if (result == ZFCP_ERP_SUCCEEDED) {
1190 register_service_level(&adapter->service_level); 1201 register_service_level(&adapter->service_level);
1191 schedule_work(&adapter->scan_work); 1202 queue_work(adapter->work_queue, &adapter->scan_work);
1192 } else 1203 } else
1193 unregister_service_level(&adapter->service_level); 1204 unregister_service_level(&adapter->service_level);
1194 zfcp_adapter_put(adapter); 1205 kref_put(&adapter->ref, zfcp_adapter_release);
1195 break; 1206 break;
1196 } 1207 }
1197} 1208}
@@ -1214,12 +1225,12 @@ static int zfcp_erp_strategy_do_action(struct zfcp_erp_action *erp_action)
1214static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action) 1225static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
1215{ 1226{
1216 int retval; 1227 int retval;
1217 struct zfcp_adapter *adapter = erp_action->adapter;
1218 unsigned long flags; 1228 unsigned long flags;
1229 struct zfcp_adapter *adapter = erp_action->adapter;
1219 1230
1220 read_lock_irqsave(&zfcp_data.config_lock, flags); 1231 kref_get(&adapter->ref);
1221 write_lock(&adapter->erp_lock);
1222 1232
1233 write_lock_irqsave(&adapter->erp_lock, flags);
1223 zfcp_erp_strategy_check_fsfreq(erp_action); 1234 zfcp_erp_strategy_check_fsfreq(erp_action);
1224 1235
1225 if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED) { 1236 if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED) {
@@ -1231,11 +1242,9 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
1231 zfcp_erp_action_to_running(erp_action); 1242 zfcp_erp_action_to_running(erp_action);
1232 1243
1233 /* no lock to allow for blocking operations */ 1244 /* no lock to allow for blocking operations */
1234 write_unlock(&adapter->erp_lock); 1245 write_unlock_irqrestore(&adapter->erp_lock, flags);
1235 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
1236 retval = zfcp_erp_strategy_do_action(erp_action); 1246 retval = zfcp_erp_strategy_do_action(erp_action);
1237 read_lock_irqsave(&zfcp_data.config_lock, flags); 1247 write_lock_irqsave(&adapter->erp_lock, flags);
1238 write_lock(&adapter->erp_lock);
1239 1248
1240 if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED) 1249 if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED)
1241 retval = ZFCP_ERP_CONTINUES; 1250 retval = ZFCP_ERP_CONTINUES;
@@ -1273,12 +1282,12 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
1273 zfcp_erp_strategy_followup_failed(erp_action); 1282 zfcp_erp_strategy_followup_failed(erp_action);
1274 1283
1275 unlock: 1284 unlock:
1276 write_unlock(&adapter->erp_lock); 1285 write_unlock_irqrestore(&adapter->erp_lock, flags);
1277 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
1278 1286
1279 if (retval != ZFCP_ERP_CONTINUES) 1287 if (retval != ZFCP_ERP_CONTINUES)
1280 zfcp_erp_action_cleanup(erp_action, retval); 1288 zfcp_erp_action_cleanup(erp_action, retval);
1281 1289
1290 kref_put(&adapter->ref, zfcp_adapter_release);
1282 return retval; 1291 return retval;
1283} 1292}
1284 1293
@@ -1415,6 +1424,7 @@ void zfcp_erp_modify_adapter_status(struct zfcp_adapter *adapter, char *id,
1415 void *ref, u32 mask, int set_or_clear) 1424 void *ref, u32 mask, int set_or_clear)
1416{ 1425{
1417 struct zfcp_port *port; 1426 struct zfcp_port *port;
1427 unsigned long flags;
1418 u32 common_mask = mask & ZFCP_COMMON_FLAGS; 1428 u32 common_mask = mask & ZFCP_COMMON_FLAGS;
1419 1429
1420 if (set_or_clear == ZFCP_SET) { 1430 if (set_or_clear == ZFCP_SET) {
@@ -1429,10 +1439,13 @@ void zfcp_erp_modify_adapter_status(struct zfcp_adapter *adapter, char *id,
1429 atomic_set(&adapter->erp_counter, 0); 1439 atomic_set(&adapter->erp_counter, 0);
1430 } 1440 }
1431 1441
1432 if (common_mask) 1442 if (common_mask) {
1433 list_for_each_entry(port, &adapter->port_list_head, list) 1443 read_lock_irqsave(&adapter->port_list_lock, flags);
1444 list_for_each_entry(port, &adapter->port_list, list)
1434 zfcp_erp_modify_port_status(port, id, ref, common_mask, 1445 zfcp_erp_modify_port_status(port, id, ref, common_mask,
1435 set_or_clear); 1446 set_or_clear);
1447 read_unlock_irqrestore(&adapter->port_list_lock, flags);
1448 }
1436} 1449}
1437 1450
1438/** 1451/**
@@ -1449,6 +1462,7 @@ void zfcp_erp_modify_port_status(struct zfcp_port *port, char *id, void *ref,
1449 u32 mask, int set_or_clear) 1462 u32 mask, int set_or_clear)
1450{ 1463{
1451 struct zfcp_unit *unit; 1464 struct zfcp_unit *unit;
1465 unsigned long flags;
1452 u32 common_mask = mask & ZFCP_COMMON_FLAGS; 1466 u32 common_mask = mask & ZFCP_COMMON_FLAGS;
1453 1467
1454 if (set_or_clear == ZFCP_SET) { 1468 if (set_or_clear == ZFCP_SET) {
@@ -1463,10 +1477,13 @@ void zfcp_erp_modify_port_status(struct zfcp_port *port, char *id, void *ref,
1463 atomic_set(&port->erp_counter, 0); 1477 atomic_set(&port->erp_counter, 0);
1464 } 1478 }
1465 1479
1466 if (common_mask) 1480 if (common_mask) {
1467 list_for_each_entry(unit, &port->unit_list_head, list) 1481 read_lock_irqsave(&port->unit_list_lock, flags);
1482 list_for_each_entry(unit, &port->unit_list, list)
1468 zfcp_erp_modify_unit_status(unit, id, ref, common_mask, 1483 zfcp_erp_modify_unit_status(unit, id, ref, common_mask,
1469 set_or_clear); 1484 set_or_clear);
1485 read_unlock_irqrestore(&port->unit_list_lock, flags);
1486 }
1470} 1487}
1471 1488
1472/** 1489/**
@@ -1502,12 +1519,8 @@ void zfcp_erp_modify_unit_status(struct zfcp_unit *unit, char *id, void *ref,
1502 */ 1519 */
1503void zfcp_erp_port_boxed(struct zfcp_port *port, char *id, void *ref) 1520void zfcp_erp_port_boxed(struct zfcp_port *port, char *id, void *ref)
1504{ 1521{
1505 unsigned long flags;
1506
1507 read_lock_irqsave(&zfcp_data.config_lock, flags);
1508 zfcp_erp_modify_port_status(port, id, ref, 1522 zfcp_erp_modify_port_status(port, id, ref,
1509 ZFCP_STATUS_COMMON_ACCESS_BOXED, ZFCP_SET); 1523 ZFCP_STATUS_COMMON_ACCESS_BOXED, ZFCP_SET);
1510 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
1511 zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref); 1524 zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref);
1512} 1525}
1513 1526
@@ -1535,13 +1548,9 @@ void zfcp_erp_unit_boxed(struct zfcp_unit *unit, char *id, void *ref)
1535 */ 1548 */
1536void zfcp_erp_port_access_denied(struct zfcp_port *port, char *id, void *ref) 1549void zfcp_erp_port_access_denied(struct zfcp_port *port, char *id, void *ref)
1537{ 1550{
1538 unsigned long flags;
1539
1540 read_lock_irqsave(&zfcp_data.config_lock, flags);
1541 zfcp_erp_modify_port_status(port, id, ref, 1551 zfcp_erp_modify_port_status(port, id, ref,
1542 ZFCP_STATUS_COMMON_ERP_FAILED | 1552 ZFCP_STATUS_COMMON_ERP_FAILED |
1543 ZFCP_STATUS_COMMON_ACCESS_DENIED, ZFCP_SET); 1553 ZFCP_STATUS_COMMON_ACCESS_DENIED, ZFCP_SET);
1544 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
1545} 1554}
1546 1555
1547/** 1556/**
@@ -1574,12 +1583,15 @@ static void zfcp_erp_port_access_changed(struct zfcp_port *port, char *id,
1574 void *ref) 1583 void *ref)
1575{ 1584{
1576 struct zfcp_unit *unit; 1585 struct zfcp_unit *unit;
1586 unsigned long flags;
1577 int status = atomic_read(&port->status); 1587 int status = atomic_read(&port->status);
1578 1588
1579 if (!(status & (ZFCP_STATUS_COMMON_ACCESS_DENIED | 1589 if (!(status & (ZFCP_STATUS_COMMON_ACCESS_DENIED |
1580 ZFCP_STATUS_COMMON_ACCESS_BOXED))) { 1590 ZFCP_STATUS_COMMON_ACCESS_BOXED))) {
1581 list_for_each_entry(unit, &port->unit_list_head, list) 1591 read_lock_irqsave(&port->unit_list_lock, flags);
1592 list_for_each_entry(unit, &port->unit_list, list)
1582 zfcp_erp_unit_access_changed(unit, id, ref); 1593 zfcp_erp_unit_access_changed(unit, id, ref);
1594 read_unlock_irqrestore(&port->unit_list_lock, flags);
1583 return; 1595 return;
1584 } 1596 }
1585 1597
@@ -1595,14 +1607,14 @@ static void zfcp_erp_port_access_changed(struct zfcp_port *port, char *id,
1595void zfcp_erp_adapter_access_changed(struct zfcp_adapter *adapter, char *id, 1607void zfcp_erp_adapter_access_changed(struct zfcp_adapter *adapter, char *id,
1596 void *ref) 1608 void *ref)
1597{ 1609{
1598 struct zfcp_port *port;
1599 unsigned long flags; 1610 unsigned long flags;
1611 struct zfcp_port *port;
1600 1612
1601 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) 1613 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE)
1602 return; 1614 return;
1603 1615
1604 read_lock_irqsave(&zfcp_data.config_lock, flags); 1616 read_lock_irqsave(&adapter->port_list_lock, flags);
1605 list_for_each_entry(port, &adapter->port_list_head, list) 1617 list_for_each_entry(port, &adapter->port_list, list)
1606 zfcp_erp_port_access_changed(port, id, ref); 1618 zfcp_erp_port_access_changed(port, id, ref);
1607 read_unlock_irqrestore(&zfcp_data.config_lock, flags); 1619 read_unlock_irqrestore(&adapter->port_list_lock, flags);
1608} 1620}
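The zfcp_erp.c hunks above all apply the same conversion: list walks drop the driver-global zfcp_data.config_lock and take the per-adapter or per-port list lock instead. A minimal sketch of the resulting pattern, assuming the zfcp types from this patch; visit_port() is a hypothetical stand-in for the per-port work:

static void example_walk_ports(struct zfcp_adapter *adapter)
{
	struct zfcp_port *port;
	unsigned long flags;

	read_lock_irqsave(&adapter->port_list_lock, flags);
	list_for_each_entry(port, &adapter->port_list, list)
		visit_port(port);	/* hypothetical; must not sleep under the lock */
	read_unlock_irqrestore(&adapter->port_list_lock, flags);
}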
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index b3f28deb4505..8786a79c7f8f 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -9,26 +9,30 @@
9#ifndef ZFCP_EXT_H 9#ifndef ZFCP_EXT_H
10#define ZFCP_EXT_H 10#define ZFCP_EXT_H
11 11
12#include <linux/types.h>
13#include <scsi/fc/fc_els.h>
12#include "zfcp_def.h" 14#include "zfcp_def.h"
15#include "zfcp_fc.h"
13 16
14/* zfcp_aux.c */ 17/* zfcp_aux.c */
15extern struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *, u64); 18extern struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *, u64);
16extern struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *, u64); 19extern struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *, u64);
17extern int zfcp_adapter_enqueue(struct ccw_device *); 20extern struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *);
18extern void zfcp_adapter_dequeue(struct zfcp_adapter *);
19extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, u64, u32, 21extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, u64, u32,
20 u32); 22 u32);
21extern void zfcp_port_dequeue(struct zfcp_port *);
22extern struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *, u64); 23extern struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *, u64);
23extern void zfcp_unit_dequeue(struct zfcp_unit *);
24extern int zfcp_reqlist_isempty(struct zfcp_adapter *);
25extern void zfcp_sg_free_table(struct scatterlist *, int); 24extern void zfcp_sg_free_table(struct scatterlist *, int);
26extern int zfcp_sg_setup_table(struct scatterlist *, int); 25extern int zfcp_sg_setup_table(struct scatterlist *, int);
26extern void zfcp_device_unregister(struct device *,
27 const struct attribute_group *);
28extern void zfcp_adapter_release(struct kref *);
29extern void zfcp_adapter_unregister(struct zfcp_adapter *);
27 30
28/* zfcp_ccw.c */ 31/* zfcp_ccw.c */
29extern int zfcp_ccw_register(void);
30extern int zfcp_ccw_priv_sch(struct zfcp_adapter *); 32extern int zfcp_ccw_priv_sch(struct zfcp_adapter *);
31extern struct ccw_driver zfcp_ccw_driver; 33extern struct ccw_driver zfcp_ccw_driver;
34extern struct zfcp_adapter *zfcp_ccw_adapter_by_cdev(struct ccw_device *);
35extern void zfcp_ccw_adapter_put(struct zfcp_adapter *);
32 36
33/* zfcp_cfdc.c */ 37/* zfcp_cfdc.c */
34extern struct miscdevice zfcp_cfdc_misc; 38extern struct miscdevice zfcp_cfdc_misc;
@@ -51,7 +55,7 @@ extern void _zfcp_dbf_hba_fsf_unsol(const char *, int level, struct zfcp_dbf *,
51 struct fsf_status_read_buffer *); 55 struct fsf_status_read_buffer *);
52extern void zfcp_dbf_hba_qdio(struct zfcp_dbf *, unsigned int, int, int); 56extern void zfcp_dbf_hba_qdio(struct zfcp_dbf *, unsigned int, int, int);
53extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *); 57extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *);
54extern void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *); 58extern void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *, u32);
55extern void zfcp_dbf_san_ct_response(struct zfcp_fsf_req *); 59extern void zfcp_dbf_san_ct_response(struct zfcp_fsf_req *);
56extern void zfcp_dbf_san_els_request(struct zfcp_fsf_req *); 60extern void zfcp_dbf_san_els_request(struct zfcp_fsf_req *);
57extern void zfcp_dbf_san_els_response(struct zfcp_fsf_req *); 61extern void zfcp_dbf_san_els_response(struct zfcp_fsf_req *);
@@ -92,24 +96,23 @@ extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, char *,
92extern void zfcp_erp_timeout_handler(unsigned long); 96extern void zfcp_erp_timeout_handler(unsigned long);
93 97
94/* zfcp_fc.c */ 98/* zfcp_fc.c */
95extern int zfcp_fc_scan_ports(struct zfcp_adapter *); 99extern void zfcp_fc_scan_ports(struct work_struct *);
96extern void _zfcp_fc_scan_ports_later(struct work_struct *);
97extern void zfcp_fc_incoming_els(struct zfcp_fsf_req *); 100extern void zfcp_fc_incoming_els(struct zfcp_fsf_req *);
98extern void zfcp_fc_port_did_lookup(struct work_struct *); 101extern void zfcp_fc_port_did_lookup(struct work_struct *);
99extern void zfcp_fc_trigger_did_lookup(struct zfcp_port *); 102extern void zfcp_fc_trigger_did_lookup(struct zfcp_port *);
100extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fsf_plogi *); 103extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fc_els_flogi *);
101extern void zfcp_fc_test_link(struct zfcp_port *); 104extern void zfcp_fc_test_link(struct zfcp_port *);
102extern void zfcp_fc_link_test_work(struct work_struct *); 105extern void zfcp_fc_link_test_work(struct work_struct *);
103extern void zfcp_fc_wka_ports_force_offline(struct zfcp_wka_ports *); 106extern void zfcp_fc_wka_ports_force_offline(struct zfcp_fc_wka_ports *);
104extern int zfcp_fc_gs_setup(struct zfcp_adapter *); 107extern int zfcp_fc_gs_setup(struct zfcp_adapter *);
105extern void zfcp_fc_gs_destroy(struct zfcp_adapter *); 108extern void zfcp_fc_gs_destroy(struct zfcp_adapter *);
106extern int zfcp_fc_execute_els_fc_job(struct fc_bsg_job *); 109extern int zfcp_fc_exec_bsg_job(struct fc_bsg_job *);
107extern int zfcp_fc_execute_ct_fc_job(struct fc_bsg_job *); 110extern int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *);
108 111
109/* zfcp_fsf.c */ 112/* zfcp_fsf.c */
110extern int zfcp_fsf_open_port(struct zfcp_erp_action *); 113extern int zfcp_fsf_open_port(struct zfcp_erp_action *);
111extern int zfcp_fsf_open_wka_port(struct zfcp_wka_port *); 114extern int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *);
112extern int zfcp_fsf_close_wka_port(struct zfcp_wka_port *); 115extern int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *);
113extern int zfcp_fsf_close_port(struct zfcp_erp_action *); 116extern int zfcp_fsf_close_port(struct zfcp_erp_action *);
114extern int zfcp_fsf_close_physical_port(struct zfcp_erp_action *); 117extern int zfcp_fsf_close_physical_port(struct zfcp_erp_action *);
115extern int zfcp_fsf_open_unit(struct zfcp_erp_action *); 118extern int zfcp_fsf_open_unit(struct zfcp_erp_action *);
@@ -125,8 +128,10 @@ extern struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *,
125extern void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *); 128extern void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *);
126extern int zfcp_fsf_status_read(struct zfcp_qdio *); 129extern int zfcp_fsf_status_read(struct zfcp_qdio *);
127extern int zfcp_status_read_refill(struct zfcp_adapter *adapter); 130extern int zfcp_status_read_refill(struct zfcp_adapter *adapter);
128extern int zfcp_fsf_send_ct(struct zfcp_send_ct *, mempool_t *); 131extern int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *, struct zfcp_fsf_ct_els *,
129extern int zfcp_fsf_send_els(struct zfcp_send_els *); 132 mempool_t *, unsigned int);
133extern int zfcp_fsf_send_els(struct zfcp_adapter *, u32,
134 struct zfcp_fsf_ct_els *, unsigned int);
130extern int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *, 135extern int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *,
131 struct scsi_cmnd *); 136 struct scsi_cmnd *);
132extern void zfcp_fsf_req_free(struct zfcp_fsf_req *); 137extern void zfcp_fsf_req_free(struct zfcp_fsf_req *);
@@ -138,13 +143,9 @@ extern void zfcp_fsf_reqid_check(struct zfcp_qdio *, int);
138/* zfcp_qdio.c */ 143/* zfcp_qdio.c */
139extern int zfcp_qdio_setup(struct zfcp_adapter *); 144extern int zfcp_qdio_setup(struct zfcp_adapter *);
140extern void zfcp_qdio_destroy(struct zfcp_qdio *); 145extern void zfcp_qdio_destroy(struct zfcp_qdio *);
141extern int zfcp_qdio_send(struct zfcp_qdio *, struct zfcp_queue_req *); 146extern int zfcp_qdio_send(struct zfcp_qdio *, struct zfcp_qdio_req *);
142extern struct qdio_buffer_element
143 *zfcp_qdio_sbale_req(struct zfcp_qdio *, struct zfcp_queue_req *);
144extern struct qdio_buffer_element
145 *zfcp_qdio_sbale_curr(struct zfcp_qdio *, struct zfcp_queue_req *);
146extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *, 147extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *,
147 struct zfcp_queue_req *, unsigned long, 148 struct zfcp_qdio_req *, unsigned long,
148 struct scatterlist *, int); 149 struct scatterlist *, int);
149extern int zfcp_qdio_open(struct zfcp_qdio *); 150extern int zfcp_qdio_open(struct zfcp_qdio *);
150extern void zfcp_qdio_close(struct zfcp_qdio *); 151extern void zfcp_qdio_close(struct zfcp_qdio *);
@@ -153,7 +154,6 @@ extern void zfcp_qdio_close(struct zfcp_qdio *);
153extern struct zfcp_data zfcp_data; 154extern struct zfcp_data zfcp_data;
154extern int zfcp_adapter_scsi_register(struct zfcp_adapter *); 155extern int zfcp_adapter_scsi_register(struct zfcp_adapter *);
155extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *); 156extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *);
156extern char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *);
157extern struct fc_function_template zfcp_transport_functions; 157extern struct fc_function_template zfcp_transport_functions;
158extern void zfcp_scsi_rport_work(struct work_struct *); 158extern void zfcp_scsi_rport_work(struct work_struct *);
159extern void zfcp_scsi_schedule_rport_register(struct zfcp_port *); 159extern void zfcp_scsi_schedule_rport_register(struct zfcp_port *);
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index df23bcead23d..2a1cbb74b99b 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -3,79 +3,45 @@
3 * 3 *
4 * Fibre Channel related functions for the zfcp device driver. 4 * Fibre Channel related functions for the zfcp device driver.
5 * 5 *
6 * Copyright IBM Corporation 2008, 2009 6 * Copyright IBM Corporation 2008, 2010
7 */ 7 */
8 8
9#define KMSG_COMPONENT "zfcp" 9#define KMSG_COMPONENT "zfcp"
10#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
11 11
12#include <linux/types.h>
13#include <linux/slab.h>
14#include <scsi/fc/fc_els.h>
15#include <scsi/libfc.h>
12#include "zfcp_ext.h" 16#include "zfcp_ext.h"
17#include "zfcp_fc.h"
13 18
14enum rscn_address_format { 19static u32 zfcp_fc_rscn_range_mask[] = {
15 RSCN_PORT_ADDRESS = 0x0, 20 [ELS_ADDR_FMT_PORT] = 0xFFFFFF,
16 RSCN_AREA_ADDRESS = 0x1, 21 [ELS_ADDR_FMT_AREA] = 0xFFFF00,
17 RSCN_DOMAIN_ADDRESS = 0x2, 22 [ELS_ADDR_FMT_DOM] = 0xFF0000,
18 RSCN_FABRIC_ADDRESS = 0x3, 23 [ELS_ADDR_FMT_FAB] = 0x000000,
19}; 24};
20 25
21static u32 rscn_range_mask[] = { 26static int zfcp_fc_wka_port_get(struct zfcp_fc_wka_port *wka_port)
22 [RSCN_PORT_ADDRESS] = 0xFFFFFF,
23 [RSCN_AREA_ADDRESS] = 0xFFFF00,
24 [RSCN_DOMAIN_ADDRESS] = 0xFF0000,
25 [RSCN_FABRIC_ADDRESS] = 0x000000,
26};
27
28struct gpn_ft_resp_acc {
29 u8 control;
30 u8 port_id[3];
31 u8 reserved[4];
32 u64 wwpn;
33} __attribute__ ((packed));
34
35#define ZFCP_CT_SIZE_ONE_PAGE (PAGE_SIZE - sizeof(struct ct_hdr))
36#define ZFCP_GPN_FT_ENTRIES (ZFCP_CT_SIZE_ONE_PAGE \
37 / sizeof(struct gpn_ft_resp_acc))
38#define ZFCP_GPN_FT_BUFFERS 4
39#define ZFCP_GPN_FT_MAX_SIZE (ZFCP_GPN_FT_BUFFERS * PAGE_SIZE \
40 - sizeof(struct ct_hdr))
41#define ZFCP_GPN_FT_MAX_ENTRIES ZFCP_GPN_FT_BUFFERS * (ZFCP_GPN_FT_ENTRIES + 1)
42
43struct ct_iu_gpn_ft_resp {
44 struct ct_hdr header;
45 struct gpn_ft_resp_acc accept[ZFCP_GPN_FT_ENTRIES];
46} __attribute__ ((packed));
47
48struct zfcp_gpn_ft {
49 struct zfcp_send_ct ct;
50 struct scatterlist sg_req;
51 struct scatterlist sg_resp[ZFCP_GPN_FT_BUFFERS];
52};
53
54struct zfcp_fc_ns_handler_data {
55 struct completion done;
56 void (*handler)(unsigned long);
57 unsigned long handler_data;
58};
59
60static int zfcp_fc_wka_port_get(struct zfcp_wka_port *wka_port)
61{ 27{
62 if (mutex_lock_interruptible(&wka_port->mutex)) 28 if (mutex_lock_interruptible(&wka_port->mutex))
63 return -ERESTARTSYS; 29 return -ERESTARTSYS;
64 30
65 if (wka_port->status == ZFCP_WKA_PORT_OFFLINE || 31 if (wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE ||
66 wka_port->status == ZFCP_WKA_PORT_CLOSING) { 32 wka_port->status == ZFCP_FC_WKA_PORT_CLOSING) {
67 wka_port->status = ZFCP_WKA_PORT_OPENING; 33 wka_port->status = ZFCP_FC_WKA_PORT_OPENING;
68 if (zfcp_fsf_open_wka_port(wka_port)) 34 if (zfcp_fsf_open_wka_port(wka_port))
69 wka_port->status = ZFCP_WKA_PORT_OFFLINE; 35 wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
70 } 36 }
71 37
72 mutex_unlock(&wka_port->mutex); 38 mutex_unlock(&wka_port->mutex);
73 39
74 wait_event(wka_port->completion_wq, 40 wait_event(wka_port->completion_wq,
75 wka_port->status == ZFCP_WKA_PORT_ONLINE || 41 wka_port->status == ZFCP_FC_WKA_PORT_ONLINE ||
76 wka_port->status == ZFCP_WKA_PORT_OFFLINE); 42 wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE);
77 43
78 if (wka_port->status == ZFCP_WKA_PORT_ONLINE) { 44 if (wka_port->status == ZFCP_FC_WKA_PORT_ONLINE) {
79 atomic_inc(&wka_port->refcount); 45 atomic_inc(&wka_port->refcount);
80 return 0; 46 return 0;
81 } 47 }
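Callers bracket any traffic to a well-known-address port with this get/put pair; a minimal sketch, assuming the zfcp types from this patch and a hypothetical send_ns_request() for the actual CT exchange:

	if (zfcp_fc_wka_port_get(&adapter->gs->ds))
		return;				/* nameserver port could not be opened */
	send_ns_request(adapter);		/* hypothetical CT request/response */
	zfcp_fc_wka_port_put(&adapter->gs->ds);	/* last put schedules the delayed close */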
@@ -85,24 +51,24 @@ static int zfcp_fc_wka_port_get(struct zfcp_wka_port *wka_port)
85static void zfcp_fc_wka_port_offline(struct work_struct *work) 51static void zfcp_fc_wka_port_offline(struct work_struct *work)
86{ 52{
87 struct delayed_work *dw = to_delayed_work(work); 53 struct delayed_work *dw = to_delayed_work(work);
88 struct zfcp_wka_port *wka_port = 54 struct zfcp_fc_wka_port *wka_port =
89 container_of(dw, struct zfcp_wka_port, work); 55 container_of(dw, struct zfcp_fc_wka_port, work);
90 56
91 mutex_lock(&wka_port->mutex); 57 mutex_lock(&wka_port->mutex);
92 if ((atomic_read(&wka_port->refcount) != 0) || 58 if ((atomic_read(&wka_port->refcount) != 0) ||
93 (wka_port->status != ZFCP_WKA_PORT_ONLINE)) 59 (wka_port->status != ZFCP_FC_WKA_PORT_ONLINE))
94 goto out; 60 goto out;
95 61
96 wka_port->status = ZFCP_WKA_PORT_CLOSING; 62 wka_port->status = ZFCP_FC_WKA_PORT_CLOSING;
97 if (zfcp_fsf_close_wka_port(wka_port)) { 63 if (zfcp_fsf_close_wka_port(wka_port)) {
98 wka_port->status = ZFCP_WKA_PORT_OFFLINE; 64 wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
99 wake_up(&wka_port->completion_wq); 65 wake_up(&wka_port->completion_wq);
100 } 66 }
101out: 67out:
102 mutex_unlock(&wka_port->mutex); 68 mutex_unlock(&wka_port->mutex);
103} 69}
104 70
105static void zfcp_fc_wka_port_put(struct zfcp_wka_port *wka_port) 71static void zfcp_fc_wka_port_put(struct zfcp_fc_wka_port *wka_port)
106{ 72{
107 if (atomic_dec_return(&wka_port->refcount) != 0) 73 if (atomic_dec_return(&wka_port->refcount) != 0)
108 return; 74 return;
@@ -110,7 +76,7 @@ static void zfcp_fc_wka_port_put(struct zfcp_wka_port *wka_port)
110 schedule_delayed_work(&wka_port->work, HZ / 100); 76 schedule_delayed_work(&wka_port->work, HZ / 100);
111} 77}
112 78
113static void zfcp_fc_wka_port_init(struct zfcp_wka_port *wka_port, u32 d_id, 79static void zfcp_fc_wka_port_init(struct zfcp_fc_wka_port *wka_port, u32 d_id,
114 struct zfcp_adapter *adapter) 80 struct zfcp_adapter *adapter)
115{ 81{
116 init_waitqueue_head(&wka_port->completion_wq); 82 init_waitqueue_head(&wka_port->completion_wq);
@@ -118,107 +84,107 @@ static void zfcp_fc_wka_port_init(struct zfcp_wka_port *wka_port, u32 d_id,
118 wka_port->adapter = adapter; 84 wka_port->adapter = adapter;
119 wka_port->d_id = d_id; 85 wka_port->d_id = d_id;
120 86
121 wka_port->status = ZFCP_WKA_PORT_OFFLINE; 87 wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
122 atomic_set(&wka_port->refcount, 0); 88 atomic_set(&wka_port->refcount, 0);
123 mutex_init(&wka_port->mutex); 89 mutex_init(&wka_port->mutex);
124 INIT_DELAYED_WORK(&wka_port->work, zfcp_fc_wka_port_offline); 90 INIT_DELAYED_WORK(&wka_port->work, zfcp_fc_wka_port_offline);
125} 91}
126 92
127static void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *wka) 93static void zfcp_fc_wka_port_force_offline(struct zfcp_fc_wka_port *wka)
128{ 94{
129 cancel_delayed_work_sync(&wka->work); 95 cancel_delayed_work_sync(&wka->work);
130 mutex_lock(&wka->mutex); 96 mutex_lock(&wka->mutex);
131 wka->status = ZFCP_WKA_PORT_OFFLINE; 97 wka->status = ZFCP_FC_WKA_PORT_OFFLINE;
132 mutex_unlock(&wka->mutex); 98 mutex_unlock(&wka->mutex);
133} 99}
134 100
135void zfcp_fc_wka_ports_force_offline(struct zfcp_wka_ports *gs) 101void zfcp_fc_wka_ports_force_offline(struct zfcp_fc_wka_ports *gs)
136{ 102{
103 if (!gs)
104 return;
137 zfcp_fc_wka_port_force_offline(&gs->ms); 105 zfcp_fc_wka_port_force_offline(&gs->ms);
138 zfcp_fc_wka_port_force_offline(&gs->ts); 106 zfcp_fc_wka_port_force_offline(&gs->ts);
139 zfcp_fc_wka_port_force_offline(&gs->ds); 107 zfcp_fc_wka_port_force_offline(&gs->ds);
140 zfcp_fc_wka_port_force_offline(&gs->as); 108 zfcp_fc_wka_port_force_offline(&gs->as);
141 zfcp_fc_wka_port_force_offline(&gs->ks);
142} 109}
143 110
144static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range, 111static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
145 struct fcp_rscn_element *elem) 112 struct fc_els_rscn_page *page)
146{ 113{
147 unsigned long flags; 114 unsigned long flags;
115 struct zfcp_adapter *adapter = fsf_req->adapter;
148 struct zfcp_port *port; 116 struct zfcp_port *port;
149 117
150 read_lock_irqsave(&zfcp_data.config_lock, flags); 118 read_lock_irqsave(&adapter->port_list_lock, flags);
151 list_for_each_entry(port, &fsf_req->adapter->port_list_head, list) { 119 list_for_each_entry(port, &adapter->port_list, list) {
152 if ((port->d_id & range) == (elem->nport_did & range)) 120 if ((port->d_id & range) == (ntoh24(page->rscn_fid) & range))
153 zfcp_fc_test_link(port); 121 zfcp_fc_test_link(port);
154 if (!port->d_id) 122 if (!port->d_id)
155 zfcp_erp_port_reopen(port, 123 zfcp_erp_port_reopen(port,
156 ZFCP_STATUS_COMMON_ERP_FAILED, 124 ZFCP_STATUS_COMMON_ERP_FAILED,
157 "fcrscn1", NULL); 125 "fcrscn1", NULL);
158 } 126 }
159 127 read_unlock_irqrestore(&adapter->port_list_lock, flags);
160 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
161} 128}
162 129
163static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req) 130static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
164{ 131{
165 struct fsf_status_read_buffer *status_buffer = (void *)fsf_req->data; 132 struct fsf_status_read_buffer *status_buffer = (void *)fsf_req->data;
166 struct fcp_rscn_head *fcp_rscn_head; 133 struct fc_els_rscn *head;
167 struct fcp_rscn_element *fcp_rscn_element; 134 struct fc_els_rscn_page *page;
168 u16 i; 135 u16 i;
169 u16 no_entries; 136 u16 no_entries;
170 u32 range_mask; 137 unsigned int afmt;
171 138
172 fcp_rscn_head = (struct fcp_rscn_head *) status_buffer->payload.data; 139 head = (struct fc_els_rscn *) status_buffer->payload.data;
173 fcp_rscn_element = (struct fcp_rscn_element *) fcp_rscn_head; 140 page = (struct fc_els_rscn_page *) head;
174 141
175 /* see FC-FS */ 142 /* see FC-FS */
176 no_entries = fcp_rscn_head->payload_len / 143 no_entries = head->rscn_plen / sizeof(struct fc_els_rscn_page);
177 sizeof(struct fcp_rscn_element);
178 144
179 for (i = 1; i < no_entries; i++) { 145 for (i = 1; i < no_entries; i++) {
180 /* skip head and start with 1st element */ 146 /* skip head and start with 1st element */
181 fcp_rscn_element++; 147 page++;
182 range_mask = rscn_range_mask[fcp_rscn_element->addr_format]; 148 afmt = page->rscn_page_flags & ELS_RSCN_ADDR_FMT_MASK;
183 _zfcp_fc_incoming_rscn(fsf_req, range_mask, fcp_rscn_element); 149 _zfcp_fc_incoming_rscn(fsf_req, zfcp_fc_rscn_range_mask[afmt],
150 page);
184 } 151 }
185 schedule_work(&fsf_req->adapter->scan_work); 152 queue_work(fsf_req->adapter->work_queue, &fsf_req->adapter->scan_work);
186} 153}
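For reference, the zfcp_fc_rscn_range_mask table introduced above maps the RSCN address format to the number of significant FC_ID bytes; a short worked example with made-up FC_IDs:

	u32 mask = zfcp_fc_rscn_range_mask[ELS_ADDR_FMT_AREA];	/* 0xFFFF00 */
	u32 affected = 0x012300;	/* FC_ID taken from the RSCN page */
	u32 stored = 0x0123ef;		/* FC_ID of a known remote port */
	bool hit = (stored & mask) == (affected & mask);	/* true: domain and area bytes match */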
187 154
188static void zfcp_fc_incoming_wwpn(struct zfcp_fsf_req *req, u64 wwpn) 155static void zfcp_fc_incoming_wwpn(struct zfcp_fsf_req *req, u64 wwpn)
189{ 156{
157 unsigned long flags;
190 struct zfcp_adapter *adapter = req->adapter; 158 struct zfcp_adapter *adapter = req->adapter;
191 struct zfcp_port *port; 159 struct zfcp_port *port;
192 unsigned long flags;
193 160
194 read_lock_irqsave(&zfcp_data.config_lock, flags); 161 read_lock_irqsave(&adapter->port_list_lock, flags);
195 list_for_each_entry(port, &adapter->port_list_head, list) 162 list_for_each_entry(port, &adapter->port_list, list)
196 if (port->wwpn == wwpn) 163 if (port->wwpn == wwpn) {
164 zfcp_erp_port_forced_reopen(port, 0, "fciwwp1", req);
197 break; 165 break;
198 read_unlock_irqrestore(&zfcp_data.config_lock, flags); 166 }
199 167 read_unlock_irqrestore(&adapter->port_list_lock, flags);
200 if (port && (port->wwpn == wwpn))
201 zfcp_erp_port_forced_reopen(port, 0, "fciwwp1", req);
202} 168}
203 169
204static void zfcp_fc_incoming_plogi(struct zfcp_fsf_req *req) 170static void zfcp_fc_incoming_plogi(struct zfcp_fsf_req *req)
205{ 171{
206 struct fsf_status_read_buffer *status_buffer = 172 struct fsf_status_read_buffer *status_buffer;
207 (struct fsf_status_read_buffer *)req->data; 173 struct fc_els_flogi *plogi;
208 struct fsf_plogi *els_plogi =
209 (struct fsf_plogi *) status_buffer->payload.data;
210 174
211 zfcp_fc_incoming_wwpn(req, els_plogi->serv_param.wwpn); 175 status_buffer = (struct fsf_status_read_buffer *) req->data;
176 plogi = (struct fc_els_flogi *) status_buffer->payload.data;
177 zfcp_fc_incoming_wwpn(req, plogi->fl_wwpn);
212} 178}
213 179
214static void zfcp_fc_incoming_logo(struct zfcp_fsf_req *req) 180static void zfcp_fc_incoming_logo(struct zfcp_fsf_req *req)
215{ 181{
216 struct fsf_status_read_buffer *status_buffer = 182 struct fsf_status_read_buffer *status_buffer =
217 (struct fsf_status_read_buffer *)req->data; 183 (struct fsf_status_read_buffer *)req->data;
218 struct fcp_logo *els_logo = 184 struct fc_els_logo *logo =
219 (struct fcp_logo *) status_buffer->payload.data; 185 (struct fc_els_logo *) status_buffer->payload.data;
220 186
221 zfcp_fc_incoming_wwpn(req, els_logo->nport_wwpn); 187 zfcp_fc_incoming_wwpn(req, logo->fl_n_port_wwn);
222} 188}
223 189
224/** 190/**
@@ -232,79 +198,73 @@ void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req)
232 unsigned int els_type = status_buffer->payload.data[0]; 198 unsigned int els_type = status_buffer->payload.data[0];
233 199
234 zfcp_dbf_san_incoming_els(fsf_req); 200 zfcp_dbf_san_incoming_els(fsf_req);
235 if (els_type == LS_PLOGI) 201 if (els_type == ELS_PLOGI)
236 zfcp_fc_incoming_plogi(fsf_req); 202 zfcp_fc_incoming_plogi(fsf_req);
237 else if (els_type == LS_LOGO) 203 else if (els_type == ELS_LOGO)
238 zfcp_fc_incoming_logo(fsf_req); 204 zfcp_fc_incoming_logo(fsf_req);
239 else if (els_type == LS_RSCN) 205 else if (els_type == ELS_RSCN)
240 zfcp_fc_incoming_rscn(fsf_req); 206 zfcp_fc_incoming_rscn(fsf_req);
241} 207}
242 208
243static void zfcp_fc_ns_handler(unsigned long data) 209static void zfcp_fc_ns_gid_pn_eval(void *data)
244{ 210{
245 struct zfcp_fc_ns_handler_data *compl_rec = 211 struct zfcp_fc_gid_pn *gid_pn = data;
246 (struct zfcp_fc_ns_handler_data *) data; 212 struct zfcp_fsf_ct_els *ct = &gid_pn->ct;
247 213 struct zfcp_fc_gid_pn_req *gid_pn_req = sg_virt(ct->req);
248 if (compl_rec->handler) 214 struct zfcp_fc_gid_pn_resp *gid_pn_resp = sg_virt(ct->resp);
249 compl_rec->handler(compl_rec->handler_data);
250
251 complete(&compl_rec->done);
252}
253
254static void zfcp_fc_ns_gid_pn_eval(unsigned long data)
255{
256 struct zfcp_gid_pn_data *gid_pn = (struct zfcp_gid_pn_data *) data;
257 struct zfcp_send_ct *ct = &gid_pn->ct;
258 struct ct_iu_gid_pn_req *ct_iu_req = sg_virt(ct->req);
259 struct ct_iu_gid_pn_resp *ct_iu_resp = sg_virt(ct->resp);
260 struct zfcp_port *port = gid_pn->port; 215 struct zfcp_port *port = gid_pn->port;
261 216
262 if (ct->status) 217 if (ct->status)
263 return; 218 return;
264 if (ct_iu_resp->header.cmd_rsp_code != ZFCP_CT_ACCEPT) 219 if (gid_pn_resp->ct_hdr.ct_cmd != FC_FS_ACC)
265 return; 220 return;
266 221
267 /* paranoia */ 222 /* paranoia */
268 if (ct_iu_req->wwpn != port->wwpn) 223 if (gid_pn_req->gid_pn.fn_wwpn != port->wwpn)
269 return; 224 return;
270 /* looks like a valid d_id */ 225 /* looks like a valid d_id */
271 port->d_id = ct_iu_resp->d_id & ZFCP_DID_MASK; 226 port->d_id = ntoh24(gid_pn_resp->gid_pn.fp_fid);
227}
228
229static void zfcp_fc_complete(void *data)
230{
231 complete(data);
272} 232}
273 233
274static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port, 234static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port,
275 struct zfcp_gid_pn_data *gid_pn) 235 struct zfcp_fc_gid_pn *gid_pn)
276{ 236{
277 struct zfcp_adapter *adapter = port->adapter; 237 struct zfcp_adapter *adapter = port->adapter;
278 struct zfcp_fc_ns_handler_data compl_rec; 238 DECLARE_COMPLETION_ONSTACK(completion);
279 int ret; 239 int ret;
280 240
281 /* setup parameters for send generic command */ 241 /* setup parameters for send generic command */
282 gid_pn->port = port; 242 gid_pn->port = port;
283 gid_pn->ct.wka_port = &adapter->gs->ds; 243 gid_pn->ct.handler = zfcp_fc_complete;
284 gid_pn->ct.handler = zfcp_fc_ns_handler; 244 gid_pn->ct.handler_data = &completion;
285 gid_pn->ct.handler_data = (unsigned long) &compl_rec; 245 gid_pn->ct.req = &gid_pn->sg_req;
286 gid_pn->ct.req = &gid_pn->req; 246 gid_pn->ct.resp = &gid_pn->sg_resp;
287 gid_pn->ct.resp = &gid_pn->resp; 247 sg_init_one(&gid_pn->sg_req, &gid_pn->gid_pn_req,
288 sg_init_one(&gid_pn->req, &gid_pn->ct_iu_req, 248 sizeof(struct zfcp_fc_gid_pn_req));
289 sizeof(struct ct_iu_gid_pn_req)); 249 sg_init_one(&gid_pn->sg_resp, &gid_pn->gid_pn_resp,
290 sg_init_one(&gid_pn->resp, &gid_pn->ct_iu_resp, 250 sizeof(struct zfcp_fc_gid_pn_resp));
291 sizeof(struct ct_iu_gid_pn_resp));
292 251
293 /* setup nameserver request */ 252 /* setup nameserver request */
294 gid_pn->ct_iu_req.header.revision = ZFCP_CT_REVISION; 253 gid_pn->gid_pn_req.ct_hdr.ct_rev = FC_CT_REV;
295 gid_pn->ct_iu_req.header.gs_type = ZFCP_CT_DIRECTORY_SERVICE; 254 gid_pn->gid_pn_req.ct_hdr.ct_fs_type = FC_FST_DIR;
296 gid_pn->ct_iu_req.header.gs_subtype = ZFCP_CT_NAME_SERVER; 255 gid_pn->gid_pn_req.ct_hdr.ct_fs_subtype = FC_NS_SUBTYPE;
297 gid_pn->ct_iu_req.header.options = ZFCP_CT_SYNCHRONOUS; 256 gid_pn->gid_pn_req.ct_hdr.ct_options = 0;
298 gid_pn->ct_iu_req.header.cmd_rsp_code = ZFCP_CT_GID_PN; 257 gid_pn->gid_pn_req.ct_hdr.ct_cmd = FC_NS_GID_PN;
299 gid_pn->ct_iu_req.header.max_res_size = ZFCP_CT_SIZE_ONE_PAGE / 4; 258 gid_pn->gid_pn_req.ct_hdr.ct_mr_size = ZFCP_FC_CT_SIZE_PAGE / 4;
300 gid_pn->ct_iu_req.wwpn = port->wwpn; 259 gid_pn->gid_pn_req.gid_pn.fn_wwpn = port->wwpn;
301 260
302 init_completion(&compl_rec.done); 261 ret = zfcp_fsf_send_ct(&adapter->gs->ds, &gid_pn->ct,
303 compl_rec.handler = zfcp_fc_ns_gid_pn_eval; 262 adapter->pool.gid_pn_req,
304 compl_rec.handler_data = (unsigned long) gid_pn; 263 ZFCP_FC_CTELS_TMO);
305 ret = zfcp_fsf_send_ct(&gid_pn->ct, adapter->pool.gid_pn_req); 264 if (!ret) {
306 if (!ret) 265 wait_for_completion(&completion);
307 wait_for_completion(&compl_rec.done); 266 zfcp_fc_ns_gid_pn_eval(gid_pn);
267 }
308 return ret; 268 return ret;
309} 269}
310 270
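The GID_PN request above shows the new synchronous calling convention: the handler merely completes an on-stack completion, and the caller evaluates the response after wait_for_completion() returns. The bare pattern, with submit_ct() as a hypothetical stand-in for zfcp_fsf_send_ct():

	DECLARE_COMPLETION_ONSTACK(done);

	ct->handler = zfcp_fc_complete;		/* just calls complete(handler_data) */
	ct->handler_data = &done;
	if (!submit_ct(ct))			/* hypothetical asynchronous submit */
		wait_for_completion(&done);	/* response buffers valid for evaluation after this */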
@@ -316,10 +276,10 @@ static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port,
316static int zfcp_fc_ns_gid_pn(struct zfcp_port *port) 276static int zfcp_fc_ns_gid_pn(struct zfcp_port *port)
317{ 277{
318 int ret; 278 int ret;
319 struct zfcp_gid_pn_data *gid_pn; 279 struct zfcp_fc_gid_pn *gid_pn;
320 struct zfcp_adapter *adapter = port->adapter; 280 struct zfcp_adapter *adapter = port->adapter;
321 281
322 gid_pn = mempool_alloc(adapter->pool.gid_pn_data, GFP_ATOMIC); 282 gid_pn = mempool_alloc(adapter->pool.gid_pn, GFP_ATOMIC);
323 if (!gid_pn) 283 if (!gid_pn)
324 return -ENOMEM; 284 return -ENOMEM;
325 285
@@ -333,7 +293,7 @@ static int zfcp_fc_ns_gid_pn(struct zfcp_port *port)
333 293
334 zfcp_fc_wka_port_put(&adapter->gs->ds); 294 zfcp_fc_wka_port_put(&adapter->gs->ds);
335out: 295out:
336 mempool_free(gid_pn, adapter->pool.gid_pn_data); 296 mempool_free(gid_pn, adapter->pool.gid_pn);
337 return ret; 297 return ret;
338} 298}
339 299
@@ -357,7 +317,7 @@ void zfcp_fc_port_did_lookup(struct work_struct *work)
357 317
358 zfcp_erp_port_reopen(port, 0, "fcgpn_3", NULL); 318 zfcp_erp_port_reopen(port, 0, "fcgpn_3", NULL);
359out: 319out:
360 zfcp_port_put(port); 320 put_device(&port->dev);
361} 321}
362 322
363/** 323/**
@@ -366,9 +326,9 @@ out:
366 */ 326 */
367void zfcp_fc_trigger_did_lookup(struct zfcp_port *port) 327void zfcp_fc_trigger_did_lookup(struct zfcp_port *port)
368{ 328{
369 zfcp_port_get(port); 329 get_device(&port->dev);
370 if (!queue_work(port->adapter->work_queue, &port->gid_pn_work)) 330 if (!queue_work(port->adapter->work_queue, &port->gid_pn_work))
371 zfcp_port_put(port); 331 put_device(&port->dev);
372} 332}
373 333
374/** 334/**
@@ -378,33 +338,36 @@ void zfcp_fc_trigger_did_lookup(struct zfcp_port *port)
378 * 338 *
 379 * Evaluate PLOGI payload and copy important fields into zfcp_port structure 339
380 */ 340 */
381void zfcp_fc_plogi_evaluate(struct zfcp_port *port, struct fsf_plogi *plogi) 341void zfcp_fc_plogi_evaluate(struct zfcp_port *port, struct fc_els_flogi *plogi)
382{ 342{
383 port->maxframe_size = plogi->serv_param.common_serv_param[7] | 343 if (plogi->fl_wwpn != port->wwpn) {
384 ((plogi->serv_param.common_serv_param[6] & 0x0F) << 8); 344 port->d_id = 0;
385 if (plogi->serv_param.class1_serv_param[0] & 0x80) 345 dev_warn(&port->adapter->ccw_device->dev,
346 "A port opened with WWPN 0x%016Lx returned data that "
347 "identifies it as WWPN 0x%016Lx\n",
348 (unsigned long long) port->wwpn,
349 (unsigned long long) plogi->fl_wwpn);
350 return;
351 }
352
353 port->wwnn = plogi->fl_wwnn;
354 port->maxframe_size = plogi->fl_csp.sp_bb_data;
355
356 if (plogi->fl_cssp[0].cp_class & FC_CPC_VALID)
386 port->supported_classes |= FC_COS_CLASS1; 357 port->supported_classes |= FC_COS_CLASS1;
387 if (plogi->serv_param.class2_serv_param[0] & 0x80) 358 if (plogi->fl_cssp[1].cp_class & FC_CPC_VALID)
388 port->supported_classes |= FC_COS_CLASS2; 359 port->supported_classes |= FC_COS_CLASS2;
389 if (plogi->serv_param.class3_serv_param[0] & 0x80) 360 if (plogi->fl_cssp[2].cp_class & FC_CPC_VALID)
390 port->supported_classes |= FC_COS_CLASS3; 361 port->supported_classes |= FC_COS_CLASS3;
391 if (plogi->serv_param.class4_serv_param[0] & 0x80) 362 if (plogi->fl_cssp[3].cp_class & FC_CPC_VALID)
392 port->supported_classes |= FC_COS_CLASS4; 363 port->supported_classes |= FC_COS_CLASS4;
393} 364}
394 365
395struct zfcp_els_adisc { 366static void zfcp_fc_adisc_handler(void *data)
396 struct zfcp_send_els els;
397 struct scatterlist req;
398 struct scatterlist resp;
399 struct zfcp_ls_adisc ls_adisc;
400 struct zfcp_ls_adisc ls_adisc_acc;
401};
402
403static void zfcp_fc_adisc_handler(unsigned long data)
404{ 367{
405 struct zfcp_els_adisc *adisc = (struct zfcp_els_adisc *) data; 368 struct zfcp_fc_els_adisc *adisc = data;
406 struct zfcp_port *port = adisc->els.port; 369 struct zfcp_port *port = adisc->els.port;
407 struct zfcp_ls_adisc *ls_adisc = &adisc->ls_adisc_acc; 370 struct fc_els_adisc *adisc_resp = &adisc->adisc_resp;
408 371
409 if (adisc->els.status) { 372 if (adisc->els.status) {
410 /* request rejected or timed out */ 373 /* request rejected or timed out */
@@ -414,9 +377,9 @@ static void zfcp_fc_adisc_handler(unsigned long data)
414 } 377 }
415 378
416 if (!port->wwnn) 379 if (!port->wwnn)
417 port->wwnn = ls_adisc->wwnn; 380 port->wwnn = adisc_resp->adisc_wwnn;
418 381
419 if ((port->wwpn != ls_adisc->wwpn) || 382 if ((port->wwpn != adisc_resp->adisc_wwpn) ||
420 !(atomic_read(&port->status) & ZFCP_STATUS_COMMON_OPEN)) { 383 !(atomic_read(&port->status) & ZFCP_STATUS_COMMON_OPEN)) {
421 zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, 384 zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
422 "fcadh_2", NULL); 385 "fcadh_2", NULL);
@@ -427,40 +390,45 @@ static void zfcp_fc_adisc_handler(unsigned long data)
427 zfcp_scsi_schedule_rport_register(port); 390 zfcp_scsi_schedule_rport_register(port);
428 out: 391 out:
429 atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status); 392 atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
430 zfcp_port_put(port); 393 put_device(&port->dev);
431 kfree(adisc); 394 kmem_cache_free(zfcp_data.adisc_cache, adisc);
432} 395}
433 396
434static int zfcp_fc_adisc(struct zfcp_port *port) 397static int zfcp_fc_adisc(struct zfcp_port *port)
435{ 398{
436 struct zfcp_els_adisc *adisc; 399 struct zfcp_fc_els_adisc *adisc;
437 struct zfcp_adapter *adapter = port->adapter; 400 struct zfcp_adapter *adapter = port->adapter;
401 int ret;
438 402
439 adisc = kzalloc(sizeof(struct zfcp_els_adisc), GFP_ATOMIC); 403 adisc = kmem_cache_alloc(zfcp_data.adisc_cache, GFP_ATOMIC);
440 if (!adisc) 404 if (!adisc)
441 return -ENOMEM; 405 return -ENOMEM;
442 406
407 adisc->els.port = port;
443 adisc->els.req = &adisc->req; 408 adisc->els.req = &adisc->req;
444 adisc->els.resp = &adisc->resp; 409 adisc->els.resp = &adisc->resp;
445 sg_init_one(adisc->els.req, &adisc->ls_adisc, 410 sg_init_one(adisc->els.req, &adisc->adisc_req,
446 sizeof(struct zfcp_ls_adisc)); 411 sizeof(struct fc_els_adisc));
447 sg_init_one(adisc->els.resp, &adisc->ls_adisc_acc, 412 sg_init_one(adisc->els.resp, &adisc->adisc_resp,
448 sizeof(struct zfcp_ls_adisc)); 413 sizeof(struct fc_els_adisc));
449 414
450 adisc->els.adapter = adapter;
451 adisc->els.port = port;
452 adisc->els.d_id = port->d_id;
453 adisc->els.handler = zfcp_fc_adisc_handler; 415 adisc->els.handler = zfcp_fc_adisc_handler;
454 adisc->els.handler_data = (unsigned long) adisc; 416 adisc->els.handler_data = adisc;
455 adisc->els.ls_code = adisc->ls_adisc.code = ZFCP_LS_ADISC;
456 417
457 /* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports 418 /* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports
458 without FC-AL-2 capability, so we don't set it */ 419 without FC-AL-2 capability, so we don't set it */
459 adisc->ls_adisc.wwpn = fc_host_port_name(adapter->scsi_host); 420 adisc->adisc_req.adisc_wwpn = fc_host_port_name(adapter->scsi_host);
460 adisc->ls_adisc.wwnn = fc_host_node_name(adapter->scsi_host); 421 adisc->adisc_req.adisc_wwnn = fc_host_node_name(adapter->scsi_host);
461 adisc->ls_adisc.nport_id = fc_host_port_id(adapter->scsi_host); 422 adisc->adisc_req.adisc_cmd = ELS_ADISC;
423 hton24(adisc->adisc_req.adisc_port_id,
424 fc_host_port_id(adapter->scsi_host));
425
426 ret = zfcp_fsf_send_els(adapter, port->d_id, &adisc->els,
427 ZFCP_FC_CTELS_TMO);
428 if (ret)
429 kmem_cache_free(zfcp_data.adisc_cache, adisc);
462 430
463 return zfcp_fsf_send_els(&adisc->els); 431 return ret;
464} 432}
465 433
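FC_IDs are 24-bit quantities on the wire, which is why the ADISC setup above uses the hton24()/ntoh24() helpers from the libfc headers rather than open-coded shifts; in isolation:

	u8 fid[3];
	u32 d_id;

	hton24(fid, 0x012345);		/* fid[] = { 0x01, 0x23, 0x45 } */
	d_id = ntoh24(fid);		/* d_id == 0x012345 again */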
466void zfcp_fc_link_test_work(struct work_struct *work) 434void zfcp_fc_link_test_work(struct work_struct *work)
@@ -469,7 +437,7 @@ void zfcp_fc_link_test_work(struct work_struct *work)
469 container_of(work, struct zfcp_port, test_link_work); 437 container_of(work, struct zfcp_port, test_link_work);
470 int retval; 438 int retval;
471 439
472 zfcp_port_get(port); 440 get_device(&port->dev);
473 port->rport_task = RPORT_DEL; 441 port->rport_task = RPORT_DEL;
474 zfcp_scsi_rport_work(&port->rport_work); 442 zfcp_scsi_rport_work(&port->rport_work);
475 443
@@ -488,7 +456,7 @@ void zfcp_fc_link_test_work(struct work_struct *work)
488 zfcp_erp_port_forced_reopen(port, 0, "fcltwk1", NULL); 456 zfcp_erp_port_forced_reopen(port, 0, "fcltwk1", NULL);
489 457
490out: 458out:
491 zfcp_port_put(port); 459 put_device(&port->dev);
492} 460}
493 461
494/** 462/**
@@ -501,12 +469,12 @@ out:
501 */ 469 */
502void zfcp_fc_test_link(struct zfcp_port *port) 470void zfcp_fc_test_link(struct zfcp_port *port)
503{ 471{
504 zfcp_port_get(port); 472 get_device(&port->dev);
505 if (!queue_work(port->adapter->work_queue, &port->test_link_work)) 473 if (!queue_work(port->adapter->work_queue, &port->test_link_work))
506 zfcp_port_put(port); 474 put_device(&port->dev);
507} 475}
508 476
509static void zfcp_free_sg_env(struct zfcp_gpn_ft *gpn_ft, int buf_num) 477static void zfcp_free_sg_env(struct zfcp_fc_gpn_ft *gpn_ft, int buf_num)
510{ 478{
511 struct scatterlist *sg = &gpn_ft->sg_req; 479 struct scatterlist *sg = &gpn_ft->sg_req;
512 480
@@ -516,10 +484,10 @@ static void zfcp_free_sg_env(struct zfcp_gpn_ft *gpn_ft, int buf_num)
516 kfree(gpn_ft); 484 kfree(gpn_ft);
517} 485}
518 486
519static struct zfcp_gpn_ft *zfcp_alloc_sg_env(int buf_num) 487static struct zfcp_fc_gpn_ft *zfcp_alloc_sg_env(int buf_num)
520{ 488{
521 struct zfcp_gpn_ft *gpn_ft; 489 struct zfcp_fc_gpn_ft *gpn_ft;
522 struct ct_iu_gpn_ft_req *req; 490 struct zfcp_fc_gpn_ft_req *req;
523 491
524 gpn_ft = kzalloc(sizeof(*gpn_ft), GFP_KERNEL); 492 gpn_ft = kzalloc(sizeof(*gpn_ft), GFP_KERNEL);
525 if (!gpn_ft) 493 if (!gpn_ft)
@@ -542,159 +510,152 @@ out:
542} 510}
543 511
544 512
545static int zfcp_fc_send_gpn_ft(struct zfcp_gpn_ft *gpn_ft, 513static int zfcp_fc_send_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft,
546 struct zfcp_adapter *adapter, int max_bytes) 514 struct zfcp_adapter *adapter, int max_bytes)
547{ 515{
548 struct zfcp_send_ct *ct = &gpn_ft->ct; 516 struct zfcp_fsf_ct_els *ct = &gpn_ft->ct;
549 struct ct_iu_gpn_ft_req *req = sg_virt(&gpn_ft->sg_req); 517 struct zfcp_fc_gpn_ft_req *req = sg_virt(&gpn_ft->sg_req);
550 struct zfcp_fc_ns_handler_data compl_rec; 518 DECLARE_COMPLETION_ONSTACK(completion);
551 int ret; 519 int ret;
552 520
553 /* prepare CT IU for GPN_FT */ 521 /* prepare CT IU for GPN_FT */
554 req->header.revision = ZFCP_CT_REVISION; 522 req->ct_hdr.ct_rev = FC_CT_REV;
555 req->header.gs_type = ZFCP_CT_DIRECTORY_SERVICE; 523 req->ct_hdr.ct_fs_type = FC_FST_DIR;
556 req->header.gs_subtype = ZFCP_CT_NAME_SERVER; 524 req->ct_hdr.ct_fs_subtype = FC_NS_SUBTYPE;
557 req->header.options = ZFCP_CT_SYNCHRONOUS; 525 req->ct_hdr.ct_options = 0;
558 req->header.cmd_rsp_code = ZFCP_CT_GPN_FT; 526 req->ct_hdr.ct_cmd = FC_NS_GPN_FT;
559 req->header.max_res_size = max_bytes / 4; 527 req->ct_hdr.ct_mr_size = max_bytes / 4;
560 req->flags = 0; 528 req->gpn_ft.fn_domain_id_scope = 0;
561 req->domain_id_scope = 0; 529 req->gpn_ft.fn_area_id_scope = 0;
562 req->area_id_scope = 0; 530 req->gpn_ft.fn_fc4_type = FC_TYPE_FCP;
563 req->fc4_type = ZFCP_CT_SCSI_FCP;
564 531
565 /* prepare zfcp_send_ct */ 532 /* prepare zfcp_send_ct */
566 ct->wka_port = &adapter->gs->ds; 533 ct->handler = zfcp_fc_complete;
567 ct->handler = zfcp_fc_ns_handler; 534 ct->handler_data = &completion;
568 ct->handler_data = (unsigned long)&compl_rec;
569 ct->req = &gpn_ft->sg_req; 535 ct->req = &gpn_ft->sg_req;
570 ct->resp = gpn_ft->sg_resp; 536 ct->resp = gpn_ft->sg_resp;
571 537
572 init_completion(&compl_rec.done); 538 ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct, NULL,
573 compl_rec.handler = NULL; 539 ZFCP_FC_CTELS_TMO);
574 ret = zfcp_fsf_send_ct(ct, NULL);
575 if (!ret) 540 if (!ret)
576 wait_for_completion(&compl_rec.done); 541 wait_for_completion(&completion);
577 return ret; 542 return ret;
578} 543}
579 544
580static void zfcp_fc_validate_port(struct zfcp_port *port) 545static void zfcp_fc_validate_port(struct zfcp_port *port, struct list_head *lh)
581{ 546{
582 struct zfcp_adapter *adapter = port->adapter;
583
584 if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_NOESC)) 547 if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_NOESC))
585 return; 548 return;
586 549
587 atomic_clear_mask(ZFCP_STATUS_COMMON_NOESC, &port->status); 550 atomic_clear_mask(ZFCP_STATUS_COMMON_NOESC, &port->status);
588 551
589 if ((port->supported_classes != 0) || 552 if ((port->supported_classes != 0) ||
590 !list_empty(&port->unit_list_head)) { 553 !list_empty(&port->unit_list))
591 zfcp_port_put(port);
592 return; 554 return;
593 } 555
594 zfcp_erp_port_shutdown(port, 0, "fcpval1", NULL); 556 list_move_tail(&port->list, lh);
595 zfcp_erp_wait(adapter);
596 zfcp_port_put(port);
597 zfcp_port_dequeue(port);
598} 557}
599 558
600static int zfcp_fc_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft, int max_entries) 559static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft,
560 struct zfcp_adapter *adapter, int max_entries)
601{ 561{
602 struct zfcp_send_ct *ct = &gpn_ft->ct; 562 struct zfcp_fsf_ct_els *ct = &gpn_ft->ct;
603 struct scatterlist *sg = gpn_ft->sg_resp; 563 struct scatterlist *sg = gpn_ft->sg_resp;
604 struct ct_hdr *hdr = sg_virt(sg); 564 struct fc_ct_hdr *hdr = sg_virt(sg);
605 struct gpn_ft_resp_acc *acc = sg_virt(sg); 565 struct fc_gpn_ft_resp *acc = sg_virt(sg);
606 struct zfcp_adapter *adapter = ct->wka_port->adapter;
607 struct zfcp_port *port, *tmp; 566 struct zfcp_port *port, *tmp;
567 unsigned long flags;
568 LIST_HEAD(remove_lh);
608 u32 d_id; 569 u32 d_id;
609 int ret = 0, x, last = 0; 570 int ret = 0, x, last = 0;
610 571
611 if (ct->status) 572 if (ct->status)
612 return -EIO; 573 return -EIO;
613 574
614 if (hdr->cmd_rsp_code != ZFCP_CT_ACCEPT) { 575 if (hdr->ct_cmd != FC_FS_ACC) {
615 if (hdr->reason_code == ZFCP_CT_UNABLE_TO_PERFORM_CMD) 576 if (hdr->ct_reason == FC_BA_RJT_UNABLE)
616 return -EAGAIN; /* might be a temporary condition */ 577 return -EAGAIN; /* might be a temporary condition */
617 return -EIO; 578 return -EIO;
618 } 579 }
619 580
620 if (hdr->max_res_size) { 581 if (hdr->ct_mr_size) {
621 dev_warn(&adapter->ccw_device->dev, 582 dev_warn(&adapter->ccw_device->dev,
622 "The name server reported %d words residual data\n", 583 "The name server reported %d words residual data\n",
623 hdr->max_res_size); 584 hdr->ct_mr_size);
624 return -E2BIG; 585 return -E2BIG;
625 } 586 }
626 587
627 mutex_lock(&zfcp_data.config_mutex);
628
629 /* first entry is the header */ 588 /* first entry is the header */
630 for (x = 1; x < max_entries && !last; x++) { 589 for (x = 1; x < max_entries && !last; x++) {
631 if (x % (ZFCP_GPN_FT_ENTRIES + 1)) 590 if (x % (ZFCP_FC_GPN_FT_ENT_PAGE + 1))
632 acc++; 591 acc++;
633 else 592 else
634 acc = sg_virt(++sg); 593 acc = sg_virt(++sg);
635 594
636 last = acc->control & 0x80; 595 last = acc->fp_flags & FC_NS_FID_LAST;
637 d_id = acc->port_id[0] << 16 | acc->port_id[1] << 8 | 596 d_id = ntoh24(acc->fp_fid);
638 acc->port_id[2];
639 597
640 /* don't attach ports with a well known address */ 598 /* don't attach ports with a well known address */
641 if ((d_id & ZFCP_DID_WKA) == ZFCP_DID_WKA) 599 if (d_id >= FC_FID_WELL_KNOWN_BASE)
642 continue; 600 continue;
643 /* skip the adapter's port and known remote ports */ 601 /* skip the adapter's port and known remote ports */
644 if (acc->wwpn == fc_host_port_name(adapter->scsi_host)) 602 if (acc->fp_wwpn == fc_host_port_name(adapter->scsi_host))
645 continue;
646 port = zfcp_get_port_by_wwpn(adapter, acc->wwpn);
647 if (port)
648 continue; 603 continue;
649 604
650 port = zfcp_port_enqueue(adapter, acc->wwpn, 605 port = zfcp_port_enqueue(adapter, acc->fp_wwpn,
651 ZFCP_STATUS_COMMON_NOESC, d_id); 606 ZFCP_STATUS_COMMON_NOESC, d_id);
652 if (IS_ERR(port)) 607 if (!IS_ERR(port))
653 ret = PTR_ERR(port);
654 else
655 zfcp_erp_port_reopen(port, 0, "fcegpf1", NULL); 608 zfcp_erp_port_reopen(port, 0, "fcegpf1", NULL);
609 else if (PTR_ERR(port) != -EEXIST)
610 ret = PTR_ERR(port);
656 } 611 }
657 612
658 zfcp_erp_wait(adapter); 613 zfcp_erp_wait(adapter);
659 list_for_each_entry_safe(port, tmp, &adapter->port_list_head, list) 614 write_lock_irqsave(&adapter->port_list_lock, flags);
660 zfcp_fc_validate_port(port); 615 list_for_each_entry_safe(port, tmp, &adapter->port_list, list)
661 mutex_unlock(&zfcp_data.config_mutex); 616 zfcp_fc_validate_port(port, &remove_lh);
617 write_unlock_irqrestore(&adapter->port_list_lock, flags);
618
619 list_for_each_entry_safe(port, tmp, &remove_lh, list) {
620 zfcp_erp_port_shutdown(port, 0, "fcegpf2", NULL);
621 zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs);
622 }
623
662 return ret; 624 return ret;
663} 625}
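The rewritten cleanup above collects stale ports on a local list while holding the write lock and unregisters them only after the lock is dropped, since unregistering may sleep. The same move-then-process idiom in isolation; port_is_stale() and cleanup_port() are hypothetical:

	LIST_HEAD(remove_lh);
	struct zfcp_port *port, *tmp;
	unsigned long flags;

	write_lock_irqsave(&adapter->port_list_lock, flags);
	list_for_each_entry_safe(port, tmp, &adapter->port_list, list)
		if (port_is_stale(port))
			list_move_tail(&port->list, &remove_lh);
	write_unlock_irqrestore(&adapter->port_list_lock, flags);

	list_for_each_entry_safe(port, tmp, &remove_lh, list)
		cleanup_port(port);	/* may sleep; list lock no longer held */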
664 626
665/** 627/**
666 * zfcp_fc_scan_ports - scan remote ports and attach new ports 628 * zfcp_fc_scan_ports - scan remote ports and attach new ports
667 * @adapter: pointer to struct zfcp_adapter 629 * @work: reference to scheduled work
668 */ 630 */
669int zfcp_fc_scan_ports(struct zfcp_adapter *adapter) 631void zfcp_fc_scan_ports(struct work_struct *work)
670{ 632{
633 struct zfcp_adapter *adapter = container_of(work, struct zfcp_adapter,
634 scan_work);
671 int ret, i; 635 int ret, i;
672 struct zfcp_gpn_ft *gpn_ft; 636 struct zfcp_fc_gpn_ft *gpn_ft;
673 int chain, max_entries, buf_num, max_bytes; 637 int chain, max_entries, buf_num, max_bytes;
674 638
675 chain = adapter->adapter_features & FSF_FEATURE_ELS_CT_CHAINED_SBALS; 639 chain = adapter->adapter_features & FSF_FEATURE_ELS_CT_CHAINED_SBALS;
676 buf_num = chain ? ZFCP_GPN_FT_BUFFERS : 1; 640 buf_num = chain ? ZFCP_FC_GPN_FT_NUM_BUFS : 1;
677 max_entries = chain ? ZFCP_GPN_FT_MAX_ENTRIES : ZFCP_GPN_FT_ENTRIES; 641 max_entries = chain ? ZFCP_FC_GPN_FT_MAX_ENT : ZFCP_FC_GPN_FT_ENT_PAGE;
678 max_bytes = chain ? ZFCP_GPN_FT_MAX_SIZE : ZFCP_CT_SIZE_ONE_PAGE; 642 max_bytes = chain ? ZFCP_FC_GPN_FT_MAX_SIZE : ZFCP_FC_CT_SIZE_PAGE;
679 643
680 if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT && 644 if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT &&
681 fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV) 645 fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
682 return 0; 646 return;
683 647
684 ret = zfcp_fc_wka_port_get(&adapter->gs->ds); 648 if (zfcp_fc_wka_port_get(&adapter->gs->ds))
685 if (ret) 649 return;
686 return ret;
687 650
688 gpn_ft = zfcp_alloc_sg_env(buf_num); 651 gpn_ft = zfcp_alloc_sg_env(buf_num);
689 if (!gpn_ft) { 652 if (!gpn_ft)
690 ret = -ENOMEM;
691 goto out; 653 goto out;
692 }
693 654
694 for (i = 0; i < 3; i++) { 655 for (i = 0; i < 3; i++) {
695 ret = zfcp_fc_send_gpn_ft(gpn_ft, adapter, max_bytes); 656 ret = zfcp_fc_send_gpn_ft(gpn_ft, adapter, max_bytes);
696 if (!ret) { 657 if (!ret) {
697 ret = zfcp_fc_eval_gpn_ft(gpn_ft, max_entries); 658 ret = zfcp_fc_eval_gpn_ft(gpn_ft, adapter, max_entries);
698 if (ret == -EAGAIN) 659 if (ret == -EAGAIN)
699 ssleep(1); 660 ssleep(1);
700 else 661 else
@@ -704,174 +665,142 @@ int zfcp_fc_scan_ports(struct zfcp_adapter *adapter)
704 zfcp_free_sg_env(gpn_ft, buf_num); 665 zfcp_free_sg_env(gpn_ft, buf_num);
705out: 666out:
706 zfcp_fc_wka_port_put(&adapter->gs->ds); 667 zfcp_fc_wka_port_put(&adapter->gs->ds);
707 return ret;
708} 668}
709 669
710 670static void zfcp_fc_ct_els_job_handler(void *data)
711void _zfcp_fc_scan_ports_later(struct work_struct *work)
712{ 671{
713 zfcp_fc_scan_ports(container_of(work, struct zfcp_adapter, scan_work)); 672 struct fc_bsg_job *job = data;
714} 673 struct zfcp_fsf_ct_els *zfcp_ct_els = job->dd_data;
674 struct fc_bsg_reply *jr = job->reply;
715 675
716struct zfcp_els_fc_job { 676 jr->reply_payload_rcv_len = job->reply_payload.payload_len;
717 struct zfcp_send_els els; 677 jr->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
718 struct fc_bsg_job *job; 678 jr->result = zfcp_ct_els->status ? -EIO : 0;
719}; 679 job->job_done(job);
680}
720 681
721static void zfcp_fc_generic_els_handler(unsigned long data) 682static struct zfcp_fc_wka_port *zfcp_fc_job_wka_port(struct fc_bsg_job *job)
722{ 683{
723 struct zfcp_els_fc_job *els_fc_job = (struct zfcp_els_fc_job *) data; 684 u32 preamble_word1;
724 struct fc_bsg_job *job = els_fc_job->job; 685 u8 gs_type;
725 struct fc_bsg_reply *reply = job->reply; 686 struct zfcp_adapter *adapter;
726 687
727 if (els_fc_job->els.status) { 688 preamble_word1 = job->request->rqst_data.r_ct.preamble_word1;
728 /* request rejected or timed out */ 689 gs_type = (preamble_word1 & 0xff000000) >> 24;
729 reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_REJECT;
730 goto out;
731 }
732 690
733 reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; 691 adapter = (struct zfcp_adapter *) job->shost->hostdata[0];
734 reply->reply_payload_rcv_len = job->reply_payload.payload_len;
735 692
736out: 693 switch (gs_type) {
737 job->state_flags = FC_RQST_STATE_DONE; 694 case FC_FST_ALIAS:
738 job->job_done(job); 695 return &adapter->gs->as;
739 kfree(els_fc_job); 696 case FC_FST_MGMT:
697 return &adapter->gs->ms;
698 case FC_FST_TIME:
699 return &adapter->gs->ts;
700 break;
701 case FC_FST_DIR:
702 return &adapter->gs->ds;
703 break;
704 default:
705 return NULL;
706 }
740} 707}
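The GS type sits in the top byte of the first CT preamble word, so the routing above only has to shift it out; for example, a nameserver (directory service) request, with the constant values assumed from the FC GS headers:

	u32 preamble_word1 = 0xfc020000;	/* FC_FST_DIR (0xfc), FC_NS_SUBTYPE (0x02) */
	u8 gs_type = (preamble_word1 & 0xff000000) >> 24;	/* 0xfc, routed to &adapter->gs->ds */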
741 708
742int zfcp_fc_execute_els_fc_job(struct fc_bsg_job *job) 709static void zfcp_fc_ct_job_handler(void *data)
743{ 710{
744 struct zfcp_els_fc_job *els_fc_job; 711 struct fc_bsg_job *job = data;
745 struct fc_rport *rport = job->rport; 712 struct zfcp_fc_wka_port *wka_port;
746 struct Scsi_Host *shost;
747 struct zfcp_adapter *adapter;
748 struct zfcp_port *port;
749 u8 *port_did;
750 713
751 shost = rport ? rport_to_shost(rport) : job->shost; 714 wka_port = zfcp_fc_job_wka_port(job);
752 adapter = (struct zfcp_adapter *)shost->hostdata[0]; 715 zfcp_fc_wka_port_put(wka_port);
753 716
754 if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_OPEN)) 717 zfcp_fc_ct_els_job_handler(data);
755 return -EINVAL; 718}
756 719
757 els_fc_job = kzalloc(sizeof(struct zfcp_els_fc_job), GFP_KERNEL); 720static int zfcp_fc_exec_els_job(struct fc_bsg_job *job,
758 if (!els_fc_job) 721 struct zfcp_adapter *adapter)
759 return -ENOMEM; 722{
723 struct zfcp_fsf_ct_els *els = job->dd_data;
724 struct fc_rport *rport = job->rport;
725 struct zfcp_port *port;
726 u32 d_id;
760 727
761 els_fc_job->els.adapter = adapter;
762 if (rport) { 728 if (rport) {
763 read_lock_irq(&zfcp_data.config_lock);
764 port = zfcp_get_port_by_wwpn(adapter, rport->port_name); 729 port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
765 if (port) 730 if (!port)
766 els_fc_job->els.d_id = port->d_id;
767 read_unlock_irq(&zfcp_data.config_lock);
768 if (!port) {
769 kfree(els_fc_job);
770 return -EINVAL; 731 return -EINVAL;
771 }
772 } else {
773 port_did = job->request->rqst_data.h_els.port_id;
774 els_fc_job->els.d_id = (port_did[0] << 16) +
775 (port_did[1] << 8) + port_did[2];
776 }
777 732
778 els_fc_job->els.req = job->request_payload.sg_list; 733 d_id = port->d_id;
779 els_fc_job->els.resp = job->reply_payload.sg_list; 734 put_device(&port->dev);
780 els_fc_job->els.handler = zfcp_fc_generic_els_handler; 735 } else
781 els_fc_job->els.handler_data = (unsigned long) els_fc_job; 736 d_id = ntoh24(job->request->rqst_data.h_els.port_id);
782 els_fc_job->job = job;
783 737
784 return zfcp_fsf_send_els(&els_fc_job->els); 738 els->handler = zfcp_fc_ct_els_job_handler;
739 return zfcp_fsf_send_els(adapter, d_id, els, job->req->timeout / HZ);
785} 740}
786 741
787struct zfcp_ct_fc_job { 742static int zfcp_fc_exec_ct_job(struct fc_bsg_job *job,
788 struct zfcp_send_ct ct; 743 struct zfcp_adapter *adapter)
789 struct fc_bsg_job *job;
790};
791
792static void zfcp_fc_generic_ct_handler(unsigned long data)
793{ 744{
794 struct zfcp_ct_fc_job *ct_fc_job = (struct zfcp_ct_fc_job *) data; 745 int ret;
795 struct fc_bsg_job *job = ct_fc_job->job; 746 struct zfcp_fsf_ct_els *ct = job->dd_data;
747 struct zfcp_fc_wka_port *wka_port;
796 748
797 job->reply->reply_data.ctels_reply.status = ct_fc_job->ct.status ? 749 wka_port = zfcp_fc_job_wka_port(job);
798 FC_CTELS_STATUS_REJECT : FC_CTELS_STATUS_OK; 750 if (!wka_port)
799 job->reply->reply_payload_rcv_len = job->reply_payload.payload_len; 751 return -EINVAL;
800 job->state_flags = FC_RQST_STATE_DONE;
801 job->job_done(job);
802 752
803 zfcp_fc_wka_port_put(ct_fc_job->ct.wka_port); 753 ret = zfcp_fc_wka_port_get(wka_port);
754 if (ret)
755 return ret;
804 756
805 kfree(ct_fc_job); 757 ct->handler = zfcp_fc_ct_job_handler;
758 ret = zfcp_fsf_send_ct(wka_port, ct, NULL, job->req->timeout / HZ);
759 if (ret)
760 zfcp_fc_wka_port_put(wka_port);
761
762 return ret;
806} 763}
807 764
808int zfcp_fc_execute_ct_fc_job(struct fc_bsg_job *job) 765int zfcp_fc_exec_bsg_job(struct fc_bsg_job *job)
809{ 766{
810 int ret;
811 u8 gs_type;
812 struct fc_rport *rport = job->rport;
813 struct Scsi_Host *shost; 767 struct Scsi_Host *shost;
814 struct zfcp_adapter *adapter; 768 struct zfcp_adapter *adapter;
815 struct zfcp_ct_fc_job *ct_fc_job; 769 struct zfcp_fsf_ct_els *ct_els = job->dd_data;
816 u32 preamble_word1;
817
818 shost = rport ? rport_to_shost(rport) : job->shost;
819 770
771 shost = job->rport ? rport_to_shost(job->rport) : job->shost;
820 adapter = (struct zfcp_adapter *)shost->hostdata[0]; 772 adapter = (struct zfcp_adapter *)shost->hostdata[0];
773
821 if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_OPEN)) 774 if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_OPEN))
822 return -EINVAL; 775 return -EINVAL;
823 776
824 ct_fc_job = kzalloc(sizeof(struct zfcp_ct_fc_job), GFP_KERNEL); 777 ct_els->req = job->request_payload.sg_list;
825 if (!ct_fc_job) 778 ct_els->resp = job->reply_payload.sg_list;
826 return -ENOMEM; 779 ct_els->handler_data = job;
827 780
828 preamble_word1 = job->request->rqst_data.r_ct.preamble_word1; 781 switch (job->request->msgcode) {
829 gs_type = (preamble_word1 & 0xff000000) >> 24; 782 case FC_BSG_RPT_ELS:
830 783 case FC_BSG_HST_ELS_NOLOGIN:
831 switch (gs_type) { 784 return zfcp_fc_exec_els_job(job, adapter);
832 case FC_FST_ALIAS: 785 case FC_BSG_RPT_CT:
833 ct_fc_job->ct.wka_port = &adapter->gs->as; 786 case FC_BSG_HST_CT:
834 break; 787 return zfcp_fc_exec_ct_job(job, adapter);
835 case FC_FST_MGMT:
836 ct_fc_job->ct.wka_port = &adapter->gs->ms;
837 break;
838 case FC_FST_TIME:
839 ct_fc_job->ct.wka_port = &adapter->gs->ts;
840 break;
841 case FC_FST_DIR:
842 ct_fc_job->ct.wka_port = &adapter->gs->ds;
843 break;
844 default: 788 default:
845 kfree(ct_fc_job); 789 return -EINVAL;
846 return -EINVAL; /* no such service */
847 }
848
849 ret = zfcp_fc_wka_port_get(ct_fc_job->ct.wka_port);
850 if (ret) {
851 kfree(ct_fc_job);
852 return ret;
853 } 790 }
791}
854 792
855 ct_fc_job->ct.req = job->request_payload.sg_list; 793int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *job)
856 ct_fc_job->ct.resp = job->reply_payload.sg_list; 794{
857 ct_fc_job->ct.handler = zfcp_fc_generic_ct_handler; 795 /* hardware tracks timeout, reset bsg timeout to not interfere */
858 ct_fc_job->ct.handler_data = (unsigned long) ct_fc_job; 796 return -EAGAIN;
859 ct_fc_job->ct.completion = NULL;
860 ct_fc_job->job = job;
861
862 ret = zfcp_fsf_send_ct(&ct_fc_job->ct, NULL);
863 if (ret) {
864 kfree(ct_fc_job);
865 zfcp_fc_wka_port_put(ct_fc_job->ct.wka_port);
866 }
867 return ret;
868} 797}
869 798
870int zfcp_fc_gs_setup(struct zfcp_adapter *adapter) 799int zfcp_fc_gs_setup(struct zfcp_adapter *adapter)
871{ 800{
872 struct zfcp_wka_ports *wka_ports; 801 struct zfcp_fc_wka_ports *wka_ports;
873 802
874 wka_ports = kzalloc(sizeof(struct zfcp_wka_ports), GFP_KERNEL); 803 wka_ports = kzalloc(sizeof(struct zfcp_fc_wka_ports), GFP_KERNEL);
875 if (!wka_ports) 804 if (!wka_ports)
876 return -ENOMEM; 805 return -ENOMEM;
877 806
@@ -880,7 +809,6 @@ int zfcp_fc_gs_setup(struct zfcp_adapter *adapter)
880 zfcp_fc_wka_port_init(&wka_ports->ts, FC_FID_TIME_SERV, adapter); 809 zfcp_fc_wka_port_init(&wka_ports->ts, FC_FID_TIME_SERV, adapter);
881 zfcp_fc_wka_port_init(&wka_ports->ds, FC_FID_DIR_SERV, adapter); 810 zfcp_fc_wka_port_init(&wka_ports->ds, FC_FID_DIR_SERV, adapter);
882 zfcp_fc_wka_port_init(&wka_ports->as, FC_FID_ALIASES, adapter); 811 zfcp_fc_wka_port_init(&wka_ports->as, FC_FID_ALIASES, adapter);
883 zfcp_fc_wka_port_init(&wka_ports->ks, FC_FID_SEC_KEY, adapter);
884 812
885 return 0; 813 return 0;
886} 814}
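The reworked CT path above calls zfcp_fc_job_wka_port() to pick the well-known-address port for a BSG CT request; that helper lives elsewhere in zfcp_fc.c and is not part of these hunks. A rough sketch of such a lookup, mirroring the FC_FST_* switch removed above (illustrative only; the actual helper may differ in detail):

static struct zfcp_fc_wka_port *zfcp_fc_job_wka_port(struct fc_bsg_job *job)
{
	u32 preamble_word1;
	u8 gs_type;
	struct zfcp_adapter *adapter;

	/* the GS type sits in the top byte of the CT preamble */
	preamble_word1 = job->request->rqst_data.r_ct.preamble_word1;
	gs_type = (preamble_word1 & 0xff000000) >> 24;

	adapter = (struct zfcp_adapter *) job->shost->hostdata[0];

	switch (gs_type) {
	case FC_FST_ALIAS:
		return &adapter->gs->as;
	case FC_FST_MGMT:
		return &adapter->gs->ms;
	case FC_FST_TIME:
		return &adapter->gs->ts;
	case FC_FST_DIR:
		return &adapter->gs->ds;
	default:
		return NULL;	/* no such generic service */
	}
}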
diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h
new file mode 100644
index 000000000000..0747b087390d
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_fc.h
@@ -0,0 +1,262 @@
1/*
2 * zfcp device driver
3 *
4 * Fibre Channel related definitions and inline functions for the zfcp
5 * device driver
6 *
7 * Copyright IBM Corporation 2009
8 */
9
10#ifndef ZFCP_FC_H
11#define ZFCP_FC_H
12
13#include <scsi/fc/fc_els.h>
14#include <scsi/fc/fc_fcp.h>
15#include <scsi/fc/fc_ns.h>
16#include <scsi/scsi_cmnd.h>
17#include <scsi/scsi_tcq.h>
18#include "zfcp_fsf.h"
19
20#define ZFCP_FC_CT_SIZE_PAGE (PAGE_SIZE - sizeof(struct fc_ct_hdr))
21#define ZFCP_FC_GPN_FT_ENT_PAGE (ZFCP_FC_CT_SIZE_PAGE \
22 / sizeof(struct fc_gpn_ft_resp))
23#define ZFCP_FC_GPN_FT_NUM_BUFS 4 /* memory pages */
24
25#define ZFCP_FC_GPN_FT_MAX_SIZE (ZFCP_FC_GPN_FT_NUM_BUFS * PAGE_SIZE \
26 - sizeof(struct fc_ct_hdr))
27#define ZFCP_FC_GPN_FT_MAX_ENT (ZFCP_FC_GPN_FT_NUM_BUFS * \
28 (ZFCP_FC_GPN_FT_ENT_PAGE + 1))
29
30#define ZFCP_FC_CTELS_TMO (2 * FC_DEF_R_A_TOV / 1000)
31
32/**
33 * struct zfcp_fc_gid_pn_req - container for ct header plus gid_pn request
34 * @ct_hdr: FC GS common transport header
35 * @gid_pn: GID_PN request
36 */
37struct zfcp_fc_gid_pn_req {
38 struct fc_ct_hdr ct_hdr;
39 struct fc_ns_gid_pn gid_pn;
40} __packed;
41
42/**
43 * struct zfcp_fc_gid_pn_resp - container for ct header plus gid_pn response
44 * @ct_hdr: FC GS common transport header
45 * @gid_pn: GID_PN response
46 */
47struct zfcp_fc_gid_pn_resp {
48 struct fc_ct_hdr ct_hdr;
49 struct fc_gid_pn_resp gid_pn;
50} __packed;
51
52/**
53 * struct zfcp_fc_gid_pn - everything required in zfcp for gid_pn request
54 * @ct: data passed to zfcp_fsf for issuing fsf request
55 * @sg_req: scatterlist entry for request data
56 * @sg_resp: scatterlist entry for response data
57 * @gid_pn_req: GID_PN request data
58 * @gid_pn_resp: GID_PN response data
59 */
60struct zfcp_fc_gid_pn {
61 struct zfcp_fsf_ct_els ct;
62 struct scatterlist sg_req;
63 struct scatterlist sg_resp;
64 struct zfcp_fc_gid_pn_req gid_pn_req;
65 struct zfcp_fc_gid_pn_resp gid_pn_resp;
66 struct zfcp_port *port;
67};
68
69/**
 70 * struct zfcp_fc_gpn_ft_req - container for ct header plus gpn_ft request
71 * @ct_hdr: FC GS common transport header
72 * @gpn_ft: GPN_FT request
73 */
74struct zfcp_fc_gpn_ft_req {
75 struct fc_ct_hdr ct_hdr;
76 struct fc_ns_gid_ft gpn_ft;
77} __packed;
78
79/**
80 * struct zfcp_fc_gpn_ft_resp - container for ct header plus gpn_ft response
81 * @ct_hdr: FC GS common transport header
82 * @gpn_ft: Array of gpn_ft response data to fill one memory page
83 */
84struct zfcp_fc_gpn_ft_resp {
85 struct fc_ct_hdr ct_hdr;
86 struct fc_gpn_ft_resp gpn_ft[ZFCP_FC_GPN_FT_ENT_PAGE];
87} __packed;
88
89/**
90 * struct zfcp_fc_gpn_ft - zfcp data for gpn_ft request
91 * @ct: data passed to zfcp_fsf for issuing fsf request
92 * @sg_req: scatter list entry for gpn_ft request
93 * @sg_resp: scatter list entries for gpn_ft responses (per memory page)
94 */
95struct zfcp_fc_gpn_ft {
96 struct zfcp_fsf_ct_els ct;
97 struct scatterlist sg_req;
98 struct scatterlist sg_resp[ZFCP_FC_GPN_FT_NUM_BUFS];
99};
100
101/**
102 * struct zfcp_fc_els_adisc - everything required in zfcp for issuing ELS ADISC
103 * @els: data required for issuing els fsf command
104 * @req: scatterlist entry for ELS ADISC request
105 * @resp: scatterlist entry for ELS ADISC response
106 * @adisc_req: ELS ADISC request data
107 * @adisc_resp: ELS ADISC response data
108 */
109struct zfcp_fc_els_adisc {
110 struct zfcp_fsf_ct_els els;
111 struct scatterlist req;
112 struct scatterlist resp;
113 struct fc_els_adisc adisc_req;
114 struct fc_els_adisc adisc_resp;
115};
116
117/**
118 * enum zfcp_fc_wka_status - FC WKA port status in zfcp
119 * @ZFCP_FC_WKA_PORT_OFFLINE: Port is closed and not in use
120 * @ZFCP_FC_WKA_PORT_CLOSING: The FSF "close port" request is pending
121 * @ZFCP_FC_WKA_PORT_OPENING: The FSF "open port" request is pending
122 * @ZFCP_FC_WKA_PORT_ONLINE: The port is open and the port handle is valid
123 */
124enum zfcp_fc_wka_status {
125 ZFCP_FC_WKA_PORT_OFFLINE,
126 ZFCP_FC_WKA_PORT_CLOSING,
127 ZFCP_FC_WKA_PORT_OPENING,
128 ZFCP_FC_WKA_PORT_ONLINE,
129};
130
131/**
132 * struct zfcp_fc_wka_port - representation of well-known-address (WKA) FC port
133 * @adapter: Pointer to adapter structure this WKA port belongs to
134 * @completion_wq: Wait for completion of open/close command
135 * @status: Current status of WKA port
136 * @refcount: Reference count to keep port open as long as it is in use
137 * @d_id: FC destination id or well-known-address
138 * @handle: FSF handle for the open WKA port
139 * @mutex: Mutex used during opening/closing state changes
140 * @work: For delaying the closing of the WKA port
141 */
142struct zfcp_fc_wka_port {
143 struct zfcp_adapter *adapter;
144 wait_queue_head_t completion_wq;
145 enum zfcp_fc_wka_status status;
146 atomic_t refcount;
147 u32 d_id;
148 u32 handle;
149 struct mutex mutex;
150 struct delayed_work work;
151};
152
153/**
154 * struct zfcp_fc_wka_ports - Data structures for FC generic services
155 * @ms: FC Management service
156 * @ts: FC time service
157 * @ds: FC directory service
158 * @as: FC alias service
159 */
160struct zfcp_fc_wka_ports {
161 struct zfcp_fc_wka_port ms;
162 struct zfcp_fc_wka_port ts;
163 struct zfcp_fc_wka_port ds;
164 struct zfcp_fc_wka_port as;
165};
166
167/**
168 * zfcp_fc_scsi_to_fcp - setup FCP command with data from scsi_cmnd
169 * @fcp: fcp_cmnd to setup
170 * @scsi: scsi_cmnd where to get LUN, task attributes/flags and CDB
171 */
172static inline
173void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi)
174{
175 char tag[2];
176
177 int_to_scsilun(scsi->device->lun, (struct scsi_lun *) &fcp->fc_lun);
178
179 if (scsi_populate_tag_msg(scsi, tag)) {
180 switch (tag[0]) {
181 case MSG_ORDERED_TAG:
182 fcp->fc_pri_ta |= FCP_PTA_ORDERED;
183 break;
184 case MSG_SIMPLE_TAG:
185 fcp->fc_pri_ta |= FCP_PTA_SIMPLE;
186 break;
187 };
188 } else
189 fcp->fc_pri_ta = FCP_PTA_SIMPLE;
190
191 if (scsi->sc_data_direction == DMA_FROM_DEVICE)
192 fcp->fc_flags |= FCP_CFL_RDDATA;
193 if (scsi->sc_data_direction == DMA_TO_DEVICE)
194 fcp->fc_flags |= FCP_CFL_WRDATA;
195
196 memcpy(fcp->fc_cdb, scsi->cmnd, scsi->cmd_len);
197
198 fcp->fc_dl = scsi_bufflen(scsi);
199}
200
201/**
202 * zfcp_fc_fcp_tm - setup FCP command as task management command
203 * @fcp: fcp_cmnd to setup
204 * @dev: scsi_device where to send the task management command
205 * @tm: task management flags to setup tm command
206 */
207static inline
208void zfcp_fc_fcp_tm(struct fcp_cmnd *fcp, struct scsi_device *dev, u8 tm_flags)
209{
210 int_to_scsilun(dev->lun, (struct scsi_lun *) &fcp->fc_lun);
211 fcp->fc_tm_flags |= tm_flags;
212}
213
214/**
 215 * zfcp_fc_eval_fcp_rsp - evaluate FCP RSP IU and update scsi_cmnd accordingly
216 * @fcp_rsp: FCP RSP IU to evaluate
217 * @scsi: SCSI command where to update status and sense buffer
218 */
219static inline
220void zfcp_fc_eval_fcp_rsp(struct fcp_resp_with_ext *fcp_rsp,
221 struct scsi_cmnd *scsi)
222{
223 struct fcp_resp_rsp_info *rsp_info;
224 char *sense;
225 u32 sense_len, resid;
226 u8 rsp_flags;
227
228 set_msg_byte(scsi, COMMAND_COMPLETE);
229 scsi->result |= fcp_rsp->resp.fr_status;
230
231 rsp_flags = fcp_rsp->resp.fr_flags;
232
233 if (unlikely(rsp_flags & FCP_RSP_LEN_VAL)) {
234 rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
235 if (rsp_info->rsp_code == FCP_TMF_CMPL)
236 set_host_byte(scsi, DID_OK);
237 else {
238 set_host_byte(scsi, DID_ERROR);
239 return;
240 }
241 }
242
243 if (unlikely(rsp_flags & FCP_SNS_LEN_VAL)) {
244 sense = (char *) &fcp_rsp[1];
245 if (rsp_flags & FCP_RSP_LEN_VAL)
246 sense += fcp_rsp->ext.fr_sns_len;
247 sense_len = min(fcp_rsp->ext.fr_sns_len,
248 (u32) SCSI_SENSE_BUFFERSIZE);
249 memcpy(scsi->sense_buffer, sense, sense_len);
250 }
251
252 if (unlikely(rsp_flags & FCP_RESID_UNDER)) {
253 resid = fcp_rsp->ext.fr_resid;
254 scsi_set_resid(scsi, resid);
255 if (scsi_bufflen(scsi) - resid < scsi->underflow &&
256 !(rsp_flags & FCP_SNS_LEN_VAL) &&
257 fcp_rsp->resp.fr_status == SAM_STAT_GOOD)
258 set_host_byte(scsi, DID_ERROR);
259 }
260}
261
262#endif
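The inline helpers at the end of the new header centralize FCP CMND/RSP handling that zfcp_fsf.c previously open-coded. A minimal usage sketch follows (the caller and its buffers are illustrative; the driver actually places the FCP_CMND IU inside the FSF request's QTCB):

static void example_fcp_roundtrip(struct scsi_cmnd *scpnt,
				  struct fcp_cmnd *fcp_cmnd,
				  struct fcp_resp_with_ext *fcp_rsp)
{
	/* build the FCP_CMND IU: LUN, task attributes, CDB, data length */
	zfcp_fc_scsi_to_fcp(fcp_cmnd, scpnt);

	/* ... issue the request and wait until fcp_rsp is filled in ... */

	/* evaluate the FCP_RSP IU: SCSI status, sense data, residual count */
	zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt);
}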
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 4e41baa0c141..b3b1d2f79398 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -3,15 +3,20 @@
3 * 3 *
4 * Implementation of FSF commands. 4 * Implementation of FSF commands.
5 * 5 *
6 * Copyright IBM Corporation 2002, 2009 6 * Copyright IBM Corporation 2002, 2010
7 */ 7 */
8 8
9#define KMSG_COMPONENT "zfcp" 9#define KMSG_COMPONENT "zfcp"
10#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
11 11
12#include <linux/blktrace_api.h> 12#include <linux/blktrace_api.h>
13#include <linux/slab.h>
14#include <scsi/fc/fc_els.h>
13#include "zfcp_ext.h" 15#include "zfcp_ext.h"
16#include "zfcp_fc.h"
14#include "zfcp_dbf.h" 17#include "zfcp_dbf.h"
18#include "zfcp_qdio.h"
19#include "zfcp_reqlist.h"
15 20
16static void zfcp_fsf_request_timeout_handler(unsigned long data) 21static void zfcp_fsf_request_timeout_handler(unsigned long data)
17{ 22{
@@ -122,36 +127,32 @@ void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
122 127
123static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req) 128static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
124{ 129{
130 unsigned long flags;
125 struct fsf_status_read_buffer *sr_buf = req->data; 131 struct fsf_status_read_buffer *sr_buf = req->data;
126 struct zfcp_adapter *adapter = req->adapter; 132 struct zfcp_adapter *adapter = req->adapter;
127 struct zfcp_port *port; 133 struct zfcp_port *port;
128 int d_id = sr_buf->d_id & ZFCP_DID_MASK; 134 int d_id = ntoh24(sr_buf->d_id);
129 unsigned long flags;
130 135
131 read_lock_irqsave(&zfcp_data.config_lock, flags); 136 read_lock_irqsave(&adapter->port_list_lock, flags);
132 list_for_each_entry(port, &adapter->port_list_head, list) 137 list_for_each_entry(port, &adapter->port_list, list)
133 if (port->d_id == d_id) { 138 if (port->d_id == d_id) {
134 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
135 zfcp_erp_port_reopen(port, 0, "fssrpc1", req); 139 zfcp_erp_port_reopen(port, 0, "fssrpc1", req);
136 return; 140 break;
137 } 141 }
138 read_unlock_irqrestore(&zfcp_data.config_lock, flags); 142 read_unlock_irqrestore(&adapter->port_list_lock, flags);
139} 143}
140 144
141static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, char *id, 145static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, char *id,
142 struct fsf_link_down_info *link_down) 146 struct fsf_link_down_info *link_down)
143{ 147{
144 struct zfcp_adapter *adapter = req->adapter; 148 struct zfcp_adapter *adapter = req->adapter;
145 unsigned long flags;
146 149
147 if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED) 150 if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
148 return; 151 return;
149 152
150 atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status); 153 atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
151 154
152 read_lock_irqsave(&zfcp_data.config_lock, flags);
153 zfcp_scsi_schedule_rports_block(adapter); 155 zfcp_scsi_schedule_rports_block(adapter);
154 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
155 156
156 if (!link_down) 157 if (!link_down)
157 goto out; 158 goto out;
@@ -291,7 +292,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
291 zfcp_erp_adapter_access_changed(adapter, "fssrh_3", 292 zfcp_erp_adapter_access_changed(adapter, "fssrh_3",
292 req); 293 req);
293 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS) 294 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS)
294 schedule_work(&adapter->scan_work); 295 queue_work(adapter->work_queue, &adapter->scan_work);
295 break; 296 break;
296 case FSF_STATUS_READ_CFDC_UPDATED: 297 case FSF_STATUS_READ_CFDC_UPDATED:
297 zfcp_erp_adapter_access_changed(adapter, "fssrh_4", req); 298 zfcp_erp_adapter_access_changed(adapter, "fssrh_4", req);
@@ -317,7 +318,6 @@ static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
317 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 318 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
318 return; 319 return;
319 case FSF_SQ_COMMAND_ABORTED: 320 case FSF_SQ_COMMAND_ABORTED:
320 req->status |= ZFCP_STATUS_FSFREQ_ABORTED;
321 break; 321 break;
322 case FSF_SQ_NO_RECOM: 322 case FSF_SQ_NO_RECOM:
323 dev_err(&req->adapter->ccw_device->dev, 323 dev_err(&req->adapter->ccw_device->dev,
@@ -358,8 +358,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
358 zfcp_dbf_hba_fsf_response(req); 358 zfcp_dbf_hba_fsf_response(req);
359 359
360 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) { 360 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
361 req->status |= ZFCP_STATUS_FSFREQ_ERROR | 361 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
362 ZFCP_STATUS_FSFREQ_RETRY; /* only for SCSI cmnds. */
363 return; 362 return;
364 } 363 }
365 364
@@ -377,7 +376,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
377 case FSF_PROT_ERROR_STATE: 376 case FSF_PROT_ERROR_STATE:
378 case FSF_PROT_SEQ_NUMB_ERROR: 377 case FSF_PROT_SEQ_NUMB_ERROR:
379 zfcp_erp_adapter_reopen(adapter, 0, "fspse_2", req); 378 zfcp_erp_adapter_reopen(adapter, 0, "fspse_2", req);
380 req->status |= ZFCP_STATUS_FSFREQ_RETRY; 379 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
381 break; 380 break;
382 case FSF_PROT_UNSUPP_QTCB_TYPE: 381 case FSF_PROT_UNSUPP_QTCB_TYPE:
383 dev_err(&adapter->ccw_device->dev, 382 dev_err(&adapter->ccw_device->dev,
@@ -397,7 +396,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
397 case FSF_PROT_LINK_DOWN: 396 case FSF_PROT_LINK_DOWN:
398 zfcp_fsf_link_down_info_eval(req, "fspse_5", 397 zfcp_fsf_link_down_info_eval(req, "fspse_5",
399 &psq->link_down_info); 398 &psq->link_down_info);
400 /* FIXME: reopening adapter now? better wait for link up */ 399 /* go through reopen to flush pending requests */
401 zfcp_erp_adapter_reopen(adapter, 0, "fspse_6", req); 400 zfcp_erp_adapter_reopen(adapter, 0, "fspse_6", req);
402 break; 401 break;
403 case FSF_PROT_REEST_QUEUE: 402 case FSF_PROT_REEST_QUEUE:
@@ -461,15 +460,10 @@ static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
461void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter) 460void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
462{ 461{
463 struct zfcp_fsf_req *req, *tmp; 462 struct zfcp_fsf_req *req, *tmp;
464 unsigned long flags;
465 LIST_HEAD(remove_queue); 463 LIST_HEAD(remove_queue);
466 unsigned int i;
467 464
468 BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP); 465 BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP);
469 spin_lock_irqsave(&adapter->req_list_lock, flags); 466 zfcp_reqlist_move(adapter->req_list, &remove_queue);
470 for (i = 0; i < REQUEST_LIST_SIZE; i++)
471 list_splice_init(&adapter->req_list[i], &remove_queue);
472 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
473 467
474 list_for_each_entry_safe(req, tmp, &remove_queue, list) { 468 list_for_each_entry_safe(req, tmp, &remove_queue, list) {
475 list_del(&req->list); 469 list_del(&req->list);
@@ -480,18 +474,23 @@ void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
480 474
481static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req) 475static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
482{ 476{
483 struct fsf_qtcb_bottom_config *bottom; 477 struct fsf_qtcb_bottom_config *bottom = &req->qtcb->bottom.config;
484 struct zfcp_adapter *adapter = req->adapter; 478 struct zfcp_adapter *adapter = req->adapter;
485 struct Scsi_Host *shost = adapter->scsi_host; 479 struct Scsi_Host *shost = adapter->scsi_host;
480 struct fc_els_flogi *nsp, *plogi;
486 481
487 bottom = &req->qtcb->bottom.config; 482 /* adjust pointers for missing command code */
483 nsp = (struct fc_els_flogi *) ((u8 *)&bottom->nport_serv_param
484 - sizeof(u32));
485 plogi = (struct fc_els_flogi *) ((u8 *)&bottom->plogi_payload
486 - sizeof(u32));
488 487
489 if (req->data) 488 if (req->data)
490 memcpy(req->data, bottom, sizeof(*bottom)); 489 memcpy(req->data, bottom, sizeof(*bottom));
491 490
492 fc_host_node_name(shost) = bottom->nport_serv_param.wwnn; 491 fc_host_port_name(shost) = nsp->fl_wwpn;
493 fc_host_port_name(shost) = bottom->nport_serv_param.wwpn; 492 fc_host_node_name(shost) = nsp->fl_wwnn;
494 fc_host_port_id(shost) = bottom->s_id & ZFCP_DID_MASK; 493 fc_host_port_id(shost) = ntoh24(bottom->s_id);
495 fc_host_speed(shost) = bottom->fc_link_speed; 494 fc_host_speed(shost) = bottom->fc_link_speed;
496 fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3; 495 fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
497 496
@@ -503,9 +502,9 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
503 502
504 switch (bottom->fc_topology) { 503 switch (bottom->fc_topology) {
505 case FSF_TOPO_P2P: 504 case FSF_TOPO_P2P:
506 adapter->peer_d_id = bottom->peer_d_id & ZFCP_DID_MASK; 505 adapter->peer_d_id = ntoh24(bottom->peer_d_id);
507 adapter->peer_wwpn = bottom->plogi_payload.wwpn; 506 adapter->peer_wwpn = plogi->fl_wwpn;
508 adapter->peer_wwnn = bottom->plogi_payload.wwnn; 507 adapter->peer_wwnn = plogi->fl_wwnn;
509 fc_host_port_type(shost) = FC_PORTTYPE_PTP; 508 fc_host_port_type(shost) = FC_PORTTYPE_PTP;
510 break; 509 break;
511 case FSF_TOPO_FABRIC: 510 case FSF_TOPO_FABRIC:
@@ -616,6 +615,10 @@ static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
616 fc_host_permanent_port_name(shost) = fc_host_port_name(shost); 615 fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
617 fc_host_maxframe_size(shost) = bottom->maximum_frame_size; 616 fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
618 fc_host_supported_speeds(shost) = bottom->supported_speed; 617 fc_host_supported_speeds(shost) = bottom->supported_speed;
618 memcpy(fc_host_supported_fc4s(shost), bottom->supported_fc4_types,
619 FC_FC4_LIST_SIZE);
620 memcpy(fc_host_active_fc4s(shost), bottom->active_fc4_types,
621 FC_FC4_LIST_SIZE);
619} 622}
620 623
621static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req) 624static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
@@ -722,12 +725,12 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
722 req->adapter = adapter; 725 req->adapter = adapter;
723 req->fsf_command = fsf_cmd; 726 req->fsf_command = fsf_cmd;
724 req->req_id = adapter->req_no; 727 req->req_id = adapter->req_no;
725 req->queue_req.sbal_number = 1; 728 req->qdio_req.sbal_number = 1;
726 req->queue_req.sbal_first = req_q->first; 729 req->qdio_req.sbal_first = req_q->first;
727 req->queue_req.sbal_last = req_q->first; 730 req->qdio_req.sbal_last = req_q->first;
728 req->queue_req.sbale_curr = 1; 731 req->qdio_req.sbale_curr = 1;
729 732
730 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); 733 sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
731 sbale[0].addr = (void *) req->req_id; 734 sbale[0].addr = (void *) req->req_id;
732 sbale[0].flags |= SBAL_FLAGS0_COMMAND; 735 sbale[0].flags |= SBAL_FLAGS0_COMMAND;
733 736
@@ -742,6 +745,7 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
742 return ERR_PTR(-ENOMEM); 745 return ERR_PTR(-ENOMEM);
743 } 746 }
744 747
748 req->seq_no = adapter->fsf_req_seq_no;
745 req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no; 749 req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
746 req->qtcb->prefix.req_id = req->req_id; 750 req->qtcb->prefix.req_id = req->req_id;
747 req->qtcb->prefix.ulp_info = 26; 751 req->qtcb->prefix.ulp_info = 26;
@@ -749,8 +753,6 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
749 req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION; 753 req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION;
750 req->qtcb->header.req_handle = req->req_id; 754 req->qtcb->header.req_handle = req->req_id;
751 req->qtcb->header.fsf_command = req->fsf_command; 755 req->qtcb->header.fsf_command = req->fsf_command;
752 req->seq_no = adapter->fsf_req_seq_no;
753 req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
754 sbale[1].addr = (void *) req->qtcb; 756 sbale[1].addr = (void *) req->qtcb;
755 sbale[1].length = sizeof(struct fsf_qtcb); 757 sbale[1].length = sizeof(struct fsf_qtcb);
756 } 758 }
@@ -767,25 +769,17 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
767{ 769{
768 struct zfcp_adapter *adapter = req->adapter; 770 struct zfcp_adapter *adapter = req->adapter;
769 struct zfcp_qdio *qdio = adapter->qdio; 771 struct zfcp_qdio *qdio = adapter->qdio;
770 unsigned long flags; 772 int with_qtcb = (req->qtcb != NULL);
771 int idx; 773 int req_id = req->req_id;
772 int with_qtcb = (req->qtcb != NULL);
773 774
774 /* put allocated FSF request into hash table */ 775 zfcp_reqlist_add(adapter->req_list, req);
775 spin_lock_irqsave(&adapter->req_list_lock, flags);
776 idx = zfcp_reqlist_hash(req->req_id);
777 list_add_tail(&req->list, &adapter->req_list[idx]);
778 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
779 776
780 req->queue_req.qdio_outb_usage = atomic_read(&qdio->req_q.count); 777 req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q.count);
781 req->issued = get_clock(); 778 req->issued = get_clock();
782 if (zfcp_qdio_send(qdio, &req->queue_req)) { 779 if (zfcp_qdio_send(qdio, &req->qdio_req)) {
783 del_timer(&req->timer); 780 del_timer(&req->timer);
784 spin_lock_irqsave(&adapter->req_list_lock, flags);
785 /* lookup request again, list might have changed */ 781 /* lookup request again, list might have changed */
786 if (zfcp_reqlist_find_safe(adapter, req)) 782 zfcp_reqlist_find_rm(adapter->req_list, req_id);
787 zfcp_reqlist_remove(adapter, req);
788 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
789 zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1", req); 783 zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1", req);
790 return -EIO; 784 return -EIO;
791 } 785 }
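The zfcp_reqlist_add(), zfcp_reqlist_find_rm() and zfcp_reqlist_move() calls above belong to the request-list API introduced by this patch in zfcp_reqlist.h, which is not shown in these hunks. Roughly, that API has the following shape (sketch for orientation only; the real definitions are inline functions in zfcp_reqlist.h):

/* hash of pending FSF requests, keyed by req_id, with internal locking */
struct zfcp_reqlist;

struct zfcp_reqlist *zfcp_reqlist_alloc(void);
void zfcp_reqlist_add(struct zfcp_reqlist *rl, struct zfcp_fsf_req *req);
struct zfcp_fsf_req *zfcp_reqlist_find_rm(struct zfcp_reqlist *rl,
					  unsigned long req_id);
void zfcp_reqlist_move(struct zfcp_reqlist *rl, struct list_head *list);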
@@ -823,9 +817,9 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
823 goto out; 817 goto out;
824 } 818 }
825 819
826 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); 820 sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
827 sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY; 821 sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY;
828 req->queue_req.sbale_curr = 2; 822 req->qdio_req.sbale_curr = 2;
829 823
830 sr_buf = mempool_alloc(adapter->pool.status_read_data, GFP_ATOMIC); 824 sr_buf = mempool_alloc(adapter->pool.status_read_data, GFP_ATOMIC);
831 if (!sr_buf) { 825 if (!sr_buf) {
@@ -834,7 +828,7 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
834 } 828 }
835 memset(sr_buf, 0, sizeof(*sr_buf)); 829 memset(sr_buf, 0, sizeof(*sr_buf));
836 req->data = sr_buf; 830 req->data = sr_buf;
837 sbale = zfcp_qdio_sbale_curr(qdio, &req->queue_req); 831 sbale = zfcp_qdio_sbale_curr(qdio, &req->qdio_req);
838 sbale->addr = (void *) sr_buf; 832 sbale->addr = (void *) sr_buf;
839 sbale->length = sizeof(*sr_buf); 833 sbale->length = sizeof(*sr_buf);
840 834
@@ -881,13 +875,11 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
881 break; 875 break;
882 case FSF_PORT_BOXED: 876 case FSF_PORT_BOXED:
883 zfcp_erp_port_boxed(unit->port, "fsafch3", req); 877 zfcp_erp_port_boxed(unit->port, "fsafch3", req);
884 req->status |= ZFCP_STATUS_FSFREQ_ERROR | 878 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
885 ZFCP_STATUS_FSFREQ_RETRY;
886 break; 879 break;
887 case FSF_LUN_BOXED: 880 case FSF_LUN_BOXED:
888 zfcp_erp_unit_boxed(unit, "fsafch4", req); 881 zfcp_erp_unit_boxed(unit, "fsafch4", req);
889 req->status |= ZFCP_STATUS_FSFREQ_ERROR | 882 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
890 ZFCP_STATUS_FSFREQ_RETRY;
891 break; 883 break;
892 case FSF_ADAPTER_STATUS_AVAILABLE: 884 case FSF_ADAPTER_STATUS_AVAILABLE:
893 switch (fsq->word[0]) { 885 switch (fsq->word[0]) {
@@ -933,7 +925,7 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
933 ZFCP_STATUS_COMMON_UNBLOCKED))) 925 ZFCP_STATUS_COMMON_UNBLOCKED)))
934 goto out_error_free; 926 goto out_error_free;
935 927
936 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); 928 sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
937 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 929 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
938 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 930 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
939 931
@@ -958,10 +950,10 @@ out:
958static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req) 950static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
959{ 951{
960 struct zfcp_adapter *adapter = req->adapter; 952 struct zfcp_adapter *adapter = req->adapter;
961 struct zfcp_send_ct *send_ct = req->data; 953 struct zfcp_fsf_ct_els *ct = req->data;
962 struct fsf_qtcb_header *header = &req->qtcb->header; 954 struct fsf_qtcb_header *header = &req->qtcb->header;
963 955
964 send_ct->status = -EINVAL; 956 ct->status = -EINVAL;
965 957
966 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 958 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
967 goto skip_fsfstatus; 959 goto skip_fsfstatus;
@@ -969,7 +961,7 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
969 switch (header->fsf_status) { 961 switch (header->fsf_status) {
970 case FSF_GOOD: 962 case FSF_GOOD:
971 zfcp_dbf_san_ct_response(req); 963 zfcp_dbf_san_ct_response(req);
972 send_ct->status = 0; 964 ct->status = 0;
973 break; 965 break;
974 case FSF_SERVICE_CLASS_NOT_SUPPORTED: 966 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
975 zfcp_fsf_class_not_supp(req); 967 zfcp_fsf_class_not_supp(req);
@@ -985,8 +977,7 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
985 case FSF_ACCESS_DENIED: 977 case FSF_ACCESS_DENIED:
986 break; 978 break;
987 case FSF_PORT_BOXED: 979 case FSF_PORT_BOXED:
988 req->status |= ZFCP_STATUS_FSFREQ_ERROR | 980 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
989 ZFCP_STATUS_FSFREQ_RETRY;
990 break; 981 break;
991 case FSF_PORT_HANDLE_NOT_VALID: 982 case FSF_PORT_HANDLE_NOT_VALID:
992 zfcp_erp_adapter_reopen(adapter, 0, "fsscth1", req); 983 zfcp_erp_adapter_reopen(adapter, 0, "fsscth1", req);
@@ -1001,8 +992,8 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
1001 } 992 }
1002 993
1003skip_fsfstatus: 994skip_fsfstatus:
1004 if (send_ct->handler) 995 if (ct->handler)
1005 send_ct->handler(send_ct->handler_data); 996 ct->handler(ct->handler_data);
1006} 997}
1007 998
1008static void zfcp_fsf_setup_ct_els_unchained(struct qdio_buffer_element *sbale, 999static void zfcp_fsf_setup_ct_els_unchained(struct qdio_buffer_element *sbale,
@@ -1029,7 +1020,7 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
1029{ 1020{
1030 struct zfcp_adapter *adapter = req->adapter; 1021 struct zfcp_adapter *adapter = req->adapter;
1031 struct qdio_buffer_element *sbale = zfcp_qdio_sbale_req(adapter->qdio, 1022 struct qdio_buffer_element *sbale = zfcp_qdio_sbale_req(adapter->qdio,
1032 &req->queue_req); 1023 &req->qdio_req);
1033 u32 feat = adapter->adapter_features; 1024 u32 feat = adapter->adapter_features;
1034 int bytes; 1025 int bytes;
1035 1026
@@ -1047,15 +1038,15 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
1047 return 0; 1038 return 0;
1048 } 1039 }
1049 1040
1050 bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->queue_req, 1041 bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req,
1051 SBAL_FLAGS0_TYPE_WRITE_READ, 1042 SBAL_FLAGS0_TYPE_WRITE_READ,
1052 sg_req, max_sbals); 1043 sg_req, max_sbals);
1053 if (bytes <= 0) 1044 if (bytes <= 0)
1054 return -EIO; 1045 return -EIO;
1055 req->qtcb->bottom.support.req_buf_length = bytes; 1046 req->qtcb->bottom.support.req_buf_length = bytes;
1056 req->queue_req.sbale_curr = ZFCP_LAST_SBALE_PER_SBAL; 1047 req->qdio_req.sbale_curr = ZFCP_LAST_SBALE_PER_SBAL;
1057 1048
1058 bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->queue_req, 1049 bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req,
1059 SBAL_FLAGS0_TYPE_WRITE_READ, 1050 SBAL_FLAGS0_TYPE_WRITE_READ,
1060 sg_resp, max_sbals); 1051 sg_resp, max_sbals);
1061 req->qtcb->bottom.support.resp_buf_length = bytes; 1052 req->qtcb->bottom.support.resp_buf_length = bytes;
@@ -1068,7 +1059,7 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
1068static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req, 1059static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
1069 struct scatterlist *sg_req, 1060 struct scatterlist *sg_req,
1070 struct scatterlist *sg_resp, 1061 struct scatterlist *sg_resp,
1071 int max_sbals) 1062 int max_sbals, unsigned int timeout)
1072{ 1063{
1073 int ret; 1064 int ret;
1074 1065
@@ -1077,9 +1068,11 @@ static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
1077 return ret; 1068 return ret;
1078 1069
1079 /* common settings for ct/gs and els requests */ 1070 /* common settings for ct/gs and els requests */
1071 if (timeout > 255)
1072 timeout = 255; /* max value accepted by hardware */
1080 req->qtcb->bottom.support.service_class = FSF_CLASS_3; 1073 req->qtcb->bottom.support.service_class = FSF_CLASS_3;
1081 req->qtcb->bottom.support.timeout = 2 * R_A_TOV; 1074 req->qtcb->bottom.support.timeout = timeout;
1082 zfcp_fsf_start_timer(req, (2 * R_A_TOV + 10) * HZ); 1075 zfcp_fsf_start_timer(req, (timeout + 10) * HZ);
1083 1076
1084 return 0; 1077 return 0;
1085} 1078}
@@ -1089,9 +1082,10 @@ static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
1089 * @ct: pointer to struct zfcp_send_ct with data for request 1082 * @ct: pointer to struct zfcp_send_ct with data for request
1090 * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req 1083 * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req
1091 */ 1084 */
1092int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool) 1085int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
1086 struct zfcp_fsf_ct_els *ct, mempool_t *pool,
1087 unsigned int timeout)
1093{ 1088{
1094 struct zfcp_wka_port *wka_port = ct->wka_port;
1095 struct zfcp_qdio *qdio = wka_port->adapter->qdio; 1089 struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1096 struct zfcp_fsf_req *req; 1090 struct zfcp_fsf_req *req;
1097 int ret = -EIO; 1091 int ret = -EIO;
@@ -1109,7 +1103,7 @@ int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool)
1109 1103
1110 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1104 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1111 ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp, 1105 ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp,
1112 FSF_MAX_SBALS_PER_REQ); 1106 FSF_MAX_SBALS_PER_REQ, timeout);
1113 if (ret) 1107 if (ret)
1114 goto failed_send; 1108 goto failed_send;
1115 1109
@@ -1117,7 +1111,7 @@ int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool)
1117 req->qtcb->header.port_handle = wka_port->handle; 1111 req->qtcb->header.port_handle = wka_port->handle;
1118 req->data = ct; 1112 req->data = ct;
1119 1113
1120 zfcp_dbf_san_ct_request(req); 1114 zfcp_dbf_san_ct_request(req, wka_port->d_id);
1121 1115
1122 ret = zfcp_fsf_req_send(req); 1116 ret = zfcp_fsf_req_send(req);
1123 if (ret) 1117 if (ret)
@@ -1134,7 +1128,7 @@ out:
1134 1128
1135static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req) 1129static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
1136{ 1130{
1137 struct zfcp_send_els *send_els = req->data; 1131 struct zfcp_fsf_ct_els *send_els = req->data;
1138 struct zfcp_port *port = send_els->port; 1132 struct zfcp_port *port = send_els->port;
1139 struct fsf_qtcb_header *header = &req->qtcb->header; 1133 struct fsf_qtcb_header *header = &req->qtcb->header;
1140 1134
@@ -1154,9 +1148,6 @@ static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
1154 case FSF_ADAPTER_STATUS_AVAILABLE: 1148 case FSF_ADAPTER_STATUS_AVAILABLE:
1155 switch (header->fsf_status_qual.word[0]){ 1149 switch (header->fsf_status_qual.word[0]){
1156 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 1150 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1157 if (port && (send_els->ls_code != ZFCP_LS_ADISC))
1158 zfcp_fc_test_link(port);
1159 /*fall through */
1160 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 1151 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1161 case FSF_SQ_RETRY_IF_POSSIBLE: 1152 case FSF_SQ_RETRY_IF_POSSIBLE:
1162 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1153 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -1188,10 +1179,11 @@ skip_fsfstatus:
1188 * zfcp_fsf_send_els - initiate an ELS command (FC-FS) 1179 * zfcp_fsf_send_els - initiate an ELS command (FC-FS)
1189 * @els: pointer to struct zfcp_send_els with data for the command 1180 * @els: pointer to struct zfcp_send_els with data for the command
1190 */ 1181 */
1191int zfcp_fsf_send_els(struct zfcp_send_els *els) 1182int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
1183 struct zfcp_fsf_ct_els *els, unsigned int timeout)
1192{ 1184{
1193 struct zfcp_fsf_req *req; 1185 struct zfcp_fsf_req *req;
1194 struct zfcp_qdio *qdio = els->adapter->qdio; 1186 struct zfcp_qdio *qdio = adapter->qdio;
1195 int ret = -EIO; 1187 int ret = -EIO;
1196 1188
1197 spin_lock_bh(&qdio->req_q_lock); 1189 spin_lock_bh(&qdio->req_q_lock);
@@ -1206,12 +1198,12 @@ int zfcp_fsf_send_els(struct zfcp_send_els *els)
1206 } 1198 }
1207 1199
1208 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1200 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1209 ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, 2); 1201 ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, 2, timeout);
1210 1202
1211 if (ret) 1203 if (ret)
1212 goto failed_send; 1204 goto failed_send;
1213 1205
1214 req->qtcb->bottom.support.d_id = els->d_id; 1206 hton24(req->qtcb->bottom.support.d_id, d_id);
1215 req->handler = zfcp_fsf_send_els_handler; 1207 req->handler = zfcp_fsf_send_els_handler;
1216 req->data = els; 1208 req->data = els;
1217 1209
@@ -1250,7 +1242,7 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
1250 } 1242 }
1251 1243
1252 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1244 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1253 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); 1245 sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
1254 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 1246 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1255 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 1247 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1256 1248
@@ -1261,13 +1253,13 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
1261 FSF_FEATURE_UPDATE_ALERT; 1253 FSF_FEATURE_UPDATE_ALERT;
1262 req->erp_action = erp_action; 1254 req->erp_action = erp_action;
1263 req->handler = zfcp_fsf_exchange_config_data_handler; 1255 req->handler = zfcp_fsf_exchange_config_data_handler;
1264 erp_action->fsf_req = req; 1256 erp_action->fsf_req_id = req->req_id;
1265 1257
1266 zfcp_fsf_start_erp_timer(req); 1258 zfcp_fsf_start_erp_timer(req);
1267 retval = zfcp_fsf_req_send(req); 1259 retval = zfcp_fsf_req_send(req);
1268 if (retval) { 1260 if (retval) {
1269 zfcp_fsf_req_free(req); 1261 zfcp_fsf_req_free(req);
1270 erp_action->fsf_req = NULL; 1262 erp_action->fsf_req_id = 0;
1271 } 1263 }
1272out: 1264out:
1273 spin_unlock_bh(&qdio->req_q_lock); 1265 spin_unlock_bh(&qdio->req_q_lock);
@@ -1292,7 +1284,7 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
1292 goto out_unlock; 1284 goto out_unlock;
1293 } 1285 }
1294 1286
1295 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); 1287 sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
1296 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 1288 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1297 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 1289 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1298 req->handler = zfcp_fsf_exchange_config_data_handler; 1290 req->handler = zfcp_fsf_exchange_config_data_handler;
@@ -1348,19 +1340,19 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
1348 } 1340 }
1349 1341
1350 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1342 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1351 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); 1343 sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
1352 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 1344 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1353 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 1345 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1354 1346
1355 req->handler = zfcp_fsf_exchange_port_data_handler; 1347 req->handler = zfcp_fsf_exchange_port_data_handler;
1356 req->erp_action = erp_action; 1348 req->erp_action = erp_action;
1357 erp_action->fsf_req = req; 1349 erp_action->fsf_req_id = req->req_id;
1358 1350
1359 zfcp_fsf_start_erp_timer(req); 1351 zfcp_fsf_start_erp_timer(req);
1360 retval = zfcp_fsf_req_send(req); 1352 retval = zfcp_fsf_req_send(req);
1361 if (retval) { 1353 if (retval) {
1362 zfcp_fsf_req_free(req); 1354 zfcp_fsf_req_free(req);
1363 erp_action->fsf_req = NULL; 1355 erp_action->fsf_req_id = 0;
1364 } 1356 }
1365out: 1357out:
1366 spin_unlock_bh(&qdio->req_q_lock); 1358 spin_unlock_bh(&qdio->req_q_lock);
@@ -1397,7 +1389,7 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
1397 if (data) 1389 if (data)
1398 req->data = data; 1390 req->data = data;
1399 1391
1400 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); 1392 sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
1401 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 1393 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1402 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 1394 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1403 1395
@@ -1422,7 +1414,7 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1422{ 1414{
1423 struct zfcp_port *port = req->data; 1415 struct zfcp_port *port = req->data;
1424 struct fsf_qtcb_header *header = &req->qtcb->header; 1416 struct fsf_qtcb_header *header = &req->qtcb->header;
1425 struct fsf_plogi *plogi; 1417 struct fc_els_flogi *plogi;
1426 1418
1427 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 1419 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1428 goto out; 1420 goto out;
@@ -1472,23 +1464,10 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1472 * another GID_PN straight after a port has been opened. 1464 * another GID_PN straight after a port has been opened.
1473 * Alternately, an ADISC/PDISC ELS should suffice, as well. 1465 * Alternately, an ADISC/PDISC ELS should suffice, as well.
1474 */ 1466 */
1475 plogi = (struct fsf_plogi *) req->qtcb->bottom.support.els; 1467 plogi = (struct fc_els_flogi *) req->qtcb->bottom.support.els;
1476 if (req->qtcb->bottom.support.els1_length >= 1468 if (req->qtcb->bottom.support.els1_length >=
1477 FSF_PLOGI_MIN_LEN) { 1469 FSF_PLOGI_MIN_LEN)
1478 if (plogi->serv_param.wwpn != port->wwpn) {
1479 port->d_id = 0;
1480 dev_warn(&port->adapter->ccw_device->dev,
1481 "A port opened with WWPN 0x%016Lx "
1482 "returned data that identifies it as "
1483 "WWPN 0x%016Lx\n",
1484 (unsigned long long) port->wwpn,
1485 (unsigned long long)
1486 plogi->serv_param.wwpn);
1487 } else {
1488 port->wwnn = plogi->serv_param.wwnn;
1489 zfcp_fc_plogi_evaluate(port, plogi); 1470 zfcp_fc_plogi_evaluate(port, plogi);
1490 }
1491 }
1492 break; 1471 break;
1493 case FSF_UNKNOWN_OP_SUBTYPE: 1472 case FSF_UNKNOWN_OP_SUBTYPE:
1494 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1473 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -1496,7 +1475,7 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1496 } 1475 }
1497 1476
1498out: 1477out:
1499 zfcp_port_put(port); 1478 put_device(&port->dev);
1500} 1479}
1501 1480
1502/** 1481/**
@@ -1525,23 +1504,23 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
1525 } 1504 }
1526 1505
1527 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1506 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1528 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); 1507 sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
1529 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 1508 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1530 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 1509 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1531 1510
1532 req->handler = zfcp_fsf_open_port_handler; 1511 req->handler = zfcp_fsf_open_port_handler;
1533 req->qtcb->bottom.support.d_id = port->d_id; 1512 hton24(req->qtcb->bottom.support.d_id, port->d_id);
1534 req->data = port; 1513 req->data = port;
1535 req->erp_action = erp_action; 1514 req->erp_action = erp_action;
1536 erp_action->fsf_req = req; 1515 erp_action->fsf_req_id = req->req_id;
1537 zfcp_port_get(port); 1516 get_device(&port->dev);
1538 1517
1539 zfcp_fsf_start_erp_timer(req); 1518 zfcp_fsf_start_erp_timer(req);
1540 retval = zfcp_fsf_req_send(req); 1519 retval = zfcp_fsf_req_send(req);
1541 if (retval) { 1520 if (retval) {
1542 zfcp_fsf_req_free(req); 1521 zfcp_fsf_req_free(req);
1543 erp_action->fsf_req = NULL; 1522 erp_action->fsf_req_id = 0;
1544 zfcp_port_put(port); 1523 put_device(&port->dev);
1545 } 1524 }
1546out: 1525out:
1547 spin_unlock_bh(&qdio->req_q_lock); 1526 spin_unlock_bh(&qdio->req_q_lock);
@@ -1595,7 +1574,7 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
1595 } 1574 }
1596 1575
1597 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1576 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1598 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); 1577 sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
1599 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 1578 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1600 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 1579 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1601 1580
@@ -1603,13 +1582,13 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
1603 req->data = erp_action->port; 1582 req->data = erp_action->port;
1604 req->erp_action = erp_action; 1583 req->erp_action = erp_action;
1605 req->qtcb->header.port_handle = erp_action->port->handle; 1584 req->qtcb->header.port_handle = erp_action->port->handle;
1606 erp_action->fsf_req = req; 1585 erp_action->fsf_req_id = req->req_id;
1607 1586
1608 zfcp_fsf_start_erp_timer(req); 1587 zfcp_fsf_start_erp_timer(req);
1609 retval = zfcp_fsf_req_send(req); 1588 retval = zfcp_fsf_req_send(req);
1610 if (retval) { 1589 if (retval) {
1611 zfcp_fsf_req_free(req); 1590 zfcp_fsf_req_free(req);
1612 erp_action->fsf_req = NULL; 1591 erp_action->fsf_req_id = 0;
1613 } 1592 }
1614out: 1593out:
1615 spin_unlock_bh(&qdio->req_q_lock); 1594 spin_unlock_bh(&qdio->req_q_lock);
@@ -1618,11 +1597,11 @@ out:
1618 1597
1619static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req) 1598static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
1620{ 1599{
1621 struct zfcp_wka_port *wka_port = req->data; 1600 struct zfcp_fc_wka_port *wka_port = req->data;
1622 struct fsf_qtcb_header *header = &req->qtcb->header; 1601 struct fsf_qtcb_header *header = &req->qtcb->header;
1623 1602
1624 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) { 1603 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) {
1625 wka_port->status = ZFCP_WKA_PORT_OFFLINE; 1604 wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
1626 goto out; 1605 goto out;
1627 } 1606 }
1628 1607
@@ -1635,13 +1614,13 @@ static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
1635 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1614 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1636 /* fall through */ 1615 /* fall through */
1637 case FSF_ACCESS_DENIED: 1616 case FSF_ACCESS_DENIED:
1638 wka_port->status = ZFCP_WKA_PORT_OFFLINE; 1617 wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
1639 break; 1618 break;
1640 case FSF_GOOD: 1619 case FSF_GOOD:
1641 wka_port->handle = header->port_handle; 1620 wka_port->handle = header->port_handle;
1642 /* fall through */ 1621 /* fall through */
1643 case FSF_PORT_ALREADY_OPEN: 1622 case FSF_PORT_ALREADY_OPEN:
1644 wka_port->status = ZFCP_WKA_PORT_ONLINE; 1623 wka_port->status = ZFCP_FC_WKA_PORT_ONLINE;
1645 } 1624 }
1646out: 1625out:
1647 wake_up(&wka_port->completion_wq); 1626 wake_up(&wka_port->completion_wq);
@@ -1649,10 +1628,10 @@ out:
1649 1628
1650/** 1629/**
1651 * zfcp_fsf_open_wka_port - create and send open wka-port request 1630 * zfcp_fsf_open_wka_port - create and send open wka-port request
1652 * @wka_port: pointer to struct zfcp_wka_port 1631 * @wka_port: pointer to struct zfcp_fc_wka_port
1653 * Returns: 0 on success, error otherwise 1632 * Returns: 0 on success, error otherwise
1654 */ 1633 */
1655int zfcp_fsf_open_wka_port(struct zfcp_wka_port *wka_port) 1634int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
1656{ 1635{
1657 struct qdio_buffer_element *sbale; 1636 struct qdio_buffer_element *sbale;
1658 struct zfcp_qdio *qdio = wka_port->adapter->qdio; 1637 struct zfcp_qdio *qdio = wka_port->adapter->qdio;
@@ -1672,12 +1651,12 @@ int zfcp_fsf_open_wka_port(struct zfcp_wka_port *wka_port)
1672 } 1651 }
1673 1652
1674 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1653 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1675 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); 1654 sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
1676 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 1655 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1677 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 1656 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1678 1657
1679 req->handler = zfcp_fsf_open_wka_port_handler; 1658 req->handler = zfcp_fsf_open_wka_port_handler;
1680 req->qtcb->bottom.support.d_id = wka_port->d_id; 1659 hton24(req->qtcb->bottom.support.d_id, wka_port->d_id);
1681 req->data = wka_port; 1660 req->data = wka_port;
1682 1661
1683 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); 1662 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
@@ -1691,23 +1670,23 @@ out:
1691 1670
1692static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req) 1671static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
1693{ 1672{
1694 struct zfcp_wka_port *wka_port = req->data; 1673 struct zfcp_fc_wka_port *wka_port = req->data;
1695 1674
1696 if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) { 1675 if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) {
1697 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1676 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1698 zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1", req); 1677 zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1", req);
1699 } 1678 }
1700 1679
1701 wka_port->status = ZFCP_WKA_PORT_OFFLINE; 1680 wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
1702 wake_up(&wka_port->completion_wq); 1681 wake_up(&wka_port->completion_wq);
1703} 1682}
1704 1683
1705/** 1684/**
1706 * zfcp_fsf_close_wka_port - create and send close wka port request 1685 * zfcp_fsf_close_wka_port - create and send close wka port request
 1707 * @erp_action: pointer to struct zfcp_erp_action 1686 * @wka_port: WKA port to close
1708 * Returns: 0 on success, error otherwise 1687 * Returns: 0 on success, error otherwise
1709 */ 1688 */
1710int zfcp_fsf_close_wka_port(struct zfcp_wka_port *wka_port) 1689int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
1711{ 1690{
1712 struct qdio_buffer_element *sbale; 1691 struct qdio_buffer_element *sbale;
1713 struct zfcp_qdio *qdio = wka_port->adapter->qdio; 1692 struct zfcp_qdio *qdio = wka_port->adapter->qdio;
@@ -1727,7 +1706,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_wka_port *wka_port)
1727 } 1706 }
1728 1707
1729 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1708 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1730 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); 1709 sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
1731 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 1710 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1732 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 1711 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1733 1712
@@ -1765,13 +1744,13 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
1765 /* can't use generic zfcp_erp_modify_port_status because 1744 /* can't use generic zfcp_erp_modify_port_status because
1766 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */ 1745 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
1767 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); 1746 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1768 list_for_each_entry(unit, &port->unit_list_head, list) 1747 read_lock(&port->unit_list_lock);
1748 list_for_each_entry(unit, &port->unit_list, list)
1769 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, 1749 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
1770 &unit->status); 1750 &unit->status);
1751 read_unlock(&port->unit_list_lock);
1771 zfcp_erp_port_boxed(port, "fscpph2", req); 1752 zfcp_erp_port_boxed(port, "fscpph2", req);
1772 req->status |= ZFCP_STATUS_FSFREQ_ERROR | 1753 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1773 ZFCP_STATUS_FSFREQ_RETRY;
1774
1775 break; 1754 break;
1776 case FSF_ADAPTER_STATUS_AVAILABLE: 1755 case FSF_ADAPTER_STATUS_AVAILABLE:
1777 switch (header->fsf_status_qual.word[0]) { 1756 switch (header->fsf_status_qual.word[0]) {
@@ -1787,9 +1766,11 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
1787 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port 1766 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port
1788 */ 1767 */
1789 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); 1768 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1790 list_for_each_entry(unit, &port->unit_list_head, list) 1769 read_lock(&port->unit_list_lock);
1770 list_for_each_entry(unit, &port->unit_list, list)
1791 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, 1771 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
1792 &unit->status); 1772 &unit->status);
1773 read_unlock(&port->unit_list_lock);
1793 break; 1774 break;
1794 } 1775 }
1795} 1776}
@@ -1819,7 +1800,7 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
1819 } 1800 }
1820 1801
1821 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1802 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1822 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); 1803 sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
1823 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 1804 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1824 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 1805 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1825 1806
@@ -1827,13 +1808,13 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
1827 req->qtcb->header.port_handle = erp_action->port->handle; 1808 req->qtcb->header.port_handle = erp_action->port->handle;
1828 req->erp_action = erp_action; 1809 req->erp_action = erp_action;
1829 req->handler = zfcp_fsf_close_physical_port_handler; 1810 req->handler = zfcp_fsf_close_physical_port_handler;
1830 erp_action->fsf_req = req; 1811 erp_action->fsf_req_id = req->req_id;
1831 1812
1832 zfcp_fsf_start_erp_timer(req); 1813 zfcp_fsf_start_erp_timer(req);
1833 retval = zfcp_fsf_req_send(req); 1814 retval = zfcp_fsf_req_send(req);
1834 if (retval) { 1815 if (retval) {
1835 zfcp_fsf_req_free(req); 1816 zfcp_fsf_req_free(req);
1836 erp_action->fsf_req = NULL; 1817 erp_action->fsf_req_id = 0;
1837 } 1818 }
1838out: 1819out:
1839 spin_unlock_bh(&qdio->req_q_lock); 1820 spin_unlock_bh(&qdio->req_q_lock);
@@ -1873,8 +1854,7 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
1873 break; 1854 break;
1874 case FSF_PORT_BOXED: 1855 case FSF_PORT_BOXED:
1875 zfcp_erp_port_boxed(unit->port, "fsouh_2", req); 1856 zfcp_erp_port_boxed(unit->port, "fsouh_2", req);
1876 req->status |= ZFCP_STATUS_FSFREQ_ERROR | 1857 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1877 ZFCP_STATUS_FSFREQ_RETRY;
1878 break; 1858 break;
1879 case FSF_LUN_SHARING_VIOLATION: 1859 case FSF_LUN_SHARING_VIOLATION:
1880 if (header->fsf_status_qual.word[0]) 1860 if (header->fsf_status_qual.word[0])
@@ -1993,7 +1973,7 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
1993 } 1973 }
1994 1974
1995 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1975 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1996 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); 1976 sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
1997 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 1977 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1998 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 1978 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1999 1979
@@ -2002,7 +1982,7 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
2002 req->handler = zfcp_fsf_open_unit_handler; 1982 req->handler = zfcp_fsf_open_unit_handler;
2003 req->data = erp_action->unit; 1983 req->data = erp_action->unit;
2004 req->erp_action = erp_action; 1984 req->erp_action = erp_action;
2005 erp_action->fsf_req = req; 1985 erp_action->fsf_req_id = req->req_id;
2006 1986
2007 if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE)) 1987 if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE))
2008 req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING; 1988 req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING;
@@ -2011,7 +1991,7 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
2011 retval = zfcp_fsf_req_send(req); 1991 retval = zfcp_fsf_req_send(req);
2012 if (retval) { 1992 if (retval) {
2013 zfcp_fsf_req_free(req); 1993 zfcp_fsf_req_free(req);
2014 erp_action->fsf_req = NULL; 1994 erp_action->fsf_req_id = 0;
2015 } 1995 }
2016out: 1996out:
2017 spin_unlock_bh(&qdio->req_q_lock); 1997 spin_unlock_bh(&qdio->req_q_lock);
@@ -2036,8 +2016,7 @@ static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req)
2036 break; 2016 break;
2037 case FSF_PORT_BOXED: 2017 case FSF_PORT_BOXED:
2038 zfcp_erp_port_boxed(unit->port, "fscuh_3", req); 2018 zfcp_erp_port_boxed(unit->port, "fscuh_3", req);
2039 req->status |= ZFCP_STATUS_FSFREQ_ERROR | 2019 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2040 ZFCP_STATUS_FSFREQ_RETRY;
2041 break; 2020 break;
2042 case FSF_ADAPTER_STATUS_AVAILABLE: 2021 case FSF_ADAPTER_STATUS_AVAILABLE:
2043 switch (req->qtcb->header.fsf_status_qual.word[0]) { 2022 switch (req->qtcb->header.fsf_status_qual.word[0]) {
@@ -2080,7 +2059,7 @@ int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
2080 } 2059 }
2081 2060
2082 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 2061 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
2083 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); 2062 sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
2084 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 2063 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
2085 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 2064 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
2086 2065
@@ -2089,13 +2068,13 @@ int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
2089 req->handler = zfcp_fsf_close_unit_handler; 2068 req->handler = zfcp_fsf_close_unit_handler;
2090 req->data = erp_action->unit; 2069 req->data = erp_action->unit;
2091 req->erp_action = erp_action; 2070 req->erp_action = erp_action;
2092 erp_action->fsf_req = req; 2071 erp_action->fsf_req_id = req->req_id;
2093 2072
2094 zfcp_fsf_start_erp_timer(req); 2073 zfcp_fsf_start_erp_timer(req);
2095 retval = zfcp_fsf_req_send(req); 2074 retval = zfcp_fsf_req_send(req);
2096 if (retval) { 2075 if (retval) {
2097 zfcp_fsf_req_free(req); 2076 zfcp_fsf_req_free(req);
2098 erp_action->fsf_req = NULL; 2077 erp_action->fsf_req_id = 0;
2099 } 2078 }
2100out: 2079out:
2101 spin_unlock_bh(&qdio->req_q_lock); 2080 spin_unlock_bh(&qdio->req_q_lock);
@@ -2109,72 +2088,58 @@ static void zfcp_fsf_update_lat(struct fsf_latency_record *lat_rec, u32 lat)
2109 lat_rec->max = max(lat_rec->max, lat); 2088 lat_rec->max = max(lat_rec->max, lat);
2110} 2089}
2111 2090
2112static void zfcp_fsf_req_latency(struct zfcp_fsf_req *req) 2091static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
2113{ 2092{
2114 struct fsf_qual_latency_info *lat_inf; 2093 struct fsf_qual_latency_info *lat_in;
2115 struct latency_cont *lat; 2094 struct latency_cont *lat = NULL;
2116 struct zfcp_unit *unit = req->unit; 2095 struct zfcp_unit *unit = req->unit;
2096 struct zfcp_blk_drv_data blktrc;
2097 int ticks = req->adapter->timer_ticks;
2117 2098
2118 lat_inf = &req->qtcb->prefix.prot_status_qual.latency_info; 2099 lat_in = &req->qtcb->prefix.prot_status_qual.latency_info;
2119 2100
2120 switch (req->qtcb->bottom.io.data_direction) { 2101 blktrc.flags = 0;
2121 case FSF_DATADIR_READ: 2102 blktrc.magic = ZFCP_BLK_DRV_DATA_MAGIC;
2122 lat = &unit->latencies.read; 2103 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
2123 break; 2104 blktrc.flags |= ZFCP_BLK_REQ_ERROR;
2124 case FSF_DATADIR_WRITE: 2105 blktrc.inb_usage = req->qdio_req.qdio_inb_usage;
2125 lat = &unit->latencies.write; 2106 blktrc.outb_usage = req->qdio_req.qdio_outb_usage;
2126 break; 2107
2127 case FSF_DATADIR_CMND: 2108 if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA &&
2128 lat = &unit->latencies.cmd; 2109 !(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
2129 break; 2110 blktrc.flags |= ZFCP_BLK_LAT_VALID;
2130 default: 2111 blktrc.channel_lat = lat_in->channel_lat * ticks;
2131 return; 2112 blktrc.fabric_lat = lat_in->fabric_lat * ticks;
2132 } 2113
2133 2114 switch (req->qtcb->bottom.io.data_direction) {
2134 spin_lock(&unit->latencies.lock); 2115 case FSF_DATADIR_READ:
2135 zfcp_fsf_update_lat(&lat->channel, lat_inf->channel_lat); 2116 lat = &unit->latencies.read;
2136 zfcp_fsf_update_lat(&lat->fabric, lat_inf->fabric_lat); 2117 break;
2137 lat->counter++; 2118 case FSF_DATADIR_WRITE:
2138 spin_unlock(&unit->latencies.lock); 2119 lat = &unit->latencies.write;
2139} 2120 break;
2140 2121 case FSF_DATADIR_CMND:
2141#ifdef CONFIG_BLK_DEV_IO_TRACE 2122 lat = &unit->latencies.cmd;
2142static void zfcp_fsf_trace_latency(struct zfcp_fsf_req *fsf_req) 2123 break;
2143{ 2124 }
2144 struct fsf_qual_latency_info *lat_inf;
2145 struct scsi_cmnd *scsi_cmnd = (struct scsi_cmnd *)fsf_req->data;
2146 struct request *req = scsi_cmnd->request;
2147 struct zfcp_blk_drv_data trace;
2148 int ticks = fsf_req->adapter->timer_ticks;
2149 2125
2150 trace.flags = 0; 2126 if (lat) {
2151 trace.magic = ZFCP_BLK_DRV_DATA_MAGIC; 2127 spin_lock(&unit->latencies.lock);
2152 if (fsf_req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA) { 2128 zfcp_fsf_update_lat(&lat->channel, lat_in->channel_lat);
2153 trace.flags |= ZFCP_BLK_LAT_VALID; 2129 zfcp_fsf_update_lat(&lat->fabric, lat_in->fabric_lat);
2154 lat_inf = &fsf_req->qtcb->prefix.prot_status_qual.latency_info; 2130 lat->counter++;
2155 trace.channel_lat = lat_inf->channel_lat * ticks; 2131 spin_unlock(&unit->latencies.lock);
2156 trace.fabric_lat = lat_inf->fabric_lat * ticks; 2132 }
2157 } 2133 }
2158 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)
2159 trace.flags |= ZFCP_BLK_REQ_ERROR;
2160 trace.inb_usage = fsf_req->queue_req.qdio_inb_usage;
2161 trace.outb_usage = fsf_req->queue_req.qdio_outb_usage;
2162 2134
2163 blk_add_driver_data(req->q, req, &trace, sizeof(trace)); 2135 blk_add_driver_data(scsi->request->q, scsi->request, &blktrc,
2164} 2136 sizeof(blktrc));
2165#else
2166static inline void zfcp_fsf_trace_latency(struct zfcp_fsf_req *fsf_req)
2167{
2168} 2137}
2169#endif
2170 2138
2171static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req) 2139static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req)
2172{ 2140{
2173 struct scsi_cmnd *scpnt; 2141 struct scsi_cmnd *scpnt;
2174 struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *) 2142 struct fcp_resp_with_ext *fcp_rsp;
2175 &(req->qtcb->bottom.io.fcp_rsp);
2176 u32 sns_len;
2177 char *fcp_rsp_info = (unsigned char *) &fcp_rsp_iu[1];
2178 unsigned long flags; 2143 unsigned long flags;
2179 2144
2180 read_lock_irqsave(&req->adapter->abort_lock, flags); 2145 read_lock_irqsave(&req->adapter->abort_lock, flags);
@@ -2185,57 +2150,17 @@ static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req)
2185 return; 2150 return;
2186 } 2151 }
2187 2152
2188 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ABORTED)) {
2189 set_host_byte(scpnt, DID_SOFT_ERROR);
2190 goto skip_fsfstatus;
2191 }
2192
2193 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) { 2153 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
2194 set_host_byte(scpnt, DID_ERROR); 2154 set_host_byte(scpnt, DID_TRANSPORT_DISRUPTED);
2195 goto skip_fsfstatus; 2155 goto skip_fsfstatus;
2196 } 2156 }
2197 2157
2198 set_msg_byte(scpnt, COMMAND_COMPLETE); 2158 fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp;
2199 2159 zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt);
2200 scpnt->result |= fcp_rsp_iu->scsi_status;
2201
2202 if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA)
2203 zfcp_fsf_req_latency(req);
2204
2205 zfcp_fsf_trace_latency(req);
2206
2207 if (unlikely(fcp_rsp_iu->validity.bits.fcp_rsp_len_valid)) {
2208 if (fcp_rsp_info[3] == RSP_CODE_GOOD)
2209 set_host_byte(scpnt, DID_OK);
2210 else {
2211 set_host_byte(scpnt, DID_ERROR);
2212 goto skip_fsfstatus;
2213 }
2214 }
2215
2216 if (unlikely(fcp_rsp_iu->validity.bits.fcp_sns_len_valid)) {
2217 sns_len = FSF_FCP_RSP_SIZE - sizeof(struct fcp_rsp_iu) +
2218 fcp_rsp_iu->fcp_rsp_len;
2219 sns_len = min(sns_len, (u32) SCSI_SENSE_BUFFERSIZE);
2220 sns_len = min(sns_len, fcp_rsp_iu->fcp_sns_len);
2221
2222 memcpy(scpnt->sense_buffer,
2223 zfcp_get_fcp_sns_info_ptr(fcp_rsp_iu), sns_len);
2224 }
2225 2160
2226 if (unlikely(fcp_rsp_iu->validity.bits.fcp_resid_under)) {
2227 scsi_set_resid(scpnt, fcp_rsp_iu->fcp_resid);
2228 if (scsi_bufflen(scpnt) - scsi_get_resid(scpnt) <
2229 scpnt->underflow)
2230 set_host_byte(scpnt, DID_ERROR);
2231 }
2232skip_fsfstatus: 2161skip_fsfstatus:
2233 if (scpnt->result != 0) 2162 zfcp_fsf_req_trace(req, scpnt);
2234 zfcp_dbf_scsi_result("erro", 3, req->adapter->dbf, scpnt, req); 2163 zfcp_dbf_scsi_result(req->adapter->dbf, scpnt, req);
2235 else if (scpnt->retries > 0)
2236 zfcp_dbf_scsi_result("retr", 4, req->adapter->dbf, scpnt, req);
2237 else
2238 zfcp_dbf_scsi_result("norm", 6, req->adapter->dbf, scpnt, req);
2239 2164
2240 scpnt->host_scribble = NULL; 2165 scpnt->host_scribble = NULL;
2241 (scpnt->scsi_done) (scpnt); 2166 (scpnt->scsi_done) (scpnt);
@@ -2250,11 +2175,13 @@ skip_fsfstatus:
2250 2175
2251static void zfcp_fsf_send_fcp_ctm_handler(struct zfcp_fsf_req *req) 2176static void zfcp_fsf_send_fcp_ctm_handler(struct zfcp_fsf_req *req)
2252{ 2177{
2253 struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *) 2178 struct fcp_resp_with_ext *fcp_rsp;
2254 &(req->qtcb->bottom.io.fcp_rsp); 2179 struct fcp_resp_rsp_info *rsp_info;
2255 char *fcp_rsp_info = (unsigned char *) &fcp_rsp_iu[1]; 2180
2181 fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp;
2182 rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
2256 2183
2257 if ((fcp_rsp_info[3] != RSP_CODE_GOOD) || 2184 if ((rsp_info->rsp_code != FCP_TMF_CMPL) ||
2258 (req->status & ZFCP_STATUS_FSFREQ_ERROR)) 2185 (req->status & ZFCP_STATUS_FSFREQ_ERROR))
2259 req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED; 2186 req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
2260} 2187}
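The rewritten task-management handler above walks the response using the generic FCP structures from <scsi/fc/fc_fcp.h> instead of the old private fcp_rsp_iu. A minimal sketch of that layout, using only the structures and the FCP_TMF_CMPL code from that header:

/* Hedged sketch: the FCP_RSP_INFO block follows the fixed response plus
 * its length extension, so indexing past fcp_rsp[0] lands on the
 * response code checked by the handler above. */
#include <scsi/fc/fc_fcp.h>

static int example_tmf_completed(struct fcp_resp_with_ext *fcp_rsp)
{
	struct fcp_resp_rsp_info *rsp_info =
		(struct fcp_resp_rsp_info *) &fcp_rsp[1];

	return rsp_info->rsp_code == FCP_TMF_CMPL;
}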
@@ -2314,13 +2241,11 @@ static void zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *req)
2314 break; 2241 break;
2315 case FSF_PORT_BOXED: 2242 case FSF_PORT_BOXED:
2316 zfcp_erp_port_boxed(unit->port, "fssfch5", req); 2243 zfcp_erp_port_boxed(unit->port, "fssfch5", req);
2317 req->status |= ZFCP_STATUS_FSFREQ_ERROR | 2244 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2318 ZFCP_STATUS_FSFREQ_RETRY;
2319 break; 2245 break;
2320 case FSF_LUN_BOXED: 2246 case FSF_LUN_BOXED:
2321 zfcp_erp_unit_boxed(unit, "fssfch6", req); 2247 zfcp_erp_unit_boxed(unit, "fssfch6", req);
2322 req->status |= ZFCP_STATUS_FSFREQ_ERROR | 2248 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2323 ZFCP_STATUS_FSFREQ_RETRY;
2324 break; 2249 break;
2325 case FSF_ADAPTER_STATUS_AVAILABLE: 2250 case FSF_ADAPTER_STATUS_AVAILABLE:
2326 if (header->fsf_status_qual.word[0] == 2251 if (header->fsf_status_qual.word[0] ==
@@ -2335,24 +2260,10 @@ skip_fsfstatus:
2335 else { 2260 else {
2336 zfcp_fsf_send_fcp_command_task_handler(req); 2261 zfcp_fsf_send_fcp_command_task_handler(req);
2337 req->unit = NULL; 2262 req->unit = NULL;
2338 zfcp_unit_put(unit); 2263 put_device(&unit->dev);
2339 } 2264 }
2340} 2265}
2341 2266
2342static void zfcp_set_fcp_dl(struct fcp_cmnd_iu *fcp_cmd, u32 fcp_dl)
2343{
2344 u32 *fcp_dl_ptr;
2345
2346 /*
2347 * fcp_dl_addr = start address of fcp_cmnd structure +
2348 * size of fixed part + size of dynamically sized add_dcp_cdb field
2349 * SEE FCP-2 documentation
2350 */
2351 fcp_dl_ptr = (u32 *) ((unsigned char *) &fcp_cmd[1] +
2352 (fcp_cmd->add_fcp_cdb_length << 2));
2353 *fcp_dl_ptr = fcp_dl;
2354}
2355
2356/** 2267/**
2357 * zfcp_fsf_send_fcp_command_task - initiate an FCP command (for a SCSI command) 2268 * zfcp_fsf_send_fcp_command_task - initiate an FCP command (for a SCSI command)
2358 * @unit: unit where command is sent to 2269 * @unit: unit where command is sent to
@@ -2362,7 +2273,7 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
2362 struct scsi_cmnd *scsi_cmnd) 2273 struct scsi_cmnd *scsi_cmnd)
2363{ 2274{
2364 struct zfcp_fsf_req *req; 2275 struct zfcp_fsf_req *req;
2365 struct fcp_cmnd_iu *fcp_cmnd_iu; 2276 struct fcp_cmnd *fcp_cmnd;
2366 unsigned int sbtype = SBAL_FLAGS0_TYPE_READ; 2277 unsigned int sbtype = SBAL_FLAGS0_TYPE_READ;
2367 int real_bytes, retval = -EIO; 2278 int real_bytes, retval = -EIO;
2368 struct zfcp_adapter *adapter = unit->port->adapter; 2279 struct zfcp_adapter *adapter = unit->port->adapter;
@@ -2387,23 +2298,21 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
2387 } 2298 }
2388 2299
2389 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 2300 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
2390 zfcp_unit_get(unit); 2301 get_device(&unit->dev);
2391 req->unit = unit; 2302 req->unit = unit;
2392 req->data = scsi_cmnd; 2303 req->data = scsi_cmnd;
2393 req->handler = zfcp_fsf_send_fcp_command_handler; 2304 req->handler = zfcp_fsf_send_fcp_command_handler;
2394 req->qtcb->header.lun_handle = unit->handle; 2305 req->qtcb->header.lun_handle = unit->handle;
2395 req->qtcb->header.port_handle = unit->port->handle; 2306 req->qtcb->header.port_handle = unit->port->handle;
2396 req->qtcb->bottom.io.service_class = FSF_CLASS_3; 2307 req->qtcb->bottom.io.service_class = FSF_CLASS_3;
2308 req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;
2397 2309
2398 scsi_cmnd->host_scribble = (unsigned char *) req->req_id; 2310 scsi_cmnd->host_scribble = (unsigned char *) req->req_id;
2399 2311
2400 fcp_cmnd_iu = (struct fcp_cmnd_iu *) &(req->qtcb->bottom.io.fcp_cmnd);
2401 fcp_cmnd_iu->fcp_lun = unit->fcp_lun;
2402 /* 2312 /*
2403 * set depending on data direction: 2313 * set depending on data direction:
2404 * data direction bits in SBALE (SB Type) 2314 * data direction bits in SBALE (SB Type)
2405 * data direction bits in QTCB 2315 * data direction bits in QTCB
2406 * data direction bits in FCP_CMND IU
2407 */ 2316 */
2408 switch (scsi_cmnd->sc_data_direction) { 2317 switch (scsi_cmnd->sc_data_direction) {
2409 case DMA_NONE: 2318 case DMA_NONE:
@@ -2411,38 +2320,23 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
2411 break; 2320 break;
2412 case DMA_FROM_DEVICE: 2321 case DMA_FROM_DEVICE:
2413 req->qtcb->bottom.io.data_direction = FSF_DATADIR_READ; 2322 req->qtcb->bottom.io.data_direction = FSF_DATADIR_READ;
2414 fcp_cmnd_iu->rddata = 1;
2415 break; 2323 break;
2416 case DMA_TO_DEVICE: 2324 case DMA_TO_DEVICE:
2417 req->qtcb->bottom.io.data_direction = FSF_DATADIR_WRITE; 2325 req->qtcb->bottom.io.data_direction = FSF_DATADIR_WRITE;
2418 sbtype = SBAL_FLAGS0_TYPE_WRITE; 2326 sbtype = SBAL_FLAGS0_TYPE_WRITE;
2419 fcp_cmnd_iu->wddata = 1;
2420 break; 2327 break;
2421 case DMA_BIDIRECTIONAL: 2328 case DMA_BIDIRECTIONAL:
2422 goto failed_scsi_cmnd; 2329 goto failed_scsi_cmnd;
2423 } 2330 }
2424 2331
2425 if (likely((scsi_cmnd->device->simple_tags) || 2332 fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
2426 ((atomic_read(&unit->status) & ZFCP_STATUS_UNIT_READONLY) && 2333 zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd);
2427 (atomic_read(&unit->status) & ZFCP_STATUS_UNIT_SHARED))))
2428 fcp_cmnd_iu->task_attribute = SIMPLE_Q;
2429 else
2430 fcp_cmnd_iu->task_attribute = UNTAGGED;
2431
2432 if (unlikely(scsi_cmnd->cmd_len > FCP_CDB_LENGTH))
2433 fcp_cmnd_iu->add_fcp_cdb_length =
2434 (scsi_cmnd->cmd_len - FCP_CDB_LENGTH) >> 2;
2435 2334
2436 memcpy(fcp_cmnd_iu->fcp_cdb, scsi_cmnd->cmnd, scsi_cmnd->cmd_len); 2335 real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sbtype,
2437
2438 req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) +
2439 fcp_cmnd_iu->add_fcp_cdb_length + sizeof(u32);
2440
2441 real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->queue_req, sbtype,
2442 scsi_sglist(scsi_cmnd), 2336 scsi_sglist(scsi_cmnd),
2443 FSF_MAX_SBALS_PER_REQ); 2337 FSF_MAX_SBALS_PER_REQ);
2444 if (unlikely(real_bytes < 0)) { 2338 if (unlikely(real_bytes < 0)) {
2445 if (req->queue_req.sbal_number >= FSF_MAX_SBALS_PER_REQ) { 2339 if (req->qdio_req.sbal_number >= FSF_MAX_SBALS_PER_REQ) {
2446 dev_err(&adapter->ccw_device->dev, 2340 dev_err(&adapter->ccw_device->dev,
2447 "Oversize data package, unit 0x%016Lx " 2341 "Oversize data package, unit 0x%016Lx "
2448 "on port 0x%016Lx closed\n", 2342 "on port 0x%016Lx closed\n",
@@ -2454,8 +2348,6 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
2454 goto failed_scsi_cmnd; 2348 goto failed_scsi_cmnd;
2455 } 2349 }
2456 2350
2457 zfcp_set_fcp_dl(fcp_cmnd_iu, real_bytes);
2458
2459 retval = zfcp_fsf_req_send(req); 2351 retval = zfcp_fsf_req_send(req);
2460 if (unlikely(retval)) 2352 if (unlikely(retval))
2461 goto failed_scsi_cmnd; 2353 goto failed_scsi_cmnd;
@@ -2463,7 +2355,7 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
2463 goto out; 2355 goto out;
2464 2356
2465failed_scsi_cmnd: 2357failed_scsi_cmnd:
2466 zfcp_unit_put(unit); 2358 put_device(&unit->dev);
2467 zfcp_fsf_req_free(req); 2359 zfcp_fsf_req_free(req);
2468 scsi_cmnd->host_scribble = NULL; 2360 scsi_cmnd->host_scribble = NULL;
2469out: 2361out:
@@ -2481,7 +2373,7 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags)
2481{ 2373{
2482 struct qdio_buffer_element *sbale; 2374 struct qdio_buffer_element *sbale;
2483 struct zfcp_fsf_req *req = NULL; 2375 struct zfcp_fsf_req *req = NULL;
2484 struct fcp_cmnd_iu *fcp_cmnd_iu; 2376 struct fcp_cmnd *fcp_cmnd;
2485 struct zfcp_qdio *qdio = unit->port->adapter->qdio; 2377 struct zfcp_qdio *qdio = unit->port->adapter->qdio;
2486 2378
2487 if (unlikely(!(atomic_read(&unit->status) & 2379 if (unlikely(!(atomic_read(&unit->status) &
@@ -2507,16 +2399,14 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags)
2507 req->qtcb->header.port_handle = unit->port->handle; 2399 req->qtcb->header.port_handle = unit->port->handle;
2508 req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND; 2400 req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
2509 req->qtcb->bottom.io.service_class = FSF_CLASS_3; 2401 req->qtcb->bottom.io.service_class = FSF_CLASS_3;
2510 req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) + 2402 req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;
2511 sizeof(u32);
2512 2403
2513 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); 2404 sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
2514 sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE; 2405 sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE;
2515 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 2406 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
2516 2407
2517 fcp_cmnd_iu = (struct fcp_cmnd_iu *) &req->qtcb->bottom.io.fcp_cmnd; 2408 fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
2518 fcp_cmnd_iu->fcp_lun = unit->fcp_lun; 2409 zfcp_fc_fcp_tm(fcp_cmnd, unit->device, tm_flags);
2519 fcp_cmnd_iu->task_management_flags = tm_flags;
2520 2410
2521 zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT); 2411 zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
2522 if (!zfcp_fsf_req_send(req)) 2412 if (!zfcp_fsf_req_send(req))
@@ -2574,14 +2464,14 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
2574 2464
2575 req->handler = zfcp_fsf_control_file_handler; 2465 req->handler = zfcp_fsf_control_file_handler;
2576 2466
2577 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); 2467 sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
2578 sbale[0].flags |= direction; 2468 sbale[0].flags |= direction;
2579 2469
2580 bottom = &req->qtcb->bottom.support; 2470 bottom = &req->qtcb->bottom.support;
2581 bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE; 2471 bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE;
2582 bottom->option = fsf_cfdc->option; 2472 bottom->option = fsf_cfdc->option;
2583 2473
2584 bytes = zfcp_qdio_sbals_from_sg(qdio, &req->queue_req, 2474 bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
2585 direction, fsf_cfdc->sg, 2475 direction, fsf_cfdc->sg,
2586 FSF_MAX_SBALS_PER_REQ); 2476 FSF_MAX_SBALS_PER_REQ);
2587 if (bytes != ZFCP_CFDC_MAX_SIZE) { 2477 if (bytes != ZFCP_CFDC_MAX_SIZE) {
@@ -2612,15 +2502,14 @@ void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
2612 struct qdio_buffer *sbal = qdio->resp_q.sbal[sbal_idx]; 2502 struct qdio_buffer *sbal = qdio->resp_q.sbal[sbal_idx];
2613 struct qdio_buffer_element *sbale; 2503 struct qdio_buffer_element *sbale;
2614 struct zfcp_fsf_req *fsf_req; 2504 struct zfcp_fsf_req *fsf_req;
2615 unsigned long flags, req_id; 2505 unsigned long req_id;
2616 int idx; 2506 int idx;
2617 2507
2618 for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) { 2508 for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) {
2619 2509
2620 sbale = &sbal->element[idx]; 2510 sbale = &sbal->element[idx];
2621 req_id = (unsigned long) sbale->addr; 2511 req_id = (unsigned long) sbale->addr;
2622 spin_lock_irqsave(&adapter->req_list_lock, flags); 2512 fsf_req = zfcp_reqlist_find_rm(adapter->req_list, req_id);
2623 fsf_req = zfcp_reqlist_find(adapter, req_id);
2624 2513
2625 if (!fsf_req) 2514 if (!fsf_req)
2626 /* 2515 /*
@@ -2630,11 +2519,8 @@ void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
2630 panic("error: unknown req_id (%lx) on adapter %s.\n", 2519 panic("error: unknown req_id (%lx) on adapter %s.\n",
2631 req_id, dev_name(&adapter->ccw_device->dev)); 2520 req_id, dev_name(&adapter->ccw_device->dev));
2632 2521
2633 list_del(&fsf_req->list); 2522 fsf_req->qdio_req.sbal_response = sbal_idx;
2634 spin_unlock_irqrestore(&adapter->req_list_lock, flags); 2523 fsf_req->qdio_req.qdio_inb_usage =
2635
2636 fsf_req->queue_req.sbal_response = sbal_idx;
2637 fsf_req->queue_req.qdio_inb_usage =
2638 atomic_read(&qdio->resp_q.count); 2524 atomic_read(&qdio->resp_q.count);
2639 zfcp_fsf_req_complete(fsf_req); 2525 zfcp_fsf_req_complete(fsf_req);
2640 2526
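With these zfcp_fsf.c changes, all FCP_CMND construction is deferred to zfcp_fc_scsi_to_fcp() and the adapter is simply told the fixed FCP_CMND_LEN. As a rough, non-authoritative sketch of what such a mapping has to cover (the function name below is invented; struct fcp_cmnd and the FCP_CFL_*/FCP_PTA_* constants come from <scsi/fc/fc_fcp.h>, and the header providing int_to_scsilun varies between kernel versions):

#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <asm/byteorder.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/fc/fc_fcp.h>

/* Hedged sketch only - the real mapping lives in zfcp_fc.h. */
static void example_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi)
{
	memset(fcp, 0, sizeof(*fcp));

	/* 8-byte FCP LUN derived from the SCSI LUN number */
	int_to_scsilun(scsi->device->lun, (struct scsi_lun *) &fcp->fc_lun);

	/* task attribute: simple queueing when tagged queueing is on */
	if (scsi->device->simple_tags)
		fcp->fc_pri_ta = FCP_PTA_SIMPLE;

	/* direction bits that used to be rddata/wddata in fcp_cmnd_iu */
	if (scsi->sc_data_direction == DMA_FROM_DEVICE)
		fcp->fc_flags |= FCP_CFL_RDDATA;
	if (scsi->sc_data_direction == DMA_TO_DEVICE)
		fcp->fc_flags |= FCP_CFL_WRDATA;

	/* CDB plus expected transfer length (big endian on the wire);
	 * zfcp caps max_cmd_len at 16, see zfcp_scsi.c below */
	memcpy(fcp->fc_cdb, scsi->cmnd, scsi->cmd_len);
	fcp->fc_dl = cpu_to_be32(scsi_bufflen(scsi));
}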
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index dcc7c1dbcf58..b3de682b64cf 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -11,6 +11,7 @@
11 11
12#include <linux/pfn.h> 12#include <linux/pfn.h>
13#include <linux/scatterlist.h> 13#include <linux/scatterlist.h>
14#include <scsi/libfc.h>
14 15
15#define FSF_QTCB_CURRENT_VERSION 0x00000001 16#define FSF_QTCB_CURRENT_VERSION 0x00000001
16 17
@@ -228,7 +229,8 @@ struct fsf_status_read_buffer {
228 u32 length; 229 u32 length;
229 u32 res1; 230 u32 res1;
230 struct fsf_queue_designator queue_designator; 231 struct fsf_queue_designator queue_designator;
231 u32 d_id; 232 u8 res2;
233 u8 d_id[3];
232 u32 class; 234 u32 class;
233 u64 fcp_lun; 235 u64 fcp_lun;
234 u8 res3[24]; 236 u8 res3[24];
@@ -309,22 +311,7 @@ struct fsf_qtcb_header {
309 u8 res4[16]; 311 u8 res4[16];
310} __attribute__ ((packed)); 312} __attribute__ ((packed));
311 313
312struct fsf_nport_serv_param {
313 u8 common_serv_param[16];
314 u64 wwpn;
315 u64 wwnn;
316 u8 class1_serv_param[16];
317 u8 class2_serv_param[16];
318 u8 class3_serv_param[16];
319 u8 class4_serv_param[16];
320 u8 vendor_version_level[16];
321} __attribute__ ((packed));
322
323#define FSF_PLOGI_MIN_LEN 112 314#define FSF_PLOGI_MIN_LEN 112
324struct fsf_plogi {
325 u32 code;
326 struct fsf_nport_serv_param serv_param;
327} __attribute__ ((packed));
328 315
329#define FSF_FCP_CMND_SIZE 288 316#define FSF_FCP_CMND_SIZE 288
330#define FSF_FCP_RSP_SIZE 128 317#define FSF_FCP_RSP_SIZE 128
@@ -342,8 +329,8 @@ struct fsf_qtcb_bottom_io {
342 329
343struct fsf_qtcb_bottom_support { 330struct fsf_qtcb_bottom_support {
344 u32 operation_subtype; 331 u32 operation_subtype;
345 u8 res1[12]; 332 u8 res1[13];
346 u32 d_id; 333 u8 d_id[3];
347 u32 option; 334 u32 option;
348 u64 fcp_lun; 335 u64 fcp_lun;
349 u64 res2; 336 u64 res2;
@@ -372,18 +359,18 @@ struct fsf_qtcb_bottom_config {
372 u32 fc_topology; 359 u32 fc_topology;
373 u32 fc_link_speed; 360 u32 fc_link_speed;
374 u32 adapter_type; 361 u32 adapter_type;
375 u32 peer_d_id; 362 u8 res0;
363 u8 peer_d_id[3];
376 u8 res1[2]; 364 u8 res1[2];
377 u16 timer_interval; 365 u16 timer_interval;
378 u8 res2[8]; 366 u8 res2[9];
379 u32 s_id; 367 u8 s_id[3];
380 struct fsf_nport_serv_param nport_serv_param; 368 u8 nport_serv_param[128];
381 u8 reserved_nport_serv_param[16];
382 u8 res3[8]; 369 u8 res3[8];
383 u32 adapter_ports; 370 u32 adapter_ports;
384 u32 hardware_version; 371 u32 hardware_version;
385 u8 serial_number[32]; 372 u8 serial_number[32];
386 struct fsf_nport_serv_param plogi_payload; 373 u8 plogi_payload[112];
387 struct fsf_statistics_info stat_info; 374 struct fsf_statistics_info stat_info;
388 u8 res4[112]; 375 u8 res4[112];
389} __attribute__ ((packed)); 376} __attribute__ ((packed));
@@ -450,4 +437,22 @@ struct zfcp_blk_drv_data {
450 u64 fabric_lat; 437 u64 fabric_lat;
451} __attribute__ ((packed)); 438} __attribute__ ((packed));
452 439
440/**
441 * struct zfcp_fsf_ct_els - zfcp data for ct or els request
442 * @req: scatter-gather list for request
443 * @resp: scatter-gather list for response
444 * @handler: handler function (called for response to the request)
445 * @handler_data: data passed to handler function
446 * @port: Optional pointer to port for zfcp internal ELS (only test link ADISC)
447 * @status: used to pass error status to calling function
448 */
449struct zfcp_fsf_ct_els {
450 struct scatterlist *req;
451 struct scatterlist *resp;
452 void (*handler)(void *);
453 void *handler_data;
454 struct zfcp_port *port;
455 int status;
456};
457
453#endif /* FSF_H */ 458#endif /* FSF_H */
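The new struct zfcp_fsf_ct_els is the common container handed to the FSF layer for both CT and ELS exchanges, and (via dd_bsg_size further down in zfcp_scsi.c) the per-job data for FC BSG requests. A hedged sketch of how an initiator is expected to fill it; the setup/handler function names are invented and the actual FSF send call is omitted:

#include <linux/scatterlist.h>
#include "zfcp_fsf.h"

/* completion callback: invoked by the FSF layer with handler_data */
static void example_ct_els_done(void *data)
{
	struct zfcp_fsf_ct_els *ct_els = data;

	if (ct_els->status)
		return;		/* exchange failed, resp content undefined */
	/* on success ct_els->resp holds the CT/ELS response payload */
}

static void example_ct_els_setup(struct zfcp_fsf_ct_els *ct_els,
				 struct scatterlist *req_sg,
				 struct scatterlist *resp_sg)
{
	ct_els->req = req_sg;			/* request payload */
	ct_els->resp = resp_sg;			/* response buffer */
	ct_els->handler = example_ct_els_done;
	ct_els->handler_data = ct_els;
	ct_els->port = NULL;			/* only set for internal ADISC */
	ct_els->status = 0;
	/* the request is then queued through the FSF send path (not shown) */
}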
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 6c5228b627fc..dbfa312a7f50 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -9,7 +9,9 @@
9#define KMSG_COMPONENT "zfcp" 9#define KMSG_COMPONENT "zfcp"
10#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
11 11
12#include <linux/slab.h>
12#include "zfcp_ext.h" 13#include "zfcp_ext.h"
14#include "zfcp_qdio.h"
13 15
14#define QBUFF_PER_PAGE (PAGE_SIZE / sizeof(struct qdio_buffer)) 16#define QBUFF_PER_PAGE (PAGE_SIZE / sizeof(struct qdio_buffer))
15 17
@@ -28,12 +30,6 @@ static int zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbal)
28 return 0; 30 return 0;
29} 31}
30 32
31static struct qdio_buffer_element *
32zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx)
33{
34 return &q->sbal[sbal_idx]->element[sbale_idx];
35}
36
37static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id) 33static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id)
38{ 34{
39 struct zfcp_adapter *adapter = qdio->adapter; 35 struct zfcp_adapter *adapter = qdio->adapter;
@@ -106,7 +102,7 @@ static void zfcp_qdio_resp_put_back(struct zfcp_qdio *qdio, int processed)
106 102
107 if (unlikely(retval)) { 103 if (unlikely(retval)) {
108 atomic_set(&queue->count, count); 104 atomic_set(&queue->count, count);
109 /* FIXME: Recover this with an adapter reopen? */ 105 zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdrpb_1", NULL);
110 } else { 106 } else {
111 queue->first += count; 107 queue->first += count;
112 queue->first %= QDIO_MAX_BUFFERS_PER_Q; 108 queue->first %= QDIO_MAX_BUFFERS_PER_Q;
@@ -145,32 +141,8 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
145 zfcp_qdio_resp_put_back(qdio, count); 141 zfcp_qdio_resp_put_back(qdio, count);
146} 142}
147 143
148/**
149 * zfcp_qdio_sbale_req - return ptr to SBALE of req_q for a struct zfcp_fsf_req
150 * @qdio: pointer to struct zfcp_qdio
151 * @q_rec: pointer to struct zfcp_queue_rec
152 * Returns: pointer to qdio_buffer_element (SBALE) structure
153 */
154struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_qdio *qdio,
155 struct zfcp_queue_req *q_req)
156{
157 return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last, 0);
158}
159
160/**
161 * zfcp_qdio_sbale_curr - return curr SBALE on req_q for a struct zfcp_fsf_req
162 * @fsf_req: pointer to struct fsf_req
163 * Returns: pointer to qdio_buffer_element (SBALE) structure
164 */
165struct qdio_buffer_element *zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio,
166 struct zfcp_queue_req *q_req)
167{
168 return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last,
169 q_req->sbale_curr);
170}
171
172static void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio, 144static void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio,
173 struct zfcp_queue_req *q_req, int max_sbals) 145 struct zfcp_qdio_req *q_req, int max_sbals)
174{ 146{
175 int count = atomic_read(&qdio->req_q.count); 147 int count = atomic_read(&qdio->req_q.count);
176 count = min(count, max_sbals); 148 count = min(count, max_sbals);
@@ -179,7 +151,7 @@ static void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio,
179} 151}
180 152
181static struct qdio_buffer_element * 153static struct qdio_buffer_element *
182zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req, 154zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
183 unsigned long sbtype) 155 unsigned long sbtype)
184{ 156{
185 struct qdio_buffer_element *sbale; 157 struct qdio_buffer_element *sbale;
@@ -214,7 +186,7 @@ zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req,
214} 186}
215 187
216static struct qdio_buffer_element * 188static struct qdio_buffer_element *
217zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req, 189zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
218 unsigned int sbtype) 190 unsigned int sbtype)
219{ 191{
220 if (q_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL) 192 if (q_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL)
@@ -224,7 +196,7 @@ zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req,
224} 196}
225 197
226static void zfcp_qdio_undo_sbals(struct zfcp_qdio *qdio, 198static void zfcp_qdio_undo_sbals(struct zfcp_qdio *qdio,
227 struct zfcp_queue_req *q_req) 199 struct zfcp_qdio_req *q_req)
228{ 200{
229 struct qdio_buffer **sbal = qdio->req_q.sbal; 201 struct qdio_buffer **sbal = qdio->req_q.sbal;
230 int first = q_req->sbal_first; 202 int first = q_req->sbal_first;
@@ -235,7 +207,7 @@ static void zfcp_qdio_undo_sbals(struct zfcp_qdio *qdio,
235} 207}
236 208
237static int zfcp_qdio_fill_sbals(struct zfcp_qdio *qdio, 209static int zfcp_qdio_fill_sbals(struct zfcp_qdio *qdio,
238 struct zfcp_queue_req *q_req, 210 struct zfcp_qdio_req *q_req,
239 unsigned int sbtype, void *start_addr, 211 unsigned int sbtype, void *start_addr,
240 unsigned int total_length) 212 unsigned int total_length)
241{ 213{
@@ -271,8 +243,7 @@ static int zfcp_qdio_fill_sbals(struct zfcp_qdio *qdio,
271 * @max_sbals: upper bound for number of SBALs to be used 243 * @max_sbals: upper bound for number of SBALs to be used
272 * Returns: number of bytes, or error (negativ) 244 * Returns: number of bytes, or error (negativ)
273 */ 245 */
274int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, 246int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
275 struct zfcp_queue_req *q_req,
276 unsigned long sbtype, struct scatterlist *sg, 247 unsigned long sbtype, struct scatterlist *sg,
277 int max_sbals) 248 int max_sbals)
278{ 249{
@@ -304,10 +275,10 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio,
304/** 275/**
305 * zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO 276 * zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO
306 * @qdio: pointer to struct zfcp_qdio 277 * @qdio: pointer to struct zfcp_qdio
307 * @q_req: pointer to struct zfcp_queue_req 278 * @q_req: pointer to struct zfcp_qdio_req
308 * Returns: 0 on success, error otherwise 279 * Returns: 0 on success, error otherwise
309 */ 280 */
310int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req) 281int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
311{ 282{
312 struct zfcp_qdio_queue *req_q = &qdio->req_q; 283 struct zfcp_qdio_queue *req_q = &qdio->req_q;
313 int first = q_req->sbal_first; 284 int first = q_req->sbal_first;
@@ -349,8 +320,6 @@ static void zfcp_qdio_setup_init_data(struct qdio_initialize *id,
349 id->input_handler = zfcp_qdio_int_resp; 320 id->input_handler = zfcp_qdio_int_resp;
350 id->output_handler = zfcp_qdio_int_req; 321 id->output_handler = zfcp_qdio_int_req;
351 id->int_parm = (unsigned long) qdio; 322 id->int_parm = (unsigned long) qdio;
352 id->flags = QDIO_INBOUND_0COPY_SBALS |
353 QDIO_OUTBOUND_0COPY_SBALS | QDIO_USE_OUTBOUND_PCIS;
354 id->input_sbal_addr_array = (void **) (qdio->resp_q.sbal); 323 id->input_sbal_addr_array = (void **) (qdio->resp_q.sbal);
355 id->output_sbal_addr_array = (void **) (qdio->req_q.sbal); 324 id->output_sbal_addr_array = (void **) (qdio->req_q.sbal);
356 325
diff --git a/drivers/s390/scsi/zfcp_qdio.h b/drivers/s390/scsi/zfcp_qdio.h
new file mode 100644
index 000000000000..8cca54631e1e
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_qdio.h
@@ -0,0 +1,109 @@
1/*
2 * zfcp device driver
3 *
4 * Header file for zfcp qdio interface
5 *
6 * Copyright IBM Corporation 2010
7 */
8
9#ifndef ZFCP_QDIO_H
10#define ZFCP_QDIO_H
11
12#include <asm/qdio.h>
13
14/**
15 * struct zfcp_qdio_queue - qdio queue buffer, zfcp index and free count
16 * @sbal: qdio buffers
17 * @first: index of next free buffer in queue
18 * @count: number of free buffers in queue
19 */
20struct zfcp_qdio_queue {
21 struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q];
22 u8 first;
23 atomic_t count;
24};
25
26/**
27 * struct zfcp_qdio - basic qdio data structure
28 * @resp_q: response queue
29 * @req_q: request queue
30 * @stat_lock: lock to protect req_q_util and req_q_time
31 * @req_q_lock: lock to serialize access to request queue
32 * @req_q_time: time of last fill level change
33 * @req_q_util: used for accounting
34 * @req_q_full: queue full incidents
35 * @req_q_wq: used to wait for SBAL availability
36 * @adapter: adapter used in conjunction with this qdio structure
37 */
38struct zfcp_qdio {
39 struct zfcp_qdio_queue resp_q;
40 struct zfcp_qdio_queue req_q;
41 spinlock_t stat_lock;
42 spinlock_t req_q_lock;
43 unsigned long long req_q_time;
44 u64 req_q_util;
45 atomic_t req_q_full;
46 wait_queue_head_t req_q_wq;
47 struct zfcp_adapter *adapter;
48};
49
50/**
51 * struct zfcp_qdio_req - qdio queue related values for a request
52 * @sbal_number: number of free sbals
53 * @sbal_first: first sbal for this request
54 * @sbal_last: last sbal for this request
55 * @sbal_limit: last possible sbal for this request
56 * @sbale_curr: current sbale at creation of this request
57 * @sbal_response: sbal used in interrupt
58 * @qdio_outb_usage: usage of outbound queue
59 * @qdio_inb_usage: usage of inbound queue
60 */
61struct zfcp_qdio_req {
62 u8 sbal_number;
63 u8 sbal_first;
64 u8 sbal_last;
65 u8 sbal_limit;
66 u8 sbale_curr;
67 u8 sbal_response;
68 u16 qdio_outb_usage;
69 u16 qdio_inb_usage;
70};
71
72/**
73 * zfcp_qdio_sbale - return pointer to sbale in qdio queue
74 * @q: queue where to find sbal
75 * @sbal_idx: sbal index in queue
76 * @sbale_idx: sbale index in sbal
77 */
78static inline struct qdio_buffer_element *
79zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx)
80{
81 return &q->sbal[sbal_idx]->element[sbale_idx];
82}
83
84/**
85 * zfcp_qdio_sbale_req - return pointer to sbale on req_q for a request
86 * @qdio: pointer to struct zfcp_qdio
 87 * @q_req: pointer to struct zfcp_qdio_req
88 * Returns: pointer to qdio_buffer_element (sbale) structure
89 */
90static inline struct qdio_buffer_element *
91zfcp_qdio_sbale_req(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
92{
93 return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last, 0);
94}
95
96/**
97 * zfcp_qdio_sbale_curr - return current sbale on req_q for a request
98 * @qdio: pointer to struct zfcp_qdio
 99 * @q_req: pointer to struct zfcp_qdio_req
100 * Returns: pointer to qdio_buffer_element (sbale) structure
101 */
102static inline struct qdio_buffer_element *
103zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
104{
105 return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last,
106 q_req->sbale_curr);
107}
108
109#endif /* ZFCP_QDIO_H */
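With zfcp_qdio_sbale_req()/zfcp_qdio_sbale_curr() now inline in this header, request builders follow the pattern visible in the zfcp_fsf.c hunks above. A minimal sketch for a request without a data payload, assuming q_req is the zfcp_qdio_req embedded in the FSF request:

#include <asm/qdio.h>
#include "zfcp_qdio.h"

static void example_setup_ctrl_sbal(struct zfcp_qdio *qdio,
				    struct zfcp_qdio_req *q_req)
{
	struct qdio_buffer_element *sbale;

	/* first SBALE of the current SBAL carries the SBAL type */
	sbale = zfcp_qdio_sbale_req(qdio, q_req);
	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
	/* mark the second entry as the last one in use */
	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
}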
diff --git a/drivers/s390/scsi/zfcp_reqlist.h b/drivers/s390/scsi/zfcp_reqlist.h
new file mode 100644
index 000000000000..a72d1b730aba
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_reqlist.h
@@ -0,0 +1,183 @@
1/*
2 * zfcp device driver
3 *
4 * Data structure and helper functions for tracking pending FSF
5 * requests.
6 *
7 * Copyright IBM Corporation 2009
8 */
9
10#ifndef ZFCP_REQLIST_H
11#define ZFCP_REQLIST_H
12
13/* number of hash buckets */
14#define ZFCP_REQ_LIST_BUCKETS 128
15
16/**
17 * struct zfcp_reqlist - Container for request list (reqlist)
18 * @lock: Spinlock for protecting the hash list
 19 * @buckets: Array of hash buckets, each is a list of requests in this bucket
20 */
21struct zfcp_reqlist {
22 spinlock_t lock;
23 struct list_head buckets[ZFCP_REQ_LIST_BUCKETS];
24};
25
26static inline int zfcp_reqlist_hash(unsigned long req_id)
27{
28 return req_id % ZFCP_REQ_LIST_BUCKETS;
29}
30
31/**
32 * zfcp_reqlist_alloc - Allocate and initialize reqlist
33 *
34 * Returns pointer to allocated reqlist on success, or NULL on
35 * allocation failure.
36 */
37static inline struct zfcp_reqlist *zfcp_reqlist_alloc(void)
38{
39 unsigned int i;
40 struct zfcp_reqlist *rl;
41
42 rl = kzalloc(sizeof(struct zfcp_reqlist), GFP_KERNEL);
43 if (!rl)
44 return NULL;
45
46 spin_lock_init(&rl->lock);
47
48 for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++)
49 INIT_LIST_HEAD(&rl->buckets[i]);
50
51 return rl;
52}
53
54/**
 55 * zfcp_reqlist_isempty - Check whether the request list is empty
56 * @rl: pointer to reqlist
57 *
58 * Returns: 1 if list is empty, 0 if not
59 */
60static inline int zfcp_reqlist_isempty(struct zfcp_reqlist *rl)
61{
62 unsigned int i;
63
64 for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++)
65 if (!list_empty(&rl->buckets[i]))
66 return 0;
67 return 1;
68}
69
70/**
71 * zfcp_reqlist_free - Free allocated memory for reqlist
72 * @rl: The reqlist where to free memory
73 */
74static inline void zfcp_reqlist_free(struct zfcp_reqlist *rl)
75{
76 /* sanity check */
77 BUG_ON(!zfcp_reqlist_isempty(rl));
78
79 kfree(rl);
80}
81
82static inline struct zfcp_fsf_req *
83_zfcp_reqlist_find(struct zfcp_reqlist *rl, unsigned long req_id)
84{
85 struct zfcp_fsf_req *req;
86 unsigned int i;
87
88 i = zfcp_reqlist_hash(req_id);
89 list_for_each_entry(req, &rl->buckets[i], list)
90 if (req->req_id == req_id)
91 return req;
92 return NULL;
93}
94
95/**
96 * zfcp_reqlist_find - Lookup FSF request by its request id
97 * @rl: The reqlist where to lookup the FSF request
98 * @req_id: The request id to look for
99 *
100 * Returns a pointer to the FSF request with the specified request id
101 * or NULL if there is no known FSF request with this id.
102 */
103static inline struct zfcp_fsf_req *
104zfcp_reqlist_find(struct zfcp_reqlist *rl, unsigned long req_id)
105{
106 unsigned long flags;
107 struct zfcp_fsf_req *req;
108
109 spin_lock_irqsave(&rl->lock, flags);
110 req = _zfcp_reqlist_find(rl, req_id);
111 spin_unlock_irqrestore(&rl->lock, flags);
112
113 return req;
114}
115
116/**
117 * zfcp_reqlist_find_rm - Lookup request by id and remove it from reqlist
118 * @rl: reqlist where to search and remove entry
119 * @req_id: The request id of the request to look for
120 *
121 * This function tries to find the FSF request with the specified
122 * id and then removes it from the reqlist. The reqlist lock is held
123 * during both steps of the operation.
124 *
125 * Returns: Pointer to the FSF request if the request has been found,
126 * NULL if it has not been found.
127 */
128static inline struct zfcp_fsf_req *
129zfcp_reqlist_find_rm(struct zfcp_reqlist *rl, unsigned long req_id)
130{
131 unsigned long flags;
132 struct zfcp_fsf_req *req;
133
134 spin_lock_irqsave(&rl->lock, flags);
135 req = _zfcp_reqlist_find(rl, req_id);
136 if (req)
137 list_del(&req->list);
138 spin_unlock_irqrestore(&rl->lock, flags);
139
140 return req;
141}
142
143/**
144 * zfcp_reqlist_add - Add entry to reqlist
145 * @rl: reqlist where to add the entry
146 * @req: The entry to add
147 *
148 * The request id always increases. As an optimization new requests
149 * are added here with list_add_tail at the end of the bucket lists
150 * while old requests are looked up starting at the beginning of the
151 * lists.
152 */
153static inline void zfcp_reqlist_add(struct zfcp_reqlist *rl,
154 struct zfcp_fsf_req *req)
155{
156 unsigned int i;
157 unsigned long flags;
158
159 i = zfcp_reqlist_hash(req->req_id);
160
161 spin_lock_irqsave(&rl->lock, flags);
162 list_add_tail(&req->list, &rl->buckets[i]);
163 spin_unlock_irqrestore(&rl->lock, flags);
164}
165
166/**
167 * zfcp_reqlist_move - Move all entries from reqlist to simple list
168 * @rl: The zfcp_reqlist where to remove all entries
169 * @list: The list where to move all entries
170 */
171static inline void zfcp_reqlist_move(struct zfcp_reqlist *rl,
172 struct list_head *list)
173{
174 unsigned int i;
175 unsigned long flags;
176
177 spin_lock_irqsave(&rl->lock, flags);
178 for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++)
179 list_splice_init(&rl->buckets[i], list);
180 spin_unlock_irqrestore(&rl->lock, flags);
181}
182
183#endif /* ZFCP_REQLIST_H */
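Taken together, the helpers above replace the open-coded request list previously kept in zfcp_aux.c and zfcp_fsf.c. A hedged sketch of the intended life cycle, with the surrounding adapter setup and send/interrupt paths reduced to comments:

#include <linux/errno.h>
#include <linux/slab.h>
#include "zfcp_def.h"
#include "zfcp_reqlist.h"

static int example_reqlist_lifecycle(struct zfcp_fsf_req *req)
{
	struct zfcp_reqlist *rl;

	rl = zfcp_reqlist_alloc();	/* adapter bring-up */
	if (!rl)
		return -ENOMEM;

	zfcp_reqlist_add(rl, req);	/* just before the request is sent */

	/* interrupt path: look up and unhash in a single locked step */
	if (zfcp_reqlist_find_rm(rl, req->req_id) != req)
		return -ENOENT;

	zfcp_reqlist_free(rl);		/* adapter teardown, list must be empty */
	return 0;
}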
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 0e1a34627a2e..174b6d57d576 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -3,35 +3,41 @@
3 * 3 *
4 * Interface to Linux SCSI midlayer. 4 * Interface to Linux SCSI midlayer.
5 * 5 *
6 * Copyright IBM Corporation 2002, 2009 6 * Copyright IBM Corporation 2002, 2010
7 */ 7 */
8 8
9#define KMSG_COMPONENT "zfcp" 9#define KMSG_COMPONENT "zfcp"
10#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
11 11
12#include <linux/types.h>
13#include <linux/slab.h>
14#include <scsi/fc/fc_fcp.h>
12#include <asm/atomic.h> 15#include <asm/atomic.h>
13#include "zfcp_ext.h" 16#include "zfcp_ext.h"
14#include "zfcp_dbf.h" 17#include "zfcp_dbf.h"
18#include "zfcp_fc.h"
19#include "zfcp_reqlist.h"
15 20
16static unsigned int default_depth = 32; 21static unsigned int default_depth = 32;
17module_param_named(queue_depth, default_depth, uint, 0600); 22module_param_named(queue_depth, default_depth, uint, 0600);
18MODULE_PARM_DESC(queue_depth, "Default queue depth for new SCSI devices"); 23MODULE_PARM_DESC(queue_depth, "Default queue depth for new SCSI devices");
19 24
20/* Find start of Sense Information in FCP response unit*/ 25static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth,
21char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu) 26 int reason)
22{ 27{
23 char *fcp_sns_info_ptr; 28 switch (reason) {
24 29 case SCSI_QDEPTH_DEFAULT:
25 fcp_sns_info_ptr = (unsigned char *) &fcp_rsp_iu[1]; 30 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
26 if (fcp_rsp_iu->validity.bits.fcp_rsp_len_valid) 31 break;
27 fcp_sns_info_ptr += fcp_rsp_iu->fcp_rsp_len; 32 case SCSI_QDEPTH_QFULL:
28 33 scsi_track_queue_full(sdev, depth);
29 return fcp_sns_info_ptr; 34 break;
30} 35 case SCSI_QDEPTH_RAMP_UP:
31 36 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
32static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth) 37 break;
33{ 38 default:
34 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth); 39 return -EOPNOTSUPP;
40 }
35 return sdev->queue_depth; 41 return sdev->queue_depth;
36} 42}
37 43
@@ -39,7 +45,7 @@ static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
39{ 45{
40 struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata; 46 struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata;
41 unit->device = NULL; 47 unit->device = NULL;
42 zfcp_unit_put(unit); 48 put_device(&unit->dev);
43} 49}
44 50
45static int zfcp_scsi_slave_configure(struct scsi_device *sdp) 51static int zfcp_scsi_slave_configure(struct scsi_device *sdp)
@@ -55,10 +61,9 @@ static void zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result)
55{ 61{
56 struct zfcp_adapter *adapter = 62 struct zfcp_adapter *adapter =
57 (struct zfcp_adapter *) scpnt->device->host->hostdata[0]; 63 (struct zfcp_adapter *) scpnt->device->host->hostdata[0];
64
58 set_host_byte(scpnt, result); 65 set_host_byte(scpnt, result);
59 if ((scpnt->device != NULL) && (scpnt->device->host != NULL)) 66 zfcp_dbf_scsi_fail_send(adapter->dbf, scpnt);
60 zfcp_dbf_scsi_result("fail", 4, adapter->dbf, scpnt, NULL);
61 /* return directly */
62 scpnt->scsi_done(scpnt); 67 scpnt->scsi_done(scpnt);
63} 68}
64 69
@@ -82,29 +87,35 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
82 adapter = (struct zfcp_adapter *) scpnt->device->host->hostdata[0]; 87 adapter = (struct zfcp_adapter *) scpnt->device->host->hostdata[0];
83 unit = scpnt->device->hostdata; 88 unit = scpnt->device->hostdata;
84 89
85 BUG_ON(!adapter || (adapter != unit->port->adapter));
86 BUG_ON(!scpnt->scsi_done);
87
88 if (unlikely(!unit)) {
89 zfcp_scsi_command_fail(scpnt, DID_NO_CONNECT);
90 return 0;
91 }
92
93 scsi_result = fc_remote_port_chkready(rport); 90 scsi_result = fc_remote_port_chkready(rport);
94 if (unlikely(scsi_result)) { 91 if (unlikely(scsi_result)) {
95 scpnt->result = scsi_result; 92 scpnt->result = scsi_result;
96 zfcp_dbf_scsi_result("fail", 4, adapter->dbf, scpnt, NULL); 93 zfcp_dbf_scsi_fail_send(adapter->dbf, scpnt);
97 scpnt->scsi_done(scpnt); 94 scpnt->scsi_done(scpnt);
98 return 0; 95 return 0;
99 } 96 }
100 97
101 status = atomic_read(&unit->status); 98 status = atomic_read(&unit->status);
102 if (unlikely((status & ZFCP_STATUS_COMMON_ERP_FAILED) || 99 if (unlikely(status & ZFCP_STATUS_COMMON_ERP_FAILED) &&
103 !(status & ZFCP_STATUS_COMMON_RUNNING))) { 100 !(atomic_read(&unit->port->status) &
101 ZFCP_STATUS_COMMON_ERP_FAILED)) {
102 /* only unit access denied, but port is good
103 * not covered by FC transport, have to fail here */
104 zfcp_scsi_command_fail(scpnt, DID_ERROR); 104 zfcp_scsi_command_fail(scpnt, DID_ERROR);
105 return 0; 105 return 0;
106 } 106 }
107 107
108 if (unlikely(!(status & ZFCP_STATUS_COMMON_UNBLOCKED))) {
109 /* This could be either
110 * open unit pending: this is temporary, will result in
111 * open unit or ERP_FAILED, so retry command
112 * call to rport_delete pending: mimic retry from
113 * fc_remote_port_chkready until rport is BLOCKED
114 */
115 zfcp_scsi_command_fail(scpnt, DID_IMM_RETRY);
116 return 0;
117 }
118
108 ret = zfcp_fsf_send_fcp_command_task(unit, scpnt); 119 ret = zfcp_fsf_send_fcp_command_task(unit, scpnt);
109 if (unlikely(ret == -EBUSY)) 120 if (unlikely(ret == -EBUSY))
110 return SCSI_MLQUEUE_DEVICE_BUSY; 121 return SCSI_MLQUEUE_DEVICE_BUSY;
@@ -115,49 +126,44 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
115} 126}
116 127
117static struct zfcp_unit *zfcp_unit_lookup(struct zfcp_adapter *adapter, 128static struct zfcp_unit *zfcp_unit_lookup(struct zfcp_adapter *adapter,
118 int channel, unsigned int id, 129 unsigned int id, u64 lun)
119 unsigned int lun)
120{ 130{
131 unsigned long flags;
121 struct zfcp_port *port; 132 struct zfcp_port *port;
122 struct zfcp_unit *unit; 133 struct zfcp_unit *unit = NULL;
123 int scsi_lun;
124 134
125 list_for_each_entry(port, &adapter->port_list_head, list) { 135 read_lock_irqsave(&adapter->port_list_lock, flags);
136 list_for_each_entry(port, &adapter->port_list, list) {
126 if (!port->rport || (id != port->rport->scsi_target_id)) 137 if (!port->rport || (id != port->rport->scsi_target_id))
127 continue; 138 continue;
128 list_for_each_entry(unit, &port->unit_list_head, list) { 139 unit = zfcp_get_unit_by_lun(port, lun);
129 scsi_lun = scsilun_to_int( 140 if (unit)
130 (struct scsi_lun *)&unit->fcp_lun); 141 break;
131 if (lun == scsi_lun)
132 return unit;
133 }
134 } 142 }
143 read_unlock_irqrestore(&adapter->port_list_lock, flags);
135 144
136 return NULL; 145 return unit;
137} 146}
138 147
139static int zfcp_scsi_slave_alloc(struct scsi_device *sdp) 148static int zfcp_scsi_slave_alloc(struct scsi_device *sdp)
140{ 149{
141 struct zfcp_adapter *adapter; 150 struct zfcp_adapter *adapter;
142 struct zfcp_unit *unit; 151 struct zfcp_unit *unit;
143 unsigned long flags; 152 u64 lun;
144 int retval = -ENXIO;
145 153
146 adapter = (struct zfcp_adapter *) sdp->host->hostdata[0]; 154 adapter = (struct zfcp_adapter *) sdp->host->hostdata[0];
147 if (!adapter) 155 if (!adapter)
148 goto out; 156 goto out;
149 157
150 read_lock_irqsave(&zfcp_data.config_lock, flags); 158 int_to_scsilun(sdp->lun, (struct scsi_lun *)&lun);
151 unit = zfcp_unit_lookup(adapter, sdp->channel, sdp->id, sdp->lun); 159 unit = zfcp_unit_lookup(adapter, sdp->id, lun);
152 if (unit) { 160 if (unit) {
153 sdp->hostdata = unit; 161 sdp->hostdata = unit;
154 unit->device = sdp; 162 unit->device = sdp;
155 zfcp_unit_get(unit); 163 return 0;
156 retval = 0;
157 } 164 }
158 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
159out: 165out:
160 return retval; 166 return -ENXIO;
161} 167}
162 168
163static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) 169static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
@@ -176,9 +182,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
176 /* avoid race condition between late normal completion and abort */ 182 /* avoid race condition between late normal completion and abort */
177 write_lock_irqsave(&adapter->abort_lock, flags); 183 write_lock_irqsave(&adapter->abort_lock, flags);
178 184
179 spin_lock(&adapter->req_list_lock); 185 old_req = zfcp_reqlist_find(adapter->req_list, old_reqid);
180 old_req = zfcp_reqlist_find(adapter, old_reqid);
181 spin_unlock(&adapter->req_list_lock);
182 if (!old_req) { 186 if (!old_req) {
183 write_unlock_irqrestore(&adapter->abort_lock, flags); 187 write_unlock_irqrestore(&adapter->abort_lock, flags);
184 zfcp_dbf_scsi_abort("lte1", adapter->dbf, scpnt, NULL, 188 zfcp_dbf_scsi_abort("lte1", adapter->dbf, scpnt, NULL,
@@ -196,6 +200,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
196 break; 200 break;
197 201
198 zfcp_erp_wait(adapter); 202 zfcp_erp_wait(adapter);
203 fc_block_scsi_eh(scpnt);
199 if (!(atomic_read(&adapter->status) & 204 if (!(atomic_read(&adapter->status) &
200 ZFCP_STATUS_COMMON_RUNNING)) { 205 ZFCP_STATUS_COMMON_RUNNING)) {
201 zfcp_dbf_scsi_abort("nres", adapter->dbf, scpnt, NULL, 206 zfcp_dbf_scsi_abort("nres", adapter->dbf, scpnt, NULL,
@@ -235,6 +240,7 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
235 break; 240 break;
236 241
237 zfcp_erp_wait(adapter); 242 zfcp_erp_wait(adapter);
243 fc_block_scsi_eh(scpnt);
238 if (!(atomic_read(&adapter->status) & 244 if (!(atomic_read(&adapter->status) &
239 ZFCP_STATUS_COMMON_RUNNING)) { 245 ZFCP_STATUS_COMMON_RUNNING)) {
240 zfcp_dbf_scsi_devreset("nres", tm_flags, unit, scpnt); 246 zfcp_dbf_scsi_devreset("nres", tm_flags, unit, scpnt);
@@ -249,9 +255,6 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
249 if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) { 255 if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
250 zfcp_dbf_scsi_devreset("fail", tm_flags, unit, scpnt); 256 zfcp_dbf_scsi_devreset("fail", tm_flags, unit, scpnt);
251 retval = FAILED; 257 retval = FAILED;
252 } else if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP) {
253 zfcp_dbf_scsi_devreset("nsup", tm_flags, unit, scpnt);
254 retval = FAILED;
255 } else 258 } else
256 zfcp_dbf_scsi_devreset("okay", tm_flags, unit, scpnt); 259 zfcp_dbf_scsi_devreset("okay", tm_flags, unit, scpnt);
257 260
@@ -261,12 +264,12 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
261 264
262static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt) 265static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
263{ 266{
264 return zfcp_task_mgmt_function(scpnt, FCP_LOGICAL_UNIT_RESET); 267 return zfcp_task_mgmt_function(scpnt, FCP_TMF_LUN_RESET);
265} 268}
266 269
267static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *scpnt) 270static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *scpnt)
268{ 271{
269 return zfcp_task_mgmt_function(scpnt, FCP_TARGET_RESET); 272 return zfcp_task_mgmt_function(scpnt, FCP_TMF_TGT_RESET);
270} 273}
271 274
272static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) 275static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
@@ -276,6 +279,7 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
276 279
277 zfcp_erp_adapter_reopen(adapter, 0, "schrh_1", scpnt); 280 zfcp_erp_adapter_reopen(adapter, 0, "schrh_1", scpnt);
278 zfcp_erp_wait(adapter); 281 zfcp_erp_wait(adapter);
282 fc_block_scsi_eh(scpnt);
279 283
280 return SUCCESS; 284 return SUCCESS;
281} 285}
@@ -303,7 +307,7 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
303 adapter->scsi_host->max_lun = 1; 307 adapter->scsi_host->max_lun = 1;
304 adapter->scsi_host->max_channel = 0; 308 adapter->scsi_host->max_channel = 0;
305 adapter->scsi_host->unique_id = dev_id.devno; 309 adapter->scsi_host->unique_id = dev_id.devno;
306 adapter->scsi_host->max_cmd_len = 255; 310 adapter->scsi_host->max_cmd_len = 16; /* in struct fcp_cmnd */
307 adapter->scsi_host->transportt = zfcp_data.scsi_transport_template; 311 adapter->scsi_host->transportt = zfcp_data.scsi_transport_template;
308 312
309 adapter->scsi_host->hostdata[0] = (unsigned long) adapter; 313 adapter->scsi_host->hostdata[0] = (unsigned long) adapter;
@@ -325,12 +329,11 @@ void zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter)
325 if (!shost) 329 if (!shost)
326 return; 330 return;
327 331
328 read_lock_irq(&zfcp_data.config_lock); 332 read_lock_irq(&adapter->port_list_lock);
329 list_for_each_entry(port, &adapter->port_list_head, list) 333 list_for_each_entry(port, &adapter->port_list, list)
330 if (port->rport) 334 port->rport = NULL;
331 port->rport = NULL; 335 read_unlock_irq(&adapter->port_list_lock);
332 336
333 read_unlock_irq(&zfcp_data.config_lock);
334 fc_remove_host(shost); 337 fc_remove_host(shost);
335 scsi_remove_host(shost); 338 scsi_remove_host(shost);
336 scsi_host_put(shost); 339 scsi_host_put(shost);
@@ -348,7 +351,7 @@ zfcp_init_fc_host_stats(struct zfcp_adapter *adapter)
348 fc_stats = kmalloc(sizeof(*fc_stats), GFP_KERNEL); 351 fc_stats = kmalloc(sizeof(*fc_stats), GFP_KERNEL);
349 if (!fc_stats) 352 if (!fc_stats)
350 return NULL; 353 return NULL;
351 adapter->fc_stats = fc_stats; /* freed in adater_dequeue */ 354 adapter->fc_stats = fc_stats; /* freed in adapter_release */
352 } 355 }
353 memset(adapter->fc_stats, 0, sizeof(*adapter->fc_stats)); 356 memset(adapter->fc_stats, 0, sizeof(*adapter->fc_stats));
354 return adapter->fc_stats; 357 return adapter->fc_stats;
@@ -464,7 +467,7 @@ static void zfcp_reset_fc_host_stats(struct Scsi_Host *shost)
464 adapter->stats_reset = jiffies/HZ; 467 adapter->stats_reset = jiffies/HZ;
465 kfree(adapter->stats_reset_data); 468 kfree(adapter->stats_reset_data);
466 adapter->stats_reset_data = data; /* finally freed in 469 adapter->stats_reset_data = data; /* finally freed in
467 adapter_dequeue */ 470 adapter_release */
468 } 471 }
469} 472}
470 473
@@ -495,7 +498,7 @@ static void zfcp_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
495 * @rport: The FC rport where to teminate I/O 498 * @rport: The FC rport where to teminate I/O
496 * 499 *
497 * Abort all pending SCSI commands for a port by closing the 500 * Abort all pending SCSI commands for a port by closing the
498 * port. Using a reopen for avoids a conflict with a shutdown 501 * port. Using a reopen avoiding a conflict with a shutdown
499 * overwriting a reopen. 502 * overwriting a reopen.
500 */ 503 */
501static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport) 504static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport)
@@ -505,15 +508,11 @@ static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport)
505 struct zfcp_adapter *adapter = 508 struct zfcp_adapter *adapter =
506 (struct zfcp_adapter *)shost->hostdata[0]; 509 (struct zfcp_adapter *)shost->hostdata[0];
507 510
508 write_lock_irq(&zfcp_data.config_lock);
509 port = zfcp_get_port_by_wwpn(adapter, rport->port_name); 511 port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
510 if (port)
511 zfcp_port_get(port);
512 write_unlock_irq(&zfcp_data.config_lock);
513 512
514 if (port) { 513 if (port) {
515 zfcp_erp_port_reopen(port, 0, "sctrpi1", NULL); 514 zfcp_erp_port_reopen(port, 0, "sctrpi1", NULL);
516 zfcp_port_put(port); 515 put_device(&port->dev);
517 } 516 }
518} 517}
519 518
@@ -555,31 +554,34 @@ static void zfcp_scsi_rport_block(struct zfcp_port *port)
555 554
556void zfcp_scsi_schedule_rport_register(struct zfcp_port *port) 555void zfcp_scsi_schedule_rport_register(struct zfcp_port *port)
557{ 556{
558 zfcp_port_get(port); 557 get_device(&port->dev);
559 port->rport_task = RPORT_ADD; 558 port->rport_task = RPORT_ADD;
560 559
561 if (!queue_work(port->adapter->work_queue, &port->rport_work)) 560 if (!queue_work(port->adapter->work_queue, &port->rport_work))
562 zfcp_port_put(port); 561 put_device(&port->dev);
563} 562}
564 563
565void zfcp_scsi_schedule_rport_block(struct zfcp_port *port) 564void zfcp_scsi_schedule_rport_block(struct zfcp_port *port)
566{ 565{
567 zfcp_port_get(port); 566 get_device(&port->dev);
568 port->rport_task = RPORT_DEL; 567 port->rport_task = RPORT_DEL;
569 568
570 if (port->rport && queue_work(port->adapter->work_queue, 569 if (port->rport && queue_work(port->adapter->work_queue,
571 &port->rport_work)) 570 &port->rport_work))
572 return; 571 return;
573 572
574 zfcp_port_put(port); 573 put_device(&port->dev);
575} 574}
576 575
577void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *adapter) 576void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *adapter)
578{ 577{
578 unsigned long flags;
579 struct zfcp_port *port; 579 struct zfcp_port *port;
580 580
581 list_for_each_entry(port, &adapter->port_list_head, list) 581 read_lock_irqsave(&adapter->port_list_lock, flags);
582 list_for_each_entry(port, &adapter->port_list, list)
582 zfcp_scsi_schedule_rport_block(port); 583 zfcp_scsi_schedule_rport_block(port);
584 read_unlock_irqrestore(&adapter->port_list_lock, flags);
583} 585}
584 586
585void zfcp_scsi_rport_work(struct work_struct *work) 587void zfcp_scsi_rport_work(struct work_struct *work)
@@ -597,7 +599,7 @@ void zfcp_scsi_rport_work(struct work_struct *work)
597 } 599 }
598 } 600 }
599 601
600 zfcp_port_put(port); 602 put_device(&port->dev);
601} 603}
602 604
603 605
@@ -615,21 +617,7 @@ void zfcp_scsi_scan(struct work_struct *work)
615 scsilun_to_int((struct scsi_lun *) 617 scsilun_to_int((struct scsi_lun *)
616 &unit->fcp_lun), 0); 618 &unit->fcp_lun), 0);
617 619
618 zfcp_unit_put(unit); 620 put_device(&unit->dev);
619}
620
621static int zfcp_execute_fc_job(struct fc_bsg_job *job)
622{
623 switch (job->request->msgcode) {
624 case FC_BSG_RPT_ELS:
625 case FC_BSG_HST_ELS_NOLOGIN:
626 return zfcp_fc_execute_els_fc_job(job);
627 case FC_BSG_RPT_CT:
628 case FC_BSG_HST_CT:
629 return zfcp_fc_execute_ct_fc_job(job);
630 default:
631 return -EINVAL;
632 }
633} 621}
634 622
635struct fc_function_template zfcp_transport_functions = { 623struct fc_function_template zfcp_transport_functions = {
@@ -643,6 +631,7 @@ struct fc_function_template zfcp_transport_functions = {
643 .show_host_port_name = 1, 631 .show_host_port_name = 1,
644 .show_host_permanent_port_name = 1, 632 .show_host_permanent_port_name = 1,
645 .show_host_supported_classes = 1, 633 .show_host_supported_classes = 1,
634 .show_host_supported_fc4s = 1,
646 .show_host_supported_speeds = 1, 635 .show_host_supported_speeds = 1,
647 .show_host_maxframe_size = 1, 636 .show_host_maxframe_size = 1,
648 .show_host_serial_number = 1, 637 .show_host_serial_number = 1,
@@ -652,13 +641,16 @@ struct fc_function_template zfcp_transport_functions = {
652 .get_host_port_state = zfcp_get_host_port_state, 641 .get_host_port_state = zfcp_get_host_port_state,
653 .terminate_rport_io = zfcp_scsi_terminate_rport_io, 642 .terminate_rport_io = zfcp_scsi_terminate_rport_io,
654 .show_host_port_state = 1, 643 .show_host_port_state = 1,
655 .bsg_request = zfcp_execute_fc_job, 644 .show_host_active_fc4s = 1,
645 .bsg_request = zfcp_fc_exec_bsg_job,
646 .bsg_timeout = zfcp_fc_timeout_bsg_job,
656 /* no functions registered for following dynamic attributes but 647 /* no functions registered for following dynamic attributes but
657 directly set by LLDD */ 648 directly set by LLDD */
658 .show_host_port_type = 1, 649 .show_host_port_type = 1,
659 .show_host_speed = 1, 650 .show_host_speed = 1,
660 .show_host_port_id = 1, 651 .show_host_port_id = 1,
661 .disable_target_scan = 1, 652 .disable_target_scan = 1,
653 .dd_bsg_size = sizeof(struct zfcp_fsf_ct_els),
662}; 654};
663 655
664struct zfcp_data zfcp_data = { 656struct zfcp_data zfcp_data = {
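With zfcp_execute_fc_job() removed, the transport template points .bsg_request/.bsg_timeout at the new zfcp_fc_exec_bsg_job()/zfcp_fc_timeout_bsg_job() handlers and sets .dd_bsg_size to sizeof(struct zfcp_fsf_ct_els). A hedged sketch of what that buys the handler, assuming (as is conventional for dd_bsg_size) that the FC transport hands the reserved per-job area to the LLDD as job->dd_data; the body is illustrative only, not the driver's actual implementation:

static int example_exec_bsg_job(struct fc_bsg_job *job)
{
	/* per-job CT/ELS state lives in the transport-allocated area,
	 * so the request path needs no separate allocation */
	struct zfcp_fsf_ct_els *ct_els = job->dd_data;

	/* ... set up ct_els and issue the CT or ELS request ... */
	return 0;
}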
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index d31000886ca8..f5f60698dc4c 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -3,12 +3,13 @@
3 * 3 *
4 * sysfs attributes. 4 * sysfs attributes.
5 * 5 *
6 * Copyright IBM Corporation 2008 6 * Copyright IBM Corporation 2008, 2010
7 */ 7 */
8 8
9#define KMSG_COMPONENT "zfcp" 9#define KMSG_COMPONENT "zfcp"
10#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
11 11
12#include <linux/slab.h>
12#include "zfcp_ext.h" 13#include "zfcp_ext.h"
13 14
14#define ZFCP_DEV_ATTR(_feat, _name, _mode, _show, _store) \ 15#define ZFCP_DEV_ATTR(_feat, _name, _mode, _show, _store) \
@@ -19,30 +20,43 @@ static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \
19 struct device_attribute *at,\ 20 struct device_attribute *at,\
20 char *buf) \ 21 char *buf) \
21{ \ 22{ \
22 struct _feat_def *_feat = dev_get_drvdata(dev); \ 23 struct _feat_def *_feat = container_of(dev, struct _feat_def, dev); \
23 \ 24 \
24 return sprintf(buf, _format, _value); \ 25 return sprintf(buf, _format, _value); \
25} \ 26} \
26static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \ 27static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \
27 zfcp_sysfs_##_feat##_##_name##_show, NULL); 28 zfcp_sysfs_##_feat##_##_name##_show, NULL);
28 29
29ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, status, "0x%08x\n", 30#define ZFCP_DEFINE_A_ATTR(_name, _format, _value) \
30 atomic_read(&adapter->status)); 31static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, \
31ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_wwnn, "0x%016llx\n", 32 struct device_attribute *at,\
32 (unsigned long long) adapter->peer_wwnn); 33 char *buf) \
33ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_wwpn, "0x%016llx\n", 34{ \
34 (unsigned long long) adapter->peer_wwpn); 35 struct ccw_device *cdev = to_ccwdev(dev); \
35ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_d_id, "0x%06x\n", 36 struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev); \
36 adapter->peer_d_id); 37 int i; \
37ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, card_version, "0x%04x\n", 38 \
38 adapter->hydra_version); 39 if (!adapter) \
39ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, lic_version, "0x%08x\n", 40 return -ENODEV; \
40 adapter->fsf_lic_version); 41 \
41ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, hardware_version, "0x%08x\n", 42 i = sprintf(buf, _format, _value); \
42 adapter->hardware_version); 43 zfcp_ccw_adapter_put(adapter); \
43ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, in_recovery, "%d\n", 44 return i; \
44 (atomic_read(&adapter->status) & 45} \
45 ZFCP_STATUS_COMMON_ERP_INUSE) != 0); 46static ZFCP_DEV_ATTR(adapter, _name, S_IRUGO, \
47 zfcp_sysfs_adapter_##_name##_show, NULL);
48
49ZFCP_DEFINE_A_ATTR(status, "0x%08x\n", atomic_read(&adapter->status));
50ZFCP_DEFINE_A_ATTR(peer_wwnn, "0x%016llx\n",
51 (unsigned long long) adapter->peer_wwnn);
52ZFCP_DEFINE_A_ATTR(peer_wwpn, "0x%016llx\n",
53 (unsigned long long) adapter->peer_wwpn);
54ZFCP_DEFINE_A_ATTR(peer_d_id, "0x%06x\n", adapter->peer_d_id);
55ZFCP_DEFINE_A_ATTR(card_version, "0x%04x\n", adapter->hydra_version);
56ZFCP_DEFINE_A_ATTR(lic_version, "0x%08x\n", adapter->fsf_lic_version);
57ZFCP_DEFINE_A_ATTR(hardware_version, "0x%08x\n", adapter->hardware_version);
58ZFCP_DEFINE_A_ATTR(in_recovery, "%d\n", (atomic_read(&adapter->status) &
59 ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
46 60
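ZFCP_DEFINE_A_ATTR exists because adapter attributes now live on the ccw device rather than on a zfcp-owned device, so each show routine must translate the sysfs device back to the adapter and hold it across the sprintf(). An approximate expansion of ZFCP_DEFINE_A_ATTR(status, "0x%08x\n", atomic_read(&adapter->status)), using only names that appear in the hunk above:

static ssize_t zfcp_sysfs_adapter_status_show(struct device *dev,
					      struct device_attribute *at,
					      char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
	int i;

	if (!adapter)
		return -ENODEV;			/* adapter already torn down */

	i = sprintf(buf, "0x%08x\n", atomic_read(&adapter->status));
	zfcp_ccw_adapter_put(adapter);		/* drop the lookup reference */
	return i;
}
static ZFCP_DEV_ATTR(adapter, status, S_IRUGO,
		     zfcp_sysfs_adapter_status_show, NULL);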
47ZFCP_DEFINE_ATTR(zfcp_port, port, status, "0x%08x\n", 61ZFCP_DEFINE_ATTR(zfcp_port, port, status, "0x%08x\n",
48 atomic_read(&port->status)); 62 atomic_read(&port->status));
@@ -73,7 +87,7 @@ static ssize_t zfcp_sysfs_##_feat##_failed_show(struct device *dev, \
73 struct device_attribute *attr, \ 87 struct device_attribute *attr, \
74 char *buf) \ 88 char *buf) \
75{ \ 89{ \
76 struct _feat_def *_feat = dev_get_drvdata(dev); \ 90 struct _feat_def *_feat = container_of(dev, struct _feat_def, dev); \
77 \ 91 \
78 if (atomic_read(&_feat->status) & ZFCP_STATUS_COMMON_ERP_FAILED) \ 92 if (atomic_read(&_feat->status) & ZFCP_STATUS_COMMON_ERP_FAILED) \
79 return sprintf(buf, "1\n"); \ 93 return sprintf(buf, "1\n"); \
@@ -84,15 +98,12 @@ static ssize_t zfcp_sysfs_##_feat##_failed_store(struct device *dev, \
84 struct device_attribute *attr,\ 98 struct device_attribute *attr,\
85 const char *buf, size_t count)\ 99 const char *buf, size_t count)\
86{ \ 100{ \
87 struct _feat_def *_feat = dev_get_drvdata(dev); \ 101 struct _feat_def *_feat = container_of(dev, struct _feat_def, dev); \
88 unsigned long val; \ 102 unsigned long val; \
89 int retval = 0; \ 103 int retval = 0; \
90 \ 104 \
91 mutex_lock(&zfcp_data.config_mutex); \ 105 if (!(_feat && get_device(&_feat->dev))) \
92 if (atomic_read(&_feat->status) & ZFCP_STATUS_COMMON_REMOVE) { \ 106 return -EBUSY; \
93 retval = -EBUSY; \
94 goto out; \
95 } \
96 \ 107 \
97 if (strict_strtoul(buf, 0, &val) || val != 0) { \ 108 if (strict_strtoul(buf, 0, &val) || val != 0) { \
98 retval = -EINVAL; \ 109 retval = -EINVAL; \
@@ -105,29 +116,82 @@ static ssize_t zfcp_sysfs_##_feat##_failed_store(struct device *dev, \
105 _reopen_id, NULL); \ 116 _reopen_id, NULL); \
106 zfcp_erp_wait(_adapter); \ 117 zfcp_erp_wait(_adapter); \
107out: \ 118out: \
108 mutex_unlock(&zfcp_data.config_mutex); \ 119 put_device(&_feat->dev); \
109 return retval ? retval : (ssize_t) count; \ 120 return retval ? retval : (ssize_t) count; \
110} \ 121} \
111static ZFCP_DEV_ATTR(_feat, failed, S_IWUSR | S_IRUGO, \ 122static ZFCP_DEV_ATTR(_feat, failed, S_IWUSR | S_IRUGO, \
112 zfcp_sysfs_##_feat##_failed_show, \ 123 zfcp_sysfs_##_feat##_failed_show, \
113 zfcp_sysfs_##_feat##_failed_store); 124 zfcp_sysfs_##_feat##_failed_store);
114 125
115ZFCP_SYSFS_FAILED(zfcp_adapter, adapter, adapter, "syafai1", "syafai2");
116ZFCP_SYSFS_FAILED(zfcp_port, port, port->adapter, "sypfai1", "sypfai2"); 126ZFCP_SYSFS_FAILED(zfcp_port, port, port->adapter, "sypfai1", "sypfai2");
117ZFCP_SYSFS_FAILED(zfcp_unit, unit, unit->port->adapter, "syufai1", "syufai2"); 127ZFCP_SYSFS_FAILED(zfcp_unit, unit, unit->port->adapter, "syufai1", "syufai2");
118 128
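The ZFCP_SYSFS_FAILED store path no longer serializes on zfcp_data.config_mutex and the REMOVE status flag; instead it pins the object with get_device() for the duration of the write, releases it at the out label, and returns -EBUSY if the object cannot be pinned. The adapter instance of the macro is dropped here because the adapter is handled by the dedicated open-coded functions below. A rough expansion for the port case; the zfcp_erp_modify_port_status()/zfcp_erp_port_reopen() names are inferred from the macro arguments and the adapter variant rather than shown in this hunk:

static ssize_t zfcp_sysfs_port_failed_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
{
	struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
	unsigned long val;
	int retval = 0;

	if (!(port && get_device(&port->dev)))
		return -EBUSY;			/* port is being removed */

	if (strict_strtoul(buf, 0, &val) || val != 0) {
		retval = -EINVAL;
		goto out;
	}

	zfcp_erp_modify_port_status(port, "sypfai1", NULL,
				    ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);
	zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
			     "sypfai2", NULL);
	zfcp_erp_wait(port->adapter);
out:
	put_device(&port->dev);
	return retval ? retval : (ssize_t) count;
}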
129static ssize_t zfcp_sysfs_adapter_failed_show(struct device *dev,
130 struct device_attribute *attr,
131 char *buf)
132{
133 struct ccw_device *cdev = to_ccwdev(dev);
134 struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
135 int i;
136
137 if (!adapter)
138 return -ENODEV;
139
140 if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
141 i = sprintf(buf, "1\n");
142 else
143 i = sprintf(buf, "0\n");
144
145 zfcp_ccw_adapter_put(adapter);
146 return i;
147}
148
149static ssize_t zfcp_sysfs_adapter_failed_store(struct device *dev,
150 struct device_attribute *attr,
151 const char *buf, size_t count)
152{
153 struct ccw_device *cdev = to_ccwdev(dev);
154 struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
155 unsigned long val;
156 int retval = 0;
157
158 if (!adapter)
159 return -ENODEV;
160
161 if (strict_strtoul(buf, 0, &val) || val != 0) {
162 retval = -EINVAL;
163 goto out;
164 }
165
166 zfcp_erp_modify_adapter_status(adapter, "syafai1", NULL,
167 ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);
168 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
169 "syafai2", NULL);
170 zfcp_erp_wait(adapter);
171out:
172 zfcp_ccw_adapter_put(adapter);
173 return retval ? retval : (ssize_t) count;
174}
175static ZFCP_DEV_ATTR(adapter, failed, S_IWUSR | S_IRUGO,
176 zfcp_sysfs_adapter_failed_show,
177 zfcp_sysfs_adapter_failed_store);
178
119static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev, 179static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev,
120 struct device_attribute *attr, 180 struct device_attribute *attr,
121 const char *buf, size_t count) 181 const char *buf, size_t count)
122{ 182{
123 struct zfcp_adapter *adapter = dev_get_drvdata(dev); 183 struct ccw_device *cdev = to_ccwdev(dev);
124 int ret; 184 struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
125 185
126 if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_REMOVE) 186 if (!adapter)
127 return -EBUSY; 187 return -ENODEV;
128 188
129 ret = zfcp_fc_scan_ports(adapter); 189 /* sync the user-space- with the kernel-invocation of scan_work */
130 return ret ? ret : (ssize_t) count; 190 queue_work(adapter->work_queue, &adapter->scan_work);
191 flush_work(&adapter->scan_work);
192 zfcp_ccw_adapter_put(adapter);
193
194 return (ssize_t) count;
131} 195}
132static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL, 196static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL,
133 zfcp_sysfs_port_rescan_store); 197 zfcp_sysfs_port_rescan_store);
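port_rescan no longer calls zfcp_fc_scan_ports() synchronously in the writer's context; it queues the adapter's own scan_work and then waits for it, so a user-triggered rescan runs on the same adapter work queue as kernel-triggered scans and the write returns only after the scan has actually completed. A generic sketch of that synchronization, not specific to this driver:

static void trigger_and_wait(struct workqueue_struct *wq,
			     struct work_struct *work)
{
	queue_work(wq, work);	/* no-op if the item is already pending */
	flush_work(work);	/* block until the handler has finished */
}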
@@ -136,44 +200,34 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
136 struct device_attribute *attr, 200 struct device_attribute *attr,
137 const char *buf, size_t count) 201 const char *buf, size_t count)
138{ 202{
139 struct zfcp_adapter *adapter = dev_get_drvdata(dev); 203 struct ccw_device *cdev = to_ccwdev(dev);
204 struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
140 struct zfcp_port *port; 205 struct zfcp_port *port;
141 u64 wwpn; 206 u64 wwpn;
142 int retval = 0; 207 int retval = -EINVAL;
143 LIST_HEAD(port_remove_lh);
144 208
145 mutex_lock(&zfcp_data.config_mutex); 209 if (!adapter)
146 if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_REMOVE) { 210 return -ENODEV;
147 retval = -EBUSY;
148 goto out;
149 }
150 211
151 if (strict_strtoull(buf, 0, (unsigned long long *) &wwpn)) { 212 if (strict_strtoull(buf, 0, (unsigned long long *) &wwpn))
152 retval = -EINVAL;
153 goto out; 213 goto out;
154 }
155 214
156 write_lock_irq(&zfcp_data.config_lock);
157 port = zfcp_get_port_by_wwpn(adapter, wwpn); 215 port = zfcp_get_port_by_wwpn(adapter, wwpn);
158 if (port && (atomic_read(&port->refcount) == 0)) { 216 if (!port)
159 zfcp_port_get(port);
160 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
161 list_move(&port->list, &port_remove_lh);
162 } else
163 port = NULL;
164 write_unlock_irq(&zfcp_data.config_lock);
165
166 if (!port) {
167 retval = -ENXIO;
168 goto out; 217 goto out;
169 } 218 else
219 retval = 0;
220
221 write_lock_irq(&adapter->port_list_lock);
222 list_del(&port->list);
223 write_unlock_irq(&adapter->port_list_lock);
224
225 put_device(&port->dev);
170 226
171 zfcp_erp_port_shutdown(port, 0, "syprs_1", NULL); 227 zfcp_erp_port_shutdown(port, 0, "syprs_1", NULL);
172 zfcp_erp_wait(adapter); 228 zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs);
173 zfcp_port_put(port);
174 zfcp_port_dequeue(port);
175 out: 229 out:
176 mutex_unlock(&zfcp_data.config_mutex); 230 zfcp_ccw_adapter_put(adapter);
177 return retval ? retval : (ssize_t) count; 231 return retval ? retval : (ssize_t) count;
178} 232}
179static ZFCP_DEV_ATTR(adapter, port_remove, S_IWUSR, NULL, 233static ZFCP_DEV_ATTR(adapter, port_remove, S_IWUSR, NULL,
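port_remove now works against the adapter's own port_list instead of the config_lock-protected global list: the port found by zfcp_get_port_by_wwpn() is unlinked under port_list_lock, the lookup reference is dropped, and the port is then shut down through ERP and unregistered. A condensed sketch of that sequence; it assumes, as the put_device() right after list_del() implies, that zfcp_get_port_by_wwpn() returns the port with a reference held and that the registered device keeps the port alive until zfcp_device_unregister() releases it:

static int example_port_remove(struct zfcp_adapter *adapter, u64 wwpn)
{
	struct zfcp_port *port = zfcp_get_port_by_wwpn(adapter, wwpn);

	if (!port)
		return -EINVAL;

	write_lock_irq(&adapter->port_list_lock);
	list_del(&port->list);		/* new lookups no longer see the port */
	write_unlock_irq(&adapter->port_list_lock);

	put_device(&port->dev);		/* drop the lookup reference */

	zfcp_erp_port_shutdown(port, 0, "syprs_1", NULL);
	zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs);
	return 0;
}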
@@ -202,16 +256,13 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
202 struct device_attribute *attr, 256 struct device_attribute *attr,
203 const char *buf, size_t count) 257 const char *buf, size_t count)
204{ 258{
205 struct zfcp_port *port = dev_get_drvdata(dev); 259 struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
206 struct zfcp_unit *unit; 260 struct zfcp_unit *unit;
207 u64 fcp_lun; 261 u64 fcp_lun;
208 int retval = -EINVAL; 262 int retval = -EINVAL;
209 263
210 mutex_lock(&zfcp_data.config_mutex); 264 if (!(port && get_device(&port->dev)))
211 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE) { 265 return -EBUSY;
212 retval = -EBUSY;
213 goto out;
214 }
215 266
216 if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun)) 267 if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
217 goto out; 268 goto out;
@@ -219,15 +270,14 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
219 unit = zfcp_unit_enqueue(port, fcp_lun); 270 unit = zfcp_unit_enqueue(port, fcp_lun);
220 if (IS_ERR(unit)) 271 if (IS_ERR(unit))
221 goto out; 272 goto out;
222 273 else
223 retval = 0; 274 retval = 0;
224 275
225 zfcp_erp_unit_reopen(unit, 0, "syuas_1", NULL); 276 zfcp_erp_unit_reopen(unit, 0, "syuas_1", NULL);
226 zfcp_erp_wait(unit->port->adapter); 277 zfcp_erp_wait(unit->port->adapter);
227 flush_work(&unit->scsi_work); 278 flush_work(&unit->scsi_work);
228 zfcp_unit_put(unit);
229out: 279out:
230 mutex_unlock(&zfcp_data.config_mutex); 280 put_device(&port->dev);
231 return retval ? retval : (ssize_t) count; 281 return retval ? retval : (ssize_t) count;
232} 282}
233static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store); 283static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store);
@@ -236,54 +286,36 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
236 struct device_attribute *attr, 286 struct device_attribute *attr,
237 const char *buf, size_t count) 287 const char *buf, size_t count)
238{ 288{
239 struct zfcp_port *port = dev_get_drvdata(dev); 289 struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
240 struct zfcp_unit *unit; 290 struct zfcp_unit *unit;
241 u64 fcp_lun; 291 u64 fcp_lun;
242 int retval = 0; 292 int retval = -EINVAL;
243 LIST_HEAD(unit_remove_lh);
244 293
245 mutex_lock(&zfcp_data.config_mutex); 294 if (!(port && get_device(&port->dev)))
246 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE) { 295 return -EBUSY;
247 retval = -EBUSY;
248 goto out;
249 }
250 296
251 if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun)) { 297 if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
252 retval = -EINVAL;
253 goto out; 298 goto out;
254 }
255 299
256 write_lock_irq(&zfcp_data.config_lock);
257 unit = zfcp_get_unit_by_lun(port, fcp_lun); 300 unit = zfcp_get_unit_by_lun(port, fcp_lun);
258 if (unit) { 301 if (!unit)
259 write_unlock_irq(&zfcp_data.config_lock); 302 goto out;
260 /* wait for possible timeout during SCSI probe */ 303 else
261 flush_work(&unit->scsi_work); 304 retval = 0;
262 write_lock_irq(&zfcp_data.config_lock);
263
264 if (atomic_read(&unit->refcount) == 0) {
265 zfcp_unit_get(unit);
266 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE,
267 &unit->status);
268 list_move(&unit->list, &unit_remove_lh);
269 } else {
270 unit = NULL;
271 }
272 }
273 305
274 write_unlock_irq(&zfcp_data.config_lock); 306 /* wait for possible timeout during SCSI probe */
307 flush_work(&unit->scsi_work);
275 308
276 if (!unit) { 309 write_lock_irq(&port->unit_list_lock);
277 retval = -ENXIO; 310 list_del(&unit->list);
278 goto out; 311 write_unlock_irq(&port->unit_list_lock);
279 } 312
313 put_device(&unit->dev);
280 314
281 zfcp_erp_unit_shutdown(unit, 0, "syurs_1", NULL); 315 zfcp_erp_unit_shutdown(unit, 0, "syurs_1", NULL);
282 zfcp_erp_wait(unit->port->adapter); 316 zfcp_device_unregister(&unit->dev, &zfcp_sysfs_unit_attrs);
283 zfcp_unit_put(unit);
284 zfcp_unit_dequeue(unit);
285out: 317out:
286 mutex_unlock(&zfcp_data.config_mutex); 318 put_device(&port->dev);
287 return retval ? retval : (ssize_t) count; 319 return retval ? retval : (ssize_t) count;
288} 320}
289static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store); 321static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store);
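unit_remove follows the same shape as port_remove, with one extra step: the unit's scsi_work is flushed before the unit is unlinked, so a SCSI probe that may still be pending (for example, one delayed by a timeout) cannot race with the teardown. A condensed sketch of the ordering, using only calls visible in the hunk above:

static void example_unit_teardown(struct zfcp_port *port,
				  struct zfcp_unit *unit)
{
	flush_work(&unit->scsi_work);	/* wait for a possible SCSI probe timeout */

	write_lock_irq(&port->unit_list_lock);
	list_del(&unit->list);		/* hide the unit from new lookups */
	write_unlock_irq(&port->unit_list_lock);

	put_device(&unit->dev);		/* drop the lookup reference */

	zfcp_erp_unit_shutdown(unit, 0, "syurs_1", NULL);
	zfcp_erp_wait(unit->port->adapter);
	zfcp_device_unregister(&unit->dev, &zfcp_sysfs_unit_attrs);
}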