Diffstat (limited to 'drivers/s390')
52 files changed, 2049 insertions, 1564 deletions
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 1a84fae155e1..aa95f1001761 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c | |||
@@ -21,7 +21,6 @@ | |||
21 | #include <linux/hdreg.h> | 21 | #include <linux/hdreg.h> |
22 | #include <linux/async.h> | 22 | #include <linux/async.h> |
23 | #include <linux/mutex.h> | 23 | #include <linux/mutex.h> |
24 | #include <linux/smp_lock.h> | ||
25 | 24 | ||
26 | #include <asm/ccwdev.h> | 25 | #include <asm/ccwdev.h> |
27 | #include <asm/ebcdic.h> | 26 | #include <asm/ebcdic.h> |
@@ -1325,14 +1324,14 @@ static void __dasd_device_check_expire(struct dasd_device *device) | |||
1325 | if (device->discipline->term_IO(cqr) != 0) { | 1324 | if (device->discipline->term_IO(cqr) != 0) { |
1326 | /* Hmpf, try again in 5 sec */ | 1325 | /* Hmpf, try again in 5 sec */ |
1327 | dev_err(&device->cdev->dev, | 1326 | dev_err(&device->cdev->dev, |
1328 | "cqr %p timed out (%is) but cannot be " | 1327 | "cqr %p timed out (%lus) but cannot be " |
1329 | "ended, retrying in 5 s\n", | 1328 | "ended, retrying in 5 s\n", |
1330 | cqr, (cqr->expires/HZ)); | 1329 | cqr, (cqr->expires/HZ)); |
1331 | cqr->expires += 5*HZ; | 1330 | cqr->expires += 5*HZ; |
1332 | dasd_device_set_timer(device, 5*HZ); | 1331 | dasd_device_set_timer(device, 5*HZ); |
1333 | } else { | 1332 | } else { |
1334 | dev_err(&device->cdev->dev, | 1333 | dev_err(&device->cdev->dev, |
1335 | "cqr %p timed out (%is), %i retries " | 1334 | "cqr %p timed out (%lus), %i retries " |
1336 | "remaining\n", cqr, (cqr->expires/HZ), | 1335 | "remaining\n", cqr, (cqr->expires/HZ), |
1337 | cqr->retries); | 1336 | cqr->retries); |
1338 | } | 1337 | } |
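The format-string change above follows from cqr->expires holding a jiffies interval as an unsigned long, so cqr->expires/HZ is also unsigned long and %lu, not %i, is the matching printk conversion. A minimal illustration of the mismatch in ordinary C, with hypothetical values, that -Wformat would flag:

    unsigned long expires = 30 * 100;            /* 30 s at an assumed HZ of 100 */
    unsigned long hz = 100;

    printf("timed out (%lus)\n", expires / hz);  /* correct: unsigned long */
    /* printf("timed out (%is)\n", expires / hz);   -- warns: %i expects int */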
@@ -2197,7 +2196,6 @@ static void dasd_setup_queue(struct dasd_block *block) | |||
2197 | */ | 2196 | */ |
2198 | blk_queue_max_segment_size(block->request_queue, PAGE_SIZE); | 2197 | blk_queue_max_segment_size(block->request_queue, PAGE_SIZE); |
2199 | blk_queue_segment_boundary(block->request_queue, PAGE_SIZE - 1); | 2198 | blk_queue_segment_boundary(block->request_queue, PAGE_SIZE - 1); |
2200 | blk_queue_ordered(block->request_queue, QUEUE_ORDERED_DRAIN); | ||
2201 | } | 2199 | } |
2202 | 2200 | ||
2203 | /* | 2201 | /* |
@@ -2236,7 +2234,6 @@ static int dasd_open(struct block_device *bdev, fmode_t mode) | |||
2236 | if (!block) | 2234 | if (!block) |
2237 | return -ENODEV; | 2235 | return -ENODEV; |
2238 | 2236 | ||
2239 | lock_kernel(); | ||
2240 | base = block->base; | 2237 | base = block->base; |
2241 | atomic_inc(&block->open_count); | 2238 | atomic_inc(&block->open_count); |
2242 | if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) { | 2239 | if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) { |
@@ -2271,14 +2268,12 @@ static int dasd_open(struct block_device *bdev, fmode_t mode) | |||
2271 | goto out; | 2268 | goto out; |
2272 | } | 2269 | } |
2273 | 2270 | ||
2274 | unlock_kernel(); | ||
2275 | return 0; | 2271 | return 0; |
2276 | 2272 | ||
2277 | out: | 2273 | out: |
2278 | module_put(base->discipline->owner); | 2274 | module_put(base->discipline->owner); |
2279 | unlock: | 2275 | unlock: |
2280 | atomic_dec(&block->open_count); | 2276 | atomic_dec(&block->open_count); |
2281 | unlock_kernel(); | ||
2282 | return rc; | 2277 | return rc; |
2283 | } | 2278 | } |
2284 | 2279 | ||
@@ -2286,10 +2281,8 @@ static int dasd_release(struct gendisk *disk, fmode_t mode) | |||
2286 | { | 2281 | { |
2287 | struct dasd_block *block = disk->private_data; | 2282 | struct dasd_block *block = disk->private_data; |
2288 | 2283 | ||
2289 | lock_kernel(); | ||
2290 | atomic_dec(&block->open_count); | 2284 | atomic_dec(&block->open_count); |
2291 | module_put(block->base->discipline->owner); | 2285 | module_put(block->base->discipline->owner); |
2292 | unlock_kernel(); | ||
2293 | return 0; | 2286 | return 0; |
2294 | } | 2287 | } |
2295 | 2288 | ||
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c index 7158f9528ecc..c71d89dba302 100644 --- a/drivers/s390/block/dasd_eer.c +++ b/drivers/s390/block/dasd_eer.c | |||
@@ -670,6 +670,7 @@ static const struct file_operations dasd_eer_fops = { | |||
670 | .read = &dasd_eer_read, | 670 | .read = &dasd_eer_read, |
671 | .poll = &dasd_eer_poll, | 671 | .poll = &dasd_eer_poll, |
672 | .owner = THIS_MODULE, | 672 | .owner = THIS_MODULE, |
673 | .llseek = noop_llseek, | ||
673 | }; | 674 | }; |
674 | 675 | ||
675 | static struct miscdevice *dasd_eer_dev = NULL; | 676 | static struct miscdevice *dasd_eer_dev = NULL; |
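Several drivers in this series gain an explicit .llseek operation so they no longer depend on the kernel-wide default seek behaviour while that default is being reworked as part of the BKL removal: noop_llseek accepts lseek() but leaves the file position untouched, while no_llseek rejects it with -ESPIPE. A minimal sketch of a character device picking one, using a hypothetical driver rather than anything from this patch:

    #include <linux/fs.h>
    #include <linux/miscdevice.h>
    #include <linux/module.h>

    static ssize_t demo_read(struct file *file, char __user *buf,
                             size_t count, loff_t *ppos)
    {
            return 0;                        /* always EOF in this sketch */
    }

    static const struct file_operations demo_fops = {
            .owner  = THIS_MODULE,
            .read   = demo_read,
            .llseek = noop_llseek,           /* seeking "succeeds" but is a no-op */
            /* .llseek = no_llseek would make lseek() fail with -ESPIPE */
    };

    static struct miscdevice demo_dev = {
            .minor = MISC_DYNAMIC_MINOR,
            .name  = "demo",
            .fops  = &demo_fops,
    };

Files that produce seq_file output, such as the qdio debugfs entry later in this series, keep seq_lseek instead.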
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c index 1557214944f7..26075e95b1ba 100644 --- a/drivers/s390/block/dasd_ioctl.c +++ b/drivers/s390/block/dasd_ioctl.c | |||
@@ -16,7 +16,6 @@ | |||
16 | #include <linux/major.h> | 16 | #include <linux/major.h> |
17 | #include <linux/fs.h> | 17 | #include <linux/fs.h> |
18 | #include <linux/blkpg.h> | 18 | #include <linux/blkpg.h> |
19 | #include <linux/smp_lock.h> | ||
20 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
21 | #include <asm/compat.h> | 20 | #include <asm/compat.h> |
22 | #include <asm/ccwdev.h> | 21 | #include <asm/ccwdev.h> |
@@ -370,9 +369,8 @@ static int dasd_ioctl_readall_cmb(struct dasd_block *block, unsigned int cmd, | |||
370 | return ret; | 369 | return ret; |
371 | } | 370 | } |
372 | 371 | ||
373 | static int | 372 | int dasd_ioctl(struct block_device *bdev, fmode_t mode, |
374 | dasd_do_ioctl(struct block_device *bdev, fmode_t mode, | 373 | unsigned int cmd, unsigned long arg) |
375 | unsigned int cmd, unsigned long arg) | ||
376 | { | 374 | { |
377 | struct dasd_block *block = bdev->bd_disk->private_data; | 375 | struct dasd_block *block = bdev->bd_disk->private_data; |
378 | void __user *argp; | 376 | void __user *argp; |
@@ -430,14 +428,3 @@ dasd_do_ioctl(struct block_device *bdev, fmode_t mode, | |||
430 | return -EINVAL; | 428 | return -EINVAL; |
431 | } | 429 | } |
432 | } | 430 | } |
433 | |||
434 | int dasd_ioctl(struct block_device *bdev, fmode_t mode, | ||
435 | unsigned int cmd, unsigned long arg) | ||
436 | { | ||
437 | int rc; | ||
438 | |||
439 | lock_kernel(); | ||
440 | rc = dasd_do_ioctl(bdev, mode, cmd, arg); | ||
441 | unlock_kernel(); | ||
442 | return rc; | ||
443 | } | ||
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 2bd72aa34c59..9b43ae94beba 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c | |||
@@ -14,7 +14,6 @@ | |||
14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
16 | #include <linux/blkdev.h> | 16 | #include <linux/blkdev.h> |
17 | #include <linux/smp_lock.h> | ||
18 | #include <linux/completion.h> | 17 | #include <linux/completion.h> |
19 | #include <linux/interrupt.h> | 18 | #include <linux/interrupt.h> |
20 | #include <linux/platform_device.h> | 19 | #include <linux/platform_device.h> |
@@ -776,7 +775,6 @@ dcssblk_open(struct block_device *bdev, fmode_t mode) | |||
776 | struct dcssblk_dev_info *dev_info; | 775 | struct dcssblk_dev_info *dev_info; |
777 | int rc; | 776 | int rc; |
778 | 777 | ||
779 | lock_kernel(); | ||
780 | dev_info = bdev->bd_disk->private_data; | 778 | dev_info = bdev->bd_disk->private_data; |
781 | if (NULL == dev_info) { | 779 | if (NULL == dev_info) { |
782 | rc = -ENODEV; | 780 | rc = -ENODEV; |
@@ -786,7 +784,6 @@ dcssblk_open(struct block_device *bdev, fmode_t mode) | |||
786 | bdev->bd_block_size = 4096; | 784 | bdev->bd_block_size = 4096; |
787 | rc = 0; | 785 | rc = 0; |
788 | out: | 786 | out: |
789 | unlock_kernel(); | ||
790 | return rc; | 787 | return rc; |
791 | } | 788 | } |
792 | 789 | ||
@@ -797,7 +794,6 @@ dcssblk_release(struct gendisk *disk, fmode_t mode) | |||
797 | struct segment_info *entry; | 794 | struct segment_info *entry; |
798 | int rc; | 795 | int rc; |
799 | 796 | ||
800 | lock_kernel(); | ||
801 | if (!dev_info) { | 797 | if (!dev_info) { |
802 | rc = -ENODEV; | 798 | rc = -ENODEV; |
803 | goto out; | 799 | goto out; |
@@ -815,7 +811,6 @@ dcssblk_release(struct gendisk *disk, fmode_t mode) | |||
815 | up_write(&dcssblk_devices_sem); | 811 | up_write(&dcssblk_devices_sem); |
816 | rc = 0; | 812 | rc = 0; |
817 | out: | 813 | out: |
818 | unlock_kernel(); | ||
819 | return rc; | 814 | return rc; |
820 | } | 815 | } |
821 | 816 | ||
diff --git a/drivers/s390/char/ctrlchar.c b/drivers/s390/char/ctrlchar.c index c6cbcb3f925e..0e9a309b9669 100644 --- a/drivers/s390/char/ctrlchar.c +++ b/drivers/s390/char/ctrlchar.c | |||
@@ -16,12 +16,11 @@ | |||
16 | 16 | ||
17 | #ifdef CONFIG_MAGIC_SYSRQ | 17 | #ifdef CONFIG_MAGIC_SYSRQ |
18 | static int ctrlchar_sysrq_key; | 18 | static int ctrlchar_sysrq_key; |
19 | static struct tty_struct *sysrq_tty; | ||
20 | 19 | ||
21 | static void | 20 | static void |
22 | ctrlchar_handle_sysrq(struct work_struct *work) | 21 | ctrlchar_handle_sysrq(struct work_struct *work) |
23 | { | 22 | { |
24 | handle_sysrq(ctrlchar_sysrq_key, sysrq_tty); | 23 | handle_sysrq(ctrlchar_sysrq_key); |
25 | } | 24 | } |
26 | 25 | ||
27 | static DECLARE_WORK(ctrlchar_work, ctrlchar_handle_sysrq); | 26 | static DECLARE_WORK(ctrlchar_work, ctrlchar_handle_sysrq); |
@@ -54,7 +53,6 @@ ctrlchar_handle(const unsigned char *buf, int len, struct tty_struct *tty) | |||
54 | /* racy */ | 53 | /* racy */ |
55 | if (len == 3 && buf[1] == '-') { | 54 | if (len == 3 && buf[1] == '-') { |
56 | ctrlchar_sysrq_key = buf[2]; | 55 | ctrlchar_sysrq_key = buf[2]; |
57 | sysrq_tty = tty; | ||
58 | schedule_work(&ctrlchar_work); | 56 | schedule_work(&ctrlchar_work); |
59 | return CTRLCHAR_SYSRQ; | 57 | return CTRLCHAR_SYSRQ; |
60 | } | 58 | } |
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c index 857dfcb7b359..eb28fb01a38a 100644 --- a/drivers/s390/char/fs3270.c +++ b/drivers/s390/char/fs3270.c | |||
@@ -520,6 +520,7 @@ static const struct file_operations fs3270_fops = { | |||
520 | .compat_ioctl = fs3270_ioctl, /* ioctl */ | 520 | .compat_ioctl = fs3270_ioctl, /* ioctl */ |
521 | .open = fs3270_open, /* open */ | 521 | .open = fs3270_open, /* open */ |
522 | .release = fs3270_close, /* release */ | 522 | .release = fs3270_close, /* release */ |
523 | .llseek = no_llseek, | ||
523 | }; | 524 | }; |
524 | 525 | ||
525 | /* | 526 | /* |
diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c index 18d9a497863b..8cd58e412b5e 100644 --- a/drivers/s390/char/keyboard.c +++ b/drivers/s390/char/keyboard.c | |||
@@ -305,7 +305,7 @@ kbd_keycode(struct kbd_data *kbd, unsigned int keycode) | |||
305 | if (kbd->sysrq) { | 305 | if (kbd->sysrq) { |
306 | if (kbd->sysrq == K(KT_LATIN, '-')) { | 306 | if (kbd->sysrq == K(KT_LATIN, '-')) { |
307 | kbd->sysrq = 0; | 307 | kbd->sysrq = 0; |
308 | handle_sysrq(value, kbd->tty); | 308 | handle_sysrq(value); |
309 | return; | 309 | return; |
310 | } | 310 | } |
311 | if (value == '-') { | 311 | if (value == '-') { |
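Both SysRq entry points here follow the new handle_sysrq() prototype, which dropped the tty argument in this kernel series, so the drivers no longer need to remember which tty the magic sequence arrived on. The call shape, assuming the updated declaration in include/linux/sysrq.h:

    #include <linux/sysrq.h>

    static void demo_emergency_sync(void)
    {
            handle_sysrq('s');      /* key only; no tty argument any more */
    }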
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c index e021ec663ef9..5b8b8592d311 100644 --- a/drivers/s390/char/monreader.c +++ b/drivers/s390/char/monreader.c | |||
@@ -447,6 +447,7 @@ static const struct file_operations mon_fops = { | |||
447 | .release = &mon_close, | 447 | .release = &mon_close, |
448 | .read = &mon_read, | 448 | .read = &mon_read, |
449 | .poll = &mon_poll, | 449 | .poll = &mon_poll, |
450 | .llseek = noop_llseek, | ||
450 | }; | 451 | }; |
451 | 452 | ||
452 | static struct miscdevice mon_dev = { | 453 | static struct miscdevice mon_dev = { |
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c index 572a1e7fd099..e0702d3ea33b 100644 --- a/drivers/s390/char/monwriter.c +++ b/drivers/s390/char/monwriter.c | |||
@@ -274,6 +274,7 @@ static const struct file_operations monwrite_fops = { | |||
274 | .open = &monwrite_open, | 274 | .open = &monwrite_open, |
275 | .release = &monwrite_close, | 275 | .release = &monwrite_close, |
276 | .write = &monwrite_write, | 276 | .write = &monwrite_write, |
277 | .llseek = noop_llseek, | ||
277 | }; | 278 | }; |
278 | 279 | ||
279 | static struct miscdevice mon_dev = { | 280 | static struct miscdevice mon_dev = { |
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c index f6d72e1f2a38..5707a80b96b6 100644 --- a/drivers/s390/char/sclp.c +++ b/drivers/s390/char/sclp.c | |||
@@ -468,7 +468,7 @@ sclp_sync_wait(void) | |||
468 | cr0_sync &= 0xffff00a0; | 468 | cr0_sync &= 0xffff00a0; |
469 | cr0_sync |= 0x00000200; | 469 | cr0_sync |= 0x00000200; |
470 | __ctl_load(cr0_sync, 0, 0); | 470 | __ctl_load(cr0_sync, 0, 0); |
471 | __raw_local_irq_stosm(0x01); | 471 | __arch_local_irq_stosm(0x01); |
472 | /* Loop until driver state indicates finished request */ | 472 | /* Loop until driver state indicates finished request */ |
473 | while (sclp_running_state != sclp_running_state_idle) { | 473 | while (sclp_running_state != sclp_running_state_idle) { |
474 | /* Check for expired request timer */ | 474 | /* Check for expired request timer */ |
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c index b7de02525ec9..f0fa9ca5cb2c 100644 --- a/drivers/s390/char/tape_block.c +++ b/drivers/s390/char/tape_block.c | |||
@@ -16,7 +16,7 @@ | |||
16 | #include <linux/fs.h> | 16 | #include <linux/fs.h> |
17 | #include <linux/module.h> | 17 | #include <linux/module.h> |
18 | #include <linux/blkdev.h> | 18 | #include <linux/blkdev.h> |
19 | #include <linux/smp_lock.h> | 19 | #include <linux/mutex.h> |
20 | #include <linux/interrupt.h> | 20 | #include <linux/interrupt.h> |
21 | #include <linux/buffer_head.h> | 21 | #include <linux/buffer_head.h> |
22 | #include <linux/kernel.h> | 22 | #include <linux/kernel.h> |
@@ -45,6 +45,7 @@ | |||
45 | /* | 45 | /* |
46 | * file operation structure for tape block frontend | 46 | * file operation structure for tape block frontend |
47 | */ | 47 | */ |
48 | static DEFINE_MUTEX(tape_block_mutex); | ||
48 | static int tapeblock_open(struct block_device *, fmode_t); | 49 | static int tapeblock_open(struct block_device *, fmode_t); |
49 | static int tapeblock_release(struct gendisk *, fmode_t); | 50 | static int tapeblock_release(struct gendisk *, fmode_t); |
50 | static int tapeblock_medium_changed(struct gendisk *); | 51 | static int tapeblock_medium_changed(struct gendisk *); |
@@ -217,8 +218,7 @@ tapeblock_setup_device(struct tape_device * device) | |||
217 | if (!blkdat->request_queue) | 218 | if (!blkdat->request_queue) |
218 | return -ENOMEM; | 219 | return -ENOMEM; |
219 | 220 | ||
220 | elevator_exit(blkdat->request_queue->elevator); | 221 | rc = elevator_change(blkdat->request_queue, "noop"); |
221 | rc = elevator_init(blkdat->request_queue, "noop"); | ||
222 | if (rc) | 222 | if (rc) |
223 | goto cleanup_queue; | 223 | goto cleanup_queue; |
224 | 224 | ||
@@ -362,7 +362,7 @@ tapeblock_open(struct block_device *bdev, fmode_t mode) | |||
362 | struct tape_device * device; | 362 | struct tape_device * device; |
363 | int rc; | 363 | int rc; |
364 | 364 | ||
365 | lock_kernel(); | 365 | mutex_lock(&tape_block_mutex); |
366 | device = tape_get_device(disk->private_data); | 366 | device = tape_get_device(disk->private_data); |
367 | 367 | ||
368 | if (device->required_tapemarks) { | 368 | if (device->required_tapemarks) { |
@@ -386,14 +386,14 @@ tapeblock_open(struct block_device *bdev, fmode_t mode) | |||
386 | * is called. | 386 | * is called. |
387 | */ | 387 | */ |
388 | tape_state_set(device, TS_BLKUSE); | 388 | tape_state_set(device, TS_BLKUSE); |
389 | unlock_kernel(); | 389 | mutex_unlock(&tape_block_mutex); |
390 | return 0; | 390 | return 0; |
391 | 391 | ||
392 | release: | 392 | release: |
393 | tape_release(device); | 393 | tape_release(device); |
394 | put_device: | 394 | put_device: |
395 | tape_put_device(device); | 395 | tape_put_device(device); |
396 | unlock_kernel(); | 396 | mutex_unlock(&tape_block_mutex); |
397 | return rc; | 397 | return rc; |
398 | } | 398 | } |
399 | 399 | ||
@@ -408,11 +408,11 @@ tapeblock_release(struct gendisk *disk, fmode_t mode) | |||
408 | { | 408 | { |
409 | struct tape_device *device = disk->private_data; | 409 | struct tape_device *device = disk->private_data; |
410 | 410 | ||
411 | lock_kernel(); | 411 | mutex_lock(&tape_block_mutex); |
412 | tape_state_set(device, TS_IN_USE); | 412 | tape_state_set(device, TS_IN_USE); |
413 | tape_release(device); | 413 | tape_release(device); |
414 | tape_put_device(device); | 414 | tape_put_device(device); |
415 | unlock_kernel(); | 415 | mutex_unlock(&tape_block_mutex); |
416 | 416 | ||
417 | return 0; | 417 | return 0; |
418 | } | 418 | } |
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c index 539045acaad4..883e2db02bd3 100644 --- a/drivers/s390/char/tape_char.c +++ b/drivers/s390/char/tape_char.c | |||
@@ -53,6 +53,7 @@ static const struct file_operations tape_fops = | |||
53 | #endif | 53 | #endif |
54 | .open = tapechar_open, | 54 | .open = tapechar_open, |
55 | .release = tapechar_release, | 55 | .release = tapechar_release, |
56 | .llseek = no_llseek, | ||
56 | }; | 57 | }; |
57 | 58 | ||
58 | static int tapechar_major = TAPECHAR_MAJOR; | 59 | static int tapechar_major = TAPECHAR_MAJOR; |
diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c index 19a14d9b80c1..31a3ccbb6495 100644 --- a/drivers/s390/char/vmcp.c +++ b/drivers/s390/char/vmcp.c | |||
@@ -177,6 +177,7 @@ static const struct file_operations vmcp_fops = { | |||
177 | .write = vmcp_write, | 177 | .write = vmcp_write, |
178 | .unlocked_ioctl = vmcp_ioctl, | 178 | .unlocked_ioctl = vmcp_ioctl, |
179 | .compat_ioctl = vmcp_ioctl, | 179 | .compat_ioctl = vmcp_ioctl, |
180 | .llseek = no_llseek, | ||
180 | }; | 181 | }; |
181 | 182 | ||
182 | static struct miscdevice vmcp_dev = { | 183 | static struct miscdevice vmcp_dev = { |
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c index e40a1b892866..0d6dc4b92cc2 100644 --- a/drivers/s390/char/vmlogrdr.c +++ b/drivers/s390/char/vmlogrdr.c | |||
@@ -97,6 +97,7 @@ static const struct file_operations vmlogrdr_fops = { | |||
97 | .open = vmlogrdr_open, | 97 | .open = vmlogrdr_open, |
98 | .release = vmlogrdr_release, | 98 | .release = vmlogrdr_release, |
99 | .read = vmlogrdr_read, | 99 | .read = vmlogrdr_read, |
100 | .llseek = no_llseek, | ||
100 | }; | 101 | }; |
101 | 102 | ||
102 | 103 | ||
diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c index e13508c98b1a..12ef9121d4f0 100644 --- a/drivers/s390/char/vmwatchdog.c +++ b/drivers/s390/char/vmwatchdog.c | |||
@@ -297,6 +297,7 @@ static const struct file_operations vmwdt_fops = { | |||
297 | .unlocked_ioctl = &vmwdt_ioctl, | 297 | .unlocked_ioctl = &vmwdt_ioctl, |
298 | .write = &vmwdt_write, | 298 | .write = &vmwdt_write, |
299 | .owner = THIS_MODULE, | 299 | .owner = THIS_MODULE, |
300 | .llseek = noop_llseek, | ||
300 | }; | 301 | }; |
301 | 302 | ||
302 | static struct miscdevice vmwdt_dev = { | 303 | static struct miscdevice vmwdt_dev = { |
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c index f5ea3384a4b9..3b94044027c2 100644 --- a/drivers/s390/char/zcore.c +++ b/drivers/s390/char/zcore.c | |||
@@ -459,6 +459,7 @@ static const struct file_operations zcore_memmap_fops = { | |||
459 | .read = zcore_memmap_read, | 459 | .read = zcore_memmap_read, |
460 | .open = zcore_memmap_open, | 460 | .open = zcore_memmap_open, |
461 | .release = zcore_memmap_release, | 461 | .release = zcore_memmap_release, |
462 | .llseek = no_llseek, | ||
462 | }; | 463 | }; |
463 | 464 | ||
464 | static ssize_t zcore_reipl_write(struct file *filp, const char __user *buf, | 465 | static ssize_t zcore_reipl_write(struct file *filp, const char __user *buf, |
@@ -486,6 +487,7 @@ static const struct file_operations zcore_reipl_fops = { | |||
486 | .write = zcore_reipl_write, | 487 | .write = zcore_reipl_write, |
487 | .open = zcore_reipl_open, | 488 | .open = zcore_reipl_open, |
488 | .release = zcore_reipl_release, | 489 | .release = zcore_reipl_release, |
490 | .llseek = no_llseek, | ||
489 | }; | 491 | }; |
490 | 492 | ||
491 | #ifdef CONFIG_32BIT | 493 | #ifdef CONFIG_32BIT |
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c index a83877c664a6..f2b77e7bfc6f 100644 --- a/drivers/s390/cio/chsc_sch.c +++ b/drivers/s390/cio/chsc_sch.c | |||
@@ -806,6 +806,7 @@ static const struct file_operations chsc_fops = { | |||
806 | .open = nonseekable_open, | 806 | .open = nonseekable_open, |
807 | .unlocked_ioctl = chsc_ioctl, | 807 | .unlocked_ioctl = chsc_ioctl, |
808 | .compat_ioctl = chsc_ioctl, | 808 | .compat_ioctl = chsc_ioctl, |
809 | .llseek = no_llseek, | ||
809 | }; | 810 | }; |
810 | 811 | ||
811 | static struct miscdevice chsc_misc_device = { | 812 | static struct miscdevice chsc_misc_device = { |
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index ac94ac751459..ca8e1c240c3c 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c | |||
@@ -1067,6 +1067,7 @@ static ssize_t cio_settle_write(struct file *file, const char __user *buf, | |||
1067 | static const struct file_operations cio_settle_proc_fops = { | 1067 | static const struct file_operations cio_settle_proc_fops = { |
1068 | .open = nonseekable_open, | 1068 | .open = nonseekable_open, |
1069 | .write = cio_settle_write, | 1069 | .write = cio_settle_write, |
1070 | .llseek = no_llseek, | ||
1070 | }; | 1071 | }; |
1071 | 1072 | ||
1072 | static int __init cio_settle_init(void) | 1073 | static int __init cio_settle_init(void) |
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h index f0037eefd44e..0f4ef8769a3d 100644 --- a/drivers/s390/cio/qdio.h +++ b/drivers/s390/cio/qdio.h | |||
@@ -208,6 +208,7 @@ struct qdio_dev_perf_stat { | |||
208 | unsigned int eqbs_partial; | 208 | unsigned int eqbs_partial; |
209 | unsigned int sqbs; | 209 | unsigned int sqbs; |
210 | unsigned int sqbs_partial; | 210 | unsigned int sqbs_partial; |
211 | unsigned int int_discarded; | ||
211 | } ____cacheline_aligned; | 212 | } ____cacheline_aligned; |
212 | 213 | ||
213 | struct qdio_queue_perf_stat { | 214 | struct qdio_queue_perf_stat { |
@@ -222,6 +223,10 @@ struct qdio_queue_perf_stat { | |||
222 | unsigned int nr_sbal_total; | 223 | unsigned int nr_sbal_total; |
223 | }; | 224 | }; |
224 | 225 | ||
226 | enum qdio_queue_irq_states { | ||
227 | QDIO_QUEUE_IRQS_DISABLED, | ||
228 | }; | ||
229 | |||
225 | struct qdio_input_q { | 230 | struct qdio_input_q { |
226 | /* input buffer acknowledgement flag */ | 231 | /* input buffer acknowledgement flag */ |
227 | int polling; | 232 | int polling; |
@@ -231,6 +236,10 @@ struct qdio_input_q { | |||
231 | int ack_count; | 236 | int ack_count; |
232 | /* last time of noticing incoming data */ | 237 | /* last time of noticing incoming data */ |
233 | u64 timestamp; | 238 | u64 timestamp; |
239 | /* upper-layer polling flag */ | ||
240 | unsigned long queue_irq_state; | ||
241 | /* callback to start upper-layer polling */ | ||
242 | void (*queue_start_poll) (struct ccw_device *, int, unsigned long); | ||
234 | }; | 243 | }; |
235 | 244 | ||
236 | struct qdio_output_q { | 245 | struct qdio_output_q { |
@@ -399,6 +408,26 @@ static inline int multicast_outbound(struct qdio_q *q) | |||
399 | #define sub_buf(bufnr, dec) \ | 408 | #define sub_buf(bufnr, dec) \ |
400 | ((bufnr - dec) & QDIO_MAX_BUFFERS_MASK) | 409 | ((bufnr - dec) & QDIO_MAX_BUFFERS_MASK) |
401 | 410 | ||
411 | #define queue_irqs_enabled(q) \ | ||
412 | (test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) == 0) | ||
413 | #define queue_irqs_disabled(q) \ | ||
414 | (test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) != 0) | ||
415 | |||
416 | #define TIQDIO_SHARED_IND 63 | ||
417 | |||
418 | /* device state change indicators */ | ||
419 | struct indicator_t { | ||
420 | u32 ind; /* u32 because of compare-and-swap performance */ | ||
421 | atomic_t count; /* use count, 0 or 1 for non-shared indicators */ | ||
422 | }; | ||
423 | |||
424 | extern struct indicator_t *q_indicators; | ||
425 | |||
426 | static inline int shared_ind(struct qdio_irq *irq_ptr) | ||
427 | { | ||
428 | return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind; | ||
429 | } | ||
430 | |||
402 | /* prototypes for thin interrupt */ | 431 | /* prototypes for thin interrupt */ |
403 | void qdio_setup_thinint(struct qdio_irq *irq_ptr); | 432 | void qdio_setup_thinint(struct qdio_irq *irq_ptr); |
404 | int qdio_establish_thinint(struct qdio_irq *irq_ptr); | 433 | int qdio_establish_thinint(struct qdio_irq *irq_ptr); |
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c index 6ce83f56d537..28868e7471a5 100644 --- a/drivers/s390/cio/qdio_debug.c +++ b/drivers/s390/cio/qdio_debug.c | |||
@@ -56,9 +56,16 @@ static int qstat_show(struct seq_file *m, void *v) | |||
56 | 56 | ||
57 | seq_printf(m, "DSCI: %d nr_used: %d\n", | 57 | seq_printf(m, "DSCI: %d nr_used: %d\n", |
58 | *(u32 *)q->irq_ptr->dsci, atomic_read(&q->nr_buf_used)); | 58 | *(u32 *)q->irq_ptr->dsci, atomic_read(&q->nr_buf_used)); |
59 | seq_printf(m, "ftc: %d last_move: %d\n", q->first_to_check, q->last_move); | 59 | seq_printf(m, "ftc: %d last_move: %d\n", |
60 | seq_printf(m, "polling: %d ack start: %d ack count: %d\n", | 60 | q->first_to_check, q->last_move); |
61 | q->u.in.polling, q->u.in.ack_start, q->u.in.ack_count); | 61 | if (q->is_input_q) { |
62 | seq_printf(m, "polling: %d ack start: %d ack count: %d\n", | ||
63 | q->u.in.polling, q->u.in.ack_start, | ||
64 | q->u.in.ack_count); | ||
65 | seq_printf(m, "IRQs disabled: %u\n", | ||
66 | test_bit(QDIO_QUEUE_IRQS_DISABLED, | ||
67 | &q->u.in.queue_irq_state)); | ||
68 | } | ||
62 | seq_printf(m, "SBAL states:\n"); | 69 | seq_printf(m, "SBAL states:\n"); |
63 | seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n"); | 70 | seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n"); |
64 | 71 | ||
@@ -113,22 +120,6 @@ static int qstat_show(struct seq_file *m, void *v) | |||
113 | return 0; | 120 | return 0; |
114 | } | 121 | } |
115 | 122 | ||
116 | static ssize_t qstat_seq_write(struct file *file, const char __user *buf, | ||
117 | size_t count, loff_t *off) | ||
118 | { | ||
119 | struct seq_file *seq = file->private_data; | ||
120 | struct qdio_q *q = seq->private; | ||
121 | |||
122 | if (!q) | ||
123 | return 0; | ||
124 | if (q->is_input_q) | ||
125 | xchg(q->irq_ptr->dsci, 1); | ||
126 | local_bh_disable(); | ||
127 | tasklet_schedule(&q->tasklet); | ||
128 | local_bh_enable(); | ||
129 | return count; | ||
130 | } | ||
131 | |||
132 | static int qstat_seq_open(struct inode *inode, struct file *filp) | 123 | static int qstat_seq_open(struct inode *inode, struct file *filp) |
133 | { | 124 | { |
134 | return single_open(filp, qstat_show, | 125 | return single_open(filp, qstat_show, |
@@ -139,7 +130,6 @@ static const struct file_operations debugfs_fops = { | |||
139 | .owner = THIS_MODULE, | 130 | .owner = THIS_MODULE, |
140 | .open = qstat_seq_open, | 131 | .open = qstat_seq_open, |
141 | .read = seq_read, | 132 | .read = seq_read, |
142 | .write = qstat_seq_write, | ||
143 | .llseek = seq_lseek, | 133 | .llseek = seq_lseek, |
144 | .release = single_release, | 134 | .release = single_release, |
145 | }; | 135 | }; |
@@ -166,7 +156,8 @@ static char *qperf_names[] = { | |||
166 | "QEBSM eqbs", | 156 | "QEBSM eqbs", |
167 | "QEBSM eqbs partial", | 157 | "QEBSM eqbs partial", |
168 | "QEBSM sqbs", | 158 | "QEBSM sqbs", |
169 | "QEBSM sqbs partial" | 159 | "QEBSM sqbs partial", |
160 | "Discarded interrupts" | ||
170 | }; | 161 | }; |
171 | 162 | ||
172 | static int qperf_show(struct seq_file *m, void *v) | 163 | static int qperf_show(struct seq_file *m, void *v) |
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index 00520f9a7a8e..5fcfa7f9e9ef 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c | |||
@@ -884,8 +884,19 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr) | |||
884 | if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED)) | 884 | if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED)) |
885 | return; | 885 | return; |
886 | 886 | ||
887 | for_each_input_queue(irq_ptr, q, i) | 887 | for_each_input_queue(irq_ptr, q, i) { |
888 | tasklet_schedule(&q->tasklet); | 888 | if (q->u.in.queue_start_poll) { |
889 | /* skip if polling is enabled or already in work */ | ||
890 | if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED, | ||
891 | &q->u.in.queue_irq_state)) { | ||
892 | qperf_inc(q, int_discarded); | ||
893 | continue; | ||
894 | } | ||
895 | q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr, | ||
896 | q->irq_ptr->int_parm); | ||
897 | } else | ||
898 | tasklet_schedule(&q->tasklet); | ||
899 | } | ||
889 | 900 | ||
890 | if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)) | 901 | if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)) |
891 | return; | 902 | return; |
@@ -1519,6 +1530,129 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags, | |||
1519 | } | 1530 | } |
1520 | EXPORT_SYMBOL_GPL(do_QDIO); | 1531 | EXPORT_SYMBOL_GPL(do_QDIO); |
1521 | 1532 | ||
1533 | /** | ||
1534 | * qdio_start_irq - process input buffers | ||
1535 | * @cdev: associated ccw_device for the qdio subchannel | ||
1536 | * @nr: input queue number | ||
1537 | * | ||
1538 | * Return codes | ||
1539 | * 0 - success | ||
1540 | * 1 - irqs not started since new data is available | ||
1541 | */ | ||
1542 | int qdio_start_irq(struct ccw_device *cdev, int nr) | ||
1543 | { | ||
1544 | struct qdio_q *q; | ||
1545 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; | ||
1546 | |||
1547 | if (!irq_ptr) | ||
1548 | return -ENODEV; | ||
1549 | q = irq_ptr->input_qs[nr]; | ||
1550 | |||
1551 | WARN_ON(queue_irqs_enabled(q)); | ||
1552 | |||
1553 | if (!shared_ind(q->irq_ptr)) | ||
1554 | xchg(q->irq_ptr->dsci, 0); | ||
1555 | |||
1556 | qdio_stop_polling(q); | ||
1557 | clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state); | ||
1558 | |||
1559 | /* | ||
1560 | * We need to check again to not lose initiative after | ||
1561 | * resetting the ACK state. | ||
1562 | */ | ||
1563 | if (!shared_ind(q->irq_ptr) && *q->irq_ptr->dsci) | ||
1564 | goto rescan; | ||
1565 | if (!qdio_inbound_q_done(q)) | ||
1566 | goto rescan; | ||
1567 | return 0; | ||
1568 | |||
1569 | rescan: | ||
1570 | if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED, | ||
1571 | &q->u.in.queue_irq_state)) | ||
1572 | return 0; | ||
1573 | else | ||
1574 | return 1; | ||
1575 | |||
1576 | } | ||
1577 | EXPORT_SYMBOL(qdio_start_irq); | ||
1578 | |||
1579 | /** | ||
1580 | * qdio_get_next_buffers - process input buffers | ||
1581 | * @cdev: associated ccw_device for the qdio subchannel | ||
1582 | * @nr: input queue number | ||
1583 | * @bufnr: first filled buffer number | ||
1584 | * @error: buffers are in error state | ||
1585 | * | ||
1586 | * Return codes | ||
1587 | * < 0 - error | ||
1588 | * = 0 - no new buffers found | ||
1589 | * > 0 - number of processed buffers | ||
1590 | */ | ||
1591 | int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr, | ||
1592 | int *error) | ||
1593 | { | ||
1594 | struct qdio_q *q; | ||
1595 | int start, end; | ||
1596 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; | ||
1597 | |||
1598 | if (!irq_ptr) | ||
1599 | return -ENODEV; | ||
1600 | q = irq_ptr->input_qs[nr]; | ||
1601 | WARN_ON(queue_irqs_enabled(q)); | ||
1602 | |||
1603 | qdio_sync_after_thinint(q); | ||
1604 | |||
1605 | /* | ||
1606 | * The interrupt could be caused by a PCI request. Check the | ||
1607 | * PCI capable outbound queues. | ||
1608 | */ | ||
1609 | qdio_check_outbound_after_thinint(q); | ||
1610 | |||
1611 | if (!qdio_inbound_q_moved(q)) | ||
1612 | return 0; | ||
1613 | |||
1614 | /* Note: upper-layer MUST stop processing immediately here ... */ | ||
1615 | if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)) | ||
1616 | return -EIO; | ||
1617 | |||
1618 | start = q->first_to_kick; | ||
1619 | end = q->first_to_check; | ||
1620 | *bufnr = start; | ||
1621 | *error = q->qdio_error; | ||
1622 | |||
1623 | /* for the next time */ | ||
1624 | q->first_to_kick = end; | ||
1625 | q->qdio_error = 0; | ||
1626 | return sub_buf(end, start); | ||
1627 | } | ||
1628 | EXPORT_SYMBOL(qdio_get_next_buffers); | ||
1629 | |||
1630 | /** | ||
1631 | * qdio_stop_irq - disable interrupt processing for the device | ||
1632 | * @cdev: associated ccw_device for the qdio subchannel | ||
1633 | * @nr: input queue number | ||
1634 | * | ||
1635 | * Return codes | ||
1636 | * 0 - interrupts were already disabled | ||
1637 | * 1 - interrupts successfully disabled | ||
1638 | */ | ||
1639 | int qdio_stop_irq(struct ccw_device *cdev, int nr) | ||
1640 | { | ||
1641 | struct qdio_q *q; | ||
1642 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; | ||
1643 | |||
1644 | if (!irq_ptr) | ||
1645 | return -ENODEV; | ||
1646 | q = irq_ptr->input_qs[nr]; | ||
1647 | |||
1648 | if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED, | ||
1649 | &q->u.in.queue_irq_state)) | ||
1650 | return 0; | ||
1651 | else | ||
1652 | return 1; | ||
1653 | } | ||
1654 | EXPORT_SYMBOL(qdio_stop_irq); | ||
1655 | |||
1522 | static int __init init_QDIO(void) | 1656 | static int __init init_QDIO(void) |
1523 | { | 1657 | { |
1524 | int rc; | 1658 | int rc; |
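Together with the queue_start_poll hook wired up in qdio_setup.c and qdio_thinint.c below, the three new exports give an upper-layer driver (qeth is the intended user) a NAPI-style alternative to the tasklet path: the adapter interrupt invokes queue_start_poll once, further interrupts are discarded while QDIO_QUEUE_IRQS_DISABLED is set, and the upper layer drains buffers itself until qdio_start_irq() manages to re-arm the queue. A condensed sketch of how such a poll loop might use the interface; the demo_* names are hypothetical, the prototypes are assumed to come from the s390 qdio header, and error handling is trimmed:

    /* registered as the queue_start_poll callback via struct qdio_initialize */
    static void demo_queue_start_poll(struct ccw_device *cdev, int queue,
                                      unsigned long int_parm)
    {
            /* interrupts for this input queue are now off; wake our poller */
    }

    static int demo_poll_input_queue(struct ccw_device *cdev, int queue)
    {
            int bufnr, error, count;

            for (;;) {
                    count = qdio_get_next_buffers(cdev, queue, &bufnr, &error);
                    if (count < 0)
                            return count;       /* -ENODEV or -EIO */
                    if (count > 0) {
                            /* hand 'count' buffers starting at 'bufnr' upstream,
                             * then return them to the queue via do_QDIO() */
                            continue;
                    }
                    /* queue looks empty: try to give interrupts back to qdio */
                    if (qdio_start_irq(cdev, queue) == 0)
                            return 0;           /* re-armed, wait for next interrupt */
                    /* return code 1: new data raced in, keep polling */
            }
    }

qdio_stop_irq() is the matching call for switching a queue back to the interrupts-disabled state without waiting for an interrupt.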
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c index 34c7e4046df4..a13cf7ec64b2 100644 --- a/drivers/s390/cio/qdio_setup.c +++ b/drivers/s390/cio/qdio_setup.c | |||
@@ -161,6 +161,7 @@ static void setup_queues(struct qdio_irq *irq_ptr, | |||
161 | setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i); | 161 | setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i); |
162 | 162 | ||
163 | q->is_input_q = 1; | 163 | q->is_input_q = 1; |
164 | q->u.in.queue_start_poll = qdio_init->queue_start_poll; | ||
164 | setup_storage_lists(q, irq_ptr, input_sbal_array, i); | 165 | setup_storage_lists(q, irq_ptr, input_sbal_array, i); |
165 | input_sbal_array += QDIO_MAX_BUFFERS_PER_Q; | 166 | input_sbal_array += QDIO_MAX_BUFFERS_PER_Q; |
166 | 167 | ||
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c index 8daf1b99f153..752dbee06af5 100644 --- a/drivers/s390/cio/qdio_thinint.c +++ b/drivers/s390/cio/qdio_thinint.c | |||
@@ -25,24 +25,20 @@ | |||
25 | */ | 25 | */ |
26 | #define TIQDIO_NR_NONSHARED_IND 63 | 26 | #define TIQDIO_NR_NONSHARED_IND 63 |
27 | #define TIQDIO_NR_INDICATORS (TIQDIO_NR_NONSHARED_IND + 1) | 27 | #define TIQDIO_NR_INDICATORS (TIQDIO_NR_NONSHARED_IND + 1) |
28 | #define TIQDIO_SHARED_IND 63 | ||
29 | 28 | ||
30 | /* list of thin interrupt input queues */ | 29 | /* list of thin interrupt input queues */ |
31 | static LIST_HEAD(tiq_list); | 30 | static LIST_HEAD(tiq_list); |
32 | DEFINE_MUTEX(tiq_list_lock); | 31 | DEFINE_MUTEX(tiq_list_lock); |
33 | 32 | ||
34 | /* adapter local summary indicator */ | 33 | /* adapter local summary indicator */ |
35 | static unsigned char *tiqdio_alsi; | 34 | static u8 *tiqdio_alsi; |
36 | 35 | ||
37 | /* device state change indicators */ | 36 | struct indicator_t *q_indicators; |
38 | struct indicator_t { | ||
39 | u32 ind; /* u32 because of compare-and-swap performance */ | ||
40 | atomic_t count; /* use count, 0 or 1 for non-shared indicators */ | ||
41 | }; | ||
42 | static struct indicator_t *q_indicators; | ||
43 | 37 | ||
44 | static int css_qdio_omit_svs; | 38 | static int css_qdio_omit_svs; |
45 | 39 | ||
40 | static u64 last_ai_time; | ||
41 | |||
46 | static inline unsigned long do_clear_global_summary(void) | 42 | static inline unsigned long do_clear_global_summary(void) |
47 | { | 43 | { |
48 | register unsigned long __fn asm("1") = 3; | 44 | register unsigned long __fn asm("1") = 3; |
@@ -116,59 +112,73 @@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr) | |||
116 | } | 112 | } |
117 | } | 113 | } |
118 | 114 | ||
119 | static inline int shared_ind(struct qdio_irq *irq_ptr) | 115 | static inline int shared_ind_used(void) |
120 | { | 116 | { |
121 | return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind; | 117 | return atomic_read(&q_indicators[TIQDIO_SHARED_IND].count); |
122 | } | 118 | } |
123 | 119 | ||
124 | /** | 120 | /** |
125 | * tiqdio_thinint_handler - thin interrupt handler for qdio | 121 | * tiqdio_thinint_handler - thin interrupt handler for qdio |
126 | * @ind: pointer to adapter local summary indicator | 122 | * @alsi: pointer to adapter local summary indicator |
127 | * @drv_data: NULL | 123 | * @data: NULL |
128 | */ | 124 | */ |
129 | static void tiqdio_thinint_handler(void *ind, void *drv_data) | 125 | static void tiqdio_thinint_handler(void *alsi, void *data) |
130 | { | 126 | { |
131 | struct qdio_q *q; | 127 | struct qdio_q *q; |
132 | 128 | ||
129 | last_ai_time = S390_lowcore.int_clock; | ||
130 | |||
133 | /* | 131 | /* |
134 | * SVS only when needed: issue SVS to benefit from iqdio interrupt | 132 | * SVS only when needed: issue SVS to benefit from iqdio interrupt |
135 | * avoidance (SVS clears adapter interrupt suppression overwrite) | 133 | * avoidance (SVS clears adapter interrupt suppression overwrite). |
136 | */ | 134 | */ |
137 | if (!css_qdio_omit_svs) | 135 | if (!css_qdio_omit_svs) |
138 | do_clear_global_summary(); | 136 | do_clear_global_summary(); |
139 | 137 | ||
140 | /* | 138 | /* reset local summary indicator */ |
141 | * reset local summary indicator (tiqdio_alsi) to stop adapter | 139 | if (shared_ind_used()) |
142 | * interrupts for now | 140 | xchg(tiqdio_alsi, 0); |
143 | */ | ||
144 | xchg((u8 *)ind, 0); | ||
145 | 141 | ||
146 | /* protect tiq_list entries, only changed in activate or shutdown */ | 142 | /* protect tiq_list entries, only changed in activate or shutdown */ |
147 | rcu_read_lock(); | 143 | rcu_read_lock(); |
148 | 144 | ||
149 | /* check for work on all inbound thinint queues */ | 145 | /* check for work on all inbound thinint queues */ |
150 | list_for_each_entry_rcu(q, &tiq_list, entry) | 146 | list_for_each_entry_rcu(q, &tiq_list, entry) { |
147 | |||
151 | /* only process queues from changed sets */ | 148 | /* only process queues from changed sets */ |
152 | if (*q->irq_ptr->dsci) { | 149 | if (!*q->irq_ptr->dsci) |
153 | qperf_inc(q, adapter_int); | 150 | continue; |
154 | 151 | ||
152 | if (q->u.in.queue_start_poll) { | ||
153 | /* skip if polling is enabled or already in work */ | ||
154 | if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED, | ||
155 | &q->u.in.queue_irq_state)) { | ||
156 | qperf_inc(q, int_discarded); | ||
157 | continue; | ||
158 | } | ||
159 | |||
160 | /* avoid dsci clear here, done after processing */ | ||
161 | q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr, | ||
162 | q->irq_ptr->int_parm); | ||
163 | } else { | ||
155 | /* only clear it if the indicator is non-shared */ | 164 | /* only clear it if the indicator is non-shared */ |
156 | if (!shared_ind(q->irq_ptr)) | 165 | if (!shared_ind(q->irq_ptr)) |
157 | xchg(q->irq_ptr->dsci, 0); | 166 | xchg(q->irq_ptr->dsci, 0); |
158 | /* | 167 | /* |
159 | * don't call inbound processing directly since | 168 | * Call inbound processing but not directly |
160 | * that could starve other thinint queues | 169 | * since that could starve other thinint queues. |
161 | */ | 170 | */ |
162 | tasklet_schedule(&q->tasklet); | 171 | tasklet_schedule(&q->tasklet); |
163 | } | 172 | } |
164 | 173 | qperf_inc(q, adapter_int); | |
174 | } | ||
165 | rcu_read_unlock(); | 175 | rcu_read_unlock(); |
166 | 176 | ||
167 | /* | 177 | /* |
168 | * if we used the shared indicator clear it now after all queues | 178 | * If the shared indicator was used clear it now after all queues |
169 | * were processed | 179 | * were processed. |
170 | */ | 180 | */ |
171 | if (atomic_read(&q_indicators[TIQDIO_SHARED_IND].count)) { | 181 | if (shared_ind_used()) { |
172 | xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0); | 182 | xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0); |
173 | 183 | ||
174 | /* prevent racing */ | 184 | /* prevent racing */ |
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index 41e0aaefafd5..f5221749d180 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c | |||
@@ -897,7 +897,8 @@ static const struct file_operations zcrypt_fops = { | |||
897 | .compat_ioctl = zcrypt_compat_ioctl, | 897 | .compat_ioctl = zcrypt_compat_ioctl, |
898 | #endif | 898 | #endif |
899 | .open = zcrypt_open, | 899 | .open = zcrypt_open, |
900 | .release = zcrypt_release | 900 | .release = zcrypt_release, |
901 | .llseek = no_llseek, | ||
901 | }; | 902 | }; |
902 | 903 | ||
903 | /* | 904 | /* |
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c index 4e298bc8949d..5a46b8c5d68a 100644 --- a/drivers/s390/kvm/kvm_virtio.c +++ b/drivers/s390/kvm/kvm_virtio.c | |||
@@ -32,6 +32,7 @@ | |||
32 | * The pointer to our (page) of device descriptions. | 32 | * The pointer to our (page) of device descriptions. |
33 | */ | 33 | */ |
34 | static void *kvm_devices; | 34 | static void *kvm_devices; |
35 | struct work_struct hotplug_work; | ||
35 | 36 | ||
36 | struct kvm_device { | 37 | struct kvm_device { |
37 | struct virtio_device vdev; | 38 | struct virtio_device vdev; |
@@ -328,13 +329,54 @@ static void scan_devices(void) | |||
328 | } | 329 | } |
329 | 330 | ||
330 | /* | 331 | /* |
332 | * match for a kvm device with a specific desc pointer | ||
333 | */ | ||
334 | static int match_desc(struct device *dev, void *data) | ||
335 | { | ||
336 | if ((ulong)to_kvmdev(dev_to_virtio(dev))->desc == (ulong)data) | ||
337 | return 1; | ||
338 | |||
339 | return 0; | ||
340 | } | ||
341 | |||
342 | /* | ||
343 | * hotplug_device tries to find changes in the device page. | ||
344 | */ | ||
345 | static void hotplug_devices(struct work_struct *dummy) | ||
346 | { | ||
347 | unsigned int i; | ||
348 | struct kvm_device_desc *d; | ||
349 | struct device *dev; | ||
350 | |||
351 | for (i = 0; i < PAGE_SIZE; i += desc_size(d)) { | ||
352 | d = kvm_devices + i; | ||
353 | |||
354 | /* end of list */ | ||
355 | if (d->type == 0) | ||
356 | break; | ||
357 | |||
358 | /* device already exists */ | ||
359 | dev = device_find_child(kvm_root, d, match_desc); | ||
360 | if (dev) { | ||
361 | /* XXX check for hotplug remove */ | ||
362 | put_device(dev); | ||
363 | continue; | ||
364 | } | ||
365 | |||
366 | /* new device */ | ||
367 | printk(KERN_INFO "Adding new virtio device %p\n", d); | ||
368 | add_kvm_device(d, i); | ||
369 | } | ||
370 | } | ||
371 | |||
372 | /* | ||
331 | * we emulate the request_irq behaviour on top of s390 extints | 373 | * we emulate the request_irq behaviour on top of s390 extints |
332 | */ | 374 | */ |
333 | static void kvm_extint_handler(u16 code) | 375 | static void kvm_extint_handler(u16 code) |
334 | { | 376 | { |
335 | struct virtqueue *vq; | 377 | struct virtqueue *vq; |
336 | u16 subcode; | 378 | u16 subcode; |
337 | int config_changed; | 379 | u32 param; |
338 | 380 | ||
339 | subcode = S390_lowcore.cpu_addr; | 381 | subcode = S390_lowcore.cpu_addr; |
340 | if ((subcode & 0xff00) != VIRTIO_SUBCODE_64) | 382 | if ((subcode & 0xff00) != VIRTIO_SUBCODE_64) |
@@ -343,18 +385,28 @@ static void kvm_extint_handler(u16 code) | |||
343 | /* The LSB might be overloaded, we have to mask it */ | 385 | /* The LSB might be overloaded, we have to mask it */ |
344 | vq = (struct virtqueue *)(S390_lowcore.ext_params2 & ~1UL); | 386 | vq = (struct virtqueue *)(S390_lowcore.ext_params2 & ~1UL); |
345 | 387 | ||
346 | /* We use the LSB of extparam, to decide, if this interrupt is a config | 388 | /* We use ext_params to decide what this interrupt means */ |
347 | * change or a "standard" interrupt */ | 389 | param = S390_lowcore.ext_params & VIRTIO_PARAM_MASK; |
348 | config_changed = S390_lowcore.ext_params & 1; | ||
349 | 390 | ||
350 | if (config_changed) { | 391 | switch (param) { |
392 | case VIRTIO_PARAM_CONFIG_CHANGED: | ||
393 | { | ||
351 | struct virtio_driver *drv; | 394 | struct virtio_driver *drv; |
352 | drv = container_of(vq->vdev->dev.driver, | 395 | drv = container_of(vq->vdev->dev.driver, |
353 | struct virtio_driver, driver); | 396 | struct virtio_driver, driver); |
354 | if (drv->config_changed) | 397 | if (drv->config_changed) |
355 | drv->config_changed(vq->vdev); | 398 | drv->config_changed(vq->vdev); |
356 | } else | 399 | |
400 | break; | ||
401 | } | ||
402 | case VIRTIO_PARAM_DEV_ADD: | ||
403 | schedule_work(&hotplug_work); | ||
404 | break; | ||
405 | case VIRTIO_PARAM_VRING_INTERRUPT: | ||
406 | default: | ||
357 | vring_interrupt(0, vq); | 407 | vring_interrupt(0, vq); |
408 | break; | ||
409 | } | ||
358 | } | 410 | } |
359 | 411 | ||
360 | /* | 412 | /* |
@@ -383,6 +435,8 @@ static int __init kvm_devices_init(void) | |||
383 | 435 | ||
384 | kvm_devices = (void *) real_memory_size; | 436 | kvm_devices = (void *) real_memory_size; |
385 | 437 | ||
438 | INIT_WORK(&hotplug_work, hotplug_devices); | ||
439 | |||
386 | ctl_set_bit(0, 9); | 440 | ctl_set_bit(0, 9); |
387 | register_external_interrupt(0x2603, kvm_extint_handler); | 441 | register_external_interrupt(0x2603, kvm_extint_handler); |
388 | 442 | ||
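Registering a new virtio device goes through driver-core code that can sleep, so the external interrupt handler cannot walk the descriptor page itself; it only schedules hotplug_work, and hotplug_devices() later scans the page in process context, using device_find_child() with match_desc() to skip descriptors that already have a device. The same defer-from-atomic-context pattern in isolation, with hypothetical names:

    #include <linux/workqueue.h>

    static struct work_struct demo_work;

    static void demo_work_fn(struct work_struct *work)
    {
            /* process context: allocation and device registration may sleep */
    }

    /* called from an (atomic) interrupt handler */
    static void demo_notify(void)
    {
            schedule_work(&demo_work);          /* just queue the work, return quickly */
    }

    static int __init demo_init(void)
    {
            INIT_WORK(&demo_work, demo_work_fn);
            return 0;
    }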
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig index 977bb4d4ed15..456b18743397 100644 --- a/drivers/s390/net/Kconfig +++ b/drivers/s390/net/Kconfig | |||
@@ -100,6 +100,6 @@ config QETH_IPV6 | |||
100 | 100 | ||
101 | config CCWGROUP | 101 | config CCWGROUP |
102 | tristate | 102 | tristate |
103 | default (LCS || CTCM || QETH) | 103 | default (LCS || CTCM || QETH || CLAW) |
104 | 104 | ||
105 | endmenu | 105 | endmenu |
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c index a75ed3083a6a..8e4153d740f3 100644 --- a/drivers/s390/net/claw.c +++ b/drivers/s390/net/claw.c | |||
@@ -386,7 +386,7 @@ claw_tx(struct sk_buff *skb, struct net_device *dev) | |||
386 | struct chbk *p_ch; | 386 | struct chbk *p_ch; |
387 | 387 | ||
388 | CLAW_DBF_TEXT(4, trace, "claw_tx"); | 388 | CLAW_DBF_TEXT(4, trace, "claw_tx"); |
389 | p_ch=&privptr->channel[WRITE]; | 389 | p_ch = &privptr->channel[WRITE_CHANNEL]; |
390 | spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags); | 390 | spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags); |
391 | rc=claw_hw_tx( skb, dev, 1 ); | 391 | rc=claw_hw_tx( skb, dev, 1 ); |
392 | spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags); | 392 | spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags); |
@@ -407,7 +407,7 @@ static struct sk_buff * | |||
407 | claw_pack_skb(struct claw_privbk *privptr) | 407 | claw_pack_skb(struct claw_privbk *privptr) |
408 | { | 408 | { |
409 | struct sk_buff *new_skb,*held_skb; | 409 | struct sk_buff *new_skb,*held_skb; |
410 | struct chbk *p_ch = &privptr->channel[WRITE]; | 410 | struct chbk *p_ch = &privptr->channel[WRITE_CHANNEL]; |
411 | struct claw_env *p_env = privptr->p_env; | 411 | struct claw_env *p_env = privptr->p_env; |
412 | int pkt_cnt,pk_ind,so_far; | 412 | int pkt_cnt,pk_ind,so_far; |
413 | 413 | ||
@@ -515,15 +515,15 @@ claw_open(struct net_device *dev) | |||
515 | privptr->p_env->write_size=CLAW_FRAME_SIZE; | 515 | privptr->p_env->write_size=CLAW_FRAME_SIZE; |
516 | } | 516 | } |
517 | claw_set_busy(dev); | 517 | claw_set_busy(dev); |
518 | tasklet_init(&privptr->channel[READ].tasklet, claw_irq_tasklet, | 518 | tasklet_init(&privptr->channel[READ_CHANNEL].tasklet, claw_irq_tasklet, |
519 | (unsigned long) &privptr->channel[READ]); | 519 | (unsigned long) &privptr->channel[READ_CHANNEL]); |
520 | for ( i = 0; i < 2; i++) { | 520 | for ( i = 0; i < 2; i++) { |
521 | CLAW_DBF_TEXT_(2, trace, "opn_ch%d", i); | 521 | CLAW_DBF_TEXT_(2, trace, "opn_ch%d", i); |
522 | init_waitqueue_head(&privptr->channel[i].wait); | 522 | init_waitqueue_head(&privptr->channel[i].wait); |
523 | /* skb_queue_head_init(&p_ch->io_queue); */ | 523 | /* skb_queue_head_init(&p_ch->io_queue); */ |
524 | if (i == WRITE) | 524 | if (i == WRITE_CHANNEL) |
525 | skb_queue_head_init( | 525 | skb_queue_head_init( |
526 | &privptr->channel[WRITE].collect_queue); | 526 | &privptr->channel[WRITE_CHANNEL].collect_queue); |
527 | privptr->channel[i].flag_a = 0; | 527 | privptr->channel[i].flag_a = 0; |
528 | privptr->channel[i].IO_active = 0; | 528 | privptr->channel[i].IO_active = 0; |
529 | privptr->channel[i].flag &= ~CLAW_TIMER; | 529 | privptr->channel[i].flag &= ~CLAW_TIMER; |
@@ -551,12 +551,12 @@ claw_open(struct net_device *dev) | |||
551 | if((privptr->channel[i].flag & CLAW_TIMER) == 0x00) | 551 | if((privptr->channel[i].flag & CLAW_TIMER) == 0x00) |
552 | del_timer(&timer); | 552 | del_timer(&timer); |
553 | } | 553 | } |
554 | if ((((privptr->channel[READ].last_dstat | | 554 | if ((((privptr->channel[READ_CHANNEL].last_dstat | |
555 | privptr->channel[WRITE].last_dstat) & | 555 | privptr->channel[WRITE_CHANNEL].last_dstat) & |
556 | ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) || | 556 | ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) || |
557 | (((privptr->channel[READ].flag | | 557 | (((privptr->channel[READ_CHANNEL].flag | |
558 | privptr->channel[WRITE].flag) & CLAW_TIMER) != 0x00)) { | 558 | privptr->channel[WRITE_CHANNEL].flag) & CLAW_TIMER) != 0x00)) { |
559 | dev_info(&privptr->channel[READ].cdev->dev, | 559 | dev_info(&privptr->channel[READ_CHANNEL].cdev->dev, |
560 | "%s: remote side is not ready\n", dev->name); | 560 | "%s: remote side is not ready\n", dev->name); |
561 | CLAW_DBF_TEXT(2, trace, "notrdy"); | 561 | CLAW_DBF_TEXT(2, trace, "notrdy"); |
562 | 562 | ||
@@ -608,8 +608,8 @@ claw_open(struct net_device *dev) | |||
608 | } | 608 | } |
609 | } | 609 | } |
610 | privptr->buffs_alloc = 0; | 610 | privptr->buffs_alloc = 0; |
611 | privptr->channel[READ].flag= 0x00; | 611 | privptr->channel[READ_CHANNEL].flag = 0x00; |
612 | privptr->channel[WRITE].flag = 0x00; | 612 | privptr->channel[WRITE_CHANNEL].flag = 0x00; |
613 | privptr->p_buff_ccw=NULL; | 613 | privptr->p_buff_ccw=NULL; |
614 | privptr->p_buff_read=NULL; | 614 | privptr->p_buff_read=NULL; |
615 | privptr->p_buff_write=NULL; | 615 | privptr->p_buff_write=NULL; |
@@ -652,10 +652,10 @@ claw_irq_handler(struct ccw_device *cdev, | |||
652 | } | 652 | } |
653 | 653 | ||
654 | /* Try to extract channel from driver data. */ | 654 | /* Try to extract channel from driver data. */ |
655 | if (privptr->channel[READ].cdev == cdev) | 655 | if (privptr->channel[READ_CHANNEL].cdev == cdev) |
656 | p_ch = &privptr->channel[READ]; | 656 | p_ch = &privptr->channel[READ_CHANNEL]; |
657 | else if (privptr->channel[WRITE].cdev == cdev) | 657 | else if (privptr->channel[WRITE_CHANNEL].cdev == cdev) |
658 | p_ch = &privptr->channel[WRITE]; | 658 | p_ch = &privptr->channel[WRITE_CHANNEL]; |
659 | else { | 659 | else { |
660 | dev_warn(&cdev->dev, "The device is not a CLAW device\n"); | 660 | dev_warn(&cdev->dev, "The device is not a CLAW device\n"); |
661 | CLAW_DBF_TEXT(2, trace, "badchan"); | 661 | CLAW_DBF_TEXT(2, trace, "badchan"); |
@@ -813,7 +813,7 @@ claw_irq_handler(struct ccw_device *cdev, | |||
813 | claw_clearbit_busy(TB_TX, dev); | 813 | claw_clearbit_busy(TB_TX, dev); |
814 | claw_clear_busy(dev); | 814 | claw_clear_busy(dev); |
815 | } | 815 | } |
816 | p_ch_r = (struct chbk *)&privptr->channel[READ]; | 816 | p_ch_r = (struct chbk *)&privptr->channel[READ_CHANNEL]; |
817 | if (test_and_set_bit(CLAW_BH_ACTIVE, | 817 | if (test_and_set_bit(CLAW_BH_ACTIVE, |
818 | (void *)&p_ch_r->flag_a) == 0) | 818 | (void *)&p_ch_r->flag_a) == 0) |
819 | tasklet_schedule(&p_ch_r->tasklet); | 819 | tasklet_schedule(&p_ch_r->tasklet); |
@@ -878,13 +878,13 @@ claw_release(struct net_device *dev) | |||
878 | for ( i = 1; i >=0 ; i--) { | 878 | for ( i = 1; i >=0 ; i--) { |
879 | spin_lock_irqsave( | 879 | spin_lock_irqsave( |
880 | get_ccwdev_lock(privptr->channel[i].cdev), saveflags); | 880 | get_ccwdev_lock(privptr->channel[i].cdev), saveflags); |
881 | /* del_timer(&privptr->channel[READ].timer); */ | 881 | /* del_timer(&privptr->channel[READ_CHANNEL].timer); */ |
882 | privptr->channel[i].claw_state = CLAW_STOP; | 882 | privptr->channel[i].claw_state = CLAW_STOP; |
883 | privptr->channel[i].IO_active = 0; | 883 | privptr->channel[i].IO_active = 0; |
884 | parm = (unsigned long) &privptr->channel[i]; | 884 | parm = (unsigned long) &privptr->channel[i]; |
885 | if (i == WRITE) | 885 | if (i == WRITE_CHANNEL) |
886 | claw_purge_skb_queue( | 886 | claw_purge_skb_queue( |
887 | &privptr->channel[WRITE].collect_queue); | 887 | &privptr->channel[WRITE_CHANNEL].collect_queue); |
888 | rc = ccw_device_halt (privptr->channel[i].cdev, parm); | 888 | rc = ccw_device_halt (privptr->channel[i].cdev, parm); |
889 | if (privptr->system_validate_comp==0x00) /* never opened? */ | 889 | if (privptr->system_validate_comp==0x00) /* never opened? */ |
890 | init_waitqueue_head(&privptr->channel[i].wait); | 890 | init_waitqueue_head(&privptr->channel[i].wait); |
@@ -971,16 +971,16 @@ claw_release(struct net_device *dev) | |||
971 | privptr->mtc_skipping = 1; | 971 | privptr->mtc_skipping = 1; |
972 | privptr->mtc_offset=0; | 972 | privptr->mtc_offset=0; |
973 | 973 | ||
974 | if (((privptr->channel[READ].last_dstat | | 974 | if (((privptr->channel[READ_CHANNEL].last_dstat | |
975 | privptr->channel[WRITE].last_dstat) & | 975 | privptr->channel[WRITE_CHANNEL].last_dstat) & |
976 | ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) { | 976 | ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) { |
977 | dev_warn(&privptr->channel[READ].cdev->dev, | 977 | dev_warn(&privptr->channel[READ_CHANNEL].cdev->dev, |
978 | "Deactivating %s completed with incorrect" | 978 | "Deactivating %s completed with incorrect" |
979 | " subchannel status " | 979 | " subchannel status " |
980 | "(read %02x, write %02x)\n", | 980 | "(read %02x, write %02x)\n", |
981 | dev->name, | 981 | dev->name, |
982 | privptr->channel[READ].last_dstat, | 982 | privptr->channel[READ_CHANNEL].last_dstat, |
983 | privptr->channel[WRITE].last_dstat); | 983 | privptr->channel[WRITE_CHANNEL].last_dstat); |
984 | CLAW_DBF_TEXT(2, trace, "badclose"); | 984 | CLAW_DBF_TEXT(2, trace, "badclose"); |
985 | } | 985 | } |
986 | CLAW_DBF_TEXT(4, trace, "rlsexit"); | 986 | CLAW_DBF_TEXT(4, trace, "rlsexit"); |
@@ -1324,7 +1324,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid) | |||
1324 | 1324 | ||
1325 | CLAW_DBF_TEXT(4, trace, "hw_tx"); | 1325 | CLAW_DBF_TEXT(4, trace, "hw_tx"); |
1326 | privptr = (struct claw_privbk *)(dev->ml_priv); | 1326 | privptr = (struct claw_privbk *)(dev->ml_priv); |
1327 | p_ch=(struct chbk *)&privptr->channel[WRITE]; | 1327 | p_ch = (struct chbk *)&privptr->channel[WRITE_CHANNEL]; |
1328 | p_env =privptr->p_env; | 1328 | p_env =privptr->p_env; |
1329 | claw_free_wrt_buf(dev); /* Clean up free chain if posible */ | 1329 | claw_free_wrt_buf(dev); /* Clean up free chain if posible */ |
1330 | /* scan the write queue to free any completed write packets */ | 1330 | /* scan the write queue to free any completed write packets */ |
@@ -1357,7 +1357,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid) | |||
1357 | claw_strt_out_IO(dev ); | 1357 | claw_strt_out_IO(dev ); |
1358 | claw_free_wrt_buf( dev ); | 1358 | claw_free_wrt_buf( dev ); |
1359 | if (privptr->write_free_count==0) { | 1359 | if (privptr->write_free_count==0) { |
1360 | ch = &privptr->channel[WRITE]; | 1360 | ch = &privptr->channel[WRITE_CHANNEL]; |
1361 | atomic_inc(&skb->users); | 1361 | atomic_inc(&skb->users); |
1362 | skb_queue_tail(&ch->collect_queue, skb); | 1362 | skb_queue_tail(&ch->collect_queue, skb); |
1363 | goto Done; | 1363 | goto Done; |
@@ -1369,7 +1369,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid) | |||
1369 | } | 1369 | } |
1370 | /* tx lock */ | 1370 | /* tx lock */ |
1371 | if (claw_test_and_setbit_busy(TB_TX,dev)) { /* set to busy */ | 1371 | if (claw_test_and_setbit_busy(TB_TX,dev)) { /* set to busy */ |
1372 | ch = &privptr->channel[WRITE]; | 1372 | ch = &privptr->channel[WRITE_CHANNEL]; |
1373 | atomic_inc(&skb->users); | 1373 | atomic_inc(&skb->users); |
1374 | skb_queue_tail(&ch->collect_queue, skb); | 1374 | skb_queue_tail(&ch->collect_queue, skb); |
1375 | claw_strt_out_IO(dev ); | 1375 | claw_strt_out_IO(dev ); |
@@ -1385,7 +1385,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid) | |||
1385 | privptr->p_write_free_chain == NULL ) { | 1385 | privptr->p_write_free_chain == NULL ) { |
1386 | 1386 | ||
1387 | claw_setbit_busy(TB_NOBUFFER,dev); | 1387 | claw_setbit_busy(TB_NOBUFFER,dev); |
1388 | ch = &privptr->channel[WRITE]; | 1388 | ch = &privptr->channel[WRITE_CHANNEL]; |
1389 | atomic_inc(&skb->users); | 1389 | atomic_inc(&skb->users); |
1390 | skb_queue_tail(&ch->collect_queue, skb); | 1390 | skb_queue_tail(&ch->collect_queue, skb); |
1391 | CLAW_DBF_TEXT(2, trace, "clawbusy"); | 1391 | CLAW_DBF_TEXT(2, trace, "clawbusy"); |
@@ -1397,7 +1397,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid) | |||
1397 | while (len_of_data > 0) { | 1397 | while (len_of_data > 0) { |
1398 | p_this_ccw=privptr->p_write_free_chain; /* get a block */ | 1398 | p_this_ccw=privptr->p_write_free_chain; /* get a block */ |
1399 | if (p_this_ccw == NULL) { /* lost the race */ | 1399 | if (p_this_ccw == NULL) { /* lost the race */ |
1400 | ch = &privptr->channel[WRITE]; | 1400 | ch = &privptr->channel[WRITE_CHANNEL]; |
1401 | atomic_inc(&skb->users); | 1401 | atomic_inc(&skb->users); |
1402 | skb_queue_tail(&ch->collect_queue, skb); | 1402 | skb_queue_tail(&ch->collect_queue, skb); |
1403 | goto Done2; | 1403 | goto Done2; |
@@ -2067,7 +2067,7 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw) | |||
2067 | *catch up to each other */ | 2067 | *catch up to each other */ |
2068 | privptr = dev->ml_priv; | 2068 | privptr = dev->ml_priv; |
2069 | p_env=privptr->p_env; | 2069 | p_env=privptr->p_env; |
2070 | tdev = &privptr->channel[READ].cdev->dev; | 2070 | tdev = &privptr->channel[READ_CHANNEL].cdev->dev; |
2071 | memcpy( &temp_host_name, p_env->host_name, 8); | 2071 | memcpy( &temp_host_name, p_env->host_name, 8); |
2072 | memcpy( &temp_ws_name, p_env->adapter_name , 8); | 2072 | memcpy( &temp_ws_name, p_env->adapter_name , 8); |
2073 | dev_info(tdev, "%s: CLAW device %.8s: " | 2073 | dev_info(tdev, "%s: CLAW device %.8s: " |
@@ -2245,7 +2245,7 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw) | |||
2245 | dev->name, temp_ws_name, | 2245 | dev->name, temp_ws_name, |
2246 | p_ctlbk->linkid); | 2246 | p_ctlbk->linkid); |
2247 | privptr->active_link_ID = p_ctlbk->linkid; | 2247 | privptr->active_link_ID = p_ctlbk->linkid; |
2248 | p_ch = &privptr->channel[WRITE]; | 2248 | p_ch = &privptr->channel[WRITE_CHANNEL]; |
2249 | wake_up(&p_ch->wait); /* wake up claw_open ( WRITE) */ | 2249 | wake_up(&p_ch->wait); /* wake up claw_open ( WRITE) */ |
2250 | break; | 2250 | break; |
2251 | case CONNECTION_RESPONSE: | 2251 | case CONNECTION_RESPONSE: |
@@ -2296,7 +2296,7 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw) | |||
2296 | "%s: Confirmed Now packing\n", dev->name); | 2296 | "%s: Confirmed Now packing\n", dev->name); |
2297 | p_env->packing = DO_PACKED; | 2297 | p_env->packing = DO_PACKED; |
2298 | } | 2298 | } |
2299 | p_ch = &privptr->channel[WRITE]; | 2299 | p_ch = &privptr->channel[WRITE_CHANNEL]; |
2300 | wake_up(&p_ch->wait); | 2300 | wake_up(&p_ch->wait); |
2301 | } else { | 2301 | } else { |
2302 | dev_warn(tdev, "Activating %s failed because of" | 2302 | dev_warn(tdev, "Activating %s failed because of" |
@@ -2556,7 +2556,7 @@ unpack_read(struct net_device *dev ) | |||
2556 | p_packd=NULL; | 2556 | p_packd=NULL; |
2557 | privptr = dev->ml_priv; | 2557 | privptr = dev->ml_priv; |
2558 | 2558 | ||
2559 | p_dev = &privptr->channel[READ].cdev->dev; | 2559 | p_dev = &privptr->channel[READ_CHANNEL].cdev->dev; |
2560 | p_env = privptr->p_env; | 2560 | p_env = privptr->p_env; |
2561 | p_this_ccw=privptr->p_read_active_first; | 2561 | p_this_ccw=privptr->p_read_active_first; |
2562 | while (p_this_ccw!=NULL && p_this_ccw->header.flag!=CLAW_PENDING) { | 2562 | while (p_this_ccw!=NULL && p_this_ccw->header.flag!=CLAW_PENDING) { |
@@ -2728,7 +2728,7 @@ claw_strt_read (struct net_device *dev, int lock ) | |||
2728 | struct ccwbk*p_ccwbk; | 2728 | struct ccwbk*p_ccwbk; |
2729 | struct chbk *p_ch; | 2729 | struct chbk *p_ch; |
2730 | struct clawh *p_clawh; | 2730 | struct clawh *p_clawh; |
2731 | p_ch=&privptr->channel[READ]; | 2731 | p_ch = &privptr->channel[READ_CHANNEL]; |
2732 | 2732 | ||
2733 | CLAW_DBF_TEXT(4, trace, "StRdNter"); | 2733 | CLAW_DBF_TEXT(4, trace, "StRdNter"); |
2734 | p_clawh=(struct clawh *)privptr->p_claw_signal_blk; | 2734 | p_clawh=(struct clawh *)privptr->p_claw_signal_blk; |
@@ -2782,7 +2782,7 @@ claw_strt_out_IO( struct net_device *dev ) | |||
2782 | return; | 2782 | return; |
2783 | } | 2783 | } |
2784 | privptr = (struct claw_privbk *)dev->ml_priv; | 2784 | privptr = (struct claw_privbk *)dev->ml_priv; |
2785 | p_ch=&privptr->channel[WRITE]; | 2785 | p_ch = &privptr->channel[WRITE_CHANNEL]; |
2786 | 2786 | ||
2787 | CLAW_DBF_TEXT(4, trace, "strt_io"); | 2787 | CLAW_DBF_TEXT(4, trace, "strt_io"); |
2788 | p_first_ccw=privptr->p_write_active_first; | 2788 | p_first_ccw=privptr->p_write_active_first; |
@@ -2875,7 +2875,7 @@ claw_free_netdevice(struct net_device * dev, int free_dev) | |||
2875 | if (dev->flags & IFF_RUNNING) | 2875 | if (dev->flags & IFF_RUNNING) |
2876 | claw_release(dev); | 2876 | claw_release(dev); |
2877 | if (privptr) { | 2877 | if (privptr) { |
2878 | privptr->channel[READ].ndev = NULL; /* say it's free */ | 2878 | privptr->channel[READ_CHANNEL].ndev = NULL; /* say it's free */ |
2879 | } | 2879 | } |
2880 | dev->ml_priv = NULL; | 2880 | dev->ml_priv = NULL; |
2881 | #ifdef MODULE | 2881 | #ifdef MODULE |
@@ -2960,18 +2960,18 @@ claw_new_device(struct ccwgroup_device *cgdev) | |||
2960 | struct ccw_dev_id dev_id; | 2960 | struct ccw_dev_id dev_id; |
2961 | 2961 | ||
2962 | dev_info(&cgdev->dev, "add for %s\n", | 2962 | dev_info(&cgdev->dev, "add for %s\n", |
2963 | dev_name(&cgdev->cdev[READ]->dev)); | 2963 | dev_name(&cgdev->cdev[READ_CHANNEL]->dev)); |
2964 | CLAW_DBF_TEXT(2, setup, "new_dev"); | 2964 | CLAW_DBF_TEXT(2, setup, "new_dev"); |
2965 | privptr = dev_get_drvdata(&cgdev->dev); | 2965 | privptr = dev_get_drvdata(&cgdev->dev); |
2966 | dev_set_drvdata(&cgdev->cdev[READ]->dev, privptr); | 2966 | dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, privptr); |
2967 | dev_set_drvdata(&cgdev->cdev[WRITE]->dev, privptr); | 2967 | dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, privptr); |
2968 | if (!privptr) | 2968 | if (!privptr) |
2969 | return -ENODEV; | 2969 | return -ENODEV; |
2970 | p_env = privptr->p_env; | 2970 | p_env = privptr->p_env; |
2971 | ccw_device_get_id(cgdev->cdev[READ], &dev_id); | 2971 | ccw_device_get_id(cgdev->cdev[READ_CHANNEL], &dev_id); |
2972 | p_env->devno[READ] = dev_id.devno; | 2972 | p_env->devno[READ_CHANNEL] = dev_id.devno; |
2973 | ccw_device_get_id(cgdev->cdev[WRITE], &dev_id); | 2973 | ccw_device_get_id(cgdev->cdev[WRITE_CHANNEL], &dev_id); |
2974 | p_env->devno[WRITE] = dev_id.devno; | 2974 | p_env->devno[WRITE_CHANNEL] = dev_id.devno; |
2975 | ret = add_channel(cgdev->cdev[0],0,privptr); | 2975 | ret = add_channel(cgdev->cdev[0],0,privptr); |
2976 | if (ret == 0) | 2976 | if (ret == 0) |
2977 | ret = add_channel(cgdev->cdev[1],1,privptr); | 2977 | ret = add_channel(cgdev->cdev[1],1,privptr); |
@@ -2980,14 +2980,14 @@ claw_new_device(struct ccwgroup_device *cgdev) | |||
2980 | " failed with error code %d\n", ret); | 2980 | " failed with error code %d\n", ret); |
2981 | goto out; | 2981 | goto out; |
2982 | } | 2982 | } |
2983 | ret = ccw_device_set_online(cgdev->cdev[READ]); | 2983 | ret = ccw_device_set_online(cgdev->cdev[READ_CHANNEL]); |
2984 | if (ret != 0) { | 2984 | if (ret != 0) { |
2985 | dev_warn(&cgdev->dev, | 2985 | dev_warn(&cgdev->dev, |
2986 | "Setting the read subchannel online" | 2986 | "Setting the read subchannel online" |
2987 | " failed with error code %d\n", ret); | 2987 | " failed with error code %d\n", ret); |
2988 | goto out; | 2988 | goto out; |
2989 | } | 2989 | } |
2990 | ret = ccw_device_set_online(cgdev->cdev[WRITE]); | 2990 | ret = ccw_device_set_online(cgdev->cdev[WRITE_CHANNEL]); |
2991 | if (ret != 0) { | 2991 | if (ret != 0) { |
2992 | dev_warn(&cgdev->dev, | 2992 | dev_warn(&cgdev->dev, |
2993 | "Setting the write subchannel online " | 2993 | "Setting the write subchannel online " |
@@ -3002,8 +3002,8 @@ claw_new_device(struct ccwgroup_device *cgdev) | |||
3002 | } | 3002 | } |
3003 | dev->ml_priv = privptr; | 3003 | dev->ml_priv = privptr; |
3004 | dev_set_drvdata(&cgdev->dev, privptr); | 3004 | dev_set_drvdata(&cgdev->dev, privptr); |
3005 | dev_set_drvdata(&cgdev->cdev[READ]->dev, privptr); | 3005 | dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, privptr); |
3006 | dev_set_drvdata(&cgdev->cdev[WRITE]->dev, privptr); | 3006 | dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, privptr); |
3007 | /* sysfs magic */ | 3007 | /* sysfs magic */ |
3008 | SET_NETDEV_DEV(dev, &cgdev->dev); | 3008 | SET_NETDEV_DEV(dev, &cgdev->dev); |
3009 | if (register_netdev(dev) != 0) { | 3009 | if (register_netdev(dev) != 0) { |
@@ -3021,16 +3021,16 @@ claw_new_device(struct ccwgroup_device *cgdev) | |||
3021 | goto out; | 3021 | goto out; |
3022 | } | 3022 | } |
3023 | } | 3023 | } |
3024 | privptr->channel[READ].ndev = dev; | 3024 | privptr->channel[READ_CHANNEL].ndev = dev; |
3025 | privptr->channel[WRITE].ndev = dev; | 3025 | privptr->channel[WRITE_CHANNEL].ndev = dev; |
3026 | privptr->p_env->ndev = dev; | 3026 | privptr->p_env->ndev = dev; |
3027 | 3027 | ||
3028 | dev_info(&cgdev->dev, "%s:readsize=%d writesize=%d " | 3028 | dev_info(&cgdev->dev, "%s:readsize=%d writesize=%d " |
3029 | "readbuffer=%d writebuffer=%d read=0x%04x write=0x%04x\n", | 3029 | "readbuffer=%d writebuffer=%d read=0x%04x write=0x%04x\n", |
3030 | dev->name, p_env->read_size, | 3030 | dev->name, p_env->read_size, |
3031 | p_env->write_size, p_env->read_buffers, | 3031 | p_env->write_size, p_env->read_buffers, |
3032 | p_env->write_buffers, p_env->devno[READ], | 3032 | p_env->write_buffers, p_env->devno[READ_CHANNEL], |
3033 | p_env->devno[WRITE]); | 3033 | p_env->devno[WRITE_CHANNEL]); |
3034 | dev_info(&cgdev->dev, "%s:host_name:%.8s, adapter_name " | 3034 | dev_info(&cgdev->dev, "%s:host_name:%.8s, adapter_name " |
3035 | ":%.8s api_type: %.8s\n", | 3035 | ":%.8s api_type: %.8s\n", |
3036 | dev->name, p_env->host_name, | 3036 | dev->name, p_env->host_name, |
@@ -3072,10 +3072,10 @@ claw_shutdown_device(struct ccwgroup_device *cgdev) | |||
3072 | priv = dev_get_drvdata(&cgdev->dev); | 3072 | priv = dev_get_drvdata(&cgdev->dev); |
3073 | if (!priv) | 3073 | if (!priv) |
3074 | return -ENODEV; | 3074 | return -ENODEV; |
3075 | ndev = priv->channel[READ].ndev; | 3075 | ndev = priv->channel[READ_CHANNEL].ndev; |
3076 | if (ndev) { | 3076 | if (ndev) { |
3077 | /* Close the device */ | 3077 | /* Close the device */ |
3078 | dev_info(&cgdev->dev, "%s: shutting down \n", | 3078 | dev_info(&cgdev->dev, "%s: shutting down\n", |
3079 | ndev->name); | 3079 | ndev->name); |
3080 | if (ndev->flags & IFF_RUNNING) | 3080 | if (ndev->flags & IFF_RUNNING) |
3081 | ret = claw_release(ndev); | 3081 | ret = claw_release(ndev); |
@@ -3083,8 +3083,8 @@ claw_shutdown_device(struct ccwgroup_device *cgdev) | |||
3083 | unregister_netdev(ndev); | 3083 | unregister_netdev(ndev); |
3084 | ndev->ml_priv = NULL; /* cgdev data, not ndev's to free */ | 3084 | ndev->ml_priv = NULL; /* cgdev data, not ndev's to free */ |
3085 | claw_free_netdevice(ndev, 1); | 3085 | claw_free_netdevice(ndev, 1); |
3086 | priv->channel[READ].ndev = NULL; | 3086 | priv->channel[READ_CHANNEL].ndev = NULL; |
3087 | priv->channel[WRITE].ndev = NULL; | 3087 | priv->channel[WRITE_CHANNEL].ndev = NULL; |
3088 | priv->p_env->ndev = NULL; | 3088 | priv->p_env->ndev = NULL; |
3089 | } | 3089 | } |
3090 | ccw_device_set_offline(cgdev->cdev[1]); | 3090 | ccw_device_set_offline(cgdev->cdev[1]); |
@@ -3115,8 +3115,8 @@ claw_remove_device(struct ccwgroup_device *cgdev) | |||
3115 | priv->channel[1].irb=NULL; | 3115 | priv->channel[1].irb=NULL; |
3116 | kfree(priv); | 3116 | kfree(priv); |
3117 | dev_set_drvdata(&cgdev->dev, NULL); | 3117 | dev_set_drvdata(&cgdev->dev, NULL); |
3118 | dev_set_drvdata(&cgdev->cdev[READ]->dev, NULL); | 3118 | dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, NULL); |
3119 | dev_set_drvdata(&cgdev->cdev[WRITE]->dev, NULL); | 3119 | dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, NULL); |
3120 | put_device(&cgdev->dev); | 3120 | put_device(&cgdev->dev); |
3121 | 3121 | ||
3122 | return; | 3122 | return; |
diff --git a/drivers/s390/net/claw.h b/drivers/s390/net/claw.h index 46d59a13db12..1bc5904df19f 100644 --- a/drivers/s390/net/claw.h +++ b/drivers/s390/net/claw.h | |||
@@ -74,8 +74,8 @@ | |||
74 | #define MAX_ENVELOPE_SIZE 65536 | 74 | #define MAX_ENVELOPE_SIZE 65536 |
75 | #define CLAW_DEFAULT_MTU_SIZE 4096 | 75 | #define CLAW_DEFAULT_MTU_SIZE 4096 |
76 | #define DEF_PACK_BUFSIZE 32768 | 76 | #define DEF_PACK_BUFSIZE 32768 |
77 | #define READ 0 | 77 | #define READ_CHANNEL 0 |
78 | #define WRITE 1 | 78 | #define WRITE_CHANNEL 1 |
79 | 79 | ||
80 | #define TB_TX 0 /* sk buffer handling in process */ | 80 | #define TB_TX 0 /* sk buffer handling in process */ |
81 | #define TB_STOP 1 /* network device stop in process */ | 81 | #define TB_STOP 1 /* network device stop in process */ |
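
The claw.h hunk above, together with the claw.c changes before it, renames the driver-local READ/WRITE channel indices to READ_CHANNEL/WRITE_CHANNEL, most likely so they no longer shadow the kernel-wide READ/WRITE I/O-direction macros. The underlying pattern is unchanged: one read and one write subchannel kept in a two-element channel[] array. A minimal user-space sketch of that indexing pattern follows; the struct layout and device numbers are illustrative, not taken from the driver:

```c
/*
 * Illustrative only: in the driver the array lives in struct claw_privbk
 * and holds full struct chbk entries, not this toy struct.
 */
#include <stdio.h>

#define READ_CHANNEL  0
#define WRITE_CHANNEL 1

struct example_channel {
	const char *role;
	unsigned int devno;
};

struct example_priv {
	struct example_channel channel[2];	/* one read, one write subchannel */
};

int main(void)
{
	struct example_priv priv = {
		.channel = {
			[READ_CHANNEL]  = { "read",  0x0600 },	/* devnos are made up */
			[WRITE_CHANNEL] = { "write", 0x0601 },
		},
	};

	printf("%s subchannel 0x%04x, %s subchannel 0x%04x\n",
	       priv.channel[READ_CHANNEL].role,  priv.channel[READ_CHANNEL].devno,
	       priv.channel[WRITE_CHANNEL].role, priv.channel[WRITE_CHANNEL].devno);
	return 0;
}
```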
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c index 70eb7f138414..8c921fc3511a 100644 --- a/drivers/s390/net/ctcm_fsms.c +++ b/drivers/s390/net/ctcm_fsms.c | |||
@@ -454,7 +454,7 @@ static void chx_firstio(fsm_instance *fi, int event, void *arg) | |||
454 | if ((fsmstate == CTC_STATE_SETUPWAIT) && | 454 | if ((fsmstate == CTC_STATE_SETUPWAIT) && |
455 | (ch->protocol == CTCM_PROTO_OS390)) { | 455 | (ch->protocol == CTCM_PROTO_OS390)) { |
456 | /* OS/390 resp. z/OS */ | 456 | /* OS/390 resp. z/OS */ |
457 | if (CHANNEL_DIRECTION(ch->flags) == READ) { | 457 | if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) { |
458 | *((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN; | 458 | *((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN; |
459 | fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, | 459 | fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, |
460 | CTC_EVENT_TIMER, ch); | 460 | CTC_EVENT_TIMER, ch); |
@@ -472,14 +472,14 @@ static void chx_firstio(fsm_instance *fi, int event, void *arg) | |||
472 | * if in compatibility mode, since VM TCP delays the initial | 472 | * if in compatibility mode, since VM TCP delays the initial |
473 | * frame until it has some data to send. | 473 | * frame until it has some data to send. |
474 | */ | 474 | */ |
475 | if ((CHANNEL_DIRECTION(ch->flags) == WRITE) || | 475 | if ((CHANNEL_DIRECTION(ch->flags) == CTCM_WRITE) || |
476 | (ch->protocol != CTCM_PROTO_S390)) | 476 | (ch->protocol != CTCM_PROTO_S390)) |
477 | fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch); | 477 | fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch); |
478 | 478 | ||
479 | *((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN; | 479 | *((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN; |
480 | ch->ccw[1].count = 2; /* Transfer only length */ | 480 | ch->ccw[1].count = 2; /* Transfer only length */ |
481 | 481 | ||
482 | fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == READ) | 482 | fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) |
483 | ? CTC_STATE_RXINIT : CTC_STATE_TXINIT); | 483 | ? CTC_STATE_RXINIT : CTC_STATE_TXINIT); |
484 | rc = ccw_device_start(ch->cdev, &ch->ccw[0], | 484 | rc = ccw_device_start(ch->cdev, &ch->ccw[0], |
485 | (unsigned long)ch, 0xff, 0); | 485 | (unsigned long)ch, 0xff, 0); |
@@ -495,7 +495,7 @@ static void chx_firstio(fsm_instance *fi, int event, void *arg) | |||
495 | * reply from VM TCP which brings up the RX channel to it's | 495 | * reply from VM TCP which brings up the RX channel to it's |
496 | * final state. | 496 | * final state. |
497 | */ | 497 | */ |
498 | if ((CHANNEL_DIRECTION(ch->flags) == READ) && | 498 | if ((CHANNEL_DIRECTION(ch->flags) == CTCM_READ) && |
499 | (ch->protocol == CTCM_PROTO_S390)) { | 499 | (ch->protocol == CTCM_PROTO_S390)) { |
500 | struct net_device *dev = ch->netdev; | 500 | struct net_device *dev = ch->netdev; |
501 | struct ctcm_priv *priv = dev->ml_priv; | 501 | struct ctcm_priv *priv = dev->ml_priv; |
@@ -600,15 +600,15 @@ static void ctcm_chx_start(fsm_instance *fi, int event, void *arg) | |||
600 | int rc; | 600 | int rc; |
601 | 601 | ||
602 | CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s(%s): %s", | 602 | CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s(%s): %s", |
603 | CTCM_FUNTAIL, ch->id, | 603 | CTCM_FUNTAIL, ch->id, |
604 | (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX"); | 604 | (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? "RX" : "TX"); |
605 | 605 | ||
606 | if (ch->trans_skb != NULL) { | 606 | if (ch->trans_skb != NULL) { |
607 | clear_normalized_cda(&ch->ccw[1]); | 607 | clear_normalized_cda(&ch->ccw[1]); |
608 | dev_kfree_skb(ch->trans_skb); | 608 | dev_kfree_skb(ch->trans_skb); |
609 | ch->trans_skb = NULL; | 609 | ch->trans_skb = NULL; |
610 | } | 610 | } |
611 | if (CHANNEL_DIRECTION(ch->flags) == READ) { | 611 | if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) { |
612 | ch->ccw[1].cmd_code = CCW_CMD_READ; | 612 | ch->ccw[1].cmd_code = CCW_CMD_READ; |
613 | ch->ccw[1].flags = CCW_FLAG_SLI; | 613 | ch->ccw[1].flags = CCW_FLAG_SLI; |
614 | ch->ccw[1].count = 0; | 614 | ch->ccw[1].count = 0; |
@@ -622,7 +622,8 @@ static void ctcm_chx_start(fsm_instance *fi, int event, void *arg) | |||
622 | "%s(%s): %s trans_skb alloc delayed " | 622 | "%s(%s): %s trans_skb alloc delayed " |
623 | "until first transfer", | 623 | "until first transfer", |
624 | CTCM_FUNTAIL, ch->id, | 624 | CTCM_FUNTAIL, ch->id, |
625 | (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX"); | 625 | (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? |
626 | "RX" : "TX"); | ||
626 | } | 627 | } |
627 | ch->ccw[0].cmd_code = CCW_CMD_PREPARE; | 628 | ch->ccw[0].cmd_code = CCW_CMD_PREPARE; |
628 | ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC; | 629 | ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC; |
@@ -720,7 +721,7 @@ static void ctcm_chx_cleanup(fsm_instance *fi, int state, | |||
720 | 721 | ||
721 | ch->th_seg = 0x00; | 722 | ch->th_seg = 0x00; |
722 | ch->th_seq_num = 0x00; | 723 | ch->th_seq_num = 0x00; |
723 | if (CHANNEL_DIRECTION(ch->flags) == READ) { | 724 | if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) { |
724 | skb_queue_purge(&ch->io_queue); | 725 | skb_queue_purge(&ch->io_queue); |
725 | fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev); | 726 | fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev); |
726 | } else { | 727 | } else { |
@@ -799,7 +800,8 @@ static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg) | |||
799 | fsm_newstate(fi, CTC_STATE_STARTRETRY); | 800 | fsm_newstate(fi, CTC_STATE_STARTRETRY); |
800 | fsm_deltimer(&ch->timer); | 801 | fsm_deltimer(&ch->timer); |
801 | fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch); | 802 | fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch); |
802 | if (!IS_MPC(ch) && (CHANNEL_DIRECTION(ch->flags) == READ)) { | 803 | if (!IS_MPC(ch) && |
804 | (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)) { | ||
803 | int rc = ccw_device_halt(ch->cdev, (unsigned long)ch); | 805 | int rc = ccw_device_halt(ch->cdev, (unsigned long)ch); |
804 | if (rc != 0) | 806 | if (rc != 0) |
805 | ctcm_ccw_check_rc(ch, rc, | 807 | ctcm_ccw_check_rc(ch, rc, |
@@ -811,10 +813,10 @@ static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg) | |||
811 | CTCM_DBF_TEXT_(ERROR, CTC_DBF_CRIT, | 813 | CTCM_DBF_TEXT_(ERROR, CTC_DBF_CRIT, |
812 | "%s(%s) : %s error during %s channel setup state=%s\n", | 814 | "%s(%s) : %s error during %s channel setup state=%s\n", |
813 | CTCM_FUNTAIL, dev->name, ctc_ch_event_names[event], | 815 | CTCM_FUNTAIL, dev->name, ctc_ch_event_names[event], |
814 | (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX", | 816 | (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? "RX" : "TX", |
815 | fsm_getstate_str(fi)); | 817 | fsm_getstate_str(fi)); |
816 | 818 | ||
817 | if (CHANNEL_DIRECTION(ch->flags) == READ) { | 819 | if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) { |
818 | fsm_newstate(fi, CTC_STATE_RXERR); | 820 | fsm_newstate(fi, CTC_STATE_RXERR); |
819 | fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev); | 821 | fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev); |
820 | } else { | 822 | } else { |
@@ -945,7 +947,7 @@ static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg) | |||
945 | fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev); | 947 | fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev); |
946 | 948 | ||
947 | fsm_newstate(fi, CTC_STATE_DTERM); | 949 | fsm_newstate(fi, CTC_STATE_DTERM); |
948 | ch2 = priv->channel[WRITE]; | 950 | ch2 = priv->channel[CTCM_WRITE]; |
949 | fsm_newstate(ch2->fsm, CTC_STATE_DTERM); | 951 | fsm_newstate(ch2->fsm, CTC_STATE_DTERM); |
950 | 952 | ||
951 | ccw_device_halt(ch->cdev, (unsigned long)ch); | 953 | ccw_device_halt(ch->cdev, (unsigned long)ch); |
@@ -1074,13 +1076,13 @@ static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg) | |||
1074 | fsm_deltimer(&ch->timer); | 1076 | fsm_deltimer(&ch->timer); |
1075 | CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, | 1077 | CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, |
1076 | "%s: %s: %s unrecoverable channel error", | 1078 | "%s: %s: %s unrecoverable channel error", |
1077 | CTCM_FUNTAIL, ch->id, rd == READ ? "RX" : "TX"); | 1079 | CTCM_FUNTAIL, ch->id, rd == CTCM_READ ? "RX" : "TX"); |
1078 | 1080 | ||
1079 | if (IS_MPC(ch)) { | 1081 | if (IS_MPC(ch)) { |
1080 | priv->stats.tx_dropped++; | 1082 | priv->stats.tx_dropped++; |
1081 | priv->stats.tx_errors++; | 1083 | priv->stats.tx_errors++; |
1082 | } | 1084 | } |
1083 | if (rd == READ) { | 1085 | if (rd == CTCM_READ) { |
1084 | fsm_newstate(fi, CTC_STATE_RXERR); | 1086 | fsm_newstate(fi, CTC_STATE_RXERR); |
1085 | fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev); | 1087 | fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev); |
1086 | } else { | 1088 | } else { |
@@ -1503,7 +1505,7 @@ static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg) | |||
1503 | switch (fsm_getstate(fi)) { | 1505 | switch (fsm_getstate(fi)) { |
1504 | case CTC_STATE_STARTRETRY: | 1506 | case CTC_STATE_STARTRETRY: |
1505 | case CTC_STATE_SETUPWAIT: | 1507 | case CTC_STATE_SETUPWAIT: |
1506 | if (CHANNEL_DIRECTION(ch->flags) == READ) { | 1508 | if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) { |
1507 | ctcmpc_chx_rxidle(fi, event, arg); | 1509 | ctcmpc_chx_rxidle(fi, event, arg); |
1508 | } else { | 1510 | } else { |
1509 | fsm_newstate(fi, CTC_STATE_TXIDLE); | 1511 | fsm_newstate(fi, CTC_STATE_TXIDLE); |
@@ -1514,7 +1516,7 @@ static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg) | |||
1514 | break; | 1516 | break; |
1515 | }; | 1517 | }; |
1516 | 1518 | ||
1517 | fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == READ) | 1519 | fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) |
1518 | ? CTC_STATE_RXINIT : CTC_STATE_TXINIT); | 1520 | ? CTC_STATE_RXINIT : CTC_STATE_TXINIT); |
1519 | 1521 | ||
1520 | done: | 1522 | done: |
@@ -1753,8 +1755,8 @@ static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg) | |||
1753 | struct net_device *dev = ach->netdev; | 1755 | struct net_device *dev = ach->netdev; |
1754 | struct ctcm_priv *priv = dev->ml_priv; | 1756 | struct ctcm_priv *priv = dev->ml_priv; |
1755 | struct mpc_group *grp = priv->mpcg; | 1757 | struct mpc_group *grp = priv->mpcg; |
1756 | struct channel *wch = priv->channel[WRITE]; | 1758 | struct channel *wch = priv->channel[CTCM_WRITE]; |
1757 | struct channel *rch = priv->channel[READ]; | 1759 | struct channel *rch = priv->channel[CTCM_READ]; |
1758 | struct sk_buff *skb; | 1760 | struct sk_buff *skb; |
1759 | struct th_sweep *header; | 1761 | struct th_sweep *header; |
1760 | int rc = 0; | 1762 | int rc = 0; |
@@ -2070,7 +2072,7 @@ static void dev_action_start(fsm_instance *fi, int event, void *arg) | |||
2070 | fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX); | 2072 | fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX); |
2071 | if (IS_MPC(priv)) | 2073 | if (IS_MPC(priv)) |
2072 | priv->mpcg->channels_terminating = 0; | 2074 | priv->mpcg->channels_terminating = 0; |
2073 | for (direction = READ; direction <= WRITE; direction++) { | 2075 | for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) { |
2074 | struct channel *ch = priv->channel[direction]; | 2076 | struct channel *ch = priv->channel[direction]; |
2075 | fsm_event(ch->fsm, CTC_EVENT_START, ch); | 2077 | fsm_event(ch->fsm, CTC_EVENT_START, ch); |
2076 | } | 2078 | } |
@@ -2092,7 +2094,7 @@ static void dev_action_stop(fsm_instance *fi, int event, void *arg) | |||
2092 | CTCMY_DBF_DEV_NAME(SETUP, dev, ""); | 2094 | CTCMY_DBF_DEV_NAME(SETUP, dev, ""); |
2093 | 2095 | ||
2094 | fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX); | 2096 | fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX); |
2095 | for (direction = READ; direction <= WRITE; direction++) { | 2097 | for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) { |
2096 | struct channel *ch = priv->channel[direction]; | 2098 | struct channel *ch = priv->channel[direction]; |
2097 | fsm_event(ch->fsm, CTC_EVENT_STOP, ch); | 2099 | fsm_event(ch->fsm, CTC_EVENT_STOP, ch); |
2098 | ch->th_seq_num = 0x00; | 2100 | ch->th_seq_num = 0x00; |
@@ -2183,11 +2185,11 @@ static void dev_action_chup(fsm_instance *fi, int event, void *arg) | |||
2183 | 2185 | ||
2184 | if (IS_MPC(priv)) { | 2186 | if (IS_MPC(priv)) { |
2185 | if (event == DEV_EVENT_RXUP) | 2187 | if (event == DEV_EVENT_RXUP) |
2186 | mpc_channel_action(priv->channel[READ], | 2188 | mpc_channel_action(priv->channel[CTCM_READ], |
2187 | READ, MPC_CHANNEL_ADD); | 2189 | CTCM_READ, MPC_CHANNEL_ADD); |
2188 | else | 2190 | else |
2189 | mpc_channel_action(priv->channel[WRITE], | 2191 | mpc_channel_action(priv->channel[CTCM_WRITE], |
2190 | WRITE, MPC_CHANNEL_ADD); | 2192 | CTCM_WRITE, MPC_CHANNEL_ADD); |
2191 | } | 2193 | } |
2192 | } | 2194 | } |
2193 | 2195 | ||
@@ -2239,11 +2241,11 @@ static void dev_action_chdown(fsm_instance *fi, int event, void *arg) | |||
2239 | } | 2241 | } |
2240 | if (IS_MPC(priv)) { | 2242 | if (IS_MPC(priv)) { |
2241 | if (event == DEV_EVENT_RXDOWN) | 2243 | if (event == DEV_EVENT_RXDOWN) |
2242 | mpc_channel_action(priv->channel[READ], | 2244 | mpc_channel_action(priv->channel[CTCM_READ], |
2243 | READ, MPC_CHANNEL_REMOVE); | 2245 | CTCM_READ, MPC_CHANNEL_REMOVE); |
2244 | else | 2246 | else |
2245 | mpc_channel_action(priv->channel[WRITE], | 2247 | mpc_channel_action(priv->channel[CTCM_WRITE], |
2246 | WRITE, MPC_CHANNEL_REMOVE); | 2248 | CTCM_WRITE, MPC_CHANNEL_REMOVE); |
2247 | } | 2249 | } |
2248 | } | 2250 | } |
2249 | 2251 | ||
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c index 4ecafbf91211..2c7d2d9be4d0 100644 --- a/drivers/s390/net/ctcm_main.c +++ b/drivers/s390/net/ctcm_main.c | |||
@@ -267,7 +267,7 @@ static struct channel *channel_get(enum ctcm_channel_types type, | |||
267 | else { | 267 | else { |
268 | ch->flags |= CHANNEL_FLAGS_INUSE; | 268 | ch->flags |= CHANNEL_FLAGS_INUSE; |
269 | ch->flags &= ~CHANNEL_FLAGS_RWMASK; | 269 | ch->flags &= ~CHANNEL_FLAGS_RWMASK; |
270 | ch->flags |= (direction == WRITE) | 270 | ch->flags |= (direction == CTCM_WRITE) |
271 | ? CHANNEL_FLAGS_WRITE : CHANNEL_FLAGS_READ; | 271 | ? CHANNEL_FLAGS_WRITE : CHANNEL_FLAGS_READ; |
272 | fsm_newstate(ch->fsm, CTC_STATE_STOPPED); | 272 | fsm_newstate(ch->fsm, CTC_STATE_STOPPED); |
273 | } | 273 | } |
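
The channel_get() hunk above folds the claimed direction into ch->flags; elsewhere in these files the direction is read back through CHANNEL_DIRECTION() and compared against CTCM_READ/CTCM_WRITE. Below is a standalone sketch of that flag handling; the CHANNEL_FLAGS_* values and the CHANNEL_DIRECTION() definition are assumptions chosen to match how the hunks use them, not the real ctcm_main.h definitions:

```c
#include <stdio.h>

#define CTCM_READ  0
#define CTCM_WRITE 1

#define CHANNEL_FLAGS_READ   0x0	/* assumed encoding */
#define CHANNEL_FLAGS_WRITE  0x1	/* assumed encoding */
#define CHANNEL_FLAGS_RWMASK 0x1	/* assumed encoding */
#define CHANNEL_FLAGS_INUSE  0x2	/* assumed encoding */
#define CHANNEL_DIRECTION(f) ((f) & CHANNEL_FLAGS_RWMASK)

struct example_channel {
	unsigned int flags;
};

/* Mirrors the claim logic in the hunk above: mark in use, clear the
 * direction bits, then set the requested direction. */
static void example_claim(struct example_channel *ch, int direction)
{
	ch->flags |= CHANNEL_FLAGS_INUSE;
	ch->flags &= ~CHANNEL_FLAGS_RWMASK;
	ch->flags |= (direction == CTCM_WRITE) ?
		     CHANNEL_FLAGS_WRITE : CHANNEL_FLAGS_READ;
}

int main(void)
{
	struct example_channel ch = { 0 };

	example_claim(&ch, CTCM_WRITE);
	printf("%s\n", CHANNEL_DIRECTION(ch.flags) == CTCM_READ ? "RX" : "TX");
	return 0;
}
```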
@@ -388,7 +388,8 @@ int ctcm_ch_alloc_buffer(struct channel *ch) | |||
388 | CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, | 388 | CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, |
389 | "%s(%s): %s trans_skb allocation error", | 389 | "%s(%s): %s trans_skb allocation error", |
390 | CTCM_FUNTAIL, ch->id, | 390 | CTCM_FUNTAIL, ch->id, |
391 | (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX"); | 391 | (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? |
392 | "RX" : "TX"); | ||
392 | return -ENOMEM; | 393 | return -ENOMEM; |
393 | } | 394 | } |
394 | 395 | ||
@@ -399,7 +400,8 @@ int ctcm_ch_alloc_buffer(struct channel *ch) | |||
399 | CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, | 400 | CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, |
400 | "%s(%s): %s set norm_cda failed", | 401 | "%s(%s): %s set norm_cda failed", |
401 | CTCM_FUNTAIL, ch->id, | 402 | CTCM_FUNTAIL, ch->id, |
402 | (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX"); | 403 | (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? |
404 | "RX" : "TX"); | ||
403 | return -ENOMEM; | 405 | return -ENOMEM; |
404 | } | 406 | } |
405 | 407 | ||
@@ -603,14 +605,14 @@ static void ctcmpc_send_sweep_req(struct channel *rch) | |||
603 | 605 | ||
604 | priv = dev->ml_priv; | 606 | priv = dev->ml_priv; |
605 | grp = priv->mpcg; | 607 | grp = priv->mpcg; |
606 | ch = priv->channel[WRITE]; | 608 | ch = priv->channel[CTCM_WRITE]; |
607 | 609 | ||
608 | /* sweep processing is not complete until response and request */ | 610 | /* sweep processing is not complete until response and request */ |
609 | /* has completed for all read channels in group */ | 611 | /* has completed for all read channels in group */ |
610 | if (grp->in_sweep == 0) { | 612 | if (grp->in_sweep == 0) { |
611 | grp->in_sweep = 1; | 613 | grp->in_sweep = 1; |
612 | grp->sweep_rsp_pend_num = grp->active_channels[READ]; | 614 | grp->sweep_rsp_pend_num = grp->active_channels[CTCM_READ]; |
613 | grp->sweep_req_pend_num = grp->active_channels[READ]; | 615 | grp->sweep_req_pend_num = grp->active_channels[CTCM_READ]; |
614 | } | 616 | } |
615 | 617 | ||
616 | sweep_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC|GFP_DMA); | 618 | sweep_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC|GFP_DMA); |
@@ -911,7 +913,7 @@ static int ctcm_tx(struct sk_buff *skb, struct net_device *dev) | |||
911 | return NETDEV_TX_BUSY; | 913 | return NETDEV_TX_BUSY; |
912 | 914 | ||
913 | dev->trans_start = jiffies; | 915 | dev->trans_start = jiffies; |
914 | if (ctcm_transmit_skb(priv->channel[WRITE], skb) != 0) | 916 | if (ctcm_transmit_skb(priv->channel[CTCM_WRITE], skb) != 0) |
915 | return NETDEV_TX_BUSY; | 917 | return NETDEV_TX_BUSY; |
916 | return NETDEV_TX_OK; | 918 | return NETDEV_TX_OK; |
917 | } | 919 | } |
@@ -994,7 +996,7 @@ static int ctcmpc_tx(struct sk_buff *skb, struct net_device *dev) | |||
994 | } | 996 | } |
995 | 997 | ||
996 | dev->trans_start = jiffies; | 998 | dev->trans_start = jiffies; |
997 | if (ctcmpc_transmit_skb(priv->channel[WRITE], skb) != 0) { | 999 | if (ctcmpc_transmit_skb(priv->channel[CTCM_WRITE], skb) != 0) { |
998 | CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, | 1000 | CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, |
999 | "%s(%s): device error - dropped", | 1001 | "%s(%s): device error - dropped", |
1000 | CTCM_FUNTAIL, dev->name); | 1002 | CTCM_FUNTAIL, dev->name); |
@@ -1035,7 +1037,7 @@ static int ctcm_change_mtu(struct net_device *dev, int new_mtu) | |||
1035 | return -EINVAL; | 1037 | return -EINVAL; |
1036 | 1038 | ||
1037 | priv = dev->ml_priv; | 1039 | priv = dev->ml_priv; |
1038 | max_bufsize = priv->channel[READ]->max_bufsize; | 1040 | max_bufsize = priv->channel[CTCM_READ]->max_bufsize; |
1039 | 1041 | ||
1040 | if (IS_MPC(priv)) { | 1042 | if (IS_MPC(priv)) { |
1041 | if (new_mtu > max_bufsize - TH_HEADER_LENGTH) | 1043 | if (new_mtu > max_bufsize - TH_HEADER_LENGTH) |
@@ -1152,7 +1154,7 @@ static struct net_device *ctcm_init_netdevice(struct ctcm_priv *priv) | |||
1152 | dev_fsm, dev_fsm_len, GFP_KERNEL); | 1154 | dev_fsm, dev_fsm_len, GFP_KERNEL); |
1153 | if (priv->fsm == NULL) { | 1155 | if (priv->fsm == NULL) { |
1154 | CTCMY_DBF_DEV(SETUP, dev, "init_fsm error"); | 1156 | CTCMY_DBF_DEV(SETUP, dev, "init_fsm error"); |
1155 | kfree(dev); | 1157 | free_netdev(dev); |
1156 | return NULL; | 1158 | return NULL; |
1157 | } | 1159 | } |
1158 | fsm_newstate(priv->fsm, DEV_STATE_STOPPED); | 1160 | fsm_newstate(priv->fsm, DEV_STATE_STOPPED); |
@@ -1163,7 +1165,7 @@ static struct net_device *ctcm_init_netdevice(struct ctcm_priv *priv) | |||
1163 | grp = ctcmpc_init_mpc_group(priv); | 1165 | grp = ctcmpc_init_mpc_group(priv); |
1164 | if (grp == NULL) { | 1166 | if (grp == NULL) { |
1165 | MPC_DBF_DEV(SETUP, dev, "init_mpc_group error"); | 1167 | MPC_DBF_DEV(SETUP, dev, "init_mpc_group error"); |
1166 | kfree(dev); | 1168 | free_netdev(dev); |
1167 | return NULL; | 1169 | return NULL; |
1168 | } | 1170 | } |
1169 | tasklet_init(&grp->mpc_tasklet2, | 1171 | tasklet_init(&grp->mpc_tasklet2, |
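
Both ctcm_init_netdevice() hunks above swap kfree(dev) for free_netdev(dev) on the early-error paths. Assuming dev comes from alloc_netdev(), free_netdev() is the required counterpart: it runs the netdev-specific teardown that a raw kfree() of the pointer would skip. A hedged, illustrative error-path sketch (function names are made up, not the CTCM code):

```c
/*
 * Sketch only: the real function builds a CTCM net_device and its FSMs.
 * The point shown is the alloc_netdev()/free_netdev() pairing.
 */
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static int example_setup(struct net_device *dev)
{
	return -ENOMEM;			/* pretend a later init step failed */
}

static struct net_device *example_init_netdevice(void)
{
	struct net_device *dev;

	dev = alloc_etherdev(0);	/* stand-in for the driver's allocator */
	if (!dev)
		return NULL;

	if (example_setup(dev) != 0) {
		free_netdev(dev);	/* correct error-path release, not kfree(dev) */
		return NULL;
	}
	return dev;
}
```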
@@ -1226,10 +1228,10 @@ static void ctcm_irq_handler(struct ccw_device *cdev, | |||
1226 | priv = dev_get_drvdata(&cgdev->dev); | 1228 | priv = dev_get_drvdata(&cgdev->dev); |
1227 | 1229 | ||
1228 | /* Try to extract channel from driver data. */ | 1230 | /* Try to extract channel from driver data. */ |
1229 | if (priv->channel[READ]->cdev == cdev) | 1231 | if (priv->channel[CTCM_READ]->cdev == cdev) |
1230 | ch = priv->channel[READ]; | 1232 | ch = priv->channel[CTCM_READ]; |
1231 | else if (priv->channel[WRITE]->cdev == cdev) | 1233 | else if (priv->channel[CTCM_WRITE]->cdev == cdev) |
1232 | ch = priv->channel[WRITE]; | 1234 | ch = priv->channel[CTCM_WRITE]; |
1233 | else { | 1235 | else { |
1234 | dev_err(&cdev->dev, | 1236 | dev_err(&cdev->dev, |
1235 | "%s: Internal error: Can't determine channel for " | 1237 | "%s: Internal error: Can't determine channel for " |
@@ -1587,13 +1589,13 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev) | |||
1587 | goto out_ccw2; | 1589 | goto out_ccw2; |
1588 | } | 1590 | } |
1589 | 1591 | ||
1590 | for (direction = READ; direction <= WRITE; direction++) { | 1592 | for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) { |
1591 | priv->channel[direction] = | 1593 | priv->channel[direction] = |
1592 | channel_get(type, direction == READ ? read_id : write_id, | 1594 | channel_get(type, direction == CTCM_READ ? |
1593 | direction); | 1595 | read_id : write_id, direction); |
1594 | if (priv->channel[direction] == NULL) { | 1596 | if (priv->channel[direction] == NULL) { |
1595 | if (direction == WRITE) | 1597 | if (direction == CTCM_WRITE) |
1596 | channel_free(priv->channel[READ]); | 1598 | channel_free(priv->channel[CTCM_READ]); |
1597 | goto out_dev; | 1599 | goto out_dev; |
1598 | } | 1600 | } |
1599 | priv->channel[direction]->netdev = dev; | 1601 | priv->channel[direction]->netdev = dev; |
@@ -1617,13 +1619,13 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev) | |||
1617 | 1619 | ||
1618 | dev_info(&dev->dev, | 1620 | dev_info(&dev->dev, |
1619 | "setup OK : r/w = %s/%s, protocol : %d\n", | 1621 | "setup OK : r/w = %s/%s, protocol : %d\n", |
1620 | priv->channel[READ]->id, | 1622 | priv->channel[CTCM_READ]->id, |
1621 | priv->channel[WRITE]->id, priv->protocol); | 1623 | priv->channel[CTCM_WRITE]->id, priv->protocol); |
1622 | 1624 | ||
1623 | CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, | 1625 | CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, |
1624 | "setup(%s) OK : r/w = %s/%s, protocol : %d", dev->name, | 1626 | "setup(%s) OK : r/w = %s/%s, protocol : %d", dev->name, |
1625 | priv->channel[READ]->id, | 1627 | priv->channel[CTCM_READ]->id, |
1626 | priv->channel[WRITE]->id, priv->protocol); | 1628 | priv->channel[CTCM_WRITE]->id, priv->protocol); |
1627 | 1629 | ||
1628 | return 0; | 1630 | return 0; |
1629 | out_unregister: | 1631 | out_unregister: |
@@ -1635,10 +1637,10 @@ out_ccw2: | |||
1635 | out_ccw1: | 1637 | out_ccw1: |
1636 | ccw_device_set_offline(cgdev->cdev[0]); | 1638 | ccw_device_set_offline(cgdev->cdev[0]); |
1637 | out_remove_channel2: | 1639 | out_remove_channel2: |
1638 | readc = channel_get(type, read_id, READ); | 1640 | readc = channel_get(type, read_id, CTCM_READ); |
1639 | channel_remove(readc); | 1641 | channel_remove(readc); |
1640 | out_remove_channel1: | 1642 | out_remove_channel1: |
1641 | writec = channel_get(type, write_id, WRITE); | 1643 | writec = channel_get(type, write_id, CTCM_WRITE); |
1642 | channel_remove(writec); | 1644 | channel_remove(writec); |
1643 | out_err_result: | 1645 | out_err_result: |
1644 | return result; | 1646 | return result; |
@@ -1660,19 +1662,19 @@ static int ctcm_shutdown_device(struct ccwgroup_device *cgdev) | |||
1660 | if (!priv) | 1662 | if (!priv) |
1661 | return -ENODEV; | 1663 | return -ENODEV; |
1662 | 1664 | ||
1663 | if (priv->channel[READ]) { | 1665 | if (priv->channel[CTCM_READ]) { |
1664 | dev = priv->channel[READ]->netdev; | 1666 | dev = priv->channel[CTCM_READ]->netdev; |
1665 | CTCM_DBF_DEV(SETUP, dev, ""); | 1667 | CTCM_DBF_DEV(SETUP, dev, ""); |
1666 | /* Close the device */ | 1668 | /* Close the device */ |
1667 | ctcm_close(dev); | 1669 | ctcm_close(dev); |
1668 | dev->flags &= ~IFF_RUNNING; | 1670 | dev->flags &= ~IFF_RUNNING; |
1669 | ctcm_remove_attributes(&cgdev->dev); | 1671 | ctcm_remove_attributes(&cgdev->dev); |
1670 | channel_free(priv->channel[READ]); | 1672 | channel_free(priv->channel[CTCM_READ]); |
1671 | } else | 1673 | } else |
1672 | dev = NULL; | 1674 | dev = NULL; |
1673 | 1675 | ||
1674 | if (priv->channel[WRITE]) | 1676 | if (priv->channel[CTCM_WRITE]) |
1675 | channel_free(priv->channel[WRITE]); | 1677 | channel_free(priv->channel[CTCM_WRITE]); |
1676 | 1678 | ||
1677 | if (dev) { | 1679 | if (dev) { |
1678 | unregister_netdev(dev); | 1680 | unregister_netdev(dev); |
@@ -1685,11 +1687,11 @@ static int ctcm_shutdown_device(struct ccwgroup_device *cgdev) | |||
1685 | ccw_device_set_offline(cgdev->cdev[1]); | 1687 | ccw_device_set_offline(cgdev->cdev[1]); |
1686 | ccw_device_set_offline(cgdev->cdev[0]); | 1688 | ccw_device_set_offline(cgdev->cdev[0]); |
1687 | 1689 | ||
1688 | if (priv->channel[READ]) | 1690 | if (priv->channel[CTCM_READ]) |
1689 | channel_remove(priv->channel[READ]); | 1691 | channel_remove(priv->channel[CTCM_READ]); |
1690 | if (priv->channel[WRITE]) | 1692 | if (priv->channel[CTCM_WRITE]) |
1691 | channel_remove(priv->channel[WRITE]); | 1693 | channel_remove(priv->channel[CTCM_WRITE]); |
1692 | priv->channel[READ] = priv->channel[WRITE] = NULL; | 1694 | priv->channel[CTCM_READ] = priv->channel[CTCM_WRITE] = NULL; |
1693 | 1695 | ||
1694 | return 0; | 1696 | return 0; |
1695 | 1697 | ||
@@ -1720,11 +1722,11 @@ static int ctcm_pm_suspend(struct ccwgroup_device *gdev) | |||
1720 | 1722 | ||
1721 | if (gdev->state == CCWGROUP_OFFLINE) | 1723 | if (gdev->state == CCWGROUP_OFFLINE) |
1722 | return 0; | 1724 | return 0; |
1723 | netif_device_detach(priv->channel[READ]->netdev); | 1725 | netif_device_detach(priv->channel[CTCM_READ]->netdev); |
1724 | ctcm_close(priv->channel[READ]->netdev); | 1726 | ctcm_close(priv->channel[CTCM_READ]->netdev); |
1725 | if (!wait_event_timeout(priv->fsm->wait_q, | 1727 | if (!wait_event_timeout(priv->fsm->wait_q, |
1726 | fsm_getstate(priv->fsm) == DEV_STATE_STOPPED, CTCM_TIME_5_SEC)) { | 1728 | fsm_getstate(priv->fsm) == DEV_STATE_STOPPED, CTCM_TIME_5_SEC)) { |
1727 | netif_device_attach(priv->channel[READ]->netdev); | 1729 | netif_device_attach(priv->channel[CTCM_READ]->netdev); |
1728 | return -EBUSY; | 1730 | return -EBUSY; |
1729 | } | 1731 | } |
1730 | ccw_device_set_offline(gdev->cdev[1]); | 1732 | ccw_device_set_offline(gdev->cdev[1]); |
@@ -1745,9 +1747,9 @@ static int ctcm_pm_resume(struct ccwgroup_device *gdev) | |||
1745 | rc = ccw_device_set_online(gdev->cdev[0]); | 1747 | rc = ccw_device_set_online(gdev->cdev[0]); |
1746 | if (rc) | 1748 | if (rc) |
1747 | goto err_out; | 1749 | goto err_out; |
1748 | ctcm_open(priv->channel[READ]->netdev); | 1750 | ctcm_open(priv->channel[CTCM_READ]->netdev); |
1749 | err_out: | 1751 | err_out: |
1750 | netif_device_attach(priv->channel[READ]->netdev); | 1752 | netif_device_attach(priv->channel[CTCM_READ]->netdev); |
1751 | return rc; | 1753 | return rc; |
1752 | } | 1754 | } |
1753 | 1755 | ||
diff --git a/drivers/s390/net/ctcm_main.h b/drivers/s390/net/ctcm_main.h index d34fa14f44e7..24d5215eb0c4 100644 --- a/drivers/s390/net/ctcm_main.h +++ b/drivers/s390/net/ctcm_main.h | |||
@@ -111,8 +111,8 @@ enum ctcm_channel_types { | |||
111 | 111 | ||
112 | #define CTCM_INITIAL_BLOCKLEN 2 | 112 | #define CTCM_INITIAL_BLOCKLEN 2 |
113 | 113 | ||
114 | #define READ 0 | 114 | #define CTCM_READ 0 |
115 | #define WRITE 1 | 115 | #define CTCM_WRITE 1 |
116 | 116 | ||
117 | #define CTCM_ID_SIZE 20+3 | 117 | #define CTCM_ID_SIZE 20+3 |
118 | 118 | ||
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c index 87c24d2936d6..b64881f33f23 100644 --- a/drivers/s390/net/ctcm_mpc.c +++ b/drivers/s390/net/ctcm_mpc.c | |||
@@ -419,8 +419,8 @@ void ctc_mpc_establish_connectivity(int port_num, | |||
419 | return; | 419 | return; |
420 | priv = dev->ml_priv; | 420 | priv = dev->ml_priv; |
421 | grp = priv->mpcg; | 421 | grp = priv->mpcg; |
422 | rch = priv->channel[READ]; | 422 | rch = priv->channel[CTCM_READ]; |
423 | wch = priv->channel[WRITE]; | 423 | wch = priv->channel[CTCM_WRITE]; |
424 | 424 | ||
425 | CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_INFO, | 425 | CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_INFO, |
426 | "%s(%s): state=%s", | 426 | "%s(%s): state=%s", |
@@ -540,7 +540,7 @@ void ctc_mpc_dealloc_ch(int port_num) | |||
540 | 540 | ||
541 | CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_DEBUG, | 541 | CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_DEBUG, |
542 | "%s: %s: refcount = %d\n", | 542 | "%s: %s: refcount = %d\n", |
543 | CTCM_FUNTAIL, dev->name, atomic_read(&dev->refcnt)); | 543 | CTCM_FUNTAIL, dev->name, netdev_refcnt_read(dev)); |
544 | 544 | ||
545 | fsm_deltimer(&priv->restart_timer); | 545 | fsm_deltimer(&priv->restart_timer); |
546 | grp->channels_terminating = 0; | 546 | grp->channels_terminating = 0; |
@@ -578,7 +578,7 @@ void ctc_mpc_flow_control(int port_num, int flowc) | |||
578 | "%s: %s: flowc = %d", | 578 | "%s: %s: flowc = %d", |
579 | CTCM_FUNTAIL, dev->name, flowc); | 579 | CTCM_FUNTAIL, dev->name, flowc); |
580 | 580 | ||
581 | rch = priv->channel[READ]; | 581 | rch = priv->channel[CTCM_READ]; |
582 | 582 | ||
583 | mpcg_state = fsm_getstate(grp->fsm); | 583 | mpcg_state = fsm_getstate(grp->fsm); |
584 | switch (flowc) { | 584 | switch (flowc) { |
@@ -622,7 +622,7 @@ static void mpc_rcvd_sweep_resp(struct mpcg_info *mpcginfo) | |||
622 | struct net_device *dev = rch->netdev; | 622 | struct net_device *dev = rch->netdev; |
623 | struct ctcm_priv *priv = dev->ml_priv; | 623 | struct ctcm_priv *priv = dev->ml_priv; |
624 | struct mpc_group *grp = priv->mpcg; | 624 | struct mpc_group *grp = priv->mpcg; |
625 | struct channel *ch = priv->channel[WRITE]; | 625 | struct channel *ch = priv->channel[CTCM_WRITE]; |
626 | 626 | ||
627 | CTCM_PR_DEBUG("%s: ch=0x%p id=%s\n", __func__, ch, ch->id); | 627 | CTCM_PR_DEBUG("%s: ch=0x%p id=%s\n", __func__, ch, ch->id); |
628 | CTCM_D3_DUMP((char *)mpcginfo->sweep, TH_SWEEP_LENGTH); | 628 | CTCM_D3_DUMP((char *)mpcginfo->sweep, TH_SWEEP_LENGTH); |
@@ -656,7 +656,7 @@ static void ctcmpc_send_sweep_resp(struct channel *rch) | |||
656 | int rc = 0; | 656 | int rc = 0; |
657 | struct th_sweep *header; | 657 | struct th_sweep *header; |
658 | struct sk_buff *sweep_skb; | 658 | struct sk_buff *sweep_skb; |
659 | struct channel *ch = priv->channel[WRITE]; | 659 | struct channel *ch = priv->channel[CTCM_WRITE]; |
660 | 660 | ||
661 | CTCM_PR_DEBUG("%s: ch=0x%p id=%s\n", __func__, rch, rch->id); | 661 | CTCM_PR_DEBUG("%s: ch=0x%p id=%s\n", __func__, rch, rch->id); |
662 | 662 | ||
@@ -712,7 +712,7 @@ static void mpc_rcvd_sweep_req(struct mpcg_info *mpcginfo) | |||
712 | struct net_device *dev = rch->netdev; | 712 | struct net_device *dev = rch->netdev; |
713 | struct ctcm_priv *priv = dev->ml_priv; | 713 | struct ctcm_priv *priv = dev->ml_priv; |
714 | struct mpc_group *grp = priv->mpcg; | 714 | struct mpc_group *grp = priv->mpcg; |
715 | struct channel *ch = priv->channel[WRITE]; | 715 | struct channel *ch = priv->channel[CTCM_WRITE]; |
716 | 716 | ||
717 | if (do_debug) | 717 | if (do_debug) |
718 | CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG, | 718 | CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG, |
@@ -721,8 +721,8 @@ static void mpc_rcvd_sweep_req(struct mpcg_info *mpcginfo) | |||
721 | if (grp->in_sweep == 0) { | 721 | if (grp->in_sweep == 0) { |
722 | grp->in_sweep = 1; | 722 | grp->in_sweep = 1; |
723 | ctcm_test_and_set_busy(dev); | 723 | ctcm_test_and_set_busy(dev); |
724 | grp->sweep_req_pend_num = grp->active_channels[READ]; | 724 | grp->sweep_req_pend_num = grp->active_channels[CTCM_READ]; |
725 | grp->sweep_rsp_pend_num = grp->active_channels[READ]; | 725 | grp->sweep_rsp_pend_num = grp->active_channels[CTCM_READ]; |
726 | } | 726 | } |
727 | 727 | ||
728 | CTCM_D3_DUMP((char *)mpcginfo->sweep, TH_SWEEP_LENGTH); | 728 | CTCM_D3_DUMP((char *)mpcginfo->sweep, TH_SWEEP_LENGTH); |
@@ -906,14 +906,14 @@ void mpc_group_ready(unsigned long adev) | |||
906 | fsm_newstate(grp->fsm, MPCG_STATE_READY); | 906 | fsm_newstate(grp->fsm, MPCG_STATE_READY); |
907 | 907 | ||
908 | /* Put up a read on the channel */ | 908 | /* Put up a read on the channel */ |
909 | ch = priv->channel[READ]; | 909 | ch = priv->channel[CTCM_READ]; |
910 | ch->pdu_seq = 0; | 910 | ch->pdu_seq = 0; |
911 | CTCM_PR_DBGDATA("ctcmpc: %s() ToDCM_pdu_seq= %08x\n" , | 911 | CTCM_PR_DBGDATA("ctcmpc: %s() ToDCM_pdu_seq= %08x\n" , |
912 | __func__, ch->pdu_seq); | 912 | __func__, ch->pdu_seq); |
913 | 913 | ||
914 | ctcmpc_chx_rxidle(ch->fsm, CTC_EVENT_START, ch); | 914 | ctcmpc_chx_rxidle(ch->fsm, CTC_EVENT_START, ch); |
915 | /* Put the write channel in idle state */ | 915 | /* Put the write channel in idle state */ |
916 | ch = priv->channel[WRITE]; | 916 | ch = priv->channel[CTCM_WRITE]; |
917 | if (ch->collect_len > 0) { | 917 | if (ch->collect_len > 0) { |
918 | spin_lock(&ch->collect_lock); | 918 | spin_lock(&ch->collect_lock); |
919 | ctcm_purge_skb_queue(&ch->collect_queue); | 919 | ctcm_purge_skb_queue(&ch->collect_queue); |
@@ -960,7 +960,8 @@ void mpc_channel_action(struct channel *ch, int direction, int action) | |||
960 | "%s: %i / Grp:%s total_channels=%i, active_channels: " | 960 | "%s: %i / Grp:%s total_channels=%i, active_channels: " |
961 | "read=%i, write=%i\n", __func__, action, | 961 | "read=%i, write=%i\n", __func__, action, |
962 | fsm_getstate_str(grp->fsm), grp->num_channel_paths, | 962 | fsm_getstate_str(grp->fsm), grp->num_channel_paths, |
963 | grp->active_channels[READ], grp->active_channels[WRITE]); | 963 | grp->active_channels[CTCM_READ], |
964 | grp->active_channels[CTCM_WRITE]); | ||
964 | 965 | ||
965 | if ((action == MPC_CHANNEL_ADD) && (ch->in_mpcgroup == 0)) { | 966 | if ((action == MPC_CHANNEL_ADD) && (ch->in_mpcgroup == 0)) { |
966 | grp->num_channel_paths++; | 967 | grp->num_channel_paths++; |
@@ -994,10 +995,11 @@ void mpc_channel_action(struct channel *ch, int direction, int action) | |||
994 | grp->xid_skb->data, | 995 | grp->xid_skb->data, |
995 | grp->xid_skb->len); | 996 | grp->xid_skb->len); |
996 | 997 | ||
997 | ch->xid->xid2_dlc_type = ((CHANNEL_DIRECTION(ch->flags) == READ) | 998 | ch->xid->xid2_dlc_type = |
999 | ((CHANNEL_DIRECTION(ch->flags) == CTCM_READ) | ||
998 | ? XID2_READ_SIDE : XID2_WRITE_SIDE); | 1000 | ? XID2_READ_SIDE : XID2_WRITE_SIDE); |
999 | 1001 | ||
1000 | if (CHANNEL_DIRECTION(ch->flags) == WRITE) | 1002 | if (CHANNEL_DIRECTION(ch->flags) == CTCM_WRITE) |
1001 | ch->xid->xid2_buf_len = 0x00; | 1003 | ch->xid->xid2_buf_len = 0x00; |
1002 | 1004 | ||
1003 | ch->xid_skb->data = ch->xid_skb_data; | 1005 | ch->xid_skb->data = ch->xid_skb_data; |
@@ -1006,8 +1008,8 @@ void mpc_channel_action(struct channel *ch, int direction, int action) | |||
1006 | 1008 | ||
1007 | fsm_newstate(ch->fsm, CH_XID0_PENDING); | 1009 | fsm_newstate(ch->fsm, CH_XID0_PENDING); |
1008 | 1010 | ||
1009 | if ((grp->active_channels[READ] > 0) && | 1011 | if ((grp->active_channels[CTCM_READ] > 0) && |
1010 | (grp->active_channels[WRITE] > 0) && | 1012 | (grp->active_channels[CTCM_WRITE] > 0) && |
1011 | (fsm_getstate(grp->fsm) < MPCG_STATE_XID2INITW)) { | 1013 | (fsm_getstate(grp->fsm) < MPCG_STATE_XID2INITW)) { |
1012 | fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW); | 1014 | fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW); |
1013 | CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_NOTICE, | 1015 | CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_NOTICE, |
@@ -1027,10 +1029,10 @@ void mpc_channel_action(struct channel *ch, int direction, int action) | |||
1027 | if (grp->channels_terminating) | 1029 | if (grp->channels_terminating) |
1028 | goto done; | 1030 | goto done; |
1029 | 1031 | ||
1030 | if (((grp->active_channels[READ] == 0) && | 1032 | if (((grp->active_channels[CTCM_READ] == 0) && |
1031 | (grp->active_channels[WRITE] > 0)) | 1033 | (grp->active_channels[CTCM_WRITE] > 0)) |
1032 | || ((grp->active_channels[WRITE] == 0) && | 1034 | || ((grp->active_channels[CTCM_WRITE] == 0) && |
1033 | (grp->active_channels[READ] > 0))) | 1035 | (grp->active_channels[CTCM_READ] > 0))) |
1034 | fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); | 1036 | fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); |
1035 | } | 1037 | } |
1036 | done: | 1038 | done: |
@@ -1038,7 +1040,8 @@ done: | |||
1038 | "exit %s: %i / Grp:%s total_channels=%i, active_channels: " | 1040 | "exit %s: %i / Grp:%s total_channels=%i, active_channels: " |
1039 | "read=%i, write=%i\n", __func__, action, | 1041 | "read=%i, write=%i\n", __func__, action, |
1040 | fsm_getstate_str(grp->fsm), grp->num_channel_paths, | 1042 | fsm_getstate_str(grp->fsm), grp->num_channel_paths, |
1041 | grp->active_channels[READ], grp->active_channels[WRITE]); | 1043 | grp->active_channels[CTCM_READ], |
1044 | grp->active_channels[CTCM_WRITE]); | ||
1042 | 1045 | ||
1043 | CTCM_PR_DEBUG("exit %s: ch=0x%p id=%s\n", __func__, ch, ch->id); | 1046 | CTCM_PR_DEBUG("exit %s: ch=0x%p id=%s\n", __func__, ch, ch->id); |
1044 | } | 1047 | } |
@@ -1392,8 +1395,8 @@ static void mpc_action_go_inop(fsm_instance *fi, int event, void *arg) | |||
1392 | (grp->port_persist == 0)) | 1395 | (grp->port_persist == 0)) |
1393 | fsm_deltimer(&priv->restart_timer); | 1396 | fsm_deltimer(&priv->restart_timer); |
1394 | 1397 | ||
1395 | wch = priv->channel[WRITE]; | 1398 | wch = priv->channel[CTCM_WRITE]; |
1396 | rch = priv->channel[READ]; | 1399 | rch = priv->channel[CTCM_READ]; |
1397 | 1400 | ||
1398 | switch (grp->saved_state) { | 1401 | switch (grp->saved_state) { |
1399 | case MPCG_STATE_RESET: | 1402 | case MPCG_STATE_RESET: |
@@ -1480,8 +1483,8 @@ static void mpc_action_timeout(fsm_instance *fi, int event, void *arg) | |||
1480 | 1483 | ||
1481 | priv = dev->ml_priv; | 1484 | priv = dev->ml_priv; |
1482 | grp = priv->mpcg; | 1485 | grp = priv->mpcg; |
1483 | wch = priv->channel[WRITE]; | 1486 | wch = priv->channel[CTCM_WRITE]; |
1484 | rch = priv->channel[READ]; | 1487 | rch = priv->channel[CTCM_READ]; |
1485 | 1488 | ||
1486 | switch (fsm_getstate(grp->fsm)) { | 1489 | switch (fsm_getstate(grp->fsm)) { |
1487 | case MPCG_STATE_XID2INITW: | 1490 | case MPCG_STATE_XID2INITW: |
@@ -1586,7 +1589,7 @@ static int mpc_validate_xid(struct mpcg_info *mpcginfo) | |||
1586 | CTCM_D3_DUMP((char *)xid, XID2_LENGTH); | 1589 | CTCM_D3_DUMP((char *)xid, XID2_LENGTH); |
1587 | 1590 | ||
1588 | /*the received direction should be the opposite of ours */ | 1591 | /*the received direction should be the opposite of ours */ |
1589 | if (((CHANNEL_DIRECTION(ch->flags) == READ) ? XID2_WRITE_SIDE : | 1592 | if (((CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? XID2_WRITE_SIDE : |
1590 | XID2_READ_SIDE) != xid->xid2_dlc_type) { | 1593 | XID2_READ_SIDE) != xid->xid2_dlc_type) { |
1591 | rc = 2; | 1594 | rc = 2; |
1592 | /* XID REJECTED: r/w channel pairing mismatch */ | 1595 | /* XID REJECTED: r/w channel pairing mismatch */ |
@@ -1912,7 +1915,7 @@ static void mpc_action_doxid7(fsm_instance *fsm, int event, void *arg) | |||
1912 | if (grp == NULL) | 1915 | if (grp == NULL) |
1913 | return; | 1916 | return; |
1914 | 1917 | ||
1915 | for (direction = READ; direction <= WRITE; direction++) { | 1918 | for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) { |
1916 | struct channel *ch = priv->channel[direction]; | 1919 | struct channel *ch = priv->channel[direction]; |
1917 | struct xid2 *thisxid = ch->xid; | 1920 | struct xid2 *thisxid = ch->xid; |
1918 | ch->xid_skb->data = ch->xid_skb_data; | 1921 | ch->xid_skb->data = ch->xid_skb_data; |
@@ -2152,14 +2155,15 @@ static int mpc_send_qllc_discontact(struct net_device *dev) | |||
2152 | return -ENOMEM; | 2155 | return -ENOMEM; |
2153 | } | 2156 | } |
2154 | 2157 | ||
2155 | *((__u32 *)skb_push(skb, 4)) = priv->channel[READ]->pdu_seq; | 2158 | *((__u32 *)skb_push(skb, 4)) = |
2156 | priv->channel[READ]->pdu_seq++; | 2159 | priv->channel[CTCM_READ]->pdu_seq; |
2160 | priv->channel[CTCM_READ]->pdu_seq++; | ||
2157 | CTCM_PR_DBGDATA("ctcmpc: %s ToDCM_pdu_seq= %08x\n", | 2161 | CTCM_PR_DBGDATA("ctcmpc: %s ToDCM_pdu_seq= %08x\n", |
2158 | __func__, priv->channel[READ]->pdu_seq); | 2162 | __func__, priv->channel[CTCM_READ]->pdu_seq); |
2159 | 2163 | ||
2160 | /* receipt of CC03 resets anticipated sequence number on | 2164 | /* receipt of CC03 resets anticipated sequence number on |
2161 | receiving side */ | 2165 | receiving side */ |
2162 | priv->channel[READ]->pdu_seq = 0x00; | 2166 | priv->channel[CTCM_READ]->pdu_seq = 0x00; |
2163 | skb_reset_mac_header(skb); | 2167 | skb_reset_mac_header(skb); |
2164 | skb->dev = dev; | 2168 | skb->dev = dev; |
2165 | skb->protocol = htons(ETH_P_SNAP); | 2169 | skb->protocol = htons(ETH_P_SNAP); |
diff --git a/drivers/s390/net/ctcm_sysfs.c b/drivers/s390/net/ctcm_sysfs.c index 2b24550e865e..8305319b2a84 100644 --- a/drivers/s390/net/ctcm_sysfs.c +++ b/drivers/s390/net/ctcm_sysfs.c | |||
@@ -38,8 +38,8 @@ static ssize_t ctcm_buffer_write(struct device *dev, | |||
38 | int bs1; | 38 | int bs1; |
39 | struct ctcm_priv *priv = dev_get_drvdata(dev); | 39 | struct ctcm_priv *priv = dev_get_drvdata(dev); |
40 | 40 | ||
41 | if (!(priv && priv->channel[READ] && | 41 | ndev = priv->channel[CTCM_READ]->netdev; |
42 | (ndev = priv->channel[READ]->netdev))) { | 42 | if (!(priv && priv->channel[CTCM_READ] && ndev)) { |
43 | CTCM_DBF_TEXT(SETUP, CTC_DBF_ERROR, "bfnondev"); | 43 | CTCM_DBF_TEXT(SETUP, CTC_DBF_ERROR, "bfnondev"); |
44 | return -ENODEV; | 44 | return -ENODEV; |
45 | } | 45 | } |
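
One detail of the ctcm_buffer_write() hunk above, read as rendered: the new code assigns ndev from priv->channel[CTCM_READ]->netdev before the NULL test on priv and the channel, so the guard no longer protects that dereference. An illustrative reordering that keeps the check first, reusing only names visible in the hunk (a sketch, not the committed code):

```c
/* Illustrative fragment: test the pointers before following them. */
struct net_device *ndev;

if (!priv || !priv->channel[CTCM_READ] ||
    !priv->channel[CTCM_READ]->netdev) {
	CTCM_DBF_TEXT(SETUP, CTC_DBF_ERROR, "bfnondev");
	return -ENODEV;
}
ndev = priv->channel[CTCM_READ]->netdev;
```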
@@ -55,12 +55,12 @@ static ssize_t ctcm_buffer_write(struct device *dev, | |||
55 | (bs1 < (ndev->mtu + LL_HEADER_LENGTH + 2))) | 55 | (bs1 < (ndev->mtu + LL_HEADER_LENGTH + 2))) |
56 | goto einval; | 56 | goto einval; |
57 | 57 | ||
58 | priv->channel[READ]->max_bufsize = bs1; | 58 | priv->channel[CTCM_READ]->max_bufsize = bs1; |
59 | priv->channel[WRITE]->max_bufsize = bs1; | 59 | priv->channel[CTCM_WRITE]->max_bufsize = bs1; |
60 | if (!(ndev->flags & IFF_RUNNING)) | 60 | if (!(ndev->flags & IFF_RUNNING)) |
61 | ndev->mtu = bs1 - LL_HEADER_LENGTH - 2; | 61 | ndev->mtu = bs1 - LL_HEADER_LENGTH - 2; |
62 | priv->channel[READ]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED; | 62 | priv->channel[CTCM_READ]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED; |
63 | priv->channel[WRITE]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED; | 63 | priv->channel[CTCM_WRITE]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED; |
64 | 64 | ||
65 | CTCM_DBF_DEV(SETUP, ndev, buf); | 65 | CTCM_DBF_DEV(SETUP, ndev, buf); |
66 | return count; | 66 | return count; |
@@ -85,9 +85,9 @@ static void ctcm_print_statistics(struct ctcm_priv *priv) | |||
85 | p += sprintf(p, " Device FSM state: %s\n", | 85 | p += sprintf(p, " Device FSM state: %s\n", |
86 | fsm_getstate_str(priv->fsm)); | 86 | fsm_getstate_str(priv->fsm)); |
87 | p += sprintf(p, " RX channel FSM state: %s\n", | 87 | p += sprintf(p, " RX channel FSM state: %s\n", |
88 | fsm_getstate_str(priv->channel[READ]->fsm)); | 88 | fsm_getstate_str(priv->channel[CTCM_READ]->fsm)); |
89 | p += sprintf(p, " TX channel FSM state: %s\n", | 89 | p += sprintf(p, " TX channel FSM state: %s\n", |
90 | fsm_getstate_str(priv->channel[WRITE]->fsm)); | 90 | fsm_getstate_str(priv->channel[CTCM_WRITE]->fsm)); |
91 | p += sprintf(p, " Max. TX buffer used: %ld\n", | 91 | p += sprintf(p, " Max. TX buffer used: %ld\n", |
92 | priv->channel[WRITE]->prof.maxmulti); | 92 | priv->channel[WRITE]->prof.maxmulti); |
93 | p += sprintf(p, " Max. chained SKBs: %ld\n", | 93 | p += sprintf(p, " Max. chained SKBs: %ld\n", |
@@ -102,7 +102,7 @@ static void ctcm_print_statistics(struct ctcm_priv *priv) | |||
102 | priv->channel[WRITE]->prof.tx_time); | 102 | priv->channel[WRITE]->prof.tx_time); |
103 | 103 | ||
104 | printk(KERN_INFO "Statistics for %s:\n%s", | 104 | printk(KERN_INFO "Statistics for %s:\n%s", |
105 | priv->channel[WRITE]->netdev->name, sbuf); | 105 | priv->channel[CTCM_WRITE]->netdev->name, sbuf); |
106 | kfree(sbuf); | 106 | kfree(sbuf); |
107 | return; | 107 | return; |
108 | } | 108 | } |
@@ -125,7 +125,7 @@ static ssize_t stats_write(struct device *dev, struct device_attribute *attr, | |||
125 | return -ENODEV; | 125 | return -ENODEV; |
126 | /* Reset statistics */ | 126 | /* Reset statistics */ |
127 | memset(&priv->channel[WRITE]->prof, 0, | 127 | memset(&priv->channel[WRITE]->prof, 0, |
128 | sizeof(priv->channel[WRITE]->prof)); | 128 | sizeof(priv->channel[CTCM_WRITE]->prof)); |
129 | return count; | 129 | return count; |
130 | } | 130 | } |
131 | 131 | ||
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index d1257768be90..6be43eb126b4 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h | |||
@@ -676,6 +676,7 @@ enum qeth_discipline_id { | |||
676 | }; | 676 | }; |
677 | 677 | ||
678 | struct qeth_discipline { | 678 | struct qeth_discipline { |
679 | void (*start_poll)(struct ccw_device *, int, unsigned long); | ||
679 | qdio_handler_t *input_handler; | 680 | qdio_handler_t *input_handler; |
680 | qdio_handler_t *output_handler; | 681 | qdio_handler_t *output_handler; |
681 | int (*recover)(void *ptr); | 682 | int (*recover)(void *ptr); |
@@ -702,6 +703,16 @@ struct qeth_skb_data { | |||
702 | #define QETH_SKB_MAGIC 0x71657468 | 703 | #define QETH_SKB_MAGIC 0x71657468 |
703 | #define QETH_SIGA_CC2_RETRIES 3 | 704 | #define QETH_SIGA_CC2_RETRIES 3 |
704 | 705 | ||
706 | struct qeth_rx { | ||
707 | int b_count; | ||
708 | int b_index; | ||
709 | struct qdio_buffer_element *b_element; | ||
710 | int e_offset; | ||
711 | int qdio_err; | ||
712 | }; | ||
713 | |||
714 | #define QETH_NAPI_WEIGHT 128 | ||
715 | |||
705 | struct qeth_card { | 716 | struct qeth_card { |
706 | struct list_head list; | 717 | struct list_head list; |
707 | enum qeth_card_states state; | 718 | enum qeth_card_states state; |
@@ -749,6 +760,8 @@ struct qeth_card { | |||
749 | debug_info_t *debug; | 760 | debug_info_t *debug; |
750 | struct mutex conf_mutex; | 761 | struct mutex conf_mutex; |
751 | struct mutex discipline_mutex; | 762 | struct mutex discipline_mutex; |
763 | struct napi_struct napi; | ||
764 | struct qeth_rx rx; | ||
752 | }; | 765 | }; |
753 | 766 | ||
754 | struct qeth_card_list_struct { | 767 | struct qeth_card_list_struct { |
@@ -831,6 +844,10 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *, | |||
831 | struct qdio_buffer *, struct qdio_buffer_element **, int *, | 844 | struct qdio_buffer *, struct qdio_buffer_element **, int *, |
832 | struct qeth_hdr **); | 845 | struct qeth_hdr **); |
833 | void qeth_schedule_recovery(struct qeth_card *); | 846 | void qeth_schedule_recovery(struct qeth_card *); |
847 | void qeth_qdio_start_poll(struct ccw_device *, int, unsigned long); | ||
848 | void qeth_qdio_input_handler(struct ccw_device *, | ||
849 | unsigned int, unsigned int, int, | ||
850 | int, unsigned long); | ||
834 | void qeth_qdio_output_handler(struct ccw_device *, unsigned int, | 851 | void qeth_qdio_output_handler(struct ccw_device *, unsigned int, |
835 | int, int, int, unsigned long); | 852 | int, int, int, unsigned long); |
836 | void qeth_clear_ipacmd_list(struct qeth_card *); | 853 | void qeth_clear_ipacmd_list(struct qeth_card *); |
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 3a5a18a0fc28..764267062601 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c | |||
@@ -2911,6 +2911,27 @@ static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue) | |||
2911 | } | 2911 | } |
2912 | } | 2912 | } |
2913 | 2913 | ||
2914 | void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue, | ||
2915 | unsigned long card_ptr) | ||
2916 | { | ||
2917 | struct qeth_card *card = (struct qeth_card *)card_ptr; | ||
2918 | |||
2919 | if (card->dev) | ||
2920 | napi_schedule(&card->napi); | ||
2921 | } | ||
2922 | EXPORT_SYMBOL_GPL(qeth_qdio_start_poll); | ||
2923 | |||
2924 | void qeth_qdio_input_handler(struct ccw_device *ccwdev, unsigned int qdio_err, | ||
2925 | unsigned int queue, int first_element, int count, | ||
2926 | unsigned long card_ptr) | ||
2927 | { | ||
2928 | struct qeth_card *card = (struct qeth_card *)card_ptr; | ||
2929 | |||
2930 | if (qdio_err) | ||
2931 | qeth_schedule_recovery(card); | ||
2932 | } | ||
2933 | EXPORT_SYMBOL_GPL(qeth_qdio_input_handler); | ||
2934 | |||
2914 | void qeth_qdio_output_handler(struct ccw_device *ccwdev, | 2935 | void qeth_qdio_output_handler(struct ccw_device *ccwdev, |
2915 | unsigned int qdio_error, int __queue, int first_element, | 2936 | unsigned int qdio_error, int __queue, int first_element, |
2916 | int count, unsigned long card_ptr) | 2937 | int count, unsigned long card_ptr) |
@@ -3843,6 +3864,7 @@ static int qeth_qdio_establish(struct qeth_card *card) | |||
3843 | init_data.no_output_qs = card->qdio.no_out_queues; | 3864 | init_data.no_output_qs = card->qdio.no_out_queues; |
3844 | init_data.input_handler = card->discipline.input_handler; | 3865 | init_data.input_handler = card->discipline.input_handler; |
3845 | init_data.output_handler = card->discipline.output_handler; | 3866 | init_data.output_handler = card->discipline.output_handler; |
3867 | init_data.queue_start_poll = card->discipline.start_poll; | ||
3846 | init_data.int_parm = (unsigned long) card; | 3868 | init_data.int_parm = (unsigned long) card; |
3847 | init_data.input_sbal_addr_array = (void **) in_sbal_ptrs; | 3869 | init_data.input_sbal_addr_array = (void **) in_sbal_ptrs; |
3848 | init_data.output_sbal_addr_array = (void **) out_sbal_ptrs; | 3870 | init_data.output_sbal_addr_array = (void **) out_sbal_ptrs; |
@@ -4513,8 +4535,8 @@ static struct { | |||
4513 | /* 20 */{"queue 1 buffer usage"}, | 4535 | /* 20 */{"queue 1 buffer usage"}, |
4514 | {"queue 2 buffer usage"}, | 4536 | {"queue 2 buffer usage"}, |
4515 | {"queue 3 buffer usage"}, | 4537 | {"queue 3 buffer usage"}, |
4516 | {"rx handler time"}, | 4538 | {"rx poll time"}, |
4517 | {"rx handler count"}, | 4539 | {"rx poll count"}, |
4518 | {"rx do_QDIO time"}, | 4540 | {"rx do_QDIO time"}, |
4519 | {"rx do_QDIO count"}, | 4541 | {"rx do_QDIO count"}, |
4520 | {"tx handler time"}, | 4542 | {"tx handler time"}, |
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 830d63524d61..847e8797073c 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c | |||
@@ -310,6 +310,8 @@ static void qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) | |||
310 | struct qeth_vlan_vid *id; | 310 | struct qeth_vlan_vid *id; |
311 | 311 | ||
312 | QETH_CARD_TEXT_(card, 4, "aid:%d", vid); | 312 | QETH_CARD_TEXT_(card, 4, "aid:%d", vid); |
313 | if (!vid) | ||
314 | return; | ||
313 | if (card->info.type == QETH_CARD_TYPE_OSM) { | 315 | if (card->info.type == QETH_CARD_TYPE_OSM) { |
314 | QETH_CARD_TEXT(card, 3, "aidOSM"); | 316 | QETH_CARD_TEXT(card, 3, "aidOSM"); |
315 | return; | 317 | return; |
@@ -407,29 +409,25 @@ static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode) | |||
407 | return rc; | 409 | return rc; |
408 | } | 410 | } |
409 | 411 | ||
410 | static void qeth_l2_process_inbound_buffer(struct qeth_card *card, | 412 | static int qeth_l2_process_inbound_buffer(struct qeth_card *card, |
411 | struct qeth_qdio_buffer *buf, int index) | 413 | int budget, int *done) |
412 | { | 414 | { |
413 | struct qdio_buffer_element *element; | 415 | int work_done = 0; |
414 | struct sk_buff *skb; | 416 | struct sk_buff *skb; |
415 | struct qeth_hdr *hdr; | 417 | struct qeth_hdr *hdr; |
416 | int offset; | ||
417 | unsigned int len; | 418 | unsigned int len; |
418 | 419 | ||
419 | /* get first element of current buffer */ | 420 | *done = 0; |
420 | element = (struct qdio_buffer_element *)&buf->buffer->element[0]; | 421 | BUG_ON(!budget); |
421 | offset = 0; | 422 | while (budget) { |
422 | if (card->options.performance_stats) | 423 | skb = qeth_core_get_next_skb(card, |
423 | card->perf_stats.bufs_rec++; | 424 | card->qdio.in_q->bufs[card->rx.b_index].buffer, |
424 | while ((skb = qeth_core_get_next_skb(card, buf->buffer, &element, | 425 | &card->rx.b_element, &card->rx.e_offset, &hdr); |
425 | &offset, &hdr))) { | 426 | if (!skb) { |
426 | skb->dev = card->dev; | 427 | *done = 1; |
427 | /* is device UP ? */ | 428 | break; |
428 | if (!(card->dev->flags & IFF_UP)) { | ||
429 | dev_kfree_skb_any(skb); | ||
430 | continue; | ||
431 | } | 429 | } |
432 | 430 | skb->dev = card->dev; | |
433 | switch (hdr->hdr.l2.id) { | 431 | switch (hdr->hdr.l2.id) { |
434 | case QETH_HEADER_TYPE_LAYER2: | 432 | case QETH_HEADER_TYPE_LAYER2: |
435 | skb->pkt_type = PACKET_HOST; | 433 | skb->pkt_type = PACKET_HOST; |
@@ -441,7 +439,7 @@ static void qeth_l2_process_inbound_buffer(struct qeth_card *card, | |||
441 | if (skb->protocol == htons(ETH_P_802_2)) | 439 | if (skb->protocol == htons(ETH_P_802_2)) |
442 | *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno; | 440 | *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno; |
443 | len = skb->len; | 441 | len = skb->len; |
444 | netif_rx(skb); | 442 | netif_receive_skb(skb); |
445 | break; | 443 | break; |
446 | case QETH_HEADER_TYPE_OSN: | 444 | case QETH_HEADER_TYPE_OSN: |
447 | if (card->info.type == QETH_CARD_TYPE_OSN) { | 445 | if (card->info.type == QETH_CARD_TYPE_OSN) { |
@@ -459,9 +457,87 @@ static void qeth_l2_process_inbound_buffer(struct qeth_card *card, | |||
459 | QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN); | 457 | QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN); |
460 | continue; | 458 | continue; |
461 | } | 459 | } |
460 | work_done++; | ||
461 | budget--; | ||
462 | card->stats.rx_packets++; | 462 | card->stats.rx_packets++; |
463 | card->stats.rx_bytes += len; | 463 | card->stats.rx_bytes += len; |
464 | } | 464 | } |
465 | return work_done; | ||
466 | } | ||
467 | |||
468 | static int qeth_l2_poll(struct napi_struct *napi, int budget) | ||
469 | { | ||
470 | struct qeth_card *card = container_of(napi, struct qeth_card, napi); | ||
471 | int work_done = 0; | ||
472 | struct qeth_qdio_buffer *buffer; | ||
473 | int done; | ||
474 | int new_budget = budget; | ||
475 | |||
476 | if (card->options.performance_stats) { | ||
477 | card->perf_stats.inbound_cnt++; | ||
478 | card->perf_stats.inbound_start_time = qeth_get_micros(); | ||
479 | } | ||
480 | |||
481 | while (1) { | ||
482 | if (!card->rx.b_count) { | ||
483 | card->rx.qdio_err = 0; | ||
484 | card->rx.b_count = qdio_get_next_buffers( | ||
485 | card->data.ccwdev, 0, &card->rx.b_index, | ||
486 | &card->rx.qdio_err); | ||
487 | if (card->rx.b_count <= 0) { | ||
488 | card->rx.b_count = 0; | ||
489 | break; | ||
490 | } | ||
491 | card->rx.b_element = | ||
492 | &card->qdio.in_q->bufs[card->rx.b_index] | ||
493 | .buffer->element[0]; | ||
494 | card->rx.e_offset = 0; | ||
495 | } | ||
496 | |||
497 | while (card->rx.b_count) { | ||
498 | buffer = &card->qdio.in_q->bufs[card->rx.b_index]; | ||
499 | if (!(card->rx.qdio_err && | ||
500 | qeth_check_qdio_errors(card, buffer->buffer, | ||
501 | card->rx.qdio_err, "qinerr"))) | ||
502 | work_done += qeth_l2_process_inbound_buffer( | ||
503 | card, new_budget, &done); | ||
504 | else | ||
505 | done = 1; | ||
506 | |||
507 | if (done) { | ||
508 | if (card->options.performance_stats) | ||
509 | card->perf_stats.bufs_rec++; | ||
510 | qeth_put_buffer_pool_entry(card, | ||
511 | buffer->pool_entry); | ||
512 | qeth_queue_input_buffer(card, card->rx.b_index); | ||
513 | card->rx.b_count--; | ||
514 | if (card->rx.b_count) { | ||
515 | card->rx.b_index = | ||
516 | (card->rx.b_index + 1) % | ||
517 | QDIO_MAX_BUFFERS_PER_Q; | ||
518 | card->rx.b_element = | ||
519 | &card->qdio.in_q | ||
520 | ->bufs[card->rx.b_index] | ||
521 | .buffer->element[0]; | ||
522 | card->rx.e_offset = 0; | ||
523 | } | ||
524 | } | ||
525 | |||
526 | if (work_done >= budget) | ||
527 | goto out; | ||
528 | else | ||
529 | new_budget = budget - work_done; | ||
530 | } | ||
531 | } | ||
532 | |||
533 | napi_complete(napi); | ||
534 | if (qdio_start_irq(card->data.ccwdev, 0)) | ||
535 | napi_schedule(&card->napi); | ||
536 | out: | ||
537 | if (card->options.performance_stats) | ||
538 | card->perf_stats.inbound_time += qeth_get_micros() - | ||
539 | card->perf_stats.inbound_start_time; | ||
540 | return work_done; | ||
465 | } | 541 | } |
466 | 542 | ||
467 | static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac, | 543 | static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac, |
@@ -755,49 +831,10 @@ tx_drop: | |||
755 | return NETDEV_TX_OK; | 831 | return NETDEV_TX_OK; |
756 | } | 832 | } |
757 | 833 | ||
758 | static void qeth_l2_qdio_input_handler(struct ccw_device *ccwdev, | ||
759 | unsigned int qdio_err, unsigned int queue, | ||
760 | int first_element, int count, unsigned long card_ptr) | ||
761 | { | ||
762 | struct net_device *net_dev; | ||
763 | struct qeth_card *card; | ||
764 | struct qeth_qdio_buffer *buffer; | ||
765 | int index; | ||
766 | int i; | ||
767 | |||
768 | card = (struct qeth_card *) card_ptr; | ||
769 | net_dev = card->dev; | ||
770 | if (card->options.performance_stats) { | ||
771 | card->perf_stats.inbound_cnt++; | ||
772 | card->perf_stats.inbound_start_time = qeth_get_micros(); | ||
773 | } | ||
774 | if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) { | ||
775 | QETH_CARD_TEXT(card, 1, "qdinchk"); | ||
776 | QETH_CARD_TEXT_(card, 1, "%04X%04X", first_element, | ||
777 | count); | ||
778 | QETH_CARD_TEXT_(card, 1, "%04X", queue); | ||
779 | qeth_schedule_recovery(card); | ||
780 | return; | ||
781 | } | ||
782 | for (i = first_element; i < (first_element + count); ++i) { | ||
783 | index = i % QDIO_MAX_BUFFERS_PER_Q; | ||
784 | buffer = &card->qdio.in_q->bufs[index]; | ||
785 | if (!(qdio_err && | ||
786 | qeth_check_qdio_errors(card, buffer->buffer, qdio_err, | ||
787 | "qinerr"))) | ||
788 | qeth_l2_process_inbound_buffer(card, buffer, index); | ||
789 | /* clear buffer and give back to hardware */ | ||
790 | qeth_put_buffer_pool_entry(card, buffer->pool_entry); | ||
791 | qeth_queue_input_buffer(card, index); | ||
792 | } | ||
793 | if (card->options.performance_stats) | ||
794 | card->perf_stats.inbound_time += qeth_get_micros() - | ||
795 | card->perf_stats.inbound_start_time; | ||
796 | } | ||
797 | |||
798 | static int qeth_l2_open(struct net_device *dev) | 834 | static int qeth_l2_open(struct net_device *dev) |
799 | { | 835 | { |
800 | struct qeth_card *card = dev->ml_priv; | 836 | struct qeth_card *card = dev->ml_priv; |
837 | int rc = 0; | ||
801 | 838 | ||
802 | QETH_CARD_TEXT(card, 4, "qethopen"); | 839 | QETH_CARD_TEXT(card, 4, "qethopen"); |
803 | if (card->state != CARD_STATE_SOFTSETUP) | 840 | if (card->state != CARD_STATE_SOFTSETUP) |
@@ -814,18 +851,24 @@ static int qeth_l2_open(struct net_device *dev) | |||
814 | 851 | ||
815 | if (!card->lan_online && netif_carrier_ok(dev)) | 852 | if (!card->lan_online && netif_carrier_ok(dev)) |
816 | netif_carrier_off(dev); | 853 | netif_carrier_off(dev); |
817 | return 0; | 854 | if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) { |
855 | napi_enable(&card->napi); | ||
856 | napi_schedule(&card->napi); | ||
857 | } else | ||
858 | rc = -EIO; | ||
859 | return rc; | ||
818 | } | 860 | } |
819 | 861 | ||
820 | |||
821 | static int qeth_l2_stop(struct net_device *dev) | 862 | static int qeth_l2_stop(struct net_device *dev) |
822 | { | 863 | { |
823 | struct qeth_card *card = dev->ml_priv; | 864 | struct qeth_card *card = dev->ml_priv; |
824 | 865 | ||
825 | QETH_CARD_TEXT(card, 4, "qethstop"); | 866 | QETH_CARD_TEXT(card, 4, "qethstop"); |
826 | netif_tx_disable(dev); | 867 | netif_tx_disable(dev); |
827 | if (card->state == CARD_STATE_UP) | 868 | if (card->state == CARD_STATE_UP) { |
828 | card->state = CARD_STATE_SOFTSETUP; | 869 | card->state = CARD_STATE_SOFTSETUP; |
870 | napi_disable(&card->napi); | ||
871 | } | ||
829 | return 0; | 872 | return 0; |
830 | } | 873 | } |
831 | 874 | ||
@@ -836,8 +879,9 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev) | |||
836 | INIT_LIST_HEAD(&card->vid_list); | 879 | INIT_LIST_HEAD(&card->vid_list); |
837 | INIT_LIST_HEAD(&card->mc_list); | 880 | INIT_LIST_HEAD(&card->mc_list); |
838 | card->options.layer2 = 1; | 881 | card->options.layer2 = 1; |
882 | card->discipline.start_poll = qeth_qdio_start_poll; | ||
839 | card->discipline.input_handler = (qdio_handler_t *) | 883 | card->discipline.input_handler = (qdio_handler_t *) |
840 | qeth_l2_qdio_input_handler; | 884 | qeth_qdio_input_handler; |
841 | card->discipline.output_handler = (qdio_handler_t *) | 885 | card->discipline.output_handler = (qdio_handler_t *) |
842 | qeth_qdio_output_handler; | 886 | qeth_qdio_output_handler; |
843 | card->discipline.recover = qeth_l2_recover; | 887 | card->discipline.recover = qeth_l2_recover; |
@@ -923,6 +967,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card) | |||
923 | card->info.broadcast_capable = 1; | 967 | card->info.broadcast_capable = 1; |
924 | qeth_l2_request_initial_mac(card); | 968 | qeth_l2_request_initial_mac(card); |
925 | SET_NETDEV_DEV(card->dev, &card->gdev->dev); | 969 | SET_NETDEV_DEV(card->dev, &card->gdev->dev); |
970 | netif_napi_add(card->dev, &card->napi, qeth_l2_poll, QETH_NAPI_WEIGHT); | ||
926 | return register_netdev(card->dev); | 971 | return register_netdev(card->dev); |
927 | } | 972 | } |
928 | 973 | ||
@@ -955,6 +1000,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) | |||
955 | qeth_l2_send_setmac(card, &card->dev->dev_addr[0]); | 1000 | qeth_l2_send_setmac(card, &card->dev->dev_addr[0]); |
956 | 1001 | ||
957 | card->state = CARD_STATE_HARDSETUP; | 1002 | card->state = CARD_STATE_HARDSETUP; |
1003 | memset(&card->rx, 0, sizeof(struct qeth_rx)); | ||
958 | qeth_print_status_message(card); | 1004 | qeth_print_status_message(card); |
959 | 1005 | ||
960 | /* softsetup */ | 1006 | /* softsetup */ |
@@ -1086,9 +1132,6 @@ static int qeth_l2_recover(void *ptr) | |||
1086 | card->use_hard_stop = 1; | 1132 | card->use_hard_stop = 1; |
1087 | __qeth_l2_set_offline(card->gdev, 1); | 1133 | __qeth_l2_set_offline(card->gdev, 1); |
1088 | rc = __qeth_l2_set_online(card->gdev, 1); | 1134 | rc = __qeth_l2_set_online(card->gdev, 1); |
1089 | /* don't run another scheduled recovery */ | ||
1090 | qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); | ||
1091 | qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); | ||
1092 | if (!rc) | 1135 | if (!rc) |
1093 | dev_info(&card->gdev->dev, | 1136 | dev_info(&card->gdev->dev, |
1094 | "Device successfully recovered!\n"); | 1137 | "Device successfully recovered!\n"); |
@@ -1099,6 +1142,8 @@ static int qeth_l2_recover(void *ptr) | |||
1099 | dev_warn(&card->gdev->dev, "The qeth device driver " | 1142 | dev_warn(&card->gdev->dev, "The qeth device driver " |
1100 | "failed to recover an error on the device\n"); | 1143 | "failed to recover an error on the device\n"); |
1101 | } | 1144 | } |
1145 | qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); | ||
1146 | qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); | ||
1102 | return 0; | 1147 | return 0; |
1103 | } | 1148 | } |
1104 | 1149 | ||
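The open/stop changes above tie qdio interrupt control to the NAPI lifecycle: interrupts are stopped and the poll routine is kicked when the interface opens, and NAPI is disabled again on stop. A condensed sketch of that wiring, reusing the placeholder my_card/my_poll names from the sketch after qeth_core_main.c (not the verbatim qeth functions):

static int my_setup_netdev(struct my_card *card, struct net_device *dev)
{
	netif_napi_add(dev, &card->napi, my_poll, 128 /* poll weight */);
	return register_netdev(dev);
}

static int my_open(struct net_device *dev)
{
	struct my_card *card = dev->ml_priv;

	/* switch the inbound queue from interrupts to polling */
	if (qdio_stop_irq(card->cdev, 0) < 0)
		return -EIO;
	napi_enable(&card->napi);
	napi_schedule(&card->napi);     /* drain anything already queued */
	return 0;
}

static int my_stop(struct net_device *dev)
{
	struct my_card *card = dev->ml_priv;

	netif_tx_disable(dev);
	napi_disable(&card->napi);
	return 0;
}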
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index e22ae248f613..74d1401a5d5e 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c | |||
@@ -103,12 +103,7 @@ int qeth_l3_string_to_ipaddr4(const char *buf, __u8 *addr) | |||
103 | 103 | ||
104 | void qeth_l3_ipaddr6_to_string(const __u8 *addr, char *buf) | 104 | void qeth_l3_ipaddr6_to_string(const __u8 *addr, char *buf) |
105 | { | 105 | { |
106 | sprintf(buf, "%02x%02x:%02x%02x:%02x%02x:%02x%02x" | 106 | sprintf(buf, "%pI6", addr); |
107 | ":%02x%02x:%02x%02x:%02x%02x:%02x%02x", | ||
108 | addr[0], addr[1], addr[2], addr[3], | ||
109 | addr[4], addr[5], addr[6], addr[7], | ||
110 | addr[8], addr[9], addr[10], addr[11], | ||
111 | addr[12], addr[13], addr[14], addr[15]); | ||
112 | } | 107 | } |
113 | 108 | ||
114 | int qeth_l3_string_to_ipaddr6(const char *buf, __u8 *addr) | 109 | int qeth_l3_string_to_ipaddr6(const char *buf, __u8 *addr) |
@@ -1825,7 +1820,7 @@ static void qeth_l3_add_vlan_mc(struct qeth_card *card) | |||
1825 | return; | 1820 | return; |
1826 | 1821 | ||
1827 | vg = card->vlangrp; | 1822 | vg = card->vlangrp; |
1828 | for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { | 1823 | for (i = 0; i < VLAN_N_VID; i++) { |
1829 | struct net_device *netdev = vlan_group_get_device(vg, i); | 1824 | struct net_device *netdev = vlan_group_get_device(vg, i); |
1830 | if (netdev == NULL || | 1825 | if (netdev == NULL || |
1831 | !(netdev->flags & IFF_UP)) | 1826 | !(netdev->flags & IFF_UP)) |
@@ -1888,7 +1883,7 @@ static void qeth_l3_add_vlan_mc6(struct qeth_card *card) | |||
1888 | return; | 1883 | return; |
1889 | 1884 | ||
1890 | vg = card->vlangrp; | 1885 | vg = card->vlangrp; |
1891 | for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { | 1886 | for (i = 0; i < VLAN_N_VID; i++) { |
1892 | struct net_device *netdev = vlan_group_get_device(vg, i); | 1887 | struct net_device *netdev = vlan_group_get_device(vg, i); |
1893 | if (netdev == NULL || | 1888 | if (netdev == NULL || |
1894 | !(netdev->flags & IFF_UP)) | 1889 | !(netdev->flags & IFF_UP)) |
@@ -2018,13 +2013,14 @@ static void qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) | |||
2018 | qeth_l3_set_multicast_list(card->dev); | 2013 | qeth_l3_set_multicast_list(card->dev); |
2019 | } | 2014 | } |
2020 | 2015 | ||
2021 | static inline __u16 qeth_l3_rebuild_skb(struct qeth_card *card, | 2016 | static inline int qeth_l3_rebuild_skb(struct qeth_card *card, |
2022 | struct sk_buff *skb, struct qeth_hdr *hdr) | 2017 | struct sk_buff *skb, struct qeth_hdr *hdr, |
2018 | unsigned short *vlan_id) | ||
2023 | { | 2019 | { |
2024 | unsigned short vlan_id = 0; | ||
2025 | __be16 prot; | 2020 | __be16 prot; |
2026 | struct iphdr *ip_hdr; | 2021 | struct iphdr *ip_hdr; |
2027 | unsigned char tg_addr[MAX_ADDR_LEN]; | 2022 | unsigned char tg_addr[MAX_ADDR_LEN]; |
2023 | int is_vlan = 0; | ||
2028 | 2024 | ||
2029 | if (!(hdr->hdr.l3.flags & QETH_HDR_PASSTHRU)) { | 2025 | if (!(hdr->hdr.l3.flags & QETH_HDR_PASSTHRU)) { |
2030 | prot = htons((hdr->hdr.l3.flags & QETH_HDR_IPV6)? ETH_P_IPV6 : | 2026 | prot = htons((hdr->hdr.l3.flags & QETH_HDR_IPV6)? ETH_P_IPV6 : |
@@ -2087,8 +2083,9 @@ static inline __u16 qeth_l3_rebuild_skb(struct qeth_card *card, | |||
2087 | 2083 | ||
2088 | if (hdr->hdr.l3.ext_flags & | 2084 | if (hdr->hdr.l3.ext_flags & |
2089 | (QETH_HDR_EXT_VLAN_FRAME | QETH_HDR_EXT_INCLUDE_VLAN_TAG)) { | 2085 | (QETH_HDR_EXT_VLAN_FRAME | QETH_HDR_EXT_INCLUDE_VLAN_TAG)) { |
2090 | vlan_id = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME)? | 2086 | *vlan_id = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME) ? |
2091 | hdr->hdr.l3.vlan_id : *((u16 *)&hdr->hdr.l3.dest_addr[12]); | 2087 | hdr->hdr.l3.vlan_id : *((u16 *)&hdr->hdr.l3.dest_addr[12]); |
2088 | is_vlan = 1; | ||
2092 | } | 2089 | } |
2093 | 2090 | ||
2094 | switch (card->options.checksum_type) { | 2091 | switch (card->options.checksum_type) { |
@@ -2109,54 +2106,44 @@ static inline __u16 qeth_l3_rebuild_skb(struct qeth_card *card, | |||
2109 | skb->ip_summed = CHECKSUM_NONE; | 2106 | skb->ip_summed = CHECKSUM_NONE; |
2110 | } | 2107 | } |
2111 | 2108 | ||
2112 | return vlan_id; | 2109 | return is_vlan; |
2113 | } | 2110 | } |
2114 | 2111 | ||
2115 | static void qeth_l3_process_inbound_buffer(struct qeth_card *card, | 2112 | static int qeth_l3_process_inbound_buffer(struct qeth_card *card, |
2116 | struct qeth_qdio_buffer *buf, int index) | 2113 | int budget, int *done) |
2117 | { | 2114 | { |
2118 | struct qdio_buffer_element *element; | 2115 | int work_done = 0; |
2119 | struct sk_buff *skb; | 2116 | struct sk_buff *skb; |
2120 | struct qeth_hdr *hdr; | 2117 | struct qeth_hdr *hdr; |
2121 | int offset; | ||
2122 | __u16 vlan_tag = 0; | 2118 | __u16 vlan_tag = 0; |
2119 | int is_vlan; | ||
2123 | unsigned int len; | 2120 | unsigned int len; |
2124 | /* get first element of current buffer */ | ||
2125 | element = (struct qdio_buffer_element *)&buf->buffer->element[0]; | ||
2126 | offset = 0; | ||
2127 | if (card->options.performance_stats) | ||
2128 | card->perf_stats.bufs_rec++; | ||
2129 | while ((skb = qeth_core_get_next_skb(card, buf->buffer, &element, | ||
2130 | &offset, &hdr))) { | ||
2131 | skb->dev = card->dev; | ||
2132 | /* is device UP ? */ | ||
2133 | if (!(card->dev->flags & IFF_UP)) { | ||
2134 | dev_kfree_skb_any(skb); | ||
2135 | continue; | ||
2136 | } | ||
2137 | 2121 | ||
2122 | *done = 0; | ||
2123 | BUG_ON(!budget); | ||
2124 | while (budget) { | ||
2125 | skb = qeth_core_get_next_skb(card, | ||
2126 | card->qdio.in_q->bufs[card->rx.b_index].buffer, | ||
2127 | &card->rx.b_element, &card->rx.e_offset, &hdr); | ||
2128 | if (!skb) { | ||
2129 | *done = 1; | ||
2130 | break; | ||
2131 | } | ||
2132 | skb->dev = card->dev; | ||
2138 | switch (hdr->hdr.l3.id) { | 2133 | switch (hdr->hdr.l3.id) { |
2139 | case QETH_HEADER_TYPE_LAYER3: | 2134 | case QETH_HEADER_TYPE_LAYER3: |
2140 | vlan_tag = qeth_l3_rebuild_skb(card, skb, hdr); | 2135 | is_vlan = qeth_l3_rebuild_skb(card, skb, hdr, |
2136 | &vlan_tag); | ||
2141 | len = skb->len; | 2137 | len = skb->len; |
2142 | if (vlan_tag && !card->options.sniffer) | 2138 | if (is_vlan && !card->options.sniffer) |
2143 | if (card->vlangrp) | 2139 | vlan_gro_receive(&card->napi, card->vlangrp, |
2144 | vlan_hwaccel_rx(skb, card->vlangrp, | 2140 | vlan_tag, skb); |
2145 | vlan_tag); | ||
2146 | else { | ||
2147 | dev_kfree_skb_any(skb); | ||
2148 | continue; | ||
2149 | } | ||
2150 | else | 2141 | else |
2151 | netif_rx(skb); | 2142 | napi_gro_receive(&card->napi, skb); |
2152 | break; | 2143 | break; |
2153 | case QETH_HEADER_TYPE_LAYER2: /* for HiperSockets sniffer */ | 2144 | case QETH_HEADER_TYPE_LAYER2: /* for HiperSockets sniffer */ |
2154 | skb->pkt_type = PACKET_HOST; | 2145 | skb->pkt_type = PACKET_HOST; |
2155 | skb->protocol = eth_type_trans(skb, skb->dev); | 2146 | skb->protocol = eth_type_trans(skb, skb->dev); |
2156 | if (card->options.checksum_type == NO_CHECKSUMMING) | ||
2157 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
2158 | else | ||
2159 | skb->ip_summed = CHECKSUM_NONE; | ||
2160 | len = skb->len; | 2147 | len = skb->len; |
2161 | netif_receive_skb(skb); | 2148 | netif_receive_skb(skb); |
2162 | break; | 2149 | break; |
@@ -2166,10 +2153,87 @@ static void qeth_l3_process_inbound_buffer(struct qeth_card *card, | |||
2166 | QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN); | 2153 | QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN); |
2167 | continue; | 2154 | continue; |
2168 | } | 2155 | } |
2169 | 2156 | work_done++; | |
2157 | budget--; | ||
2170 | card->stats.rx_packets++; | 2158 | card->stats.rx_packets++; |
2171 | card->stats.rx_bytes += len; | 2159 | card->stats.rx_bytes += len; |
2172 | } | 2160 | } |
2161 | return work_done; | ||
2162 | } | ||
2163 | |||
2164 | static int qeth_l3_poll(struct napi_struct *napi, int budget) | ||
2165 | { | ||
2166 | struct qeth_card *card = container_of(napi, struct qeth_card, napi); | ||
2167 | int work_done = 0; | ||
2168 | struct qeth_qdio_buffer *buffer; | ||
2169 | int done; | ||
2170 | int new_budget = budget; | ||
2171 | |||
2172 | if (card->options.performance_stats) { | ||
2173 | card->perf_stats.inbound_cnt++; | ||
2174 | card->perf_stats.inbound_start_time = qeth_get_micros(); | ||
2175 | } | ||
2176 | |||
2177 | while (1) { | ||
2178 | if (!card->rx.b_count) { | ||
2179 | card->rx.qdio_err = 0; | ||
2180 | card->rx.b_count = qdio_get_next_buffers( | ||
2181 | card->data.ccwdev, 0, &card->rx.b_index, | ||
2182 | &card->rx.qdio_err); | ||
2183 | if (card->rx.b_count <= 0) { | ||
2184 | card->rx.b_count = 0; | ||
2185 | break; | ||
2186 | } | ||
2187 | card->rx.b_element = | ||
2188 | &card->qdio.in_q->bufs[card->rx.b_index] | ||
2189 | .buffer->element[0]; | ||
2190 | card->rx.e_offset = 0; | ||
2191 | } | ||
2192 | |||
2193 | while (card->rx.b_count) { | ||
2194 | buffer = &card->qdio.in_q->bufs[card->rx.b_index]; | ||
2195 | if (!(card->rx.qdio_err && | ||
2196 | qeth_check_qdio_errors(card, buffer->buffer, | ||
2197 | card->rx.qdio_err, "qinerr"))) | ||
2198 | work_done += qeth_l3_process_inbound_buffer( | ||
2199 | card, new_budget, &done); | ||
2200 | else | ||
2201 | done = 1; | ||
2202 | |||
2203 | if (done) { | ||
2204 | if (card->options.performance_stats) | ||
2205 | card->perf_stats.bufs_rec++; | ||
2206 | qeth_put_buffer_pool_entry(card, | ||
2207 | buffer->pool_entry); | ||
2208 | qeth_queue_input_buffer(card, card->rx.b_index); | ||
2209 | card->rx.b_count--; | ||
2210 | if (card->rx.b_count) { | ||
2211 | card->rx.b_index = | ||
2212 | (card->rx.b_index + 1) % | ||
2213 | QDIO_MAX_BUFFERS_PER_Q; | ||
2214 | card->rx.b_element = | ||
2215 | &card->qdio.in_q | ||
2216 | ->bufs[card->rx.b_index] | ||
2217 | .buffer->element[0]; | ||
2218 | card->rx.e_offset = 0; | ||
2219 | } | ||
2220 | } | ||
2221 | |||
2222 | if (work_done >= budget) | ||
2223 | goto out; | ||
2224 | else | ||
2225 | new_budget = budget - work_done; | ||
2226 | } | ||
2227 | } | ||
2228 | |||
2229 | napi_complete(napi); | ||
2230 | if (qdio_start_irq(card->data.ccwdev, 0)) | ||
2231 | napi_schedule(&card->napi); | ||
2232 | out: | ||
2233 | if (card->options.performance_stats) | ||
2234 | card->perf_stats.inbound_time += qeth_get_micros() - | ||
2235 | card->perf_stats.inbound_start_time; | ||
2236 | return work_done; | ||
2173 | } | 2237 | } |
2174 | 2238 | ||
2175 | static int qeth_l3_verify_vlan_dev(struct net_device *dev, | 2239 | static int qeth_l3_verify_vlan_dev(struct net_device *dev, |
@@ -2183,7 +2247,7 @@ static int qeth_l3_verify_vlan_dev(struct net_device *dev, | |||
2183 | if (!vg) | 2247 | if (!vg) |
2184 | return rc; | 2248 | return rc; |
2185 | 2249 | ||
2186 | for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { | 2250 | for (i = 0; i < VLAN_N_VID; i++) { |
2187 | if (vlan_group_get_device(vg, i) == dev) { | 2251 | if (vlan_group_get_device(vg, i) == dev) { |
2188 | rc = QETH_VLAN_CARD; | 2252 | rc = QETH_VLAN_CARD; |
2189 | break; | 2253 | break; |
@@ -3103,6 +3167,7 @@ tx_drop: | |||
3103 | static int qeth_l3_open(struct net_device *dev) | 3167 | static int qeth_l3_open(struct net_device *dev) |
3104 | { | 3168 | { |
3105 | struct qeth_card *card = dev->ml_priv; | 3169 | struct qeth_card *card = dev->ml_priv; |
3170 | int rc = 0; | ||
3106 | 3171 | ||
3107 | QETH_CARD_TEXT(card, 4, "qethopen"); | 3172 | QETH_CARD_TEXT(card, 4, "qethopen"); |
3108 | if (card->state != CARD_STATE_SOFTSETUP) | 3173 | if (card->state != CARD_STATE_SOFTSETUP) |
@@ -3113,7 +3178,12 @@ static int qeth_l3_open(struct net_device *dev) | |||
3113 | 3178 | ||
3114 | if (!card->lan_online && netif_carrier_ok(dev)) | 3179 | if (!card->lan_online && netif_carrier_ok(dev)) |
3115 | netif_carrier_off(dev); | 3180 | netif_carrier_off(dev); |
3116 | return 0; | 3181 | if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) { |
3182 | napi_enable(&card->napi); | ||
3183 | napi_schedule(&card->napi); | ||
3184 | } else | ||
3185 | rc = -EIO; | ||
3186 | return rc; | ||
3117 | } | 3187 | } |
3118 | 3188 | ||
3119 | static int qeth_l3_stop(struct net_device *dev) | 3189 | static int qeth_l3_stop(struct net_device *dev) |
@@ -3122,8 +3192,10 @@ static int qeth_l3_stop(struct net_device *dev) | |||
3122 | 3192 | ||
3123 | QETH_CARD_TEXT(card, 4, "qethstop"); | 3193 | QETH_CARD_TEXT(card, 4, "qethstop"); |
3124 | netif_tx_disable(dev); | 3194 | netif_tx_disable(dev); |
3125 | if (card->state == CARD_STATE_UP) | 3195 | if (card->state == CARD_STATE_UP) { |
3126 | card->state = CARD_STATE_SOFTSETUP; | 3196 | card->state = CARD_STATE_SOFTSETUP; |
3197 | napi_disable(&card->napi); | ||
3198 | } | ||
3127 | return 0; | 3199 | return 0; |
3128 | } | 3200 | } |
3129 | 3201 | ||
@@ -3293,57 +3365,19 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) | |||
3293 | card->dev->gso_max_size = 15 * PAGE_SIZE; | 3365 | card->dev->gso_max_size = 15 * PAGE_SIZE; |
3294 | 3366 | ||
3295 | SET_NETDEV_DEV(card->dev, &card->gdev->dev); | 3367 | SET_NETDEV_DEV(card->dev, &card->gdev->dev); |
3368 | netif_napi_add(card->dev, &card->napi, qeth_l3_poll, QETH_NAPI_WEIGHT); | ||
3296 | return register_netdev(card->dev); | 3369 | return register_netdev(card->dev); |
3297 | } | 3370 | } |
3298 | 3371 | ||
3299 | static void qeth_l3_qdio_input_handler(struct ccw_device *ccwdev, | ||
3300 | unsigned int qdio_err, unsigned int queue, int first_element, | ||
3301 | int count, unsigned long card_ptr) | ||
3302 | { | ||
3303 | struct net_device *net_dev; | ||
3304 | struct qeth_card *card; | ||
3305 | struct qeth_qdio_buffer *buffer; | ||
3306 | int index; | ||
3307 | int i; | ||
3308 | |||
3309 | card = (struct qeth_card *) card_ptr; | ||
3310 | net_dev = card->dev; | ||
3311 | if (card->options.performance_stats) { | ||
3312 | card->perf_stats.inbound_cnt++; | ||
3313 | card->perf_stats.inbound_start_time = qeth_get_micros(); | ||
3314 | } | ||
3315 | if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) { | ||
3316 | QETH_CARD_TEXT(card, 1, "qdinchk"); | ||
3317 | QETH_CARD_TEXT_(card, 1, "%04X%04X", | ||
3318 | first_element, count); | ||
3319 | QETH_CARD_TEXT_(card, 1, "%04X", queue); | ||
3320 | qeth_schedule_recovery(card); | ||
3321 | return; | ||
3322 | } | ||
3323 | for (i = first_element; i < (first_element + count); ++i) { | ||
3324 | index = i % QDIO_MAX_BUFFERS_PER_Q; | ||
3325 | buffer = &card->qdio.in_q->bufs[index]; | ||
3326 | if (!(qdio_err && | ||
3327 | qeth_check_qdio_errors(card, buffer->buffer, | ||
3328 | qdio_err, "qinerr"))) | ||
3329 | qeth_l3_process_inbound_buffer(card, buffer, index); | ||
3330 | /* clear buffer and give back to hardware */ | ||
3331 | qeth_put_buffer_pool_entry(card, buffer->pool_entry); | ||
3332 | qeth_queue_input_buffer(card, index); | ||
3333 | } | ||
3334 | if (card->options.performance_stats) | ||
3335 | card->perf_stats.inbound_time += qeth_get_micros() - | ||
3336 | card->perf_stats.inbound_start_time; | ||
3337 | } | ||
3338 | |||
3339 | static int qeth_l3_probe_device(struct ccwgroup_device *gdev) | 3372 | static int qeth_l3_probe_device(struct ccwgroup_device *gdev) |
3340 | { | 3373 | { |
3341 | struct qeth_card *card = dev_get_drvdata(&gdev->dev); | 3374 | struct qeth_card *card = dev_get_drvdata(&gdev->dev); |
3342 | 3375 | ||
3343 | qeth_l3_create_device_attributes(&gdev->dev); | 3376 | qeth_l3_create_device_attributes(&gdev->dev); |
3344 | card->options.layer2 = 0; | 3377 | card->options.layer2 = 0; |
3378 | card->discipline.start_poll = qeth_qdio_start_poll; | ||
3345 | card->discipline.input_handler = (qdio_handler_t *) | 3379 | card->discipline.input_handler = (qdio_handler_t *) |
3346 | qeth_l3_qdio_input_handler; | 3380 | qeth_qdio_input_handler; |
3347 | card->discipline.output_handler = (qdio_handler_t *) | 3381 | card->discipline.output_handler = (qdio_handler_t *) |
3348 | qeth_qdio_output_handler; | 3382 | qeth_qdio_output_handler; |
3349 | card->discipline.recover = qeth_l3_recover; | 3383 | card->discipline.recover = qeth_l3_recover; |
@@ -3402,6 +3436,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) | |||
3402 | } | 3436 | } |
3403 | 3437 | ||
3404 | card->state = CARD_STATE_HARDSETUP; | 3438 | card->state = CARD_STATE_HARDSETUP; |
3439 | memset(&card->rx, 0, sizeof(struct qeth_rx)); | ||
3405 | qeth_print_status_message(card); | 3440 | qeth_print_status_message(card); |
3406 | 3441 | ||
3407 | /* softsetup */ | 3442 | /* softsetup */ |
@@ -3538,9 +3573,6 @@ static int qeth_l3_recover(void *ptr) | |||
3538 | card->use_hard_stop = 1; | 3573 | card->use_hard_stop = 1; |
3539 | __qeth_l3_set_offline(card->gdev, 1); | 3574 | __qeth_l3_set_offline(card->gdev, 1); |
3540 | rc = __qeth_l3_set_online(card->gdev, 1); | 3575 | rc = __qeth_l3_set_online(card->gdev, 1); |
3541 | /* don't run another scheduled recovery */ | ||
3542 | qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); | ||
3543 | qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); | ||
3544 | if (!rc) | 3576 | if (!rc) |
3545 | dev_info(&card->gdev->dev, | 3577 | dev_info(&card->gdev->dev, |
3546 | "Device successfully recovered!\n"); | 3578 | "Device successfully recovered!\n"); |
@@ -3551,6 +3583,8 @@ static int qeth_l3_recover(void *ptr) | |||
3551 | dev_warn(&card->gdev->dev, "The qeth device driver " | 3583 | dev_warn(&card->gdev->dev, "The qeth device driver " |
3552 | "failed to recover an error on the device\n"); | 3584 | "failed to recover an error on the device\n"); |
3553 | } | 3585 | } |
3586 | qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); | ||
3587 | qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); | ||
3554 | return 0; | 3588 | return 0; |
3555 | } | 3589 | } |
3556 | 3590 | ||
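With a NAPI context available, the layer-3 receive path above switches from netif_rx()/vlan_hwaccel_rx() to the GRO entry points, so aggregation happens inside the poll loop. A minimal sketch of that delivery branch, assuming the placeholder my_card also carries a vlangrp pointer (the sniffer special case handled by the real code is omitted):

#include <linux/if_vlan.h>

static void my_deliver(struct my_card *card, struct sk_buff *skb,
		       int is_vlan, u16 vlan_tag)
{
	if (is_vlan)
		/* tag taken from the qeth_hdr; GRO handles the VLAN path */
		vlan_gro_receive(&card->napi, card->vlangrp, vlan_tag, skb);
	else
		napi_gro_receive(&card->napi, skb);
}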
diff --git a/drivers/s390/scsi/Makefile b/drivers/s390/scsi/Makefile index cb301cc6178c..c454ffebb63e 100644 --- a/drivers/s390/scsi/Makefile +++ b/drivers/s390/scsi/Makefile | |||
@@ -2,7 +2,8 @@ | |||
2 | # Makefile for the S/390 specific device drivers | 2 | # Makefile for the S/390 specific device drivers |
3 | # | 3 | # |
4 | 4 | ||
5 | zfcp-objs := zfcp_aux.o zfcp_ccw.o zfcp_scsi.o zfcp_erp.o zfcp_qdio.o \ | 5 | zfcp-objs := zfcp_aux.o zfcp_ccw.o zfcp_cfdc.o zfcp_dbf.o zfcp_erp.o \ |
6 | zfcp_fsf.o zfcp_dbf.o zfcp_sysfs.o zfcp_fc.o zfcp_cfdc.o | 6 | zfcp_fc.o zfcp_fsf.o zfcp_qdio.o zfcp_scsi.o zfcp_sysfs.o \ |
7 | zfcp_unit.o | ||
7 | 8 | ||
8 | obj-$(CONFIG_ZFCP) += zfcp.o | 9 | obj-$(CONFIG_ZFCP) += zfcp.o |
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 96fa1f536394..044fb22718d2 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c | |||
@@ -56,7 +56,6 @@ static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun) | |||
56 | struct ccw_device *cdev; | 56 | struct ccw_device *cdev; |
57 | struct zfcp_adapter *adapter; | 57 | struct zfcp_adapter *adapter; |
58 | struct zfcp_port *port; | 58 | struct zfcp_port *port; |
59 | struct zfcp_unit *unit; | ||
60 | 59 | ||
61 | cdev = get_ccwdev_by_busid(&zfcp_ccw_driver, busid); | 60 | cdev = get_ccwdev_by_busid(&zfcp_ccw_driver, busid); |
62 | if (!cdev) | 61 | if (!cdev) |
@@ -72,17 +71,11 @@ static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun) | |||
72 | port = zfcp_get_port_by_wwpn(adapter, wwpn); | 71 | port = zfcp_get_port_by_wwpn(adapter, wwpn); |
73 | if (!port) | 72 | if (!port) |
74 | goto out_port; | 73 | goto out_port; |
74 | flush_work(&port->rport_work); | ||
75 | 75 | ||
76 | unit = zfcp_unit_enqueue(port, lun); | 76 | zfcp_unit_add(port, lun); |
77 | if (IS_ERR(unit)) | ||
78 | goto out_unit; | ||
79 | |||
80 | zfcp_erp_unit_reopen(unit, 0, "auidc_1", NULL); | ||
81 | zfcp_erp_wait(adapter); | ||
82 | flush_work(&unit->scsi_work); | ||
83 | |||
84 | out_unit: | ||
85 | put_device(&port->dev); | 77 | put_device(&port->dev); |
78 | |||
86 | out_port: | 79 | out_port: |
87 | zfcp_ccw_adapter_put(adapter); | 80 | zfcp_ccw_adapter_put(adapter); |
88 | out_ccw_device: | 81 | out_ccw_device: |
@@ -158,6 +151,9 @@ static int __init zfcp_module_init(void) | |||
158 | fc_attach_transport(&zfcp_transport_functions); | 151 | fc_attach_transport(&zfcp_transport_functions); |
159 | if (!zfcp_data.scsi_transport_template) | 152 | if (!zfcp_data.scsi_transport_template) |
160 | goto out_transport; | 153 | goto out_transport; |
154 | scsi_transport_reserve_device(zfcp_data.scsi_transport_template, | ||
155 | sizeof(struct zfcp_scsi_dev)); | ||
156 | |||
161 | 157 | ||
162 | retval = misc_register(&zfcp_cfdc_misc); | 158 | retval = misc_register(&zfcp_cfdc_misc); |
163 | if (retval) { | 159 | if (retval) { |
@@ -211,30 +207,6 @@ static void __exit zfcp_module_exit(void) | |||
211 | module_exit(zfcp_module_exit); | 207 | module_exit(zfcp_module_exit); |
212 | 208 | ||
213 | /** | 209 | /** |
214 | * zfcp_get_unit_by_lun - find unit in unit list of port by FCP LUN | ||
215 | * @port: pointer to port to search for unit | ||
216 | * @fcp_lun: FCP LUN to search for | ||
217 | * | ||
218 | * Returns: pointer to zfcp_unit or NULL | ||
219 | */ | ||
220 | struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *port, u64 fcp_lun) | ||
221 | { | ||
222 | unsigned long flags; | ||
223 | struct zfcp_unit *unit; | ||
224 | |||
225 | read_lock_irqsave(&port->unit_list_lock, flags); | ||
226 | list_for_each_entry(unit, &port->unit_list, list) | ||
227 | if (unit->fcp_lun == fcp_lun) { | ||
228 | if (!get_device(&unit->dev)) | ||
229 | unit = NULL; | ||
230 | read_unlock_irqrestore(&port->unit_list_lock, flags); | ||
231 | return unit; | ||
232 | } | ||
233 | read_unlock_irqrestore(&port->unit_list_lock, flags); | ||
234 | return NULL; | ||
235 | } | ||
236 | |||
237 | /** | ||
238 | * zfcp_get_port_by_wwpn - find port in port list of adapter by wwpn | 210 | * zfcp_get_port_by_wwpn - find port in port list of adapter by wwpn |
239 | * @adapter: pointer to adapter to search for port | 211 | * @adapter: pointer to adapter to search for port |
240 | * @wwpn: wwpn to search for | 212 | * @wwpn: wwpn to search for |
@@ -259,92 +231,6 @@ struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter, | |||
259 | return NULL; | 231 | return NULL; |
260 | } | 232 | } |
261 | 233 | ||
262 | /** | ||
263 | * zfcp_unit_release - dequeue unit | ||
264 | * @dev: pointer to device | ||
265 | * | ||
266 | * waits until all work is done on unit and removes it then from the unit->list | ||
267 | * of the associated port. | ||
268 | */ | ||
269 | static void zfcp_unit_release(struct device *dev) | ||
270 | { | ||
271 | struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev); | ||
272 | |||
273 | put_device(&unit->port->dev); | ||
274 | kfree(unit); | ||
275 | } | ||
276 | |||
277 | /** | ||
278 | * zfcp_unit_enqueue - enqueue unit to unit list of a port. | ||
279 | * @port: pointer to port where unit is added | ||
280 | * @fcp_lun: FCP LUN of unit to be enqueued | ||
281 | * Returns: pointer to enqueued unit on success, ERR_PTR on error | ||
282 | * | ||
283 | * Sets up some unit internal structures and creates sysfs entry. | ||
284 | */ | ||
285 | struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun) | ||
286 | { | ||
287 | struct zfcp_unit *unit; | ||
288 | int retval = -ENOMEM; | ||
289 | |||
290 | get_device(&port->dev); | ||
291 | |||
292 | unit = zfcp_get_unit_by_lun(port, fcp_lun); | ||
293 | if (unit) { | ||
294 | put_device(&unit->dev); | ||
295 | retval = -EEXIST; | ||
296 | goto err_out; | ||
297 | } | ||
298 | |||
299 | unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL); | ||
300 | if (!unit) | ||
301 | goto err_out; | ||
302 | |||
303 | unit->port = port; | ||
304 | unit->fcp_lun = fcp_lun; | ||
305 | unit->dev.parent = &port->dev; | ||
306 | unit->dev.release = zfcp_unit_release; | ||
307 | |||
308 | if (dev_set_name(&unit->dev, "0x%016llx", | ||
309 | (unsigned long long) fcp_lun)) { | ||
310 | kfree(unit); | ||
311 | goto err_out; | ||
312 | } | ||
313 | retval = -EINVAL; | ||
314 | |||
315 | INIT_WORK(&unit->scsi_work, zfcp_scsi_scan_work); | ||
316 | |||
317 | spin_lock_init(&unit->latencies.lock); | ||
318 | unit->latencies.write.channel.min = 0xFFFFFFFF; | ||
319 | unit->latencies.write.fabric.min = 0xFFFFFFFF; | ||
320 | unit->latencies.read.channel.min = 0xFFFFFFFF; | ||
321 | unit->latencies.read.fabric.min = 0xFFFFFFFF; | ||
322 | unit->latencies.cmd.channel.min = 0xFFFFFFFF; | ||
323 | unit->latencies.cmd.fabric.min = 0xFFFFFFFF; | ||
324 | |||
325 | if (device_register(&unit->dev)) { | ||
326 | put_device(&unit->dev); | ||
327 | goto err_out; | ||
328 | } | ||
329 | |||
330 | if (sysfs_create_group(&unit->dev.kobj, &zfcp_sysfs_unit_attrs)) | ||
331 | goto err_out_put; | ||
332 | |||
333 | write_lock_irq(&port->unit_list_lock); | ||
334 | list_add_tail(&unit->list, &port->unit_list); | ||
335 | write_unlock_irq(&port->unit_list_lock); | ||
336 | |||
337 | atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &unit->status); | ||
338 | |||
339 | return unit; | ||
340 | |||
341 | err_out_put: | ||
342 | device_unregister(&unit->dev); | ||
343 | err_out: | ||
344 | put_device(&port->dev); | ||
345 | return ERR_PTR(retval); | ||
346 | } | ||
347 | |||
348 | static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter) | 234 | static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter) |
349 | { | 235 | { |
350 | adapter->pool.erp_req = | 236 | adapter->pool.erp_req = |
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c index ce1cc7a11fb4..0833c2b51e39 100644 --- a/drivers/s390/scsi/zfcp_ccw.c +++ b/drivers/s390/scsi/zfcp_ccw.c | |||
@@ -46,8 +46,7 @@ static int zfcp_ccw_activate(struct ccw_device *cdev) | |||
46 | if (!adapter) | 46 | if (!adapter) |
47 | return 0; | 47 | return 0; |
48 | 48 | ||
49 | zfcp_erp_modify_adapter_status(adapter, "ccresu1", NULL, | 49 | zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING); |
50 | ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET); | ||
51 | zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, | 50 | zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, |
52 | "ccresu2", NULL); | 51 | "ccresu2", NULL); |
53 | zfcp_erp_wait(adapter); | 52 | zfcp_erp_wait(adapter); |
@@ -164,14 +163,7 @@ static int zfcp_ccw_set_online(struct ccw_device *cdev) | |||
164 | BUG_ON(!zfcp_reqlist_isempty(adapter->req_list)); | 163 | BUG_ON(!zfcp_reqlist_isempty(adapter->req_list)); |
165 | adapter->req_no = 0; | 164 | adapter->req_no = 0; |
166 | 165 | ||
167 | zfcp_erp_modify_adapter_status(adapter, "ccsonl1", NULL, | 166 | zfcp_ccw_activate(cdev); |
168 | ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET); | ||
169 | zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, | ||
170 | "ccsonl2", NULL); | ||
171 | zfcp_erp_wait(adapter); | ||
172 | |||
173 | flush_work(&adapter->scan_work); | ||
174 | |||
175 | zfcp_ccw_adapter_put(adapter); | 167 | zfcp_ccw_adapter_put(adapter); |
176 | return 0; | 168 | return 0; |
177 | } | 169 | } |
@@ -224,9 +216,8 @@ static int zfcp_ccw_notify(struct ccw_device *cdev, int event) | |||
224 | break; | 216 | break; |
225 | case CIO_OPER: | 217 | case CIO_OPER: |
226 | dev_info(&cdev->dev, "The FCP device is operational again\n"); | 218 | dev_info(&cdev->dev, "The FCP device is operational again\n"); |
227 | zfcp_erp_modify_adapter_status(adapter, "ccnoti3", NULL, | 219 | zfcp_erp_set_adapter_status(adapter, |
228 | ZFCP_STATUS_COMMON_RUNNING, | 220 | ZFCP_STATUS_COMMON_RUNNING); |
229 | ZFCP_SET); | ||
230 | zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, | 221 | zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, |
231 | "ccnoti4", NULL); | 222 | "ccnoti4", NULL); |
232 | break; | 223 | break; |
diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c index fcbd2b756da4..d692e229ecba 100644 --- a/drivers/s390/scsi/zfcp_cfdc.c +++ b/drivers/s390/scsi/zfcp_cfdc.c | |||
@@ -2,9 +2,10 @@ | |||
2 | * zfcp device driver | 2 | * zfcp device driver |
3 | * | 3 | * |
4 | * Userspace interface for accessing the | 4 | * Userspace interface for accessing the |
5 | * Access Control Lists / Control File Data Channel | 5 | * Access Control Lists / Control File Data Channel; |
6 | * handling of response code and states for ports and LUNs. | ||
6 | * | 7 | * |
7 | * Copyright IBM Corporation 2008, 2009 | 8 | * Copyright IBM Corporation 2008, 2010 |
8 | */ | 9 | */ |
9 | 10 | ||
10 | #define KMSG_COMPONENT "zfcp" | 11 | #define KMSG_COMPONENT "zfcp" |
@@ -251,8 +252,9 @@ static const struct file_operations zfcp_cfdc_fops = { | |||
251 | .open = nonseekable_open, | 252 | .open = nonseekable_open, |
252 | .unlocked_ioctl = zfcp_cfdc_dev_ioctl, | 253 | .unlocked_ioctl = zfcp_cfdc_dev_ioctl, |
253 | #ifdef CONFIG_COMPAT | 254 | #ifdef CONFIG_COMPAT |
254 | .compat_ioctl = zfcp_cfdc_dev_ioctl | 255 | .compat_ioctl = zfcp_cfdc_dev_ioctl, |
255 | #endif | 256 | #endif |
257 | .llseek = no_llseek, | ||
256 | }; | 258 | }; |
257 | 259 | ||
258 | struct miscdevice zfcp_cfdc_misc = { | 260 | struct miscdevice zfcp_cfdc_misc = { |
@@ -260,3 +262,184 @@ struct miscdevice zfcp_cfdc_misc = { | |||
260 | .name = "zfcp_cfdc", | 262 | .name = "zfcp_cfdc", |
261 | .fops = &zfcp_cfdc_fops, | 263 | .fops = &zfcp_cfdc_fops, |
262 | }; | 264 | }; |
265 | |||
266 | /** | ||
267 | * zfcp_cfdc_adapter_access_changed - Process change in adapter ACT | ||
268 | * @adapter: Adapter where the Access Control Table (ACT) changed | ||
269 | * | ||
270 | * After a change in the adapter ACT, check if access to any | ||
271 | * previously denied resources is now possible. | ||
272 | */ | ||
273 | void zfcp_cfdc_adapter_access_changed(struct zfcp_adapter *adapter) | ||
274 | { | ||
275 | unsigned long flags; | ||
276 | struct zfcp_port *port; | ||
277 | struct scsi_device *sdev; | ||
278 | struct zfcp_scsi_dev *zfcp_sdev; | ||
279 | int status; | ||
280 | |||
281 | if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) | ||
282 | return; | ||
283 | |||
284 | read_lock_irqsave(&adapter->port_list_lock, flags); | ||
285 | list_for_each_entry(port, &adapter->port_list, list) { | ||
286 | status = atomic_read(&port->status); | ||
287 | if ((status & ZFCP_STATUS_COMMON_ACCESS_DENIED) || | ||
288 | (status & ZFCP_STATUS_COMMON_ACCESS_BOXED)) | ||
289 | zfcp_erp_port_reopen(port, | ||
290 | ZFCP_STATUS_COMMON_ERP_FAILED, | ||
291 | "cfaac_1", NULL); | ||
292 | } | ||
293 | read_unlock_irqrestore(&adapter->port_list_lock, flags); | ||
294 | |||
295 | shost_for_each_device(sdev, port->adapter->scsi_host) { | ||
296 | zfcp_sdev = sdev_to_zfcp(sdev); | ||
297 | status = atomic_read(&zfcp_sdev->status); | ||
298 | if ((status & ZFCP_STATUS_COMMON_ACCESS_DENIED) || | ||
299 | (status & ZFCP_STATUS_COMMON_ACCESS_BOXED)) | ||
300 | zfcp_erp_lun_reopen(sdev, | ||
301 | ZFCP_STATUS_COMMON_ERP_FAILED, | ||
302 | "cfaac_2", NULL); | ||
303 | } | ||
304 | } | ||
305 | |||
306 | static void zfcp_act_eval_err(struct zfcp_adapter *adapter, u32 table) | ||
307 | { | ||
308 | u16 subtable = table >> 16; | ||
309 | u16 rule = table & 0xffff; | ||
310 | const char *act_type[] = { "unknown", "OS", "WWPN", "DID", "LUN" }; | ||
311 | |||
312 | if (subtable && subtable < ARRAY_SIZE(act_type)) | ||
313 | dev_warn(&adapter->ccw_device->dev, | ||
314 | "Access denied according to ACT rule type %s, " | ||
315 | "rule %d\n", act_type[subtable], rule); | ||
316 | } | ||
317 | |||
318 | /** | ||
319 | * zfcp_cfdc_port_denied - Process "access denied" for port | ||
320 | * @port: The port where the access has been denied | ||
321 | * @qual: The FSF status qualifier for the access denied FSF status | ||
322 | */ | ||
323 | void zfcp_cfdc_port_denied(struct zfcp_port *port, | ||
324 | union fsf_status_qual *qual) | ||
325 | { | ||
326 | dev_warn(&port->adapter->ccw_device->dev, | ||
327 | "Access denied to port 0x%016Lx\n", | ||
328 | (unsigned long long)port->wwpn); | ||
329 | |||
330 | zfcp_act_eval_err(port->adapter, qual->halfword[0]); | ||
331 | zfcp_act_eval_err(port->adapter, qual->halfword[1]); | ||
332 | zfcp_erp_set_port_status(port, | ||
333 | ZFCP_STATUS_COMMON_ERP_FAILED | | ||
334 | ZFCP_STATUS_COMMON_ACCESS_DENIED); | ||
335 | } | ||
336 | |||
337 | /** | ||
338 | * zfcp_cfdc_lun_denied - Process "access denied" for LUN | ||
339 | * @sdev: The SCSI device / LUN where the access has been denied | ||
340 | * @qual: The FSF status qualifier for the access denied FSF status | ||
341 | */ | ||
342 | void zfcp_cfdc_lun_denied(struct scsi_device *sdev, | ||
343 | union fsf_status_qual *qual) | ||
344 | { | ||
345 | struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); | ||
346 | |||
347 | dev_warn(&zfcp_sdev->port->adapter->ccw_device->dev, | ||
348 | "Access denied to LUN 0x%016Lx on port 0x%016Lx\n", | ||
349 | zfcp_scsi_dev_lun(sdev), | ||
350 | (unsigned long long)zfcp_sdev->port->wwpn); | ||
351 | zfcp_act_eval_err(zfcp_sdev->port->adapter, qual->halfword[0]); | ||
352 | zfcp_act_eval_err(zfcp_sdev->port->adapter, qual->halfword[1]); | ||
353 | zfcp_erp_set_lun_status(sdev, | ||
354 | ZFCP_STATUS_COMMON_ERP_FAILED | | ||
355 | ZFCP_STATUS_COMMON_ACCESS_DENIED); | ||
356 | |||
357 | atomic_clear_mask(ZFCP_STATUS_LUN_SHARED, &zfcp_sdev->status); | ||
358 | atomic_clear_mask(ZFCP_STATUS_LUN_READONLY, &zfcp_sdev->status); | ||
359 | } | ||
360 | |||
361 | /** | ||
362 | * zfcp_cfdc_lun_shrng_vltn - Evaluate LUN sharing violation status | ||
363 | * @sdev: The LUN / SCSI device where sharing violation occurred | ||
364 | * @qual: The FSF status qualifier from the LUN sharing violation | ||
365 | */ | ||
366 | void zfcp_cfdc_lun_shrng_vltn(struct scsi_device *sdev, | ||
367 | union fsf_status_qual *qual) | ||
368 | { | ||
369 | struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); | ||
370 | |||
371 | if (qual->word[0]) | ||
372 | dev_warn(&zfcp_sdev->port->adapter->ccw_device->dev, | ||
373 | "LUN 0x%Lx on port 0x%Lx is already in " | ||
374 | "use by CSS%d, MIF Image ID %x\n", | ||
375 | zfcp_scsi_dev_lun(sdev), | ||
376 | (unsigned long long)zfcp_sdev->port->wwpn, | ||
377 | qual->fsf_queue_designator.cssid, | ||
378 | qual->fsf_queue_designator.hla); | ||
379 | else | ||
380 | zfcp_act_eval_err(zfcp_sdev->port->adapter, qual->word[2]); | ||
381 | |||
382 | zfcp_erp_set_lun_status(sdev, | ||
383 | ZFCP_STATUS_COMMON_ERP_FAILED | | ||
384 | ZFCP_STATUS_COMMON_ACCESS_DENIED); | ||
385 | atomic_clear_mask(ZFCP_STATUS_LUN_SHARED, &zfcp_sdev->status); | ||
386 | atomic_clear_mask(ZFCP_STATUS_LUN_READONLY, &zfcp_sdev->status); | ||
387 | } | ||
388 | |||
389 | /** | ||
390 | * zfcp_cfdc_open_lun_eval - Eval access ctrl. status for successful "open lun" | ||
391 | * @sdev: The SCSI device / LUN where to evaluate the status | ||
392 | * @bottom: The qtcb bottom with the status from the "open lun" | ||
393 | * | ||
394 | * Returns: 0 if LUN is usable, -EACCES if the access control table | ||
395 | * reports an unsupported configuration. | ||
396 | */ | ||
397 | int zfcp_cfdc_open_lun_eval(struct scsi_device *sdev, | ||
398 | struct fsf_qtcb_bottom_support *bottom) | ||
399 | { | ||
400 | int shared, rw; | ||
401 | struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); | ||
402 | struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; | ||
403 | |||
404 | if ((adapter->connection_features & FSF_FEATURE_NPIV_MODE) || | ||
405 | !(adapter->adapter_features & FSF_FEATURE_LUN_SHARING) || | ||
406 | zfcp_ccw_priv_sch(adapter)) | ||
407 | return 0; | ||
408 | |||
409 | shared = !(bottom->lun_access_info & FSF_UNIT_ACCESS_EXCLUSIVE); | ||
410 | rw = (bottom->lun_access_info & FSF_UNIT_ACCESS_OUTBOUND_TRANSFER); | ||
411 | |||
412 | if (shared) | ||
413 | atomic_set_mask(ZFCP_STATUS_LUN_SHARED, &zfcp_sdev->status); | ||
414 | |||
415 | if (!rw) { | ||
416 | atomic_set_mask(ZFCP_STATUS_LUN_READONLY, &zfcp_sdev->status); | ||
417 | dev_info(&adapter->ccw_device->dev, "SCSI device at LUN " | ||
418 | "0x%016Lx on port 0x%016Lx opened read-only\n", | ||
419 | zfcp_scsi_dev_lun(sdev), | ||
420 | (unsigned long long)zfcp_sdev->port->wwpn); | ||
421 | } | ||
422 | |||
423 | if (!shared && !rw) { | ||
424 | dev_err(&adapter->ccw_device->dev, "Exclusive read-only access " | ||
425 | "not supported (LUN 0x%016Lx, port 0x%016Lx)\n", | ||
426 | zfcp_scsi_dev_lun(sdev), | ||
427 | (unsigned long long)zfcp_sdev->port->wwpn); | ||
428 | zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED); | ||
429 | zfcp_erp_lun_shutdown(sdev, 0, "fsouh_6", NULL); | ||
430 | return -EACCES; | ||
431 | } | ||
432 | |||
433 | if (shared && rw) { | ||
434 | dev_err(&adapter->ccw_device->dev, | ||
435 | "Shared read-write access not supported " | ||
436 | "(LUN 0x%016Lx, port 0x%016Lx)\n", | ||
437 | zfcp_scsi_dev_lun(sdev), | ||
438 | (unsigned long long)zfcp_sdev->port->wwpn); | ||
439 | zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED); | ||
440 | zfcp_erp_lun_shutdown(sdev, 0, "fsosh_8", NULL); | ||
441 | return -EACCES; | ||
442 | } | ||
443 | |||
444 | return 0; | ||
445 | } | ||
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index a86117b0d6e1..2cdd6b28ff7f 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c | |||
@@ -154,7 +154,6 @@ void _zfcp_dbf_hba_fsf_response(const char *tag2, int level, | |||
154 | scsi_cmnd = (struct scsi_cmnd *)fsf_req->data; | 154 | scsi_cmnd = (struct scsi_cmnd *)fsf_req->data; |
155 | if (scsi_cmnd) { | 155 | if (scsi_cmnd) { |
156 | response->u.fcp.cmnd = (unsigned long)scsi_cmnd; | 156 | response->u.fcp.cmnd = (unsigned long)scsi_cmnd; |
157 | response->u.fcp.serial = scsi_cmnd->serial_number; | ||
158 | response->u.fcp.data_dir = | 157 | response->u.fcp.data_dir = |
159 | qtcb->bottom.io.data_direction; | 158 | qtcb->bottom.io.data_direction; |
160 | } | 159 | } |
@@ -330,7 +329,6 @@ static void zfcp_dbf_hba_view_response(char **p, | |||
330 | break; | 329 | break; |
331 | zfcp_dbf_out(p, "data_direction", "0x%04x", r->u.fcp.data_dir); | 330 | zfcp_dbf_out(p, "data_direction", "0x%04x", r->u.fcp.data_dir); |
332 | zfcp_dbf_out(p, "scsi_cmnd", "0x%0Lx", r->u.fcp.cmnd); | 331 | zfcp_dbf_out(p, "scsi_cmnd", "0x%0Lx", r->u.fcp.cmnd); |
333 | zfcp_dbf_out(p, "scsi_serial", "0x%016Lx", r->u.fcp.serial); | ||
334 | *p += sprintf(*p, "\n"); | 332 | *p += sprintf(*p, "\n"); |
335 | break; | 333 | break; |
336 | 334 | ||
@@ -482,7 +480,7 @@ static int zfcp_dbf_rec_view_format(debug_info_t *id, struct debug_view *view, | |||
482 | zfcp_dbf_out(&p, "fcp_lun", "0x%016Lx", r->u.trigger.fcp_lun); | 480 | zfcp_dbf_out(&p, "fcp_lun", "0x%016Lx", r->u.trigger.fcp_lun); |
483 | zfcp_dbf_out(&p, "adapter_status", "0x%08x", r->u.trigger.as); | 481 | zfcp_dbf_out(&p, "adapter_status", "0x%08x", r->u.trigger.as); |
484 | zfcp_dbf_out(&p, "port_status", "0x%08x", r->u.trigger.ps); | 482 | zfcp_dbf_out(&p, "port_status", "0x%08x", r->u.trigger.ps); |
485 | zfcp_dbf_out(&p, "unit_status", "0x%08x", r->u.trigger.us); | 483 | zfcp_dbf_out(&p, "lun_status", "0x%08x", r->u.trigger.ls); |
486 | break; | 484 | break; |
487 | case ZFCP_REC_DBF_ID_ACTION: | 485 | case ZFCP_REC_DBF_ID_ACTION: |
488 | zfcp_dbf_out(&p, "erp_action", "0x%016Lx", r->u.action.action); | 486 | zfcp_dbf_out(&p, "erp_action", "0x%016Lx", r->u.action.action); |
@@ -600,19 +598,20 @@ void zfcp_dbf_rec_port(char *id, void *ref, struct zfcp_port *port) | |||
600 | } | 598 | } |
601 | 599 | ||
602 | /** | 600 | /** |
603 | * zfcp_dbf_rec_unit - trace event for unit state change | 601 | * zfcp_dbf_rec_lun - trace event for LUN state change |
604 | * @id: identifier for trigger of state change | 602 | * @id: identifier for trigger of state change |
605 | * @ref: additional reference (e.g. request) | 603 | * @ref: additional reference (e.g. request) |
606 | * @unit: unit | 604 | * @sdev: SCSI device |
607 | */ | 605 | */ |
608 | void zfcp_dbf_rec_unit(char *id, void *ref, struct zfcp_unit *unit) | 606 | void zfcp_dbf_rec_lun(char *id, void *ref, struct scsi_device *sdev) |
609 | { | 607 | { |
610 | struct zfcp_port *port = unit->port; | 608 | struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); |
609 | struct zfcp_port *port = zfcp_sdev->port; | ||
611 | struct zfcp_dbf *dbf = port->adapter->dbf; | 610 | struct zfcp_dbf *dbf = port->adapter->dbf; |
612 | 611 | ||
613 | zfcp_dbf_rec_target(id, ref, dbf, &unit->status, | 612 | zfcp_dbf_rec_target(id, ref, dbf, &zfcp_sdev->status, |
614 | &unit->erp_counter, port->wwpn, port->d_id, | 613 | &zfcp_sdev->erp_counter, port->wwpn, port->d_id, |
615 | unit->fcp_lun); | 614 | zfcp_scsi_dev_lun(sdev)); |
616 | } | 615 | } |
617 | 616 | ||
618 | /** | 617 | /** |
@@ -624,11 +623,11 @@ void zfcp_dbf_rec_unit(char *id, void *ref, struct zfcp_unit *unit) | |||
624 | * @action: address of error recovery action struct | 623 | * @action: address of error recovery action struct |
625 | * @adapter: adapter | 624 | * @adapter: adapter |
626 | * @port: port | 625 | * @port: port |
627 | * @unit: unit | 626 | * @sdev: SCSI device |
628 | */ | 627 | */ |
629 | void zfcp_dbf_rec_trigger(char *id2, void *ref, u8 want, u8 need, void *action, | 628 | void zfcp_dbf_rec_trigger(char *id2, void *ref, u8 want, u8 need, void *action, |
630 | struct zfcp_adapter *adapter, struct zfcp_port *port, | 629 | struct zfcp_adapter *adapter, struct zfcp_port *port, |
631 | struct zfcp_unit *unit) | 630 | struct scsi_device *sdev) |
632 | { | 631 | { |
633 | struct zfcp_dbf *dbf = adapter->dbf; | 632 | struct zfcp_dbf *dbf = adapter->dbf; |
634 | struct zfcp_dbf_rec_record *r = &dbf->rec_buf; | 633 | struct zfcp_dbf_rec_record *r = &dbf->rec_buf; |
@@ -647,9 +646,10 @@ void zfcp_dbf_rec_trigger(char *id2, void *ref, u8 want, u8 need, void *action, | |||
647 | r->u.trigger.ps = atomic_read(&port->status); | 646 | r->u.trigger.ps = atomic_read(&port->status); |
648 | r->u.trigger.wwpn = port->wwpn; | 647 | r->u.trigger.wwpn = port->wwpn; |
649 | } | 648 | } |
650 | if (unit) | 649 | if (sdev) |
651 | r->u.trigger.us = atomic_read(&unit->status); | 650 | r->u.trigger.ls = atomic_read(&sdev_to_zfcp(sdev)->status); |
652 | r->u.trigger.fcp_lun = unit ? unit->fcp_lun : ZFCP_DBF_INVALID_LUN; | 651 | r->u.trigger.fcp_lun = sdev ? zfcp_scsi_dev_lun(sdev) : |
652 | ZFCP_DBF_INVALID_LUN; | ||
653 | debug_event(dbf->rec, action ? 1 : 4, r, sizeof(*r)); | 653 | debug_event(dbf->rec, action ? 1 : 4, r, sizeof(*r)); |
654 | spin_unlock_irqrestore(&dbf->rec_lock, flags); | 654 | spin_unlock_irqrestore(&dbf->rec_lock, flags); |
655 | } | 655 | } |
@@ -879,7 +879,6 @@ void _zfcp_dbf_scsi(const char *tag, const char *tag2, int level, | |||
879 | } | 879 | } |
880 | rec->scsi_result = scsi_cmnd->result; | 880 | rec->scsi_result = scsi_cmnd->result; |
881 | rec->scsi_cmnd = (unsigned long)scsi_cmnd; | 881 | rec->scsi_cmnd = (unsigned long)scsi_cmnd; |
882 | rec->scsi_serial = scsi_cmnd->serial_number; | ||
883 | memcpy(rec->scsi_opcode, scsi_cmnd->cmnd, | 882 | memcpy(rec->scsi_opcode, scsi_cmnd->cmnd, |
884 | min((int)scsi_cmnd->cmd_len, | 883 | min((int)scsi_cmnd->cmd_len, |
885 | ZFCP_DBF_SCSI_OPCODE)); | 884 | ZFCP_DBF_SCSI_OPCODE)); |
@@ -948,7 +947,6 @@ static int zfcp_dbf_scsi_view_format(debug_info_t *id, struct debug_view *view, | |||
948 | zfcp_dbf_out(&p, "scsi_lun", "0x%08x", r->scsi_lun); | 947 | zfcp_dbf_out(&p, "scsi_lun", "0x%08x", r->scsi_lun); |
949 | zfcp_dbf_out(&p, "scsi_result", "0x%08x", r->scsi_result); | 948 | zfcp_dbf_out(&p, "scsi_result", "0x%08x", r->scsi_result); |
950 | zfcp_dbf_out(&p, "scsi_cmnd", "0x%0Lx", r->scsi_cmnd); | 949 | zfcp_dbf_out(&p, "scsi_cmnd", "0x%0Lx", r->scsi_cmnd); |
951 | zfcp_dbf_out(&p, "scsi_serial", "0x%016Lx", r->scsi_serial); | ||
952 | zfcp_dbf_outd(&p, "scsi_opcode", r->scsi_opcode, ZFCP_DBF_SCSI_OPCODE, | 950 | zfcp_dbf_outd(&p, "scsi_opcode", r->scsi_opcode, ZFCP_DBF_SCSI_OPCODE, |
953 | 0, ZFCP_DBF_SCSI_OPCODE); | 951 | 0, ZFCP_DBF_SCSI_OPCODE); |
954 | zfcp_dbf_out(&p, "scsi_retries", "0x%02x", r->scsi_retries); | 952 | zfcp_dbf_out(&p, "scsi_retries", "0x%02x", r->scsi_retries); |
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h index 2bcc3403126a..04081b1b62b4 100644 --- a/drivers/s390/scsi/zfcp_dbf.h +++ b/drivers/s390/scsi/zfcp_dbf.h | |||
@@ -60,7 +60,7 @@ struct zfcp_dbf_rec_record_trigger { | |||
60 | u8 need; | 60 | u8 need; |
61 | u32 as; | 61 | u32 as; |
62 | u32 ps; | 62 | u32 ps; |
63 | u32 us; | 63 | u32 ls; |
64 | u64 ref; | 64 | u64 ref; |
65 | u64 action; | 65 | u64 action; |
66 | u64 wwpn; | 66 | u64 wwpn; |
@@ -110,7 +110,6 @@ struct zfcp_dbf_hba_record_response { | |||
110 | union { | 110 | union { |
111 | struct { | 111 | struct { |
112 | u64 cmnd; | 112 | u64 cmnd; |
113 | u64 serial; | ||
114 | u32 data_dir; | 113 | u32 data_dir; |
115 | } fcp; | 114 | } fcp; |
116 | struct { | 115 | struct { |
@@ -206,7 +205,6 @@ struct zfcp_dbf_scsi_record { | |||
206 | u32 scsi_lun; | 205 | u32 scsi_lun; |
207 | u32 scsi_result; | 206 | u32 scsi_result; |
208 | u64 scsi_cmnd; | 207 | u64 scsi_cmnd; |
209 | u64 scsi_serial; | ||
210 | #define ZFCP_DBF_SCSI_OPCODE 16 | 208 | #define ZFCP_DBF_SCSI_OPCODE 16 |
211 | u8 scsi_opcode[ZFCP_DBF_SCSI_OPCODE]; | 209 | u8 scsi_opcode[ZFCP_DBF_SCSI_OPCODE]; |
212 | u8 scsi_retries; | 210 | u8 scsi_retries; |
@@ -350,16 +348,16 @@ void zfcp_dbf_scsi_abort(const char *tag, struct zfcp_dbf *dbf, | |||
350 | /** | 348 | /** |
351 | * zfcp_dbf_scsi_devreset - trace event for Logical Unit or Target Reset | 349 | * zfcp_dbf_scsi_devreset - trace event for Logical Unit or Target Reset |
352 | * @tag: tag indicating success or failure of reset operation | 350 | * @tag: tag indicating success or failure of reset operation |
351 | * @scmnd: SCSI command which caused this error recovery | ||
353 | * @flag: indicates type of reset (Target Reset, Logical Unit Reset) | 352 | * @flag: indicates type of reset (Target Reset, Logical Unit Reset) |
354 | * @unit: unit that needs reset | ||
355 | * @scsi_cmnd: SCSI command which caused this error recovery | ||
356 | */ | 353 | */ |
357 | static inline | 354 | static inline |
358 | void zfcp_dbf_scsi_devreset(const char *tag, u8 flag, struct zfcp_unit *unit, | 355 | void zfcp_dbf_scsi_devreset(const char *tag, struct scsi_cmnd *scmnd, u8 flag) |
359 | struct scsi_cmnd *scsi_cmnd) | ||
360 | { | 356 | { |
357 | struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scmnd->device); | ||
358 | |||
361 | zfcp_dbf_scsi(flag == FCP_TMF_TGT_RESET ? "trst" : "lrst", tag, 1, | 359 | zfcp_dbf_scsi(flag == FCP_TMF_TGT_RESET ? "trst" : "lrst", tag, 1, |
362 | unit->port->adapter->dbf, scsi_cmnd, NULL, 0); | 360 | zfcp_sdev->port->adapter->dbf, scmnd, NULL, 0); |
363 | } | 361 | } |
364 | 362 | ||
365 | #endif /* ZFCP_DBF_H */ | 363 | #endif /* ZFCP_DBF_H */ |
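With the reworked signature, zfcp_dbf_scsi_devreset() reaches the adapter through the command's scsi_device instead of a zfcp_unit, so a caller only passes the command and the TMF flag. A hedged sketch of a call site (the wrapper function name and the "okay" tag are assumptions, not taken from the patch):

        /* Hypothetical call site: trace a successful target reset */
        static void example_trace_tgt_reset(struct scsi_cmnd *scpnt)
        {
                zfcp_dbf_scsi_devreset("okay", scpnt, FCP_TMF_TGT_RESET);
        }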
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index e1c6b6e05a75..9ae1d0a6f627 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h | |||
@@ -85,8 +85,8 @@ struct zfcp_reqlist; | |||
85 | #define ZFCP_STATUS_PORT_LINK_TEST 0x00000002 | 85 | #define ZFCP_STATUS_PORT_LINK_TEST 0x00000002 |
86 | 86 | ||
87 | /* logical unit status */ | 87 | /* logical unit status */ |
88 | #define ZFCP_STATUS_UNIT_SHARED 0x00000004 | 88 | #define ZFCP_STATUS_LUN_SHARED 0x00000004 |
89 | #define ZFCP_STATUS_UNIT_READONLY 0x00000008 | 89 | #define ZFCP_STATUS_LUN_READONLY 0x00000008 |
90 | 90 | ||
91 | /* FSF request status (this does not have a common part) */ | 91 | /* FSF request status (this does not have a common part) */ |
92 | #define ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT 0x00000002 | 92 | #define ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT 0x00000002 |
@@ -118,7 +118,7 @@ struct zfcp_erp_action { | |||
118 | int action; /* requested action code */ | 118 | int action; /* requested action code */ |
119 | struct zfcp_adapter *adapter; /* device which should be recovered */ | 119 | struct zfcp_adapter *adapter; /* device which should be recovered */ |
120 | struct zfcp_port *port; | 120 | struct zfcp_port *port; |
121 | struct zfcp_unit *unit; | 121 | struct scsi_device *sdev; |
122 | u32 status; /* recovery status */ | 122 | u32 status; /* recovery status */ |
123 | u32 step; /* active step of this erp action */ | 123 | u32 step; /* active step of this erp action */ |
124 | unsigned long fsf_req_id; | 124 | unsigned long fsf_req_id; |
@@ -219,21 +219,66 @@ struct zfcp_port { | |||
219 | unsigned int starget_id; | 219 | unsigned int starget_id; |
220 | }; | 220 | }; |
221 | 221 | ||
222 | /** | ||
223 | * struct zfcp_unit - LUN configured via zfcp sysfs | ||
224 | * @dev: struct device for sysfs representation and reference counting | ||
225 | * @list: entry in LUN/unit list per zfcp_port | ||
226 | * @port: reference to zfcp_port where this LUN is configured | ||
227 | * @fcp_lun: 64 bit LUN value | ||
228 | * @scsi_work: for running scsi_scan_target | ||
229 | * | ||
230 | * This is the representation of a LUN that has been configured for | ||
231 | * usage. The main data here is the 64 bit LUN value; the data for | ||
232 | * running I/O and recovery is kept in struct zfcp_scsi_dev. | ||
233 | */ | ||
222 | struct zfcp_unit { | 234 | struct zfcp_unit { |
223 | struct device dev; | 235 | struct device dev; |
224 | struct list_head list; /* list of logical units */ | 236 | struct list_head list; |
225 | struct zfcp_port *port; /* remote port of unit */ | 237 | struct zfcp_port *port; |
226 | atomic_t status; /* status of this logical unit */ | 238 | u64 fcp_lun; |
227 | u64 fcp_lun; /* own FCP_LUN */ | ||
228 | u32 handle; /* handle assigned by FSF */ | ||
229 | struct scsi_device *device; /* scsi device struct pointer */ | ||
230 | struct zfcp_erp_action erp_action; /* pending error recovery */ | ||
231 | atomic_t erp_counter; | ||
232 | struct zfcp_latencies latencies; | ||
233 | struct work_struct scsi_work; | 239 | struct work_struct scsi_work; |
234 | }; | 240 | }; |
235 | 241 | ||
236 | /** | 242 | /** |
243 | * struct zfcp_scsi_dev - zfcp data per SCSI device | ||
244 | * @status: zfcp internal status flags | ||
245 | * @lun_handle: handle from "open lun" for issuing FSF requests | ||
246 | * @erp_action: zfcp erp data for opening and recovering this LUN | ||
247 | * @erp_counter: zfcp erp counter for this LUN | ||
248 | * @latencies: FSF channel and fabric latencies | ||
249 | * @port: zfcp_port this LUN belongs to | ||
250 | */ | ||
251 | struct zfcp_scsi_dev { | ||
252 | atomic_t status; | ||
253 | u32 lun_handle; | ||
254 | struct zfcp_erp_action erp_action; | ||
255 | atomic_t erp_counter; | ||
256 | struct zfcp_latencies latencies; | ||
257 | struct zfcp_port *port; | ||
258 | }; | ||
259 | |||
260 | /** | ||
261 | * sdev_to_zfcp - Access zfcp LUN data for SCSI device | ||
262 | * @sdev: scsi_device from which to get the zfcp_scsi_dev pointer | ||
263 | */ | ||
264 | static inline struct zfcp_scsi_dev *sdev_to_zfcp(struct scsi_device *sdev) | ||
265 | { | ||
266 | return scsi_transport_device_data(sdev); | ||
267 | } | ||
268 | |||
269 | /** | ||
270 | * zfcp_scsi_dev_lun - Return SCSI device LUN as 64 bit FCP LUN | ||
271 | * @sdev: SCSI device from which to get the LUN | ||
272 | */ | ||
273 | static inline u64 zfcp_scsi_dev_lun(struct scsi_device *sdev) | ||
274 | { | ||
275 | u64 fcp_lun; | ||
276 | |||
277 | int_to_scsilun(sdev->lun, (struct scsi_lun *)&fcp_lun); | ||
278 | return fcp_lun; | ||
279 | } | ||
280 | |||
281 | /** | ||
237 | * struct zfcp_fsf_req - basic FSF request structure | 282 | * struct zfcp_fsf_req - basic FSF request structure |
238 | * @list: list of FSF requests | 283 | * @list: list of FSF requests |
239 | * @req_id: unique request ID | 284 | * @req_id: unique request ID |
@@ -249,7 +294,6 @@ struct zfcp_unit { | |||
249 | * @erp_action: reference to erp action if request issued on behalf of ERP | 294 | * @erp_action: reference to erp action if request issued on behalf of ERP |
250 | * @pool: reference to memory pool if used for this request | 295 | * @pool: reference to memory pool if used for this request |
251 | * @issued: time when request was sent (STCK) | 296 | * @issued: time when request was sent (STCK) |
252 | * @unit: reference to unit if this request is a SCSI request | ||
253 | * @handler: handler which should be called to process response | 297 | * @handler: handler which should be called to process response |
254 | */ | 298 | */ |
255 | struct zfcp_fsf_req { | 299 | struct zfcp_fsf_req { |
@@ -267,7 +311,6 @@ struct zfcp_fsf_req { | |||
267 | struct zfcp_erp_action *erp_action; | 311 | struct zfcp_erp_action *erp_action; |
268 | mempool_t *pool; | 312 | mempool_t *pool; |
269 | unsigned long long issued; | 313 | unsigned long long issued; |
270 | struct zfcp_unit *unit; | ||
271 | void (*handler)(struct zfcp_fsf_req *); | 314 | void (*handler)(struct zfcp_fsf_req *); |
272 | }; | 315 | }; |
273 | 316 | ||
@@ -282,9 +325,4 @@ struct zfcp_data { | |||
282 | struct kmem_cache *adisc_cache; | 325 | struct kmem_cache *adisc_cache; |
283 | }; | 326 | }; |
284 | 327 | ||
285 | /********************** ZFCP SPECIFIC DEFINES ********************************/ | ||
286 | |||
287 | #define ZFCP_SET 0x00000100 | ||
288 | #define ZFCP_CLEAR 0x00000200 | ||
289 | |||
290 | #endif /* ZFCP_DEF_H */ | 328 | #endif /* ZFCP_DEF_H */ |
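The new sdev_to_zfcp() and zfcp_scsi_dev_lun() helpers replace the former unit->status and unit->fcp_lun lookups used throughout the driver. A minimal usage sketch, assuming the zfcp headers are included (the function name and the message are illustrative only):

        /* Illustrative only: report a read-only LUN via the per-sdev zfcp data */
        static void example_report_readonly(struct scsi_device *sdev)
        {
                struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);

                if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_LUN_READONLY)
                        dev_info(&zfcp_sdev->port->adapter->ccw_device->dev,
                                 "LUN 0x%016Lx on port 0x%016Lx is read-only\n",
                                 (unsigned long long)zfcp_scsi_dev_lun(sdev),
                                 (unsigned long long)zfcp_sdev->port->wwpn);
        }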
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index 160b432c907f..d37c7331f244 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c | |||
@@ -21,6 +21,7 @@ enum zfcp_erp_act_flags { | |||
21 | ZFCP_STATUS_ERP_DISMISSING = 0x00100000, | 21 | ZFCP_STATUS_ERP_DISMISSING = 0x00100000, |
22 | ZFCP_STATUS_ERP_DISMISSED = 0x00200000, | 22 | ZFCP_STATUS_ERP_DISMISSED = 0x00200000, |
23 | ZFCP_STATUS_ERP_LOWMEM = 0x00400000, | 23 | ZFCP_STATUS_ERP_LOWMEM = 0x00400000, |
24 | ZFCP_STATUS_ERP_NO_REF = 0x00800000, | ||
24 | }; | 25 | }; |
25 | 26 | ||
26 | enum zfcp_erp_steps { | 27 | enum zfcp_erp_steps { |
@@ -29,12 +30,12 @@ enum zfcp_erp_steps { | |||
29 | ZFCP_ERP_STEP_PHYS_PORT_CLOSING = 0x0010, | 30 | ZFCP_ERP_STEP_PHYS_PORT_CLOSING = 0x0010, |
30 | ZFCP_ERP_STEP_PORT_CLOSING = 0x0100, | 31 | ZFCP_ERP_STEP_PORT_CLOSING = 0x0100, |
31 | ZFCP_ERP_STEP_PORT_OPENING = 0x0800, | 32 | ZFCP_ERP_STEP_PORT_OPENING = 0x0800, |
32 | ZFCP_ERP_STEP_UNIT_CLOSING = 0x1000, | 33 | ZFCP_ERP_STEP_LUN_CLOSING = 0x1000, |
33 | ZFCP_ERP_STEP_UNIT_OPENING = 0x2000, | 34 | ZFCP_ERP_STEP_LUN_OPENING = 0x2000, |
34 | }; | 35 | }; |
35 | 36 | ||
36 | enum zfcp_erp_act_type { | 37 | enum zfcp_erp_act_type { |
37 | ZFCP_ERP_ACTION_REOPEN_UNIT = 1, | 38 | ZFCP_ERP_ACTION_REOPEN_LUN = 1, |
38 | ZFCP_ERP_ACTION_REOPEN_PORT = 2, | 39 | ZFCP_ERP_ACTION_REOPEN_PORT = 2, |
39 | ZFCP_ERP_ACTION_REOPEN_PORT_FORCED = 3, | 40 | ZFCP_ERP_ACTION_REOPEN_PORT_FORCED = 3, |
40 | ZFCP_ERP_ACTION_REOPEN_ADAPTER = 4, | 41 | ZFCP_ERP_ACTION_REOPEN_ADAPTER = 4, |
@@ -56,9 +57,8 @@ enum zfcp_erp_act_result { | |||
56 | 57 | ||
57 | static void zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int mask) | 58 | static void zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int mask) |
58 | { | 59 | { |
59 | zfcp_erp_modify_adapter_status(adapter, "erablk1", NULL, | 60 | zfcp_erp_clear_adapter_status(adapter, |
60 | ZFCP_STATUS_COMMON_UNBLOCKED | mask, | 61 | ZFCP_STATUS_COMMON_UNBLOCKED | mask); |
61 | ZFCP_CLEAR); | ||
62 | } | 62 | } |
63 | 63 | ||
64 | static int zfcp_erp_action_exists(struct zfcp_erp_action *act) | 64 | static int zfcp_erp_action_exists(struct zfcp_erp_action *act) |
@@ -88,24 +88,24 @@ static void zfcp_erp_action_dismiss(struct zfcp_erp_action *act) | |||
88 | zfcp_erp_action_ready(act); | 88 | zfcp_erp_action_ready(act); |
89 | } | 89 | } |
90 | 90 | ||
91 | static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *unit) | 91 | static void zfcp_erp_action_dismiss_lun(struct scsi_device *sdev) |
92 | { | 92 | { |
93 | if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_ERP_INUSE) | 93 | struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); |
94 | zfcp_erp_action_dismiss(&unit->erp_action); | 94 | |
95 | if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_INUSE) | ||
96 | zfcp_erp_action_dismiss(&zfcp_sdev->erp_action); | ||
95 | } | 97 | } |
96 | 98 | ||
97 | static void zfcp_erp_action_dismiss_port(struct zfcp_port *port) | 99 | static void zfcp_erp_action_dismiss_port(struct zfcp_port *port) |
98 | { | 100 | { |
99 | struct zfcp_unit *unit; | 101 | struct scsi_device *sdev; |
100 | 102 | ||
101 | if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE) | 103 | if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE) |
102 | zfcp_erp_action_dismiss(&port->erp_action); | 104 | zfcp_erp_action_dismiss(&port->erp_action); |
103 | else { | 105 | else |
104 | read_lock(&port->unit_list_lock); | 106 | shost_for_each_device(sdev, port->adapter->scsi_host) |
105 | list_for_each_entry(unit, &port->unit_list, list) | 107 | if (sdev_to_zfcp(sdev)->port == port) |
106 | zfcp_erp_action_dismiss_unit(unit); | 108 | zfcp_erp_action_dismiss_lun(sdev); |
107 | read_unlock(&port->unit_list_lock); | ||
108 | } | ||
109 | } | 109 | } |
110 | 110 | ||
111 | static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter) | 111 | static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter) |
@@ -124,15 +124,17 @@ static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter) | |||
124 | 124 | ||
125 | static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter, | 125 | static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter, |
126 | struct zfcp_port *port, | 126 | struct zfcp_port *port, |
127 | struct zfcp_unit *unit) | 127 | struct scsi_device *sdev) |
128 | { | 128 | { |
129 | int need = want; | 129 | int need = want; |
130 | int u_status, p_status, a_status; | 130 | int l_status, p_status, a_status; |
131 | struct zfcp_scsi_dev *zfcp_sdev; | ||
131 | 132 | ||
132 | switch (want) { | 133 | switch (want) { |
133 | case ZFCP_ERP_ACTION_REOPEN_UNIT: | 134 | case ZFCP_ERP_ACTION_REOPEN_LUN: |
134 | u_status = atomic_read(&unit->status); | 135 | zfcp_sdev = sdev_to_zfcp(sdev); |
135 | if (u_status & ZFCP_STATUS_COMMON_ERP_INUSE) | 136 | l_status = atomic_read(&zfcp_sdev->status); |
137 | if (l_status & ZFCP_STATUS_COMMON_ERP_INUSE) | ||
136 | return 0; | 138 | return 0; |
137 | p_status = atomic_read(&port->status); | 139 | p_status = atomic_read(&port->status); |
138 | if (!(p_status & ZFCP_STATUS_COMMON_RUNNING) || | 140 | if (!(p_status & ZFCP_STATUS_COMMON_RUNNING) || |
@@ -169,22 +171,26 @@ static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter, | |||
169 | return need; | 171 | return need; |
170 | } | 172 | } |
171 | 173 | ||
172 | static struct zfcp_erp_action *zfcp_erp_setup_act(int need, | 174 | static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status, |
173 | struct zfcp_adapter *adapter, | 175 | struct zfcp_adapter *adapter, |
174 | struct zfcp_port *port, | 176 | struct zfcp_port *port, |
175 | struct zfcp_unit *unit) | 177 | struct scsi_device *sdev) |
176 | { | 178 | { |
177 | struct zfcp_erp_action *erp_action; | 179 | struct zfcp_erp_action *erp_action; |
178 | u32 status = 0; | 180 | struct zfcp_scsi_dev *zfcp_sdev; |
179 | 181 | ||
180 | switch (need) { | 182 | switch (need) { |
181 | case ZFCP_ERP_ACTION_REOPEN_UNIT: | 183 | case ZFCP_ERP_ACTION_REOPEN_LUN: |
182 | if (!get_device(&unit->dev)) | 184 | zfcp_sdev = sdev_to_zfcp(sdev); |
183 | return NULL; | 185 | if (!(act_status & ZFCP_STATUS_ERP_NO_REF)) |
184 | atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status); | 186 | if (scsi_device_get(sdev)) |
185 | erp_action = &unit->erp_action; | 187 | return NULL; |
186 | if (!(atomic_read(&unit->status) & ZFCP_STATUS_COMMON_RUNNING)) | 188 | atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, |
187 | status = ZFCP_STATUS_ERP_CLOSE_ONLY; | 189 | &zfcp_sdev->status); |
190 | erp_action = &zfcp_sdev->erp_action; | ||
191 | if (!(atomic_read(&zfcp_sdev->status) & | ||
192 | ZFCP_STATUS_COMMON_RUNNING)) | ||
193 | act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY; | ||
188 | break; | 194 | break; |
189 | 195 | ||
190 | case ZFCP_ERP_ACTION_REOPEN_PORT: | 196 | case ZFCP_ERP_ACTION_REOPEN_PORT: |
@@ -195,7 +201,7 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, | |||
195 | atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status); | 201 | atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status); |
196 | erp_action = &port->erp_action; | 202 | erp_action = &port->erp_action; |
197 | if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_RUNNING)) | 203 | if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_RUNNING)) |
198 | status = ZFCP_STATUS_ERP_CLOSE_ONLY; | 204 | act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY; |
199 | break; | 205 | break; |
200 | 206 | ||
201 | case ZFCP_ERP_ACTION_REOPEN_ADAPTER: | 207 | case ZFCP_ERP_ACTION_REOPEN_ADAPTER: |
@@ -205,7 +211,7 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, | |||
205 | erp_action = &adapter->erp_action; | 211 | erp_action = &adapter->erp_action; |
206 | if (!(atomic_read(&adapter->status) & | 212 | if (!(atomic_read(&adapter->status) & |
207 | ZFCP_STATUS_COMMON_RUNNING)) | 213 | ZFCP_STATUS_COMMON_RUNNING)) |
208 | status = ZFCP_STATUS_ERP_CLOSE_ONLY; | 214 | act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY; |
209 | break; | 215 | break; |
210 | 216 | ||
211 | default: | 217 | default: |
@@ -215,16 +221,17 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, | |||
215 | memset(erp_action, 0, sizeof(struct zfcp_erp_action)); | 221 | memset(erp_action, 0, sizeof(struct zfcp_erp_action)); |
216 | erp_action->adapter = adapter; | 222 | erp_action->adapter = adapter; |
217 | erp_action->port = port; | 223 | erp_action->port = port; |
218 | erp_action->unit = unit; | 224 | erp_action->sdev = sdev; |
219 | erp_action->action = need; | 225 | erp_action->action = need; |
220 | erp_action->status = status; | 226 | erp_action->status = act_status; |
221 | 227 | ||
222 | return erp_action; | 228 | return erp_action; |
223 | } | 229 | } |
224 | 230 | ||
225 | static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter, | 231 | static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter, |
226 | struct zfcp_port *port, | 232 | struct zfcp_port *port, |
227 | struct zfcp_unit *unit, char *id, void *ref) | 233 | struct scsi_device *sdev, |
234 | char *id, void *ref, u32 act_status) | ||
228 | { | 235 | { |
229 | int retval = 1, need; | 236 | int retval = 1, need; |
230 | struct zfcp_erp_action *act = NULL; | 237 | struct zfcp_erp_action *act = NULL; |
@@ -232,21 +239,21 @@ static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter, | |||
232 | if (!adapter->erp_thread) | 239 | if (!adapter->erp_thread) |
233 | return -EIO; | 240 | return -EIO; |
234 | 241 | ||
235 | need = zfcp_erp_required_act(want, adapter, port, unit); | 242 | need = zfcp_erp_required_act(want, adapter, port, sdev); |
236 | if (!need) | 243 | if (!need) |
237 | goto out; | 244 | goto out; |
238 | 245 | ||
239 | atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status); | 246 | act = zfcp_erp_setup_act(need, act_status, adapter, port, sdev); |
240 | act = zfcp_erp_setup_act(need, adapter, port, unit); | ||
241 | if (!act) | 247 | if (!act) |
242 | goto out; | 248 | goto out; |
249 | atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status); | ||
243 | ++adapter->erp_total_count; | 250 | ++adapter->erp_total_count; |
244 | list_add_tail(&act->list, &adapter->erp_ready_head); | 251 | list_add_tail(&act->list, &adapter->erp_ready_head); |
245 | wake_up(&adapter->erp_ready_wq); | 252 | wake_up(&adapter->erp_ready_wq); |
246 | zfcp_dbf_rec_thread("eracte1", adapter->dbf); | 253 | zfcp_dbf_rec_thread("eracte1", adapter->dbf); |
247 | retval = 0; | 254 | retval = 0; |
248 | out: | 255 | out: |
249 | zfcp_dbf_rec_trigger(id, ref, want, need, act, adapter, port, unit); | 256 | zfcp_dbf_rec_trigger(id, ref, want, need, act, adapter, port, sdev); |
250 | return retval; | 257 | return retval; |
251 | } | 258 | } |
252 | 259 | ||
@@ -258,11 +265,12 @@ static int _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, | |||
258 | 265 | ||
259 | /* ensure propagation of failed status to new devices */ | 266 | /* ensure propagation of failed status to new devices */ |
260 | if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) { | 267 | if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) { |
261 | zfcp_erp_adapter_failed(adapter, "erareo1", NULL); | 268 | zfcp_erp_set_adapter_status(adapter, |
269 | ZFCP_STATUS_COMMON_ERP_FAILED); | ||
262 | return -EIO; | 270 | return -EIO; |
263 | } | 271 | } |
264 | return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, | 272 | return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, |
265 | adapter, NULL, NULL, id, ref); | 273 | adapter, NULL, NULL, id, ref, 0); |
266 | } | 274 | } |
267 | 275 | ||
268 | /** | 276 | /** |
@@ -282,10 +290,11 @@ void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear, | |||
282 | 290 | ||
283 | write_lock_irqsave(&adapter->erp_lock, flags); | 291 | write_lock_irqsave(&adapter->erp_lock, flags); |
284 | if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) | 292 | if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) |
285 | zfcp_erp_adapter_failed(adapter, "erareo1", NULL); | 293 | zfcp_erp_set_adapter_status(adapter, |
294 | ZFCP_STATUS_COMMON_ERP_FAILED); | ||
286 | else | 295 | else |
287 | zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter, | 296 | zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter, |
288 | NULL, NULL, id, ref); | 297 | NULL, NULL, id, ref, 0); |
289 | write_unlock_irqrestore(&adapter->erp_lock, flags); | 298 | write_unlock_irqrestore(&adapter->erp_lock, flags); |
290 | } | 299 | } |
291 | 300 | ||
@@ -317,25 +326,10 @@ void zfcp_erp_port_shutdown(struct zfcp_port *port, int clear, char *id, | |||
317 | zfcp_erp_port_reopen(port, clear | flags, id, ref); | 326 | zfcp_erp_port_reopen(port, clear | flags, id, ref); |
318 | } | 327 | } |
319 | 328 | ||
320 | /** | ||
321 | * zfcp_erp_unit_shutdown - Shutdown unit | ||
322 | * @unit: Unit to shut down. | ||
323 | * @clear: Status flags to clear. | ||
324 | * @id: Id for debug trace event. | ||
325 | * @ref: Reference for debug trace event. | ||
326 | */ | ||
327 | void zfcp_erp_unit_shutdown(struct zfcp_unit *unit, int clear, char *id, | ||
328 | void *ref) | ||
329 | { | ||
330 | int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED; | ||
331 | zfcp_erp_unit_reopen(unit, clear | flags, id, ref); | ||
332 | } | ||
333 | |||
334 | static void zfcp_erp_port_block(struct zfcp_port *port, int clear) | 329 | static void zfcp_erp_port_block(struct zfcp_port *port, int clear) |
335 | { | 330 | { |
336 | zfcp_erp_modify_port_status(port, "erpblk1", NULL, | 331 | zfcp_erp_clear_port_status(port, |
337 | ZFCP_STATUS_COMMON_UNBLOCKED | clear, | 332 | ZFCP_STATUS_COMMON_UNBLOCKED | clear); |
338 | ZFCP_CLEAR); | ||
339 | } | 333 | } |
340 | 334 | ||
341 | static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port, | 335 | static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port, |
@@ -348,7 +342,7 @@ static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port, | |||
348 | return; | 342 | return; |
349 | 343 | ||
350 | zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED, | 344 | zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED, |
351 | port->adapter, port, NULL, id, ref); | 345 | port->adapter, port, NULL, id, ref, 0); |
352 | } | 346 | } |
353 | 347 | ||
354 | /** | 348 | /** |
@@ -376,12 +370,12 @@ static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id, | |||
376 | 370 | ||
377 | if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) { | 371 | if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) { |
378 | /* ensure propagation of failed status to new devices */ | 372 | /* ensure propagation of failed status to new devices */ |
379 | zfcp_erp_port_failed(port, "erpreo1", NULL); | 373 | zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED); |
380 | return -EIO; | 374 | return -EIO; |
381 | } | 375 | } |
382 | 376 | ||
383 | return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT, | 377 | return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT, |
384 | port->adapter, port, NULL, id, ref); | 378 | port->adapter, port, NULL, id, ref, 0); |
385 | } | 379 | } |
386 | 380 | ||
387 | /** | 381 | /** |
@@ -404,53 +398,88 @@ int zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id, void *ref) | |||
404 | return retval; | 398 | return retval; |
405 | } | 399 | } |
406 | 400 | ||
407 | static void zfcp_erp_unit_block(struct zfcp_unit *unit, int clear_mask) | 401 | static void zfcp_erp_lun_block(struct scsi_device *sdev, int clear_mask) |
408 | { | 402 | { |
409 | zfcp_erp_modify_unit_status(unit, "erublk1", NULL, | 403 | zfcp_erp_clear_lun_status(sdev, |
410 | ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask, | 404 | ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask); |
411 | ZFCP_CLEAR); | ||
412 | } | 405 | } |
413 | 406 | ||
414 | static void _zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear, char *id, | 407 | static void _zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id, |
415 | void *ref) | 408 | void *ref, u32 act_status) |
416 | { | 409 | { |
417 | struct zfcp_adapter *adapter = unit->port->adapter; | 410 | struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); |
411 | struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; | ||
418 | 412 | ||
419 | zfcp_erp_unit_block(unit, clear); | 413 | zfcp_erp_lun_block(sdev, clear); |
420 | 414 | ||
421 | if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_ERP_FAILED) | 415 | if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED) |
422 | return; | 416 | return; |
423 | 417 | ||
424 | zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_UNIT, | 418 | zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_LUN, adapter, |
425 | adapter, unit->port, unit, id, ref); | 419 | zfcp_sdev->port, sdev, id, ref, act_status); |
426 | } | 420 | } |
427 | 421 | ||
428 | /** | 422 | /** |
429 | * zfcp_erp_unit_reopen - initiate reopen of a unit | 423 | * zfcp_erp_lun_reopen - initiate reopen of a LUN |
430 | * @unit: unit to be reopened | 424 | * @sdev: SCSI device / LUN to be reopened |
431 | * @clear_mask: specifies flags in unit status to be cleared | 425 | * @clear: specifies flags in LUN status to be cleared |
432 | * Return: 0 on success, < 0 on error | 426 | * Return: 0 on success, < 0 on error |
433 | */ | 427 | */ |
434 | void zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear, char *id, | 428 | void zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id, |
435 | void *ref) | 429 | void *ref) |
436 | { | 430 | { |
437 | unsigned long flags; | 431 | unsigned long flags; |
438 | struct zfcp_port *port = unit->port; | 432 | struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); |
433 | struct zfcp_port *port = zfcp_sdev->port; | ||
439 | struct zfcp_adapter *adapter = port->adapter; | 434 | struct zfcp_adapter *adapter = port->adapter; |
440 | 435 | ||
441 | write_lock_irqsave(&adapter->erp_lock, flags); | 436 | write_lock_irqsave(&adapter->erp_lock, flags); |
442 | _zfcp_erp_unit_reopen(unit, clear, id, ref); | 437 | _zfcp_erp_lun_reopen(sdev, clear, id, ref, 0); |
443 | write_unlock_irqrestore(&adapter->erp_lock, flags); | 438 | write_unlock_irqrestore(&adapter->erp_lock, flags); |
444 | } | 439 | } |
445 | 440 | ||
446 | static int status_change_set(unsigned long mask, atomic_t *status) | 441 | /** |
442 | * zfcp_erp_lun_shutdown - Shutdown LUN | ||
443 | * @sdev: SCSI device / LUN to shut down. | ||
444 | * @clear: Status flags to clear. | ||
445 | * @id: Id for debug trace event. | ||
446 | * @ref: Reference for debug trace event. | ||
447 | */ | ||
448 | void zfcp_erp_lun_shutdown(struct scsi_device *sdev, int clear, char *id, | ||
449 | void *ref) | ||
447 | { | 450 | { |
448 | return (atomic_read(status) ^ mask) & mask; | 451 | int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED; |
452 | zfcp_erp_lun_reopen(sdev, clear | flags, id, ref); | ||
449 | } | 453 | } |
450 | 454 | ||
451 | static int status_change_clear(unsigned long mask, atomic_t *status) | 455 | /** |
456 | * zfcp_erp_lun_shutdown_wait - Shutdown LUN and wait for erp completion | ||
457 | * @sdev: SCSI device / LUN to shut down. | ||
458 | * @id: Id for debug trace event. | ||
459 | * | ||
460 | * Do not acquire a reference for the LUN when creating the ERP | ||
461 | * action. This is safe because the function waits for the ERP to | ||
462 | * complete first, which allows the LUN to be shut down even when the | ||
463 | * SCSI device is already in state SDEV_DEL and scsi_device_get fails. | ||
464 | */ | ||
465 | void zfcp_erp_lun_shutdown_wait(struct scsi_device *sdev, char *id) | ||
452 | { | 466 | { |
453 | return atomic_read(status) & mask; | 467 | unsigned long flags; |
468 | struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); | ||
469 | struct zfcp_port *port = zfcp_sdev->port; | ||
470 | struct zfcp_adapter *adapter = port->adapter; | ||
471 | int clear = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED; | ||
472 | |||
473 | write_lock_irqsave(&adapter->erp_lock, flags); | ||
474 | _zfcp_erp_lun_reopen(sdev, clear, id, NULL, ZFCP_STATUS_ERP_NO_REF); | ||
475 | write_unlock_irqrestore(&adapter->erp_lock, flags); | ||
476 | |||
477 | zfcp_erp_wait(adapter); | ||
478 | } | ||
479 | |||
480 | static int status_change_set(unsigned long mask, atomic_t *status) | ||
481 | { | ||
482 | return (atomic_read(status) ^ mask) & mask; | ||
454 | } | 483 | } |
455 | 484 | ||
456 | static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter) | 485 | static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter) |
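The ZFCP_STATUS_ERP_NO_REF flag added above is only passed by zfcp_erp_lun_shutdown_wait(): the ordinary path takes a scsi_device reference in zfcp_erp_setup_act() and drops it in zfcp_erp_action_cleanup(), while the waiting variant skips the reference because it blocks until recovery has finished. A hedged sketch of a caller (the function name and the trace id are assumptions):

        /* Hypothetical removal path: works even when the sdev is in SDEV_DEL,
         * where scsi_device_get() would fail */
        static void example_remove_lun(struct scsi_device *sdev)
        {
                zfcp_erp_lun_shutdown_wait(sdev, "examp_1");
        }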
@@ -467,11 +496,13 @@ static void zfcp_erp_port_unblock(struct zfcp_port *port) | |||
467 | atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status); | 496 | atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status); |
468 | } | 497 | } |
469 | 498 | ||
470 | static void zfcp_erp_unit_unblock(struct zfcp_unit *unit) | 499 | static void zfcp_erp_lun_unblock(struct scsi_device *sdev) |
471 | { | 500 | { |
472 | if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status)) | 501 | struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); |
473 | zfcp_dbf_rec_unit("eruubl1", NULL, unit); | 502 | |
474 | atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status); | 503 | if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status)) |
504 | zfcp_dbf_rec_lun("erlubl1", NULL, sdev); | ||
505 | atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status); | ||
475 | } | 506 | } |
476 | 507 | ||
477 | static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action) | 508 | static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action) |
@@ -559,15 +590,14 @@ static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter, | |||
559 | read_unlock(&adapter->port_list_lock); | 590 | read_unlock(&adapter->port_list_lock); |
560 | } | 591 | } |
561 | 592 | ||
562 | static void _zfcp_erp_unit_reopen_all(struct zfcp_port *port, int clear, | 593 | static void _zfcp_erp_lun_reopen_all(struct zfcp_port *port, int clear, |
563 | char *id, void *ref) | 594 | char *id, void *ref) |
564 | { | 595 | { |
565 | struct zfcp_unit *unit; | 596 | struct scsi_device *sdev; |
566 | 597 | ||
567 | read_lock(&port->unit_list_lock); | 598 | shost_for_each_device(sdev, port->adapter->scsi_host) |
568 | list_for_each_entry(unit, &port->unit_list, list) | 599 | if (sdev_to_zfcp(sdev)->port == port) |
569 | _zfcp_erp_unit_reopen(unit, clear, id, ref); | 600 | _zfcp_erp_lun_reopen(sdev, clear, id, ref, 0); |
570 | read_unlock(&port->unit_list_lock); | ||
571 | } | 601 | } |
572 | 602 | ||
573 | static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act) | 603 | static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act) |
@@ -582,8 +612,8 @@ static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act) | |||
582 | case ZFCP_ERP_ACTION_REOPEN_PORT: | 612 | case ZFCP_ERP_ACTION_REOPEN_PORT: |
583 | _zfcp_erp_port_reopen(act->port, 0, "ersff_3", NULL); | 613 | _zfcp_erp_port_reopen(act->port, 0, "ersff_3", NULL); |
584 | break; | 614 | break; |
585 | case ZFCP_ERP_ACTION_REOPEN_UNIT: | 615 | case ZFCP_ERP_ACTION_REOPEN_LUN: |
586 | _zfcp_erp_unit_reopen(act->unit, 0, "ersff_4", NULL); | 616 | _zfcp_erp_lun_reopen(act->sdev, 0, "ersff_4", NULL, 0); |
587 | break; | 617 | break; |
588 | } | 618 | } |
589 | } | 619 | } |
@@ -598,7 +628,7 @@ static void zfcp_erp_strategy_followup_success(struct zfcp_erp_action *act) | |||
598 | _zfcp_erp_port_reopen(act->port, 0, "ersfs_2", NULL); | 628 | _zfcp_erp_port_reopen(act->port, 0, "ersfs_2", NULL); |
599 | break; | 629 | break; |
600 | case ZFCP_ERP_ACTION_REOPEN_PORT: | 630 | case ZFCP_ERP_ACTION_REOPEN_PORT: |
601 | _zfcp_erp_unit_reopen_all(act->port, 0, "ersfs_3", NULL); | 631 | _zfcp_erp_lun_reopen_all(act->port, 0, "ersfs_3", NULL); |
602 | break; | 632 | break; |
603 | } | 633 | } |
604 | } | 634 | } |
@@ -742,9 +772,8 @@ static void zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *act) | |||
742 | zfcp_fsf_req_dismiss_all(adapter); | 772 | zfcp_fsf_req_dismiss_all(adapter); |
743 | adapter->fsf_req_seq_no = 0; | 773 | adapter->fsf_req_seq_no = 0; |
744 | zfcp_fc_wka_ports_force_offline(adapter->gs); | 774 | zfcp_fc_wka_ports_force_offline(adapter->gs); |
745 | /* all ports and units are closed */ | 775 | /* all ports and LUNs are closed */ |
746 | zfcp_erp_modify_adapter_status(adapter, "erascl1", NULL, | 776 | zfcp_erp_clear_adapter_status(adapter, ZFCP_STATUS_COMMON_OPEN); |
747 | ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR); | ||
748 | 777 | ||
749 | atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK | | 778 | atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK | |
750 | ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status); | 779 | ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status); |
@@ -861,7 +890,7 @@ static int zfcp_erp_open_ptp_port(struct zfcp_erp_action *act) | |||
861 | struct zfcp_port *port = act->port; | 890 | struct zfcp_port *port = act->port; |
862 | 891 | ||
863 | if (port->wwpn != adapter->peer_wwpn) { | 892 | if (port->wwpn != adapter->peer_wwpn) { |
864 | zfcp_erp_port_failed(port, "eroptp1", NULL); | 893 | zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED); |
865 | return ZFCP_ERP_FAILED; | 894 | return ZFCP_ERP_FAILED; |
866 | } | 895 | } |
867 | port->d_id = adapter->peer_d_id; | 896 | port->d_id = adapter->peer_d_id; |
@@ -933,82 +962,87 @@ close_init_done: | |||
933 | return zfcp_erp_port_strategy_open_common(erp_action); | 962 | return zfcp_erp_port_strategy_open_common(erp_action); |
934 | } | 963 | } |
935 | 964 | ||
936 | static void zfcp_erp_unit_strategy_clearstati(struct zfcp_unit *unit) | 965 | static void zfcp_erp_lun_strategy_clearstati(struct scsi_device *sdev) |
937 | { | 966 | { |
967 | struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); | ||
968 | |||
938 | atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED | | 969 | atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED | |
939 | ZFCP_STATUS_UNIT_SHARED | | 970 | ZFCP_STATUS_LUN_SHARED | ZFCP_STATUS_LUN_READONLY, |
940 | ZFCP_STATUS_UNIT_READONLY, | 971 | &zfcp_sdev->status); |
941 | &unit->status); | ||
942 | } | 972 | } |
943 | 973 | ||
944 | static int zfcp_erp_unit_strategy_close(struct zfcp_erp_action *erp_action) | 974 | static int zfcp_erp_lun_strategy_close(struct zfcp_erp_action *erp_action) |
945 | { | 975 | { |
946 | int retval = zfcp_fsf_close_unit(erp_action); | 976 | int retval = zfcp_fsf_close_lun(erp_action); |
947 | if (retval == -ENOMEM) | 977 | if (retval == -ENOMEM) |
948 | return ZFCP_ERP_NOMEM; | 978 | return ZFCP_ERP_NOMEM; |
949 | erp_action->step = ZFCP_ERP_STEP_UNIT_CLOSING; | 979 | erp_action->step = ZFCP_ERP_STEP_LUN_CLOSING; |
950 | if (retval) | 980 | if (retval) |
951 | return ZFCP_ERP_FAILED; | 981 | return ZFCP_ERP_FAILED; |
952 | return ZFCP_ERP_CONTINUES; | 982 | return ZFCP_ERP_CONTINUES; |
953 | } | 983 | } |
954 | 984 | ||
955 | static int zfcp_erp_unit_strategy_open(struct zfcp_erp_action *erp_action) | 985 | static int zfcp_erp_lun_strategy_open(struct zfcp_erp_action *erp_action) |
956 | { | 986 | { |
957 | int retval = zfcp_fsf_open_unit(erp_action); | 987 | int retval = zfcp_fsf_open_lun(erp_action); |
958 | if (retval == -ENOMEM) | 988 | if (retval == -ENOMEM) |
959 | return ZFCP_ERP_NOMEM; | 989 | return ZFCP_ERP_NOMEM; |
960 | erp_action->step = ZFCP_ERP_STEP_UNIT_OPENING; | 990 | erp_action->step = ZFCP_ERP_STEP_LUN_OPENING; |
961 | if (retval) | 991 | if (retval) |
962 | return ZFCP_ERP_FAILED; | 992 | return ZFCP_ERP_FAILED; |
963 | return ZFCP_ERP_CONTINUES; | 993 | return ZFCP_ERP_CONTINUES; |
964 | } | 994 | } |
965 | 995 | ||
966 | static int zfcp_erp_unit_strategy(struct zfcp_erp_action *erp_action) | 996 | static int zfcp_erp_lun_strategy(struct zfcp_erp_action *erp_action) |
967 | { | 997 | { |
968 | struct zfcp_unit *unit = erp_action->unit; | 998 | struct scsi_device *sdev = erp_action->sdev; |
999 | struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); | ||
969 | 1000 | ||
970 | switch (erp_action->step) { | 1001 | switch (erp_action->step) { |
971 | case ZFCP_ERP_STEP_UNINITIALIZED: | 1002 | case ZFCP_ERP_STEP_UNINITIALIZED: |
972 | zfcp_erp_unit_strategy_clearstati(unit); | 1003 | zfcp_erp_lun_strategy_clearstati(sdev); |
973 | if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_OPEN) | 1004 | if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN) |
974 | return zfcp_erp_unit_strategy_close(erp_action); | 1005 | return zfcp_erp_lun_strategy_close(erp_action); |
975 | /* already closed, fall through */ | 1006 | /* already closed, fall through */ |
976 | case ZFCP_ERP_STEP_UNIT_CLOSING: | 1007 | case ZFCP_ERP_STEP_LUN_CLOSING: |
977 | if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_OPEN) | 1008 | if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN) |
978 | return ZFCP_ERP_FAILED; | 1009 | return ZFCP_ERP_FAILED; |
979 | if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY) | 1010 | if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY) |
980 | return ZFCP_ERP_EXIT; | 1011 | return ZFCP_ERP_EXIT; |
981 | return zfcp_erp_unit_strategy_open(erp_action); | 1012 | return zfcp_erp_lun_strategy_open(erp_action); |
982 | 1013 | ||
983 | case ZFCP_ERP_STEP_UNIT_OPENING: | 1014 | case ZFCP_ERP_STEP_LUN_OPENING: |
984 | if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_OPEN) | 1015 | if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN) |
985 | return ZFCP_ERP_SUCCEEDED; | 1016 | return ZFCP_ERP_SUCCEEDED; |
986 | } | 1017 | } |
987 | return ZFCP_ERP_FAILED; | 1018 | return ZFCP_ERP_FAILED; |
988 | } | 1019 | } |
989 | 1020 | ||
990 | static int zfcp_erp_strategy_check_unit(struct zfcp_unit *unit, int result) | 1021 | static int zfcp_erp_strategy_check_lun(struct scsi_device *sdev, int result) |
991 | { | 1022 | { |
1023 | struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); | ||
1024 | |||
992 | switch (result) { | 1025 | switch (result) { |
993 | case ZFCP_ERP_SUCCEEDED : | 1026 | case ZFCP_ERP_SUCCEEDED : |
994 | atomic_set(&unit->erp_counter, 0); | 1027 | atomic_set(&zfcp_sdev->erp_counter, 0); |
995 | zfcp_erp_unit_unblock(unit); | 1028 | zfcp_erp_lun_unblock(sdev); |
996 | break; | 1029 | break; |
997 | case ZFCP_ERP_FAILED : | 1030 | case ZFCP_ERP_FAILED : |
998 | atomic_inc(&unit->erp_counter); | 1031 | atomic_inc(&zfcp_sdev->erp_counter); |
999 | if (atomic_read(&unit->erp_counter) > ZFCP_MAX_ERPS) { | 1032 | if (atomic_read(&zfcp_sdev->erp_counter) > ZFCP_MAX_ERPS) { |
1000 | dev_err(&unit->port->adapter->ccw_device->dev, | 1033 | dev_err(&zfcp_sdev->port->adapter->ccw_device->dev, |
1001 | "ERP failed for unit 0x%016Lx on " | 1034 | "ERP failed for LUN 0x%016Lx on " |
1002 | "port 0x%016Lx\n", | 1035 | "port 0x%016Lx\n", |
1003 | (unsigned long long)unit->fcp_lun, | 1036 | (unsigned long long)zfcp_scsi_dev_lun(sdev), |
1004 | (unsigned long long)unit->port->wwpn); | 1037 | (unsigned long long)zfcp_sdev->port->wwpn); |
1005 | zfcp_erp_unit_failed(unit, "erusck1", NULL); | 1038 | zfcp_erp_set_lun_status(sdev, |
1039 | ZFCP_STATUS_COMMON_ERP_FAILED); | ||
1006 | } | 1040 | } |
1007 | break; | 1041 | break; |
1008 | } | 1042 | } |
1009 | 1043 | ||
1010 | if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_ERP_FAILED) { | 1044 | if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED) { |
1011 | zfcp_erp_unit_block(unit, 0); | 1045 | zfcp_erp_lun_block(sdev, 0); |
1012 | result = ZFCP_ERP_EXIT; | 1046 | result = ZFCP_ERP_EXIT; |
1013 | } | 1047 | } |
1014 | return result; | 1048 | return result; |
@@ -1032,7 +1066,8 @@ static int zfcp_erp_strategy_check_port(struct zfcp_port *port, int result) | |||
1032 | dev_err(&port->adapter->ccw_device->dev, | 1066 | dev_err(&port->adapter->ccw_device->dev, |
1033 | "ERP failed for remote port 0x%016Lx\n", | 1067 | "ERP failed for remote port 0x%016Lx\n", |
1034 | (unsigned long long)port->wwpn); | 1068 | (unsigned long long)port->wwpn); |
1035 | zfcp_erp_port_failed(port, "erpsck1", NULL); | 1069 | zfcp_erp_set_port_status(port, |
1070 | ZFCP_STATUS_COMMON_ERP_FAILED); | ||
1036 | } | 1071 | } |
1037 | break; | 1072 | break; |
1038 | } | 1073 | } |
@@ -1059,7 +1094,8 @@ static int zfcp_erp_strategy_check_adapter(struct zfcp_adapter *adapter, | |||
1059 | dev_err(&adapter->ccw_device->dev, | 1094 | dev_err(&adapter->ccw_device->dev, |
1060 | "ERP cannot recover an error " | 1095 | "ERP cannot recover an error " |
1061 | "on the FCP device\n"); | 1096 | "on the FCP device\n"); |
1062 | zfcp_erp_adapter_failed(adapter, "erasck1", NULL); | 1097 | zfcp_erp_set_adapter_status(adapter, |
1098 | ZFCP_STATUS_COMMON_ERP_FAILED); | ||
1063 | } | 1099 | } |
1064 | break; | 1100 | break; |
1065 | } | 1101 | } |
@@ -1076,12 +1112,12 @@ static int zfcp_erp_strategy_check_target(struct zfcp_erp_action *erp_action, | |||
1076 | { | 1112 | { |
1077 | struct zfcp_adapter *adapter = erp_action->adapter; | 1113 | struct zfcp_adapter *adapter = erp_action->adapter; |
1078 | struct zfcp_port *port = erp_action->port; | 1114 | struct zfcp_port *port = erp_action->port; |
1079 | struct zfcp_unit *unit = erp_action->unit; | 1115 | struct scsi_device *sdev = erp_action->sdev; |
1080 | 1116 | ||
1081 | switch (erp_action->action) { | 1117 | switch (erp_action->action) { |
1082 | 1118 | ||
1083 | case ZFCP_ERP_ACTION_REOPEN_UNIT: | 1119 | case ZFCP_ERP_ACTION_REOPEN_LUN: |
1084 | result = zfcp_erp_strategy_check_unit(unit, result); | 1120 | result = zfcp_erp_strategy_check_lun(sdev, result); |
1085 | break; | 1121 | break; |
1086 | 1122 | ||
1087 | case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: | 1123 | case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: |
@@ -1116,7 +1152,8 @@ static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret) | |||
1116 | int action = act->action; | 1152 | int action = act->action; |
1117 | struct zfcp_adapter *adapter = act->adapter; | 1153 | struct zfcp_adapter *adapter = act->adapter; |
1118 | struct zfcp_port *port = act->port; | 1154 | struct zfcp_port *port = act->port; |
1119 | struct zfcp_unit *unit = act->unit; | 1155 | struct scsi_device *sdev = act->sdev; |
1156 | struct zfcp_scsi_dev *zfcp_sdev; | ||
1120 | u32 erp_status = act->status; | 1157 | u32 erp_status = act->status; |
1121 | 1158 | ||
1122 | switch (action) { | 1159 | switch (action) { |
@@ -1139,11 +1176,12 @@ static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret) | |||
1139 | } | 1176 | } |
1140 | break; | 1177 | break; |
1141 | 1178 | ||
1142 | case ZFCP_ERP_ACTION_REOPEN_UNIT: | 1179 | case ZFCP_ERP_ACTION_REOPEN_LUN: |
1143 | if (zfcp_erp_strat_change_det(&unit->status, erp_status)) { | 1180 | zfcp_sdev = sdev_to_zfcp(sdev); |
1144 | _zfcp_erp_unit_reopen(unit, | 1181 | if (zfcp_erp_strat_change_det(&zfcp_sdev->status, erp_status)) { |
1145 | ZFCP_STATUS_COMMON_ERP_FAILED, | 1182 | _zfcp_erp_lun_reopen(sdev, |
1146 | "ersscg3", NULL); | 1183 | ZFCP_STATUS_COMMON_ERP_FAILED, |
1184 | "ersscg3", NULL, 0); | ||
1147 | return ZFCP_ERP_EXIT; | 1185 | return ZFCP_ERP_EXIT; |
1148 | } | 1186 | } |
1149 | break; | 1187 | break; |
@@ -1154,6 +1192,7 @@ static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret) | |||
1154 | static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action) | 1192 | static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action) |
1155 | { | 1193 | { |
1156 | struct zfcp_adapter *adapter = erp_action->adapter; | 1194 | struct zfcp_adapter *adapter = erp_action->adapter; |
1195 | struct zfcp_scsi_dev *zfcp_sdev; | ||
1157 | 1196 | ||
1158 | adapter->erp_total_count--; | 1197 | adapter->erp_total_count--; |
1159 | if (erp_action->status & ZFCP_STATUS_ERP_LOWMEM) { | 1198 | if (erp_action->status & ZFCP_STATUS_ERP_LOWMEM) { |
@@ -1165,9 +1204,10 @@ static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action) | |||
1165 | zfcp_dbf_rec_action("eractd1", erp_action); | 1204 | zfcp_dbf_rec_action("eractd1", erp_action); |
1166 | 1205 | ||
1167 | switch (erp_action->action) { | 1206 | switch (erp_action->action) { |
1168 | case ZFCP_ERP_ACTION_REOPEN_UNIT: | 1207 | case ZFCP_ERP_ACTION_REOPEN_LUN: |
1208 | zfcp_sdev = sdev_to_zfcp(erp_action->sdev); | ||
1169 | atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE, | 1209 | atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE, |
1170 | &erp_action->unit->status); | 1210 | &zfcp_sdev->status); |
1171 | break; | 1211 | break; |
1172 | 1212 | ||
1173 | case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: | 1213 | case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: |
@@ -1187,11 +1227,12 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result) | |||
1187 | { | 1227 | { |
1188 | struct zfcp_adapter *adapter = act->adapter; | 1228 | struct zfcp_adapter *adapter = act->adapter; |
1189 | struct zfcp_port *port = act->port; | 1229 | struct zfcp_port *port = act->port; |
1190 | struct zfcp_unit *unit = act->unit; | 1230 | struct scsi_device *sdev = act->sdev; |
1191 | 1231 | ||
1192 | switch (act->action) { | 1232 | switch (act->action) { |
1193 | case ZFCP_ERP_ACTION_REOPEN_UNIT: | 1233 | case ZFCP_ERP_ACTION_REOPEN_LUN: |
1194 | put_device(&unit->dev); | 1234 | if (!(act->status & ZFCP_STATUS_ERP_NO_REF)) |
1235 | scsi_device_put(sdev); | ||
1195 | break; | 1236 | break; |
1196 | 1237 | ||
1197 | case ZFCP_ERP_ACTION_REOPEN_PORT: | 1238 | case ZFCP_ERP_ACTION_REOPEN_PORT: |
@@ -1222,8 +1263,8 @@ static int zfcp_erp_strategy_do_action(struct zfcp_erp_action *erp_action) | |||
1222 | return zfcp_erp_port_forced_strategy(erp_action); | 1263 | return zfcp_erp_port_forced_strategy(erp_action); |
1223 | case ZFCP_ERP_ACTION_REOPEN_PORT: | 1264 | case ZFCP_ERP_ACTION_REOPEN_PORT: |
1224 | return zfcp_erp_port_strategy(erp_action); | 1265 | return zfcp_erp_port_strategy(erp_action); |
1225 | case ZFCP_ERP_ACTION_REOPEN_UNIT: | 1266 | case ZFCP_ERP_ACTION_REOPEN_LUN: |
1226 | return zfcp_erp_unit_strategy(erp_action); | 1267 | return zfcp_erp_lun_strategy(erp_action); |
1227 | } | 1268 | } |
1228 | return ZFCP_ERP_FAILED; | 1269 | return ZFCP_ERP_FAILED; |
1229 | } | 1270 | } |
@@ -1376,42 +1417,6 @@ void zfcp_erp_thread_kill(struct zfcp_adapter *adapter) | |||
1376 | } | 1417 | } |
1377 | 1418 | ||
1378 | /** | 1419 | /** |
1379 | * zfcp_erp_adapter_failed - Set adapter status to failed. | ||
1380 | * @adapter: Failed adapter. | ||
1381 | * @id: Event id for debug trace. | ||
1382 | * @ref: Reference for debug trace. | ||
1383 | */ | ||
1384 | void zfcp_erp_adapter_failed(struct zfcp_adapter *adapter, char *id, void *ref) | ||
1385 | { | ||
1386 | zfcp_erp_modify_adapter_status(adapter, id, ref, | ||
1387 | ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET); | ||
1388 | } | ||
1389 | |||
1390 | /** | ||
1391 | * zfcp_erp_port_failed - Set port status to failed. | ||
1392 | * @port: Failed port. | ||
1393 | * @id: Event id for debug trace. | ||
1394 | * @ref: Reference for debug trace. | ||
1395 | */ | ||
1396 | void zfcp_erp_port_failed(struct zfcp_port *port, char *id, void *ref) | ||
1397 | { | ||
1398 | zfcp_erp_modify_port_status(port, id, ref, | ||
1399 | ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET); | ||
1400 | } | ||
1401 | |||
1402 | /** | ||
1403 | * zfcp_erp_unit_failed - Set unit status to failed. | ||
1404 | * @unit: Failed unit. | ||
1405 | * @id: Event id for debug trace. | ||
1406 | * @ref: Reference for debug trace. | ||
1407 | */ | ||
1408 | void zfcp_erp_unit_failed(struct zfcp_unit *unit, char *id, void *ref) | ||
1409 | { | ||
1410 | zfcp_erp_modify_unit_status(unit, id, ref, | ||
1411 | ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET); | ||
1412 | } | ||
1413 | |||
1414 | /** | ||
1415 | * zfcp_erp_wait - wait for completion of error recovery on an adapter | 1420 | * zfcp_erp_wait - wait for completion of error recovery on an adapter |
1416 | * @adapter: adapter for which to wait for completion of its error recovery | 1421 | * @adapter: adapter for which to wait for completion of its error recovery |
1417 | */ | 1422 | */ |
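The following hunk introduces the dedicated set/clear status helpers that the call sites converted earlier in this patch already use; the trace id and reference arguments of the old interface are dropped. A sketch of the mapping, wrapped in an illustrative function:

        /* Illustrative mapping from the old modify interface to the new one */
        static void example_mark_port_failed(struct zfcp_port *port)
        {
                /* previously:
                 * zfcp_erp_modify_port_status(port, "erpreo1", NULL,
                 *                             ZFCP_STATUS_COMMON_ERP_FAILED,
                 *                             ZFCP_SET);
                 */
                zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED);
        }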
@@ -1423,210 +1428,148 @@ void zfcp_erp_wait(struct zfcp_adapter *adapter) | |||
1423 | } | 1428 | } |
1424 | 1429 | ||
1425 | /** | 1430 | /** |
1426 | * zfcp_erp_modify_adapter_status - change adapter status bits | 1431 | * zfcp_erp_set_adapter_status - set adapter status bits |
1427 | * @adapter: adapter to change the status | 1432 | * @adapter: adapter to change the status |
1428 | * @id: id for the debug trace | ||
1429 | * @ref: reference for the debug trace | ||
1430 | * @mask: status bits to change | 1433 | * @mask: status bits to change |
1431 | * @set_or_clear: ZFCP_SET or ZFCP_CLEAR | ||
1432 | * | 1434 | * |
1433 | * Changes in common status bits are propagated to attached ports and units. | 1435 | * Changes in common status bits are propagated to attached ports and LUNs. |
1434 | */ | 1436 | */ |
1435 | void zfcp_erp_modify_adapter_status(struct zfcp_adapter *adapter, char *id, | 1437 | void zfcp_erp_set_adapter_status(struct zfcp_adapter *adapter, u32 mask) |
1436 | void *ref, u32 mask, int set_or_clear) | ||
1437 | { | 1438 | { |
1438 | struct zfcp_port *port; | 1439 | struct zfcp_port *port; |
1440 | struct scsi_device *sdev; | ||
1439 | unsigned long flags; | 1441 | unsigned long flags; |
1440 | u32 common_mask = mask & ZFCP_COMMON_FLAGS; | 1442 | u32 common_mask = mask & ZFCP_COMMON_FLAGS; |
1441 | 1443 | ||
1442 | if (set_or_clear == ZFCP_SET) { | 1444 | atomic_set_mask(mask, &adapter->status); |
1443 | if (status_change_set(mask, &adapter->status)) | ||
1444 | zfcp_dbf_rec_adapter(id, ref, adapter->dbf); | ||
1445 | atomic_set_mask(mask, &adapter->status); | ||
1446 | } else { | ||
1447 | if (status_change_clear(mask, &adapter->status)) | ||
1448 | zfcp_dbf_rec_adapter(id, ref, adapter->dbf); | ||
1449 | atomic_clear_mask(mask, &adapter->status); | ||
1450 | if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) | ||
1451 | atomic_set(&adapter->erp_counter, 0); | ||
1452 | } | ||
1453 | 1445 | ||
1454 | if (common_mask) { | 1446 | if (!common_mask) |
1455 | read_lock_irqsave(&adapter->port_list_lock, flags); | 1447 | return; |
1456 | list_for_each_entry(port, &adapter->port_list, list) | 1448 | |
1457 | zfcp_erp_modify_port_status(port, id, ref, common_mask, | 1449 | read_lock_irqsave(&adapter->port_list_lock, flags); |
1458 | set_or_clear); | 1450 | list_for_each_entry(port, &adapter->port_list, list) |
1459 | read_unlock_irqrestore(&adapter->port_list_lock, flags); | 1451 | atomic_set_mask(common_mask, &port->status); |
1460 | } | 1452 | read_unlock_irqrestore(&adapter->port_list_lock, flags); |
1453 | |||
1454 | shost_for_each_device(sdev, adapter->scsi_host) | ||
1455 | atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status); | ||
1461 | } | 1456 | } |
1462 | 1457 | ||
1463 | /** | 1458 | /** |
1464 | * zfcp_erp_modify_port_status - change port status bits | 1459 | * zfcp_erp_clear_adapter_status - clear adapter status bits |
1465 | * @port: port to change the status bits | 1460 | * @adapter: adapter to change the status |
1466 | * @id: id for the debug trace | ||
1467 | * @ref: reference for the debug trace | ||
1468 | * @mask: status bits to change | 1461 | * @mask: status bits to change |
1469 | * @set_or_clear: ZFCP_SET or ZFCP_CLEAR | ||
1470 | * | 1462 | * |
1471 | * Changes in common status bits are propagated to attached units. | 1463 | * Changes in common status bits are propagated to attached ports and LUNs. |
1472 | */ | 1464 | */ |
1473 | void zfcp_erp_modify_port_status(struct zfcp_port *port, char *id, void *ref, | 1465 | void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask) |
1474 | u32 mask, int set_or_clear) | ||
1475 | { | 1466 | { |
1476 | struct zfcp_unit *unit; | 1467 | struct zfcp_port *port; |
1468 | struct scsi_device *sdev; | ||
1477 | unsigned long flags; | 1469 | unsigned long flags; |
1478 | u32 common_mask = mask & ZFCP_COMMON_FLAGS; | 1470 | u32 common_mask = mask & ZFCP_COMMON_FLAGS; |
1471 | u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED; | ||
1472 | |||
1473 | atomic_clear_mask(mask, &adapter->status); | ||
1474 | |||
1475 | if (!common_mask) | ||
1476 | return; | ||
1477 | |||
1478 | if (clear_counter) | ||
1479 | atomic_set(&adapter->erp_counter, 0); | ||
1479 | 1480 | ||
1480 | if (set_or_clear == ZFCP_SET) { | 1481 | read_lock_irqsave(&adapter->port_list_lock, flags); |
1481 | if (status_change_set(mask, &port->status)) | 1482 | list_for_each_entry(port, &adapter->port_list, list) { |
1482 | zfcp_dbf_rec_port(id, ref, port); | 1483 | atomic_clear_mask(common_mask, &port->status); |
1483 | atomic_set_mask(mask, &port->status); | 1484 | if (clear_counter) |
1484 | } else { | ||
1485 | if (status_change_clear(mask, &port->status)) | ||
1486 | zfcp_dbf_rec_port(id, ref, port); | ||
1487 | atomic_clear_mask(mask, &port->status); | ||
1488 | if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) | ||
1489 | atomic_set(&port->erp_counter, 0); | 1485 | atomic_set(&port->erp_counter, 0); |
1490 | } | 1486 | } |
1487 | read_unlock_irqrestore(&adapter->port_list_lock, flags); | ||
1491 | 1488 | ||
1492 | if (common_mask) { | 1489 | shost_for_each_device(sdev, adapter->scsi_host) { |
1493 | read_lock_irqsave(&port->unit_list_lock, flags); | 1490 | atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status); |
1494 | list_for_each_entry(unit, &port->unit_list, list) | 1491 | if (clear_counter) |
1495 | zfcp_erp_modify_unit_status(unit, id, ref, common_mask, | 1492 | atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0); |
1496 | set_or_clear); | ||
1497 | read_unlock_irqrestore(&port->unit_list_lock, flags); | ||
1498 | } | 1493 | } |
1499 | } | 1494 | } |
1500 | 1495 | ||
1501 | /** | 1496 | /** |
1502 | * zfcp_erp_modify_unit_status - change unit status bits | 1497 | * zfcp_erp_set_port_status - set port status bits |
1503 | * @unit: unit to change the status bits | 1498 | * @port: port to change the status |
1504 | * @id: id for the debug trace | ||
1505 | * @ref: reference for the debug trace | ||
1506 | * @mask: status bits to change | 1499 | * @mask: status bits to change |
1507 | * @set_or_clear: ZFCP_SET or ZFCP_CLEAR | 1500 | * |
1508 | */ | 1501 | * Changes in common status bits are propagated to attached LUNs. |
1509 | void zfcp_erp_modify_unit_status(struct zfcp_unit *unit, char *id, void *ref, | ||
1510 | u32 mask, int set_or_clear) | ||
1511 | { | ||
1512 | if (set_or_clear == ZFCP_SET) { | ||
1513 | if (status_change_set(mask, &unit->status)) | ||
1514 | zfcp_dbf_rec_unit(id, ref, unit); | ||
1515 | atomic_set_mask(mask, &unit->status); | ||
1516 | } else { | ||
1517 | if (status_change_clear(mask, &unit->status)) | ||
1518 | zfcp_dbf_rec_unit(id, ref, unit); | ||
1519 | atomic_clear_mask(mask, &unit->status); | ||
1520 | if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) { | ||
1521 | atomic_set(&unit->erp_counter, 0); | ||
1522 | } | ||
1523 | } | ||
1524 | } | ||
1525 | |||
1526 | /** | ||
1527 | * zfcp_erp_port_boxed - Mark port as "boxed" and start ERP | ||
1528 | * @port: The "boxed" port. | ||
1529 | * @id: The debug trace id. | ||
1530 | * @ref: Reference for the debug trace. | ||
1531 | */ | 1502 | */ |
1532 | void zfcp_erp_port_boxed(struct zfcp_port *port, char *id, void *ref) | 1503 | void zfcp_erp_set_port_status(struct zfcp_port *port, u32 mask) |
1533 | { | 1504 | { |
1534 | zfcp_erp_modify_port_status(port, id, ref, | 1505 | struct scsi_device *sdev; |
1535 | ZFCP_STATUS_COMMON_ACCESS_BOXED, ZFCP_SET); | 1506 | u32 common_mask = mask & ZFCP_COMMON_FLAGS; |
1536 | zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref); | ||
1537 | } | ||
1538 | 1507 | ||
1539 | /** | 1508 | atomic_set_mask(mask, &port->status); |
1540 | * zfcp_erp_unit_boxed - Mark unit as "boxed" and start ERP | ||
1541 | * @port: The "boxed" unit. | ||
1542 | * @id: The debug trace id. | ||
1543 | * @id: Reference for the debug trace. | ||
1544 | */ | ||
1545 | void zfcp_erp_unit_boxed(struct zfcp_unit *unit, char *id, void *ref) | ||
1546 | { | ||
1547 | zfcp_erp_modify_unit_status(unit, id, ref, | ||
1548 | ZFCP_STATUS_COMMON_ACCESS_BOXED, ZFCP_SET); | ||
1549 | zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref); | ||
1550 | } | ||
1551 | 1509 | ||
1552 | /** | 1510 | if (!common_mask) |
1553 | * zfcp_erp_port_access_denied - Adapter denied access to port. | 1511 | return; |
1554 | * @port: port where access has been denied | 1512 | |
1555 | * @id: id for debug trace | 1513 | shost_for_each_device(sdev, port->adapter->scsi_host) |
1556 | * @ref: reference for debug trace | 1514 | if (sdev_to_zfcp(sdev)->port == port) |
1557 | * | 1515 | atomic_set_mask(common_mask, |
1558 | * Since the adapter has denied access, stop using the port and the | 1516 | &sdev_to_zfcp(sdev)->status); |
1559 | * attached units. | ||
1560 | */ | ||
1561 | void zfcp_erp_port_access_denied(struct zfcp_port *port, char *id, void *ref) | ||
1562 | { | ||
1563 | zfcp_erp_modify_port_status(port, id, ref, | ||
1564 | ZFCP_STATUS_COMMON_ERP_FAILED | | ||
1565 | ZFCP_STATUS_COMMON_ACCESS_DENIED, ZFCP_SET); | ||
1566 | } | 1517 | } |
1567 | 1518 | ||
1568 | /** | 1519 | /** |
1569 | * zfcp_erp_unit_access_denied - Adapter denied access to unit. | 1520 | * zfcp_erp_clear_port_status - clear port status bits |
1570 | * @unit: unit where access has been denied | 1521 | * @port: port to change the status |
1571 | * @id: id for debug trace | 1522 | * @mask: status bits to change |
1572 | * @ref: reference for debug trace | ||
1573 | * | 1523 | * |
1574 | * Since the adapter has denied access, stop using the unit. | 1524 | * Changes in common status bits are propagated to attached LUNs. |
1575 | */ | 1525 | */ |
1576 | void zfcp_erp_unit_access_denied(struct zfcp_unit *unit, char *id, void *ref) | 1526 | void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask) |
1577 | { | 1527 | { |
1578 | zfcp_erp_modify_unit_status(unit, id, ref, | 1528 | struct scsi_device *sdev; |
1579 | ZFCP_STATUS_COMMON_ERP_FAILED | | 1529 | u32 common_mask = mask & ZFCP_COMMON_FLAGS; |
1580 | ZFCP_STATUS_COMMON_ACCESS_DENIED, ZFCP_SET); | 1530 | u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED; |
1581 | } | ||
1582 | 1531 | ||
1583 | static void zfcp_erp_unit_access_changed(struct zfcp_unit *unit, char *id, | 1532 | atomic_clear_mask(mask, &port->status); |
1584 | void *ref) | 1533 | |
1585 | { | 1534 | if (!common_mask) |
1586 | int status = atomic_read(&unit->status); | ||
1587 | if (!(status & (ZFCP_STATUS_COMMON_ACCESS_DENIED | | ||
1588 | ZFCP_STATUS_COMMON_ACCESS_BOXED))) | ||
1589 | return; | 1535 | return; |
1590 | 1536 | ||
1591 | zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref); | 1537 | if (clear_counter) |
1538 | atomic_set(&port->erp_counter, 0); | ||
1539 | |||
1540 | shost_for_each_device(sdev, port->adapter->scsi_host) | ||
1541 | if (sdev_to_zfcp(sdev)->port == port) { | ||
1542 | atomic_clear_mask(common_mask, | ||
1543 | &sdev_to_zfcp(sdev)->status); | ||
1544 | if (clear_counter) | ||
1545 | atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0); | ||
1546 | } | ||
1592 | } | 1547 | } |
1593 | 1548 | ||
1594 | static void zfcp_erp_port_access_changed(struct zfcp_port *port, char *id, | 1549 | /** |
1595 | void *ref) | 1550 | * zfcp_erp_set_lun_status - set lun status bits |
1551 | * @sdev: SCSI device / lun to set the status bits | ||
1552 | * @mask: status bits to change | ||
1553 | */ | ||
1554 | void zfcp_erp_set_lun_status(struct scsi_device *sdev, u32 mask) | ||
1596 | { | 1555 | { |
1597 | struct zfcp_unit *unit; | 1556 | struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); |
1598 | unsigned long flags; | ||
1599 | int status = atomic_read(&port->status); | ||
1600 | 1557 | ||
1601 | if (!(status & (ZFCP_STATUS_COMMON_ACCESS_DENIED | | 1558 | atomic_set_mask(mask, &zfcp_sdev->status); |
1602 | ZFCP_STATUS_COMMON_ACCESS_BOXED))) { | ||
1603 | read_lock_irqsave(&port->unit_list_lock, flags); | ||
1604 | list_for_each_entry(unit, &port->unit_list, list) | ||
1605 | zfcp_erp_unit_access_changed(unit, id, ref); | ||
1606 | read_unlock_irqrestore(&port->unit_list_lock, flags); | ||
1607 | return; | ||
1608 | } | ||
1609 | |||
1610 | zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref); | ||
1611 | } | 1559 | } |
1612 | 1560 | ||
1613 | /** | 1561 | /** |
1614 | * zfcp_erp_adapter_access_changed - Process change in adapter ACT | 1562 | * zfcp_erp_clear_lun_status - clear lun status bits |
1615 | * @adapter: Adapter where the Access Control Table (ACT) changed | 1563 | * @sdev: SCSI device / lun to clear the status bits |
1616 | * @id: Id for debug trace | 1564 | * @mask: status bits to change |
1617 | * @ref: Reference for debug trace | ||
1618 | */ | 1565 | */ |
1619 | void zfcp_erp_adapter_access_changed(struct zfcp_adapter *adapter, char *id, | 1566 | void zfcp_erp_clear_lun_status(struct scsi_device *sdev, u32 mask) |
1620 | void *ref) | ||
1621 | { | 1567 | { |
1622 | unsigned long flags; | 1568 | struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); |
1623 | struct zfcp_port *port; | ||
1624 | 1569 | ||
1625 | if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) | 1570 | atomic_clear_mask(mask, &zfcp_sdev->status); |
1626 | return; | ||
1627 | 1571 | ||
1628 | read_lock_irqsave(&adapter->port_list_lock, flags); | 1572 | if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) |
1629 | list_for_each_entry(port, &adapter->port_list, list) | 1573 | atomic_set(&zfcp_sdev->erp_counter, 0); |
1630 | zfcp_erp_port_access_changed(port, id, ref); | ||
1631 | read_unlock_irqrestore(&adapter->port_list_lock, flags); | ||
1632 | } | 1574 | } |
1575 | |||
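The zfcp_erp.c changes above replace the flag-driven zfcp_erp_modify_*_status() helpers with explicit set/clear variants and track per-LUN state on the SCSI device instead of struct zfcp_unit. A minimal sketch of the resulting calling pattern, assuming a caller that already holds a port and an attached scsi_device; the trace id strings are placeholders, not taken from this patch:

	/* before: direction chosen by ZFCP_SET/ZFCP_CLEAR plus a trace id/ref */
	zfcp_erp_modify_port_status(port, "dummy_1", NULL,
				    ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);

	/* after: dedicated helpers, no trace arguments */
	zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED);
	zfcp_erp_clear_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED);

	/* per-LUN status is now kept on the scsi_device */
	zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
	zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED, "dummy_2", NULL);

As the new clear helpers show, clearing ZFCP_STATUS_COMMON_ERP_FAILED also resets the corresponding erp_counter on the adapter, port, or LUN.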
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index 3b93239c6f69..bf8f3e514839 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h | |||
@@ -15,12 +15,10 @@ | |||
15 | #include "zfcp_fc.h" | 15 | #include "zfcp_fc.h" |
16 | 16 | ||
17 | /* zfcp_aux.c */ | 17 | /* zfcp_aux.c */ |
18 | extern struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *, u64); | ||
19 | extern struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *, u64); | 18 | extern struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *, u64); |
20 | extern struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *); | 19 | extern struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *); |
21 | extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, u64, u32, | 20 | extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, u64, u32, |
22 | u32); | 21 | u32); |
23 | extern struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *, u64); | ||
24 | extern void zfcp_sg_free_table(struct scatterlist *, int); | 22 | extern void zfcp_sg_free_table(struct scatterlist *, int); |
25 | extern int zfcp_sg_setup_table(struct scatterlist *, int); | 23 | extern int zfcp_sg_setup_table(struct scatterlist *, int); |
26 | extern void zfcp_device_unregister(struct device *, | 24 | extern void zfcp_device_unregister(struct device *, |
@@ -36,6 +34,14 @@ extern void zfcp_ccw_adapter_put(struct zfcp_adapter *); | |||
36 | 34 | ||
37 | /* zfcp_cfdc.c */ | 35 | /* zfcp_cfdc.c */ |
38 | extern struct miscdevice zfcp_cfdc_misc; | 36 | extern struct miscdevice zfcp_cfdc_misc; |
37 | extern void zfcp_cfdc_port_denied(struct zfcp_port *, union fsf_status_qual *); | ||
38 | extern void zfcp_cfdc_lun_denied(struct scsi_device *, union fsf_status_qual *); | ||
39 | extern void zfcp_cfdc_lun_shrng_vltn(struct scsi_device *, | ||
40 | union fsf_status_qual *); | ||
41 | extern int zfcp_cfdc_open_lun_eval(struct scsi_device *, | ||
42 | struct fsf_qtcb_bottom_support *); | ||
43 | extern void zfcp_cfdc_adapter_access_changed(struct zfcp_adapter *); | ||
44 | |||
39 | 45 | ||
40 | /* zfcp_dbf.c */ | 46 | /* zfcp_dbf.c */ |
41 | extern int zfcp_dbf_adapter_register(struct zfcp_adapter *); | 47 | extern int zfcp_dbf_adapter_register(struct zfcp_adapter *); |
@@ -44,10 +50,10 @@ extern void zfcp_dbf_rec_thread(char *, struct zfcp_dbf *); | |||
44 | extern void zfcp_dbf_rec_thread_lock(char *, struct zfcp_dbf *); | 50 | extern void zfcp_dbf_rec_thread_lock(char *, struct zfcp_dbf *); |
45 | extern void zfcp_dbf_rec_adapter(char *, void *, struct zfcp_dbf *); | 51 | extern void zfcp_dbf_rec_adapter(char *, void *, struct zfcp_dbf *); |
46 | extern void zfcp_dbf_rec_port(char *, void *, struct zfcp_port *); | 52 | extern void zfcp_dbf_rec_port(char *, void *, struct zfcp_port *); |
47 | extern void zfcp_dbf_rec_unit(char *, void *, struct zfcp_unit *); | 53 | extern void zfcp_dbf_rec_lun(char *, void *, struct scsi_device *); |
48 | extern void zfcp_dbf_rec_trigger(char *, void *, u8, u8, void *, | 54 | extern void zfcp_dbf_rec_trigger(char *, void *, u8, u8, void *, |
49 | struct zfcp_adapter *, struct zfcp_port *, | 55 | struct zfcp_adapter *, struct zfcp_port *, |
50 | struct zfcp_unit *); | 56 | struct scsi_device *); |
51 | extern void zfcp_dbf_rec_action(char *, struct zfcp_erp_action *); | 57 | extern void zfcp_dbf_rec_action(char *, struct zfcp_erp_action *); |
52 | extern void _zfcp_dbf_hba_fsf_response(const char *, int, struct zfcp_fsf_req *, | 58 | extern void _zfcp_dbf_hba_fsf_response(const char *, int, struct zfcp_fsf_req *, |
53 | struct zfcp_dbf *); | 59 | struct zfcp_dbf *); |
@@ -65,34 +71,26 @@ extern void _zfcp_dbf_scsi(const char *, const char *, int, struct zfcp_dbf *, | |||
65 | unsigned long); | 71 | unsigned long); |
66 | 72 | ||
67 | /* zfcp_erp.c */ | 73 | /* zfcp_erp.c */ |
68 | extern void zfcp_erp_modify_adapter_status(struct zfcp_adapter *, char *, | 74 | extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32); |
69 | void *, u32, int); | 75 | extern void zfcp_erp_clear_adapter_status(struct zfcp_adapter *, u32); |
70 | extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, char *, void *); | 76 | extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, char *, void *); |
71 | extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, char *, | 77 | extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, char *, |
72 | void *); | 78 | void *); |
73 | extern void zfcp_erp_adapter_failed(struct zfcp_adapter *, char *, void *); | 79 | extern void zfcp_erp_set_port_status(struct zfcp_port *, u32); |
74 | extern void zfcp_erp_modify_port_status(struct zfcp_port *, char *, void *, u32, | 80 | extern void zfcp_erp_clear_port_status(struct zfcp_port *, u32); |
75 | int); | ||
76 | extern int zfcp_erp_port_reopen(struct zfcp_port *, int, char *, void *); | 81 | extern int zfcp_erp_port_reopen(struct zfcp_port *, int, char *, void *); |
77 | extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *, void *); | 82 | extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *, void *); |
78 | extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *, | 83 | extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *, |
79 | void *); | 84 | void *); |
80 | extern void zfcp_erp_port_failed(struct zfcp_port *, char *, void *); | 85 | extern void zfcp_erp_set_lun_status(struct scsi_device *, u32); |
81 | extern void zfcp_erp_modify_unit_status(struct zfcp_unit *, char *, void *, u32, | 86 | extern void zfcp_erp_clear_lun_status(struct scsi_device *, u32); |
82 | int); | 87 | extern void zfcp_erp_lun_reopen(struct scsi_device *, int, char *, void *); |
83 | extern void zfcp_erp_unit_reopen(struct zfcp_unit *, int, char *, void *); | 88 | extern void zfcp_erp_lun_shutdown(struct scsi_device *, int, char *, void *); |
84 | extern void zfcp_erp_unit_shutdown(struct zfcp_unit *, int, char *, void *); | 89 | extern void zfcp_erp_lun_shutdown_wait(struct scsi_device *, char *); |
85 | extern void zfcp_erp_unit_failed(struct zfcp_unit *, char *, void *); | ||
86 | extern int zfcp_erp_thread_setup(struct zfcp_adapter *); | 90 | extern int zfcp_erp_thread_setup(struct zfcp_adapter *); |
87 | extern void zfcp_erp_thread_kill(struct zfcp_adapter *); | 91 | extern void zfcp_erp_thread_kill(struct zfcp_adapter *); |
88 | extern void zfcp_erp_wait(struct zfcp_adapter *); | 92 | extern void zfcp_erp_wait(struct zfcp_adapter *); |
89 | extern void zfcp_erp_notify(struct zfcp_erp_action *, unsigned long); | 93 | extern void zfcp_erp_notify(struct zfcp_erp_action *, unsigned long); |
90 | extern void zfcp_erp_port_boxed(struct zfcp_port *, char *, void *); | ||
91 | extern void zfcp_erp_unit_boxed(struct zfcp_unit *, char *, void *); | ||
92 | extern void zfcp_erp_port_access_denied(struct zfcp_port *, char *, void *); | ||
93 | extern void zfcp_erp_unit_access_denied(struct zfcp_unit *, char *, void *); | ||
94 | extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, char *, | ||
95 | void *); | ||
96 | extern void zfcp_erp_timeout_handler(unsigned long); | 94 | extern void zfcp_erp_timeout_handler(unsigned long); |
97 | 95 | ||
98 | /* zfcp_fc.c */ | 96 | /* zfcp_fc.c */ |
@@ -118,8 +116,8 @@ extern int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *); | |||
118 | extern int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *); | 116 | extern int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *); |
119 | extern int zfcp_fsf_close_port(struct zfcp_erp_action *); | 117 | extern int zfcp_fsf_close_port(struct zfcp_erp_action *); |
120 | extern int zfcp_fsf_close_physical_port(struct zfcp_erp_action *); | 118 | extern int zfcp_fsf_close_physical_port(struct zfcp_erp_action *); |
121 | extern int zfcp_fsf_open_unit(struct zfcp_erp_action *); | 119 | extern int zfcp_fsf_open_lun(struct zfcp_erp_action *); |
122 | extern int zfcp_fsf_close_unit(struct zfcp_erp_action *); | 120 | extern int zfcp_fsf_close_lun(struct zfcp_erp_action *); |
123 | extern int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *); | 121 | extern int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *); |
124 | extern int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *, | 122 | extern int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *, |
125 | struct fsf_qtcb_bottom_config *); | 123 | struct fsf_qtcb_bottom_config *); |
@@ -135,12 +133,10 @@ extern int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *, struct zfcp_fsf_ct_els *, | |||
135 | mempool_t *, unsigned int); | 133 | mempool_t *, unsigned int); |
136 | extern int zfcp_fsf_send_els(struct zfcp_adapter *, u32, | 134 | extern int zfcp_fsf_send_els(struct zfcp_adapter *, u32, |
137 | struct zfcp_fsf_ct_els *, unsigned int); | 135 | struct zfcp_fsf_ct_els *, unsigned int); |
138 | extern int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *, | 136 | extern int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *); |
139 | struct scsi_cmnd *); | ||
140 | extern void zfcp_fsf_req_free(struct zfcp_fsf_req *); | 137 | extern void zfcp_fsf_req_free(struct zfcp_fsf_req *); |
141 | extern struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *, u8); | 138 | extern struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *, u8); |
142 | extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long, | 139 | extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *); |
143 | struct zfcp_unit *); | ||
144 | extern void zfcp_fsf_reqid_check(struct zfcp_qdio *, int); | 140 | extern void zfcp_fsf_reqid_check(struct zfcp_qdio *, int); |
145 | 141 | ||
146 | /* zfcp_qdio.c */ | 142 | /* zfcp_qdio.c */ |
@@ -163,8 +159,6 @@ extern void zfcp_scsi_rport_work(struct work_struct *); | |||
163 | extern void zfcp_scsi_schedule_rport_register(struct zfcp_port *); | 159 | extern void zfcp_scsi_schedule_rport_register(struct zfcp_port *); |
164 | extern void zfcp_scsi_schedule_rport_block(struct zfcp_port *); | 160 | extern void zfcp_scsi_schedule_rport_block(struct zfcp_port *); |
165 | extern void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *); | 161 | extern void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *); |
166 | extern void zfcp_scsi_scan(struct zfcp_unit *); | ||
167 | extern void zfcp_scsi_scan_work(struct work_struct *); | ||
168 | extern void zfcp_scsi_set_prot(struct zfcp_adapter *); | 162 | extern void zfcp_scsi_set_prot(struct zfcp_adapter *); |
169 | extern void zfcp_scsi_dif_sense_error(struct scsi_cmnd *, int); | 163 | extern void zfcp_scsi_dif_sense_error(struct scsi_cmnd *, int); |
170 | 164 | ||
@@ -175,4 +169,13 @@ extern struct attribute_group zfcp_sysfs_port_attrs; | |||
175 | extern struct device_attribute *zfcp_sysfs_sdev_attrs[]; | 169 | extern struct device_attribute *zfcp_sysfs_sdev_attrs[]; |
176 | extern struct device_attribute *zfcp_sysfs_shost_attrs[]; | 170 | extern struct device_attribute *zfcp_sysfs_shost_attrs[]; |
177 | 171 | ||
172 | /* zfcp_unit.c */ | ||
173 | extern int zfcp_unit_add(struct zfcp_port *, u64); | ||
174 | extern int zfcp_unit_remove(struct zfcp_port *, u64); | ||
175 | extern struct zfcp_unit *zfcp_unit_find(struct zfcp_port *, u64); | ||
176 | extern struct scsi_device *zfcp_unit_sdev(struct zfcp_unit *unit); | ||
177 | extern void zfcp_unit_scsi_scan(struct zfcp_unit *); | ||
178 | extern void zfcp_unit_queue_scsi_scan(struct zfcp_port *); | ||
179 | extern unsigned int zfcp_unit_sdev_status(struct zfcp_unit *); | ||
180 | |||
178 | #endif /* ZFCP_EXT_H */ | 181 | #endif /* ZFCP_EXT_H */ |
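The reworked header keeps unit bookkeeping in a new zfcp_unit.c and hands everything LUN-related to the SCSI midlayer's scsi_device. A hedged sketch of moving from a configured unit to its SCSI device using the prototypes declared above; error handling is omitted, whether zfcp_unit_find() hands back a referenced object is an assumption from its name, and the trace id is a placeholder:

	struct zfcp_unit *unit;
	struct scsi_device *sdev;

	unit = zfcp_unit_find(port, fcp_lun);	/* fcp_lun: caller-supplied 64-bit FCP LUN */
	if (!unit)
		return;

	sdev = zfcp_unit_sdev(unit);		/* NULL if no scsi_device is attached yet (assumption) */
	if (sdev)
		zfcp_erp_lun_reopen(sdev, 0, "dummy_3", NULL);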
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 6f3ed2b9a349..86fd905df48b 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c | |||
@@ -365,7 +365,7 @@ void zfcp_fc_port_did_lookup(struct work_struct *work) | |||
365 | } | 365 | } |
366 | 366 | ||
367 | if (!port->d_id) { | 367 | if (!port->d_id) { |
368 | zfcp_erp_port_failed(port, "fcgpn_2", NULL); | 368 | zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED); |
369 | goto out; | 369 | goto out; |
370 | } | 370 | } |
371 | 371 | ||
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 9d1d7d1842ce..beaf0916ceab 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c | |||
@@ -61,45 +61,6 @@ static u32 fsf_qtcb_type[] = { | |||
61 | [FSF_QTCB_UPLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND | 61 | [FSF_QTCB_UPLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND |
62 | }; | 62 | }; |
63 | 63 | ||
64 | static void zfcp_act_eval_err(struct zfcp_adapter *adapter, u32 table) | ||
65 | { | ||
66 | u16 subtable = table >> 16; | ||
67 | u16 rule = table & 0xffff; | ||
68 | const char *act_type[] = { "unknown", "OS", "WWPN", "DID", "LUN" }; | ||
69 | |||
70 | if (subtable && subtable < ARRAY_SIZE(act_type)) | ||
71 | dev_warn(&adapter->ccw_device->dev, | ||
72 | "Access denied according to ACT rule type %s, " | ||
73 | "rule %d\n", act_type[subtable], rule); | ||
74 | } | ||
75 | |||
76 | static void zfcp_fsf_access_denied_port(struct zfcp_fsf_req *req, | ||
77 | struct zfcp_port *port) | ||
78 | { | ||
79 | struct fsf_qtcb_header *header = &req->qtcb->header; | ||
80 | dev_warn(&req->adapter->ccw_device->dev, | ||
81 | "Access denied to port 0x%016Lx\n", | ||
82 | (unsigned long long)port->wwpn); | ||
83 | zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]); | ||
84 | zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]); | ||
85 | zfcp_erp_port_access_denied(port, "fspad_1", req); | ||
86 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | ||
87 | } | ||
88 | |||
89 | static void zfcp_fsf_access_denied_unit(struct zfcp_fsf_req *req, | ||
90 | struct zfcp_unit *unit) | ||
91 | { | ||
92 | struct fsf_qtcb_header *header = &req->qtcb->header; | ||
93 | dev_warn(&req->adapter->ccw_device->dev, | ||
94 | "Access denied to unit 0x%016Lx on port 0x%016Lx\n", | ||
95 | (unsigned long long)unit->fcp_lun, | ||
96 | (unsigned long long)unit->port->wwpn); | ||
97 | zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]); | ||
98 | zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]); | ||
99 | zfcp_erp_unit_access_denied(unit, "fsuad_1", req); | ||
100 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | ||
101 | } | ||
102 | |||
103 | static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req) | 64 | static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req) |
104 | { | 65 | { |
105 | dev_err(&req->adapter->ccw_device->dev, "FCP device not " | 66 | dev_err(&req->adapter->ccw_device->dev, "FCP device not " |
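The helpers removed above (zfcp_act_eval_err and the two access-denied wrappers) are not dropped outright: the new prototypes in zfcp_ext.h declare their reporting under zfcp_cfdc.c, and the FSF handlers now pair that call with the generic error flag, as later hunks in this file show:

	case FSF_ACCESS_DENIED:
		zfcp_cfdc_port_denied(port, &header->fsf_status_qual);
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;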
@@ -143,7 +104,7 @@ static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req) | |||
143 | read_unlock_irqrestore(&adapter->port_list_lock, flags); | 104 | read_unlock_irqrestore(&adapter->port_list_lock, flags); |
144 | } | 105 | } |
145 | 106 | ||
146 | static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, char *id, | 107 | static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, |
147 | struct fsf_link_down_info *link_down) | 108 | struct fsf_link_down_info *link_down) |
148 | { | 109 | { |
149 | struct zfcp_adapter *adapter = req->adapter; | 110 | struct zfcp_adapter *adapter = req->adapter; |
@@ -223,7 +184,7 @@ static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, char *id, | |||
223 | "the FC fabric is down\n"); | 184 | "the FC fabric is down\n"); |
224 | } | 185 | } |
225 | out: | 186 | out: |
226 | zfcp_erp_adapter_failed(adapter, id, req); | 187 | zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_ERP_FAILED); |
227 | } | 188 | } |
228 | 189 | ||
229 | static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req) | 190 | static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req) |
@@ -234,13 +195,13 @@ static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req) | |||
234 | 195 | ||
235 | switch (sr_buf->status_subtype) { | 196 | switch (sr_buf->status_subtype) { |
236 | case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK: | 197 | case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK: |
237 | zfcp_fsf_link_down_info_eval(req, "fssrld1", ldi); | 198 | zfcp_fsf_link_down_info_eval(req, ldi); |
238 | break; | 199 | break; |
239 | case FSF_STATUS_READ_SUB_FDISC_FAILED: | 200 | case FSF_STATUS_READ_SUB_FDISC_FAILED: |
240 | zfcp_fsf_link_down_info_eval(req, "fssrld2", ldi); | 201 | zfcp_fsf_link_down_info_eval(req, ldi); |
241 | break; | 202 | break; |
242 | case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE: | 203 | case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE: |
243 | zfcp_fsf_link_down_info_eval(req, "fssrld3", NULL); | 204 | zfcp_fsf_link_down_info_eval(req, NULL); |
244 | }; | 205 | }; |
245 | } | 206 | } |
246 | 207 | ||
@@ -281,9 +242,8 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req) | |||
281 | dev_info(&adapter->ccw_device->dev, | 242 | dev_info(&adapter->ccw_device->dev, |
282 | "The local link has been restored\n"); | 243 | "The local link has been restored\n"); |
283 | /* All ports should be marked as ready to run again */ | 244 | /* All ports should be marked as ready to run again */ |
284 | zfcp_erp_modify_adapter_status(adapter, "fssrh_1", NULL, | 245 | zfcp_erp_set_adapter_status(adapter, |
285 | ZFCP_STATUS_COMMON_RUNNING, | 246 | ZFCP_STATUS_COMMON_RUNNING); |
286 | ZFCP_SET); | ||
287 | zfcp_erp_adapter_reopen(adapter, | 247 | zfcp_erp_adapter_reopen(adapter, |
288 | ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | | 248 | ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | |
289 | ZFCP_STATUS_COMMON_ERP_FAILED, | 249 | ZFCP_STATUS_COMMON_ERP_FAILED, |
@@ -293,13 +253,12 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req) | |||
293 | break; | 253 | break; |
294 | case FSF_STATUS_READ_NOTIFICATION_LOST: | 254 | case FSF_STATUS_READ_NOTIFICATION_LOST: |
295 | if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_ACT_UPDATED) | 255 | if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_ACT_UPDATED) |
296 | zfcp_erp_adapter_access_changed(adapter, "fssrh_3", | 256 | zfcp_cfdc_adapter_access_changed(adapter); |
297 | req); | ||
298 | if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS) | 257 | if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS) |
299 | queue_work(adapter->work_queue, &adapter->scan_work); | 258 | queue_work(adapter->work_queue, &adapter->scan_work); |
300 | break; | 259 | break; |
301 | case FSF_STATUS_READ_CFDC_UPDATED: | 260 | case FSF_STATUS_READ_CFDC_UPDATED: |
302 | zfcp_erp_adapter_access_changed(adapter, "fssrh_4", req); | 261 | zfcp_cfdc_adapter_access_changed(adapter); |
303 | break; | 262 | break; |
304 | case FSF_STATUS_READ_FEATURE_UPDATE_ALERT: | 263 | case FSF_STATUS_READ_FEATURE_UPDATE_ALERT: |
305 | adapter->adapter_features = sr_buf->payload.word[0]; | 264 | adapter->adapter_features = sr_buf->payload.word[0]; |
@@ -399,16 +358,14 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req) | |||
399 | zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4", req); | 358 | zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4", req); |
400 | break; | 359 | break; |
401 | case FSF_PROT_LINK_DOWN: | 360 | case FSF_PROT_LINK_DOWN: |
402 | zfcp_fsf_link_down_info_eval(req, "fspse_5", | 361 | zfcp_fsf_link_down_info_eval(req, &psq->link_down_info); |
403 | &psq->link_down_info); | ||
404 | /* go through reopen to flush pending requests */ | 362 | /* go through reopen to flush pending requests */ |
405 | zfcp_erp_adapter_reopen(adapter, 0, "fspse_6", req); | 363 | zfcp_erp_adapter_reopen(adapter, 0, "fspse_6", req); |
406 | break; | 364 | break; |
407 | case FSF_PROT_REEST_QUEUE: | 365 | case FSF_PROT_REEST_QUEUE: |
408 | /* All ports should be marked as ready to run again */ | 366 | /* All ports should be marked as ready to run again */ |
409 | zfcp_erp_modify_adapter_status(adapter, "fspse_7", NULL, | 367 | zfcp_erp_set_adapter_status(adapter, |
410 | ZFCP_STATUS_COMMON_RUNNING, | 368 | ZFCP_STATUS_COMMON_RUNNING); |
411 | ZFCP_SET); | ||
412 | zfcp_erp_adapter_reopen(adapter, | 369 | zfcp_erp_adapter_reopen(adapter, |
413 | ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | | 370 | ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | |
414 | ZFCP_STATUS_COMMON_ERP_FAILED, | 371 | ZFCP_STATUS_COMMON_ERP_FAILED, |
@@ -578,7 +535,7 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req) | |||
578 | atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, | 535 | atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, |
579 | &adapter->status); | 536 | &adapter->status); |
580 | 537 | ||
581 | zfcp_fsf_link_down_info_eval(req, "fsecdh2", | 538 | zfcp_fsf_link_down_info_eval(req, |
582 | &qtcb->header.fsf_status_qual.link_down_info); | 539 | &qtcb->header.fsf_status_qual.link_down_info); |
583 | break; | 540 | break; |
584 | default: | 541 | default: |
@@ -644,7 +601,7 @@ static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req) | |||
644 | break; | 601 | break; |
645 | case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE: | 602 | case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE: |
646 | zfcp_fsf_exchange_port_evaluate(req); | 603 | zfcp_fsf_exchange_port_evaluate(req); |
647 | zfcp_fsf_link_down_info_eval(req, "fsepdh1", | 604 | zfcp_fsf_link_down_info_eval(req, |
648 | &qtcb->header.fsf_status_qual.link_down_info); | 605 | &qtcb->header.fsf_status_qual.link_down_info); |
649 | break; | 606 | break; |
650 | } | 607 | } |
@@ -771,7 +728,7 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio) | |||
771 | struct fsf_status_read_buffer *sr_buf; | 728 | struct fsf_status_read_buffer *sr_buf; |
772 | int retval = -EIO; | 729 | int retval = -EIO; |
773 | 730 | ||
774 | spin_lock_bh(&qdio->req_q_lock); | 731 | spin_lock_irq(&qdio->req_q_lock); |
775 | if (zfcp_qdio_sbal_get(qdio)) | 732 | if (zfcp_qdio_sbal_get(qdio)) |
776 | goto out; | 733 | goto out; |
777 | 734 | ||
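From here on, every request-queue critical section in this file switches from softirq-only protection to disabling interrupts; the hunks do not state the reason, so take the stronger exclusion requirement as given. The recurring pattern is:

	spin_lock_irq(&qdio->req_q_lock);	/* was spin_lock_bh() */
	if (zfcp_qdio_sbal_get(qdio))
		goto out;
	/* ... allocate and send the FSF request ... */
out:
	spin_unlock_irq(&qdio->req_q_lock);	/* was spin_unlock_bh() */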
@@ -805,13 +762,14 @@ failed_buf: | |||
805 | zfcp_fsf_req_free(req); | 762 | zfcp_fsf_req_free(req); |
806 | zfcp_dbf_hba_fsf_unsol("fail", adapter->dbf, NULL); | 763 | zfcp_dbf_hba_fsf_unsol("fail", adapter->dbf, NULL); |
807 | out: | 764 | out: |
808 | spin_unlock_bh(&qdio->req_q_lock); | 765 | spin_unlock_irq(&qdio->req_q_lock); |
809 | return retval; | 766 | return retval; |
810 | } | 767 | } |
811 | 768 | ||
812 | static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req) | 769 | static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req) |
813 | { | 770 | { |
814 | struct zfcp_unit *unit = req->data; | 771 | struct scsi_device *sdev = req->data; |
772 | struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); | ||
815 | union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual; | 773 | union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual; |
816 | 774 | ||
817 | if (req->status & ZFCP_STATUS_FSFREQ_ERROR) | 775 | if (req->status & ZFCP_STATUS_FSFREQ_ERROR) |
@@ -820,14 +778,15 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req) | |||
820 | switch (req->qtcb->header.fsf_status) { | 778 | switch (req->qtcb->header.fsf_status) { |
821 | case FSF_PORT_HANDLE_NOT_VALID: | 779 | case FSF_PORT_HANDLE_NOT_VALID: |
822 | if (fsq->word[0] == fsq->word[1]) { | 780 | if (fsq->word[0] == fsq->word[1]) { |
823 | zfcp_erp_adapter_reopen(unit->port->adapter, 0, | 781 | zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, |
824 | "fsafch1", req); | 782 | "fsafch1", req); |
825 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 783 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
826 | } | 784 | } |
827 | break; | 785 | break; |
828 | case FSF_LUN_HANDLE_NOT_VALID: | 786 | case FSF_LUN_HANDLE_NOT_VALID: |
829 | if (fsq->word[0] == fsq->word[1]) { | 787 | if (fsq->word[0] == fsq->word[1]) { |
830 | zfcp_erp_port_reopen(unit->port, 0, "fsafch2", req); | 788 | zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fsafch2", |
789 | req); | ||
831 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 790 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
832 | } | 791 | } |
833 | break; | 792 | break; |
@@ -835,17 +794,23 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req) | |||
835 | req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED; | 794 | req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED; |
836 | break; | 795 | break; |
837 | case FSF_PORT_BOXED: | 796 | case FSF_PORT_BOXED: |
838 | zfcp_erp_port_boxed(unit->port, "fsafch3", req); | 797 | zfcp_erp_set_port_status(zfcp_sdev->port, |
798 | ZFCP_STATUS_COMMON_ACCESS_BOXED); | ||
799 | zfcp_erp_port_reopen(zfcp_sdev->port, | ||
800 | ZFCP_STATUS_COMMON_ERP_FAILED, "fsafch3", | ||
801 | req); | ||
839 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 802 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
840 | break; | 803 | break; |
841 | case FSF_LUN_BOXED: | 804 | case FSF_LUN_BOXED: |
842 | zfcp_erp_unit_boxed(unit, "fsafch4", req); | 805 | zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED); |
806 | zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED, | ||
807 | "fsafch4", req); | ||
843 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 808 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
844 | break; | 809 | break; |
845 | case FSF_ADAPTER_STATUS_AVAILABLE: | 810 | case FSF_ADAPTER_STATUS_AVAILABLE: |
846 | switch (fsq->word[0]) { | 811 | switch (fsq->word[0]) { |
847 | case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: | 812 | case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: |
848 | zfcp_fc_test_link(unit->port); | 813 | zfcp_fc_test_link(zfcp_sdev->port); |
849 | /* fall through */ | 814 | /* fall through */ |
850 | case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: | 815 | case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: |
851 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 816 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
@@ -859,19 +824,20 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req) | |||
859 | } | 824 | } |
860 | 825 | ||
861 | /** | 826 | /** |
862 | * zfcp_fsf_abort_fcp_command - abort running SCSI command | 827 | * zfcp_fsf_abort_fcp_cmnd - abort running SCSI command |
863 | * @old_req_id: unsigned long | 828 | * @scmnd: The SCSI command to abort |
864 | * @unit: pointer to struct zfcp_unit | ||
865 | * Returns: pointer to struct zfcp_fsf_req | 829 | * Returns: pointer to struct zfcp_fsf_req |
866 | */ | 830 | */ |
867 | 831 | ||
868 | struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id, | 832 | struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *scmnd) |
869 | struct zfcp_unit *unit) | ||
870 | { | 833 | { |
871 | struct zfcp_fsf_req *req = NULL; | 834 | struct zfcp_fsf_req *req = NULL; |
872 | struct zfcp_qdio *qdio = unit->port->adapter->qdio; | 835 | struct scsi_device *sdev = scmnd->device; |
836 | struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); | ||
837 | struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio; | ||
838 | unsigned long old_req_id = (unsigned long) scmnd->host_scribble; | ||
873 | 839 | ||
874 | spin_lock_bh(&qdio->req_q_lock); | 840 | spin_lock_irq(&qdio->req_q_lock); |
875 | if (zfcp_qdio_sbal_get(qdio)) | 841 | if (zfcp_qdio_sbal_get(qdio)) |
876 | goto out; | 842 | goto out; |
877 | req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND, | 843 | req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND, |
@@ -882,16 +848,16 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id, | |||
882 | goto out; | 848 | goto out; |
883 | } | 849 | } |
884 | 850 | ||
885 | if (unlikely(!(atomic_read(&unit->status) & | 851 | if (unlikely(!(atomic_read(&zfcp_sdev->status) & |
886 | ZFCP_STATUS_COMMON_UNBLOCKED))) | 852 | ZFCP_STATUS_COMMON_UNBLOCKED))) |
887 | goto out_error_free; | 853 | goto out_error_free; |
888 | 854 | ||
889 | zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); | 855 | zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); |
890 | 856 | ||
891 | req->data = unit; | 857 | req->data = zfcp_sdev; |
892 | req->handler = zfcp_fsf_abort_fcp_command_handler; | 858 | req->handler = zfcp_fsf_abort_fcp_command_handler; |
893 | req->qtcb->header.lun_handle = unit->handle; | 859 | req->qtcb->header.lun_handle = zfcp_sdev->lun_handle; |
894 | req->qtcb->header.port_handle = unit->port->handle; | 860 | req->qtcb->header.port_handle = zfcp_sdev->port->handle; |
895 | req->qtcb->bottom.support.req_handle = (u64) old_req_id; | 861 | req->qtcb->bottom.support.req_handle = (u64) old_req_id; |
896 | 862 | ||
897 | zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT); | 863 | zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT); |
@@ -902,7 +868,7 @@ out_error_free: | |||
902 | zfcp_fsf_req_free(req); | 868 | zfcp_fsf_req_free(req); |
903 | req = NULL; | 869 | req = NULL; |
904 | out: | 870 | out: |
905 | spin_unlock_bh(&qdio->req_q_lock); | 871 | spin_unlock_irq(&qdio->req_q_lock); |
906 | return req; | 872 | return req; |
907 | } | 873 | } |
908 | 874 | ||
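With the reworked zfcp_fsf_abort_fcp_cmnd() the caller passes only the SCSI command; the LUN context comes from sdev_to_zfcp(scmnd->device) and the request id to abort from scmnd->host_scribble, as the hunk above shows. A hedged sketch of a call site — only the function call itself is taken from this patch, the surrounding error-handler logic is assumed:

	struct zfcp_fsf_req *abrt_req;

	abrt_req = zfcp_fsf_abort_fcp_cmnd(scpnt);	/* scpnt: struct scsi_cmnd * being aborted */
	if (!abrt_req)
		return FAILED;	/* request could not be issued; caller decides whether to retry */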
@@ -1041,7 +1007,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port, | |||
1041 | struct zfcp_fsf_req *req; | 1007 | struct zfcp_fsf_req *req; |
1042 | int ret = -EIO; | 1008 | int ret = -EIO; |
1043 | 1009 | ||
1044 | spin_lock_bh(&qdio->req_q_lock); | 1010 | spin_lock_irq(&qdio->req_q_lock); |
1045 | if (zfcp_qdio_sbal_get(qdio)) | 1011 | if (zfcp_qdio_sbal_get(qdio)) |
1046 | goto out; | 1012 | goto out; |
1047 | 1013 | ||
@@ -1073,7 +1039,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port, | |||
1073 | failed_send: | 1039 | failed_send: |
1074 | zfcp_fsf_req_free(req); | 1040 | zfcp_fsf_req_free(req); |
1075 | out: | 1041 | out: |
1076 | spin_unlock_bh(&qdio->req_q_lock); | 1042 | spin_unlock_irq(&qdio->req_q_lock); |
1077 | return ret; | 1043 | return ret; |
1078 | } | 1044 | } |
1079 | 1045 | ||
@@ -1111,8 +1077,10 @@ static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req) | |||
1111 | case FSF_RESPONSE_SIZE_TOO_LARGE: | 1077 | case FSF_RESPONSE_SIZE_TOO_LARGE: |
1112 | break; | 1078 | break; |
1113 | case FSF_ACCESS_DENIED: | 1079 | case FSF_ACCESS_DENIED: |
1114 | if (port) | 1080 | if (port) { |
1115 | zfcp_fsf_access_denied_port(req, port); | 1081 | zfcp_cfdc_port_denied(port, &header->fsf_status_qual); |
1082 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | ||
1083 | } | ||
1116 | break; | 1084 | break; |
1117 | case FSF_SBAL_MISMATCH: | 1085 | case FSF_SBAL_MISMATCH: |
1118 | /* should never occur, avoided in zfcp_fsf_send_els */ | 1086 | /* should never occur, avoided in zfcp_fsf_send_els */ |
@@ -1137,7 +1105,7 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id, | |||
1137 | struct zfcp_qdio *qdio = adapter->qdio; | 1105 | struct zfcp_qdio *qdio = adapter->qdio; |
1138 | int ret = -EIO; | 1106 | int ret = -EIO; |
1139 | 1107 | ||
1140 | spin_lock_bh(&qdio->req_q_lock); | 1108 | spin_lock_irq(&qdio->req_q_lock); |
1141 | if (zfcp_qdio_sbal_get(qdio)) | 1109 | if (zfcp_qdio_sbal_get(qdio)) |
1142 | goto out; | 1110 | goto out; |
1143 | 1111 | ||
@@ -1173,7 +1141,7 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id, | |||
1173 | failed_send: | 1141 | failed_send: |
1174 | zfcp_fsf_req_free(req); | 1142 | zfcp_fsf_req_free(req); |
1175 | out: | 1143 | out: |
1176 | spin_unlock_bh(&qdio->req_q_lock); | 1144 | spin_unlock_irq(&qdio->req_q_lock); |
1177 | return ret; | 1145 | return ret; |
1178 | } | 1146 | } |
1179 | 1147 | ||
@@ -1183,7 +1151,7 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action) | |||
1183 | struct zfcp_qdio *qdio = erp_action->adapter->qdio; | 1151 | struct zfcp_qdio *qdio = erp_action->adapter->qdio; |
1184 | int retval = -EIO; | 1152 | int retval = -EIO; |
1185 | 1153 | ||
1186 | spin_lock_bh(&qdio->req_q_lock); | 1154 | spin_lock_irq(&qdio->req_q_lock); |
1187 | if (zfcp_qdio_sbal_get(qdio)) | 1155 | if (zfcp_qdio_sbal_get(qdio)) |
1188 | goto out; | 1156 | goto out; |
1189 | 1157 | ||
@@ -1215,7 +1183,7 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action) | |||
1215 | erp_action->fsf_req_id = 0; | 1183 | erp_action->fsf_req_id = 0; |
1216 | } | 1184 | } |
1217 | out: | 1185 | out: |
1218 | spin_unlock_bh(&qdio->req_q_lock); | 1186 | spin_unlock_irq(&qdio->req_q_lock); |
1219 | return retval; | 1187 | return retval; |
1220 | } | 1188 | } |
1221 | 1189 | ||
@@ -1225,7 +1193,7 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio, | |||
1225 | struct zfcp_fsf_req *req = NULL; | 1193 | struct zfcp_fsf_req *req = NULL; |
1226 | int retval = -EIO; | 1194 | int retval = -EIO; |
1227 | 1195 | ||
1228 | spin_lock_bh(&qdio->req_q_lock); | 1196 | spin_lock_irq(&qdio->req_q_lock); |
1229 | if (zfcp_qdio_sbal_get(qdio)) | 1197 | if (zfcp_qdio_sbal_get(qdio)) |
1230 | goto out_unlock; | 1198 | goto out_unlock; |
1231 | 1199 | ||
@@ -1251,7 +1219,7 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio, | |||
1251 | 1219 | ||
1252 | zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); | 1220 | zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); |
1253 | retval = zfcp_fsf_req_send(req); | 1221 | retval = zfcp_fsf_req_send(req); |
1254 | spin_unlock_bh(&qdio->req_q_lock); | 1222 | spin_unlock_irq(&qdio->req_q_lock); |
1255 | if (!retval) | 1223 | if (!retval) |
1256 | wait_for_completion(&req->completion); | 1224 | wait_for_completion(&req->completion); |
1257 | 1225 | ||
@@ -1259,7 +1227,7 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio, | |||
1259 | return retval; | 1227 | return retval; |
1260 | 1228 | ||
1261 | out_unlock: | 1229 | out_unlock: |
1262 | spin_unlock_bh(&qdio->req_q_lock); | 1230 | spin_unlock_irq(&qdio->req_q_lock); |
1263 | return retval; | 1231 | return retval; |
1264 | } | 1232 | } |
1265 | 1233 | ||
@@ -1277,7 +1245,7 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action) | |||
1277 | if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) | 1245 | if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) |
1278 | return -EOPNOTSUPP; | 1246 | return -EOPNOTSUPP; |
1279 | 1247 | ||
1280 | spin_lock_bh(&qdio->req_q_lock); | 1248 | spin_lock_irq(&qdio->req_q_lock); |
1281 | if (zfcp_qdio_sbal_get(qdio)) | 1249 | if (zfcp_qdio_sbal_get(qdio)) |
1282 | goto out; | 1250 | goto out; |
1283 | 1251 | ||
@@ -1304,7 +1272,7 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action) | |||
1304 | erp_action->fsf_req_id = 0; | 1272 | erp_action->fsf_req_id = 0; |
1305 | } | 1273 | } |
1306 | out: | 1274 | out: |
1307 | spin_unlock_bh(&qdio->req_q_lock); | 1275 | spin_unlock_irq(&qdio->req_q_lock); |
1308 | return retval; | 1276 | return retval; |
1309 | } | 1277 | } |
1310 | 1278 | ||
@@ -1323,7 +1291,7 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio, | |||
1323 | if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) | 1291 | if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) |
1324 | return -EOPNOTSUPP; | 1292 | return -EOPNOTSUPP; |
1325 | 1293 | ||
1326 | spin_lock_bh(&qdio->req_q_lock); | 1294 | spin_lock_irq(&qdio->req_q_lock); |
1327 | if (zfcp_qdio_sbal_get(qdio)) | 1295 | if (zfcp_qdio_sbal_get(qdio)) |
1328 | goto out_unlock; | 1296 | goto out_unlock; |
1329 | 1297 | ||
@@ -1343,7 +1311,7 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio, | |||
1343 | req->handler = zfcp_fsf_exchange_port_data_handler; | 1311 | req->handler = zfcp_fsf_exchange_port_data_handler; |
1344 | zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); | 1312 | zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); |
1345 | retval = zfcp_fsf_req_send(req); | 1313 | retval = zfcp_fsf_req_send(req); |
1346 | spin_unlock_bh(&qdio->req_q_lock); | 1314 | spin_unlock_irq(&qdio->req_q_lock); |
1347 | 1315 | ||
1348 | if (!retval) | 1316 | if (!retval) |
1349 | wait_for_completion(&req->completion); | 1317 | wait_for_completion(&req->completion); |
@@ -1353,7 +1321,7 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio, | |||
1353 | return retval; | 1321 | return retval; |
1354 | 1322 | ||
1355 | out_unlock: | 1323 | out_unlock: |
1356 | spin_unlock_bh(&qdio->req_q_lock); | 1324 | spin_unlock_irq(&qdio->req_q_lock); |
1357 | return retval; | 1325 | return retval; |
1358 | } | 1326 | } |
1359 | 1327 | ||
@@ -1370,14 +1338,16 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req) | |||
1370 | case FSF_PORT_ALREADY_OPEN: | 1338 | case FSF_PORT_ALREADY_OPEN: |
1371 | break; | 1339 | break; |
1372 | case FSF_ACCESS_DENIED: | 1340 | case FSF_ACCESS_DENIED: |
1373 | zfcp_fsf_access_denied_port(req, port); | 1341 | zfcp_cfdc_port_denied(port, &header->fsf_status_qual); |
1342 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | ||
1374 | break; | 1343 | break; |
1375 | case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED: | 1344 | case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED: |
1376 | dev_warn(&req->adapter->ccw_device->dev, | 1345 | dev_warn(&req->adapter->ccw_device->dev, |
1377 | "Not enough FCP adapter resources to open " | 1346 | "Not enough FCP adapter resources to open " |
1378 | "remote port 0x%016Lx\n", | 1347 | "remote port 0x%016Lx\n", |
1379 | (unsigned long long)port->wwpn); | 1348 | (unsigned long long)port->wwpn); |
1380 | zfcp_erp_port_failed(port, "fsoph_1", req); | 1349 | zfcp_erp_set_port_status(port, |
1350 | ZFCP_STATUS_COMMON_ERP_FAILED); | ||
1381 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 1351 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
1382 | break; | 1352 | break; |
1383 | case FSF_ADAPTER_STATUS_AVAILABLE: | 1353 | case FSF_ADAPTER_STATUS_AVAILABLE: |
@@ -1437,7 +1407,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action) | |||
1437 | struct zfcp_fsf_req *req; | 1407 | struct zfcp_fsf_req *req; |
1438 | int retval = -EIO; | 1408 | int retval = -EIO; |
1439 | 1409 | ||
1440 | spin_lock_bh(&qdio->req_q_lock); | 1410 | spin_lock_irq(&qdio->req_q_lock); |
1441 | if (zfcp_qdio_sbal_get(qdio)) | 1411 | if (zfcp_qdio_sbal_get(qdio)) |
1442 | goto out; | 1412 | goto out; |
1443 | 1413 | ||
@@ -1468,7 +1438,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action) | |||
1468 | put_device(&port->dev); | 1438 | put_device(&port->dev); |
1469 | } | 1439 | } |
1470 | out: | 1440 | out: |
1471 | spin_unlock_bh(&qdio->req_q_lock); | 1441 | spin_unlock_irq(&qdio->req_q_lock); |
1472 | return retval; | 1442 | return retval; |
1473 | } | 1443 | } |
1474 | 1444 | ||
@@ -1487,9 +1457,7 @@ static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req) | |||
1487 | case FSF_ADAPTER_STATUS_AVAILABLE: | 1457 | case FSF_ADAPTER_STATUS_AVAILABLE: |
1488 | break; | 1458 | break; |
1489 | case FSF_GOOD: | 1459 | case FSF_GOOD: |
1490 | zfcp_erp_modify_port_status(port, "fscph_2", req, | 1460 | zfcp_erp_clear_port_status(port, ZFCP_STATUS_COMMON_OPEN); |
1491 | ZFCP_STATUS_COMMON_OPEN, | ||
1492 | ZFCP_CLEAR); | ||
1493 | break; | 1461 | break; |
1494 | } | 1462 | } |
1495 | } | 1463 | } |
@@ -1505,7 +1473,7 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action) | |||
1505 | struct zfcp_fsf_req *req; | 1473 | struct zfcp_fsf_req *req; |
1506 | int retval = -EIO; | 1474 | int retval = -EIO; |
1507 | 1475 | ||
1508 | spin_lock_bh(&qdio->req_q_lock); | 1476 | spin_lock_irq(&qdio->req_q_lock); |
1509 | if (zfcp_qdio_sbal_get(qdio)) | 1477 | if (zfcp_qdio_sbal_get(qdio)) |
1510 | goto out; | 1478 | goto out; |
1511 | 1479 | ||
@@ -1534,7 +1502,7 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action) | |||
1534 | erp_action->fsf_req_id = 0; | 1502 | erp_action->fsf_req_id = 0; |
1535 | } | 1503 | } |
1536 | out: | 1504 | out: |
1537 | spin_unlock_bh(&qdio->req_q_lock); | 1505 | spin_unlock_irq(&qdio->req_q_lock); |
1538 | return retval; | 1506 | return retval; |
1539 | } | 1507 | } |
1540 | 1508 | ||
@@ -1580,7 +1548,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port) | |||
1580 | struct zfcp_fsf_req *req; | 1548 | struct zfcp_fsf_req *req; |
1581 | int retval = -EIO; | 1549 | int retval = -EIO; |
1582 | 1550 | ||
1583 | spin_lock_bh(&qdio->req_q_lock); | 1551 | spin_lock_irq(&qdio->req_q_lock); |
1584 | if (zfcp_qdio_sbal_get(qdio)) | 1552 | if (zfcp_qdio_sbal_get(qdio)) |
1585 | goto out; | 1553 | goto out; |
1586 | 1554 | ||
@@ -1605,7 +1573,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port) | |||
1605 | if (retval) | 1573 | if (retval) |
1606 | zfcp_fsf_req_free(req); | 1574 | zfcp_fsf_req_free(req); |
1607 | out: | 1575 | out: |
1608 | spin_unlock_bh(&qdio->req_q_lock); | 1576 | spin_unlock_irq(&qdio->req_q_lock); |
1609 | return retval; | 1577 | return retval; |
1610 | } | 1578 | } |
1611 | 1579 | ||
@@ -1633,7 +1601,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port) | |||
1633 | struct zfcp_fsf_req *req; | 1601 | struct zfcp_fsf_req *req; |
1634 | int retval = -EIO; | 1602 | int retval = -EIO; |
1635 | 1603 | ||
1636 | spin_lock_bh(&qdio->req_q_lock); | 1604 | spin_lock_irq(&qdio->req_q_lock); |
1637 | if (zfcp_qdio_sbal_get(qdio)) | 1605 | if (zfcp_qdio_sbal_get(qdio)) |
1638 | goto out; | 1606 | goto out; |
1639 | 1607 | ||
@@ -1658,7 +1626,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port) | |||
1658 | if (retval) | 1626 | if (retval) |
1659 | zfcp_fsf_req_free(req); | 1627 | zfcp_fsf_req_free(req); |
1660 | out: | 1628 | out: |
1661 | spin_unlock_bh(&qdio->req_q_lock); | 1629 | spin_unlock_irq(&qdio->req_q_lock); |
1662 | return retval; | 1630 | return retval; |
1663 | } | 1631 | } |
1664 | 1632 | ||
@@ -1666,7 +1634,7 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req) | |||
1666 | { | 1634 | { |
1667 | struct zfcp_port *port = req->data; | 1635 | struct zfcp_port *port = req->data; |
1668 | struct fsf_qtcb_header *header = &req->qtcb->header; | 1636 | struct fsf_qtcb_header *header = &req->qtcb->header; |
1669 | struct zfcp_unit *unit; | 1637 | struct scsi_device *sdev; |
1670 | 1638 | ||
1671 | if (req->status & ZFCP_STATUS_FSFREQ_ERROR) | 1639 | if (req->status & ZFCP_STATUS_FSFREQ_ERROR) |
1672 | return; | 1640 | return; |
@@ -1677,18 +1645,19 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req) | |||
1677 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 1645 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
1678 | break; | 1646 | break; |
1679 | case FSF_ACCESS_DENIED: | 1647 | case FSF_ACCESS_DENIED: |
1680 | zfcp_fsf_access_denied_port(req, port); | 1648 | zfcp_cfdc_port_denied(port, &header->fsf_status_qual); |
1681 | break; | 1649 | break; |
1682 | case FSF_PORT_BOXED: | 1650 | case FSF_PORT_BOXED: |
1683 | /* can't use generic zfcp_erp_modify_port_status because | 1651 | /* can't use generic zfcp_erp_modify_port_status because |
1684 | * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */ | 1652 | * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */ |
1685 | atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); | 1653 | atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); |
1686 | read_lock(&port->unit_list_lock); | 1654 | shost_for_each_device(sdev, port->adapter->scsi_host) |
1687 | list_for_each_entry(unit, &port->unit_list, list) | 1655 | if (sdev_to_zfcp(sdev)->port == port) |
1688 | atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, | 1656 | atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, |
1689 | &unit->status); | 1657 | &sdev_to_zfcp(sdev)->status); |
1690 | read_unlock(&port->unit_list_lock); | 1658 | zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ACCESS_BOXED); |
1691 | zfcp_erp_port_boxed(port, "fscpph2", req); | 1659 | zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, |
1660 | "fscpph2", req); | ||
1692 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 1661 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
1693 | break; | 1662 | break; |
1694 | case FSF_ADAPTER_STATUS_AVAILABLE: | 1663 | case FSF_ADAPTER_STATUS_AVAILABLE: |
@@ -1705,11 +1674,10 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req) | |||
1705 | * ZFCP_STATUS_COMMON_OPEN must not be reset for the port | 1674 | * ZFCP_STATUS_COMMON_OPEN must not be reset for the port |
1706 | */ | 1675 | */ |
1707 | atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); | 1676 | atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); |
1708 | read_lock(&port->unit_list_lock); | 1677 | shost_for_each_device(sdev, port->adapter->scsi_host) |
1709 | list_for_each_entry(unit, &port->unit_list, list) | 1678 | if (sdev_to_zfcp(sdev)->port == port) |
1710 | atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, | 1679 | atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, |
1711 | &unit->status); | 1680 | &sdev_to_zfcp(sdev)->status); |
1712 | read_unlock(&port->unit_list_lock); | ||
1713 | break; | 1681 | break; |
1714 | } | 1682 | } |
1715 | } | 1683 | } |
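Because per-LUN state now lives on the scsi_device, the old walks over port->unit_list become filtered walks over the Scsi_Host device list; the pattern used in both hunks above is:

	struct scsi_device *sdev;

	shost_for_each_device(sdev, port->adapter->scsi_host)
		if (sdev_to_zfcp(sdev)->port == port)
			atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
					  &sdev_to_zfcp(sdev)->status);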
@@ -1725,7 +1693,7 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action) | |||
1725 | struct zfcp_fsf_req *req; | 1693 | struct zfcp_fsf_req *req; |
1726 | int retval = -EIO; | 1694 | int retval = -EIO; |
1727 | 1695 | ||
1728 | spin_lock_bh(&qdio->req_q_lock); | 1696 | spin_lock_irq(&qdio->req_q_lock); |
1729 | if (zfcp_qdio_sbal_get(qdio)) | 1697 | if (zfcp_qdio_sbal_get(qdio)) |
1730 | goto out; | 1698 | goto out; |
1731 | 1699 | ||
@@ -1754,69 +1722,57 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action) | |||
1754 | erp_action->fsf_req_id = 0; | 1722 | erp_action->fsf_req_id = 0; |
1755 | } | 1723 | } |
1756 | out: | 1724 | out: |
1757 | spin_unlock_bh(&qdio->req_q_lock); | 1725 | spin_unlock_irq(&qdio->req_q_lock); |
1758 | return retval; | 1726 | return retval; |
1759 | } | 1727 | } |
1760 | 1728 | ||
1761 | static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req) | 1729 | static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req) |
1762 | { | 1730 | { |
1763 | struct zfcp_adapter *adapter = req->adapter; | 1731 | struct zfcp_adapter *adapter = req->adapter; |
1764 | struct zfcp_unit *unit = req->data; | 1732 | struct scsi_device *sdev = req->data; |
1733 | struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); | ||
1765 | struct fsf_qtcb_header *header = &req->qtcb->header; | 1734 | struct fsf_qtcb_header *header = &req->qtcb->header; |
1766 | struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support; | 1735 | struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support; |
1767 | struct fsf_queue_designator *queue_designator = | ||
1768 | &header->fsf_status_qual.fsf_queue_designator; | ||
1769 | int exclusive, readwrite; | ||
1770 | 1736 | ||
1771 | if (req->status & ZFCP_STATUS_FSFREQ_ERROR) | 1737 | if (req->status & ZFCP_STATUS_FSFREQ_ERROR) |
1772 | return; | 1738 | return; |
1773 | 1739 | ||
1774 | atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED | | 1740 | atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED | |
1775 | ZFCP_STATUS_COMMON_ACCESS_BOXED | | 1741 | ZFCP_STATUS_COMMON_ACCESS_BOXED | |
1776 | ZFCP_STATUS_UNIT_SHARED | | 1742 | ZFCP_STATUS_LUN_SHARED | |
1777 | ZFCP_STATUS_UNIT_READONLY, | 1743 | ZFCP_STATUS_LUN_READONLY, |
1778 | &unit->status); | 1744 | &zfcp_sdev->status); |
1779 | 1745 | ||
1780 | switch (header->fsf_status) { | 1746 | switch (header->fsf_status) { |
1781 | 1747 | ||
1782 | case FSF_PORT_HANDLE_NOT_VALID: | 1748 | case FSF_PORT_HANDLE_NOT_VALID: |
1783 | zfcp_erp_adapter_reopen(unit->port->adapter, 0, "fsouh_1", req); | 1749 | zfcp_erp_adapter_reopen(adapter, 0, "fsouh_1", req); |
1784 | /* fall through */ | 1750 | /* fall through */ |
1785 | case FSF_LUN_ALREADY_OPEN: | 1751 | case FSF_LUN_ALREADY_OPEN: |
1786 | break; | 1752 | break; |
1787 | case FSF_ACCESS_DENIED: | 1753 | case FSF_ACCESS_DENIED: |
1788 | zfcp_fsf_access_denied_unit(req, unit); | 1754 | zfcp_cfdc_lun_denied(sdev, &header->fsf_status_qual); |
1789 | atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status); | 1755 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
1790 | atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status); | ||
1791 | break; | 1756 | break; |
1792 | case FSF_PORT_BOXED: | 1757 | case FSF_PORT_BOXED: |
1793 | zfcp_erp_port_boxed(unit->port, "fsouh_2", req); | 1758 | zfcp_erp_set_port_status(zfcp_sdev->port, |
1759 | ZFCP_STATUS_COMMON_ACCESS_BOXED); | ||
1760 | zfcp_erp_port_reopen(zfcp_sdev->port, | ||
1761 | ZFCP_STATUS_COMMON_ERP_FAILED, "fsouh_2", | ||
1762 | req); | ||
1794 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 1763 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
1795 | break; | 1764 | break; |
1796 | case FSF_LUN_SHARING_VIOLATION: | 1765 | case FSF_LUN_SHARING_VIOLATION: |
1797 | if (header->fsf_status_qual.word[0]) | 1766 | zfcp_cfdc_lun_shrng_vltn(sdev, &header->fsf_status_qual); |
1798 | dev_warn(&adapter->ccw_device->dev, | ||
1799 | "LUN 0x%Lx on port 0x%Lx is already in " | ||
1800 | "use by CSS%d, MIF Image ID %x\n", | ||
1801 | (unsigned long long)unit->fcp_lun, | ||
1802 | (unsigned long long)unit->port->wwpn, | ||
1803 | queue_designator->cssid, | ||
1804 | queue_designator->hla); | ||
1805 | else | ||
1806 | zfcp_act_eval_err(adapter, | ||
1807 | header->fsf_status_qual.word[2]); | ||
1808 | zfcp_erp_unit_access_denied(unit, "fsouh_3", req); | ||
1809 | atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status); | ||
1810 | atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status); | ||
1811 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 1767 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
1812 | break; | 1768 | break; |
1813 | case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED: | 1769 | case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED: |
1814 | dev_warn(&adapter->ccw_device->dev, | 1770 | dev_warn(&adapter->ccw_device->dev, |
1815 | "No handle is available for LUN " | 1771 | "No handle is available for LUN " |
1816 | "0x%016Lx on port 0x%016Lx\n", | 1772 | "0x%016Lx on port 0x%016Lx\n", |
1817 | (unsigned long long)unit->fcp_lun, | 1773 | (unsigned long long)zfcp_scsi_dev_lun(sdev), |
1818 | (unsigned long long)unit->port->wwpn); | 1774 | (unsigned long long)zfcp_sdev->port->wwpn); |
1819 | zfcp_erp_unit_failed(unit, "fsouh_4", req); | 1775 | zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED); |
1820 | /* fall through */ | 1776 | /* fall through */ |
1821 | case FSF_INVALID_COMMAND_OPTION: | 1777 | case FSF_INVALID_COMMAND_OPTION: |
1822 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 1778 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
@@ -1824,7 +1780,7 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req) | |||
1824 | case FSF_ADAPTER_STATUS_AVAILABLE: | 1780 | case FSF_ADAPTER_STATUS_AVAILABLE: |
1825 | switch (header->fsf_status_qual.word[0]) { | 1781 | switch (header->fsf_status_qual.word[0]) { |
1826 | case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: | 1782 | case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: |
1827 | zfcp_fc_test_link(unit->port); | 1783 | zfcp_fc_test_link(zfcp_sdev->port); |
1828 | /* fall through */ | 1784 | /* fall through */ |
1829 | case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: | 1785 | case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: |
1830 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 1786 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
@@ -1833,70 +1789,26 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req) | |||
1833 | break; | 1789 | break; |
1834 | 1790 | ||
1835 | case FSF_GOOD: | 1791 | case FSF_GOOD: |
1836 | unit->handle = header->lun_handle; | 1792 | zfcp_sdev->lun_handle = header->lun_handle; |
1837 | atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status); | 1793 | atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status); |
1838 | 1794 | zfcp_cfdc_open_lun_eval(sdev, bottom); | |
1839 | if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE) && | ||
1840 | (adapter->adapter_features & FSF_FEATURE_LUN_SHARING) && | ||
1841 | !zfcp_ccw_priv_sch(adapter)) { | ||
1842 | exclusive = (bottom->lun_access_info & | ||
1843 | FSF_UNIT_ACCESS_EXCLUSIVE); | ||
1844 | readwrite = (bottom->lun_access_info & | ||
1845 | FSF_UNIT_ACCESS_OUTBOUND_TRANSFER); | ||
1846 | |||
1847 | if (!exclusive) | ||
1848 | atomic_set_mask(ZFCP_STATUS_UNIT_SHARED, | ||
1849 | &unit->status); | ||
1850 | |||
1851 | if (!readwrite) { | ||
1852 | atomic_set_mask(ZFCP_STATUS_UNIT_READONLY, | ||
1853 | &unit->status); | ||
1854 | dev_info(&adapter->ccw_device->dev, | ||
1855 | "SCSI device at LUN 0x%016Lx on port " | ||
1856 | "0x%016Lx opened read-only\n", | ||
1857 | (unsigned long long)unit->fcp_lun, | ||
1858 | (unsigned long long)unit->port->wwpn); | ||
1859 | } | ||
1860 | |||
1861 | if (exclusive && !readwrite) { | ||
1862 | dev_err(&adapter->ccw_device->dev, | ||
1863 | "Exclusive read-only access not " | ||
1864 | "supported (unit 0x%016Lx, " | ||
1865 | "port 0x%016Lx)\n", | ||
1866 | (unsigned long long)unit->fcp_lun, | ||
1867 | (unsigned long long)unit->port->wwpn); | ||
1868 | zfcp_erp_unit_failed(unit, "fsouh_5", req); | ||
1869 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | ||
1870 | zfcp_erp_unit_shutdown(unit, 0, "fsouh_6", req); | ||
1871 | } else if (!exclusive && readwrite) { | ||
1872 | dev_err(&adapter->ccw_device->dev, | ||
1873 | "Shared read-write access not " | ||
1874 | "supported (unit 0x%016Lx, port " | ||
1875 | "0x%016Lx)\n", | ||
1876 | (unsigned long long)unit->fcp_lun, | ||
1877 | (unsigned long long)unit->port->wwpn); | ||
1878 | zfcp_erp_unit_failed(unit, "fsouh_7", req); | ||
1879 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | ||
1880 | zfcp_erp_unit_shutdown(unit, 0, "fsouh_8", req); | ||
1881 | } | ||
1882 | } | ||
1883 | break; | 1795 | break; |
1884 | } | 1796 | } |
1885 | } | 1797 | } |
1886 | 1798 | ||
1887 | /** | 1799 | /** |
1888 | * zfcp_fsf_open_unit - open unit | 1800 | * zfcp_fsf_open_lun - open LUN |
1889 | * @erp_action: pointer to struct zfcp_erp_action | 1801 | * @erp_action: pointer to struct zfcp_erp_action |
1890 | * Returns: 0 on success, error otherwise | 1802 | * Returns: 0 on success, error otherwise |
1891 | */ | 1803 | */ |
1892 | int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action) | 1804 | int zfcp_fsf_open_lun(struct zfcp_erp_action *erp_action) |
1893 | { | 1805 | { |
1894 | struct zfcp_adapter *adapter = erp_action->adapter; | 1806 | struct zfcp_adapter *adapter = erp_action->adapter; |
1895 | struct zfcp_qdio *qdio = adapter->qdio; | 1807 | struct zfcp_qdio *qdio = adapter->qdio; |
1896 | struct zfcp_fsf_req *req; | 1808 | struct zfcp_fsf_req *req; |
1897 | int retval = -EIO; | 1809 | int retval = -EIO; |
1898 | 1810 | ||
1899 | spin_lock_bh(&qdio->req_q_lock); | 1811 | spin_lock_irq(&qdio->req_q_lock); |
1900 | if (zfcp_qdio_sbal_get(qdio)) | 1812 | if (zfcp_qdio_sbal_get(qdio)) |
1901 | goto out; | 1813 | goto out; |
1902 | 1814 | ||
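In the open-LUN handler above, the FSF_GOOD branch now records the LUN handle, sets ZFCP_STATUS_COMMON_OPEN, and hands the access evaluation that used to be inlined (exclusive vs. shared, read-write vs. read-only) to zfcp_cfdc_open_lun_eval(). That helper is defined elsewhere in the series; the sketch below is only a guess at its shape, reconstructed from the removed branch, and every name not visible in the hunks above is an assumption.

    /* Hedged sketch, not the driver's actual code: assume the helper
     * re-applies the removed lun_access_info checks on the per-sdev
     * status bits. The old inline code only ran when the adapter was
     * not in NPIV mode and advertised FSF_FEATURE_LUN_SHARING. */
    static void example_cfdc_open_lun_eval(struct scsi_device *sdev,
                                           struct fsf_qtcb_bottom_support *bottom)
    {
            struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);

            if (!(bottom->lun_access_info & FSF_UNIT_ACCESS_EXCLUSIVE))
                    atomic_set_mask(ZFCP_STATUS_LUN_SHARED, &zfcp_sdev->status);

            if (!(bottom->lun_access_info & FSF_UNIT_ACCESS_OUTBOUND_TRANSFER))
                    atomic_set_mask(ZFCP_STATUS_LUN_READONLY, &zfcp_sdev->status);

            /* the removed code also reported read-only LUNs via dev_info()
             * and shut down the unsupported exclusive/read-only and
             * shared/read-write combinations */
    }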
@@ -1913,9 +1825,9 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action) | |||
1913 | zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); | 1825 | zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); |
1914 | 1826 | ||
1915 | req->qtcb->header.port_handle = erp_action->port->handle; | 1827 | req->qtcb->header.port_handle = erp_action->port->handle; |
1916 | req->qtcb->bottom.support.fcp_lun = erp_action->unit->fcp_lun; | 1828 | req->qtcb->bottom.support.fcp_lun = zfcp_scsi_dev_lun(erp_action->sdev); |
1917 | req->handler = zfcp_fsf_open_unit_handler; | 1829 | req->handler = zfcp_fsf_open_lun_handler; |
1918 | req->data = erp_action->unit; | 1830 | req->data = erp_action->sdev; |
1919 | req->erp_action = erp_action; | 1831 | req->erp_action = erp_action; |
1920 | erp_action->fsf_req_id = req->req_id; | 1832 | erp_action->fsf_req_id = req->req_id; |
1921 | 1833 | ||
@@ -1929,34 +1841,40 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action) | |||
1929 | erp_action->fsf_req_id = 0; | 1841 | erp_action->fsf_req_id = 0; |
1930 | } | 1842 | } |
1931 | out: | 1843 | out: |
1932 | spin_unlock_bh(&qdio->req_q_lock); | 1844 | spin_unlock_irq(&qdio->req_q_lock); |
1933 | return retval; | 1845 | return retval; |
1934 | } | 1846 | } |
1935 | 1847 | ||
1936 | static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req) | 1848 | static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req) |
1937 | { | 1849 | { |
1938 | struct zfcp_unit *unit = req->data; | 1850 | struct scsi_device *sdev = req->data; |
1851 | struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); | ||
1939 | 1852 | ||
1940 | if (req->status & ZFCP_STATUS_FSFREQ_ERROR) | 1853 | if (req->status & ZFCP_STATUS_FSFREQ_ERROR) |
1941 | return; | 1854 | return; |
1942 | 1855 | ||
1943 | switch (req->qtcb->header.fsf_status) { | 1856 | switch (req->qtcb->header.fsf_status) { |
1944 | case FSF_PORT_HANDLE_NOT_VALID: | 1857 | case FSF_PORT_HANDLE_NOT_VALID: |
1945 | zfcp_erp_adapter_reopen(unit->port->adapter, 0, "fscuh_1", req); | 1858 | zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1", |
1859 | req); | ||
1946 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 1860 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
1947 | break; | 1861 | break; |
1948 | case FSF_LUN_HANDLE_NOT_VALID: | 1862 | case FSF_LUN_HANDLE_NOT_VALID: |
1949 | zfcp_erp_port_reopen(unit->port, 0, "fscuh_2", req); | 1863 | zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fscuh_2", req); |
1950 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 1864 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
1951 | break; | 1865 | break; |
1952 | case FSF_PORT_BOXED: | 1866 | case FSF_PORT_BOXED: |
1953 | zfcp_erp_port_boxed(unit->port, "fscuh_3", req); | 1867 | zfcp_erp_set_port_status(zfcp_sdev->port, |
1868 | ZFCP_STATUS_COMMON_ACCESS_BOXED); | ||
1869 | zfcp_erp_port_reopen(zfcp_sdev->port, | ||
1870 | ZFCP_STATUS_COMMON_ERP_FAILED, "fscuh_3", | ||
1871 | req); | ||
1954 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 1872 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
1955 | break; | 1873 | break; |
1956 | case FSF_ADAPTER_STATUS_AVAILABLE: | 1874 | case FSF_ADAPTER_STATUS_AVAILABLE: |
1957 | switch (req->qtcb->header.fsf_status_qual.word[0]) { | 1875 | switch (req->qtcb->header.fsf_status_qual.word[0]) { |
1958 | case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: | 1876 | case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: |
1959 | zfcp_fc_test_link(unit->port); | 1877 | zfcp_fc_test_link(zfcp_sdev->port); |
1960 | /* fall through */ | 1878 | /* fall through */ |
1961 | case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: | 1879 | case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: |
1962 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 1880 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
@@ -1964,23 +1882,24 @@ static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req) | |||
1964 | } | 1882 | } |
1965 | break; | 1883 | break; |
1966 | case FSF_GOOD: | 1884 | case FSF_GOOD: |
1967 | atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status); | 1885 | atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status); |
1968 | break; | 1886 | break; |
1969 | } | 1887 | } |
1970 | } | 1888 | } |
1971 | 1889 | ||
1972 | /** | 1890 | /** |
1973 | * zfcp_fsf_close_unit - close zfcp unit | 1891 | * zfcp_fsf_close_lun - close LUN |
1974 | * @erp_action: pointer to struct zfcp_unit | 1892 | * @erp_action: pointer to erp_action triggering the "close LUN" |
1975 | * Returns: 0 on success, error otherwise | 1893 | * Returns: 0 on success, error otherwise |
1976 | */ | 1894 | */ |
1977 | int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action) | 1895 | int zfcp_fsf_close_lun(struct zfcp_erp_action *erp_action) |
1978 | { | 1896 | { |
1979 | struct zfcp_qdio *qdio = erp_action->adapter->qdio; | 1897 | struct zfcp_qdio *qdio = erp_action->adapter->qdio; |
1898 | struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(erp_action->sdev); | ||
1980 | struct zfcp_fsf_req *req; | 1899 | struct zfcp_fsf_req *req; |
1981 | int retval = -EIO; | 1900 | int retval = -EIO; |
1982 | 1901 | ||
1983 | spin_lock_bh(&qdio->req_q_lock); | 1902 | spin_lock_irq(&qdio->req_q_lock); |
1984 | if (zfcp_qdio_sbal_get(qdio)) | 1903 | if (zfcp_qdio_sbal_get(qdio)) |
1985 | goto out; | 1904 | goto out; |
1986 | 1905 | ||
@@ -1997,9 +1916,9 @@ int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action) | |||
1997 | zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); | 1916 | zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); |
1998 | 1917 | ||
1999 | req->qtcb->header.port_handle = erp_action->port->handle; | 1918 | req->qtcb->header.port_handle = erp_action->port->handle; |
2000 | req->qtcb->header.lun_handle = erp_action->unit->handle; | 1919 | req->qtcb->header.lun_handle = zfcp_sdev->lun_handle; |
2001 | req->handler = zfcp_fsf_close_unit_handler; | 1920 | req->handler = zfcp_fsf_close_lun_handler; |
2002 | req->data = erp_action->unit; | 1921 | req->data = erp_action->sdev; |
2003 | req->erp_action = erp_action; | 1922 | req->erp_action = erp_action; |
2004 | erp_action->fsf_req_id = req->req_id; | 1923 | erp_action->fsf_req_id = req->req_id; |
2005 | 1924 | ||
@@ -2010,7 +1929,7 @@ int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action) | |||
2010 | erp_action->fsf_req_id = 0; | 1929 | erp_action->fsf_req_id = 0; |
2011 | } | 1930 | } |
2012 | out: | 1931 | out: |
2013 | spin_unlock_bh(&qdio->req_q_lock); | 1932 | spin_unlock_irq(&qdio->req_q_lock); |
2014 | return retval; | 1933 | return retval; |
2015 | } | 1934 | } |
2016 | 1935 | ||
@@ -2025,7 +1944,7 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi) | |||
2025 | { | 1944 | { |
2026 | struct fsf_qual_latency_info *lat_in; | 1945 | struct fsf_qual_latency_info *lat_in; |
2027 | struct latency_cont *lat = NULL; | 1946 | struct latency_cont *lat = NULL; |
2028 | struct zfcp_unit *unit = req->unit; | 1947 | struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scsi->device); |
2029 | struct zfcp_blk_drv_data blktrc; | 1948 | struct zfcp_blk_drv_data blktrc; |
2030 | int ticks = req->adapter->timer_ticks; | 1949 | int ticks = req->adapter->timer_ticks; |
2031 | 1950 | ||
@@ -2048,24 +1967,24 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi) | |||
2048 | case FSF_DATADIR_DIF_READ_STRIP: | 1967 | case FSF_DATADIR_DIF_READ_STRIP: |
2049 | case FSF_DATADIR_DIF_READ_CONVERT: | 1968 | case FSF_DATADIR_DIF_READ_CONVERT: |
2050 | case FSF_DATADIR_READ: | 1969 | case FSF_DATADIR_READ: |
2051 | lat = &unit->latencies.read; | 1970 | lat = &zfcp_sdev->latencies.read; |
2052 | break; | 1971 | break; |
2053 | case FSF_DATADIR_DIF_WRITE_INSERT: | 1972 | case FSF_DATADIR_DIF_WRITE_INSERT: |
2054 | case FSF_DATADIR_DIF_WRITE_CONVERT: | 1973 | case FSF_DATADIR_DIF_WRITE_CONVERT: |
2055 | case FSF_DATADIR_WRITE: | 1974 | case FSF_DATADIR_WRITE: |
2056 | lat = &unit->latencies.write; | 1975 | lat = &zfcp_sdev->latencies.write; |
2057 | break; | 1976 | break; |
2058 | case FSF_DATADIR_CMND: | 1977 | case FSF_DATADIR_CMND: |
2059 | lat = &unit->latencies.cmd; | 1978 | lat = &zfcp_sdev->latencies.cmd; |
2060 | break; | 1979 | break; |
2061 | } | 1980 | } |
2062 | 1981 | ||
2063 | if (lat) { | 1982 | if (lat) { |
2064 | spin_lock(&unit->latencies.lock); | 1983 | spin_lock(&zfcp_sdev->latencies.lock); |
2065 | zfcp_fsf_update_lat(&lat->channel, lat_in->channel_lat); | 1984 | zfcp_fsf_update_lat(&lat->channel, lat_in->channel_lat); |
2066 | zfcp_fsf_update_lat(&lat->fabric, lat_in->fabric_lat); | 1985 | zfcp_fsf_update_lat(&lat->fabric, lat_in->fabric_lat); |
2067 | lat->counter++; | 1986 | lat->counter++; |
2068 | spin_unlock(&unit->latencies.lock); | 1987 | spin_unlock(&zfcp_sdev->latencies.lock); |
2069 | } | 1988 | } |
2070 | } | 1989 | } |
2071 | 1990 | ||
@@ -2073,12 +1992,88 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi) | |||
2073 | sizeof(blktrc)); | 1992 | sizeof(blktrc)); |
2074 | } | 1993 | } |
2075 | 1994 | ||
2076 | static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req) | 1995 | static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req) |
1996 | { | ||
1997 | struct scsi_cmnd *scmnd = req->data; | ||
1998 | struct scsi_device *sdev = scmnd->device; | ||
1999 | struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); | ||
2000 | struct fsf_qtcb_header *header = &req->qtcb->header; | ||
2001 | |||
2002 | if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) | ||
2003 | return; | ||
2004 | |||
2005 | switch (header->fsf_status) { | ||
2006 | case FSF_HANDLE_MISMATCH: | ||
2007 | case FSF_PORT_HANDLE_NOT_VALID: | ||
2008 | zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fssfch1", | ||
2009 | req); | ||
2010 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | ||
2011 | break; | ||
2012 | case FSF_FCPLUN_NOT_VALID: | ||
2013 | case FSF_LUN_HANDLE_NOT_VALID: | ||
2014 | zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fssfch2", req); | ||
2015 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | ||
2016 | break; | ||
2017 | case FSF_SERVICE_CLASS_NOT_SUPPORTED: | ||
2018 | zfcp_fsf_class_not_supp(req); | ||
2019 | break; | ||
2020 | case FSF_ACCESS_DENIED: | ||
2021 | zfcp_cfdc_lun_denied(sdev, &header->fsf_status_qual); | ||
2022 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | ||
2023 | break; | ||
2024 | case FSF_DIRECTION_INDICATOR_NOT_VALID: | ||
2025 | dev_err(&req->adapter->ccw_device->dev, | ||
2026 | "Incorrect direction %d, LUN 0x%016Lx on port " | ||
2027 | "0x%016Lx closed\n", | ||
2028 | req->qtcb->bottom.io.data_direction, | ||
2029 | (unsigned long long)zfcp_scsi_dev_lun(sdev), | ||
2030 | (unsigned long long)zfcp_sdev->port->wwpn); | ||
2031 | zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0, | ||
2032 | "fssfch3", req); | ||
2033 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | ||
2034 | break; | ||
2035 | case FSF_CMND_LENGTH_NOT_VALID: | ||
2036 | dev_err(&req->adapter->ccw_device->dev, | ||
2037 | "Incorrect CDB length %d, LUN 0x%016Lx on " | ||
2038 | "port 0x%016Lx closed\n", | ||
2039 | req->qtcb->bottom.io.fcp_cmnd_length, | ||
2040 | (unsigned long long)zfcp_scsi_dev_lun(sdev), | ||
2041 | (unsigned long long)zfcp_sdev->port->wwpn); | ||
2042 | zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0, | ||
2043 | "fssfch4", req); | ||
2044 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | ||
2045 | break; | ||
2046 | case FSF_PORT_BOXED: | ||
2047 | zfcp_erp_set_port_status(zfcp_sdev->port, | ||
2048 | ZFCP_STATUS_COMMON_ACCESS_BOXED); | ||
2049 | zfcp_erp_port_reopen(zfcp_sdev->port, | ||
2050 | ZFCP_STATUS_COMMON_ERP_FAILED, "fssfch5", | ||
2051 | req); | ||
2052 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | ||
2053 | break; | ||
2054 | case FSF_LUN_BOXED: | ||
2055 | zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED); | ||
2056 | zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED, | ||
2057 | "fssfch6", req); | ||
2058 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | ||
2059 | break; | ||
2060 | case FSF_ADAPTER_STATUS_AVAILABLE: | ||
2061 | if (header->fsf_status_qual.word[0] == | ||
2062 | FSF_SQ_INVOKE_LINK_TEST_PROCEDURE) | ||
2063 | zfcp_fc_test_link(zfcp_sdev->port); | ||
2064 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | ||
2065 | break; | ||
2066 | } | ||
2067 | } | ||
2068 | |||
2069 | static void zfcp_fsf_fcp_cmnd_handler(struct zfcp_fsf_req *req) | ||
2077 | { | 2070 | { |
2078 | struct scsi_cmnd *scpnt; | 2071 | struct scsi_cmnd *scpnt; |
2079 | struct fcp_resp_with_ext *fcp_rsp; | 2072 | struct fcp_resp_with_ext *fcp_rsp; |
2080 | unsigned long flags; | 2073 | unsigned long flags; |
2081 | 2074 | ||
2075 | zfcp_fsf_fcp_handler_common(req); | ||
2076 | |||
2082 | read_lock_irqsave(&req->adapter->abort_lock, flags); | 2077 | read_lock_irqsave(&req->adapter->abort_lock, flags); |
2083 | 2078 | ||
2084 | scpnt = req->data; | 2079 | scpnt = req->data; |
@@ -2125,97 +2120,6 @@ skip_fsfstatus: | |||
2125 | read_unlock_irqrestore(&req->adapter->abort_lock, flags); | 2120 | read_unlock_irqrestore(&req->adapter->abort_lock, flags); |
2126 | } | 2121 | } |
2127 | 2122 | ||
2128 | static void zfcp_fsf_send_fcp_ctm_handler(struct zfcp_fsf_req *req) | ||
2129 | { | ||
2130 | struct fcp_resp_with_ext *fcp_rsp; | ||
2131 | struct fcp_resp_rsp_info *rsp_info; | ||
2132 | |||
2133 | fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp; | ||
2134 | rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1]; | ||
2135 | |||
2136 | if ((rsp_info->rsp_code != FCP_TMF_CMPL) || | ||
2137 | (req->status & ZFCP_STATUS_FSFREQ_ERROR)) | ||
2138 | req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED; | ||
2139 | } | ||
2140 | |||
2141 | |||
2142 | static void zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *req) | ||
2143 | { | ||
2144 | struct zfcp_unit *unit; | ||
2145 | struct fsf_qtcb_header *header = &req->qtcb->header; | ||
2146 | |||
2147 | if (unlikely(req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT)) | ||
2148 | unit = req->data; | ||
2149 | else | ||
2150 | unit = req->unit; | ||
2151 | |||
2152 | if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) | ||
2153 | goto skip_fsfstatus; | ||
2154 | |||
2155 | switch (header->fsf_status) { | ||
2156 | case FSF_HANDLE_MISMATCH: | ||
2157 | case FSF_PORT_HANDLE_NOT_VALID: | ||
2158 | zfcp_erp_adapter_reopen(unit->port->adapter, 0, "fssfch1", req); | ||
2159 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | ||
2160 | break; | ||
2161 | case FSF_FCPLUN_NOT_VALID: | ||
2162 | case FSF_LUN_HANDLE_NOT_VALID: | ||
2163 | zfcp_erp_port_reopen(unit->port, 0, "fssfch2", req); | ||
2164 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | ||
2165 | break; | ||
2166 | case FSF_SERVICE_CLASS_NOT_SUPPORTED: | ||
2167 | zfcp_fsf_class_not_supp(req); | ||
2168 | break; | ||
2169 | case FSF_ACCESS_DENIED: | ||
2170 | zfcp_fsf_access_denied_unit(req, unit); | ||
2171 | break; | ||
2172 | case FSF_DIRECTION_INDICATOR_NOT_VALID: | ||
2173 | dev_err(&req->adapter->ccw_device->dev, | ||
2174 | "Incorrect direction %d, unit 0x%016Lx on port " | ||
2175 | "0x%016Lx closed\n", | ||
2176 | req->qtcb->bottom.io.data_direction, | ||
2177 | (unsigned long long)unit->fcp_lun, | ||
2178 | (unsigned long long)unit->port->wwpn); | ||
2179 | zfcp_erp_adapter_shutdown(unit->port->adapter, 0, "fssfch3", | ||
2180 | req); | ||
2181 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | ||
2182 | break; | ||
2183 | case FSF_CMND_LENGTH_NOT_VALID: | ||
2184 | dev_err(&req->adapter->ccw_device->dev, | ||
2185 | "Incorrect CDB length %d, unit 0x%016Lx on " | ||
2186 | "port 0x%016Lx closed\n", | ||
2187 | req->qtcb->bottom.io.fcp_cmnd_length, | ||
2188 | (unsigned long long)unit->fcp_lun, | ||
2189 | (unsigned long long)unit->port->wwpn); | ||
2190 | zfcp_erp_adapter_shutdown(unit->port->adapter, 0, "fssfch4", | ||
2191 | req); | ||
2192 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | ||
2193 | break; | ||
2194 | case FSF_PORT_BOXED: | ||
2195 | zfcp_erp_port_boxed(unit->port, "fssfch5", req); | ||
2196 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | ||
2197 | break; | ||
2198 | case FSF_LUN_BOXED: | ||
2199 | zfcp_erp_unit_boxed(unit, "fssfch6", req); | ||
2200 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | ||
2201 | break; | ||
2202 | case FSF_ADAPTER_STATUS_AVAILABLE: | ||
2203 | if (header->fsf_status_qual.word[0] == | ||
2204 | FSF_SQ_INVOKE_LINK_TEST_PROCEDURE) | ||
2205 | zfcp_fc_test_link(unit->port); | ||
2206 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | ||
2207 | break; | ||
2208 | } | ||
2209 | skip_fsfstatus: | ||
2210 | if (req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT) | ||
2211 | zfcp_fsf_send_fcp_ctm_handler(req); | ||
2212 | else { | ||
2213 | zfcp_fsf_send_fcp_command_task_handler(req); | ||
2214 | req->unit = NULL; | ||
2215 | put_device(&unit->dev); | ||
2216 | } | ||
2217 | } | ||
2218 | |||
2219 | static int zfcp_fsf_set_data_dir(struct scsi_cmnd *scsi_cmnd, u32 *data_dir) | 2123 | static int zfcp_fsf_set_data_dir(struct scsi_cmnd *scsi_cmnd, u32 *data_dir) |
2220 | { | 2124 | { |
2221 | switch (scsi_get_prot_op(scsi_cmnd)) { | 2125 | switch (scsi_get_prot_op(scsi_cmnd)) { |
@@ -2255,22 +2159,22 @@ static int zfcp_fsf_set_data_dir(struct scsi_cmnd *scsi_cmnd, u32 *data_dir) | |||
2255 | } | 2159 | } |
2256 | 2160 | ||
2257 | /** | 2161 | /** |
2258 | * zfcp_fsf_send_fcp_command_task - initiate an FCP command (for a SCSI command) | 2162 | * zfcp_fsf_fcp_cmnd - initiate an FCP command (for a SCSI command) |
2259 | * @unit: unit where command is sent to | ||
2260 | * @scsi_cmnd: scsi command to be sent | 2163 | * @scsi_cmnd: scsi command to be sent |
2261 | */ | 2164 | */ |
2262 | int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit, | 2165 | int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd) |
2263 | struct scsi_cmnd *scsi_cmnd) | ||
2264 | { | 2166 | { |
2265 | struct zfcp_fsf_req *req; | 2167 | struct zfcp_fsf_req *req; |
2266 | struct fcp_cmnd *fcp_cmnd; | 2168 | struct fcp_cmnd *fcp_cmnd; |
2267 | unsigned int sbtype = SBAL_FLAGS0_TYPE_READ; | 2169 | unsigned int sbtype = SBAL_FLAGS0_TYPE_READ; |
2268 | int real_bytes, retval = -EIO, dix_bytes = 0; | 2170 | int real_bytes, retval = -EIO, dix_bytes = 0; |
2269 | struct zfcp_adapter *adapter = unit->port->adapter; | 2171 | struct scsi_device *sdev = scsi_cmnd->device; |
2172 | struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); | ||
2173 | struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; | ||
2270 | struct zfcp_qdio *qdio = adapter->qdio; | 2174 | struct zfcp_qdio *qdio = adapter->qdio; |
2271 | struct fsf_qtcb_bottom_io *io; | 2175 | struct fsf_qtcb_bottom_io *io; |
2272 | 2176 | ||
2273 | if (unlikely(!(atomic_read(&unit->status) & | 2177 | if (unlikely(!(atomic_read(&zfcp_sdev->status) & |
2274 | ZFCP_STATUS_COMMON_UNBLOCKED))) | 2178 | ZFCP_STATUS_COMMON_UNBLOCKED))) |
2275 | return -EBUSY; | 2179 | return -EBUSY; |
2276 | 2180 | ||
@@ -2295,11 +2199,10 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit, | |||
2295 | 2199 | ||
2296 | io = &req->qtcb->bottom.io; | 2200 | io = &req->qtcb->bottom.io; |
2297 | req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; | 2201 | req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; |
2298 | req->unit = unit; | ||
2299 | req->data = scsi_cmnd; | 2202 | req->data = scsi_cmnd; |
2300 | req->handler = zfcp_fsf_send_fcp_command_handler; | 2203 | req->handler = zfcp_fsf_fcp_cmnd_handler; |
2301 | req->qtcb->header.lun_handle = unit->handle; | 2204 | req->qtcb->header.lun_handle = zfcp_sdev->lun_handle; |
2302 | req->qtcb->header.port_handle = unit->port->handle; | 2205 | req->qtcb->header.port_handle = zfcp_sdev->port->handle; |
2303 | io->service_class = FSF_CLASS_3; | 2206 | io->service_class = FSF_CLASS_3; |
2304 | io->fcp_cmnd_length = FCP_CMND_LEN; | 2207 | io->fcp_cmnd_length = FCP_CMND_LEN; |
2305 | 2208 | ||
@@ -2310,8 +2213,6 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit, | |||
2310 | 2213 | ||
2311 | zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction); | 2214 | zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction); |
2312 | 2215 | ||
2313 | get_device(&unit->dev); | ||
2314 | |||
2315 | fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd; | 2216 | fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd; |
2316 | zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd); | 2217 | zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd); |
2317 | 2218 | ||
@@ -2338,7 +2239,6 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit, | |||
2338 | goto out; | 2239 | goto out; |
2339 | 2240 | ||
2340 | failed_scsi_cmnd: | 2241 | failed_scsi_cmnd: |
2341 | put_device(&unit->dev); | ||
2342 | zfcp_fsf_req_free(req); | 2242 | zfcp_fsf_req_free(req); |
2343 | scsi_cmnd->host_scribble = NULL; | 2243 | scsi_cmnd->host_scribble = NULL; |
2344 | out: | 2244 | out: |
@@ -2346,23 +2246,40 @@ out: | |||
2346 | return retval; | 2246 | return retval; |
2347 | } | 2247 | } |
2348 | 2248 | ||
2249 | static void zfcp_fsf_fcp_task_mgmt_handler(struct zfcp_fsf_req *req) | ||
2250 | { | ||
2251 | struct fcp_resp_with_ext *fcp_rsp; | ||
2252 | struct fcp_resp_rsp_info *rsp_info; | ||
2253 | |||
2254 | zfcp_fsf_fcp_handler_common(req); | ||
2255 | |||
2256 | fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp; | ||
2257 | rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1]; | ||
2258 | |||
2259 | if ((rsp_info->rsp_code != FCP_TMF_CMPL) || | ||
2260 | (req->status & ZFCP_STATUS_FSFREQ_ERROR)) | ||
2261 | req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED; | ||
2262 | } | ||
2263 | |||
2349 | /** | 2264 | /** |
2350 | * zfcp_fsf_send_fcp_ctm - send SCSI task management command | 2265 | * zfcp_fsf_fcp_task_mgmt - send SCSI task management command |
2351 | * @unit: pointer to struct zfcp_unit | 2266 | * @scmnd: SCSI command to send the task management command for |
2352 | * @tm_flags: unsigned byte for task management flags | 2267 | * @tm_flags: unsigned byte for task management flags |
2353 | * Returns: on success pointer to struct fsf_req, NULL otherwise | 2268 | * Returns: on success pointer to struct fsf_req, NULL otherwise |
2354 | */ | 2269 | */ |
2355 | struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags) | 2270 | struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd, |
2271 | u8 tm_flags) | ||
2356 | { | 2272 | { |
2357 | struct zfcp_fsf_req *req = NULL; | 2273 | struct zfcp_fsf_req *req = NULL; |
2358 | struct fcp_cmnd *fcp_cmnd; | 2274 | struct fcp_cmnd *fcp_cmnd; |
2359 | struct zfcp_qdio *qdio = unit->port->adapter->qdio; | 2275 | struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scmnd->device); |
2276 | struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio; | ||
2360 | 2277 | ||
2361 | if (unlikely(!(atomic_read(&unit->status) & | 2278 | if (unlikely(!(atomic_read(&zfcp_sdev->status) & |
2362 | ZFCP_STATUS_COMMON_UNBLOCKED))) | 2279 | ZFCP_STATUS_COMMON_UNBLOCKED))) |
2363 | return NULL; | 2280 | return NULL; |
2364 | 2281 | ||
2365 | spin_lock_bh(&qdio->req_q_lock); | 2282 | spin_lock_irq(&qdio->req_q_lock); |
2366 | if (zfcp_qdio_sbal_get(qdio)) | 2283 | if (zfcp_qdio_sbal_get(qdio)) |
2367 | goto out; | 2284 | goto out; |
2368 | 2285 | ||
@@ -2376,10 +2293,10 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags) | |||
2376 | } | 2293 | } |
2377 | 2294 | ||
2378 | req->status |= ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT; | 2295 | req->status |= ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT; |
2379 | req->data = unit; | 2296 | req->data = scmnd; |
2380 | req->handler = zfcp_fsf_send_fcp_command_handler; | 2297 | req->handler = zfcp_fsf_fcp_task_mgmt_handler; |
2381 | req->qtcb->header.lun_handle = unit->handle; | 2298 | req->qtcb->header.lun_handle = zfcp_sdev->lun_handle; |
2382 | req->qtcb->header.port_handle = unit->port->handle; | 2299 | req->qtcb->header.port_handle = zfcp_sdev->port->handle; |
2383 | req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND; | 2300 | req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND; |
2384 | req->qtcb->bottom.io.service_class = FSF_CLASS_3; | 2301 | req->qtcb->bottom.io.service_class = FSF_CLASS_3; |
2385 | req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN; | 2302 | req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN; |
@@ -2387,7 +2304,7 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags) | |||
2387 | zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); | 2304 | zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); |
2388 | 2305 | ||
2389 | fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd; | 2306 | fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd; |
2390 | zfcp_fc_fcp_tm(fcp_cmnd, unit->device, tm_flags); | 2307 | zfcp_fc_fcp_tm(fcp_cmnd, scmnd->device, tm_flags); |
2391 | 2308 | ||
2392 | zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT); | 2309 | zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT); |
2393 | if (!zfcp_fsf_req_send(req)) | 2310 | if (!zfcp_fsf_req_send(req)) |
@@ -2396,7 +2313,7 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags) | |||
2396 | zfcp_fsf_req_free(req); | 2313 | zfcp_fsf_req_free(req); |
2397 | req = NULL; | 2314 | req = NULL; |
2398 | out: | 2315 | out: |
2399 | spin_unlock_bh(&qdio->req_q_lock); | 2316 | spin_unlock_irq(&qdio->req_q_lock); |
2400 | return req; | 2317 | return req; |
2401 | } | 2318 | } |
2402 | 2319 | ||
@@ -2432,7 +2349,7 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter, | |||
2432 | return ERR_PTR(-EINVAL); | 2349 | return ERR_PTR(-EINVAL); |
2433 | } | 2350 | } |
2434 | 2351 | ||
2435 | spin_lock_bh(&qdio->req_q_lock); | 2352 | spin_lock_irq(&qdio->req_q_lock); |
2436 | if (zfcp_qdio_sbal_get(qdio)) | 2353 | if (zfcp_qdio_sbal_get(qdio)) |
2437 | goto out; | 2354 | goto out; |
2438 | 2355 | ||
@@ -2459,7 +2376,7 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter, | |||
2459 | zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); | 2376 | zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); |
2460 | retval = zfcp_fsf_req_send(req); | 2377 | retval = zfcp_fsf_req_send(req); |
2461 | out: | 2378 | out: |
2462 | spin_unlock_bh(&qdio->req_q_lock); | 2379 | spin_unlock_irq(&qdio->req_q_lock); |
2463 | 2380 | ||
2464 | if (!retval) { | 2381 | if (!retval) { |
2465 | wait_for_completion(&req->completion); | 2382 | wait_for_completion(&req->completion); |
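Across the zfcp_fsf.c hunks above, the state that used to sit in struct zfcp_unit (LUN handle, status bits, latency counters) moves into a struct zfcp_scsi_dev attached to the scsi_device and reached through sdev_to_zfcp(). Neither the structure nor the accessor is defined in this file; a minimal sketch of the usual pattern, assuming the data lives in the SCSI transport class's per-device private area, would be (all example_* names are illustrative):

    #include <linux/atomic.h>
    #include <linux/types.h>
    #include <scsi/scsi_device.h>
    #include <scsi/scsi_transport.h>

    struct zfcp_port;

    /* reduced per-sdev driver data as used by the hunks above */
    struct example_zfcp_scsi_dev {
            atomic_t status;
            u32 lun_handle;
            struct zfcp_port *port;
    };

    /* assumed accessor: hand back the transport-class private area */
    static inline struct example_zfcp_scsi_dev *
    example_sdev_to_zfcp(struct scsi_device *sdev)
    {
            return scsi_transport_device_data(sdev);
    }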
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index b2635759721c..a0554beb4179 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c | |||
@@ -60,13 +60,11 @@ static inline void zfcp_qdio_account(struct zfcp_qdio *qdio) | |||
60 | unsigned long long now, span; | 60 | unsigned long long now, span; |
61 | int used; | 61 | int used; |
62 | 62 | ||
63 | spin_lock(&qdio->stat_lock); | ||
64 | now = get_clock_monotonic(); | 63 | now = get_clock_monotonic(); |
65 | span = (now - qdio->req_q_time) >> 12; | 64 | span = (now - qdio->req_q_time) >> 12; |
66 | used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free); | 65 | used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free); |
67 | qdio->req_q_util += used * span; | 66 | qdio->req_q_util += used * span; |
68 | qdio->req_q_time = now; | 67 | qdio->req_q_time = now; |
69 | spin_unlock(&qdio->stat_lock); | ||
70 | } | 68 | } |
71 | 69 | ||
72 | static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err, | 70 | static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err, |
@@ -84,7 +82,9 @@ static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err, | |||
84 | /* cleanup all SBALs being program-owned now */ | 82 | /* cleanup all SBALs being program-owned now */ |
85 | zfcp_qdio_zero_sbals(qdio->req_q, idx, count); | 83 | zfcp_qdio_zero_sbals(qdio->req_q, idx, count); |
86 | 84 | ||
85 | spin_lock_irq(&qdio->stat_lock); | ||
87 | zfcp_qdio_account(qdio); | 86 | zfcp_qdio_account(qdio); |
87 | spin_unlock_irq(&qdio->stat_lock); | ||
88 | atomic_add(count, &qdio->req_q_free); | 88 | atomic_add(count, &qdio->req_q_free); |
89 | wake_up(&qdio->req_q_wq); | 89 | wake_up(&qdio->req_q_wq); |
90 | } | 90 | } |
@@ -201,11 +201,11 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req, | |||
201 | 201 | ||
202 | static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio) | 202 | static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio) |
203 | { | 203 | { |
204 | spin_lock_bh(&qdio->req_q_lock); | 204 | spin_lock_irq(&qdio->req_q_lock); |
205 | if (atomic_read(&qdio->req_q_free) || | 205 | if (atomic_read(&qdio->req_q_free) || |
206 | !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) | 206 | !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) |
207 | return 1; | 207 | return 1; |
208 | spin_unlock_bh(&qdio->req_q_lock); | 208 | spin_unlock_irq(&qdio->req_q_lock); |
209 | return 0; | 209 | return 0; |
210 | } | 210 | } |
211 | 211 | ||
@@ -223,7 +223,7 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio) | |||
223 | { | 223 | { |
224 | long ret; | 224 | long ret; |
225 | 225 | ||
226 | spin_unlock_bh(&qdio->req_q_lock); | 226 | spin_unlock_irq(&qdio->req_q_lock); |
227 | ret = wait_event_interruptible_timeout(qdio->req_q_wq, | 227 | ret = wait_event_interruptible_timeout(qdio->req_q_wq, |
228 | zfcp_qdio_sbal_check(qdio), 5 * HZ); | 228 | zfcp_qdio_sbal_check(qdio), 5 * HZ); |
229 | 229 | ||
@@ -239,7 +239,7 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio) | |||
239 | zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1", NULL); | 239 | zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1", NULL); |
240 | } | 240 | } |
241 | 241 | ||
242 | spin_lock_bh(&qdio->req_q_lock); | 242 | spin_lock_irq(&qdio->req_q_lock); |
243 | return -EIO; | 243 | return -EIO; |
244 | } | 244 | } |
245 | 245 | ||
@@ -254,7 +254,9 @@ int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req) | |||
254 | int retval; | 254 | int retval; |
255 | u8 sbal_number = q_req->sbal_number; | 255 | u8 sbal_number = q_req->sbal_number; |
256 | 256 | ||
257 | spin_lock(&qdio->stat_lock); | ||
257 | zfcp_qdio_account(qdio); | 258 | zfcp_qdio_account(qdio); |
259 | spin_unlock(&qdio->stat_lock); | ||
258 | 260 | ||
259 | retval = do_QDIO(qdio->adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0, | 261 | retval = do_QDIO(qdio->adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0, |
260 | q_req->sbal_first, sbal_number); | 262 | q_req->sbal_first, sbal_number); |
@@ -277,16 +279,12 @@ int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req) | |||
277 | static void zfcp_qdio_setup_init_data(struct qdio_initialize *id, | 279 | static void zfcp_qdio_setup_init_data(struct qdio_initialize *id, |
278 | struct zfcp_qdio *qdio) | 280 | struct zfcp_qdio *qdio) |
279 | { | 281 | { |
280 | 282 | memset(id, 0, sizeof(*id)); | |
281 | id->cdev = qdio->adapter->ccw_device; | 283 | id->cdev = qdio->adapter->ccw_device; |
282 | id->q_format = QDIO_ZFCP_QFMT; | 284 | id->q_format = QDIO_ZFCP_QFMT; |
283 | memcpy(id->adapter_name, dev_name(&id->cdev->dev), 8); | 285 | memcpy(id->adapter_name, dev_name(&id->cdev->dev), 8); |
284 | ASCEBC(id->adapter_name, 8); | 286 | ASCEBC(id->adapter_name, 8); |
285 | id->qib_rflags = QIB_RFLAGS_ENABLE_DATA_DIV; | 287 | id->qib_rflags = QIB_RFLAGS_ENABLE_DATA_DIV; |
286 | id->qib_param_field_format = 0; | ||
287 | id->qib_param_field = NULL; | ||
288 | id->input_slib_elements = NULL; | ||
289 | id->output_slib_elements = NULL; | ||
290 | id->no_input_qs = 1; | 288 | id->no_input_qs = 1; |
291 | id->no_output_qs = 1; | 289 | id->no_output_qs = 1; |
292 | id->input_handler = zfcp_qdio_int_resp; | 290 | id->input_handler = zfcp_qdio_int_resp; |
@@ -328,9 +326,9 @@ void zfcp_qdio_close(struct zfcp_qdio *qdio) | |||
328 | return; | 326 | return; |
329 | 327 | ||
330 | /* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */ | 328 | /* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */ |
331 | spin_lock_bh(&qdio->req_q_lock); | 329 | spin_lock_irq(&qdio->req_q_lock); |
332 | atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status); | 330 | atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status); |
333 | spin_unlock_bh(&qdio->req_q_lock); | 331 | spin_unlock_irq(&qdio->req_q_lock); |
334 | 332 | ||
335 | wake_up(&qdio->req_q_wq); | 333 | wake_up(&qdio->req_q_wq); |
336 | 334 | ||
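Two themes run through the zfcp_qdio.c changes above: the request-queue lock moves from the _bh to the _irq spinlock variants (presumably so it can also be taken where interrupts must stay off), and zfcp_qdio_account() no longer takes qdio->stat_lock itself. The accounting it protects accumulates req_q_util += used * span, where span is the TOD-clock delta shifted right by 12 (roughly the elapsed microseconds), so the counter and the req_q_time stamp must be updated under one lock, now held by the caller. A condensed sketch of that caller-side contract (helper names are illustrative):

    /* interrupt-handler path: take the statistics lock with IRQs off */
    static void example_account_from_irq_path(struct zfcp_qdio *qdio)
    {
            spin_lock_irq(&qdio->stat_lock);
            zfcp_qdio_account(qdio);        /* updates req_q_util and req_q_time */
            spin_unlock_irq(&qdio->stat_lock);
    }

    /* send path: already runs under the irq-disabled req_q_lock,
     * so a plain spin_lock is sufficient */
    static void example_account_from_send_path(struct zfcp_qdio *qdio)
    {
            spin_lock(&qdio->stat_lock);
            zfcp_qdio_account(qdio);
            spin_unlock(&qdio->stat_lock);
    }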
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index cb000c9833bb..50286d8707f3 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c | |||
@@ -49,11 +49,12 @@ static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth, | |||
49 | return sdev->queue_depth; | 49 | return sdev->queue_depth; |
50 | } | 50 | } |
51 | 51 | ||
52 | static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt) | 52 | static void zfcp_scsi_slave_destroy(struct scsi_device *sdev) |
53 | { | 53 | { |
54 | struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata; | 54 | struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); |
55 | unit->device = NULL; | 55 | |
56 | put_device(&unit->dev); | 56 | zfcp_erp_lun_shutdown_wait(sdev, "scssd_1"); |
57 | put_device(&zfcp_sdev->port->dev); | ||
57 | } | 58 | } |
58 | 59 | ||
59 | static int zfcp_scsi_slave_configure(struct scsi_device *sdp) | 60 | static int zfcp_scsi_slave_configure(struct scsi_device *sdp) |
@@ -78,23 +79,16 @@ static void zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result) | |||
78 | static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt, | 79 | static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt, |
79 | void (*done) (struct scsi_cmnd *)) | 80 | void (*done) (struct scsi_cmnd *)) |
80 | { | 81 | { |
81 | struct zfcp_unit *unit; | 82 | struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device); |
82 | struct zfcp_adapter *adapter; | 83 | struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; |
83 | int status, scsi_result, ret; | ||
84 | struct fc_rport *rport = starget_to_rport(scsi_target(scpnt->device)); | 84 | struct fc_rport *rport = starget_to_rport(scsi_target(scpnt->device)); |
85 | int status, scsi_result, ret; | ||
85 | 86 | ||
86 | /* reset the status for this request */ | 87 | /* reset the status for this request */ |
87 | scpnt->result = 0; | 88 | scpnt->result = 0; |
88 | scpnt->host_scribble = NULL; | 89 | scpnt->host_scribble = NULL; |
89 | scpnt->scsi_done = done; | 90 | scpnt->scsi_done = done; |
90 | 91 | ||
91 | /* | ||
92 | * figure out adapter and target device | ||
93 | * (stored there by zfcp_scsi_slave_alloc) | ||
94 | */ | ||
95 | adapter = (struct zfcp_adapter *) scpnt->device->host->hostdata[0]; | ||
96 | unit = scpnt->device->hostdata; | ||
97 | |||
98 | scsi_result = fc_remote_port_chkready(rport); | 92 | scsi_result = fc_remote_port_chkready(rport); |
99 | if (unlikely(scsi_result)) { | 93 | if (unlikely(scsi_result)) { |
100 | scpnt->result = scsi_result; | 94 | scpnt->result = scsi_result; |
@@ -103,11 +97,11 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt, | |||
103 | return 0; | 97 | return 0; |
104 | } | 98 | } |
105 | 99 | ||
106 | status = atomic_read(&unit->status); | 100 | status = atomic_read(&zfcp_sdev->status); |
107 | if (unlikely(status & ZFCP_STATUS_COMMON_ERP_FAILED) && | 101 | if (unlikely(status & ZFCP_STATUS_COMMON_ERP_FAILED) && |
108 | !(atomic_read(&unit->port->status) & | 102 | !(atomic_read(&zfcp_sdev->port->status) & |
109 | ZFCP_STATUS_COMMON_ERP_FAILED)) { | 103 | ZFCP_STATUS_COMMON_ERP_FAILED)) { |
110 | /* only unit access denied, but port is good | 104 | /* only LUN access denied, but port is good |
111 | * not covered by FC transport, have to fail here */ | 105 | * not covered by FC transport, have to fail here */ |
112 | zfcp_scsi_command_fail(scpnt, DID_ERROR); | 106 | zfcp_scsi_command_fail(scpnt, DID_ERROR); |
113 | return 0; | 107 | return 0; |
@@ -115,8 +109,8 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt, | |||
115 | 109 | ||
116 | if (unlikely(!(status & ZFCP_STATUS_COMMON_UNBLOCKED))) { | 110 | if (unlikely(!(status & ZFCP_STATUS_COMMON_UNBLOCKED))) { |
117 | /* This could be either | 111 | /* This could be either |
118 | * open unit pending: this is temporary, will result in | 112 | * open LUN pending: this is temporary, will result in |
119 | * open unit or ERP_FAILED, so retry command | 113 | * open LUN or ERP_FAILED, so retry command |
120 | * call to rport_delete pending: mimic retry from | 114 | * call to rport_delete pending: mimic retry from |
121 | * fc_remote_port_chkready until rport is BLOCKED | 115 | * fc_remote_port_chkready until rport is BLOCKED |
122 | */ | 116 | */ |
@@ -124,7 +118,7 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt, | |||
124 | return 0; | 118 | return 0; |
125 | } | 119 | } |
126 | 120 | ||
127 | ret = zfcp_fsf_send_fcp_command_task(unit, scpnt); | 121 | ret = zfcp_fsf_fcp_cmnd(scpnt); |
128 | if (unlikely(ret == -EBUSY)) | 122 | if (unlikely(ret == -EBUSY)) |
129 | return SCSI_MLQUEUE_DEVICE_BUSY; | 123 | return SCSI_MLQUEUE_DEVICE_BUSY; |
130 | else if (unlikely(ret < 0)) | 124 | else if (unlikely(ret < 0)) |
@@ -133,45 +127,42 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt, | |||
133 | return ret; | 127 | return ret; |
134 | } | 128 | } |
135 | 129 | ||
136 | static struct zfcp_unit *zfcp_unit_lookup(struct zfcp_adapter *adapter, | 130 | static int zfcp_scsi_slave_alloc(struct scsi_device *sdev) |
137 | unsigned int id, u64 lun) | ||
138 | { | 131 | { |
139 | unsigned long flags; | 132 | struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); |
133 | struct zfcp_adapter *adapter = | ||
134 | (struct zfcp_adapter *) sdev->host->hostdata[0]; | ||
135 | struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); | ||
140 | struct zfcp_port *port; | 136 | struct zfcp_port *port; |
141 | struct zfcp_unit *unit = NULL; | 137 | struct zfcp_unit *unit; |
142 | 138 | ||
143 | read_lock_irqsave(&adapter->port_list_lock, flags); | 139 | port = zfcp_get_port_by_wwpn(adapter, rport->port_name); |
144 | list_for_each_entry(port, &adapter->port_list, list) { | 140 | if (!port) |
145 | if (!port->rport || (id != port->rport->scsi_target_id)) | 141 | return -ENXIO; |
146 | continue; | ||
147 | unit = zfcp_get_unit_by_lun(port, lun); | ||
148 | if (unit) | ||
149 | break; | ||
150 | } | ||
151 | read_unlock_irqrestore(&adapter->port_list_lock, flags); | ||
152 | 142 | ||
153 | return unit; | 143 | unit = zfcp_unit_find(port, zfcp_scsi_dev_lun(sdev)); |
154 | } | 144 | if (unit) |
145 | put_device(&unit->dev); | ||
155 | 146 | ||
156 | static int zfcp_scsi_slave_alloc(struct scsi_device *sdp) | 147 | if (!unit && !(adapter->connection_features & FSF_FEATURE_NPIV_MODE)) { |
157 | { | 148 | put_device(&port->dev); |
158 | struct zfcp_adapter *adapter; | 149 | return -ENXIO; |
159 | struct zfcp_unit *unit; | 150 | } |
160 | u64 lun; | ||
161 | 151 | ||
162 | adapter = (struct zfcp_adapter *) sdp->host->hostdata[0]; | 152 | zfcp_sdev->port = port; |
163 | if (!adapter) | 153 | zfcp_sdev->latencies.write.channel.min = 0xFFFFFFFF; |
164 | goto out; | 154 | zfcp_sdev->latencies.write.fabric.min = 0xFFFFFFFF; |
155 | zfcp_sdev->latencies.read.channel.min = 0xFFFFFFFF; | ||
156 | zfcp_sdev->latencies.read.fabric.min = 0xFFFFFFFF; | ||
157 | zfcp_sdev->latencies.cmd.channel.min = 0xFFFFFFFF; | ||
158 | zfcp_sdev->latencies.cmd.fabric.min = 0xFFFFFFFF; | ||
159 | spin_lock_init(&zfcp_sdev->latencies.lock); | ||
165 | 160 | ||
166 | int_to_scsilun(sdp->lun, (struct scsi_lun *)&lun); | 161 | zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING); |
167 | unit = zfcp_unit_lookup(adapter, sdp->id, lun); | 162 | zfcp_erp_lun_reopen(sdev, 0, "scsla_1", NULL); |
168 | if (unit) { | 163 | zfcp_erp_wait(port->adapter); |
169 | sdp->hostdata = unit; | 164 | |
170 | unit->device = sdp; | 165 | return 0; |
171 | return 0; | ||
172 | } | ||
173 | out: | ||
174 | return -ENXIO; | ||
175 | } | 166 | } |
176 | 167 | ||
177 | static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) | 168 | static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) |
@@ -179,7 +170,6 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) | |||
179 | struct Scsi_Host *scsi_host = scpnt->device->host; | 170 | struct Scsi_Host *scsi_host = scpnt->device->host; |
180 | struct zfcp_adapter *adapter = | 171 | struct zfcp_adapter *adapter = |
181 | (struct zfcp_adapter *) scsi_host->hostdata[0]; | 172 | (struct zfcp_adapter *) scsi_host->hostdata[0]; |
182 | struct zfcp_unit *unit = scpnt->device->hostdata; | ||
183 | struct zfcp_fsf_req *old_req, *abrt_req; | 173 | struct zfcp_fsf_req *old_req, *abrt_req; |
184 | unsigned long flags; | 174 | unsigned long flags; |
185 | unsigned long old_reqid = (unsigned long) scpnt->host_scribble; | 175 | unsigned long old_reqid = (unsigned long) scpnt->host_scribble; |
@@ -203,7 +193,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) | |||
203 | write_unlock_irqrestore(&adapter->abort_lock, flags); | 193 | write_unlock_irqrestore(&adapter->abort_lock, flags); |
204 | 194 | ||
205 | while (retry--) { | 195 | while (retry--) { |
206 | abrt_req = zfcp_fsf_abort_fcp_command(old_reqid, unit); | 196 | abrt_req = zfcp_fsf_abort_fcp_cmnd(scpnt); |
207 | if (abrt_req) | 197 | if (abrt_req) |
208 | break; | 198 | break; |
209 | 199 | ||
@@ -238,14 +228,14 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) | |||
238 | 228 | ||
239 | static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags) | 229 | static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags) |
240 | { | 230 | { |
241 | struct zfcp_unit *unit = scpnt->device->hostdata; | 231 | struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device); |
242 | struct zfcp_adapter *adapter = unit->port->adapter; | 232 | struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; |
243 | struct zfcp_fsf_req *fsf_req = NULL; | 233 | struct zfcp_fsf_req *fsf_req = NULL; |
244 | int retval = SUCCESS, ret; | 234 | int retval = SUCCESS, ret; |
245 | int retry = 3; | 235 | int retry = 3; |
246 | 236 | ||
247 | while (retry--) { | 237 | while (retry--) { |
248 | fsf_req = zfcp_fsf_send_fcp_ctm(unit, tm_flags); | 238 | fsf_req = zfcp_fsf_fcp_task_mgmt(scpnt, tm_flags); |
249 | if (fsf_req) | 239 | if (fsf_req) |
250 | break; | 240 | break; |
251 | 241 | ||
@@ -256,7 +246,7 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags) | |||
256 | 246 | ||
257 | if (!(atomic_read(&adapter->status) & | 247 | if (!(atomic_read(&adapter->status) & |
258 | ZFCP_STATUS_COMMON_RUNNING)) { | 248 | ZFCP_STATUS_COMMON_RUNNING)) { |
259 | zfcp_dbf_scsi_devreset("nres", tm_flags, unit, scpnt); | 249 | zfcp_dbf_scsi_devreset("nres", scpnt, tm_flags); |
260 | return SUCCESS; | 250 | return SUCCESS; |
261 | } | 251 | } |
262 | } | 252 | } |
@@ -266,10 +256,10 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags) | |||
266 | wait_for_completion(&fsf_req->completion); | 256 | wait_for_completion(&fsf_req->completion); |
267 | 257 | ||
268 | if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) { | 258 | if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) { |
269 | zfcp_dbf_scsi_devreset("fail", tm_flags, unit, scpnt); | 259 | zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags); |
270 | retval = FAILED; | 260 | retval = FAILED; |
271 | } else | 261 | } else |
272 | zfcp_dbf_scsi_devreset("okay", tm_flags, unit, scpnt); | 262 | zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags); |
273 | 263 | ||
274 | zfcp_fsf_req_free(fsf_req); | 264 | zfcp_fsf_req_free(fsf_req); |
275 | return retval; | 265 | return retval; |
@@ -287,8 +277,8 @@ static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *scpnt) | |||
287 | 277 | ||
288 | static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) | 278 | static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) |
289 | { | 279 | { |
290 | struct zfcp_unit *unit = scpnt->device->hostdata; | 280 | struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device); |
291 | struct zfcp_adapter *adapter = unit->port->adapter; | 281 | struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; |
292 | int ret; | 282 | int ret; |
293 | 283 | ||
294 | zfcp_erp_adapter_reopen(adapter, 0, "schrh_1", scpnt); | 284 | zfcp_erp_adapter_reopen(adapter, 0, "schrh_1", scpnt); |
@@ -319,8 +309,8 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter) | |||
319 | } | 309 | } |
320 | 310 | ||
321 | /* tell the SCSI stack some characteristics of this adapter */ | 311 | /* tell the SCSI stack some characteristics of this adapter */ |
322 | adapter->scsi_host->max_id = 1; | 312 | adapter->scsi_host->max_id = 511; |
323 | adapter->scsi_host->max_lun = 1; | 313 | adapter->scsi_host->max_lun = 0xFFFFFFFF; |
324 | adapter->scsi_host->max_channel = 0; | 314 | adapter->scsi_host->max_channel = 0; |
325 | adapter->scsi_host->unique_id = dev_id.devno; | 315 | adapter->scsi_host->unique_id = dev_id.devno; |
326 | adapter->scsi_host->max_cmd_len = 16; /* in struct fcp_cmnd */ | 316 | adapter->scsi_host->max_cmd_len = 16; /* in struct fcp_cmnd */ |
@@ -534,20 +524,6 @@ static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport) | |||
534 | } | 524 | } |
535 | } | 525 | } |
536 | 526 | ||
537 | static void zfcp_scsi_queue_unit_register(struct zfcp_port *port) | ||
538 | { | ||
539 | struct zfcp_unit *unit; | ||
540 | |||
541 | read_lock_irq(&port->unit_list_lock); | ||
542 | list_for_each_entry(unit, &port->unit_list, list) { | ||
543 | get_device(&unit->dev); | ||
544 | if (scsi_queue_work(port->adapter->scsi_host, | ||
545 | &unit->scsi_work) <= 0) | ||
546 | put_device(&unit->dev); | ||
547 | } | ||
548 | read_unlock_irq(&port->unit_list_lock); | ||
549 | } | ||
550 | |||
551 | static void zfcp_scsi_rport_register(struct zfcp_port *port) | 527 | static void zfcp_scsi_rport_register(struct zfcp_port *port) |
552 | { | 528 | { |
553 | struct fc_rport_identifiers ids; | 529 | struct fc_rport_identifiers ids; |
@@ -574,7 +550,7 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port) | |||
574 | port->rport = rport; | 550 | port->rport = rport; |
575 | port->starget_id = rport->scsi_target_id; | 551 | port->starget_id = rport->scsi_target_id; |
576 | 552 | ||
577 | zfcp_scsi_queue_unit_register(port); | 553 | zfcp_unit_queue_scsi_scan(port); |
578 | } | 554 | } |
579 | 555 | ||
580 | static void zfcp_scsi_rport_block(struct zfcp_port *port) | 556 | static void zfcp_scsi_rport_block(struct zfcp_port *port) |
@@ -638,29 +614,6 @@ void zfcp_scsi_rport_work(struct work_struct *work) | |||
638 | } | 614 | } |
639 | 615 | ||
640 | /** | 616 | /** |
641 | * zfcp_scsi_scan - Register LUN with SCSI midlayer | ||
642 | * @unit: The LUN/unit to register | ||
643 | */ | ||
644 | void zfcp_scsi_scan(struct zfcp_unit *unit) | ||
645 | { | ||
646 | struct fc_rport *rport = unit->port->rport; | ||
647 | |||
648 | if (rport && rport->port_state == FC_PORTSTATE_ONLINE) | ||
649 | scsi_scan_target(&rport->dev, 0, rport->scsi_target_id, | ||
650 | scsilun_to_int((struct scsi_lun *) | ||
651 | &unit->fcp_lun), 0); | ||
652 | } | ||
653 | |||
654 | void zfcp_scsi_scan_work(struct work_struct *work) | ||
655 | { | ||
656 | struct zfcp_unit *unit = container_of(work, struct zfcp_unit, | ||
657 | scsi_work); | ||
658 | |||
659 | zfcp_scsi_scan(unit); | ||
660 | put_device(&unit->dev); | ||
661 | } | ||
662 | |||
663 | /** | ||
664 | * zfcp_scsi_set_prot - Configure DIF/DIX support in scsi_host | 617 | * zfcp_scsi_set_prot - Configure DIF/DIX support in scsi_host |
665 | * @adapter: The adapter where to configure DIF/DIX for the SCSI host | 618 | * @adapter: The adapter where to configure DIF/DIX for the SCSI host |
666 | */ | 619 | */ |
@@ -681,6 +634,7 @@ void zfcp_scsi_set_prot(struct zfcp_adapter *adapter) | |||
681 | adapter->adapter_features & FSF_FEATURE_DIX_PROT_TCPIP) { | 634 | adapter->adapter_features & FSF_FEATURE_DIX_PROT_TCPIP) { |
682 | mask |= SHOST_DIX_TYPE1_PROTECTION; | 635 | mask |= SHOST_DIX_TYPE1_PROTECTION; |
683 | scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP); | 636 | scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP); |
637 | shost->sg_prot_tablesize = ZFCP_QDIO_MAX_SBALES_PER_REQ / 2; | ||
684 | shost->sg_tablesize = ZFCP_QDIO_MAX_SBALES_PER_REQ / 2; | 638 | shost->sg_tablesize = ZFCP_QDIO_MAX_SBALES_PER_REQ / 2; |
685 | shost->max_sectors = ZFCP_QDIO_MAX_SBALES_PER_REQ * 8 / 2; | 639 | shost->max_sectors = ZFCP_QDIO_MAX_SBALES_PER_REQ * 8 / 2; |
686 | } | 640 | } |
@@ -734,7 +688,6 @@ struct fc_function_template zfcp_transport_functions = { | |||
734 | .show_host_port_type = 1, | 688 | .show_host_port_type = 1, |
735 | .show_host_speed = 1, | 689 | .show_host_speed = 1, |
736 | .show_host_port_id = 1, | 690 | .show_host_port_id = 1, |
737 | .disable_target_scan = 1, | ||
738 | .dd_bsg_size = sizeof(struct zfcp_fsf_ct_els), | 691 | .dd_bsg_size = sizeof(struct zfcp_fsf_ct_els), |
739 | }; | 692 | }; |
740 | 693 | ||
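Editor's note on the DIF/DIX hunk above: the patch enables DIX type 1 protection with an IP-checksum guard and, new here, also sizes the protection scatter-gather table (sg_prot_tablesize). Below is a minimal, hypothetical sketch of that host-template setup pattern; my_configure_prot and MY_MAX_SBALES are placeholder names invented for illustration and are not part of zfcp.

    #include <scsi/scsi_host.h>

    /* Placeholder standing in for ZFCP_QDIO_MAX_SBALES_PER_REQ. */
    #define MY_MAX_SBALES 124

    static void my_configure_prot(struct Scsi_Host *shost, int has_dif, int has_dix_ip)
    {
    	unsigned int mask = 0;

    	if (has_dif)
    		mask |= SHOST_DIF_TYPE1_PROTECTION;

    	if (has_dix_ip) {
    		/* DIX with IP-checksum guard: data and protection data share
    		 * the available scatter-gather entries, so halve both tables. */
    		mask |= SHOST_DIX_TYPE1_PROTECTION;
    		scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP);
    		shost->sg_prot_tablesize = MY_MAX_SBALES / 2;
    		shost->sg_tablesize = MY_MAX_SBALES / 2;
    		shost->max_sectors = MY_MAX_SBALES * 8 / 2;
    	}

    	scsi_host_set_prot(shost, mask);
    }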
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c index b4561c86e230..2f2c54f4718f 100644 --- a/drivers/s390/scsi/zfcp_sysfs.c +++ b/drivers/s390/scsi/zfcp_sysfs.c | |||
@@ -68,63 +68,96 @@ ZFCP_DEFINE_ATTR(zfcp_port, port, access_denied, "%d\n", | |||
68 | ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0); | 68 | ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0); |
69 | 69 | ||
70 | ZFCP_DEFINE_ATTR(zfcp_unit, unit, status, "0x%08x\n", | 70 | ZFCP_DEFINE_ATTR(zfcp_unit, unit, status, "0x%08x\n", |
71 | atomic_read(&unit->status)); | 71 | zfcp_unit_sdev_status(unit)); |
72 | ZFCP_DEFINE_ATTR(zfcp_unit, unit, in_recovery, "%d\n", | 72 | ZFCP_DEFINE_ATTR(zfcp_unit, unit, in_recovery, "%d\n", |
73 | (atomic_read(&unit->status) & | 73 | (zfcp_unit_sdev_status(unit) & |
74 | ZFCP_STATUS_COMMON_ERP_INUSE) != 0); | 74 | ZFCP_STATUS_COMMON_ERP_INUSE) != 0); |
75 | ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_denied, "%d\n", | 75 | ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_denied, "%d\n", |
76 | (atomic_read(&unit->status) & | 76 | (zfcp_unit_sdev_status(unit) & |
77 | ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0); | 77 | ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0); |
78 | ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_shared, "%d\n", | 78 | ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_shared, "%d\n", |
79 | (atomic_read(&unit->status) & | 79 | (zfcp_unit_sdev_status(unit) & |
80 | ZFCP_STATUS_UNIT_SHARED) != 0); | 80 | ZFCP_STATUS_LUN_SHARED) != 0); |
81 | ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_readonly, "%d\n", | 81 | ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_readonly, "%d\n", |
82 | (atomic_read(&unit->status) & | 82 | (zfcp_unit_sdev_status(unit) & |
83 | ZFCP_STATUS_UNIT_READONLY) != 0); | 83 | ZFCP_STATUS_LUN_READONLY) != 0); |
84 | 84 | ||
85 | #define ZFCP_SYSFS_FAILED(_feat_def, _feat, _adapter, _mod_id, _reopen_id) \ | 85 | static ssize_t zfcp_sysfs_port_failed_show(struct device *dev, |
86 | static ssize_t zfcp_sysfs_##_feat##_failed_show(struct device *dev, \ | 86 | struct device_attribute *attr, |
87 | struct device_attribute *attr, \ | 87 | char *buf) |
88 | char *buf) \ | 88 | { |
89 | { \ | 89 | struct zfcp_port *port = container_of(dev, struct zfcp_port, dev); |
90 | struct _feat_def *_feat = container_of(dev, struct _feat_def, dev); \ | 90 | |
91 | \ | 91 | if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) |
92 | if (atomic_read(&_feat->status) & ZFCP_STATUS_COMMON_ERP_FAILED) \ | 92 | return sprintf(buf, "1\n"); |
93 | return sprintf(buf, "1\n"); \ | 93 | |
94 | else \ | 94 | return sprintf(buf, "0\n"); |
95 | return sprintf(buf, "0\n"); \ | 95 | } |
96 | } \ | 96 | |
97 | static ssize_t zfcp_sysfs_##_feat##_failed_store(struct device *dev, \ | 97 | static ssize_t zfcp_sysfs_port_failed_store(struct device *dev, |
98 | struct device_attribute *attr,\ | 98 | struct device_attribute *attr, |
99 | const char *buf, size_t count)\ | 99 | const char *buf, size_t count) |
100 | { \ | 100 | { |
101 | struct _feat_def *_feat = container_of(dev, struct _feat_def, dev); \ | 101 | struct zfcp_port *port = container_of(dev, struct zfcp_port, dev); |
102 | unsigned long val; \ | 102 | unsigned long val; |
103 | int retval = 0; \ | 103 | |
104 | \ | 104 | if (strict_strtoul(buf, 0, &val) || val != 0) |
105 | if (!(_feat && get_device(&_feat->dev))) \ | 105 | return -EINVAL; |
106 | return -EBUSY; \ | 106 | |
107 | \ | 107 | zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_RUNNING); |
108 | if (strict_strtoul(buf, 0, &val) || val != 0) { \ | 108 | zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, "sypfai2", |
109 | retval = -EINVAL; \ | 109 | NULL); |
110 | goto out; \ | 110 | zfcp_erp_wait(port->adapter); |
111 | } \ | ||
112 | \ | ||
113 | zfcp_erp_modify_##_feat##_status(_feat, _mod_id, NULL, \ | ||
114 | ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);\ | ||
115 | zfcp_erp_##_feat##_reopen(_feat, ZFCP_STATUS_COMMON_ERP_FAILED, \ | ||
116 | _reopen_id, NULL); \ | ||
117 | zfcp_erp_wait(_adapter); \ | ||
118 | out: \ | ||
119 | put_device(&_feat->dev); \ | ||
120 | return retval ? retval : (ssize_t) count; \ | ||
121 | } \ | ||
122 | static ZFCP_DEV_ATTR(_feat, failed, S_IWUSR | S_IRUGO, \ | ||
123 | zfcp_sysfs_##_feat##_failed_show, \ | ||
124 | zfcp_sysfs_##_feat##_failed_store); | ||
125 | 111 | ||
126 | ZFCP_SYSFS_FAILED(zfcp_port, port, port->adapter, "sypfai1", "sypfai2"); | 112 | return count; |
127 | ZFCP_SYSFS_FAILED(zfcp_unit, unit, unit->port->adapter, "syufai1", "syufai2"); | 113 | } |
114 | static ZFCP_DEV_ATTR(port, failed, S_IWUSR | S_IRUGO, | ||
115 | zfcp_sysfs_port_failed_show, | ||
116 | zfcp_sysfs_port_failed_store); | ||
117 | |||
118 | static ssize_t zfcp_sysfs_unit_failed_show(struct device *dev, | ||
119 | struct device_attribute *attr, | ||
120 | char *buf) | ||
121 | { | ||
122 | struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev); | ||
123 | struct scsi_device *sdev; | ||
124 | unsigned int status, failed = 1; | ||
125 | |||
126 | sdev = zfcp_unit_sdev(unit); | ||
127 | if (sdev) { | ||
128 | status = atomic_read(&sdev_to_zfcp(sdev)->status); | ||
129 | failed = status & ZFCP_STATUS_COMMON_ERP_FAILED ? 1 : 0; | ||
130 | scsi_device_put(sdev); | ||
131 | } | ||
132 | |||
133 | return sprintf(buf, "%d\n", failed); | ||
134 | } | ||
135 | |||
136 | static ssize_t zfcp_sysfs_unit_failed_store(struct device *dev, | ||
137 | struct device_attribute *attr, | ||
138 | const char *buf, size_t count) | ||
139 | { | ||
140 | struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev); | ||
141 | unsigned long val; | ||
142 | struct scsi_device *sdev; | ||
143 | |||
144 | if (strict_strtoul(buf, 0, &val) || val != 0) | ||
145 | return -EINVAL; | ||
146 | |||
147 | sdev = zfcp_unit_sdev(unit); | ||
148 | if (sdev) { | ||
149 | zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING); | ||
150 | zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED, | ||
151 | "syufai2", NULL); | ||
152 | zfcp_erp_wait(unit->port->adapter); | ||
153 | } else | ||
154 | zfcp_unit_scsi_scan(unit); | ||
155 | |||
156 | return count; | ||
157 | } | ||
158 | static ZFCP_DEV_ATTR(unit, failed, S_IWUSR | S_IRUGO, | ||
159 | zfcp_sysfs_unit_failed_show, | ||
160 | zfcp_sysfs_unit_failed_store); | ||
128 | 161 | ||
129 | static ssize_t zfcp_sysfs_adapter_failed_show(struct device *dev, | 162 | static ssize_t zfcp_sysfs_adapter_failed_show(struct device *dev, |
130 | struct device_attribute *attr, | 163 | struct device_attribute *attr, |
@@ -163,8 +196,7 @@ static ssize_t zfcp_sysfs_adapter_failed_store(struct device *dev, | |||
163 | goto out; | 196 | goto out; |
164 | } | 197 | } |
165 | 198 | ||
166 | zfcp_erp_modify_adapter_status(adapter, "syafai1", NULL, | 199 | zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING); |
167 | ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET); | ||
168 | zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, | 200 | zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, |
169 | "syafai2", NULL); | 201 | "syafai2", NULL); |
170 | zfcp_erp_wait(adapter); | 202 | zfcp_erp_wait(adapter); |
@@ -257,28 +289,15 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev, | |||
257 | const char *buf, size_t count) | 289 | const char *buf, size_t count) |
258 | { | 290 | { |
259 | struct zfcp_port *port = container_of(dev, struct zfcp_port, dev); | 291 | struct zfcp_port *port = container_of(dev, struct zfcp_port, dev); |
260 | struct zfcp_unit *unit; | ||
261 | u64 fcp_lun; | 292 | u64 fcp_lun; |
262 | int retval = -EINVAL; | ||
263 | |||
264 | if (!(port && get_device(&port->dev))) | ||
265 | return -EBUSY; | ||
266 | 293 | ||
267 | if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun)) | 294 | if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun)) |
268 | goto out; | 295 | return -EINVAL; |
269 | 296 | ||
270 | unit = zfcp_unit_enqueue(port, fcp_lun); | 297 | if (zfcp_unit_add(port, fcp_lun)) |
271 | if (IS_ERR(unit)) | 298 | return -EINVAL; |
272 | goto out; | ||
273 | else | ||
274 | retval = 0; | ||
275 | 299 | ||
276 | zfcp_erp_unit_reopen(unit, 0, "syuas_1", NULL); | 300 | return count; |
277 | zfcp_erp_wait(unit->port->adapter); | ||
278 | zfcp_scsi_scan(unit); | ||
279 | out: | ||
280 | put_device(&port->dev); | ||
281 | return retval ? retval : (ssize_t) count; | ||
282 | } | 301 | } |
283 | static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store); | 302 | static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store); |
284 | 303 | ||
@@ -287,42 +306,15 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev, | |||
287 | const char *buf, size_t count) | 306 | const char *buf, size_t count) |
288 | { | 307 | { |
289 | struct zfcp_port *port = container_of(dev, struct zfcp_port, dev); | 308 | struct zfcp_port *port = container_of(dev, struct zfcp_port, dev); |
290 | struct zfcp_unit *unit; | ||
291 | u64 fcp_lun; | 309 | u64 fcp_lun; |
292 | int retval = -EINVAL; | ||
293 | struct scsi_device *sdev; | ||
294 | |||
295 | if (!(port && get_device(&port->dev))) | ||
296 | return -EBUSY; | ||
297 | 310 | ||
298 | if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun)) | 311 | if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun)) |
299 | goto out; | 312 | return -EINVAL; |
300 | 313 | ||
301 | unit = zfcp_get_unit_by_lun(port, fcp_lun); | 314 | if (zfcp_unit_remove(port, fcp_lun)) |
302 | if (!unit) | 315 | return -EINVAL; |
303 | goto out; | ||
304 | else | ||
305 | retval = 0; | ||
306 | |||
307 | sdev = scsi_device_lookup(port->adapter->scsi_host, 0, | ||
308 | port->starget_id, | ||
309 | scsilun_to_int((struct scsi_lun *)&fcp_lun)); | ||
310 | if (sdev) { | ||
311 | scsi_remove_device(sdev); | ||
312 | scsi_device_put(sdev); | ||
313 | } | ||
314 | |||
315 | write_lock_irq(&port->unit_list_lock); | ||
316 | list_del(&unit->list); | ||
317 | write_unlock_irq(&port->unit_list_lock); | ||
318 | |||
319 | put_device(&unit->dev); | ||
320 | 316 | ||
321 | zfcp_erp_unit_shutdown(unit, 0, "syurs_1", NULL); | 317 | return count; |
322 | zfcp_device_unregister(&unit->dev, &zfcp_sysfs_unit_attrs); | ||
323 | out: | ||
324 | put_device(&port->dev); | ||
325 | return retval ? retval : (ssize_t) count; | ||
326 | } | 318 | } |
327 | static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store); | 319 | static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store); |
328 | 320 | ||
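Editor's note: after this change the two attributes above remain the administrator-facing interface for manually configured LUNs, but they now delegate to the new helpers. Writing the hexadecimal FCP LUN to the port's unit_add attribute (for example `echo 0x<fcp_lun> > .../<wwpn>/unit_add`, path abridged and illustrative only) creates the zfcp_unit and triggers the SCSI scan through zfcp_unit_add(); writing the same value to unit_remove tears the unit down through zfcp_unit_remove(). The exact sysfs location depends on the adapter bus-ID and port WWPN.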
@@ -363,9 +355,9 @@ zfcp_sysfs_unit_##_name##_latency_show(struct device *dev, \ | |||
363 | struct device_attribute *attr, \ | 355 | struct device_attribute *attr, \ |
364 | char *buf) { \ | 356 | char *buf) { \ |
365 | struct scsi_device *sdev = to_scsi_device(dev); \ | 357 | struct scsi_device *sdev = to_scsi_device(dev); \ |
366 | struct zfcp_unit *unit = sdev->hostdata; \ | 358 | struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); \ |
367 | struct zfcp_latencies *lat = &unit->latencies; \ | 359 | struct zfcp_latencies *lat = &zfcp_sdev->latencies; \ |
368 | struct zfcp_adapter *adapter = unit->port->adapter; \ | 360 | struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; \ |
369 | unsigned long long fsum, fmin, fmax, csum, cmin, cmax, cc; \ | 361 | unsigned long long fsum, fmin, fmax, csum, cmin, cmax, cc; \ |
370 | \ | 362 | \ |
371 | spin_lock_bh(&lat->lock); \ | 363 | spin_lock_bh(&lat->lock); \ |
@@ -394,8 +386,8 @@ zfcp_sysfs_unit_##_name##_latency_store(struct device *dev, \ | |||
394 | const char *buf, size_t count) \ | 386 | const char *buf, size_t count) \ |
395 | { \ | 387 | { \ |
396 | struct scsi_device *sdev = to_scsi_device(dev); \ | 388 | struct scsi_device *sdev = to_scsi_device(dev); \ |
397 | struct zfcp_unit *unit = sdev->hostdata; \ | 389 | struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); \ |
398 | struct zfcp_latencies *lat = &unit->latencies; \ | 390 | struct zfcp_latencies *lat = &zfcp_sdev->latencies; \ |
399 | unsigned long flags; \ | 391 | unsigned long flags; \ |
400 | \ | 392 | \ |
401 | spin_lock_irqsave(&lat->lock, flags); \ | 393 | spin_lock_irqsave(&lat->lock, flags); \ |
@@ -423,19 +415,28 @@ static ssize_t zfcp_sysfs_scsi_##_name##_show(struct device *dev, \ | |||
423 | struct device_attribute *attr,\ | 415 | struct device_attribute *attr,\ |
424 | char *buf) \ | 416 | char *buf) \ |
425 | { \ | 417 | { \ |
426 | struct scsi_device *sdev = to_scsi_device(dev); \ | 418 | struct scsi_device *sdev = to_scsi_device(dev); \ |
427 | struct zfcp_unit *unit = sdev->hostdata; \ | 419 | struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); \ |
420 | struct zfcp_port *port = zfcp_sdev->port; \ | ||
428 | \ | 421 | \ |
429 | return sprintf(buf, _format, _value); \ | 422 | return sprintf(buf, _format, _value); \ |
430 | } \ | 423 | } \ |
431 | static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_scsi_##_name##_show, NULL); | 424 | static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_scsi_##_name##_show, NULL); |
432 | 425 | ||
433 | ZFCP_DEFINE_SCSI_ATTR(hba_id, "%s\n", | 426 | ZFCP_DEFINE_SCSI_ATTR(hba_id, "%s\n", |
434 | dev_name(&unit->port->adapter->ccw_device->dev)); | 427 | dev_name(&port->adapter->ccw_device->dev)); |
435 | ZFCP_DEFINE_SCSI_ATTR(wwpn, "0x%016llx\n", | 428 | ZFCP_DEFINE_SCSI_ATTR(wwpn, "0x%016llx\n", |
436 | (unsigned long long) unit->port->wwpn); | 429 | (unsigned long long) port->wwpn); |
437 | ZFCP_DEFINE_SCSI_ATTR(fcp_lun, "0x%016llx\n", | 430 | |
438 | (unsigned long long) unit->fcp_lun); | 431 | static ssize_t zfcp_sysfs_scsi_fcp_lun_show(struct device *dev, |
432 | struct device_attribute *attr, | ||
433 | char *buf) | ||
434 | { | ||
435 | struct scsi_device *sdev = to_scsi_device(dev); | ||
436 | |||
437 | return sprintf(buf, "0x%016llx\n", zfcp_scsi_dev_lun(sdev)); | ||
438 | } | ||
439 | static DEVICE_ATTR(fcp_lun, S_IRUGO, zfcp_sysfs_scsi_fcp_lun_show, NULL); | ||
439 | 440 | ||
440 | struct device_attribute *zfcp_sysfs_sdev_attrs[] = { | 441 | struct device_attribute *zfcp_sysfs_sdev_attrs[] = { |
441 | &dev_attr_fcp_lun, | 442 | &dev_attr_fcp_lun, |
diff --git a/drivers/s390/scsi/zfcp_unit.c b/drivers/s390/scsi/zfcp_unit.c new file mode 100644 index 000000000000..1119c535a667 --- /dev/null +++ b/drivers/s390/scsi/zfcp_unit.c | |||
@@ -0,0 +1,244 @@ | |||
1 | /* | ||
2 | * zfcp device driver | ||
3 | * | ||
4 | * Tracking of manually configured LUNs and helper functions to | ||
5 | * register the LUNs with the SCSI midlayer. | ||
6 | * | ||
7 | * Copyright IBM Corporation 2010 | ||
8 | */ | ||
9 | |||
10 | #include "zfcp_def.h" | ||
11 | #include "zfcp_ext.h" | ||
12 | |||
13 | /** | ||
14 | * zfcp_unit_scsi_scan - Register LUN with SCSI midlayer | ||
15 | * @unit: The zfcp LUN/unit to register | ||
16 | * | ||
17 | * When the SCSI midlayer is not allowed to automatically scan and | ||
18 | * attach SCSI devices, zfcp has to register the individual devices with | ||
19 | * the SCSI midlayer. | ||
20 | */ | ||
21 | void zfcp_unit_scsi_scan(struct zfcp_unit *unit) | ||
22 | { | ||
23 | struct fc_rport *rport = unit->port->rport; | ||
24 | unsigned int lun; | ||
25 | |||
26 | lun = scsilun_to_int((struct scsi_lun *) &unit->fcp_lun); | ||
27 | |||
28 | if (rport && rport->port_state == FC_PORTSTATE_ONLINE) | ||
29 | scsi_scan_target(&rport->dev, 0, rport->scsi_target_id, lun, 1); | ||
30 | } | ||
31 | |||
32 | static void zfcp_unit_scsi_scan_work(struct work_struct *work) | ||
33 | { | ||
34 | struct zfcp_unit *unit = container_of(work, struct zfcp_unit, | ||
35 | scsi_work); | ||
36 | |||
37 | zfcp_unit_scsi_scan(unit); | ||
38 | put_device(&unit->dev); | ||
39 | } | ||
40 | |||
41 | /** | ||
42 | * zfcp_unit_queue_scsi_scan - Register configured units on port | ||
43 | * @port: The zfcp_port where to register units | ||
44 | * | ||
45 | * After opening a port, all units configured on this port have to be | ||
46 | * registered with the SCSI midlayer. This function should be called | ||
47 | * after calling fc_remote_port_add, so that the fc_rport is already | ||
48 | * ONLINE and the call to scsi_scan_target runs the same way as the | ||
49 | * call in the FC transport class. | ||
50 | */ | ||
51 | void zfcp_unit_queue_scsi_scan(struct zfcp_port *port) | ||
52 | { | ||
53 | struct zfcp_unit *unit; | ||
54 | |||
55 | read_lock_irq(&port->unit_list_lock); | ||
56 | list_for_each_entry(unit, &port->unit_list, list) { | ||
57 | get_device(&unit->dev); | ||
58 | if (scsi_queue_work(port->adapter->scsi_host, | ||
59 | &unit->scsi_work) <= 0) | ||
60 | put_device(&unit->dev); | ||
61 | } | ||
62 | read_unlock_irq(&port->unit_list_lock); | ||
63 | } | ||
64 | |||
65 | static struct zfcp_unit *_zfcp_unit_find(struct zfcp_port *port, u64 fcp_lun) | ||
66 | { | ||
67 | struct zfcp_unit *unit; | ||
68 | |||
69 | list_for_each_entry(unit, &port->unit_list, list) | ||
70 | if (unit->fcp_lun == fcp_lun) { | ||
71 | get_device(&unit->dev); | ||
72 | return unit; | ||
73 | } | ||
74 | |||
75 | return NULL; | ||
76 | } | ||
77 | |||
78 | /** | ||
79 | * zfcp_unit_find - Find and return zfcp_unit with specified FCP LUN | ||
80 | * @port: zfcp_port where to look for the unit | ||
81 | * @fcp_lun: 64 Bit FCP LUN used to identify the zfcp_unit | ||
82 | * | ||
83 | * If zfcp_unit is found, a reference is acquired that has to be | ||
84 | * released later. | ||
85 | * | ||
86 | * Returns: Pointer to the zfcp_unit, or NULL if there is no zfcp_unit | ||
87 | * with the specified FCP LUN. | ||
88 | */ | ||
89 | struct zfcp_unit *zfcp_unit_find(struct zfcp_port *port, u64 fcp_lun) | ||
90 | { | ||
91 | struct zfcp_unit *unit; | ||
92 | |||
93 | read_lock_irq(&port->unit_list_lock); | ||
94 | unit = _zfcp_unit_find(port, fcp_lun); | ||
95 | read_unlock_irq(&port->unit_list_lock); | ||
96 | return unit; | ||
97 | } | ||
98 | |||
99 | /** | ||
100 | * zfcp_unit_release - Drop reference to zfcp_port and free memory of zfcp_unit. | ||
101 | * @dev: pointer to device in zfcp_unit | ||
102 | */ | ||
103 | static void zfcp_unit_release(struct device *dev) | ||
104 | { | ||
105 | struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev); | ||
106 | |||
107 | put_device(&unit->port->dev); | ||
108 | kfree(unit); | ||
109 | } | ||
110 | |||
111 | /** | ||
112 | * zfcp_unit_add - Add a unit to the unit list of a port. | ||
113 | * @port: pointer to the port where the unit is added | ||
114 | * @fcp_lun: FCP LUN of the unit to be added | ||
115 | * Returns: 0 on success, or a negative errno on failure | ||
116 | * | ||
117 | * Sets up some unit internal structures and creates the sysfs entry. | ||
118 | */ | ||
119 | int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun) | ||
120 | { | ||
121 | struct zfcp_unit *unit; | ||
122 | |||
123 | unit = zfcp_unit_find(port, fcp_lun); | ||
124 | if (unit) { | ||
125 | put_device(&unit->dev); | ||
126 | return -EEXIST; | ||
127 | } | ||
128 | |||
129 | unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL); | ||
130 | if (!unit) | ||
131 | return -ENOMEM; | ||
132 | |||
133 | unit->port = port; | ||
134 | unit->fcp_lun = fcp_lun; | ||
135 | unit->dev.parent = &port->dev; | ||
136 | unit->dev.release = zfcp_unit_release; | ||
137 | INIT_WORK(&unit->scsi_work, zfcp_unit_scsi_scan_work); | ||
138 | |||
139 | if (dev_set_name(&unit->dev, "0x%016llx", | ||
140 | (unsigned long long) fcp_lun)) { | ||
141 | kfree(unit); | ||
142 | return -ENOMEM; | ||
143 | } | ||
144 | |||
145 | if (device_register(&unit->dev)) { | ||
146 | put_device(&unit->dev); | ||
147 | return -ENOMEM; | ||
148 | } | ||
149 | |||
150 | if (sysfs_create_group(&unit->dev.kobj, &zfcp_sysfs_unit_attrs)) { | ||
151 | device_unregister(&unit->dev); | ||
152 | return -EINVAL; | ||
153 | } | ||
154 | |||
155 | get_device(&port->dev); | ||
156 | |||
157 | write_lock_irq(&port->unit_list_lock); | ||
158 | list_add_tail(&unit->list, &port->unit_list); | ||
159 | write_unlock_irq(&port->unit_list_lock); | ||
160 | |||
161 | zfcp_unit_scsi_scan(unit); | ||
162 | |||
163 | return 0; | ||
164 | } | ||
165 | |||
166 | /** | ||
167 | * zfcp_unit_sdev - Return SCSI device for zfcp_unit | ||
168 | * @unit: The zfcp_unit to get the SCSI device for | ||
169 | * | ||
170 | * Returns: scsi_device pointer on success, NULL if there is no SCSI | ||
171 | * device for this zfcp_unit | ||
172 | * | ||
173 | * On success, the caller also holds a reference to the SCSI device | ||
174 | * that must be released with scsi_device_put. | ||
175 | */ | ||
176 | struct scsi_device *zfcp_unit_sdev(struct zfcp_unit *unit) | ||
177 | { | ||
178 | struct Scsi_Host *shost; | ||
179 | struct zfcp_port *port; | ||
180 | unsigned int lun; | ||
181 | |||
182 | lun = scsilun_to_int((struct scsi_lun *) &unit->fcp_lun); | ||
183 | port = unit->port; | ||
184 | shost = port->adapter->scsi_host; | ||
185 | return scsi_device_lookup(shost, 0, port->starget_id, lun); | ||
186 | } | ||
187 | |||
188 | /** | ||
189 | * zfcp_unit_sdev_status - Return zfcp LUN status for SCSI device | ||
190 | * @unit: The unit to look up the SCSI device for | ||
191 | * | ||
192 | * Returns the zfcp LUN status field of the SCSI device if the SCSI device | ||
193 | * for the zfcp_unit exists, 0 otherwise. | ||
194 | */ | ||
195 | unsigned int zfcp_unit_sdev_status(struct zfcp_unit *unit) | ||
196 | { | ||
197 | unsigned int status = 0; | ||
198 | struct scsi_device *sdev; | ||
199 | struct zfcp_scsi_dev *zfcp_sdev; | ||
200 | |||
201 | sdev = zfcp_unit_sdev(unit); | ||
202 | if (sdev) { | ||
203 | zfcp_sdev = sdev_to_zfcp(sdev); | ||
204 | status = atomic_read(&zfcp_sdev->status); | ||
205 | scsi_device_put(sdev); | ||
206 | } | ||
207 | |||
208 | return status; | ||
209 | } | ||
210 | |||
211 | /** | ||
212 | * zfcp_unit_remove - Remove entry from list of configured units | ||
213 | * @port: The port where to remove the unit from the configuration | ||
214 | * @fcp_lun: The 64 bit LUN of the unit to remove | ||
215 | * | ||
216 | * Returns: -EINVAL if a unit with the specified LUN does not exist, | ||
217 | * 0 on success. | ||
218 | */ | ||
219 | int zfcp_unit_remove(struct zfcp_port *port, u64 fcp_lun) | ||
220 | { | ||
221 | struct zfcp_unit *unit; | ||
222 | struct scsi_device *sdev; | ||
223 | |||
224 | write_lock_irq(&port->unit_list_lock); | ||
225 | unit = _zfcp_unit_find(port, fcp_lun); | ||
226 | if (unit) | ||
227 | list_del(&unit->list); | ||
228 | write_unlock_irq(&port->unit_list_lock); | ||
229 | |||
230 | if (!unit) | ||
231 | return -EINVAL; | ||
232 | |||
233 | sdev = zfcp_unit_sdev(unit); | ||
234 | if (sdev) { | ||
235 | scsi_remove_device(sdev); | ||
236 | scsi_device_put(sdev); | ||
237 | } | ||
238 | |||
239 | put_device(&unit->dev); | ||
240 | |||
241 | zfcp_device_unregister(&unit->dev, &zfcp_sysfs_unit_attrs); | ||
242 | |||
243 | return 0; | ||
244 | } | ||
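Editor's note on the new file: zfcp_unit_find() and zfcp_unit_sdev() both hand out counted references that the caller must drop. The following hypothetical caller (example_check_lun is not part of the patch) sketches how those references are expected to be balanced, based on the kernel-doc above.

    /* Hypothetical caller, for illustration only: checks whether the unit
     * identified by fcp_lun currently has a SCSI device attached. */
    static int example_check_lun(struct zfcp_port *port, u64 fcp_lun)
    {
    	struct zfcp_unit *unit;
    	struct scsi_device *sdev;
    	int ret = -ENODEV;

    	unit = zfcp_unit_find(port, fcp_lun);	/* takes a reference on unit->dev */
    	if (!unit)
    		return -ENODEV;

    	sdev = zfcp_unit_sdev(unit);		/* takes a scsi_device reference */
    	if (sdev) {
    		ret = 0;
    		scsi_device_put(sdev);		/* drop the scsi_device reference */
    	}

    	put_device(&unit->dev);			/* drop the reference from zfcp_unit_find */
    	return ret;
    }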