diff options
| author | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2006-12-08 09:53:57 -0500 |
|---|---|---|
| committer | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2006-12-08 09:53:57 -0500 |
| commit | c16375329c2ab4667df873394c4be7a61d163c62 (patch) | |
| tree | 9ee9505e4587ce5f472db3fd09935611b0062f83 /drivers/s390 | |
| parent | e45ccc0562e3f391dcba8b2e8a02551e8e42d8db (diff) | |
[S390] more workqueue fixes.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'drivers/s390')
| -rw-r--r-- | drivers/s390/char/tape.h | 3 | ||||
| -rw-r--r-- | drivers/s390/char/tape_34xx.c | 23 | ||||
| -rw-r--r-- | drivers/s390/char/tape_3590.c | 7 | ||||
| -rw-r--r-- | drivers/s390/char/tape_block.c | 14 | ||||
| -rw-r--r-- | drivers/s390/char/tape_core.c | 14 | ||||
| -rw-r--r-- | drivers/s390/cio/css.h | 2 | ||||
| -rw-r--r-- | drivers/s390/cio/device.c | 43 | ||||
| -rw-r--r-- | drivers/s390/cio/device.h | 4 | ||||
| -rw-r--r-- | drivers/s390/cio/device_fsm.c | 38 | ||||
| -rw-r--r-- | drivers/s390/cio/qdio.c | 8 |
10 files changed, 89 insertions(+), 67 deletions(-)
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h index 1f4c89967be4..c9f1c4c8bb13 100644 --- a/drivers/s390/char/tape.h +++ b/drivers/s390/char/tape.h | |||
| @@ -179,6 +179,7 @@ struct tape_char_data { | |||
| 179 | /* Block Frontend Data */ | 179 | /* Block Frontend Data */ |
| 180 | struct tape_blk_data | 180 | struct tape_blk_data |
| 181 | { | 181 | { |
| 182 | struct tape_device * device; | ||
| 182 | /* Block device request queue. */ | 183 | /* Block device request queue. */ |
| 183 | request_queue_t * request_queue; | 184 | request_queue_t * request_queue; |
| 184 | spinlock_t request_queue_lock; | 185 | spinlock_t request_queue_lock; |
| @@ -240,7 +241,7 @@ struct tape_device { | |||
| 240 | #endif | 241 | #endif |
| 241 | 242 | ||
| 242 | /* Function to start or stop the next request later. */ | 243 | /* Function to start or stop the next request later. */ |
| 243 | struct work_struct tape_dnr; | 244 | struct delayed_work tape_dnr; |
| 244 | }; | 245 | }; |
| 245 | 246 | ||
| 246 | /* Externals from tape_core.c */ | 247 | /* Externals from tape_core.c */ |
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c index 7b95dab913d0..e765875e8db2 100644 --- a/drivers/s390/char/tape_34xx.c +++ b/drivers/s390/char/tape_34xx.c | |||
| @@ -95,6 +95,12 @@ tape_34xx_medium_sense(struct tape_device *device) | |||
| 95 | return rc; | 95 | return rc; |
| 96 | } | 96 | } |
| 97 | 97 | ||
| 98 | struct tape_34xx_work { | ||
| 99 | struct tape_device *device; | ||
| 100 | enum tape_op op; | ||
| 101 | struct work_struct work; | ||
| 102 | }; | ||
| 103 | |||
| 98 | /* | 104 | /* |
| 99 | * These functions are currently used only to schedule a medium_sense for | 105 | * These functions are currently used only to schedule a medium_sense for |
| 100 | * later execution. This is because we get an interrupt whenever a medium | 106 | * later execution. This is because we get an interrupt whenever a medium |
| @@ -103,13 +109,10 @@ tape_34xx_medium_sense(struct tape_device *device) | |||
| 103 | * interrupt handler. | 109 | * interrupt handler. |
| 104 | */ | 110 | */ |
| 105 | static void | 111 | static void |
| 106 | tape_34xx_work_handler(void *data) | 112 | tape_34xx_work_handler(struct work_struct *work) |
| 107 | { | 113 | { |
| 108 | struct { | 114 | struct tape_34xx_work *p = |
| 109 | struct tape_device *device; | 115 | container_of(work, struct tape_34xx_work, work); |
| 110 | enum tape_op op; | ||
| 111 | struct work_struct work; | ||
| 112 | } *p = data; | ||
| 113 | 116 | ||
| 114 | switch(p->op) { | 117 | switch(p->op) { |
| 115 | case TO_MSEN: | 118 | case TO_MSEN: |
| @@ -126,17 +129,13 @@ tape_34xx_work_handler(void *data) | |||
| 126 | static int | 129 | static int |
| 127 | tape_34xx_schedule_work(struct tape_device *device, enum tape_op op) | 130 | tape_34xx_schedule_work(struct tape_device *device, enum tape_op op) |
| 128 | { | 131 | { |
| 129 | struct { | 132 | struct tape_34xx_work *p; |
| 130 | struct tape_device *device; | ||
| 131 | enum tape_op op; | ||
| 132 | struct work_struct work; | ||
| 133 | } *p; | ||
| 134 | 133 | ||
| 135 | if ((p = kmalloc(sizeof(*p), GFP_ATOMIC)) == NULL) | 134 | if ((p = kmalloc(sizeof(*p), GFP_ATOMIC)) == NULL) |
| 136 | return -ENOMEM; | 135 | return -ENOMEM; |
| 137 | 136 | ||
| 138 | memset(p, 0, sizeof(*p)); | 137 | memset(p, 0, sizeof(*p)); |
| 139 | INIT_WORK(&p->work, tape_34xx_work_handler, p); | 138 | INIT_WORK(&p->work, tape_34xx_work_handler); |
| 140 | 139 | ||
| 141 | p->device = tape_get_device_reference(device); | 140 | p->device = tape_get_device_reference(device); |
| 142 | p->op = op; | 141 | p->op = op; |
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c index 928cbefc49d5..9df912f63188 100644 --- a/drivers/s390/char/tape_3590.c +++ b/drivers/s390/char/tape_3590.c | |||
| @@ -236,9 +236,10 @@ struct work_handler_data { | |||
| 236 | }; | 236 | }; |
| 237 | 237 | ||
| 238 | static void | 238 | static void |
| 239 | tape_3590_work_handler(void *data) | 239 | tape_3590_work_handler(struct work_struct *work) |
| 240 | { | 240 | { |
| 241 | struct work_handler_data *p = data; | 241 | struct work_handler_data *p = |
| 242 | container_of(work, struct work_handler_data, work); | ||
| 242 | 243 | ||
| 243 | switch (p->op) { | 244 | switch (p->op) { |
| 244 | case TO_MSEN: | 245 | case TO_MSEN: |
| @@ -263,7 +264,7 @@ tape_3590_schedule_work(struct tape_device *device, enum tape_op op) | |||
| 263 | if ((p = kzalloc(sizeof(*p), GFP_ATOMIC)) == NULL) | 264 | if ((p = kzalloc(sizeof(*p), GFP_ATOMIC)) == NULL) |
| 264 | return -ENOMEM; | 265 | return -ENOMEM; |
| 265 | 266 | ||
| 266 | INIT_WORK(&p->work, tape_3590_work_handler, p); | 267 | INIT_WORK(&p->work, tape_3590_work_handler); |
| 267 | 268 | ||
| 268 | p->device = tape_get_device_reference(device); | 269 | p->device = tape_get_device_reference(device); |
| 269 | p->op = op; | 270 | p->op = op; |
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c index 3225fcd1dcb4..c8a89b3b87d4 100644 --- a/drivers/s390/char/tape_block.c +++ b/drivers/s390/char/tape_block.c | |||
| @@ -15,6 +15,7 @@ | |||
| 15 | #include <linux/blkdev.h> | 15 | #include <linux/blkdev.h> |
| 16 | #include <linux/interrupt.h> | 16 | #include <linux/interrupt.h> |
| 17 | #include <linux/buffer_head.h> | 17 | #include <linux/buffer_head.h> |
| 18 | #include <linux/kernel.h> | ||
| 18 | 19 | ||
| 19 | #include <asm/debug.h> | 20 | #include <asm/debug.h> |
| 20 | 21 | ||
| @@ -143,7 +144,8 @@ tapeblock_start_request(struct tape_device *device, struct request *req) | |||
| 143 | * queue. | 144 | * queue. |
| 144 | */ | 145 | */ |
| 145 | static void | 146 | static void |
| 146 | tapeblock_requeue(void *data) { | 147 | tapeblock_requeue(struct work_struct *work) { |
| 148 | struct tape_blk_data * blkdat; | ||
| 147 | struct tape_device * device; | 149 | struct tape_device * device; |
| 148 | request_queue_t * queue; | 150 | request_queue_t * queue; |
| 149 | int nr_queued; | 151 | int nr_queued; |
| @@ -151,7 +153,8 @@ tapeblock_requeue(void *data) { | |||
| 151 | struct list_head * l; | 153 | struct list_head * l; |
| 152 | int rc; | 154 | int rc; |
| 153 | 155 | ||
| 154 | device = (struct tape_device *) data; | 156 | blkdat = container_of(work, struct tape_blk_data, requeue_task); |
| 157 | device = blkdat->device; | ||
| 155 | if (!device) | 158 | if (!device) |
| 156 | return; | 159 | return; |
| 157 | 160 | ||
| @@ -212,6 +215,7 @@ tapeblock_setup_device(struct tape_device * device) | |||
| 212 | int rc; | 215 | int rc; |
| 213 | 216 | ||
| 214 | blkdat = &device->blk_data; | 217 | blkdat = &device->blk_data; |
| 218 | blkdat->device = device; | ||
| 215 | spin_lock_init(&blkdat->request_queue_lock); | 219 | spin_lock_init(&blkdat->request_queue_lock); |
| 216 | atomic_set(&blkdat->requeue_scheduled, 0); | 220 | atomic_set(&blkdat->requeue_scheduled, 0); |
| 217 | 221 | ||
| @@ -255,8 +259,8 @@ tapeblock_setup_device(struct tape_device * device) | |||
| 255 | 259 | ||
| 256 | add_disk(disk); | 260 | add_disk(disk); |
| 257 | 261 | ||
| 258 | INIT_WORK(&blkdat->requeue_task, tapeblock_requeue, | 262 | tape_get_device_reference(device); |
| 259 | tape_get_device_reference(device)); | 263 | INIT_WORK(&blkdat->requeue_task, tapeblock_requeue); |
| 260 | 264 | ||
| 261 | return 0; | 265 | return 0; |
| 262 | 266 | ||
| @@ -271,7 +275,7 @@ void | |||
| 271 | tapeblock_cleanup_device(struct tape_device *device) | 275 | tapeblock_cleanup_device(struct tape_device *device) |
| 272 | { | 276 | { |
| 273 | flush_scheduled_work(); | 277 | flush_scheduled_work(); |
| 274 | device->blk_data.requeue_task.data = tape_put_device(device); | 278 | tape_put_device(device); |
| 275 | 279 | ||
| 276 | if (!device->blk_data.disk) { | 280 | if (!device->blk_data.disk) { |
| 277 | PRINT_ERR("(%s): No gendisk to clean up!\n", | 281 | PRINT_ERR("(%s): No gendisk to clean up!\n", |
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c index 2826aed91043..c6c2e918b990 100644 --- a/drivers/s390/char/tape_core.c +++ b/drivers/s390/char/tape_core.c | |||
| @@ -28,7 +28,7 @@ | |||
| 28 | #define PRINTK_HEADER "TAPE_CORE: " | 28 | #define PRINTK_HEADER "TAPE_CORE: " |
| 29 | 29 | ||
| 30 | static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *); | 30 | static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *); |
| 31 | static void tape_delayed_next_request(void * data); | 31 | static void tape_delayed_next_request(struct work_struct *); |
| 32 | 32 | ||
| 33 | /* | 33 | /* |
| 34 | * One list to contain all tape devices of all disciplines, so | 34 | * One list to contain all tape devices of all disciplines, so |
| @@ -272,7 +272,7 @@ __tape_cancel_io(struct tape_device *device, struct tape_request *request) | |||
| 272 | return 0; | 272 | return 0; |
| 273 | case -EBUSY: | 273 | case -EBUSY: |
| 274 | request->status = TAPE_REQUEST_CANCEL; | 274 | request->status = TAPE_REQUEST_CANCEL; |
| 275 | schedule_work(&device->tape_dnr); | 275 | schedule_delayed_work(&device->tape_dnr, 0); |
| 276 | return 0; | 276 | return 0; |
| 277 | case -ENODEV: | 277 | case -ENODEV: |
| 278 | DBF_EXCEPTION(2, "device gone, retry\n"); | 278 | DBF_EXCEPTION(2, "device gone, retry\n"); |
| @@ -470,7 +470,7 @@ tape_alloc_device(void) | |||
| 470 | *device->modeset_byte = 0; | 470 | *device->modeset_byte = 0; |
| 471 | device->first_minor = -1; | 471 | device->first_minor = -1; |
| 472 | atomic_set(&device->ref_count, 1); | 472 | atomic_set(&device->ref_count, 1); |
| 473 | INIT_WORK(&device->tape_dnr, tape_delayed_next_request, device); | 473 | INIT_DELAYED_WORK(&device->tape_dnr, tape_delayed_next_request); |
| 474 | 474 | ||
| 475 | return device; | 475 | return device; |
| 476 | } | 476 | } |
| @@ -724,7 +724,7 @@ __tape_start_io(struct tape_device *device, struct tape_request *request) | |||
| 724 | } else if (rc == -EBUSY) { | 724 | } else if (rc == -EBUSY) { |
| 725 | /* The common I/O subsystem is currently busy. Retry later. */ | 725 | /* The common I/O subsystem is currently busy. Retry later. */ |
| 726 | request->status = TAPE_REQUEST_QUEUED; | 726 | request->status = TAPE_REQUEST_QUEUED; |
| 727 | schedule_work(&device->tape_dnr); | 727 | schedule_delayed_work(&device->tape_dnr, 0); |
| 728 | rc = 0; | 728 | rc = 0; |
| 729 | } else { | 729 | } else { |
| 730 | /* Start failed. Remove request and indicate failure. */ | 730 | /* Start failed. Remove request and indicate failure. */ |
| @@ -790,11 +790,11 @@ __tape_start_next_request(struct tape_device *device) | |||
| 790 | } | 790 | } |
| 791 | 791 | ||
| 792 | static void | 792 | static void |
| 793 | tape_delayed_next_request(void *data) | 793 | tape_delayed_next_request(struct work_struct *work) |
| 794 | { | 794 | { |
| 795 | struct tape_device * device; | 795 | struct tape_device *device = |
| 796 | container_of(work, struct tape_device, tape_dnr.work); | ||
| 796 | 797 | ||
| 797 | device = (struct tape_device *) data; | ||
| 798 | DBF_LH(6, "tape_delayed_next_request(%p)\n", device); | 798 | DBF_LH(6, "tape_delayed_next_request(%p)\n", device); |
| 799 | spin_lock_irq(get_ccwdev_lock(device->cdev)); | 799 | spin_lock_irq(get_ccwdev_lock(device->cdev)); |
| 800 | __tape_start_next_request(device); | 800 | __tape_start_next_request(device); |
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h index 9ff064e71767..dfd5462f993f 100644 --- a/drivers/s390/cio/css.h +++ b/drivers/s390/cio/css.h | |||
| @@ -73,6 +73,8 @@ struct senseid { | |||
| 73 | } __attribute__ ((packed,aligned(4))); | 73 | } __attribute__ ((packed,aligned(4))); |
| 74 | 74 | ||
| 75 | struct ccw_device_private { | 75 | struct ccw_device_private { |
| 76 | struct ccw_device *cdev; | ||
| 77 | struct subchannel *sch; | ||
| 76 | int state; /* device state */ | 78 | int state; /* device state */ |
| 77 | atomic_t onoff; | 79 | atomic_t onoff; |
| 78 | unsigned long registered; | 80 | unsigned long registered; |
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index d3d3716ff84b..0f604621de40 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c | |||
| @@ -585,12 +585,13 @@ static struct ccw_device * get_disc_ccwdev_by_dev_id(struct ccw_dev_id *dev_id, | |||
| 585 | } | 585 | } |
| 586 | 586 | ||
| 587 | static void | 587 | static void |
| 588 | ccw_device_add_changed(void *data) | 588 | ccw_device_add_changed(struct work_struct *work) |
| 589 | { | 589 | { |
| 590 | 590 | struct ccw_device_private *priv; | |
| 591 | struct ccw_device *cdev; | 591 | struct ccw_device *cdev; |
| 592 | 592 | ||
| 593 | cdev = data; | 593 | priv = container_of(work, struct ccw_device_private, kick_work); |
| 594 | cdev = priv->cdev; | ||
| 594 | if (device_add(&cdev->dev)) { | 595 | if (device_add(&cdev->dev)) { |
| 595 | put_device(&cdev->dev); | 596 | put_device(&cdev->dev); |
| 596 | return; | 597 | return; |
| @@ -605,13 +606,15 @@ ccw_device_add_changed(void *data) | |||
| 605 | extern int css_get_ssd_info(struct subchannel *sch); | 606 | extern int css_get_ssd_info(struct subchannel *sch); |
| 606 | 607 | ||
| 607 | void | 608 | void |
| 608 | ccw_device_do_unreg_rereg(void *data) | 609 | ccw_device_do_unreg_rereg(struct work_struct *work) |
| 609 | { | 610 | { |
| 611 | struct ccw_device_private *priv; | ||
| 610 | struct ccw_device *cdev; | 612 | struct ccw_device *cdev; |
| 611 | struct subchannel *sch; | 613 | struct subchannel *sch; |
| 612 | int need_rename; | 614 | int need_rename; |
| 613 | 615 | ||
| 614 | cdev = data; | 616 | priv = container_of(work, struct ccw_device_private, kick_work); |
| 617 | cdev = priv->cdev; | ||
| 615 | sch = to_subchannel(cdev->dev.parent); | 618 | sch = to_subchannel(cdev->dev.parent); |
| 616 | if (cdev->private->dev_id.devno != sch->schib.pmcw.dev) { | 619 | if (cdev->private->dev_id.devno != sch->schib.pmcw.dev) { |
| 617 | /* | 620 | /* |
| @@ -659,7 +662,7 @@ ccw_device_do_unreg_rereg(void *data) | |||
| 659 | snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x", | 662 | snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x", |
| 660 | sch->schid.ssid, sch->schib.pmcw.dev); | 663 | sch->schid.ssid, sch->schib.pmcw.dev); |
| 661 | PREPARE_WORK(&cdev->private->kick_work, | 664 | PREPARE_WORK(&cdev->private->kick_work, |
| 662 | ccw_device_add_changed, cdev); | 665 | ccw_device_add_changed); |
| 663 | queue_work(ccw_device_work, &cdev->private->kick_work); | 666 | queue_work(ccw_device_work, &cdev->private->kick_work); |
| 664 | } | 667 | } |
| 665 | 668 | ||
| @@ -677,14 +680,16 @@ ccw_device_release(struct device *dev) | |||
| 677 | * Register recognized device. | 680 | * Register recognized device. |
| 678 | */ | 681 | */ |
| 679 | static void | 682 | static void |
| 680 | io_subchannel_register(void *data) | 683 | io_subchannel_register(struct work_struct *work) |
| 681 | { | 684 | { |
| 685 | struct ccw_device_private *priv; | ||
| 682 | struct ccw_device *cdev; | 686 | struct ccw_device *cdev; |
| 683 | struct subchannel *sch; | 687 | struct subchannel *sch; |
| 684 | int ret; | 688 | int ret; |
| 685 | unsigned long flags; | 689 | unsigned long flags; |
| 686 | 690 | ||
| 687 | cdev = data; | 691 | priv = container_of(work, struct ccw_device_private, kick_work); |
| 692 | cdev = priv->cdev; | ||
| 688 | sch = to_subchannel(cdev->dev.parent); | 693 | sch = to_subchannel(cdev->dev.parent); |
| 689 | 694 | ||
| 690 | /* | 695 | /* |
| @@ -734,11 +739,14 @@ out: | |||
| 734 | } | 739 | } |
| 735 | 740 | ||
| 736 | void | 741 | void |
| 737 | ccw_device_call_sch_unregister(void *data) | 742 | ccw_device_call_sch_unregister(struct work_struct *work) |
| 738 | { | 743 | { |
| 739 | struct ccw_device *cdev = data; | 744 | struct ccw_device_private *priv; |
| 745 | struct ccw_device *cdev; | ||
| 740 | struct subchannel *sch; | 746 | struct subchannel *sch; |
| 741 | 747 | ||
| 748 | priv = container_of(work, struct ccw_device_private, kick_work); | ||
| 749 | cdev = priv->cdev; | ||
| 742 | sch = to_subchannel(cdev->dev.parent); | 750 | sch = to_subchannel(cdev->dev.parent); |
| 743 | css_sch_device_unregister(sch); | 751 | css_sch_device_unregister(sch); |
| 744 | /* Reset intparm to zeroes. */ | 752 | /* Reset intparm to zeroes. */ |
| @@ -768,7 +776,7 @@ io_subchannel_recog_done(struct ccw_device *cdev) | |||
| 768 | break; | 776 | break; |
| 769 | sch = to_subchannel(cdev->dev.parent); | 777 | sch = to_subchannel(cdev->dev.parent); |
| 770 | PREPARE_WORK(&cdev->private->kick_work, | 778 | PREPARE_WORK(&cdev->private->kick_work, |
| 771 | ccw_device_call_sch_unregister, cdev); | 779 | ccw_device_call_sch_unregister); |
| 772 | queue_work(slow_path_wq, &cdev->private->kick_work); | 780 | queue_work(slow_path_wq, &cdev->private->kick_work); |
| 773 | if (atomic_dec_and_test(&ccw_device_init_count)) | 781 | if (atomic_dec_and_test(&ccw_device_init_count)) |
| 774 | wake_up(&ccw_device_init_wq); | 782 | wake_up(&ccw_device_init_wq); |
| @@ -783,7 +791,7 @@ io_subchannel_recog_done(struct ccw_device *cdev) | |||
| 783 | if (!get_device(&cdev->dev)) | 791 | if (!get_device(&cdev->dev)) |
| 784 | break; | 792 | break; |
| 785 | PREPARE_WORK(&cdev->private->kick_work, | 793 | PREPARE_WORK(&cdev->private->kick_work, |
| 786 | io_subchannel_register, cdev); | 794 | io_subchannel_register); |
| 787 | queue_work(slow_path_wq, &cdev->private->kick_work); | 795 | queue_work(slow_path_wq, &cdev->private->kick_work); |
| 788 | break; | 796 | break; |
| 789 | } | 797 | } |
| @@ -865,6 +873,7 @@ io_subchannel_probe (struct subchannel *sch) | |||
| 865 | kfree(cdev); | 873 | kfree(cdev); |
| 866 | return -ENOMEM; | 874 | return -ENOMEM; |
| 867 | } | 875 | } |
| 876 | cdev->private->cdev = cdev; | ||
| 868 | atomic_set(&cdev->private->onoff, 0); | 877 | atomic_set(&cdev->private->onoff, 0); |
| 869 | cdev->dev.parent = &sch->dev; | 878 | cdev->dev.parent = &sch->dev; |
| 870 | cdev->dev.release = ccw_device_release; | 879 | cdev->dev.release = ccw_device_release; |
| @@ -890,12 +899,13 @@ io_subchannel_probe (struct subchannel *sch) | |||
| 890 | return rc; | 899 | return rc; |
| 891 | } | 900 | } |
| 892 | 901 | ||
| 893 | static void | 902 | static void ccw_device_unregister(struct work_struct *work) |
| 894 | ccw_device_unregister(void *data) | ||
| 895 | { | 903 | { |
| 904 | struct ccw_device_private *priv; | ||
| 896 | struct ccw_device *cdev; | 905 | struct ccw_device *cdev; |
| 897 | 906 | ||
| 898 | cdev = (struct ccw_device *)data; | 907 | priv = container_of(work, struct ccw_device_private, kick_work); |
| 908 | cdev = priv->cdev; | ||
| 899 | if (test_and_clear_bit(1, &cdev->private->registered)) | 909 | if (test_and_clear_bit(1, &cdev->private->registered)) |
| 900 | device_unregister(&cdev->dev); | 910 | device_unregister(&cdev->dev); |
| 901 | put_device(&cdev->dev); | 911 | put_device(&cdev->dev); |
| @@ -921,7 +931,7 @@ io_subchannel_remove (struct subchannel *sch) | |||
| 921 | */ | 931 | */ |
| 922 | if (get_device(&cdev->dev)) { | 932 | if (get_device(&cdev->dev)) { |
| 923 | PREPARE_WORK(&cdev->private->kick_work, | 933 | PREPARE_WORK(&cdev->private->kick_work, |
| 924 | ccw_device_unregister, cdev); | 934 | ccw_device_unregister); |
| 925 | queue_work(ccw_device_work, &cdev->private->kick_work); | 935 | queue_work(ccw_device_work, &cdev->private->kick_work); |
| 926 | } | 936 | } |
| 927 | return 0; | 937 | return 0; |
| @@ -1048,6 +1058,7 @@ ccw_device_probe_console(void) | |||
| 1048 | memset(&console_cdev, 0, sizeof(struct ccw_device)); | 1058 | memset(&console_cdev, 0, sizeof(struct ccw_device)); |
| 1049 | memset(&console_private, 0, sizeof(struct ccw_device_private)); | 1059 | memset(&console_private, 0, sizeof(struct ccw_device_private)); |
| 1050 | console_cdev.private = &console_private; | 1060 | console_cdev.private = &console_private; |
| 1061 | console_private.cdev = &console_cdev; | ||
| 1051 | ret = ccw_device_console_enable(&console_cdev, sch); | 1062 | ret = ccw_device_console_enable(&console_cdev, sch); |
| 1052 | if (ret) { | 1063 | if (ret) { |
| 1053 | cio_release_console(); | 1064 | cio_release_console(); |
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h index 9233b5c0bcc8..d5fe95e04cfe 100644 --- a/drivers/s390/cio/device.h +++ b/drivers/s390/cio/device.h | |||
| @@ -78,8 +78,8 @@ void io_subchannel_recog_done(struct ccw_device *cdev); | |||
| 78 | 78 | ||
| 79 | int ccw_device_cancel_halt_clear(struct ccw_device *); | 79 | int ccw_device_cancel_halt_clear(struct ccw_device *); |
| 80 | 80 | ||
| 81 | void ccw_device_do_unreg_rereg(void *); | 81 | void ccw_device_do_unreg_rereg(struct work_struct *); |
| 82 | void ccw_device_call_sch_unregister(void *); | 82 | void ccw_device_call_sch_unregister(struct work_struct *); |
| 83 | 83 | ||
| 84 | int ccw_device_recognition(struct ccw_device *); | 84 | int ccw_device_recognition(struct ccw_device *); |
| 85 | int ccw_device_online(struct ccw_device *); | 85 | int ccw_device_online(struct ccw_device *); |
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index 09c7672eb3f3..0f0301ce37fe 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c | |||
| @@ -194,7 +194,7 @@ ccw_device_handle_oper(struct ccw_device *cdev) | |||
| 194 | cdev->id.dev_model != cdev->private->senseid.dev_model || | 194 | cdev->id.dev_model != cdev->private->senseid.dev_model || |
| 195 | cdev->private->dev_id.devno != sch->schib.pmcw.dev) { | 195 | cdev->private->dev_id.devno != sch->schib.pmcw.dev) { |
| 196 | PREPARE_WORK(&cdev->private->kick_work, | 196 | PREPARE_WORK(&cdev->private->kick_work, |
| 197 | ccw_device_do_unreg_rereg, cdev); | 197 | ccw_device_do_unreg_rereg); |
| 198 | queue_work(ccw_device_work, &cdev->private->kick_work); | 198 | queue_work(ccw_device_work, &cdev->private->kick_work); |
| 199 | return 0; | 199 | return 0; |
| 200 | } | 200 | } |
| @@ -329,19 +329,21 @@ ccw_device_sense_id_done(struct ccw_device *cdev, int err) | |||
| 329 | } | 329 | } |
| 330 | 330 | ||
| 331 | static void | 331 | static void |
| 332 | ccw_device_oper_notify(void *data) | 332 | ccw_device_oper_notify(struct work_struct *work) |
| 333 | { | 333 | { |
| 334 | struct ccw_device_private *priv; | ||
| 334 | struct ccw_device *cdev; | 335 | struct ccw_device *cdev; |
| 335 | struct subchannel *sch; | 336 | struct subchannel *sch; |
| 336 | int ret; | 337 | int ret; |
| 337 | 338 | ||
| 338 | cdev = data; | 339 | priv = container_of(work, struct ccw_device_private, kick_work); |
| 340 | cdev = priv->cdev; | ||
| 339 | sch = to_subchannel(cdev->dev.parent); | 341 | sch = to_subchannel(cdev->dev.parent); |
| 340 | ret = (sch->driver && sch->driver->notify) ? | 342 | ret = (sch->driver && sch->driver->notify) ? |
| 341 | sch->driver->notify(&sch->dev, CIO_OPER) : 0; | 343 | sch->driver->notify(&sch->dev, CIO_OPER) : 0; |
| 342 | if (!ret) | 344 | if (!ret) |
| 343 | /* Driver doesn't want device back. */ | 345 | /* Driver doesn't want device back. */ |
| 344 | ccw_device_do_unreg_rereg(cdev); | 346 | ccw_device_do_unreg_rereg(work); |
| 345 | else { | 347 | else { |
| 346 | /* Reenable channel measurements, if needed. */ | 348 | /* Reenable channel measurements, if needed. */ |
| 347 | cmf_reenable(cdev); | 349 | cmf_reenable(cdev); |
| @@ -377,8 +379,7 @@ ccw_device_done(struct ccw_device *cdev, int state) | |||
| 377 | 379 | ||
| 378 | if (cdev->private->flags.donotify) { | 380 | if (cdev->private->flags.donotify) { |
| 379 | cdev->private->flags.donotify = 0; | 381 | cdev->private->flags.donotify = 0; |
| 380 | PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify, | 382 | PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify); |
| 381 | cdev); | ||
| 382 | queue_work(ccw_device_notify_work, &cdev->private->kick_work); | 383 | queue_work(ccw_device_notify_work, &cdev->private->kick_work); |
| 383 | } | 384 | } |
| 384 | wake_up(&cdev->private->wait_q); | 385 | wake_up(&cdev->private->wait_q); |
| @@ -528,13 +529,15 @@ ccw_device_recog_timeout(struct ccw_device *cdev, enum dev_event dev_event) | |||
| 528 | 529 | ||
| 529 | 530 | ||
| 530 | static void | 531 | static void |
| 531 | ccw_device_nopath_notify(void *data) | 532 | ccw_device_nopath_notify(struct work_struct *work) |
| 532 | { | 533 | { |
| 534 | struct ccw_device_private *priv; | ||
| 533 | struct ccw_device *cdev; | 535 | struct ccw_device *cdev; |
| 534 | struct subchannel *sch; | 536 | struct subchannel *sch; |
| 535 | int ret; | 537 | int ret; |
| 536 | 538 | ||
| 537 | cdev = data; | 539 | priv = container_of(work, struct ccw_device_private, kick_work); |
| 540 | cdev = priv->cdev; | ||
| 538 | sch = to_subchannel(cdev->dev.parent); | 541 | sch = to_subchannel(cdev->dev.parent); |
| 539 | /* Extra sanity. */ | 542 | /* Extra sanity. */ |
| 540 | if (sch->lpm) | 543 | if (sch->lpm) |
| @@ -547,8 +550,7 @@ ccw_device_nopath_notify(void *data) | |||
| 547 | cio_disable_subchannel(sch); | 550 | cio_disable_subchannel(sch); |
| 548 | if (get_device(&cdev->dev)) { | 551 | if (get_device(&cdev->dev)) { |
| 549 | PREPARE_WORK(&cdev->private->kick_work, | 552 | PREPARE_WORK(&cdev->private->kick_work, |
| 550 | ccw_device_call_sch_unregister, | 553 | ccw_device_call_sch_unregister); |
| 551 | cdev); | ||
| 552 | queue_work(ccw_device_work, | 554 | queue_work(ccw_device_work, |
| 553 | &cdev->private->kick_work); | 555 | &cdev->private->kick_work); |
| 554 | } else | 556 | } else |
| @@ -607,7 +609,7 @@ ccw_device_verify_done(struct ccw_device *cdev, int err) | |||
| 607 | /* Reset oper notify indication after verify error. */ | 609 | /* Reset oper notify indication after verify error. */ |
| 608 | cdev->private->flags.donotify = 0; | 610 | cdev->private->flags.donotify = 0; |
| 609 | PREPARE_WORK(&cdev->private->kick_work, | 611 | PREPARE_WORK(&cdev->private->kick_work, |
| 610 | ccw_device_nopath_notify, cdev); | 612 | ccw_device_nopath_notify); |
| 611 | queue_work(ccw_device_notify_work, &cdev->private->kick_work); | 613 | queue_work(ccw_device_notify_work, &cdev->private->kick_work); |
| 612 | ccw_device_done(cdev, DEV_STATE_NOT_OPER); | 614 | ccw_device_done(cdev, DEV_STATE_NOT_OPER); |
| 613 | break; | 615 | break; |
| @@ -738,7 +740,7 @@ ccw_device_offline_notoper(struct ccw_device *cdev, enum dev_event dev_event) | |||
| 738 | sch = to_subchannel(cdev->dev.parent); | 740 | sch = to_subchannel(cdev->dev.parent); |
| 739 | if (get_device(&cdev->dev)) { | 741 | if (get_device(&cdev->dev)) { |
| 740 | PREPARE_WORK(&cdev->private->kick_work, | 742 | PREPARE_WORK(&cdev->private->kick_work, |
| 741 | ccw_device_call_sch_unregister, cdev); | 743 | ccw_device_call_sch_unregister); |
| 742 | queue_work(ccw_device_work, &cdev->private->kick_work); | 744 | queue_work(ccw_device_work, &cdev->private->kick_work); |
| 743 | } | 745 | } |
| 744 | wake_up(&cdev->private->wait_q); | 746 | wake_up(&cdev->private->wait_q); |
| @@ -769,7 +771,7 @@ ccw_device_online_notoper(struct ccw_device *cdev, enum dev_event dev_event) | |||
| 769 | } | 771 | } |
| 770 | if (get_device(&cdev->dev)) { | 772 | if (get_device(&cdev->dev)) { |
| 771 | PREPARE_WORK(&cdev->private->kick_work, | 773 | PREPARE_WORK(&cdev->private->kick_work, |
| 772 | ccw_device_call_sch_unregister, cdev); | 774 | ccw_device_call_sch_unregister); |
| 773 | queue_work(ccw_device_work, &cdev->private->kick_work); | 775 | queue_work(ccw_device_work, &cdev->private->kick_work); |
| 774 | } | 776 | } |
| 775 | wake_up(&cdev->private->wait_q); | 777 | wake_up(&cdev->private->wait_q); |
| @@ -874,7 +876,7 @@ ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event) | |||
| 874 | sch = to_subchannel(cdev->dev.parent); | 876 | sch = to_subchannel(cdev->dev.parent); |
| 875 | if (!sch->lpm) { | 877 | if (!sch->lpm) { |
| 876 | PREPARE_WORK(&cdev->private->kick_work, | 878 | PREPARE_WORK(&cdev->private->kick_work, |
| 877 | ccw_device_nopath_notify, cdev); | 879 | ccw_device_nopath_notify); |
| 878 | queue_work(ccw_device_notify_work, | 880 | queue_work(ccw_device_notify_work, |
| 879 | &cdev->private->kick_work); | 881 | &cdev->private->kick_work); |
| 880 | } else | 882 | } else |
| @@ -969,7 +971,7 @@ ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event) | |||
| 969 | ERR_PTR(-EIO)); | 971 | ERR_PTR(-EIO)); |
| 970 | if (!sch->lpm) { | 972 | if (!sch->lpm) { |
| 971 | PREPARE_WORK(&cdev->private->kick_work, | 973 | PREPARE_WORK(&cdev->private->kick_work, |
| 972 | ccw_device_nopath_notify, cdev); | 974 | ccw_device_nopath_notify); |
| 973 | queue_work(ccw_device_notify_work, &cdev->private->kick_work); | 975 | queue_work(ccw_device_notify_work, &cdev->private->kick_work); |
| 974 | } else if (cdev->private->flags.doverify) | 976 | } else if (cdev->private->flags.doverify) |
| 975 | /* Start delayed path verification. */ | 977 | /* Start delayed path verification. */ |
| @@ -992,7 +994,7 @@ ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event) | |||
| 992 | sch = to_subchannel(cdev->dev.parent); | 994 | sch = to_subchannel(cdev->dev.parent); |
| 993 | if (!sch->lpm) { | 995 | if (!sch->lpm) { |
| 994 | PREPARE_WORK(&cdev->private->kick_work, | 996 | PREPARE_WORK(&cdev->private->kick_work, |
| 995 | ccw_device_nopath_notify, cdev); | 997 | ccw_device_nopath_notify); |
| 996 | queue_work(ccw_device_notify_work, | 998 | queue_work(ccw_device_notify_work, |
| 997 | &cdev->private->kick_work); | 999 | &cdev->private->kick_work); |
| 998 | } else | 1000 | } else |
| @@ -1021,7 +1023,7 @@ void device_kill_io(struct subchannel *sch) | |||
| 1021 | if (ret == -ENODEV) { | 1023 | if (ret == -ENODEV) { |
| 1022 | if (!sch->lpm) { | 1024 | if (!sch->lpm) { |
| 1023 | PREPARE_WORK(&cdev->private->kick_work, | 1025 | PREPARE_WORK(&cdev->private->kick_work, |
| 1024 | ccw_device_nopath_notify, cdev); | 1026 | ccw_device_nopath_notify); |
| 1025 | queue_work(ccw_device_notify_work, | 1027 | queue_work(ccw_device_notify_work, |
| 1026 | &cdev->private->kick_work); | 1028 | &cdev->private->kick_work); |
| 1027 | } else | 1029 | } else |
| @@ -1033,7 +1035,7 @@ void device_kill_io(struct subchannel *sch) | |||
| 1033 | ERR_PTR(-EIO)); | 1035 | ERR_PTR(-EIO)); |
| 1034 | if (!sch->lpm) { | 1036 | if (!sch->lpm) { |
| 1035 | PREPARE_WORK(&cdev->private->kick_work, | 1037 | PREPARE_WORK(&cdev->private->kick_work, |
| 1036 | ccw_device_nopath_notify, cdev); | 1038 | ccw_device_nopath_notify); |
| 1037 | queue_work(ccw_device_notify_work, &cdev->private->kick_work); | 1039 | queue_work(ccw_device_notify_work, &cdev->private->kick_work); |
| 1038 | } else | 1040 | } else |
| 1039 | /* Start delayed path verification. */ | 1041 | /* Start delayed path verification. */ |
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c index 8d5fa1b4d11f..d066dbf2c65d 100644 --- a/drivers/s390/cio/qdio.c +++ b/drivers/s390/cio/qdio.c | |||
| @@ -2045,11 +2045,13 @@ omit_handler_call: | |||
| 2045 | } | 2045 | } |
| 2046 | 2046 | ||
| 2047 | static void | 2047 | static void |
| 2048 | qdio_call_shutdown(void *data) | 2048 | qdio_call_shutdown(struct work_struct *work) |
| 2049 | { | 2049 | { |
| 2050 | struct ccw_device_private *priv; | ||
| 2050 | struct ccw_device *cdev; | 2051 | struct ccw_device *cdev; |
| 2051 | 2052 | ||
| 2052 | cdev = (struct ccw_device *)data; | 2053 | priv = container_of(work, struct ccw_device_private, kick_work); |
| 2054 | cdev = priv->cdev; | ||
| 2053 | qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); | 2055 | qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); |
| 2054 | put_device(&cdev->dev); | 2056 | put_device(&cdev->dev); |
| 2055 | } | 2057 | } |
| @@ -2091,7 +2093,7 @@ qdio_timeout_handler(struct ccw_device *cdev) | |||
| 2091 | if (get_device(&cdev->dev)) { | 2093 | if (get_device(&cdev->dev)) { |
| 2092 | /* Can't call shutdown from interrupt context. */ | 2094 | /* Can't call shutdown from interrupt context. */ |
| 2093 | PREPARE_WORK(&cdev->private->kick_work, | 2095 | PREPARE_WORK(&cdev->private->kick_work, |
| 2094 | qdio_call_shutdown, (void *)cdev); | 2096 | qdio_call_shutdown); |
| 2095 | queue_work(ccw_device_work, &cdev->private->kick_work); | 2097 | queue_work(ccw_device_work, &cdev->private->kick_work); |
| 2096 | } | 2098 | } |
| 2097 | break; | 2099 | break; |
