commit c16375329c2ab4667df873394c4be7a61d163c62
tree   9ee9505e4587ce5f472db3fd09935611b0062f83 /drivers/s390/char
parent e45ccc0562e3f391dcba8b2e8a02551e8e42d8db
author    Martin Schwidefsky <schwidefsky@de.ibm.com>  2006-12-08 09:53:57 -0500
committer Martin Schwidefsky <schwidefsky@de.ibm.com>  2006-12-08 09:53:57 -0500
[S390] more workqueue fixes.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'drivers/s390/char'):
 -rw-r--r--  drivers/s390/char/tape.h        3
 -rw-r--r--  drivers/s390/char/tape_34xx.c  23
 -rw-r--r--  drivers/s390/char/tape_3590.c   7
 -rw-r--r--  drivers/s390/char/tape_block.c 14
 -rw-r--r--  drivers/s390/char/tape_core.c  14
 5 files changed, 33 insertions, 28 deletions
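All of the hunks below apply the same conversion required by the 2.6.20 workqueue rework: work handlers now receive a struct work_struct * instead of a caller-supplied void *, so each handler recovers its context with container_of(), and work items that get rescheduled later become struct delayed_work. A minimal sketch of that pattern, using hypothetical my_device/my_work_handler names rather than the driver's own:

#include <linux/workqueue.h>

/* Hypothetical driver context embedding its (delayed) work item. */
struct my_device {
	int			id;
	struct delayed_work	work;	/* was: struct work_struct */
};

/*
 * New-style handler: there is no void *data argument any more; the
 * context is recovered from the work_struct pointer via container_of().
 */
static void my_work_handler(struct work_struct *work)
{
	struct my_device *dev =
		container_of(work, struct my_device, work.work);

	/* ... operate on dev ... */
}

static void my_device_init(struct my_device *dev)
{
	/* INIT_WORK/INIT_DELAYED_WORK lost their third "data" argument. */
	INIT_DELAYED_WORK(&dev->work, my_work_handler);

	/* schedule_work() on a delayed_work becomes a zero-delay
	 * schedule_delayed_work(). */
	schedule_delayed_work(&dev->work, 0);
}

The tape_34xx, tape_3590, and tape_block handlers use the plain struct work_struct variant of this pattern (container_of on the embedded work member), while tape_dnr in tape_core.c uses the delayed_work variant, so its schedule_work() calls become schedule_delayed_work(..., 0).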
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
index 1f4c89967be4..c9f1c4c8bb13 100644
--- a/drivers/s390/char/tape.h
+++ b/drivers/s390/char/tape.h
@@ -179,6 +179,7 @@ struct tape_char_data {
 /* Block Frontend Data */
 struct tape_blk_data
 {
+	struct tape_device *	device;
 	/* Block device request queue. */
 	request_queue_t *	request_queue;
 	spinlock_t		request_queue_lock;
@@ -240,7 +241,7 @@ struct tape_device {
 #endif
 
 	/* Function to start or stop the next request later. */
-	struct work_struct		tape_dnr;
+	struct delayed_work		tape_dnr;
 };
 
 /* Externals from tape_core.c */
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index 7b95dab913d0..e765875e8db2 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -95,6 +95,12 @@ tape_34xx_medium_sense(struct tape_device *device)
 	return rc;
 }
 
+struct tape_34xx_work {
+	struct tape_device	*device;
+	enum tape_op		op;
+	struct work_struct	work;
+};
+
 /*
  * These functions are currently used only to schedule a medium_sense for
  * later execution. This is because we get an interrupt whenever a medium
@@ -103,13 +109,10 @@ tape_34xx_medium_sense(struct tape_device *device)
  * interrupt handler.
  */
 static void
-tape_34xx_work_handler(void *data)
+tape_34xx_work_handler(struct work_struct *work)
 {
-	struct {
-		struct tape_device	*device;
-		enum tape_op		op;
-		struct work_struct	work;
-	} *p = data;
+	struct tape_34xx_work *p =
+		container_of(work, struct tape_34xx_work, work);
 
 	switch(p->op) {
 	case TO_MSEN:
@@ -126,17 +129,13 @@ tape_34xx_work_handler(void *data)
 static int
 tape_34xx_schedule_work(struct tape_device *device, enum tape_op op)
 {
-	struct {
-		struct tape_device	*device;
-		enum tape_op		op;
-		struct work_struct	work;
-	} *p;
+	struct tape_34xx_work	*p;
 
 	if ((p = kmalloc(sizeof(*p), GFP_ATOMIC)) == NULL)
 		return -ENOMEM;
 
 	memset(p, 0, sizeof(*p));
-	INIT_WORK(&p->work, tape_34xx_work_handler, p);
+	INIT_WORK(&p->work, tape_34xx_work_handler);
 
 	p->device = tape_get_device_reference(device);
 	p->op = op;
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index 928cbefc49d5..9df912f63188 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -236,9 +236,10 @@ struct work_handler_data {
 };
 
 static void
-tape_3590_work_handler(void *data)
+tape_3590_work_handler(struct work_struct *work)
 {
-	struct work_handler_data *p = data;
+	struct work_handler_data *p =
+		container_of(work, struct work_handler_data, work);
 
 	switch (p->op) {
 	case TO_MSEN:
@@ -263,7 +264,7 @@ tape_3590_schedule_work(struct tape_device *device, enum tape_op op)
 	if ((p = kzalloc(sizeof(*p), GFP_ATOMIC)) == NULL)
 		return -ENOMEM;
 
-	INIT_WORK(&p->work, tape_3590_work_handler, p);
+	INIT_WORK(&p->work, tape_3590_work_handler);
 
 	p->device = tape_get_device_reference(device);
 	p->op = op;
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index 3225fcd1dcb4..c8a89b3b87d4 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -15,6 +15,7 @@
 #include <linux/blkdev.h>
 #include <linux/interrupt.h>
 #include <linux/buffer_head.h>
+#include <linux/kernel.h>
 
 #include <asm/debug.h>
 
@@ -143,7 +144,8 @@ tapeblock_start_request(struct tape_device *device, struct request *req)
  * queue.
  */
 static void
-tapeblock_requeue(void *data) {
+tapeblock_requeue(struct work_struct *work) {
+	struct tape_blk_data *	blkdat;
 	struct tape_device *	device;
 	request_queue_t *	queue;
 	int			nr_queued;
@@ -151,7 +153,8 @@ tapeblock_requeue(void *data) {
 	struct list_head *	l;
 	int			rc;
 
-	device = (struct tape_device *) data;
+	blkdat = container_of(work, struct tape_blk_data, requeue_task);
+	device = blkdat->device;
 	if (!device)
 		return;
 
@@ -212,6 +215,7 @@ tapeblock_setup_device(struct tape_device * device)
 	int			rc;
 
 	blkdat = &device->blk_data;
+	blkdat->device = device;
 	spin_lock_init(&blkdat->request_queue_lock);
 	atomic_set(&blkdat->requeue_scheduled, 0);
 
@@ -255,8 +259,8 @@ tapeblock_setup_device(struct tape_device * device)
 
 	add_disk(disk);
 
-	INIT_WORK(&blkdat->requeue_task, tapeblock_requeue,
-		tape_get_device_reference(device));
+	tape_get_device_reference(device);
+	INIT_WORK(&blkdat->requeue_task, tapeblock_requeue);
 
 	return 0;
 
@@ -271,7 +275,7 @@ void
 tapeblock_cleanup_device(struct tape_device *device)
 {
 	flush_scheduled_work();
-	device->blk_data.requeue_task.data = tape_put_device(device);
+	tape_put_device(device);
 
 	if (!device->blk_data.disk) {
 		PRINT_ERR("(%s): No gendisk to clean up!\n",
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index 2826aed91043..c6c2e918b990 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -28,7 +28,7 @@
 #define PRINTK_HEADER "TAPE_CORE: "
 
 static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *);
-static void tape_delayed_next_request(void * data);
+static void tape_delayed_next_request(struct work_struct *);
 
 /*
  * One list to contain all tape devices of all disciplines, so
@@ -272,7 +272,7 @@ __tape_cancel_io(struct tape_device *device, struct tape_request *request)
 		return 0;
 	case -EBUSY:
 		request->status = TAPE_REQUEST_CANCEL;
-		schedule_work(&device->tape_dnr);
+		schedule_delayed_work(&device->tape_dnr, 0);
 		return 0;
 	case -ENODEV:
 		DBF_EXCEPTION(2, "device gone, retry\n");
@@ -470,7 +470,7 @@ tape_alloc_device(void)
 	*device->modeset_byte = 0;
 	device->first_minor = -1;
 	atomic_set(&device->ref_count, 1);
-	INIT_WORK(&device->tape_dnr, tape_delayed_next_request, device);
+	INIT_DELAYED_WORK(&device->tape_dnr, tape_delayed_next_request);
 
 	return device;
 }
@@ -724,7 +724,7 @@ __tape_start_io(struct tape_device *device, struct tape_request *request)
 	} else if (rc == -EBUSY) {
 		/* The common I/O subsystem is currently busy. Retry later. */
 		request->status = TAPE_REQUEST_QUEUED;
-		schedule_work(&device->tape_dnr);
+		schedule_delayed_work(&device->tape_dnr, 0);
 		rc = 0;
 	} else {
 		/* Start failed. Remove request and indicate failure. */
@@ -790,11 +790,11 @@ __tape_start_next_request(device)
 }
 
 static void
-tape_delayed_next_request(void *data)
+tape_delayed_next_request(struct work_struct *work)
 {
-	struct tape_device *	device;
+	struct tape_device *device =
+		container_of(work, struct tape_device, tape_dnr.work);
 
-	device = (struct tape_device *) data;
 	DBF_LH(6, "tape_delayed_next_request(%p)\n", device);
 	spin_lock_irq(get_ccwdev_lock(device->cdev));
 	__tape_start_next_request(device);