author     Christoph Hellwig <hch@infradead.org>  2009-10-30 05:11:47 -0400
committer  Alex Elder <aelder@sgi.com>            2009-12-11 16:11:20 -0500
commit     5ec4fabb02fcb5b4a4154a27e4299af5aa0f87ac
tree       5932e385b83c06241f4696c3d2ae6000bd0f2a6f /fs/xfs/linux-2.6/xfs_aops.c
parent     06342cf8adb23464deae0f58f8bcb87818a3bee6
xfs: cleanup data end I/O handlers
Currently we have different end I/O handlers for read vs the different
types of write I/O. But they are all very similar so we could just
use one with a few conditionals and reduce code size a lot.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Alex Elder <aelder@sgi.com>
Signed-off-by: Alex Elder <aelder@sgi.com>
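The pattern the patch applies is worth spelling out: several work handlers that differ only in one or two type checks can be collapsed into a single handler that branches on a type field carried in the work item. Below is a minimal, freestanding sketch of that idea in plain C; the demo_ioend/demo_end_io names and the printf stand-ins are invented for illustration and are not the kernel code in the diff.

#include <stddef.h>
#include <stdio.h>

/*
 * Hypothetical I/O types, mirroring the roles of IOMAP_READ, IOMAP_NEW
 * and IOMAP_UNWRITTEN in the patch.
 */
enum demo_io_type { DEMO_READ, DEMO_WRITTEN, DEMO_UNWRITTEN };

struct demo_ioend {
	enum demo_io_type	io_type;
	int			io_error;
	long long		io_offset;
	size_t			io_size;
};

/*
 * One completion handler with conditionals instead of one handler per
 * I/O type: convert unwritten extents first, then update the on-disk
 * file size for any kind of write, then tear the ioend down.
 */
static void demo_end_io(struct demo_ioend *ioend)
{
	if (ioend->io_type == DEMO_UNWRITTEN && !ioend->io_error) {
		/* stand-in for xfs_iomap_write_unwritten() */
		printf("convert %zu bytes at %lld to written\n",
		       ioend->io_size, ioend->io_offset);
	}

	if (ioend->io_type != DEMO_READ) {
		/* stand-in for xfs_setfilesize() */
		printf("update on-disk file size\n");
	}

	/* stand-in for xfs_destroy_ioend() */
}

int main(void)
{
	struct demo_ioend io = {
		.io_type = DEMO_UNWRITTEN,
		.io_offset = 4096,
		.io_size = 512,
	};

	demo_end_io(&io);
	return 0;
}

The patch below does the same thing with the real ioend: xfs_alloc_ioend() now always points io_work at xfs_end_io(), and that handler branches on ioend->io_type instead of there being four separate work functions.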
Diffstat (limited to 'fs/xfs/linux-2.6/xfs_aops.c')
-rw-r--r--   fs/xfs/linux-2.6/xfs_aops.c   95
1 file changed, 26 insertions, 69 deletions
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index d13fc7391e8b..616ee3febcbb 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -235,71 +235,36 @@ xfs_setfilesize(
 }
 
 /*
- * Buffered IO write completion for delayed allocate extents.
+ * IO write completion.
  */
 STATIC void
-xfs_end_bio_delalloc(
-	struct work_struct	*work)
-{
-	xfs_ioend_t		*ioend =
-		container_of(work, xfs_ioend_t, io_work);
-
-	xfs_setfilesize(ioend);
-	xfs_destroy_ioend(ioend);
-}
-
-/*
- * Buffered IO write completion for regular, written extents.
- */
-STATIC void
-xfs_end_bio_written(
-	struct work_struct	*work)
-{
-	xfs_ioend_t		*ioend =
-		container_of(work, xfs_ioend_t, io_work);
-
-	xfs_setfilesize(ioend);
-	xfs_destroy_ioend(ioend);
-}
-
-/*
- * IO write completion for unwritten extents.
- *
- * Issue transactions to convert a buffer range from unwritten
- * to written extents.
- */
-STATIC void
-xfs_end_bio_unwritten(
+xfs_end_io(
 	struct work_struct	*work)
 {
 	xfs_ioend_t		*ioend =
 		container_of(work, xfs_ioend_t, io_work);
 	struct xfs_inode *ip = XFS_I(ioend->io_inode);
-	xfs_off_t		offset = ioend->io_offset;
-	size_t			size = ioend->io_size;
-
-	if (likely(!ioend->io_error)) {
-		if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
-			int error;
-			error = xfs_iomap_write_unwritten(ip, offset, size);
-			if (error)
-				ioend->io_error = error;
-		}
-		xfs_setfilesize(ioend);
-	}
-	xfs_destroy_ioend(ioend);
-}
 
-/*
- * IO read completion for regular, written extents.
- */
-STATIC void
-xfs_end_bio_read(
-	struct work_struct	*work)
-{
-	xfs_ioend_t		*ioend =
-		container_of(work, xfs_ioend_t, io_work);
+	/*
+	 * For unwritten extents we need to issue transactions to convert a
+	 * range to normal written extens after the data I/O has finished.
+	 */
+	if (ioend->io_type == IOMAP_UNWRITTEN &&
+	    likely(!ioend->io_error && !XFS_FORCED_SHUTDOWN(ip->i_mount))) {
+		int error;
+
+		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
+						  ioend->io_size);
+		if (error)
+			ioend->io_error = error;
+	}
 
+	/*
+	 * We might have to update the on-disk file size after extending
+	 * writes.
+	 */
+	if (ioend->io_type != IOMAP_READ)
+		xfs_setfilesize(ioend);
 	xfs_destroy_ioend(ioend);
 }
 
@@ -314,10 +279,10 @@ xfs_finish_ioend(
 	int			wait)
 {
 	if (atomic_dec_and_test(&ioend->io_remaining)) {
-		struct workqueue_struct *wq = xfsdatad_workqueue;
-		if (ioend->io_work.func == xfs_end_bio_unwritten)
-			wq = xfsconvertd_workqueue;
+		struct workqueue_struct *wq;
 
+		wq = (ioend->io_type == IOMAP_UNWRITTEN) ?
+			xfsconvertd_workqueue : xfsdatad_workqueue;
 		queue_work(wq, &ioend->io_work);
 		if (wait)
 			flush_workqueue(wq);
@@ -355,15 +320,7 @@ xfs_alloc_ioend(
 	ioend->io_offset = 0;
 	ioend->io_size = 0;
 
-	if (type == IOMAP_UNWRITTEN)
-		INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten);
-	else if (type == IOMAP_DELAY)
-		INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc);
-	else if (type == IOMAP_READ)
-		INIT_WORK(&ioend->io_work, xfs_end_bio_read);
-	else
-		INIT_WORK(&ioend->io_work, xfs_end_bio_written);
-
+	INIT_WORK(&ioend->io_work, xfs_end_io);
 	return ioend;
 }
 
@@ -1538,7 +1495,7 @@ xfs_end_io_direct(
 		 * didn't map an unwritten extent so switch it's completion
 		 * handler.
 		 */
-		INIT_WORK(&ioend->io_work, xfs_end_bio_written);
+		ioend->io_type = IOMAP_NEW;
 		xfs_finish_ioend(ioend, 0);
 	}
 