Diffstat (limited to 'fs/xfs/linux-2.6/xfs_aops.c')
 fs/xfs/linux-2.6/xfs_aops.c | 195 +++++----------------
 1 file changed, 50 insertions(+), 145 deletions(-)
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index c2e30eea74dc..66abe36c1213 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -38,6 +38,7 @@
 #include "xfs_rw.h"
 #include "xfs_iomap.h"
 #include "xfs_vnodeops.h"
+#include "xfs_trace.h"
 #include <linux/mpage.h>
 #include <linux/pagevec.h>
 #include <linux/writeback.h>
@@ -76,7 +77,7 @@ xfs_ioend_wake(
 	wake_up(to_ioend_wq(ip));
 }
 
-STATIC void
+void
 xfs_count_page_state(
 	struct page		*page,
 	int			*delalloc,
@@ -98,48 +99,6 @@ xfs_count_page_state(
 	} while ((bh = bh->b_this_page) != head);
 }
 
-#if defined(XFS_RW_TRACE)
-void
-xfs_page_trace(
-	int		tag,
-	struct inode	*inode,
-	struct page	*page,
-	unsigned long	pgoff)
-{
-	xfs_inode_t	*ip;
-	loff_t		isize = i_size_read(inode);
-	loff_t		offset = page_offset(page);
-	int		delalloc = -1, unmapped = -1, unwritten = -1;
-
-	if (page_has_buffers(page))
-		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
-
-	ip = XFS_I(inode);
-	if (!ip->i_rwtrace)
-		return;
-
-	ktrace_enter(ip->i_rwtrace,
-		(void *)((unsigned long)tag),
-		(void *)ip,
-		(void *)inode,
-		(void *)page,
-		(void *)pgoff,
-		(void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
-		(void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
-		(void *)((unsigned long)((isize >> 32) & 0xffffffff)),
-		(void *)((unsigned long)(isize & 0xffffffff)),
-		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
-		(void *)((unsigned long)(offset & 0xffffffff)),
-		(void *)((unsigned long)delalloc),
-		(void *)((unsigned long)unmapped),
-		(void *)((unsigned long)unwritten),
-		(void *)((unsigned long)current_pid()),
-		(void *)NULL);
-}
-#else
-#define xfs_page_trace(tag, inode, page, pgoff)
-#endif
-
 STATIC struct block_device *
 xfs_find_bdev_for_inode(
 	struct xfs_inode	*ip)
@@ -235,71 +194,36 @@ xfs_setfilesize(
 }
 
 /*
- * Buffered IO write completion for delayed allocate extents.
- */
-STATIC void
-xfs_end_bio_delalloc(
-	struct work_struct	*work)
-{
-	xfs_ioend_t		*ioend =
-		container_of(work, xfs_ioend_t, io_work);
-
-	xfs_setfilesize(ioend);
-	xfs_destroy_ioend(ioend);
-}
-
-/*
- * Buffered IO write completion for regular, written extents.
- */
-STATIC void
-xfs_end_bio_written(
-	struct work_struct	*work)
-{
-	xfs_ioend_t		*ioend =
-		container_of(work, xfs_ioend_t, io_work);
-
-	xfs_setfilesize(ioend);
-	xfs_destroy_ioend(ioend);
-}
-
-/*
- * IO write completion for unwritten extents.
- *
- * Issue transactions to convert a buffer range from unwritten
- * to written extents.
+ * IO write completion.
  */
 STATIC void
-xfs_end_bio_unwritten(
+xfs_end_io(
 	struct work_struct	*work)
 {
 	xfs_ioend_t		*ioend =
 		container_of(work, xfs_ioend_t, io_work);
 	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
-	xfs_off_t		offset = ioend->io_offset;
-	size_t			size = ioend->io_size;
-
-	if (likely(!ioend->io_error)) {
-		if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
-			int error;
-			error = xfs_iomap_write_unwritten(ip, offset, size);
-			if (error)
-				ioend->io_error = error;
-		}
-		xfs_setfilesize(ioend);
-	}
-	xfs_destroy_ioend(ioend);
-}
 
-/*
- * IO read completion for regular, written extents.
- */
-STATIC void
-xfs_end_bio_read(
-	struct work_struct	*work)
-{
-	xfs_ioend_t		*ioend =
-		container_of(work, xfs_ioend_t, io_work);
+	/*
+	 * For unwritten extents we need to issue transactions to convert a
+	 * range to normal written extents after the data I/O has finished.
+	 */
+	if (ioend->io_type == IOMAP_UNWRITTEN &&
+	    likely(!ioend->io_error && !XFS_FORCED_SHUTDOWN(ip->i_mount))) {
+		int error;
 
+		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
+						  ioend->io_size);
+		if (error)
+			ioend->io_error = error;
+	}
+
+	/*
+	 * We might have to update the on-disk file size after extending
+	 * writes.
+	 */
+	if (ioend->io_type != IOMAP_READ)
+		xfs_setfilesize(ioend);
 	xfs_destroy_ioend(ioend);
 }
 
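The hunk above merges the three near-identical completion handlers (xfs_end_bio_delalloc, xfs_end_bio_written, xfs_end_bio_unwritten, plus xfs_end_bio_read) into a single xfs_end_io() that branches on ioend->io_type. Below is a minimal, self-contained userspace C sketch of that dispatch-on-type pattern; the names (io_type, end_io, and the printf placeholders) are hypothetical stand-ins, not kernel APIs.

#include <stdio.h>

/* Hypothetical stand-ins for the kernel's IOMAP_* type tags. */
enum io_type { IO_READ, IO_WRITTEN, IO_UNWRITTEN };

struct ioend {
	enum io_type	type;
	long long	offset;
	long long	size;
	int		error;
};

/* One completion handler replaces three: branch on the type tag. */
static void end_io(struct ioend *io)
{
	/* Unwritten extents need a conversion step once data I/O is done. */
	if (io->type == IO_UNWRITTEN && !io->error)
		printf("convert [%lld, %lld) to written extents\n",
		       io->offset, io->offset + io->size);

	/* Only writes can move the on-disk file size forward. */
	if (io->type != IO_READ)
		printf("update on-disk file size\n");
}

int main(void)
{
	struct ioend io = { IO_UNWRITTEN, 0, 4096, 0 };

	end_io(&io);
	return 0;
}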
@@ -314,10 +238,10 @@ xfs_finish_ioend(
 	int		wait)
 {
 	if (atomic_dec_and_test(&ioend->io_remaining)) {
-		struct workqueue_struct *wq = xfsdatad_workqueue;
-		if (ioend->io_work.func == xfs_end_bio_unwritten)
-			wq = xfsconvertd_workqueue;
+		struct workqueue_struct	*wq;
 
+		wq = (ioend->io_type == IOMAP_UNWRITTEN) ?
+			xfsconvertd_workqueue : xfsdatad_workqueue;
 		queue_work(wq, &ioend->io_work);
 		if (wait)
 			flush_workqueue(wq);
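With a single work function, xfs_finish_ioend() can no longer identify unwritten-extent completions by comparing io_work.func against xfs_end_bio_unwritten, so the hunk above keys the workqueue choice off ioend->io_type instead, which survives the handler consolidation. A small userspace sketch of that selection, with hypothetical names:

#include <stdio.h>

enum io_type { IO_READ, IO_WRITTEN, IO_UNWRITTEN };

/* Pick the queue by type tag, not by work-function address. */
static const char *pick_queue(enum io_type type)
{
	return type == IO_UNWRITTEN ? "xfsconvertd" : "xfsdatad";
}

int main(void)
{
	printf("unwritten completion -> %s\n", pick_queue(IO_UNWRITTEN));
	printf("written completion   -> %s\n", pick_queue(IO_WRITTEN));
	return 0;
}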
@@ -355,15 +279,7 @@ xfs_alloc_ioend(
 	ioend->io_offset = 0;
 	ioend->io_size = 0;
 
-	if (type == IOMAP_UNWRITTEN)
-		INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten);
-	else if (type == IOMAP_DELAY)
-		INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc);
-	else if (type == IOMAP_READ)
-		INIT_WORK(&ioend->io_work, xfs_end_bio_read);
-	else
-		INIT_WORK(&ioend->io_work, xfs_end_bio_written);
-
+	INIT_WORK(&ioend->io_work, xfs_end_io);
 	return ioend;
 }
 
@@ -380,7 +296,7 @@ xfs_map_blocks(
 	return -xfs_iomap(XFS_I(inode), offset, count, flags, mapp, &nmaps);
 }
 
-STATIC_INLINE int
+STATIC int
 xfs_iomap_valid(
 	xfs_iomap_t		*iomapp,
 	loff_t			offset)
@@ -412,8 +328,9 @@ xfs_end_bio(
 
 STATIC void
 xfs_submit_ioend_bio(
-	xfs_ioend_t	*ioend,
-	struct bio	*bio)
+	struct writeback_control *wbc,
+	xfs_ioend_t		*ioend,
+	struct bio		*bio)
 {
 	atomic_inc(&ioend->io_remaining);
 	bio->bi_private = ioend;
@@ -426,7 +343,8 @@ xfs_submit_ioend_bio(
 	if (xfs_ioend_new_eof(ioend))
 		xfs_mark_inode_dirty_sync(XFS_I(ioend->io_inode));
 
-	submit_bio(WRITE, bio);
+	submit_bio(wbc->sync_mode == WB_SYNC_ALL ?
+		   WRITE_SYNC_PLUG : WRITE, bio);
 	ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
 	bio_put(bio);
 }
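The writeback_control is now threaded down to bio submission so that data-integrity writeback (wbc->sync_mode == WB_SYNC_ALL, as used by fsync and sync) is submitted as WRITE_SYNC_PLUG, marking the bio synchronous for the I/O scheduler while still allowing plugging; background writeback keeps plain WRITE. A hedged userspace sketch of the flag selection (names are stand-ins):

#include <stdio.h>

enum sync_mode { WB_SYNC_NONE, WB_SYNC_ALL };

/* Data-integrity writeback gets the sync hint; background
 * writeback stays a plain asynchronous WRITE. */
static const char *write_flags(enum sync_mode mode)
{
	return mode == WB_SYNC_ALL ? "WRITE_SYNC_PLUG" : "WRITE";
}

int main(void)
{
	printf("background writeback: %s\n", write_flags(WB_SYNC_NONE));
	printf("fsync writeback:      %s\n", write_flags(WB_SYNC_ALL));
	return 0;
}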
@@ -505,6 +423,7 @@ static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
  */
 STATIC void
 xfs_submit_ioend(
+	struct writeback_control *wbc,
 	xfs_ioend_t		*ioend)
 {
 	xfs_ioend_t		*head = ioend;
@@ -533,19 +452,19 @@ xfs_submit_ioend(
  retry:
 			bio = xfs_alloc_ioend_bio(bh);
 		} else if (bh->b_blocknr != lastblock + 1) {
-			xfs_submit_ioend_bio(ioend, bio);
+			xfs_submit_ioend_bio(wbc, ioend, bio);
 			goto retry;
 		}
 
 		if (bio_add_buffer(bio, bh) != bh->b_size) {
-			xfs_submit_ioend_bio(ioend, bio);
+			xfs_submit_ioend_bio(wbc, ioend, bio);
 			goto retry;
 		}
 
 		lastblock = bh->b_blocknr;
 	}
 	if (bio)
-		xfs_submit_ioend_bio(ioend, bio);
+		xfs_submit_ioend_bio(wbc, ioend, bio);
 		xfs_finish_ioend(ioend, 0);
 	} while ((ioend = next) != NULL);
 }
@@ -904,16 +823,9 @@ xfs_convert_page(
 
 	if (startio) {
 		if (count) {
-			struct backing_dev_info *bdi;
-
-			bdi = inode->i_mapping->backing_dev_info;
 			wbc->nr_to_write--;
-			if (bdi_write_congested(bdi)) {
-				wbc->encountered_congestion = 1;
-				done = 1;
-			} else if (wbc->nr_to_write <= 0) {
+			if (wbc->nr_to_write <= 0)
 				done = 1;
-			}
 		}
 		xfs_start_page_writeback(page, !page_dirty, count);
 	}
@@ -1198,7 +1110,7 @@ xfs_page_state_convert(
 	}
 
 	if (iohead)
-		xfs_submit_ioend(iohead);
+		xfs_submit_ioend(wbc, iohead);
 
 	return page_dirty;
 
@@ -1249,7 +1161,7 @@ xfs_vm_writepage(
 	int			delalloc, unmapped, unwritten;
 	struct inode		*inode = page->mapping->host;
 
-	xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0);
+	trace_xfs_writepage(inode, page, 0);
 
 	/*
 	 * We need a transaction if:
@@ -1354,7 +1266,7 @@ xfs_vm_releasepage(
 		.nr_to_write = 1,
 	};
 
-	xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, 0);
+	trace_xfs_releasepage(inode, page, 0);
 
 	if (!page_has_buffers(page))
 		return 0;
@@ -1535,7 +1447,7 @@ xfs_end_io_direct(
 	 * didn't map an unwritten extent so switch its completion
 	 * handler.
 	 */
-	INIT_WORK(&ioend->io_work, xfs_end_bio_written);
+	ioend->io_type = IOMAP_NEW;
 	xfs_finish_ioend(ioend, 0);
 	}
 
@@ -1562,19 +1474,13 @@ xfs_vm_direct_IO(
 
 	bdev = xfs_find_bdev_for_inode(XFS_I(inode));
 
-	if (rw == WRITE) {
-		iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN);
-		ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
-			bdev, iov, offset, nr_segs,
-			xfs_get_blocks_direct,
-			xfs_end_io_direct);
-	} else {
-		iocb->private = xfs_alloc_ioend(inode, IOMAP_READ);
-		ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
-			bdev, iov, offset, nr_segs,
-			xfs_get_blocks_direct,
-			xfs_end_io_direct);
-	}
+	iocb->private = xfs_alloc_ioend(inode, rw == WRITE ?
+					IOMAP_UNWRITTEN : IOMAP_READ);
+
+	ret = blockdev_direct_IO_no_locking(rw, iocb, inode, bdev, iov,
+					    offset, nr_segs,
+					    xfs_get_blocks_direct,
+					    xfs_end_io_direct);
 
 	if (unlikely(ret != -EIOCBQUEUED && iocb->private))
 		xfs_destroy_ioend(iocb->private);
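The hunk above collapses the separate read and write direct I/O paths into one call to blockdev_direct_IO_no_locking(); the transfer direction now only selects the ioend type, since a direct write may map unwritten extents while a read never converts anything. A minimal sketch of that selection (hypothetical names):

#include <stdio.h>

enum io_type { IO_READ, IO_UNWRITTEN };
enum rw_dir { DIR_READ, DIR_WRITE };

/* Writes may map unwritten extents, so their ioend starts in that
 * state; reads never convert extents. */
static enum io_type dio_ioend_type(enum rw_dir dir)
{
	return dir == DIR_WRITE ? IO_UNWRITTEN : IO_READ;
}

int main(void)
{
	printf("write ioend type: %d\n", dio_ioend_type(DIR_WRITE));
	printf("read ioend type:  %d\n", dio_ioend_type(DIR_READ));
	return 0;
}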
@@ -1634,8 +1540,7 @@ xfs_vm_invalidatepage(
 	struct page		*page,
 	unsigned long		offset)
 {
-	xfs_page_trace(XFS_INVALIDPAGE_ENTER,
-			page->mapping->host, page, offset);
+	trace_xfs_invalidatepage(page->mapping->host, page, offset);
 	block_invalidatepage(page, offset);
 }
 