Diffstat (limited to 'fs/f2fs/data.c')
-rw-r--r--	fs/f2fs/data.c	428
1 file changed, 228 insertions(+), 200 deletions(-)
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 5c06db17e41f..e5c762b37239 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -34,9 +34,9 @@ static void f2fs_read_end_io(struct bio *bio)

 	if (f2fs_bio_encrypted(bio)) {
 		if (bio->bi_error) {
-			f2fs_release_crypto_ctx(bio->bi_private);
+			fscrypt_release_ctx(bio->bi_private);
 		} else {
-			f2fs_end_io_crypto_work(bio->bi_private, bio);
+			fscrypt_decrypt_bio_pages(bio->bi_private, bio);
 			return;
 		}
 	}
@@ -64,10 +64,9 @@ static void f2fs_write_end_io(struct bio *bio)
 	bio_for_each_segment_all(bvec, bio, i) {
 		struct page *page = bvec->bv_page;

-		f2fs_restore_and_release_control_page(&page);
+		fscrypt_pullback_bio_page(&page, true);

 		if (unlikely(bio->bi_error)) {
-			set_page_dirty(page);
 			set_bit(AS_EIO, &page->mapping->flags);
 			f2fs_stop_checkpoint(sbi);
 		}
@@ -75,8 +74,7 @@ static void f2fs_write_end_io(struct bio *bio)
 		dec_page_count(sbi, F2FS_WRITEBACK);
 	}

-	if (!get_pages(sbi, F2FS_WRITEBACK) &&
-			!list_empty(&sbi->cp_wait.task_list))
+	if (!get_pages(sbi, F2FS_WRITEBACK) && wq_has_sleeper(&sbi->cp_wait))
 		wake_up(&sbi->cp_wait);

 	bio_put(bio);
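The `wq_has_sleeper()` call folds the open-coded `!list_empty(&sbi->cp_wait.task_list)` test into a helper that also issues the memory barrier needed to pair with the sleeper's queueing; without that ordering the waker can miss a concurrent waiter. A minimal userspace sketch of the pattern (illustrative only, not kernel code; all names here are invented):

```c
/* Model of the "check for sleepers before wake_up" pattern. The waker's
 * decrement must be ordered before its read of the waiter flag; seq_cst
 * atomics stand in for the kernel's smp_mb() inside wq_has_sleeper(). */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int nr_writeback;
static atomic_bool has_waiter;

static bool wq_has_sleeper_model(void)
{
	/* pairs with the waiter's store below */
	return atomic_load(&has_waiter);
}

static void end_io_model(void)
{
	/* last writeback page completed and somebody is waiting? */
	if (atomic_fetch_sub(&nr_writeback, 1) == 1 &&
	    wq_has_sleeper_model()) {
		/* wake_up(&cp_wait) would go here */
	}
}

static void waiter_model(void)
{
	atomic_store(&has_waiter, true);
	while (atomic_load(&nr_writeback) > 0)
		;	/* in the kernel this sleeps on the waitqueue */
	atomic_store(&has_waiter, false);
}
```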
@@ -116,8 +114,54 @@ static void __submit_merged_bio(struct f2fs_bio_info *io)
 	io->bio = NULL;
 }

-void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
-					enum page_type type, int rw)
+static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode,
+						struct page *page, nid_t ino)
+{
+	struct bio_vec *bvec;
+	struct page *target;
+	int i;
+
+	if (!io->bio)
+		return false;
+
+	if (!inode && !page && !ino)
+		return true;
+
+	bio_for_each_segment_all(bvec, io->bio, i) {
+
+		if (bvec->bv_page->mapping)
+			target = bvec->bv_page;
+		else
+			target = fscrypt_control_page(bvec->bv_page);
+
+		if (inode && inode == target->mapping->host)
+			return true;
+		if (page && page == target)
+			return true;
+		if (ino && ino == ino_of_node(target))
+			return true;
+	}
+
+	return false;
+}
+
+static bool has_merged_page(struct f2fs_sb_info *sbi, struct inode *inode,
+						struct page *page, nid_t ino,
+						enum page_type type)
+{
+	enum page_type btype = PAGE_TYPE_OF_BIO(type);
+	struct f2fs_bio_info *io = &sbi->write_io[btype];
+	bool ret;
+
+	down_read(&io->io_rwsem);
+	ret = __has_merged_page(io, inode, page, ino);
+	up_read(&io->io_rwsem);
+	return ret;
+}
+
+static void __f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
+				struct inode *inode, struct page *page,
+				nid_t ino, enum page_type type, int rw)
 {
 	enum page_type btype = PAGE_TYPE_OF_BIO(type);
 	struct f2fs_bio_info *io;
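`__has_merged_page()` walks the pages already queued in the pending merged write bio and reports whether any of them belongs to the given inode, page, or node ino; for encrypted data it maps the bounce page back through `fscrypt_control_page()` first. A toy userspace model of the filter (hypothetical types, illustration only):

```c
/* Minimal model of the merged-bio filter: a pending "bio" is just an
 * array of queued pages, and the check answers "does flushing this bio
 * actually help the caller?" */
#include <stdbool.h>
#include <stddef.h>

struct model_page {
	const void *owner;	/* stands in for page->mapping->host */
};

struct model_bio {
	struct model_page *pages[64];
	size_t cnt;
};

static bool has_merged_page_model(const struct model_bio *bio,
				  const void *inode,
				  const struct model_page *page)
{
	if (!bio->cnt)
		return false;
	if (!inode && !page)		/* no filter: any pending page matches */
		return true;
	for (size_t i = 0; i < bio->cnt; i++) {
		const struct model_page *target = bio->pages[i];

		if (inode && inode == target->owner)
			return true;
		if (page && page == target)
			return true;
	}
	return false;
}
```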
@@ -126,6 +170,9 @@ void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,

 	down_write(&io->io_rwsem);

+	if (!__has_merged_page(io, inode, page, ino))
+		goto out;
+
 	/* change META to META_FLUSH in the checkpoint procedure */
 	if (type >= META_FLUSH) {
 		io->fio.type = META_FLUSH;
@@ -135,9 +182,31 @@ void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
 		io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
 	}
 	__submit_merged_bio(io);
+out:
 	up_write(&io->io_rwsem);
 }

+void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi, enum page_type type,
+									int rw)
+{
+	__f2fs_submit_merged_bio(sbi, NULL, NULL, 0, type, rw);
+}
+
+void f2fs_submit_merged_bio_cond(struct f2fs_sb_info *sbi,
+				struct inode *inode, struct page *page,
+				nid_t ino, enum page_type type, int rw)
+{
+	if (has_merged_page(sbi, inode, page, ino, type))
+		__f2fs_submit_merged_bio(sbi, inode, page, ino, type, rw);
+}
+
+void f2fs_flush_merged_bios(struct f2fs_sb_info *sbi)
+{
+	f2fs_submit_merged_bio(sbi, DATA, WRITE);
+	f2fs_submit_merged_bio(sbi, NODE, WRITE);
+	f2fs_submit_merged_bio(sbi, META, WRITE);
+}
+
 /*
  * Fill the locked page with data located in the block address.
  * Return unlocked page.
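`f2fs_submit_merged_bio_cond()` does a cheap check under the read side of `io_rwsem` and, only on a hit, calls `__f2fs_submit_merged_bio()`, which re-checks under the write lock before flushing. A sketch of that double-checked pattern with POSIX rwlocks (stand-in names, not the kernel API):

```c
/* Double-checked flush: uncontended callers never take the exclusive
 * lock, and the re-check under the write lock handles races where the
 * bio was flushed between the two checks. */
#include <pthread.h>
#include <stdbool.h>

static pthread_rwlock_t io_rwsem = PTHREAD_RWLOCK_INITIALIZER;
static bool bio_pending = true;		/* toy stand-in for io->bio state */

static bool need_flush(void)		/* plays __has_merged_page() */
{
	return bio_pending;
}

static void do_flush(void)		/* plays __submit_merged_bio() */
{
	bio_pending = false;
}

static void submit_merged_cond_model(void)
{
	bool hit;

	pthread_rwlock_rdlock(&io_rwsem);
	hit = need_flush();
	pthread_rwlock_unlock(&io_rwsem);
	if (!hit)
		return;

	pthread_rwlock_wrlock(&io_rwsem);
	if (need_flush())		/* re-check: state may have changed */
		do_flush();
	pthread_rwlock_unlock(&io_rwsem);
}
```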
@@ -145,13 +214,14 @@ void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
 int f2fs_submit_page_bio(struct f2fs_io_info *fio)
 {
 	struct bio *bio;
-	struct page *page = fio->encrypted_page ? fio->encrypted_page : fio->page;
+	struct page *page = fio->encrypted_page ?
+			fio->encrypted_page : fio->page;

 	trace_f2fs_submit_page_bio(page, fio);
 	f2fs_trace_ios(fio, 0);

 	/* Allocate a new bio */
-	bio = __bio_alloc(fio->sbi, fio->blk_addr, 1, is_read_io(fio->rw));
+	bio = __bio_alloc(fio->sbi, fio->new_blkaddr, 1, is_read_io(fio->rw));

 	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
 		bio_put(bio);
@@ -172,21 +242,24 @@ void f2fs_submit_page_mbio(struct f2fs_io_info *fio)

 	io = is_read ? &sbi->read_io : &sbi->write_io[btype];

-	verify_block_addr(sbi, fio->blk_addr);
+	if (fio->old_blkaddr != NEW_ADDR)
+		verify_block_addr(sbi, fio->old_blkaddr);
+	verify_block_addr(sbi, fio->new_blkaddr);

 	down_write(&io->io_rwsem);

 	if (!is_read)
 		inc_page_count(sbi, F2FS_WRITEBACK);

-	if (io->bio && (io->last_block_in_bio != fio->blk_addr - 1 ||
+	if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 ||
 						io->fio.rw != fio->rw))
 		__submit_merged_bio(io);
 alloc_new:
 	if (io->bio == NULL) {
 		int bio_blocks = MAX_BIO_BLOCKS(sbi);

-		io->bio = __bio_alloc(sbi, fio->blk_addr, bio_blocks, is_read);
+		io->bio = __bio_alloc(sbi, fio->new_blkaddr,
+						bio_blocks, is_read);
 		io->fio = *fio;
 	}

@@ -198,7 +271,7 @@ alloc_new:
 		goto alloc_new;
 	}

-	io->last_block_in_bio = fio->blk_addr;
+	io->last_block_in_bio = fio->new_blkaddr;
 	f2fs_trace_ios(fio, 0);

 	up_write(&io->io_rwsem);
@@ -218,7 +291,7 @@ void set_data_blkaddr(struct dnode_of_data *dn)
 	struct page *node_page = dn->node_page;
 	unsigned int ofs_in_node = dn->ofs_in_node;

-	f2fs_wait_on_page_writeback(node_page, NODE);
+	f2fs_wait_on_page_writeback(node_page, NODE, true);

 	rn = F2FS_NODE(node_page);

@@ -229,6 +302,13 @@ void set_data_blkaddr(struct dnode_of_data *dn)
 	dn->node_changed = true;
 }

+void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
+{
+	dn->data_blkaddr = blkaddr;
+	set_data_blkaddr(dn);
+	f2fs_update_extent_cache(dn);
+}
+
 int reserve_new_block(struct dnode_of_data *dn)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
@@ -332,7 +412,7 @@ got_it:
 		return page;
 	}

-	fio.blk_addr = dn.data_blkaddr;
+	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
 	fio.page = page;
 	err = f2fs_submit_page_bio(&fio);
 	if (err)
@@ -461,7 +541,6 @@ got_it:
 static int __allocate_data_block(struct dnode_of_data *dn)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
-	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
 	struct f2fs_summary sum;
 	struct node_info ni;
 	int seg = CURSEG_WARM_DATA;
@@ -489,7 +568,7 @@ alloc:
 	set_data_blkaddr(dn);

 	/* update i_size */
-	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
+	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
 							dn->ofs_in_node;
 	if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT))
 		i_size_write(dn->inode,
@@ -497,67 +576,33 @@ alloc:
 	return 0;
 }

-static int __allocate_data_blocks(struct inode *inode, loff_t offset,
-							size_t count)
+ssize_t f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
 {
-	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-	struct dnode_of_data dn;
-	u64 start = F2FS_BYTES_TO_BLK(offset);
-	u64 len = F2FS_BYTES_TO_BLK(count);
-	bool allocated;
-	u64 end_offset;
-	int err = 0;
-
-	while (len) {
-		f2fs_lock_op(sbi);
-
-		/* When reading holes, we need its node page */
-		set_new_dnode(&dn, inode, NULL, NULL, 0);
-		err = get_dnode_of_data(&dn, start, ALLOC_NODE);
-		if (err)
-			goto out;
-
-		allocated = false;
-		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
-
-		while (dn.ofs_in_node < end_offset && len) {
-			block_t blkaddr;
-
-			if (unlikely(f2fs_cp_error(sbi))) {
-				err = -EIO;
-				goto sync_out;
-			}
-
-			blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
-			if (blkaddr == NULL_ADDR || blkaddr == NEW_ADDR) {
-				err = __allocate_data_block(&dn);
-				if (err)
-					goto sync_out;
-				allocated = true;
-			}
-			len--;
-			start++;
-			dn.ofs_in_node++;
-		}
+	struct inode *inode = file_inode(iocb->ki_filp);
+	struct f2fs_map_blocks map;
+	ssize_t ret = 0;

-		if (allocated)
-			sync_inode_page(&dn);
+	map.m_lblk = F2FS_BYTES_TO_BLK(iocb->ki_pos);
+	map.m_len = F2FS_BLK_ALIGN(iov_iter_count(from));
+	map.m_next_pgofs = NULL;

-		f2fs_put_dnode(&dn);
-		f2fs_unlock_op(sbi);
+	if (f2fs_encrypted_inode(inode))
+		return 0;

-		f2fs_balance_fs(sbi, dn.node_changed);
+	if (iocb->ki_flags & IOCB_DIRECT) {
+		ret = f2fs_convert_inline_inode(inode);
+		if (ret)
+			return ret;
+		return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
 	}
-	return err;
-
-sync_out:
-	if (allocated)
-		sync_inode_page(&dn);
-	f2fs_put_dnode(&dn);
-out:
-	f2fs_unlock_op(sbi);
-	f2fs_balance_fs(sbi, dn.node_changed);
-	return err;
+	if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA) {
+		ret = f2fs_convert_inline_inode(inode);
+		if (ret)
+			return ret;
+	}
+	if (!f2fs_has_inline_data(inode))
+		return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
+	return ret;
 }

 /*
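`f2fs_preallocate_blocks()` converts the write's byte range into a block range before mapping: `F2FS_BYTES_TO_BLK()` floors the starting position and `F2FS_BLK_ALIGN()` rounds the length up. A small standalone example of that arithmetic, assuming 4 KiB blocks (the macros here are simplified stand-ins for the kernel ones):

```c
/* Byte-to-block conversion as used when filling f2fs_map_blocks. */
#include <stdint.h>
#include <stdio.h>

#define BLKBITS 12				/* assumed 4 KiB block */
#define BYTES_TO_BLK(b)	((b) >> BLKBITS)	/* floor */
#define BLK_ALIGN(b)	(BYTES_TO_BLK((b) + (1 << BLKBITS) - 1))	/* round up */

int main(void)
{
	uint64_t pos = 5000, len = 10000;	/* write at byte 5000, 10000 bytes */

	/* first block touched, and a block count that covers the bytes */
	printf("m_lblk=%llu m_len=%llu\n",
	       (unsigned long long)BYTES_TO_BLK(pos),	/* 1 */
	       (unsigned long long)BLK_ALIGN(len));	/* 3 */
	return 0;
}
```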
@@ -588,13 +633,14 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
 	/* it only supports block size == page size */
 	pgofs = (pgoff_t)map->m_lblk;

-	if (f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
+	if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
 		map->m_pblk = ei.blk + pgofs - ei.fofs;
 		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
 		map->m_flags = F2FS_MAP_MAPPED;
 		goto out;
 	}

+next_dnode:
 	if (create)
 		f2fs_lock_op(sbi);

@@ -602,120 +648,98 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
 	set_new_dnode(&dn, inode, NULL, NULL, 0);
 	err = get_dnode_of_data(&dn, pgofs, mode);
 	if (err) {
-		if (err == -ENOENT)
+		if (err == -ENOENT) {
 			err = 0;
+			if (map->m_next_pgofs)
+				*map->m_next_pgofs =
+					get_next_page_offset(&dn, pgofs);
+		}
 		goto unlock_out;
 	}

-	if (dn.data_blkaddr == NEW_ADDR || dn.data_blkaddr == NULL_ADDR) {
+	end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
+
+next_block:
+	blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
+
+	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
 		if (create) {
 			if (unlikely(f2fs_cp_error(sbi))) {
 				err = -EIO;
-				goto put_out;
+				goto sync_out;
+			}
+			if (flag == F2FS_GET_BLOCK_PRE_AIO) {
+				if (blkaddr == NULL_ADDR)
+					err = reserve_new_block(&dn);
+			} else {
+				err = __allocate_data_block(&dn);
 			}
-			err = __allocate_data_block(&dn);
 			if (err)
-				goto put_out;
+				goto sync_out;
 			allocated = true;
 			map->m_flags = F2FS_MAP_NEW;
+			blkaddr = dn.data_blkaddr;
 		} else {
+			if (flag == F2FS_GET_BLOCK_FIEMAP &&
+						blkaddr == NULL_ADDR) {
+				if (map->m_next_pgofs)
+					*map->m_next_pgofs = pgofs + 1;
+			}
 			if (flag != F2FS_GET_BLOCK_FIEMAP ||
-					dn.data_blkaddr != NEW_ADDR) {
+						blkaddr != NEW_ADDR) {
 				if (flag == F2FS_GET_BLOCK_BMAP)
 					err = -ENOENT;
-				goto put_out;
+				goto sync_out;
 			}
-
-			/*
-			 * preallocated unwritten block should be mapped
-			 * for fiemap.
-			 */
-			if (dn.data_blkaddr == NEW_ADDR)
-				map->m_flags = F2FS_MAP_UNWRITTEN;
 		}
 	}

-	map->m_flags |= F2FS_MAP_MAPPED;
-	map->m_pblk = dn.data_blkaddr;
-	map->m_len = 1;
+	if (map->m_len == 0) {
+		/* preallocated unwritten block should be mapped for fiemap. */
+		if (blkaddr == NEW_ADDR)
+			map->m_flags |= F2FS_MAP_UNWRITTEN;
+		map->m_flags |= F2FS_MAP_MAPPED;
+
+		map->m_pblk = blkaddr;
+		map->m_len = 1;
+	} else if ((map->m_pblk != NEW_ADDR &&
+			blkaddr == (map->m_pblk + ofs)) ||
+			(map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
+			flag == F2FS_GET_BLOCK_PRE_DIO ||
+			flag == F2FS_GET_BLOCK_PRE_AIO) {
+		ofs++;
+		map->m_len++;
+	} else {
+		goto sync_out;
+	}

-	end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
 	dn.ofs_in_node++;
 	pgofs++;

-get_next:
-	if (map->m_len >= maxblocks)
-		goto sync_out;
+	if (map->m_len < maxblocks) {
+		if (dn.ofs_in_node < end_offset)
+			goto next_block;

-	if (dn.ofs_in_node >= end_offset) {
 		if (allocated)
 			sync_inode_page(&dn);
-		allocated = false;
 		f2fs_put_dnode(&dn);

 		if (create) {
 			f2fs_unlock_op(sbi);
-			f2fs_balance_fs(sbi, dn.node_changed);
-			f2fs_lock_op(sbi);
-		}
-
-		set_new_dnode(&dn, inode, NULL, NULL, 0);
-		err = get_dnode_of_data(&dn, pgofs, mode);
-		if (err) {
-			if (err == -ENOENT)
-				err = 0;
-			goto unlock_out;
-		}
-
-		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
-	}
-
-	blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
-
-	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
-		if (create) {
-			if (unlikely(f2fs_cp_error(sbi))) {
-				err = -EIO;
-				goto sync_out;
-			}
-			err = __allocate_data_block(&dn);
-			if (err)
-				goto sync_out;
-			allocated = true;
-			map->m_flags |= F2FS_MAP_NEW;
-			blkaddr = dn.data_blkaddr;
-		} else {
-			/*
-			 * we only merge preallocated unwritten blocks
-			 * for fiemap.
-			 */
-			if (flag != F2FS_GET_BLOCK_FIEMAP ||
-					blkaddr != NEW_ADDR)
-				goto sync_out;
+			f2fs_balance_fs(sbi, allocated);
 		}
-	}
-
-	/* Give more consecutive addresses for the readahead */
-	if ((map->m_pblk != NEW_ADDR &&
-			blkaddr == (map->m_pblk + ofs)) ||
-			(map->m_pblk == NEW_ADDR &&
-			blkaddr == NEW_ADDR)) {
-		ofs++;
-		dn.ofs_in_node++;
-		pgofs++;
-		map->m_len++;
-		goto get_next;
+		allocated = false;
+		goto next_dnode;
 	}

 sync_out:
 	if (allocated)
 		sync_inode_page(&dn);
-put_out:
 	f2fs_put_dnode(&dn);
 unlock_out:
 	if (create) {
 		f2fs_unlock_op(sbi);
-		f2fs_balance_fs(sbi, dn.node_changed);
+		f2fs_balance_fs(sbi, allocated);
 	}
 out:
 	trace_f2fs_map_blocks(inode, map, err);
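The rewritten loop grows `map->m_len` only while each new block is physically contiguous with the current extent (`blkaddr == map->m_pblk + ofs`), or while it is preallocating for DIO/AIO. A userspace sketch of that merge rule (illustrative; the real code jumps to `sync_out` instead of starting a new extent):

```c
/* Merge physically consecutive block addresses into extents. */
#include <stdio.h>

struct map { unsigned long long m_pblk, m_len; };

int main(void)
{
	unsigned long long blocks[] = { 100, 101, 102, 200, 201 };
	struct map map = { 0, 0 };
	unsigned long long ofs = 0;

	for (int i = 0; i < 5; i++) {
		if (map.m_len == 0) {			/* first block: open an extent */
			map.m_pblk = blocks[i];
			map.m_len = 1;
			ofs = 1;
		} else if (blocks[i] == map.m_pblk + ofs) {	/* contiguous: extend */
			ofs++;
			map.m_len++;
		} else {				/* gap: emit and restart */
			printf("extent: pblk=%llu len=%llu\n", map.m_pblk, map.m_len);
			map.m_pblk = blocks[i];
			map.m_len = 1;
			ofs = 1;
		}
	}
	printf("extent: pblk=%llu len=%llu\n", map.m_pblk, map.m_len);
	return 0;	/* prints 100/3 then 200/2 */
}
```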
@@ -723,13 +747,15 @@ out:
 }

 static int __get_data_block(struct inode *inode, sector_t iblock,
-			struct buffer_head *bh, int create, int flag)
+			struct buffer_head *bh, int create, int flag,
+			pgoff_t *next_pgofs)
 {
 	struct f2fs_map_blocks map;
 	int ret;

 	map.m_lblk = iblock;
 	map.m_len = bh->b_size >> inode->i_blkbits;
+	map.m_next_pgofs = next_pgofs;

 	ret = f2fs_map_blocks(inode, &map, create, flag);
 	if (!ret) {
@@ -741,16 +767,18 @@ static int __get_data_block(struct inode *inode, sector_t iblock,
 }

 static int get_data_block(struct inode *inode, sector_t iblock,
-			struct buffer_head *bh_result, int create, int flag)
+			struct buffer_head *bh_result, int create, int flag,
+			pgoff_t *next_pgofs)
 {
-	return __get_data_block(inode, iblock, bh_result, create, flag);
+	return __get_data_block(inode, iblock, bh_result, create,
+							flag, next_pgofs);
 }

 static int get_data_block_dio(struct inode *inode, sector_t iblock,
 			struct buffer_head *bh_result, int create)
 {
 	return __get_data_block(inode, iblock, bh_result, create,
-						F2FS_GET_BLOCK_DIO);
+						F2FS_GET_BLOCK_DIO, NULL);
 }

 static int get_data_block_bmap(struct inode *inode, sector_t iblock,
@@ -761,7 +789,7 @@ static int get_data_block_bmap(struct inode *inode, sector_t iblock,
 		return -EFBIG;

 	return __get_data_block(inode, iblock, bh_result, create,
-						F2FS_GET_BLOCK_BMAP);
+						F2FS_GET_BLOCK_BMAP, NULL);
 }

 static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
@@ -779,6 +807,7 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 {
 	struct buffer_head map_bh;
 	sector_t start_blk, last_blk;
+	pgoff_t next_pgofs;
 	loff_t isize;
 	u64 logical = 0, phys = 0, size = 0;
 	u32 flags = 0;
@@ -814,14 +843,15 @@ next:
 	map_bh.b_size = len;

 	ret = get_data_block(inode, start_blk, &map_bh, 0,
-					F2FS_GET_BLOCK_FIEMAP);
+					F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
 	if (ret)
 		goto out;

 	/* HOLE */
 	if (!buffer_mapped(&map_bh)) {
+		start_blk = next_pgofs;
 		/* Go through holes util pass the EOF */
-		if (blk_to_logical(inode, start_blk++) < isize)
+		if (blk_to_logical(inode, start_blk) < isize)
 			goto prep_next;
 		/* Found a hole beyond isize means no more extents.
 		 * Note that the premise is that filesystems don't
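With `next_pgofs`, a fiemap lookup that lands in a hole now learns the next offset worth probing instead of advancing one block per call (`start_blk++`). A toy model of the difference (the "allocation map" is a stand-in for the node tree):

```c
/* Hole skipping: one jump to the next mapped block instead of a probe
 * per block. */
#include <stdio.h>

#define NBLKS 16
static const int mapped[NBLKS] = { 1,1,0,0,0,0,0,0,0,0,0,0,1,1,0,0 };

static int next_mapped(int blk)		/* plays the role of *next_pgofs */
{
	while (blk < NBLKS && !mapped[blk])
		blk++;
	return blk;
}

int main(void)
{
	for (int blk = 0; blk < NBLKS; ) {
		if (!mapped[blk]) {
			blk = next_mapped(blk);	/* one jump, not ten probes */
			continue;
		}
		printf("extent starts at block %d\n", blk);
		while (blk < NBLKS && mapped[blk])
			blk++;
	}
	return 0;
}
```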
@@ -889,6 +919,7 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
 	map.m_lblk = 0;
 	map.m_len = 0;
 	map.m_flags = 0;
+	map.m_next_pgofs = NULL;

 	for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {

@@ -927,7 +958,7 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
 			map.m_len = last_block - block_in_file;

 			if (f2fs_map_blocks(inode, &map, 0,
-						F2FS_GET_BLOCK_READ))
+							F2FS_GET_BLOCK_READ))
 				goto set_error_page;
 		}
 got_it:
@@ -956,12 +987,12 @@ submit_and_realloc:
 				bio = NULL;
 			}
 			if (bio == NULL) {
-				struct f2fs_crypto_ctx *ctx = NULL;
+				struct fscrypt_ctx *ctx = NULL;

 				if (f2fs_encrypted_inode(inode) &&
 					S_ISREG(inode->i_mode)) {

-					ctx = f2fs_get_crypto_ctx(inode);
+					ctx = fscrypt_get_ctx(inode);
 					if (IS_ERR(ctx))
 						goto set_error_page;

@@ -974,7 +1005,7 @@ submit_and_realloc:
 				min_t(int, nr_pages, BIO_MAX_PAGES));
 			if (!bio) {
 				if (ctx)
-					f2fs_release_crypto_ctx(ctx);
+					fscrypt_release_ctx(ctx);
 				goto set_error_page;
 			}
 			bio->bi_bdev = bdev;
@@ -1052,10 +1083,10 @@ int do_write_data_page(struct f2fs_io_info *fio)
 	if (err)
 		return err;

-	fio->blk_addr = dn.data_blkaddr;
+	fio->old_blkaddr = dn.data_blkaddr;

 	/* This page is already truncated */
-	if (fio->blk_addr == NULL_ADDR) {
+	if (fio->old_blkaddr == NULL_ADDR) {
 		ClearPageUptodate(page);
 		goto out_writepage;
 	}
@@ -1064,9 +1095,9 @@ int do_write_data_page(struct f2fs_io_info *fio)

 		/* wait for GCed encrypted page writeback */
 		f2fs_wait_on_encrypted_page_writeback(F2FS_I_SB(inode),
-							fio->blk_addr);
+							fio->old_blkaddr);

-		fio->encrypted_page = f2fs_encrypt(inode, fio->page);
+		fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page);
 		if (IS_ERR(fio->encrypted_page)) {
 			err = PTR_ERR(fio->encrypted_page);
 			goto out_writepage;
@@ -1079,7 +1110,7 @@ int do_write_data_page(struct f2fs_io_info *fio)
 	 * If current allocation needs SSR,
 	 * it had better in-place writes for updated data.
 	 */
-	if (unlikely(fio->blk_addr != NEW_ADDR &&
+	if (unlikely(fio->old_blkaddr != NEW_ADDR &&
 			!is_cold_data(page) &&
 			!IS_ATOMIC_WRITTEN_PAGE(page) &&
 			need_inplace_update(inode))) {
@@ -1088,8 +1119,6 @@ int do_write_data_page(struct f2fs_io_info *fio)
 		trace_f2fs_do_write_data_page(page, IPU);
 	} else {
 		write_data_page(&dn, fio);
-		set_data_blkaddr(&dn);
-		f2fs_update_extent_cache(&dn);
 		trace_f2fs_do_write_data_page(page, OPU);
 		set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
 		if (page->index == 0)
@@ -1177,12 +1206,18 @@ out:
 	inode_dec_dirty_pages(inode);
 	if (err)
 		ClearPageUptodate(page);
+
+	if (wbc->for_reclaim) {
+		f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, DATA, WRITE);
+		remove_dirty_inode(inode);
+	}
+
 	unlock_page(page);
 	f2fs_balance_fs(sbi, need_balance_fs);
-	if (wbc->for_reclaim || unlikely(f2fs_cp_error(sbi))) {
+
+	if (unlikely(f2fs_cp_error(sbi)))
 		f2fs_submit_merged_bio(sbi, DATA, WRITE);
-		remove_dirty_inode(inode);
-	}
+
 	return 0;

 redirty_out:
@@ -1282,7 +1317,8 @@ continue_unlock:

 			if (PageWriteback(page)) {
 				if (wbc->sync_mode != WB_SYNC_NONE)
-					f2fs_wait_on_page_writeback(page, DATA);
+					f2fs_wait_on_page_writeback(page,
+								DATA, true);
 				else
 					goto continue_unlock;
 			}
@@ -1339,8 +1375,6 @@ static int f2fs_write_data_pages(struct address_space *mapping,
 	int ret;
 	long diff;

-	trace_f2fs_writepages(mapping->host, wbc, DATA);
-
 	/* deal with chardevs and other special file */
 	if (!mapping->a_ops->writepage)
 		return 0;
@@ -1362,14 +1396,16 @@ static int f2fs_write_data_pages(struct address_space *mapping,
 	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
 		goto skip_write;

+	trace_f2fs_writepages(mapping->host, wbc, DATA);
+
 	diff = nr_pages_to_write(sbi, DATA, wbc);

-	if (!S_ISDIR(inode->i_mode)) {
+	if (!S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_ALL) {
 		mutex_lock(&sbi->writepages);
 		locked = true;
 	}
 	ret = f2fs_write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
-	f2fs_submit_merged_bio(sbi, DATA, WRITE);
+	f2fs_submit_merged_bio_cond(sbi, inode, NULL, 0, DATA, WRITE);
 	if (locked)
 		mutex_unlock(&sbi->writepages);

@@ -1380,6 +1416,7 @@ static int f2fs_write_data_pages(struct address_space *mapping,

 skip_write:
 	wbc->pages_skipped += get_dirty_pages(inode);
+	trace_f2fs_writepages(mapping->host, wbc, DATA);
 	return 0;
 }

@@ -1406,6 +1443,14 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi,
 	struct extent_info ei;
 	int err = 0;

+	/*
+	 * we already allocated all the blocks, so we don't need to get
+	 * the block addresses when there is no need to fill the page.
+	 */
+	if (!f2fs_has_inline_data(inode) && !f2fs_encrypted_inode(inode) &&
+					len == PAGE_CACHE_SIZE)
+		return 0;
+
 	if (f2fs_has_inline_data(inode) ||
 		(pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
 		f2fs_lock_op(sbi);
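The new fast path in `prepare_write_begin()` encodes a common write_begin rule: when the write covers the whole page and no inline or encryption handling is needed, the old block address is irrelevant because the page will be entirely overwritten. A compact sketch of the predicate (assumed 4 KiB pages, simplified flags):

```c
/* Only partial writes (or special cases) must fill the page from disk
 * before it can be written into. */
#include <stdbool.h>
#include <stddef.h>

#define PAGE_SIZE 4096u

static bool need_old_block(size_t len, bool inline_data, bool encrypted)
{
	return inline_data || encrypted || len != PAGE_SIZE;
}
```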
@@ -1425,7 +1470,7 @@ restart:
 		if (pos + len <= MAX_INLINE_DATA) {
 			read_inline_data(page, ipage);
 			set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
-			sync_inode_page(&dn);
+			set_inline_node(ipage);
 		} else {
 			err = f2fs_convert_inline_page(&dn, page);
 			if (err)
@@ -1439,13 +1484,9 @@ restart:
 	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
 		dn.data_blkaddr = ei.blk + index - ei.fofs;
 	} else {
-		bool restart = false;
-
 		/* hole case */
 		err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
-		if (err || (!err && dn.data_blkaddr == NULL_ADDR))
-			restart = true;
-		if (restart) {
+		if (err || (!err && dn.data_blkaddr == NULL_ADDR)) {
 			f2fs_put_dnode(&dn);
 			f2fs_lock_op(sbi);
 			locked = true;
@@ -1514,7 +1555,7 @@ repeat:
 		}
 	}

-	f2fs_wait_on_page_writeback(page, DATA);
+	f2fs_wait_on_page_writeback(page, DATA, false);

 	/* wait for GCed encrypted page writeback */
 	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
@@ -1541,7 +1582,8 @@ repeat:
 			.sbi = sbi,
 			.type = DATA,
 			.rw = READ_SYNC,
-			.blk_addr = blkaddr,
+			.old_blkaddr = blkaddr,
+			.new_blkaddr = blkaddr,
 			.page = page,
 			.encrypted_page = NULL,
 		};
@@ -1561,7 +1603,7 @@ repeat:

 	/* avoid symlink page */
 	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
-		err = f2fs_decrypt_one(inode, page);
+		err = fscrypt_decrypt_page(page);
 		if (err)
 			goto fail;
 	}
@@ -1592,7 +1634,6 @@ static int f2fs_write_end(struct file *file,
 	if (pos + copied > i_size_read(inode)) {
 		i_size_write(inode, pos + copied);
 		mark_inode_dirty(inode);
-		update_inode_page(inode);
 	}

 	f2fs_put_page(page, 1);
@@ -1617,34 +1658,21 @@ static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
 static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
 							loff_t offset)
 {
-	struct file *file = iocb->ki_filp;
-	struct address_space *mapping = file->f_mapping;
+	struct address_space *mapping = iocb->ki_filp->f_mapping;
 	struct inode *inode = mapping->host;
 	size_t count = iov_iter_count(iter);
 	int err;

-	/* we don't need to use inline_data strictly */
-	err = f2fs_convert_inline_inode(inode);
+	err = check_direct_IO(inode, iter, offset);
 	if (err)
 		return err;

 	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
 		return 0;

-	err = check_direct_IO(inode, iter, offset);
-	if (err)
-		return err;
-
 	trace_f2fs_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));

-	if (iov_iter_rw(iter) == WRITE) {
-		err = __allocate_data_blocks(inode, offset, count);
-		if (err)
-			goto out;
-	}
-
 	err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block_dio);
-out:
 	if (err < 0 && iov_iter_rw(iter) == WRITE)
 		f2fs_write_failed(mapping, offset + count);

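The reordering in `f2fs_direct_IO()` moves `check_direct_IO()` ahead of everything else, so a misaligned request is rejected before any state-changing step such as inline conversion; block preallocation itself now happens earlier, in `f2fs_preallocate_blocks()`. A toy model of that validate-first shape (4 KiB alignment assumed, as in `check_direct_IO()`):

```c
/* Validate-first: reject a bad request before any side effect runs,
 * so there is nothing to undo on the error path. */
#include <stdint.h>
#include <errno.h>

#define BLKMASK 4095u		/* assumed 4 KiB block alignment */

static int check_alignment(uint64_t offset, uint64_t count)
{
	return ((offset | count) & BLKMASK) ? -EINVAL : 0;
}

static int direct_io_model(uint64_t offset, uint64_t count)
{
	int err = check_alignment(offset, count);	/* validate first */

	if (err)
		return err;		/* nothing to clean up */
	/* ... conversion / submission side effects happen only now ... */
	return 0;
}
```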