Diffstat (limited to 'fs/ntfs/aops.c')
 -rw-r--r--  fs/ntfs/aops.c | 293
 1 files changed, 169 insertions, 124 deletions
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index 78adad7a988d..545236414d59 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -55,9 +55,8 @@
  */
 static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
 {
-	static DEFINE_SPINLOCK(page_uptodate_lock);
 	unsigned long flags;
-	struct buffer_head *tmp;
+	struct buffer_head *first, *tmp;
 	struct page *page;
 	ntfs_inode *ni;
 	int page_uptodate = 1;
@@ -89,11 +88,13 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
 		}
 	} else {
 		clear_buffer_uptodate(bh);
+		SetPageError(page);
 		ntfs_error(ni->vol->sb, "Buffer I/O error, logical block %llu.",
 				(unsigned long long)bh->b_blocknr);
-		SetPageError(page);
 	}
-	spin_lock_irqsave(&page_uptodate_lock, flags);
+	first = page_buffers(page);
+	local_irq_save(flags);
+	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
 	clear_buffer_async_read(bh);
 	unlock_buffer(bh);
 	tmp = bh;
@@ -108,7 +109,8 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
 		}
 		tmp = tmp->b_this_page;
 	} while (tmp != bh);
-	spin_unlock_irqrestore(&page_uptodate_lock, flags);
+	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+	local_irq_restore(flags);
 	/*
 	 * If none of the buffers had errors then we can set the page uptodate,
 	 * but we first have to perform the post read mst fixups, if the
@@ -141,7 +143,8 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
 	unlock_page(page);
 	return;
 still_busy:
-	spin_unlock_irqrestore(&page_uptodate_lock, flags);
+	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+	local_irq_restore(flags);
 	return;
 }
 
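The four hunks above replace the file-local page_uptodate_lock spinlock with the generic BH_Uptodate_Lock bit spin lock taken on the page's first buffer head. As a purely illustrative sketch (not part of this patch; the helper name is invented and the exact headers vary by kernel version), the pattern looks roughly like this when walking a page's buffer ring from a context that can race with the I/O completion handler:

#include <linux/buffer_head.h>
#include <linux/bit_spinlock.h>
#include <linux/spinlock.h>

/* Hypothetical helper: returns non-zero if no async read is still in flight. */
static int all_buffers_done(struct page *page)
{
	struct buffer_head *first, *bh;
	unsigned long flags;
	int busy = 0;

	first = page_buffers(page);	/* first buffer head is the lock anchor */
	/*
	 * Interrupts must be off because the same bit lock is also taken from
	 * the I/O completion path.
	 */
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
	bh = first;
	do {
		if (buffer_async_read(bh)) {
			busy = 1;
			break;
		}
		bh = bh->b_this_page;
	} while (bh != first);
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	return !busy;
}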
@@ -185,13 +188,15 @@ static int ntfs_read_block(struct page *page)
 	blocksize_bits = VFS_I(ni)->i_blkbits;
 	blocksize = 1 << blocksize_bits;
 
-	if (!page_has_buffers(page))
+	if (!page_has_buffers(page)) {
 		create_empty_buffers(page, blocksize, 0);
-	bh = head = page_buffers(page);
-	if (unlikely(!bh)) {
-		unlock_page(page);
-		return -ENOMEM;
+		if (unlikely(!page_has_buffers(page))) {
+			unlock_page(page);
+			return -ENOMEM;
+		}
 	}
+	bh = head = page_buffers(page);
+	BUG_ON(!bh);
 
 	iblock = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits);
 	read_lock_irqsave(&ni->size_lock, flags);
@@ -204,6 +209,7 @@ static int ntfs_read_block(struct page *page)
 	nr = i = 0;
 	do {
 		u8 *kaddr;
+		int err;
 
 		if (unlikely(buffer_uptodate(bh)))
 			continue;
@@ -211,6 +217,7 @@ static int ntfs_read_block(struct page *page)
 			arr[nr++] = bh;
 			continue;
 		}
+		err = 0;
 		bh->b_bdev = vol->sb->s_bdev;
 		/* Is the block within the allowed limits? */
 		if (iblock < lblock) {
@@ -252,7 +259,6 @@ lock_retry_remap:
 				goto handle_hole;
 			/* If first try and runlist unmapped, map and retry. */
 			if (!is_retry && lcn == LCN_RL_NOT_MAPPED) {
-				int err;
 				is_retry = TRUE;
 				/*
 				 * Attempt to map runlist, dropping lock for
@@ -263,20 +269,30 @@ lock_retry_remap:
 				if (likely(!err))
 					goto lock_retry_remap;
 				rl = NULL;
-				lcn = err;
 			} else if (!rl)
 				up_read(&ni->runlist.lock);
+			/*
+			 * If buffer is outside the runlist, treat it as a
+			 * hole. This can happen due to concurrent truncate
+			 * for example.
+			 */
+			if (err == -ENOENT || lcn == LCN_ENOENT) {
+				err = 0;
+				goto handle_hole;
+			}
 			/* Hard error, zero out region. */
+			if (!err)
+				err = -EIO;
 			bh->b_blocknr = -1;
 			SetPageError(page);
 			ntfs_error(vol->sb, "Failed to read from inode 0x%lx, "
 					"attribute type 0x%x, vcn 0x%llx, "
 					"offset 0x%x because its location on "
 					"disk could not be determined%s "
-					"(error code %lli).", ni->mft_no,
+					"(error code %i).", ni->mft_no,
 					ni->type, (unsigned long long)vcn,
 					vcn_ofs, is_retry ? " even after "
-					"retrying" : "", (long long)lcn);
+					"retrying" : "", err);
 		}
 		/*
 		 * Either iblock was outside lblock limits or
@@ -289,9 +305,10 @@ handle_hole:
 handle_zblock:
 		kaddr = kmap_atomic(page, KM_USER0);
 		memset(kaddr + i * blocksize, 0, blocksize);
-		flush_dcache_page(page);
 		kunmap_atomic(kaddr, KM_USER0);
-		set_buffer_uptodate(bh);
+		flush_dcache_page(page);
+		if (likely(!err))
+			set_buffer_uptodate(bh);
 	} while (i++, iblock++, (bh = bh->b_this_page) != head);
 
 	/* Release the lock if we took it. */
@@ -367,31 +384,38 @@ retry_readpage:
 		return 0;
 	}
 	ni = NTFS_I(page->mapping->host);
-
+	/*
+	 * Only $DATA attributes can be encrypted and only unnamed $DATA
+	 * attributes can be compressed. Index root can have the flags set but
+	 * this means to create compressed/encrypted files, not that the
+	 * attribute is compressed/encrypted.
+	 */
+	if (ni->type != AT_INDEX_ROOT) {
+		/* If attribute is encrypted, deny access, just like NT4. */
+		if (NInoEncrypted(ni)) {
+			BUG_ON(ni->type != AT_DATA);
+			err = -EACCES;
+			goto err_out;
+		}
+		/* Compressed data streams are handled in compress.c. */
+		if (NInoNonResident(ni) && NInoCompressed(ni)) {
+			BUG_ON(ni->type != AT_DATA);
+			BUG_ON(ni->name_len);
+			return ntfs_read_compressed_block(page);
+		}
+	}
 	/* NInoNonResident() == NInoIndexAllocPresent() */
 	if (NInoNonResident(ni)) {
-		/*
-		 * Only unnamed $DATA attributes can be compressed or
-		 * encrypted.
-		 */
-		if (ni->type == AT_DATA && !ni->name_len) {
-			/* If file is encrypted, deny access, just like NT4. */
-			if (NInoEncrypted(ni)) {
-				err = -EACCES;
-				goto err_out;
-			}
-			/* Compressed data streams are handled in compress.c. */
-			if (NInoCompressed(ni))
-				return ntfs_read_compressed_block(page);
-		}
-		/* Normal data stream. */
+		/* Normal, non-resident data stream. */
 		return ntfs_read_block(page);
 	}
 	/*
 	 * Attribute is resident, implying it is not compressed or encrypted.
 	 * This also means the attribute is smaller than an mft record and
 	 * hence smaller than a page, so can simply zero out any pages with
-	 * index above 0.
+	 * index above 0. Note the attribute can actually be marked compressed
+	 * but if it is resident the actual data is not compressed so we are
+	 * ok to ignore the compressed flag here.
 	 */
 	if (unlikely(page->index > 0)) {
 		kaddr = kmap_atomic(page, KM_USER0);
@@ -511,19 +535,21 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
 		BUG_ON(!PageUptodate(page));
 		create_empty_buffers(page, blocksize,
 				(1 << BH_Uptodate) | (1 << BH_Dirty));
+		if (unlikely(!page_has_buffers(page))) {
+			ntfs_warning(vol->sb, "Error allocating page "
+					"buffers. Redirtying page so we try "
+					"again later.");
+			/*
+			 * Put the page back on mapping->dirty_pages, but leave
+			 * its buffers' dirty state as-is.
+			 */
+			redirty_page_for_writepage(wbc, page);
+			unlock_page(page);
+			return 0;
+		}
 	}
 	bh = head = page_buffers(page);
-	if (unlikely(!bh)) {
-		ntfs_warning(vol->sb, "Error allocating page buffers. "
-				"Redirtying page so we try again later.");
-		/*
-		 * Put the page back on mapping->dirty_pages, but leave its
-		 * buffer's dirty state as-is.
-		 */
-		redirty_page_for_writepage(wbc, page);
-		unlock_page(page);
-		return 0;
-	}
+	BUG_ON(!bh);
 
 	/* NOTE: Different naming scheme to ntfs_read_block()! */
 
@@ -670,6 +696,27 @@ lock_retry_remap:
 		}
 		/* It is a hole, need to instantiate it. */
 		if (lcn == LCN_HOLE) {
+			u8 *kaddr;
+			unsigned long *bpos, *bend;
+
+			/* Check if the buffer is zero. */
+			kaddr = kmap_atomic(page, KM_USER0);
+			bpos = (unsigned long *)(kaddr + bh_offset(bh));
+			bend = (unsigned long *)((u8*)bpos + blocksize);
+			do {
+				if (unlikely(*bpos))
+					break;
+			} while (likely(++bpos < bend));
+			kunmap_atomic(kaddr, KM_USER0);
+			if (bpos == bend) {
+				/*
+				 * Buffer is zero and sparse, no need to write
+				 * it.
+				 */
+				bh->b_blocknr = -1;
+				clear_buffer_dirty(bh);
+				continue;
+			}
 			// TODO: Instantiate the hole.
 			// clear_buffer_new(bh);
 			// unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
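The hunk above teaches the write path to skip a dirty buffer that sits over a hole when its contents are entirely zero, so the region stays sparse on disk. As a standalone illustration of that word-wise zero scan (a sketch only; the helper name and signature are invented, not taken from the patch):

#include <linux/types.h>

/* Hypothetical helper: true if the block at buf of the given size is all zeroes. */
static bool ntfs_buf_is_zero(const void *buf, unsigned int size)
{
	const unsigned long *pos = buf;
	const unsigned long *end = (const unsigned long *)((const u8 *)buf + size);

	/* A block size is a multiple of sizeof(long), so scan word by word. */
	while (pos < end) {
		if (*pos)
			return false;
		pos++;
	}
	return true;
}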
@@ -690,20 +737,37 @@ lock_retry_remap:
 			if (likely(!err))
 				goto lock_retry_remap;
 			rl = NULL;
-			lcn = err;
 		} else if (!rl)
 			up_read(&ni->runlist.lock);
+		/*
+		 * If buffer is outside the runlist, truncate has cut it out
+		 * of the runlist. Just clean and clear the buffer and set it
+		 * uptodate so it can get discarded by the VM.
+		 */
+		if (err == -ENOENT || lcn == LCN_ENOENT) {
+			u8 *kaddr;
+
+			bh->b_blocknr = -1;
+			clear_buffer_dirty(bh);
+			kaddr = kmap_atomic(page, KM_USER0);
+			memset(kaddr + bh_offset(bh), 0, blocksize);
+			kunmap_atomic(kaddr, KM_USER0);
+			flush_dcache_page(page);
+			set_buffer_uptodate(bh);
+			err = 0;
+			continue;
+		}
 		/* Failed to map the buffer, even after retrying. */
+		if (!err)
+			err = -EIO;
 		bh->b_blocknr = -1;
 		ntfs_error(vol->sb, "Failed to write to inode 0x%lx, "
 				"attribute type 0x%x, vcn 0x%llx, offset 0x%x "
 				"because its location on disk could not be "
-				"determined%s (error code %lli).", ni->mft_no,
+				"determined%s (error code %i).", ni->mft_no,
 				ni->type, (unsigned long long)vcn,
 				vcn_ofs, is_retry ? " even after "
-				"retrying" : "", (long long)lcn);
-		if (!err)
-			err = -EIO;
+				"retrying" : "", err);
 		break;
 	} while (block++, (bh = bh->b_this_page) != head);
 
@@ -714,7 +778,7 @@ lock_retry_remap:
 	/* For the error case, need to reset bh to the beginning. */
 	bh = head;
 
-	/* Just an optimization, so ->readpage() isn't called later. */
+	/* Just an optimization, so ->readpage() is not called later. */
 	if (unlikely(!PageUptodate(page))) {
 		int uptodate = 1;
 		do {
@@ -730,7 +794,6 @@ lock_retry_remap:
 
 	/* Setup all mapped, dirty buffers for async write i/o. */
 	do {
-		get_bh(bh);
 		if (buffer_mapped(bh) && buffer_dirty(bh)) {
 			lock_buffer(bh);
 			if (test_clear_buffer_dirty(bh)) {
@@ -768,14 +831,8 @@ lock_retry_remap:
 
 	BUG_ON(PageWriteback(page));
 	set_page_writeback(page);	/* Keeps try_to_free_buffers() away. */
-	unlock_page(page);
 
-	/*
-	 * Submit the prepared buffers for i/o. Note the page is unlocked,
-	 * and the async write i/o completion handler can end_page_writeback()
-	 * at any time after the *first* submit_bh(). So the buffers can then
-	 * disappear...
-	 */
+	/* Submit the prepared buffers for i/o. */
 	need_end_writeback = TRUE;
 	do {
 		struct buffer_head *next = bh->b_this_page;
@@ -783,9 +840,9 @@ lock_retry_remap:
 			submit_bh(WRITE, bh);
 			need_end_writeback = FALSE;
 		}
-		put_bh(bh);
 		bh = next;
 	} while (bh != head);
+	unlock_page(page);
 
 	/* If no i/o was started, need to end_page_writeback(). */
 	if (unlikely(need_end_writeback))
@@ -860,7 +917,6 @@ static int ntfs_write_mst_block(struct page *page,
 	sync = (wbc->sync_mode == WB_SYNC_ALL);
 
 	/* Make sure we have mapped buffers. */
-	BUG_ON(!page_has_buffers(page));
 	bh = head = page_buffers(page);
 	BUG_ON(!bh);
 
@@ -1280,38 +1336,42 @@ retry_writepage:
 		ntfs_debug("Write outside i_size - truncated?");
 		return 0;
 	}
+	/*
+	 * Only $DATA attributes can be encrypted and only unnamed $DATA
+	 * attributes can be compressed. Index root can have the flags set but
+	 * this means to create compressed/encrypted files, not that the
+	 * attribute is compressed/encrypted.
+	 */
+	if (ni->type != AT_INDEX_ROOT) {
+		/* If file is encrypted, deny access, just like NT4. */
+		if (NInoEncrypted(ni)) {
+			unlock_page(page);
+			BUG_ON(ni->type != AT_DATA);
+			ntfs_debug("Denying write access to encrypted "
+					"file.");
+			return -EACCES;
+		}
+		/* Compressed data streams are handled in compress.c. */
+		if (NInoNonResident(ni) && NInoCompressed(ni)) {
+			BUG_ON(ni->type != AT_DATA);
+			BUG_ON(ni->name_len);
+			// TODO: Implement and replace this with
+			// return ntfs_write_compressed_block(page);
+			unlock_page(page);
+			ntfs_error(vi->i_sb, "Writing to compressed files is "
+					"not supported yet. Sorry.");
+			return -EOPNOTSUPP;
+		}
+		// TODO: Implement and remove this check.
+		if (NInoNonResident(ni) && NInoSparse(ni)) {
+			unlock_page(page);
+			ntfs_error(vi->i_sb, "Writing to sparse files is not "
+					"supported yet. Sorry.");
+			return -EOPNOTSUPP;
+		}
+	}
 	/* NInoNonResident() == NInoIndexAllocPresent() */
 	if (NInoNonResident(ni)) {
-		/*
-		 * Only unnamed $DATA attributes can be compressed, encrypted,
-		 * and/or sparse.
-		 */
-		if (ni->type == AT_DATA && !ni->name_len) {
-			/* If file is encrypted, deny access, just like NT4. */
-			if (NInoEncrypted(ni)) {
-				unlock_page(page);
-				ntfs_debug("Denying write access to encrypted "
-						"file.");
-				return -EACCES;
-			}
-			/* Compressed data streams are handled in compress.c. */
-			if (NInoCompressed(ni)) {
-				// TODO: Implement and replace this check with
-				// return ntfs_write_compressed_block(page);
-				unlock_page(page);
-				ntfs_error(vi->i_sb, "Writing to compressed "
-						"files is not supported yet. "
-						"Sorry.");
-				return -EOPNOTSUPP;
-			}
-			// TODO: Implement and remove this check.
-			if (NInoSparse(ni)) {
-				unlock_page(page);
-				ntfs_error(vi->i_sb, "Writing to sparse files "
-						"is not supported yet. Sorry.");
-				return -EOPNOTSUPP;
-			}
-		}
 	/* We have to zero every time due to mmap-at-end-of-file. */
 	if (page->index >= (i_size >> PAGE_CACHE_SHIFT)) {
 		/* The page straddles i_size. */
@@ -1324,14 +1384,16 @@ retry_writepage:
 		/* Handle mst protected attributes. */
 		if (NInoMstProtected(ni))
 			return ntfs_write_mst_block(page, wbc);
-		/* Normal data stream. */
+		/* Normal, non-resident data stream. */
 		return ntfs_write_block(page, wbc);
 	}
 	/*
-	 * Attribute is resident, implying it is not compressed, encrypted,
-	 * sparse, or mst protected. This also means the attribute is smaller
-	 * than an mft record and hence smaller than a page, so can simply
-	 * return error on any pages with index above 0.
+	 * Attribute is resident, implying it is not compressed, encrypted, or
+	 * mst protected. This also means the attribute is smaller than an mft
+	 * record and hence smaller than a page, so can simply return error on
+	 * any pages with index above 0. Note the attribute can actually be
+	 * marked compressed but if it is resident the actual data is not
+	 * compressed so we are ok to ignore the compressed flag here.
 	 */
 	BUG_ON(page_has_buffers(page));
 	BUG_ON(!PageUptodate(page));
@@ -1380,30 +1442,14 @@ retry_writepage:
 	BUG_ON(PageWriteback(page));
 	set_page_writeback(page);
 	unlock_page(page);
-
 	/*
-	 * Here, we don't need to zero the out of bounds area everytime because
-	 * the below memcpy() already takes care of the mmap-at-end-of-file
-	 * requirements. If the file is converted to a non-resident one, then
-	 * the code path use is switched to the non-resident one where the
-	 * zeroing happens on each ntfs_writepage() invocation.
-	 *
-	 * The above also applies nicely when i_size is decreased.
-	 *
-	 * When i_size is increased, the memory between the old and new i_size
-	 * _must_ be zeroed (or overwritten with new data). Otherwise we will
-	 * expose data to userspace/disk which should never have been exposed.
-	 *
-	 * FIXME: Ensure that i_size increases do the zeroing/overwriting and
-	 * if we cannot guarantee that, then enable the zeroing below. If the
-	 * zeroing below is enabled, we MUST move the unlock_page() from above
-	 * to after the kunmap_atomic(), i.e. just before the
-	 * end_page_writeback().
-	 * UPDATE: ntfs_prepare/commit_write() do the zeroing on i_size
-	 * increases for resident attributes so those are ok.
-	 * TODO: ntfs_truncate(), others?
+	 * Here, we do not need to zero the out of bounds area everytime
+	 * because the below memcpy() already takes care of the
+	 * mmap-at-end-of-file requirements. If the file is converted to a
+	 * non-resident one, then the code path use is switched to the
+	 * non-resident one where the zeroing happens on each ntfs_writepage()
+	 * invocation.
 	 */
-
 	attr_len = le32_to_cpu(ctx->attr->data.resident.value_length);
 	i_size = i_size_read(vi);
 	if (unlikely(attr_len > i_size)) {
@@ -1681,27 +1727,25 @@ lock_retry_remap:
 					if (likely(!err))
 						goto lock_retry_remap;
 					rl = NULL;
-					lcn = err;
 				} else if (!rl)
 					up_read(&ni->runlist.lock);
 				/*
 				 * Failed to map the buffer, even after
 				 * retrying.
 				 */
+				if (!err)
+					err = -EIO;
 				bh->b_blocknr = -1;
 				ntfs_error(vol->sb, "Failed to write to inode "
 						"0x%lx, attribute type 0x%x, "
 						"vcn 0x%llx, offset 0x%x "
 						"because its location on disk "
 						"could not be determined%s "
-						"(error code %lli).",
+						"(error code %i).",
 						ni->mft_no, ni->type,
 						(unsigned long long)vcn,
 						vcn_ofs, is_retry ? " even "
-						"after retrying" : "",
-						(long long)lcn);
-				if (!err)
-					err = -EIO;
+						"after retrying" : "", err);
 				goto err_out;
 			}
 			/* We now have a successful remap, i.e. lcn >= 0. */
@@ -2357,6 +2401,7 @@ void mark_ntfs_record_dirty(struct page *page, const unsigned int ofs) {
 		buffers_to_free = bh;
 	}
 	bh = head = page_buffers(page);
+	BUG_ON(!bh);
 	do {
 		bh_ofs = bh_offset(bh);
 		if (bh_ofs + bh_size <= ofs)
