Diffstat (limited to 'fs/ntfs/aops.c')
-rw-r--r--  fs/ntfs/aops.c | 166
1 files changed, 110 insertions, 56 deletions
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index 45d56e41ed98..3f43bfe6184e 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -2,7 +2,7 @@
  * aops.c - NTFS kernel address space operations and page cache handling.
  * Part of the Linux-NTFS project.
  *
- * Copyright (c) 2001-2004 Anton Altaparmakov
+ * Copyright (c) 2001-2005 Anton Altaparmakov
  * Copyright (c) 2002 Richard Russon
  *
  * This program/include file is free software; you can redistribute it and/or
@@ -66,19 +66,22 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
 	ni = NTFS_I(page->mapping->host);
 
 	if (likely(uptodate)) {
-		s64 file_ofs;
+		s64 file_ofs, initialized_size;
 
 		set_buffer_uptodate(bh);
 
 		file_ofs = ((s64)page->index << PAGE_CACHE_SHIFT) +
 				bh_offset(bh);
+		read_lock_irqsave(&ni->size_lock, flags);
+		initialized_size = ni->initialized_size;
+		read_unlock_irqrestore(&ni->size_lock, flags);
 		/* Check for the current buffer head overflowing. */
-		if (file_ofs + bh->b_size > ni->initialized_size) {
+		if (file_ofs + bh->b_size > initialized_size) {
 			char *addr;
 			int ofs = 0;
 
-			if (file_ofs < ni->initialized_size)
-				ofs = ni->initialized_size - file_ofs;
+			if (file_ofs < initialized_size)
+				ofs = initialized_size - file_ofs;
 			addr = kmap_atomic(page, KM_BIO_SRC_IRQ);
 			memset(addr + bh_offset(bh) + ofs, 0, bh->b_size - ofs);
 			flush_dcache_page(page);
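A pattern worth noting, since it recurs in nearly every hunk below: the ntfs_inode size fields (initialized_size, allocated_size) are now sampled once under ni->size_lock and the local snapshot is used afterwards, rather than re-reading the fields racily. A minimal sketch of that read side is shown here for reference; the helper name is hypothetical and not part of the patch, but the field, the lock, and the locking calls are the ones the hunks use.

/*
 * Illustrative sketch, not part of the patch: take a consistent snapshot
 * of ni->initialized_size under the size_lock, then work with the copy.
 */
static inline s64 ntfs_initialized_size_snapshot(ntfs_inode *ni)
{
	unsigned long flags;
	s64 initialized_size;

	read_lock_irqsave(&ni->size_lock, flags);
	initialized_size = ni->initialized_size;
	read_unlock_irqrestore(&ni->size_lock, flags);
	return initialized_size;
}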
@@ -132,7 +135,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
 					i * rec_size), rec_size);
 		flush_dcache_page(page);
 		kunmap_atomic(addr, KM_BIO_SRC_IRQ);
-		if (likely(!PageError(page) && page_uptodate))
+		if (likely(page_uptodate && !PageError(page)))
 			SetPageUptodate(page);
 	}
 	unlock_page(page);
@@ -168,6 +171,7 @@ static int ntfs_read_block(struct page *page)
 	runlist_element *rl;
 	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
 	sector_t iblock, lblock, zblock;
+	unsigned long flags;
 	unsigned int blocksize, vcn_ofs;
 	int i, nr;
 	unsigned char blocksize_bits;
@@ -190,8 +194,10 @@ static int ntfs_read_block(struct page *page)
 	}
 
 	iblock = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits);
+	read_lock_irqsave(&ni->size_lock, flags);
 	lblock = (ni->allocated_size + blocksize - 1) >> blocksize_bits;
 	zblock = (ni->initialized_size + blocksize - 1) >> blocksize_bits;
+	read_unlock_irqrestore(&ni->size_lock, flags);
 
 	/* Loop through all the buffers in the page. */
 	rl = NULL;
@@ -258,7 +264,8 @@ lock_retry_remap:
 					goto lock_retry_remap;
 				rl = NULL;
 				lcn = err;
-			}
+			} else if (!rl)
+				up_read(&ni->runlist.lock);
 			/* Hard error, zero out region. */
 			bh->b_blocknr = -1;
 			SetPageError(page);
@@ -341,14 +348,15 @@ handle_zblock:
  */
 static int ntfs_readpage(struct file *file, struct page *page)
 {
-	loff_t i_size;
 	ntfs_inode *ni, *base_ni;
 	u8 *kaddr;
 	ntfs_attr_search_ctx *ctx;
 	MFT_RECORD *mrec;
+	unsigned long flags;
 	u32 attr_len;
 	int err = 0;
 
+retry_readpage:
 	BUG_ON(!PageLocked(page));
 	/*
 	 * This can potentially happen because we clear PageUptodate() during
@@ -383,9 +391,9 @@ static int ntfs_readpage(struct file *file, struct page *page)
 	 * Attribute is resident, implying it is not compressed or encrypted.
 	 * This also means the attribute is smaller than an mft record and
 	 * hence smaller than a page, so can simply zero out any pages with
-	 * index above 0. We can also do this if the file size is 0.
+	 * index above 0.
 	 */
-	if (unlikely(page->index > 0 || !i_size_read(VFS_I(ni)))) {
+	if (unlikely(page->index > 0)) {
 		kaddr = kmap_atomic(page, KM_USER0);
 		memset(kaddr, 0, PAGE_CACHE_SIZE);
 		flush_dcache_page(page);
@@ -402,6 +410,14 @@ static int ntfs_readpage(struct file *file, struct page *page)
 		err = PTR_ERR(mrec);
 		goto err_out;
 	}
+	/*
+	 * If a parallel write made the attribute non-resident, drop the mft
+	 * record and retry the readpage.
+	 */
+	if (unlikely(NInoNonResident(ni))) {
+		unmap_mft_record(base_ni);
+		goto retry_readpage;
+	}
 	ctx = ntfs_attr_get_search_ctx(base_ni, mrec);
 	if (unlikely(!ctx)) {
 		err = -ENOMEM;
@@ -412,9 +428,10 @@ static int ntfs_readpage(struct file *file, struct page *page)
 	if (unlikely(err))
 		goto put_unm_err_out;
 	attr_len = le32_to_cpu(ctx->attr->data.resident.value_length);
-	i_size = i_size_read(VFS_I(ni));
-	if (unlikely(attr_len > i_size))
-		attr_len = i_size;
+	read_lock_irqsave(&ni->size_lock, flags);
+	if (unlikely(attr_len > ni->initialized_size))
+		attr_len = ni->initialized_size;
+	read_unlock_irqrestore(&ni->size_lock, flags);
 	kaddr = kmap_atomic(page, KM_USER0);
 	/* Copy the data to the page. */
 	memcpy(kaddr, (u8*)ctx->attr +
@@ -463,12 +480,15 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
 {
 	VCN vcn;
 	LCN lcn;
+	s64 initialized_size;
+	loff_t i_size;
 	sector_t block, dblock, iblock;
 	struct inode *vi;
 	ntfs_inode *ni;
 	ntfs_volume *vol;
 	runlist_element *rl;
 	struct buffer_head *bh, *head;
+	unsigned long flags;
 	unsigned int blocksize, vcn_ofs;
 	int err;
 	BOOL need_end_writeback;
@@ -510,11 +530,16 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
 	/* The first block in the page. */
 	block = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits);
 
+	read_lock_irqsave(&ni->size_lock, flags);
+	i_size = i_size_read(vi);
+	initialized_size = ni->initialized_size;
+	read_unlock_irqrestore(&ni->size_lock, flags);
+
 	/* The first out of bounds block for the data size. */
-	dblock = (vi->i_size + blocksize - 1) >> blocksize_bits;
+	dblock = (i_size + blocksize - 1) >> blocksize_bits;
 
 	/* The last (fully or partially) initialized block. */
-	iblock = ni->initialized_size >> blocksize_bits;
+	iblock = initialized_size >> blocksize_bits;
 
 	/*
 	 * Be very careful. We have no exclusion from __set_page_dirty_buffers
@@ -559,7 +584,7 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
 
 		/* Make sure we have enough initialized size. */
 		if (unlikely((block >= iblock) &&
-				(ni->initialized_size < vi->i_size))) {
+				(initialized_size < i_size))) {
 			/*
 			 * If this page is fully outside initialized size, zero
 			 * out all pages between the current initialized size
@@ -666,7 +691,8 @@ lock_retry_remap:
 					goto lock_retry_remap;
 				rl = NULL;
 				lcn = err;
-			}
+			} else if (!rl)
+				up_read(&ni->runlist.lock);
 			/* Failed to map the buffer, even after retrying. */
 			bh->b_blocknr = -1;
 			ntfs_error(vol->sb, "Failed to write to inode 0x%lx, "
@@ -801,17 +827,15 @@ static int ntfs_write_mst_block(struct page *page,
 	ntfs_inode *ni = NTFS_I(vi);
 	ntfs_volume *vol = ni->vol;
 	u8 *kaddr;
-	unsigned char bh_size_bits = vi->i_blkbits;
-	unsigned int bh_size = 1 << bh_size_bits;
 	unsigned int rec_size = ni->itype.index.block_size;
 	ntfs_inode *locked_nis[PAGE_CACHE_SIZE / rec_size];
 	struct buffer_head *bh, *head, *tbh, *rec_start_bh;
-	int max_bhs = PAGE_CACHE_SIZE / bh_size;
-	struct buffer_head *bhs[max_bhs];
+	struct buffer_head *bhs[MAX_BUF_PER_PAGE];
 	runlist_element *rl;
-	int i, nr_locked_nis, nr_recs, nr_bhs, bhs_per_rec, err, err2;
-	unsigned rec_size_bits;
+	int i, nr_locked_nis, nr_recs, nr_bhs, max_bhs, bhs_per_rec, err, err2;
+	unsigned bh_size, rec_size_bits;
 	BOOL sync, is_mft, page_is_dirty, rec_is_dirty;
+	unsigned char bh_size_bits;
 
 	ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
 			"0x%lx.", vi->i_ino, ni->type, page->index);
@@ -826,7 +850,11 @@ static int ntfs_write_mst_block(struct page *page,
 	 */
 	BUG_ON(!(is_mft || S_ISDIR(vi->i_mode) ||
 			(NInoAttr(ni) && ni->type == AT_INDEX_ALLOCATION)));
+	bh_size_bits = vi->i_blkbits;
+	bh_size = 1 << bh_size_bits;
+	max_bhs = PAGE_CACHE_SIZE / bh_size;
 	BUG_ON(!max_bhs);
+	BUG_ON(max_bhs > MAX_BUF_PER_PAGE);
 
 	/* Were we called for sync purposes? */
 	sync = (wbc->sync_mode == WB_SYNC_ALL);
@@ -846,7 +874,7 @@ static int ntfs_write_mst_block(struct page *page,
 			(PAGE_CACHE_SHIFT - bh_size_bits);
 
 	/* The first out of bounds block for the data size. */
-	dblock = (vi->i_size + bh_size - 1) >> bh_size_bits;
+	dblock = (i_size_read(vi) + bh_size - 1) >> bh_size_bits;
 
 	rl = NULL;
 	err = err2 = nr_bhs = nr_recs = nr_locked_nis = 0;
@@ -858,6 +886,7 @@ static int ntfs_write_mst_block(struct page *page,
 		if (likely(block < rec_block)) {
 			if (unlikely(block >= dblock)) {
 				clear_buffer_dirty(bh);
+				set_buffer_uptodate(bh);
 				continue;
 			}
 			/*
@@ -938,8 +967,11 @@ lock_retry_remap:
 					if (err2 == -ENOMEM)
 						page_is_dirty = TRUE;
 					lcn = err2;
-				} else
+				} else {
 					err2 = -EIO;
+					if (!rl)
+						up_read(&ni->runlist.lock);
+				}
 				/* Hard error. Abort writing this record. */
 				if (!err || err == -ENOMEM)
 					err = err2;
@@ -949,7 +981,8 @@ lock_retry_remap:
 						"attribute type 0x%x) because "
 						"its location on disk could "
 						"not be determined (error "
-						"code %lli).", (s64)block <<
+						"code %lli).",
+						(long long)block <<
 						bh_size_bits >>
 						vol->mft_record_size_bits,
 						ni->mft_no, ni->type,
@@ -1223,19 +1256,17 @@ done:
 static int ntfs_writepage(struct page *page, struct writeback_control *wbc)
 {
 	loff_t i_size;
-	struct inode *vi;
-	ntfs_inode *ni, *base_ni;
+	struct inode *vi = page->mapping->host;
+	ntfs_inode *base_ni = NULL, *ni = NTFS_I(vi);
 	char *kaddr;
-	ntfs_attr_search_ctx *ctx;
-	MFT_RECORD *m;
+	ntfs_attr_search_ctx *ctx = NULL;
+	MFT_RECORD *m = NULL;
 	u32 attr_len;
 	int err;
 
+retry_writepage:
 	BUG_ON(!PageLocked(page));
-
-	vi = page->mapping->host;
 	i_size = i_size_read(vi);
-
 	/* Is the page fully outside i_size? (truncate in progress) */
 	if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >>
 			PAGE_CACHE_SHIFT)) {
@@ -1248,8 +1279,6 @@ static int ntfs_writepage(struct page *page, struct writeback_control *wbc)
 		ntfs_debug("Write outside i_size - truncated?");
 		return 0;
 	}
-	ni = NTFS_I(vi);
-
 	/* NInoNonResident() == NInoIndexAllocPresent() */
 	if (NInoNonResident(ni)) {
 		/*
@@ -1326,6 +1355,14 @@ static int ntfs_writepage(struct page *page, struct writeback_control *wbc)
 		ctx = NULL;
 		goto err_out;
 	}
+	/*
+	 * If a parallel write made the attribute non-resident, drop the mft
+	 * record and retry the writepage.
+	 */
+	if (unlikely(NInoNonResident(ni))) {
+		unmap_mft_record(base_ni);
+		goto retry_writepage;
+	}
 	ctx = ntfs_attr_get_search_ctx(base_ni, m);
 	if (unlikely(!ctx)) {
 		err = -ENOMEM;
@@ -1367,15 +1404,12 @@ static int ntfs_writepage(struct page *page, struct writeback_control *wbc)
 	 */
 
 	attr_len = le32_to_cpu(ctx->attr->data.resident.value_length);
-	i_size = i_size_read(VFS_I(ni));
-	kaddr = kmap_atomic(page, KM_USER0);
+	i_size = i_size_read(vi);
 	if (unlikely(attr_len > i_size)) {
-		/* Zero out of bounds area in the mft record. */
-		memset((u8*)ctx->attr + le16_to_cpu(
-				ctx->attr->data.resident.value_offset) +
-				i_size, 0, attr_len - i_size);
 		attr_len = i_size;
+		ctx->attr->data.resident.value_length = cpu_to_le32(attr_len);
 	}
+	kaddr = kmap_atomic(page, KM_USER0);
 	/* Copy the data from the page to the mft record. */
 	memcpy((u8*)ctx->attr +
 			le16_to_cpu(ctx->attr->data.resident.value_offset),
@@ -1405,8 +1439,10 @@ err_out:
 		err = 0;
 	} else {
 		ntfs_error(vi->i_sb, "Resident attribute write failed with "
-				"error %i. Setting page error flag.", err);
+				"error %i.", err);
 		SetPageError(page);
+		NVolSetErrors(ni->vol);
+		make_bad_inode(vi);
 	}
 	unlock_page(page);
 	if (ctx)
@@ -1425,12 +1461,15 @@ static int ntfs_prepare_nonresident_write(struct page *page,
 {
 	VCN vcn;
 	LCN lcn;
+	s64 initialized_size;
+	loff_t i_size;
 	sector_t block, ablock, iblock;
 	struct inode *vi;
 	ntfs_inode *ni;
 	ntfs_volume *vol;
 	runlist_element *rl;
 	struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
+	unsigned long flags;
 	unsigned int vcn_ofs, block_start, block_end, blocksize;
 	int err;
 	BOOL is_retry;
@@ -1462,16 +1501,20 @@ static int ntfs_prepare_nonresident_write(struct page *page,
 	/* The first block in the page. */
 	block = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits);
 
+	read_lock_irqsave(&ni->size_lock, flags);
 	/*
 	 * The first out of bounds block for the allocated size. No need to
 	 * round up as allocated_size is in multiples of cluster size and the
 	 * minimum cluster size is 512 bytes, which is equal to the smallest
 	 * blocksize.
 	 */
 	ablock = ni->allocated_size >> blocksize_bits;
+	i_size = i_size_read(vi);
+	initialized_size = ni->initialized_size;
+	read_unlock_irqrestore(&ni->size_lock, flags);
 
 	/* The last (fully or partially) initialized block. */
-	iblock = ni->initialized_size >> blocksize_bits;
+	iblock = initialized_size >> blocksize_bits;
 
 	/* Loop through all the buffers in the page. */
 	block_start = 0;
@@ -1518,7 +1561,7 @@ static int ntfs_prepare_nonresident_write(struct page *page,
 		 * request, i.e. block < ablock is true.
 		 */
 		if (unlikely((block >= iblock) &&
-				(ni->initialized_size < vi->i_size))) {
+				(initialized_size < i_size))) {
 			/*
 			 * If this page is fully outside initialized size, zero
 			 * out all pages between the current initialized size
@@ -1622,6 +1665,8 @@ lock_retry_remap:
 						"not supported yet. "
 						"Sorry.");
 				err = -EOPNOTSUPP;
+				if (!rl)
+					up_read(&ni->runlist.lock);
 				goto err_out;
 			} else if (!is_retry &&
 					lcn == LCN_RL_NOT_MAPPED) {
@@ -1636,7 +1681,8 @@ lock_retry_remap:
 					goto lock_retry_remap;
 				rl = NULL;
 				lcn = err;
-			}
+			} else if (!rl)
+				up_read(&ni->runlist.lock);
 			/*
 			 * Failed to map the buffer, even after
 			 * retrying.
@@ -1797,6 +1843,7 @@ static int ntfs_prepare_write(struct file *file, struct page *page,
 		unsigned from, unsigned to)
 {
 	s64 new_size;
+	loff_t i_size;
 	struct inode *vi = page->mapping->host;
 	ntfs_inode *base_ni = NULL, *ni = NTFS_I(vi);
 	ntfs_volume *vol = ni->vol;
@@ -1868,14 +1915,8 @@ static int ntfs_prepare_write(struct file *file, struct page *page,
 	BUG_ON(page_has_buffers(page));
 	new_size = ((s64)page->index << PAGE_CACHE_SHIFT) + to;
 	/* If we do not need to resize the attribute allocation we are done. */
-	if (new_size <= vi->i_size)
+	if (new_size <= i_size_read(vi))
 		goto done;
-
-	// FIXME: We abort for now as this code is not safe.
-	ntfs_error(vi->i_sb, "Changing the file size is not supported yet. "
-			"Sorry.");
-	return -EOPNOTSUPP;
-
 	/* Map, pin, and lock the (base) mft record. */
 	if (!NInoAttr(ni))
 		base_ni = ni;
@@ -1904,7 +1945,15 @@ static int ntfs_prepare_write(struct file *file, struct page *page,
 	a = ctx->attr;
 	/* The total length of the attribute value. */
 	attr_len = le32_to_cpu(a->data.resident.value_length);
-	BUG_ON(vi->i_size != attr_len);
+	/* Fix an eventual previous failure of ntfs_commit_write(). */
+	i_size = i_size_read(vi);
+	if (unlikely(attr_len > i_size)) {
+		attr_len = i_size;
+		a->data.resident.value_length = cpu_to_le32(attr_len);
+	}
+	/* If we do not need to resize the attribute allocation we are done. */
+	if (new_size <= attr_len)
+		goto done_unm;
 	/* Check if new size is allowed in $AttrDef. */
 	err = ntfs_attr_size_bounds_check(vol, ni->type, new_size);
 	if (unlikely(err)) {
@@ -1962,6 +2011,7 @@ static int ntfs_prepare_write(struct file *file, struct page *page,
 	}
 	flush_dcache_mft_record_page(ctx->ntfs_ino);
 	mark_mft_record_dirty(ctx->ntfs_ino);
+done_unm:
 	ntfs_attr_put_search_ctx(ctx);
 	unmap_mft_record(base_ni);
 	/*
@@ -2047,7 +2097,7 @@ static int ntfs_commit_nonresident_write(struct page *page,
 	 * now we know ntfs_prepare_write() would have failed in the write
 	 * exceeds i_size case, so this will never trigger which is fine.
 	 */
-	if (pos > vi->i_size) {
+	if (pos > i_size_read(vi)) {
 		ntfs_error(vi->i_sb, "Writing beyond the existing file size is "
 				"not supported yet. Sorry.");
 		return -EOPNOTSUPP;
@@ -2183,9 +2233,13 @@ static int ntfs_commit_write(struct file *file, struct page *page,
 	}
 	kunmap_atomic(kaddr, KM_USER0);
 	/* Update i_size if necessary. */
-	if (vi->i_size < attr_len) {
+	if (i_size_read(vi) < attr_len) {
+		unsigned long flags;
+
+		write_lock_irqsave(&ni->size_lock, flags);
 		ni->allocated_size = ni->initialized_size = attr_len;
 		i_size_write(vi, attr_len);
+		write_unlock_irqrestore(&ni->size_lock, flags);
 	}
 	/* Mark the mft record dirty, so it gets written back. */
 	flush_dcache_mft_record_page(ctx->ntfs_ino);
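The final hunk is the update side of the same size_lock discipline: i_size and the ntfs_inode size fields are changed together while the lock is held for writing, so readers taking the snapshot shown earlier never observe them out of step. A condensed sketch, again with a hypothetical helper name and assuming the usual NTFS driver types:

/*
 * Illustrative sketch, not part of the patch: grow all size fields to
 * new_size atomically with respect to size_lock readers.
 */
static inline void ntfs_grow_sizes(struct inode *vi, ntfs_inode *ni, s64 new_size)
{
	unsigned long flags;

	write_lock_irqsave(&ni->size_lock, flags);
	ni->allocated_size = ni->initialized_size = new_size;
	i_size_write(vi, new_size);
	write_unlock_irqrestore(&ni->size_lock, flags);
}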