about summary refs log tree commit diff stats
path: root/fs/ntfs/aops.c
diff options
context:
space:
mode:
Diffstat (limited to 'fs/ntfs/aops.c')
-rw-r--r-- | fs/ntfs/aops.c | 50
1 files changed, 25 insertions, 25 deletions
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index 7521e11db728..97768a1379f2 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -74,7 +74,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
74 74
75 set_buffer_uptodate(bh); 75 set_buffer_uptodate(bh);
76 76
77 file_ofs = ((s64)page->index << PAGE_CACHE_SHIFT) + 77 file_ofs = ((s64)page->index << PAGE_SHIFT) +
78 bh_offset(bh); 78 bh_offset(bh);
79 read_lock_irqsave(&ni->size_lock, flags); 79 read_lock_irqsave(&ni->size_lock, flags);
80 init_size = ni->initialized_size; 80 init_size = ni->initialized_size;
@@ -142,7 +142,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
142 u32 rec_size; 142 u32 rec_size;
143 143
144 rec_size = ni->itype.index.block_size; 144 rec_size = ni->itype.index.block_size;
145 recs = PAGE_CACHE_SIZE / rec_size; 145 recs = PAGE_SIZE / rec_size;
146 /* Should have been verified before we got here... */ 146 /* Should have been verified before we got here... */
147 BUG_ON(!recs); 147 BUG_ON(!recs);
148 local_irq_save(flags); 148 local_irq_save(flags);
@@ -229,7 +229,7 @@ static int ntfs_read_block(struct page *page)
229 * fully truncated, truncate will throw it away as soon as we unlock 229 * fully truncated, truncate will throw it away as soon as we unlock
230 * it so no need to worry what we do with it. 230 * it so no need to worry what we do with it.
231 */ 231 */
232 iblock = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits); 232 iblock = (s64)page->index << (PAGE_SHIFT - blocksize_bits);
233 read_lock_irqsave(&ni->size_lock, flags); 233 read_lock_irqsave(&ni->size_lock, flags);
234 lblock = (ni->allocated_size + blocksize - 1) >> blocksize_bits; 234 lblock = (ni->allocated_size + blocksize - 1) >> blocksize_bits;
235 init_size = ni->initialized_size; 235 init_size = ni->initialized_size;
@@ -412,9 +412,9 @@ retry_readpage:
412 vi = page->mapping->host; 412 vi = page->mapping->host;
413 i_size = i_size_read(vi); 413 i_size = i_size_read(vi);
414 /* Is the page fully outside i_size? (truncate in progress) */ 414 /* Is the page fully outside i_size? (truncate in progress) */
415 if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >> 415 if (unlikely(page->index >= (i_size + PAGE_SIZE - 1) >>
416 PAGE_CACHE_SHIFT)) { 416 PAGE_SHIFT)) {
417 zero_user(page, 0, PAGE_CACHE_SIZE); 417 zero_user(page, 0, PAGE_SIZE);
418 ntfs_debug("Read outside i_size - truncated?"); 418 ntfs_debug("Read outside i_size - truncated?");
419 goto done; 419 goto done;
420 } 420 }
@@ -463,7 +463,7 @@ retry_readpage:
463 * ok to ignore the compressed flag here. 463 * ok to ignore the compressed flag here.
464 */ 464 */
465 if (unlikely(page->index > 0)) { 465 if (unlikely(page->index > 0)) {
466 zero_user(page, 0, PAGE_CACHE_SIZE); 466 zero_user(page, 0, PAGE_SIZE);
467 goto done; 467 goto done;
468 } 468 }
469 if (!NInoAttr(ni)) 469 if (!NInoAttr(ni))
@@ -509,7 +509,7 @@ retry_readpage:
509 le16_to_cpu(ctx->attr->data.resident.value_offset), 509 le16_to_cpu(ctx->attr->data.resident.value_offset),
510 attr_len); 510 attr_len);
511 /* Zero the remainder of the page. */ 511 /* Zero the remainder of the page. */
512 memset(addr + attr_len, 0, PAGE_CACHE_SIZE - attr_len); 512 memset(addr + attr_len, 0, PAGE_SIZE - attr_len);
513 flush_dcache_page(page); 513 flush_dcache_page(page);
514 kunmap_atomic(addr); 514 kunmap_atomic(addr);
515put_unm_err_out: 515put_unm_err_out:
@@ -599,7 +599,7 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
599 /* NOTE: Different naming scheme to ntfs_read_block()! */ 599 /* NOTE: Different naming scheme to ntfs_read_block()! */
600 600
601 /* The first block in the page. */ 601 /* The first block in the page. */
602 block = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits); 602 block = (s64)page->index << (PAGE_SHIFT - blocksize_bits);
603 603
604 read_lock_irqsave(&ni->size_lock, flags); 604 read_lock_irqsave(&ni->size_lock, flags);
605 i_size = i_size_read(vi); 605 i_size = i_size_read(vi);
@@ -674,7 +674,7 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
674 // in the inode. 674 // in the inode.
675 // Again, for each page do: 675 // Again, for each page do:
676 // __set_page_dirty_buffers(); 676 // __set_page_dirty_buffers();
677 // page_cache_release() 677 // put_page()
678 // We don't need to wait on the writes. 678 // We don't need to wait on the writes.
679 // Update iblock. 679 // Update iblock.
680 } 680 }
@@ -925,7 +925,7 @@ static int ntfs_write_mst_block(struct page *page,
925 ntfs_volume *vol = ni->vol; 925 ntfs_volume *vol = ni->vol;
926 u8 *kaddr; 926 u8 *kaddr;
927 unsigned int rec_size = ni->itype.index.block_size; 927 unsigned int rec_size = ni->itype.index.block_size;
928 ntfs_inode *locked_nis[PAGE_CACHE_SIZE / rec_size]; 928 ntfs_inode *locked_nis[PAGE_SIZE / rec_size];
929 struct buffer_head *bh, *head, *tbh, *rec_start_bh; 929 struct buffer_head *bh, *head, *tbh, *rec_start_bh;
930 struct buffer_head *bhs[MAX_BUF_PER_PAGE]; 930 struct buffer_head *bhs[MAX_BUF_PER_PAGE];
931 runlist_element *rl; 931 runlist_element *rl;
@@ -949,7 +949,7 @@ static int ntfs_write_mst_block(struct page *page,
949 (NInoAttr(ni) && ni->type == AT_INDEX_ALLOCATION))); 949 (NInoAttr(ni) && ni->type == AT_INDEX_ALLOCATION)));
950 bh_size = vol->sb->s_blocksize; 950 bh_size = vol->sb->s_blocksize;
951 bh_size_bits = vol->sb->s_blocksize_bits; 951 bh_size_bits = vol->sb->s_blocksize_bits;
952 max_bhs = PAGE_CACHE_SIZE / bh_size; 952 max_bhs = PAGE_SIZE / bh_size;
953 BUG_ON(!max_bhs); 953 BUG_ON(!max_bhs);
954 BUG_ON(max_bhs > MAX_BUF_PER_PAGE); 954 BUG_ON(max_bhs > MAX_BUF_PER_PAGE);
955 955
@@ -961,13 +961,13 @@ static int ntfs_write_mst_block(struct page *page,
961 BUG_ON(!bh); 961 BUG_ON(!bh);
962 962
963 rec_size_bits = ni->itype.index.block_size_bits; 963 rec_size_bits = ni->itype.index.block_size_bits;
964 BUG_ON(!(PAGE_CACHE_SIZE >> rec_size_bits)); 964 BUG_ON(!(PAGE_SIZE >> rec_size_bits));
965 bhs_per_rec = rec_size >> bh_size_bits; 965 bhs_per_rec = rec_size >> bh_size_bits;
966 BUG_ON(!bhs_per_rec); 966 BUG_ON(!bhs_per_rec);
967 967
968 /* The first block in the page. */ 968 /* The first block in the page. */
969 rec_block = block = (sector_t)page->index << 969 rec_block = block = (sector_t)page->index <<
970 (PAGE_CACHE_SHIFT - bh_size_bits); 970 (PAGE_SHIFT - bh_size_bits);
971 971
972 /* The first out of bounds block for the data size. */ 972 /* The first out of bounds block for the data size. */
973 dblock = (i_size_read(vi) + bh_size - 1) >> bh_size_bits; 973 dblock = (i_size_read(vi) + bh_size - 1) >> bh_size_bits;
@@ -1133,7 +1133,7 @@ lock_retry_remap:
1133 unsigned long mft_no; 1133 unsigned long mft_no;
1134 1134
1135 /* Get the mft record number. */ 1135 /* Get the mft record number. */
1136 mft_no = (((s64)page->index << PAGE_CACHE_SHIFT) + ofs) 1136 mft_no = (((s64)page->index << PAGE_SHIFT) + ofs)
1137 >> rec_size_bits; 1137 >> rec_size_bits;
1138 /* Check whether to write this mft record. */ 1138 /* Check whether to write this mft record. */
1139 tni = NULL; 1139 tni = NULL;
@@ -1249,7 +1249,7 @@ do_mirror:
1249 continue; 1249 continue;
1250 ofs = bh_offset(tbh); 1250 ofs = bh_offset(tbh);
1251 /* Get the mft record number. */ 1251 /* Get the mft record number. */
1252 mft_no = (((s64)page->index << PAGE_CACHE_SHIFT) + ofs) 1252 mft_no = (((s64)page->index << PAGE_SHIFT) + ofs)
1253 >> rec_size_bits; 1253 >> rec_size_bits;
1254 if (mft_no < vol->mftmirr_size) 1254 if (mft_no < vol->mftmirr_size)
1255 ntfs_sync_mft_mirror(vol, mft_no, 1255 ntfs_sync_mft_mirror(vol, mft_no,
@@ -1300,7 +1300,7 @@ done:
1300 * Set page error if there is only one ntfs record in the page. 1300 * Set page error if there is only one ntfs record in the page.
1301 * Otherwise we would loose per-record granularity. 1301 * Otherwise we would loose per-record granularity.
1302 */ 1302 */
1303 if (ni->itype.index.block_size == PAGE_CACHE_SIZE) 1303 if (ni->itype.index.block_size == PAGE_SIZE)
1304 SetPageError(page); 1304 SetPageError(page);
1305 NVolSetErrors(vol); 1305 NVolSetErrors(vol);
1306 } 1306 }
@@ -1308,7 +1308,7 @@ done:
1308 ntfs_debug("Page still contains one or more dirty ntfs " 1308 ntfs_debug("Page still contains one or more dirty ntfs "
1309 "records. Redirtying the page starting at " 1309 "records. Redirtying the page starting at "
1310 "record 0x%lx.", page->index << 1310 "record 0x%lx.", page->index <<
1311 (PAGE_CACHE_SHIFT - rec_size_bits)); 1311 (PAGE_SHIFT - rec_size_bits));
1312 redirty_page_for_writepage(wbc, page); 1312 redirty_page_for_writepage(wbc, page);
1313 unlock_page(page); 1313 unlock_page(page);
1314 } else { 1314 } else {
@@ -1365,13 +1365,13 @@ retry_writepage:
1365 BUG_ON(!PageLocked(page)); 1365 BUG_ON(!PageLocked(page));
1366 i_size = i_size_read(vi); 1366 i_size = i_size_read(vi);
1367 /* Is the page fully outside i_size? (truncate in progress) */ 1367 /* Is the page fully outside i_size? (truncate in progress) */
1368 if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >> 1368 if (unlikely(page->index >= (i_size + PAGE_SIZE - 1) >>
1369 PAGE_CACHE_SHIFT)) { 1369 PAGE_SHIFT)) {
1370 /* 1370 /*
1371 * The page may have dirty, unmapped buffers. Make them 1371 * The page may have dirty, unmapped buffers. Make them
1372 * freeable here, so the page does not leak. 1372 * freeable here, so the page does not leak.
1373 */ 1373 */
1374 block_invalidatepage(page, 0, PAGE_CACHE_SIZE); 1374 block_invalidatepage(page, 0, PAGE_SIZE);
1375 unlock_page(page); 1375 unlock_page(page);
1376 ntfs_debug("Write outside i_size - truncated?"); 1376 ntfs_debug("Write outside i_size - truncated?");
1377 return 0; 1377 return 0;
@@ -1414,10 +1414,10 @@ retry_writepage:
1414 /* NInoNonResident() == NInoIndexAllocPresent() */ 1414 /* NInoNonResident() == NInoIndexAllocPresent() */
1415 if (NInoNonResident(ni)) { 1415 if (NInoNonResident(ni)) {
1416 /* We have to zero every time due to mmap-at-end-of-file. */ 1416 /* We have to zero every time due to mmap-at-end-of-file. */
1417 if (page->index >= (i_size >> PAGE_CACHE_SHIFT)) { 1417 if (page->index >= (i_size >> PAGE_SHIFT)) {
1418 /* The page straddles i_size. */ 1418 /* The page straddles i_size. */
1419 unsigned int ofs = i_size & ~PAGE_CACHE_MASK; 1419 unsigned int ofs = i_size & ~PAGE_MASK;
1420 zero_user_segment(page, ofs, PAGE_CACHE_SIZE); 1420 zero_user_segment(page, ofs, PAGE_SIZE);
1421 } 1421 }
1422 /* Handle mst protected attributes. */ 1422 /* Handle mst protected attributes. */
1423 if (NInoMstProtected(ni)) 1423 if (NInoMstProtected(ni))
@@ -1500,7 +1500,7 @@ retry_writepage:
1500 le16_to_cpu(ctx->attr->data.resident.value_offset), 1500 le16_to_cpu(ctx->attr->data.resident.value_offset),
1501 addr, attr_len); 1501 addr, attr_len);
1502 /* Zero out of bounds area in the page cache page. */ 1502 /* Zero out of bounds area in the page cache page. */
1503 memset(addr + attr_len, 0, PAGE_CACHE_SIZE - attr_len); 1503 memset(addr + attr_len, 0, PAGE_SIZE - attr_len);
1504 kunmap_atomic(addr); 1504 kunmap_atomic(addr);
1505 flush_dcache_page(page); 1505 flush_dcache_page(page);
1506 flush_dcache_mft_record_page(ctx->ntfs_ino); 1506 flush_dcache_mft_record_page(ctx->ntfs_ino);