about summary refs log tree commit diff stats
path: root/fs/ntfs/aops.c
diff options
context:
space:
mode:
authorAnton Altaparmakov <aia21@cantab.net>2005-02-15 05:08:43 -0500
committerAnton Altaparmakov <aia21@cantab.net>2005-05-05 05:56:31 -0400
commitb6ad6c52fe36ab35d0fe28c064f59de2ba670c2a (patch)
treed888c28a2c3c7fa733045dc7dc9c9bc7f157bf4a /fs/ntfs/aops.c
parent1a0df15acdae065789446aca83021c72b71db9a5 (diff)
NTFS: - Split ntfs_map_runlist() into ntfs_map_runlist() and a non-locking
helper ntfs_map_runlist_nolock() which is used by ntfs_map_runlist().
This allows us to map runlist fragments with the runlist lock already
held without having to drop and reacquire it around the call. Adapt all
callers.
- Change ntfs_find_vcn() to ntfs_find_vcn_nolock() which takes a locked
runlist. This allows us to find runlist elements with the runlist lock
already held without having to drop and reacquire it around the call.
Adapt all callers.
Signed-off-by: Anton Altaparmakov <aia21@cantab.net>
Diffstat (limited to 'fs/ntfs/aops.c')
-rw-r--r--fs/ntfs/aops.c34
1 file changed, 10 insertions(+), 24 deletions(-)
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index 812d53e93354..2b4b8b9e8796 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -2,7 +2,7 @@
2 * aops.c - NTFS kernel address space operations and page cache handling. 2 * aops.c - NTFS kernel address space operations and page cache handling.
3 * Part of the Linux-NTFS project. 3 * Part of the Linux-NTFS project.
4 * 4 *
5 * Copyright (c) 2001-2004 Anton Altaparmakov 5 * Copyright (c) 2001-2005 Anton Altaparmakov
6 * Copyright (c) 2002 Richard Russon 6 * Copyright (c) 2002 Richard Russon
7 * 7 *
8 * This program/include file is free software; you can redistribute it and/or 8 * This program/include file is free software; you can redistribute it and/or
@@ -135,7 +135,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
135 i * rec_size), rec_size); 135 i * rec_size), rec_size);
136 flush_dcache_page(page); 136 flush_dcache_page(page);
137 kunmap_atomic(addr, KM_BIO_SRC_IRQ); 137 kunmap_atomic(addr, KM_BIO_SRC_IRQ);
138 if (likely(!PageError(page) && page_uptodate)) 138 if (likely(page_uptodate && !PageError(page)))
139 SetPageUptodate(page); 139 SetPageUptodate(page);
140 } 140 }
141 unlock_page(page); 141 unlock_page(page);
@@ -347,11 +347,11 @@ handle_zblock:
347 */ 347 */
348static int ntfs_readpage(struct file *file, struct page *page) 348static int ntfs_readpage(struct file *file, struct page *page)
349{ 349{
350 loff_t i_size;
351 ntfs_inode *ni, *base_ni; 350 ntfs_inode *ni, *base_ni;
352 u8 *kaddr; 351 u8 *kaddr;
353 ntfs_attr_search_ctx *ctx; 352 ntfs_attr_search_ctx *ctx;
354 MFT_RECORD *mrec; 353 MFT_RECORD *mrec;
354 unsigned long flags;
355 u32 attr_len; 355 u32 attr_len;
356 int err = 0; 356 int err = 0;
357 357
@@ -389,9 +389,9 @@ static int ntfs_readpage(struct file *file, struct page *page)
389 * Attribute is resident, implying it is not compressed or encrypted. 389 * Attribute is resident, implying it is not compressed or encrypted.
390 * This also means the attribute is smaller than an mft record and 390 * This also means the attribute is smaller than an mft record and
391 * hence smaller than a page, so can simply zero out any pages with 391 * hence smaller than a page, so can simply zero out any pages with
392 * index above 0. We can also do this if the file size is 0. 392 * index above 0.
393 */ 393 */
394 if (unlikely(page->index > 0 || !i_size_read(VFS_I(ni)))) { 394 if (unlikely(page->index > 0)) {
395 kaddr = kmap_atomic(page, KM_USER0); 395 kaddr = kmap_atomic(page, KM_USER0);
396 memset(kaddr, 0, PAGE_CACHE_SIZE); 396 memset(kaddr, 0, PAGE_CACHE_SIZE);
397 flush_dcache_page(page); 397 flush_dcache_page(page);
@@ -418,9 +418,10 @@ static int ntfs_readpage(struct file *file, struct page *page)
418 if (unlikely(err)) 418 if (unlikely(err))
419 goto put_unm_err_out; 419 goto put_unm_err_out;
420 attr_len = le32_to_cpu(ctx->attr->data.resident.value_length); 420 attr_len = le32_to_cpu(ctx->attr->data.resident.value_length);
421 i_size = i_size_read(VFS_I(ni)); 421 read_lock_irqsave(&ni->size_lock, flags);
422 if (unlikely(attr_len > i_size)) 422 if (unlikely(attr_len > ni->initialized_size))
423 attr_len = i_size; 423 attr_len = ni->initialized_size;
424 read_unlock_irqrestore(&ni->size_lock, flags);
424 kaddr = kmap_atomic(page, KM_USER0); 425 kaddr = kmap_atomic(page, KM_USER0);
425 /* Copy the data to the page. */ 426 /* Copy the data to the page. */
426 memcpy(kaddr, (u8*)ctx->attr + 427 memcpy(kaddr, (u8*)ctx->attr +
@@ -1247,20 +1248,6 @@ static int ntfs_writepage(struct page *page, struct writeback_control *wbc)
1247 int err; 1248 int err;
1248 1249
1249 BUG_ON(!PageLocked(page)); 1250 BUG_ON(!PageLocked(page));
1250 /*
1251 * If a previous ntfs_truncate() failed, repeat it and abort if it
1252 * fails again.
1253 */
1254 if (unlikely(NInoTruncateFailed(ni))) {
1255 down_write(&vi->i_alloc_sem);
1256 err = ntfs_truncate(vi);
1257 up_write(&vi->i_alloc_sem);
1258 if (err || NInoTruncateFailed(ni)) {
1259 if (!err)
1260 err = -EIO;
1261 goto err_out;
1262 }
1263 }
1264 i_size = i_size_read(vi); 1251 i_size = i_size_read(vi);
1265 /* Is the page fully outside i_size? (truncate in progress) */ 1252 /* Is the page fully outside i_size? (truncate in progress) */
1266 if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >> 1253 if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >>
@@ -1490,13 +1477,12 @@ static int ntfs_prepare_nonresident_write(struct page *page,
1490 1477
1491 read_lock_irqsave(&ni->size_lock, flags); 1478 read_lock_irqsave(&ni->size_lock, flags);
1492 /* 1479 /*
1493 * The first out of bounds block for the allocated size. No need to 1480 * The first out of bounds block for the allocated size. No need to
1494 * round up as allocated_size is in multiples of cluster size and the 1481 * round up as allocated_size is in multiples of cluster size and the
1495 * minimum cluster size is 512 bytes, which is equal to the smallest 1482 * minimum cluster size is 512 bytes, which is equal to the smallest
1496 * blocksize. 1483 * blocksize.
1497 */ 1484 */
1498 ablock = ni->allocated_size >> blocksize_bits; 1485 ablock = ni->allocated_size >> blocksize_bits;
1499
1500 i_size = i_size_read(vi); 1486 i_size = i_size_read(vi);
1501 initialized_size = ni->initialized_size; 1487 initialized_size = ni->initialized_size;
1502 read_unlock_irqrestore(&ni->size_lock, flags); 1488 read_unlock_irqrestore(&ni->size_lock, flags);