about | summary | refs | log | tree | commit | diff | stats
path: root/fs/ecryptfs/mmap.c
diff options
context:
space:
mode:
authorMichael Halcrow <mhalcrow@us.ibm.com>2007-10-16 04:28:14 -0400
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2007-10-16 12:43:12 -0400
commit16a72c455a67bb23eed7292a31c6ba17729e78e6 (patch)
treed2cc7d116730348375ce0d06ad45e24744cd0b5f /fs/ecryptfs/mmap.c
parentecbdc93639f69c1f237ccce6a9aaff1e83f1182f (diff)
ecryptfs: clean up page flag handling
The functions that eventually call down to ecryptfs_read_lower(), ecryptfs_decrypt_page(), and ecryptfs_copy_up_encrypted_with_header() should have the responsibility of managing the page Uptodate status. This patch gets rid of some of the ugliness that resulted from trying to push some of the page flag setting too far down the stack.

Signed-off-by: Michael Halcrow <mhalcrow@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs/ecryptfs/mmap.c')
-rw-r--r--  fs/ecryptfs/mmap.c  28
1 file changed, 21 insertions(+), 7 deletions(-)
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c
index 4eb09c1753c6..16a7a555f392 100644
--- a/fs/ecryptfs/mmap.c
+++ b/fs/ecryptfs/mmap.c
@@ -37,23 +37,27 @@
 struct kmem_cache *ecryptfs_lower_page_cache;
 
 /**
- * ecryptfs_get1page
+ * ecryptfs_get_locked_page
  *
  * Get one page from cache or lower f/s, return error otherwise.
  *
- * Returns unlocked and up-to-date page (if ok), with increased
+ * Returns locked and up-to-date page (if ok), with increased
  * refcnt.
  */
-struct page *ecryptfs_get1page(struct file *file, loff_t index)
+struct page *ecryptfs_get_locked_page(struct file *file, loff_t index)
 {
 	struct dentry *dentry;
 	struct inode *inode;
 	struct address_space *mapping;
+	struct page *page;
 
 	dentry = file->f_path.dentry;
 	inode = dentry->d_inode;
 	mapping = inode->i_mapping;
-	return read_mapping_page(mapping, index, (void *)file);
+	page = read_mapping_page(mapping, index, (void *)file);
+	if (!IS_ERR(page))
+		lock_page(page);
+	return page;
 }
 
 /**
@@ -146,12 +150,10 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page,
 		kunmap_atomic(page_virt, KM_USER0);
 		flush_dcache_page(page);
 		if (rc) {
-			ClearPageUptodate(page);
 			printk(KERN_ERR "%s: Error reading xattr "
 			       "region; rc = [%d]\n", __FUNCTION__, rc);
 			goto out;
 		}
-		SetPageUptodate(page);
 	} else {
 		/* This is an encrypted data extent */
 		loff_t lower_offset =
@@ -232,6 +234,10 @@ static int ecryptfs_readpage(struct file *file, struct page *page)
 		}
 	}
 out:
+	if (rc)
+		ClearPageUptodate(page);
+	else
+		SetPageUptodate(page);
 	ecryptfs_printk(KERN_DEBUG, "Unlocking page with index = [0x%.16x]\n",
 			page->index);
 	unlock_page(page);
@@ -265,10 +271,18 @@ static int ecryptfs_prepare_write(struct file *file, struct page *page,
 	if (from == 0 && to == PAGE_CACHE_SIZE)
 		goto out;	/* If we are writing a full page, it will be
 				   up to date. */
-	if (!PageUptodate(page))
+	if (!PageUptodate(page)) {
 		rc = ecryptfs_read_lower_page_segment(page, page->index, 0,
 						      PAGE_CACHE_SIZE,
 						      page->mapping->host);
+		if (rc) {
+			printk(KERN_ERR "%s: Error attemping to read lower "
+			       "page segment; rc = [%d]\n", __FUNCTION__, rc);
+			ClearPageUptodate(page);
+			goto out;
+		} else
+			SetPageUptodate(page);
+	}
 	if (page->index != 0) {
 		loff_t end_of_prev_pg_pos =
 			(((loff_t)page->index << PAGE_CACHE_SHIFT) - 1);