author	David Howells <dhowells@redhat.com>	2009-11-19 13:11:52 -0500
committer	David Howells <dhowells@redhat.com>	2009-11-19 13:11:52 -0500
commit	a17754fb8c28af19cd70dcbec6d5b0773b94e0c1 (patch)
tree	d7c25b217c684153eadbac78ab9b1bbff08b75f6 /fs
parent	868411be3f445a83fafbd734f3e426400138add5 (diff)
CacheFiles: Don't write a full page if there's only a partial page to cache
cachefiles_write_page() writes a full page to the backing file for the last
page of the netfs file, even if the netfs file's last page is only a partial
page.
This causes the EOF on the backing file to be extended beyond the EOF of the
netfs, and thus the backing file will be truncated by cachefiles_attr_changed()
called from cachefiles_lookup_object().
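
To make the arithmetic concrete (a standalone userspace sketch, not kernel code; the 4 KiB page size and the 5000-byte netfs file size are made-up example values):

#include <stdio.h>
#include <stdint.h>

#define EX_PAGE_SHIFT	12
#define EX_PAGE_SIZE	((uint64_t)1 << EX_PAGE_SHIFT)	/* assume 4 KiB pages */

int main(void)
{
	uint64_t netfs_eof = 5000;			/* hypothetical netfs file size */
	uint64_t last_index = netfs_eof >> EX_PAGE_SHIFT; /* index of the partial last page */
	uint64_t pos = last_index << EX_PAGE_SHIFT;	/* file offset of that page */
	uint64_t backing_eof = pos + EX_PAGE_SIZE;	/* old behaviour: a full page is written */

	printf("netfs EOF %llu, backing EOF %llu, overshoot %llu bytes\n",
	       (unsigned long long)netfs_eof,
	       (unsigned long long)backing_eof,
	       (unsigned long long)(backing_eof - netfs_eof));
	return 0;
}

With these numbers the backing file ends at 8192 while the netfs file ends at 5000, which is exactly the overshoot that triggers the truncation described above.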
So we need to limit the write we make to the backing file on that last page
such that it doesn't push the EOF too far.
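
A minimal sketch of that limit (userspace C, assuming 4 KiB pages; ex_write_len() is a hypothetical helper, not a kernel function, and the real logic is in the cachefiles_write_page() hunk below):

#include <stddef.h>
#include <stdint.h>

#define EX_PAGE_SHIFT	12
#define EX_PAGE_SIZE	((uint64_t)1 << EX_PAGE_SHIFT)
#define EX_PAGE_MASK	(~(EX_PAGE_SIZE - 1))

/* How many bytes of the page at 'pos' may be written to the backing file,
 * given the store limit 'eof' (the netfs file size); assumes pos < eof. */
static size_t ex_write_len(uint64_t pos, uint64_t eof)
{
	size_t len = EX_PAGE_SIZE;

	/* only a file whose size isn't page-aligned has a partial tail page,
	 * and only that tail page needs cutting short */
	if ((eof & ~EX_PAGE_MASK) && eof - pos < EX_PAGE_SIZE)
		len = eof - pos;

	return len;
}

For pos = 4096 and eof = 5000 this returns 904 rather than a full 4096, so the write ends exactly on the netfs EOF.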
Also, if a backing file that has a partial page at the end is expanded, we
discard the partial page and refetch it on the basis that we then have a hole
in the file with invalid data, and should the power go out... A better way to
deal with this could be to record a note that the partial page contains invalid
data until the correct data is written into it.
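
A sketch of that discard-on-expansion step (userspace C mirroring the interface.c hunk below; ex_resize_backing_file() and its set_size callback are illustrative stand-ins for the notify_change(ATTR_SIZE) calls made on the backing inode):

#include <stdint.h>

#define EX_PAGE_SIZE	((uint64_t)1 << 12)	/* assume 4 KiB pages */
#define EX_PAGE_MASK	(~(EX_PAGE_SIZE - 1))

/* 'set_size' stands in for notify_change(ATTR_SIZE) on the backing inode. */
static int ex_resize_backing_file(uint64_t oi_size, uint64_t ni_size,
				  int (*set_size)(uint64_t new_size))
{
	int ret;

	/* growing a file that ends in a partial page: drop that partial page
	 * first so stale data isn't left behind in the middle of the file */
	if ((oi_size & ~EX_PAGE_MASK) && ni_size > oi_size) {
		ret = set_size(oi_size & EX_PAGE_MASK);
		if (ret < 0)
			return ret;
	}

	/* then apply the new size (shrink or extend as required) */
	return set_size(ni_size);
}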
This isn't a problem for netfses (such as NFS) that discard the whole backing
file if the file size changes.
Signed-off-by: David Howells <dhowells@redhat.com>
Diffstat (limited to 'fs')
-rw-r--r--	fs/cachefiles/interface.c	20
-rw-r--r--	fs/cachefiles/rdwr.c	23
2 files changed, 36 insertions, 7 deletions
diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c
index dd7f852746cb..8e67abf05985 100644
--- a/fs/cachefiles/interface.c
+++ b/fs/cachefiles/interface.c
@@ -404,12 +404,26 @@ static int cachefiles_attr_changed(struct fscache_object *_object)
 	if (oi_size == ni_size)
 		return 0;
 
-	newattrs.ia_size = ni_size;
-	newattrs.ia_valid = ATTR_SIZE;
-
 	cachefiles_begin_secure(cache, &saved_cred);
 	mutex_lock(&object->backer->d_inode->i_mutex);
+
+	/* if there's an extension to a partial page at the end of the backing
+	 * file, we need to discard the partial page so that we pick up new
+	 * data after it */
+	if (oi_size & ~PAGE_MASK && ni_size > oi_size) {
+		_debug("discard tail %llx", oi_size);
+		newattrs.ia_valid = ATTR_SIZE;
+		newattrs.ia_size = oi_size & PAGE_MASK;
+		ret = notify_change(object->backer, &newattrs);
+		if (ret < 0)
+			goto truncate_failed;
+	}
+
+	newattrs.ia_valid = ATTR_SIZE;
+	newattrs.ia_size = ni_size;
 	ret = notify_change(object->backer, &newattrs);
+
+truncate_failed:
 	mutex_unlock(&object->backer->d_inode->i_mutex);
 	cachefiles_end_secure(cache, saved_cred);
 
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
index 3304646dae84..5a84fd7109ad 100644
--- a/fs/cachefiles/rdwr.c
+++ b/fs/cachefiles/rdwr.c
@@ -803,7 +803,8 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
 	struct cachefiles_cache *cache;
 	mm_segment_t old_fs;
 	struct file *file;
-	loff_t pos;
+	loff_t pos, eof;
+	size_t len;
 	void *data;
 	int ret;
 
@@ -837,15 +838,29 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
 	ret = -EIO;
 	if (file->f_op->write) {
 		pos = (loff_t) page->index << PAGE_SHIFT;
+
+		/* we mustn't write more data than we have, so we have
+		 * to beware of a partial page at EOF */
+		eof = object->fscache.store_limit_l;
+		len = PAGE_SIZE;
+		if (eof & ~PAGE_MASK) {
+			ASSERTCMP(pos, <, eof);
+			if (eof - pos < PAGE_SIZE) {
+				_debug("cut short %llx to %llx",
+				       pos, eof);
+				len = eof - pos;
+				ASSERTCMP(pos + len, ==, eof);
+			}
+		}
+
 		data = kmap(page);
 		old_fs = get_fs();
 		set_fs(KERNEL_DS);
 		ret = file->f_op->write(
-			file, (const void __user *) data, PAGE_SIZE,
-			&pos);
+			file, (const void __user *) data, len, &pos);
 		set_fs(old_fs);
 		kunmap(page);
-		if (ret != PAGE_SIZE)
+		if (ret != len)
 			ret = -EIO;
 	}
 	fput(file);