aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorKonrad Rzeszutek Wilk <konrad.wilk@oracle.com>2012-03-19 12:12:19 -0400
committerKonrad Rzeszutek Wilk <konrad.wilk@oracle.com>2012-03-19 12:12:19 -0400
commit16c0cfa425b8e1488f7a1873bd112a7a099325f0 (patch)
treeeeba5728a8072096279c55ffc10a5ca7ac0ddf05 /mm
parent94574d9a4c236e8bd19721b4adb0ea30ef446901 (diff)
parent072611ed1f291053a74b28b813d683a09495eba7 (diff)
Merge branch 'stable/cleancache.v13' into linux-next
* stable/cleancache.v13: mm: cleancache: Use __read_mostly as appropiate. mm: cleancache: report statistics via debugfs instead of sysfs. mm: zcache/tmem/cleancache: s/flush/invalidate/ mm: cleancache: s/flush/invalidate/
Diffstat (limited to 'mm')
-rw-r--r--mm/cleancache.c98
-rw-r--r--mm/filemap.c2
-rw-r--r--mm/truncate.c10
3 files changed, 44 insertions, 66 deletions
diff --git a/mm/cleancache.c b/mm/cleancache.c
index bcaae4c2a770..5646c740f613 100644
--- a/mm/cleancache.c
+++ b/mm/cleancache.c
@@ -15,29 +15,34 @@
15#include <linux/fs.h> 15#include <linux/fs.h>
16#include <linux/exportfs.h> 16#include <linux/exportfs.h>
17#include <linux/mm.h> 17#include <linux/mm.h>
18#include <linux/debugfs.h>
18#include <linux/cleancache.h> 19#include <linux/cleancache.h>
19 20
20/* 21/*
21 * This global enablement flag may be read thousands of times per second 22 * This global enablement flag may be read thousands of times per second
22 * by cleancache_get/put/flush even on systems where cleancache_ops 23 * by cleancache_get/put/invalidate even on systems where cleancache_ops
23 * is not claimed (e.g. cleancache is config'ed on but remains 24 * is not claimed (e.g. cleancache is config'ed on but remains
24 * disabled), so is preferred to the slower alternative: a function 25 * disabled), so is preferred to the slower alternative: a function
25 * call that checks a non-global. 26 * call that checks a non-global.
26 */ 27 */
27int cleancache_enabled; 28int cleancache_enabled __read_mostly;
28EXPORT_SYMBOL(cleancache_enabled); 29EXPORT_SYMBOL(cleancache_enabled);
29 30
30/* 31/*
31 * cleancache_ops is set by cleancache_ops_register to contain the pointers 32 * cleancache_ops is set by cleancache_ops_register to contain the pointers
32 * to the cleancache "backend" implementation functions. 33 * to the cleancache "backend" implementation functions.
33 */ 34 */
34static struct cleancache_ops cleancache_ops; 35static struct cleancache_ops cleancache_ops __read_mostly;
35 36
36/* useful stats available in /sys/kernel/mm/cleancache */ 37/*
37static unsigned long cleancache_succ_gets; 38 * Counters available via /sys/kernel/debug/cleancache (if debugfs is
38static unsigned long cleancache_failed_gets; 39 * properly configured). These are for information only so are not protected
39static unsigned long cleancache_puts; 40 * against increment races.
40static unsigned long cleancache_flushes; 41 */
42static u64 cleancache_succ_gets;
43static u64 cleancache_failed_gets;
44static u64 cleancache_puts;
45static u64 cleancache_invalidates;
41 46
42/* 47/*
43 * register operations for cleancache, returning previous thus allowing 48 * register operations for cleancache, returning previous thus allowing
@@ -148,10 +153,11 @@ void __cleancache_put_page(struct page *page)
148EXPORT_SYMBOL(__cleancache_put_page); 153EXPORT_SYMBOL(__cleancache_put_page);
149 154
150/* 155/*
151 * Flush any data from cleancache associated with the poolid and the 156 * Invalidate any data from cleancache associated with the poolid and the
152 * page's inode and page index so that a subsequent "get" will fail. 157 * page's inode and page index so that a subsequent "get" will fail.
153 */ 158 */
154void __cleancache_flush_page(struct address_space *mapping, struct page *page) 159void __cleancache_invalidate_page(struct address_space *mapping,
160 struct page *page)
155{ 161{
156 /* careful... page->mapping is NULL sometimes when this is called */ 162 /* careful... page->mapping is NULL sometimes when this is called */
157 int pool_id = mapping->host->i_sb->cleancache_poolid; 163 int pool_id = mapping->host->i_sb->cleancache_poolid;
@@ -160,85 +166,57 @@ void __cleancache_flush_page(struct address_space *mapping, struct page *page)
160 if (pool_id >= 0) { 166 if (pool_id >= 0) {
161 VM_BUG_ON(!PageLocked(page)); 167 VM_BUG_ON(!PageLocked(page));
162 if (cleancache_get_key(mapping->host, &key) >= 0) { 168 if (cleancache_get_key(mapping->host, &key) >= 0) {
163 (*cleancache_ops.flush_page)(pool_id, key, page->index); 169 (*cleancache_ops.invalidate_page)(pool_id,
164 cleancache_flushes++; 170 key, page->index);
171 cleancache_invalidates++;
165 } 172 }
166 } 173 }
167} 174}
168EXPORT_SYMBOL(__cleancache_flush_page); 175EXPORT_SYMBOL(__cleancache_invalidate_page);
169 176
170/* 177/*
171 * Flush all data from cleancache associated with the poolid and the 178 * Invalidate all data from cleancache associated with the poolid and the
172 * mapping's inode so that all subsequent gets to this poolid/inode 179 * mapping's inode so that all subsequent gets to this poolid/inode
173 * will fail. 180 * will fail.
174 */ 181 */
175void __cleancache_flush_inode(struct address_space *mapping) 182void __cleancache_invalidate_inode(struct address_space *mapping)
176{ 183{
177 int pool_id = mapping->host->i_sb->cleancache_poolid; 184 int pool_id = mapping->host->i_sb->cleancache_poolid;
178 struct cleancache_filekey key = { .u.key = { 0 } }; 185 struct cleancache_filekey key = { .u.key = { 0 } };
179 186
180 if (pool_id >= 0 && cleancache_get_key(mapping->host, &key) >= 0) 187 if (pool_id >= 0 && cleancache_get_key(mapping->host, &key) >= 0)
181 (*cleancache_ops.flush_inode)(pool_id, key); 188 (*cleancache_ops.invalidate_inode)(pool_id, key);
182} 189}
183EXPORT_SYMBOL(__cleancache_flush_inode); 190EXPORT_SYMBOL(__cleancache_invalidate_inode);
184 191
185/* 192/*
186 * Called by any cleancache-enabled filesystem at time of unmount; 193 * Called by any cleancache-enabled filesystem at time of unmount;
187 * note that pool_id is surrendered and may be returned by a subsequent 194 * note that pool_id is surrendered and may be returned by a subsequent
188 * cleancache_init_fs or cleancache_init_shared_fs 195 * cleancache_init_fs or cleancache_init_shared_fs
189 */ 196 */
190void __cleancache_flush_fs(struct super_block *sb) 197void __cleancache_invalidate_fs(struct super_block *sb)
191{ 198{
192 if (sb->cleancache_poolid >= 0) { 199 if (sb->cleancache_poolid >= 0) {
193 int old_poolid = sb->cleancache_poolid; 200 int old_poolid = sb->cleancache_poolid;
194 sb->cleancache_poolid = -1; 201 sb->cleancache_poolid = -1;
195 (*cleancache_ops.flush_fs)(old_poolid); 202 (*cleancache_ops.invalidate_fs)(old_poolid);
196 } 203 }
197} 204}
198EXPORT_SYMBOL(__cleancache_flush_fs); 205EXPORT_SYMBOL(__cleancache_invalidate_fs);
199
200#ifdef CONFIG_SYSFS
201
202/* see Documentation/ABI/xxx/sysfs-kernel-mm-cleancache */
203
204#define CLEANCACHE_SYSFS_RO(_name) \
205 static ssize_t cleancache_##_name##_show(struct kobject *kobj, \
206 struct kobj_attribute *attr, char *buf) \
207 { \
208 return sprintf(buf, "%lu\n", cleancache_##_name); \
209 } \
210 static struct kobj_attribute cleancache_##_name##_attr = { \
211 .attr = { .name = __stringify(_name), .mode = 0444 }, \
212 .show = cleancache_##_name##_show, \
213 }
214
215CLEANCACHE_SYSFS_RO(succ_gets);
216CLEANCACHE_SYSFS_RO(failed_gets);
217CLEANCACHE_SYSFS_RO(puts);
218CLEANCACHE_SYSFS_RO(flushes);
219
220static struct attribute *cleancache_attrs[] = {
221 &cleancache_succ_gets_attr.attr,
222 &cleancache_failed_gets_attr.attr,
223 &cleancache_puts_attr.attr,
224 &cleancache_flushes_attr.attr,
225 NULL,
226};
227
228static struct attribute_group cleancache_attr_group = {
229 .attrs = cleancache_attrs,
230 .name = "cleancache",
231};
232
233#endif /* CONFIG_SYSFS */
234 206
235static int __init init_cleancache(void) 207static int __init init_cleancache(void)
236{ 208{
237#ifdef CONFIG_SYSFS 209#ifdef CONFIG_DEBUG_FS
238 int err; 210 struct dentry *root = debugfs_create_dir("cleancache", NULL);
239 211 if (root == NULL)
240 err = sysfs_create_group(mm_kobj, &cleancache_attr_group); 212 return -ENXIO;
241#endif /* CONFIG_SYSFS */ 213 debugfs_create_u64("succ_gets", S_IRUGO, root, &cleancache_succ_gets);
214 debugfs_create_u64("failed_gets", S_IRUGO,
215 root, &cleancache_failed_gets);
216 debugfs_create_u64("puts", S_IRUGO, root, &cleancache_puts);
217 debugfs_create_u64("invalidates", S_IRUGO,
218 root, &cleancache_invalidates);
219#endif
242 return 0; 220 return 0;
243} 221}
244module_init(init_cleancache) 222module_init(init_cleancache)
diff --git a/mm/filemap.c b/mm/filemap.c
index b66275757c28..8cd7e97eae1f 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -123,7 +123,7 @@ void __delete_from_page_cache(struct page *page)
123 if (PageUptodate(page) && PageMappedToDisk(page)) 123 if (PageUptodate(page) && PageMappedToDisk(page))
124 cleancache_put_page(page); 124 cleancache_put_page(page);
125 else 125 else
126 cleancache_flush_page(mapping, page); 126 cleancache_invalidate_page(mapping, page);
127 127
128 radix_tree_delete(&mapping->page_tree, page->index); 128 radix_tree_delete(&mapping->page_tree, page->index);
129 page->mapping = NULL; 129 page->mapping = NULL;
diff --git a/mm/truncate.c b/mm/truncate.c
index 632b15e29f74..b4d575c9a0ee 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -52,7 +52,7 @@ void do_invalidatepage(struct page *page, unsigned long offset)
52static inline void truncate_partial_page(struct page *page, unsigned partial) 52static inline void truncate_partial_page(struct page *page, unsigned partial)
53{ 53{
54 zero_user_segment(page, partial, PAGE_CACHE_SIZE); 54 zero_user_segment(page, partial, PAGE_CACHE_SIZE);
55 cleancache_flush_page(page->mapping, page); 55 cleancache_invalidate_page(page->mapping, page);
56 if (page_has_private(page)) 56 if (page_has_private(page))
57 do_invalidatepage(page, partial); 57 do_invalidatepage(page, partial);
58} 58}
@@ -213,7 +213,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
213 pgoff_t end; 213 pgoff_t end;
214 int i; 214 int i;
215 215
216 cleancache_flush_inode(mapping); 216 cleancache_invalidate_inode(mapping);
217 if (mapping->nrpages == 0) 217 if (mapping->nrpages == 0)
218 return; 218 return;
219 219
@@ -292,7 +292,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
292 mem_cgroup_uncharge_end(); 292 mem_cgroup_uncharge_end();
293 index++; 293 index++;
294 } 294 }
295 cleancache_flush_inode(mapping); 295 cleancache_invalidate_inode(mapping);
296} 296}
297EXPORT_SYMBOL(truncate_inode_pages_range); 297EXPORT_SYMBOL(truncate_inode_pages_range);
298 298
@@ -444,7 +444,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
444 int ret2 = 0; 444 int ret2 = 0;
445 int did_range_unmap = 0; 445 int did_range_unmap = 0;
446 446
447 cleancache_flush_inode(mapping); 447 cleancache_invalidate_inode(mapping);
448 pagevec_init(&pvec, 0); 448 pagevec_init(&pvec, 0);
449 index = start; 449 index = start;
450 while (index <= end && pagevec_lookup(&pvec, mapping, index, 450 while (index <= end && pagevec_lookup(&pvec, mapping, index,
@@ -500,7 +500,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
500 cond_resched(); 500 cond_resched();
501 index++; 501 index++;
502 } 502 }
503 cleancache_flush_inode(mapping); 503 cleancache_invalidate_inode(mapping);
504 return ret; 504 return ret;
505} 505}
506EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range); 506EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);