author     Dan Magenheimer <dan.magenheimer@oracle.com>    2011-09-21 11:56:28 -0400
committer  Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>  2012-01-23 16:06:24 -0500
commit     3167760f83899ccda312b9ad9306ec9e5dda06d4 (patch)
tree       a040add1a9b267cb0fa92847c88526c1329a258b /mm
parent     972b2c719990f91eb3b2310d44ef8a2d38955a14 (diff)
mm: cleancache: s/flush/invalidate/
Per akpm suggestions alter the use of the term flush to be invalidate.
The next patch will do this across all MM.

This change is completely cosmetic.

[v9: akpm@linux-foundation.org: change "flush" to "invalidate", part 3]
Signed-off-by: Dan Magenheimer <dan.magenheimer@oracle.com>
Cc: Kamezawa Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Jan Beulich <JBeulich@novell.com>
Reviewed-by: Seth Jennings <sjenning@linux.vnet.ibm.com>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Matthew Wilcox <matthew@wil.cx>
Cc: Chris Mason <chris.mason@oracle.com>
Cc: Rik Riel <riel@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
[v10: Fixed fs: move code out of buffer.c conflict change]
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
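The __cleancache_* symbols renamed below are normally reached through thin static-inline wrappers in include/linux/cleancache.h, which is why mm/filemap.c and mm/truncate.c switch to cleancache_invalidate_page()/cleancache_invalidate_inode() rather than calling the exported functions directly. A minimal sketch of that wrapper pattern after the rename, assuming the contemporaneous header (cleancache_enabled, cleancache_fs_enabled() and cleancache_fs_enabled_mapping() come from that header and are not part of this mm-only diff):

/*
 * Sketch of the caller-facing wrappers, paraphrased from the
 * include/linux/cleancache.h side of the series (not in this diffstat).
 */
static inline void cleancache_invalidate_page(struct address_space *mapping,
					      struct page *page)
{
	/* cleancache_enabled is the cheap global flag described in the
	 * comment patched below; the per-fs check tests the pool id. */
	if (cleancache_enabled && cleancache_fs_enabled(page))
		__cleancache_invalidate_page(mapping, page);
}

static inline void cleancache_invalidate_inode(struct address_space *mapping)
{
	if (cleancache_enabled && cleancache_fs_enabled_mapping(mapping))
		__cleancache_invalidate_inode(mapping);
}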
Diffstat (limited to 'mm')
-rw-r--r--  mm/cleancache.c | 19
-rw-r--r--  mm/filemap.c    |  2
-rw-r--r--  mm/truncate.c   | 10
3 files changed, 16 insertions(+), 15 deletions(-)
diff --git a/mm/cleancache.c b/mm/cleancache.c
index bcaae4c2a770..237c6e0feea0 100644
--- a/mm/cleancache.c
+++ b/mm/cleancache.c
@@ -19,7 +19,7 @@
 
 /*
  * This global enablement flag may be read thousands of times per second
- * by cleancache_get/put/flush even on systems where cleancache_ops
+ * by cleancache_get/put/invalidate even on systems where cleancache_ops
  * is not claimed (e.g. cleancache is config'ed on but remains
  * disabled), so is preferred to the slower alternative: a function
  * call that checks a non-global.
@@ -148,10 +148,11 @@ void __cleancache_put_page(struct page *page)
 EXPORT_SYMBOL(__cleancache_put_page);
 
 /*
- * Flush any data from cleancache associated with the poolid and the
+ * Invalidate any data from cleancache associated with the poolid and the
  * page's inode and page index so that a subsequent "get" will fail.
  */
-void __cleancache_flush_page(struct address_space *mapping, struct page *page)
+void __cleancache_invalidate_page(struct address_space *mapping,
+					struct page *page)
 {
 	/* careful... page->mapping is NULL sometimes when this is called */
 	int pool_id = mapping->host->i_sb->cleancache_poolid;
@@ -165,14 +166,14 @@ void __cleancache_flush_page(struct address_space *mapping, struct page *page)
 		}
 	}
 }
-EXPORT_SYMBOL(__cleancache_flush_page);
+EXPORT_SYMBOL(__cleancache_invalidate_page);
 
 /*
- * Flush all data from cleancache associated with the poolid and the
+ * Invalidate all data from cleancache associated with the poolid and the
  * mappings's inode so that all subsequent gets to this poolid/inode
  * will fail.
  */
-void __cleancache_flush_inode(struct address_space *mapping)
+void __cleancache_invalidate_inode(struct address_space *mapping)
 {
 	int pool_id = mapping->host->i_sb->cleancache_poolid;
 	struct cleancache_filekey key = { .u.key = { 0 } };
@@ -180,14 +181,14 @@ void __cleancache_flush_inode(struct address_space *mapping)
 	if (pool_id >= 0 && cleancache_get_key(mapping->host, &key) >= 0)
 		(*cleancache_ops.flush_inode)(pool_id, key);
 }
-EXPORT_SYMBOL(__cleancache_flush_inode);
+EXPORT_SYMBOL(__cleancache_invalidate_inode);
 
 /*
  * Called by any cleancache-enabled filesystem at time of unmount;
  * note that pool_id is surrendered and may be reutrned by a subsequent
  * cleancache_init_fs or cleancache_init_shared_fs
  */
-void __cleancache_flush_fs(struct super_block *sb)
+void __cleancache_invalidate_fs(struct super_block *sb)
 {
 	if (sb->cleancache_poolid >= 0) {
 		int old_poolid = sb->cleancache_poolid;
@@ -195,7 +196,7 @@ void __cleancache_flush_fs(struct super_block *sb)
 		(*cleancache_ops.flush_fs)(old_poolid);
 	}
 }
-EXPORT_SYMBOL(__cleancache_flush_fs);
+EXPORT_SYMBOL(__cleancache_invalidate_fs);
 
 #ifdef CONFIG_SYSFS
 
diff --git a/mm/filemap.c b/mm/filemap.c
index a0701e6eec10..0aa3faa48219 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -123,7 +123,7 @@ void __delete_from_page_cache(struct page *page)
 	if (PageUptodate(page) && PageMappedToDisk(page))
 		cleancache_put_page(page);
 	else
-		cleancache_flush_page(mapping, page);
+		cleancache_invalidate_page(mapping, page);
 
 	radix_tree_delete(&mapping->page_tree, page->index);
 	page->mapping = NULL;
diff --git a/mm/truncate.c b/mm/truncate.c
index 632b15e29f74..b4d575c9a0ee 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -52,7 +52,7 @@ void do_invalidatepage(struct page *page, unsigned long offset)
 static inline void truncate_partial_page(struct page *page, unsigned partial)
 {
 	zero_user_segment(page, partial, PAGE_CACHE_SIZE);
-	cleancache_flush_page(page->mapping, page);
+	cleancache_invalidate_page(page->mapping, page);
 	if (page_has_private(page))
 		do_invalidatepage(page, partial);
 }
@@ -213,7 +213,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
 	pgoff_t end;
 	int i;
 
-	cleancache_flush_inode(mapping);
+	cleancache_invalidate_inode(mapping);
 	if (mapping->nrpages == 0)
 		return;
 
@@ -292,7 +292,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
 		mem_cgroup_uncharge_end();
 		index++;
 	}
-	cleancache_flush_inode(mapping);
+	cleancache_invalidate_inode(mapping);
 }
 EXPORT_SYMBOL(truncate_inode_pages_range);
 
@@ -444,7 +444,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 	int ret2 = 0;
 	int did_range_unmap = 0;
 
-	cleancache_flush_inode(mapping);
+	cleancache_invalidate_inode(mapping);
 	pagevec_init(&pvec, 0);
 	index = start;
 	while (index <= end && pagevec_lookup(&pvec, mapping, index,
@@ -500,7 +500,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 		cond_resched();
 		index++;
 	}
-	cleancache_flush_inode(mapping);
+	cleancache_invalidate_inode(mapping);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
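Note that the backend hooks are still invoked through their old member names in this patch; the hunks above keep (*cleancache_ops.flush_inode)() and (*cleancache_ops.flush_fs)(), so only the mm-facing entry points are renamed here and the ops table itself is left to the follow-on patches mentioned in the commit message. For orientation, the interface those indirect calls go through looks roughly like the sketch below (paraphrased from the contemporaneous include/linux/cleancache.h; exact prototypes and parameter names are an assumption, not part of this diff):

/* Rough sketch of the backend ops table, still using its flush_* names. */
struct cleancache_ops {
	int (*init_fs)(size_t pagesize);
	int (*init_shared_fs)(char *uuid, size_t pagesize);
	int (*get_page)(int pool_id, struct cleancache_filekey key,
			pgoff_t index, struct page *page);
	void (*put_page)(int pool_id, struct cleancache_filekey key,
			 pgoff_t index, struct page *page);
	void (*flush_page)(int pool_id, struct cleancache_filekey key,
			   pgoff_t index);
	void (*flush_inode)(int pool_id, struct cleancache_filekey key);
	void (*flush_fs)(int pool_id);
};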