author		Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2013-04-30 18:26:57 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-04-30 20:04:01 -0400
commit		833f8662af9659508afc3cb80f09138eade378e2 (patch)
tree		fffd8a1328c55a6ec8ac1159d1258e28d8ca7cec /mm
parent		49a9ab815acb8379a2f5fd43abe40038821e8f87 (diff)
cleancache: Make cleancache_init use a pointer for the ops
Instead of using backend_registered to determine whether a backend is
enabled, use a pointer for the ops. This allows us to remove the
backend_registered check and just do 'if (cleancache_ops)'.

[v1: Rebased on top of b97c4b430b0a (ramster->zcache move)]
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Bob Liu <lliubbo@gmail.com>
Cc: Wanpeng Li <liwanp@linux.vnet.ibm.com>
Cc: Andor Daam <andor.daam@googlemail.com>
Cc: Dan Magenheimer <dan.magenheimer@oracle.com>
Cc: Florian Schmaus <fschmaus@gmail.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Stefan Hengelein <ilendir@googlemail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
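
The change the message describes is a common kernel idiom: a single ops pointer whose NULL/non-NULL state doubles as the "is a backend registered?" test, replacing a by-value ops structure plus a separate boolean flag. Below is a minimal userspace sketch of that idiom; every name in it (my_ops, demo_backend, frontend_path) is illustrative and not taken from the patch.

#include <stddef.h>
#include <stdio.h>

struct my_ops {
	int (*init_fs)(size_t pagesize);
};

/* One pointer carries both the ops table and the "registered?" state;
 * NULL means no backend, just like cleancache_ops after this patch. */
static struct my_ops *my_ops;

static int demo_init_fs(size_t pagesize)
{
	printf("backend init, page size %zu\n", pagesize);
	return 0;
}

static struct my_ops demo_backend = { .init_fs = demo_init_fs };

static void frontend_path(void)
{
	if (!my_ops)	/* replaces the old 'if (!backend_registered)' check */
		return;
	my_ops->init_fs(4096);
}

int main(void)
{
	frontend_path();	/* no-op: nothing registered yet */
	my_ops = &demo_backend;	/* register the backend */
	frontend_path();	/* now reaches the backend */
	return 0;
}

The same pointer representation is what lets the patch turn the old (*cleancache_ops.get_page)(...) call syntax into the more idiomatic cleancache_ops->get_page(...).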
Diffstat (limited to 'mm')
-rw-r--r--	mm/cleancache.c	62
1 file changed, 32 insertions(+), 30 deletions(-)
diff --git a/mm/cleancache.c b/mm/cleancache.c
index 0cecdbba4bcd..b3ae19b72035 100644
--- a/mm/cleancache.c
+++ b/mm/cleancache.c
@@ -32,7 +32,7 @@ EXPORT_SYMBOL(cleancache_enabled);
  * cleancache_ops is set by cleancache_ops_register to contain the pointers
  * to the cleancache "backend" implementation functions.
  */
-static struct cleancache_ops cleancache_ops __read_mostly;
+static struct cleancache_ops *cleancache_ops __read_mostly;
 
 /*
  * Counters available via /sys/kernel/debug/frontswap (if debugfs is
@@ -72,15 +72,14 @@ static DEFINE_MUTEX(poolid_mutex);
 /*
  * When set to false (default) all calls to the cleancache functions, except
  * the __cleancache_invalidate_fs and __cleancache_init_[shared|]fs are guarded
- * by the if (!backend_registered) return. This means multiple threads (from
- * different filesystems) will be checking backend_registered. The usage of a
+ * by the if (!cleancache_ops) return. This means multiple threads (from
+ * different filesystems) will be checking cleancache_ops. The usage of a
  * bool instead of a atomic_t or a bool guarded by a spinlock is OK - we are
  * OK if the time between the backend's have been initialized (and
- * backend_registered has been set to true) and when the filesystems start
+ * cleancache_ops has been set to not NULL) and when the filesystems start
  * actually calling the backends. The inverse (when unloading) is obviously
  * not good - but this shim does not do that (yet).
  */
-static bool backend_registered __read_mostly;
 
 /*
  * The backends and filesystems work all asynchronously. This is b/c the
@@ -90,13 +89,13 @@ static bool backend_registered __read_mostly;
  * [shared_|]fs_poolid_map and uuids for.
  *
  * b). user does I/Os -> we call the rest of __cleancache_* functions
- *     which return immediately as backend_registered is false.
+ *     which return immediately as cleancache_ops is false.
  *
  * c). modprobe zcache -> cleancache_register_ops. We init the backend
- *     and set backend_registered to true, and for any fs_poolid_map
+ *     and set cleancache_ops to true, and for any fs_poolid_map
  *     (which is set by __cleancache_init_fs) we initialize the poolid.
  *
- * d). user does I/Os -> now that backend_registered is true all the
+ * d). user does I/Os -> now that cleancache_ops is true all the
  *     __cleancache_* functions can call the backend. They all check
  *     that fs_poolid_map is valid and if so invoke the backend.
  *
@@ -120,23 +119,26 @@ static bool backend_registered __read_mostly;
  * Register operations for cleancache, returning previous thus allowing
  * detection of multiple backends and possible nesting.
  */
-struct cleancache_ops cleancache_register_ops(struct cleancache_ops *ops)
+struct cleancache_ops *cleancache_register_ops(struct cleancache_ops *ops)
 {
-        struct cleancache_ops old = cleancache_ops;
+        struct cleancache_ops *old = cleancache_ops;
         int i;
 
         mutex_lock(&poolid_mutex);
-        cleancache_ops = *ops;
-
-        backend_registered = true;
         for (i = 0; i < MAX_INITIALIZABLE_FS; i++) {
                 if (fs_poolid_map[i] == FS_NO_BACKEND)
-                        fs_poolid_map[i] = (*cleancache_ops.init_fs)(PAGE_SIZE);
+                        fs_poolid_map[i] = ops->init_fs(PAGE_SIZE);
                 if (shared_fs_poolid_map[i] == FS_NO_BACKEND)
-                        shared_fs_poolid_map[i] = (*cleancache_ops.init_shared_fs)
+                        shared_fs_poolid_map[i] = ops->init_shared_fs
                                         (uuids[i], PAGE_SIZE);
         }
-out:
+        /*
+         * We MUST set cleancache_ops _after_ we have called the backends
+         * init_fs or init_shared_fs functions. Otherwise the compiler might
+         * re-order where cleancache_ops is set in this function.
+         */
+        barrier();
+        cleancache_ops = ops;
         mutex_unlock(&poolid_mutex);
         return old;
 }
@@ -151,8 +153,8 @@ void __cleancache_init_fs(struct super_block *sb)
         for (i = 0; i < MAX_INITIALIZABLE_FS; i++) {
                 if (fs_poolid_map[i] == FS_UNKNOWN) {
                         sb->cleancache_poolid = i + FAKE_FS_POOLID_OFFSET;
-                        if (backend_registered)
-                                fs_poolid_map[i] = (*cleancache_ops.init_fs)(PAGE_SIZE);
+                        if (cleancache_ops)
+                                fs_poolid_map[i] = cleancache_ops->init_fs(PAGE_SIZE);
                         else
                                 fs_poolid_map[i] = FS_NO_BACKEND;
                         break;
@@ -172,8 +174,8 @@ void __cleancache_init_shared_fs(char *uuid, struct super_block *sb)
                 if (shared_fs_poolid_map[i] == FS_UNKNOWN) {
                         sb->cleancache_poolid = i + FAKE_SHARED_FS_POOLID_OFFSET;
                         uuids[i] = uuid;
-                        if (backend_registered)
-                                shared_fs_poolid_map[i] = (*cleancache_ops.init_shared_fs)
+                        if (cleancache_ops)
+                                shared_fs_poolid_map[i] = cleancache_ops->init_shared_fs
                                                 (uuid, PAGE_SIZE);
                         else
                                 shared_fs_poolid_map[i] = FS_NO_BACKEND;
@@ -240,7 +242,7 @@ int __cleancache_get_page(struct page *page)
         int fake_pool_id;
         struct cleancache_filekey key = { .u.key = { 0 } };
 
-        if (!backend_registered) {
+        if (!cleancache_ops) {
                 cleancache_failed_gets++;
                 goto out;
         }
@@ -255,7 +257,7 @@ int __cleancache_get_page(struct page *page)
                 goto out;
 
         if (pool_id >= 0)
-                ret = (*cleancache_ops.get_page)(pool_id,
+                ret = cleancache_ops->get_page(pool_id,
                                         key, page->index, page);
         if (ret == 0)
                 cleancache_succ_gets++;
@@ -282,7 +284,7 @@ void __cleancache_put_page(struct page *page)
         int fake_pool_id;
         struct cleancache_filekey key = { .u.key = { 0 } };
 
-        if (!backend_registered) {
+        if (!cleancache_ops) {
                 cleancache_puts++;
                 return;
         }
@@ -296,7 +298,7 @@ void __cleancache_put_page(struct page *page)
 
         if (pool_id >= 0 &&
                 cleancache_get_key(page->mapping->host, &key) >= 0) {
-                (*cleancache_ops.put_page)(pool_id, key, page->index, page);
+                cleancache_ops->put_page(pool_id, key, page->index, page);
                 cleancache_puts++;
         }
 }
@@ -318,7 +320,7 @@ void __cleancache_invalidate_page(struct address_space *mapping,
         int fake_pool_id = mapping->host->i_sb->cleancache_poolid;
         struct cleancache_filekey key = { .u.key = { 0 } };
 
-        if (!backend_registered)
+        if (!cleancache_ops)
                 return;
 
         if (fake_pool_id >= 0) {
@@ -328,7 +330,7 @@ void __cleancache_invalidate_page(struct address_space *mapping,
 
                 VM_BUG_ON(!PageLocked(page));
                 if (cleancache_get_key(mapping->host, &key) >= 0) {
-                        (*cleancache_ops.invalidate_page)(pool_id,
+                        cleancache_ops->invalidate_page(pool_id,
                                         key, page->index);
                         cleancache_invalidates++;
                 }
@@ -351,7 +353,7 @@ void __cleancache_invalidate_inode(struct address_space *mapping)
         int fake_pool_id = mapping->host->i_sb->cleancache_poolid;
         struct cleancache_filekey key = { .u.key = { 0 } };
 
-        if (!backend_registered)
+        if (!cleancache_ops)
                 return;
 
         if (fake_pool_id < 0)
@@ -360,7 +362,7 @@ void __cleancache_invalidate_inode(struct address_space *mapping)
         pool_id = get_poolid_from_fake(fake_pool_id);
 
         if (pool_id >= 0 && cleancache_get_key(mapping->host, &key) >= 0)
-                (*cleancache_ops.invalidate_inode)(pool_id, key);
+                cleancache_ops->invalidate_inode(pool_id, key);
 }
 EXPORT_SYMBOL(__cleancache_invalidate_inode);
 
@@ -387,8 +389,8 @@ void __cleancache_invalidate_fs(struct super_block *sb)
                 fs_poolid_map[index] = FS_UNKNOWN;
         }
         sb->cleancache_poolid = -1;
-        if (backend_registered)
-                (*cleancache_ops.invalidate_fs)(old_poolid);
+        if (cleancache_ops)
+                cleancache_ops->invalidate_fs(old_poolid);
         mutex_unlock(&poolid_mutex);
 }
 EXPORT_SYMBOL(__cleancache_invalidate_fs);
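
The barrier() the patch adds to cleancache_register_ops() is a compiler-only fence: it stops the compiler from hoisting the cleancache_ops assignment above the backend's init_fs/init_shared_fs calls, so any thread that observes a non-NULL cleancache_ops observes an initialized backend. Below is a hedged userspace analogue of this publish-after-init discipline, using a C11 release store where the patch uses barrier(); the names backend_ops, active_ops, register_backend, and frontend_get are illustrative, not from the patch.

#include <stdatomic.h>
#include <stddef.h>

struct backend_ops {
	int (*init_fs)(size_t pagesize);
	int (*get)(int pool_id);
};

/* NULL until a backend is registered; hot-path readers test it directly. */
static _Atomic(struct backend_ops *) active_ops;

struct backend_ops *register_backend(struct backend_ops *ops)
{
	struct backend_ops *old = atomic_load(&active_ops);

	/* Run the backend's init hook while active_ops is still NULL,
	 * so concurrent readers keep taking the early-return path. */
	ops->init_fs(4096);

	/* Publish only after init. This release store plays the role the
	 * patch gives barrier(): the pointer assignment cannot be
	 * reordered before the initialization above. */
	atomic_store_explicit(&active_ops, ops, memory_order_release);
	return old;
}

int frontend_get(int pool_id)
{
	struct backend_ops *ops =
		atomic_load_explicit(&active_ops, memory_order_acquire);

	if (!ops)	/* mirrors 'if (!cleancache_ops) return' */
		return -1;
	return ops->get(pool_id);
}

The patch itself relies on barrier() plus the surrounding poolid_mutex, and its readers tolerate a transiently NULL pointer by falling back to the no-backend path; the release/acquire pair above is simply the portable userspace stand-in for that combination.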