path: root/mm/cleancache.c
author    Vladimir Davydov <vdavydov@parallels.com>    2015-04-14 18:46:45 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2015-04-14 19:49:03 -0400
commit    53d85c98566535d7bc69470f008d7dcb09a0fec9 (patch)
tree      2689f2205c941c8078a6b1be85389fe1f5b059e6 /mm/cleancache.c
parent    9de1626290eaa7d921413ddc83544bc3bae27283 (diff)
cleancache: forbid overriding cleancache_ops
Currently, cleancache_register_ops returns the previous value of
cleancache_ops to allow chaining.  However, chaining, as it is implemented
now, is extremely dangerous due to possible pool id collisions.  Suppose a
new cleancache driver is registered after the previous one has assigned an
id to a super block.  If the new driver assigns the same id to another
super block, which is perfectly possible, we end up with two different
filesystems using the same id.  Whether or not the new driver implements
chaining, such a configuration is likely to lead to data corruption
eventually.

This patch therefore disables the ability to override cleancache_ops
altogether as potentially dangerous.  If a cleancache driver is already
registered, all further calls to cleancache_register_ops will return
-EBUSY.  Since no user of cleancache implements chaining, we only need to
make minor changes to the code outside the cleancache core.

Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: David Vrabel <david.vrabel@citrix.com>
Cc: Mark Fasheh <mfasheh@suse.com>
Cc: Joel Becker <jlbec@evilplan.org>
Cc: Stefan Hengelein <ilendir@googlemail.com>
Cc: Florian Schmaus <fschmaus@gmail.com>
Cc: Andor Daam <andor.daam@googlemail.com>
Cc: Dan Magenheimer <dan.magenheimer@oracle.com>
Cc: Bob Liu <lliubbo@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
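The caller-side impact of the new interface is small: a backend now checks an
integer return value instead of saving the previous ops pointer for chaining.
The following is a minimal sketch of a hypothetical backend's registration
path under the new semantics; my_cleancache_ops, my_init_fs and
my_backend_init are illustrative names, not taken from any in-tree driver.

#include <linux/module.h>
#include <linux/cleancache.h>

/*
 * Illustrative backend ops: only ->init_fs is shown here; a real backend
 * would also fill in the page get/put and invalidation callbacks.
 */
static int my_init_fs(size_t pagesize)
{
	return 0;	/* a real backend would return its pool id */
}

static struct cleancache_ops my_cleancache_ops = {
	.init_fs = my_init_fs,
	/* ... remaining callbacks ... */
};

static int __init my_backend_init(void)
{
	int err;

	/*
	 * Old interface: the previous ops pointer was returned and could be
	 * chained to.  New interface: registration simply fails if another
	 * backend is already in place.
	 */
	err = cleancache_register_ops(&my_cleancache_ops);
	if (err)
		return err;	/* -EBUSY: another backend is registered */

	return 0;
}
module_init(my_backend_init);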
Diffstat (limited to 'mm/cleancache.c')
-rw-r--r--  mm/cleancache.c  12
1 files changed, 7 insertions, 5 deletions
diff --git a/mm/cleancache.c b/mm/cleancache.c
index 532495f2e4f4..aa10f9a3bc88 100644
--- a/mm/cleancache.c
+++ b/mm/cleancache.c
@@ -106,15 +106,17 @@ static DEFINE_MUTEX(poolid_mutex);
  */
 
 /*
- * Register operations for cleancache, returning previous thus allowing
- * detection of multiple backends and possible nesting.
+ * Register operations for cleancache. Returns 0 on success.
  */
-struct cleancache_ops *cleancache_register_ops(struct cleancache_ops *ops)
+int cleancache_register_ops(struct cleancache_ops *ops)
 {
-	struct cleancache_ops *old = cleancache_ops;
 	int i;
 
 	mutex_lock(&poolid_mutex);
+	if (cleancache_ops) {
+		mutex_unlock(&poolid_mutex);
+		return -EBUSY;
+	}
 	for (i = 0; i < MAX_INITIALIZABLE_FS; i++) {
 		if (fs_poolid_map[i] == FS_NO_BACKEND)
 			fs_poolid_map[i] = ops->init_fs(PAGE_SIZE);
@@ -130,7 +132,7 @@ struct cleancache_ops *cleancache_register_ops(struct cleancache_ops *ops)
 	barrier();
 	cleancache_ops = ops;
 	mutex_unlock(&poolid_mutex);
-	return old;
+	return 0;
 }
 EXPORT_SYMBOL(cleancache_register_ops);
 