aboutsummaryrefslogtreecommitdiffstats
path: root/fs/locks.c
diff options
context:
space:
mode:
authorMiklos Szeredi <miklos@szeredi.hu>2011-07-07 07:06:09 -0400
committerJ. Bruce Fields <bfields@redhat.com>2011-07-15 19:00:39 -0400
commitee19cc406d4c0ae3118f59e000984d935b372871 (patch)
treecb6e5021c3cabaf3312a1125c4448fc85ac46f2d /fs/locks.c
parentae82a8d06fee573def55b63868b1ea109461a531 (diff)
fs: locks: remove init_once
From: Miklos Szeredi <mszeredi@suse.cz> Remove SLAB initialization entirely, as suggested by Bruce and Linus. Allocate with __GFP_ZERO instead and only initialize list heads. Signed-off-by: Miklos Szeredi <mszeredi@suse.cz> Signed-off-by: J. Bruce Fields <bfields@redhat.com>
Diffstat (limited to 'fs/locks.c')
-rw-r--r--fs/locks.c41
1 file changed, 10 insertions(+), 31 deletions(-)
diff --git a/fs/locks.c b/fs/locks.c
index b286539d547a..7ddab3f9df07 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -160,26 +160,20 @@ EXPORT_SYMBOL_GPL(unlock_flocks);
160 160
161static struct kmem_cache *filelock_cache __read_mostly; 161static struct kmem_cache *filelock_cache __read_mostly;
162 162
163static void locks_init_lock_always(struct file_lock *fl) 163static void locks_init_lock_heads(struct file_lock *fl)
164{ 164{
165 fl->fl_next = NULL; 165 INIT_LIST_HEAD(&fl->fl_link);
166 fl->fl_fasync = NULL; 166 INIT_LIST_HEAD(&fl->fl_block);
167 fl->fl_owner = NULL; 167 init_waitqueue_head(&fl->fl_wait);
168 fl->fl_pid = 0;
169 fl->fl_nspid = NULL;
170 fl->fl_file = NULL;
171 fl->fl_flags = 0;
172 fl->fl_type = 0;
173 fl->fl_start = fl->fl_end = 0;
174} 168}
175 169
176/* Allocate an empty lock structure. */ 170/* Allocate an empty lock structure. */
177struct file_lock *locks_alloc_lock(void) 171struct file_lock *locks_alloc_lock(void)
178{ 172{
179 struct file_lock *fl = kmem_cache_alloc(filelock_cache, GFP_KERNEL); 173 struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);
180 174
181 if (fl) 175 if (fl)
182 locks_init_lock_always(fl); 176 locks_init_lock_heads(fl);
183 177
184 return fl; 178 return fl;
185} 179}
@@ -215,27 +209,12 @@ EXPORT_SYMBOL(locks_free_lock);
215 209
216void locks_init_lock(struct file_lock *fl) 210void locks_init_lock(struct file_lock *fl)
217{ 211{
218 INIT_LIST_HEAD(&fl->fl_link); 212 memset(fl, 0, sizeof(struct file_lock));
219 INIT_LIST_HEAD(&fl->fl_block); 213 locks_init_lock_heads(fl);
220 init_waitqueue_head(&fl->fl_wait);
221 fl->fl_ops = NULL;
222 fl->fl_lmops = NULL;
223 locks_init_lock_always(fl);
224} 214}
225 215
226EXPORT_SYMBOL(locks_init_lock); 216EXPORT_SYMBOL(locks_init_lock);
227 217
228/*
229 * Initialises the fields of the file lock which are invariant for
230 * free file_locks.
231 */
232static void init_once(void *foo)
233{
234 struct file_lock *lock = (struct file_lock *) foo;
235
236 locks_init_lock(lock);
237}
238
239static void locks_copy_private(struct file_lock *new, struct file_lock *fl) 218static void locks_copy_private(struct file_lock *new, struct file_lock *fl)
240{ 219{
241 if (fl->fl_ops) { 220 if (fl->fl_ops) {
@@ -2333,8 +2312,8 @@ EXPORT_SYMBOL(lock_may_write);
2333static int __init filelock_init(void) 2312static int __init filelock_init(void)
2334{ 2313{
2335 filelock_cache = kmem_cache_create("file_lock_cache", 2314 filelock_cache = kmem_cache_create("file_lock_cache",
2336 sizeof(struct file_lock), 0, SLAB_PANIC, 2315 sizeof(struct file_lock), 0, SLAB_PANIC, NULL);
2337 init_once); 2316
2338 return 0; 2317 return 0;
2339} 2318}
2340 2319