author	Jeff Layton <jlayton@primarydata.com>	2015-01-16 15:05:54 -0500
committer	Jeff Layton <jlayton@primarydata.com>	2015-01-16 15:05:54 -0500
commit	4a075e39c86490cc0f0c10ac6abe3592d1689463 (patch)
tree	8da8633f9f717128c02a08ad15b7d9f067091acb /fs/locks.c
parent	dd459bb1974c5e9cff3dfbf4f6fdb3e9363ef32e (diff)
locks: add a new struct file_locking_context pointer to struct inode
The current scheme of using the i_flock list is really difficult to manage. There is also a legitimate desire for a per-inode spinlock to manage these lists that isn't the i_lock.

Start conversion to a new scheme to eventually replace the old i_flock list with a new "file_lock_context" object.

We start by adding a new i_flctx to struct inode. For now, it lives in parallel with the i_flock list, but will eventually replace it. The idea is to allocate a structure to sit in that pointer and act as a locus for all things file locking.

We allocate a file_lock_context for an inode when the first lock is added to it, and it's only freed when the inode is freed. We use the i_lock to protect the assignment, but afterward it should mostly be accessed locklessly.

Signed-off-by: Jeff Layton <jlayton@primarydata.com>
Acked-by: Christoph Hellwig <hch@lst.de>
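This page shows only the fs/locks.c side of the patch; the companion declarations land in include/linux/fs.h. A minimal sketch of what those declarations look like, inferred from the commit message and the code below (the field layout and comments here are an approximation, not the verbatim hunk):

struct file_lock_context {
	struct list_head	flc_flock;	/* flock-style locks; the only list so far */
};

/* in struct inode, alongside the legacy i_flock list for now */
struct file_lock_context	*i_flctx;

/* called from inode teardown, since the context lives until the inode is freed */
void locks_free_lock_context(struct file_lock_context *ctx);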
Diffstat (limited to 'fs/locks.c')
-rw-r--r--	fs/locks.c	44
1 file changed, 44 insertions(+), 0 deletions(-)
diff --git a/fs/locks.c b/fs/locks.c
index ae1e7cf721d6..526d5fca67c8 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -202,8 +202,49 @@ static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);
  */
 static DEFINE_SPINLOCK(blocked_lock_lock);
 
+static struct kmem_cache *flctx_cache __read_mostly;
 static struct kmem_cache *filelock_cache __read_mostly;
 
+static struct file_lock_context *
+locks_get_lock_context(struct inode *inode)
+{
+	struct file_lock_context *new;
+
+	if (likely(inode->i_flctx))
+		goto out;
+
+	new = kmem_cache_alloc(flctx_cache, GFP_KERNEL);
+	if (!new)
+		goto out;
+
+	INIT_LIST_HEAD(&new->flc_flock);
+
+	/*
+	 * Assign the pointer if it's not already assigned. If it is, then
+	 * free the context we just allocated.
+	 */
+	spin_lock(&inode->i_lock);
+	if (likely(!inode->i_flctx)) {
+		inode->i_flctx = new;
+		new = NULL;
+	}
+	spin_unlock(&inode->i_lock);
+
+	if (new)
+		kmem_cache_free(flctx_cache, new);
+out:
+	return inode->i_flctx;
+}
+
+void
+locks_free_lock_context(struct file_lock_context *ctx)
+{
+	if (ctx) {
+		WARN_ON_ONCE(!list_empty(&ctx->flc_flock));
+		kmem_cache_free(flctx_cache, ctx);
+	}
+}
+
 static void locks_init_lock_heads(struct file_lock *fl)
 {
 	INIT_HLIST_NODE(&fl->fl_link);
@@ -2636,6 +2677,9 @@ static int __init filelock_init(void)
 {
 	int i;
 
+	flctx_cache = kmem_cache_create("file_lock_ctx",
+			sizeof(struct file_lock_context), 0, SLAB_PANIC, NULL);
+
 	filelock_cache = kmem_cache_create("file_lock_cache",
 			sizeof(struct file_lock), 0, SLAB_PANIC, NULL);
 
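Nothing in this patch calls locks_get_lock_context() yet; converting the lock-insertion paths from i_flock to i_flctx happens in follow-up patches. A hedged sketch of the intended call pattern at a lock-insertion site (the caller shown is illustrative, not part of this commit):

static int example_set_flock(struct file *filp, struct file_lock *request)
{
	struct inode *inode = file_inode(filp);
	struct file_lock_context *ctx;

	/*
	 * Allocate the per-inode context on first use. A NULL return
	 * means the allocation failed, not that no locks are held.
	 */
	ctx = locks_get_lock_context(inode);
	if (!ctx)
		return -ENOMEM;

	/* ... insert "request" into ctx->flc_flock under the appropriate lock ... */
	return 0;
}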