about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorPeter Zijlstra <peterz@infradead.org>2015-06-22 08:16:34 -0400
committerIngo Molnar <mingo@kernel.org>2016-09-22 09:25:53 -0400
commit7c3f654d8e18942295eeda42f7d75494443980e0 (patch)
treeb8b4cb3acbb418846eef73c9a296b34434d56b1f
parentaba37660738325d48c913f3a952a7116d6e6a74b (diff)
fs/locks: Replace lg_local with a per-cpu spinlock
As Oleg suggested, replace file_lock_list with a structure containing the hlist head and a spinlock. This completely removes the lglock from fs/locks.

Suggested-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Al Viro <viro@ZenIV.linux.org.uk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: dave@stgolabs.net
Cc: der.herr@hofr.at
Cc: paulmck@linux.vnet.ibm.com
Cc: riel@redhat.com
Cc: tj@kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--fs/Kconfig1
-rw-r--r--fs/locks.c47
2 files changed, 30 insertions, 18 deletions
diff --git a/fs/Kconfig b/fs/Kconfig
index 2bc7ad775842..3ef62bad8f2b 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -79,6 +79,7 @@ config EXPORTFS_BLOCK_OPS
79config FILE_LOCKING 79config FILE_LOCKING
80 bool "Enable POSIX file locking API" if EXPERT 80 bool "Enable POSIX file locking API" if EXPERT
81 default y 81 default y
82 select PERCPU_RWSEM
82 help 83 help
83 This option enables standard file locking support, required 84 This option enables standard file locking support, required
84 for filesystems like NFS and for the flock() system 85 for filesystems like NFS and for the flock() system
diff --git a/fs/locks.c b/fs/locks.c
index 8f609ec03364..c33aa77fada2 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -127,7 +127,6 @@
127#include <linux/pid_namespace.h> 127#include <linux/pid_namespace.h>
128#include <linux/hashtable.h> 128#include <linux/hashtable.h>
129#include <linux/percpu.h> 129#include <linux/percpu.h>
130#include <linux/lglock.h>
131 130
132#define CREATE_TRACE_POINTS 131#define CREATE_TRACE_POINTS
133#include <trace/events/filelock.h> 132#include <trace/events/filelock.h>
@@ -158,12 +157,17 @@ int lease_break_time = 45;
158 157
159/* 158/*
160 * The global file_lock_list is only used for displaying /proc/locks, so we 159 * The global file_lock_list is only used for displaying /proc/locks, so we
161 * keep a list on each CPU, with each list protected by its own spinlock via 160 * keep a list on each CPU, with each list protected by its own spinlock.
162 * the file_lock_lglock. Note that alterations to the list also require that 161 * Global serialization is done using file_rwsem.
163 * the relevant flc_lock is held. 162 *
163 * Note that alterations to the list also require that the relevant flc_lock is
164 * held.
164 */ 165 */
165DEFINE_STATIC_LGLOCK(file_lock_lglock); 166struct file_lock_list_struct {
166static DEFINE_PER_CPU(struct hlist_head, file_lock_list); 167 spinlock_t lock;
168 struct hlist_head hlist;
169};
170static DEFINE_PER_CPU(struct file_lock_list_struct, file_lock_list);
167DEFINE_STATIC_PERCPU_RWSEM(file_rwsem); 171DEFINE_STATIC_PERCPU_RWSEM(file_rwsem);
168 172
169/* 173/*
@@ -588,17 +592,21 @@ static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
588/* Must be called with the flc_lock held! */ 592/* Must be called with the flc_lock held! */
589static void locks_insert_global_locks(struct file_lock *fl) 593static void locks_insert_global_locks(struct file_lock *fl)
590{ 594{
595 struct file_lock_list_struct *fll = this_cpu_ptr(&file_lock_list);
596
591 percpu_rwsem_assert_held(&file_rwsem); 597 percpu_rwsem_assert_held(&file_rwsem);
592 598
593 lg_local_lock(&file_lock_lglock); 599 spin_lock(&fll->lock);
594 fl->fl_link_cpu = smp_processor_id(); 600 fl->fl_link_cpu = smp_processor_id();
595 hlist_add_head(&fl->fl_link, this_cpu_ptr(&file_lock_list)); 601 hlist_add_head(&fl->fl_link, &fll->hlist);
596 lg_local_unlock(&file_lock_lglock); 602 spin_unlock(&fll->lock);
597} 603}
598 604
599/* Must be called with the flc_lock held! */ 605/* Must be called with the flc_lock held! */
600static void locks_delete_global_locks(struct file_lock *fl) 606static void locks_delete_global_locks(struct file_lock *fl)
601{ 607{
608 struct file_lock_list_struct *fll;
609
602 percpu_rwsem_assert_held(&file_rwsem); 610 percpu_rwsem_assert_held(&file_rwsem);
603 611
604 /* 612 /*
@@ -608,9 +616,11 @@ static void locks_delete_global_locks(struct file_lock *fl)
608 */ 616 */
609 if (hlist_unhashed(&fl->fl_link)) 617 if (hlist_unhashed(&fl->fl_link))
610 return; 618 return;
611 lg_local_lock_cpu(&file_lock_lglock, fl->fl_link_cpu); 619
620 fll = per_cpu_ptr(&file_lock_list, fl->fl_link_cpu);
621 spin_lock(&fll->lock);
612 hlist_del_init(&fl->fl_link); 622 hlist_del_init(&fl->fl_link);
613 lg_local_unlock_cpu(&file_lock_lglock, fl->fl_link_cpu); 623 spin_unlock(&fll->lock);
614} 624}
615 625
616static unsigned long 626static unsigned long
@@ -2723,9 +2733,8 @@ static void *locks_start(struct seq_file *f, loff_t *pos)
2723 2733
2724 iter->li_pos = *pos + 1; 2734 iter->li_pos = *pos + 1;
2725 percpu_down_write(&file_rwsem); 2735 percpu_down_write(&file_rwsem);
2726 lg_global_lock(&file_lock_lglock);
2727 spin_lock(&blocked_lock_lock); 2736 spin_lock(&blocked_lock_lock);
2728 return seq_hlist_start_percpu(&file_lock_list, &iter->li_cpu, *pos); 2737 return seq_hlist_start_percpu(&file_lock_list.hlist, &iter->li_cpu, *pos);
2729} 2738}
2730 2739
2731static void *locks_next(struct seq_file *f, void *v, loff_t *pos) 2740static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
@@ -2733,14 +2742,13 @@ static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
2733 struct locks_iterator *iter = f->private; 2742 struct locks_iterator *iter = f->private;
2734 2743
2735 ++iter->li_pos; 2744 ++iter->li_pos;
2736 return seq_hlist_next_percpu(v, &file_lock_list, &iter->li_cpu, pos); 2745 return seq_hlist_next_percpu(v, &file_lock_list.hlist, &iter->li_cpu, pos);
2737} 2746}
2738 2747
2739static void locks_stop(struct seq_file *f, void *v) 2748static void locks_stop(struct seq_file *f, void *v)
2740 __releases(&blocked_lock_lock) 2749 __releases(&blocked_lock_lock)
2741{ 2750{
2742 spin_unlock(&blocked_lock_lock); 2751 spin_unlock(&blocked_lock_lock);
2743 lg_global_unlock(&file_lock_lglock);
2744 percpu_up_write(&file_rwsem); 2752 percpu_up_write(&file_rwsem);
2745} 2753}
2746 2754
@@ -2782,10 +2790,13 @@ static int __init filelock_init(void)
2782 filelock_cache = kmem_cache_create("file_lock_cache", 2790 filelock_cache = kmem_cache_create("file_lock_cache",
2783 sizeof(struct file_lock), 0, SLAB_PANIC, NULL); 2791 sizeof(struct file_lock), 0, SLAB_PANIC, NULL);
2784 2792
2785 lg_lock_init(&file_lock_lglock, "file_lock_lglock");
2786 2793
2787 for_each_possible_cpu(i) 2794 for_each_possible_cpu(i) {
2788 INIT_HLIST_HEAD(per_cpu_ptr(&file_lock_list, i)); 2795 struct file_lock_list_struct *fll = per_cpu_ptr(&file_lock_list, i);
2796
2797 spin_lock_init(&fll->lock);
2798 INIT_HLIST_HEAD(&fll->hlist);
2799 }
2789 2800
2790 return 0; 2801 return 0;
2791} 2802}