 mm/hmm.c | 77 ++++++++++++++++++++++++++++++-----------------------------------------------
 1 file changed, 30 insertions(+), 47 deletions(-)
diff --git a/mm/hmm.c b/mm/hmm.c
index 080b17a2e87e..0423f4ca3a7e 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -31,16 +31,6 @@
 #if IS_ENABLED(CONFIG_HMM_MIRROR)
 static const struct mmu_notifier_ops hmm_mmu_notifier_ops;
 
-static inline struct hmm *mm_get_hmm(struct mm_struct *mm)
-{
-	struct hmm *hmm = READ_ONCE(mm->hmm);
-
-	if (hmm && kref_get_unless_zero(&hmm->kref))
-		return hmm;
-
-	return NULL;
-}
-
 /**
  * hmm_get_or_create - register HMM against an mm (HMM internal)
  *
@@ -55,11 +45,16 @@ static inline struct hmm *mm_get_hmm(struct mm_struct *mm)
  */
 static struct hmm *hmm_get_or_create(struct mm_struct *mm)
 {
-	struct hmm *hmm = mm_get_hmm(mm);
-	bool cleanup = false;
+	struct hmm *hmm;
+
+	lockdep_assert_held_exclusive(&mm->mmap_sem);
 
-	if (hmm)
-		return hmm;
+	/* Abuse the page_table_lock to also protect mm->hmm. */
+	spin_lock(&mm->page_table_lock);
+	hmm = mm->hmm;
+	if (mm->hmm && kref_get_unless_zero(&mm->hmm->kref))
+		goto out_unlock;
+	spin_unlock(&mm->page_table_lock);
 
 	hmm = kmalloc(sizeof(*hmm), GFP_KERNEL);
 	if (!hmm)
@@ -74,57 +69,45 @@ static struct hmm *hmm_get_or_create(struct mm_struct *mm)
 	hmm->notifiers = 0;
 	hmm->dead = false;
 	hmm->mm = mm;
-	mmgrab(hmm->mm);
 
-	spin_lock(&mm->page_table_lock);
-	if (!mm->hmm)
-		mm->hmm = hmm;
-	else
-		cleanup = true;
-	spin_unlock(&mm->page_table_lock);
+	hmm->mmu_notifier.ops = &hmm_mmu_notifier_ops;
+	if (__mmu_notifier_register(&hmm->mmu_notifier, mm)) {
+		kfree(hmm);
+		return NULL;
+	}
 
-	if (cleanup)
-		goto error;
+	mmgrab(hmm->mm);
 
 	/*
-	 * We should only get here if hold the mmap_sem in write mode ie on
-	 * registration of first mirror through hmm_mirror_register()
+	 * We hold the exclusive mmap_sem here so we know that mm->hmm is
+	 * still NULL or 0 kref, and is safe to update.
 	 */
-	hmm->mmu_notifier.ops = &hmm_mmu_notifier_ops;
-	if (__mmu_notifier_register(&hmm->mmu_notifier, mm))
-		goto error_mm;
-
-	return hmm;
-
-error_mm:
 	spin_lock(&mm->page_table_lock);
-	if (mm->hmm == hmm)
-		mm->hmm = NULL;
+	mm->hmm = hmm;
+
+out_unlock:
 	spin_unlock(&mm->page_table_lock);
-error:
-	mmdrop(hmm->mm);
-	kfree(hmm);
-	return NULL;
+	return hmm;
 }
 
 static void hmm_free_rcu(struct rcu_head *rcu)
 {
-	kfree(container_of(rcu, struct hmm, rcu));
+	struct hmm *hmm = container_of(rcu, struct hmm, rcu);
+
+	mmdrop(hmm->mm);
+	kfree(hmm);
 }
 
 static void hmm_free(struct kref *kref)
 {
 	struct hmm *hmm = container_of(kref, struct hmm, kref);
-	struct mm_struct *mm = hmm->mm;
 
-	mmu_notifier_unregister_no_release(&hmm->mmu_notifier, mm);
+	spin_lock(&hmm->mm->page_table_lock);
+	if (hmm->mm->hmm == hmm)
+		hmm->mm->hmm = NULL;
+	spin_unlock(&hmm->mm->page_table_lock);
 
-	spin_lock(&mm->page_table_lock);
-	if (mm->hmm == hmm)
-		mm->hmm = NULL;
-	spin_unlock(&mm->page_table_lock);
-
-	mmdrop(hmm->mm);
+	mmu_notifier_unregister_no_release(&hmm->mmu_notifier, hmm->mm);
 	mmu_notifier_call_srcu(&hmm->rcu, hmm_free_rcu);
 }
 
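For readers following the locking change: the pattern the patched code adopts -- look up the shared pointer under a lock, take a reference only if the refcount is still non-zero, otherwise allocate and publish under the same lock, with the release path unpublishing under that lock before freeing -- can be sketched in isolation. Below is a minimal userspace analogue using pthreads in place of page_table_lock and a plain counter in place of struct kref; all names (obj_get_or_create, obj_put, and so on) are illustrative and not part of mm/hmm.c.

#include <pthread.h>
#include <stdlib.h>

struct obj {
	int refcount;			/* protected by table_lock */
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj *the_obj;		/* protected by table_lock */

/* Analogous to kref_get_unless_zero(): refuse an object that is
 * already on its way to being freed. */
static int obj_get_unless_zero(struct obj *o)
{
	if (o->refcount == 0)
		return 0;
	o->refcount++;
	return 1;
}

static struct obj *obj_get_or_create(void)
{
	struct obj *o;

	/*
	 * As in the patched hmm_get_or_create() (which asserts that
	 * mmap_sem is held exclusively), callers racing to *create*
	 * are assumed to be serialized by an outer lock; only the
	 * lookup-vs-teardown race is handled here.
	 */
	pthread_mutex_lock(&table_lock);
	o = the_obj;
	if (o && obj_get_unless_zero(o))
		goto out_unlock;
	pthread_mutex_unlock(&table_lock);

	o = malloc(sizeof(*o));
	if (!o)
		return NULL;
	o->refcount = 1;

	/* Publish under the same lock readers use; a stale zero-ref
	 * object, if present, is simply replaced. */
	pthread_mutex_lock(&table_lock);
	the_obj = o;
out_unlock:
	pthread_mutex_unlock(&table_lock);
	return o;
}

/* Analogous to hmm_free(): unpublish under the lock before freeing,
 * but only if we have not already been replaced. */
static void obj_release(struct obj *o)
{
	pthread_mutex_lock(&table_lock);
	if (the_obj == o)
		the_obj = NULL;
	pthread_mutex_unlock(&table_lock);
	free(o);
}

static void obj_put(struct obj *o)
{
	int dead;

	pthread_mutex_lock(&table_lock);
	dead = (--o->refcount == 0);
	pthread_mutex_unlock(&table_lock);
	if (dead)
		obj_release(o);
}

The window this layout guards against is the one hmm_free() leaves open: between the refcount hitting zero and the release callback running, the shared pointer may still reference a dead object. obj_get_unless_zero() refuses such an object, the creator installs a replacement under the lock, and the release path's "the_obj == o" test keeps teardown from clobbering that replacement -- the same roles played by kref_get_unless_zero() and the "mm->hmm == hmm" check in the patch.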