diff options
author | Christian König <christian.koenig@amd.com> | 2018-06-05 05:47:43 -0400 |
---|---|---|
committer | Alex Deucher <alexander.deucher@amd.com> | 2018-06-15 13:20:43 -0400 |
commit | ad7f0b6334fe3cf52f2d79345791a4ef4547353f (patch) | |
tree | 8d7147886cba51be618e1af91628d8edc65fe901 /drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | |
parent | 528e083d85bd0306e056fe1bdfd05493ebbff9cc (diff) |
drm/amdgpu: fix documentation of amdgpu_mn.c v2
And wire it up as well.
v2: improve the wording, fix label mismatch
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 74 |
1 file changed, 58 insertions, 16 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c index 37570a1c6db8..40fcb2af2914 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | |||
@@ -28,6 +28,21 @@ | |||
28 | * Christian König <christian.koenig@amd.com> | 28 | * Christian König <christian.koenig@amd.com> |
29 | */ | 29 | */ |
30 | 30 | ||
31 | /** | ||
32 | * DOC: MMU Notifier | ||
33 | * | ||
34 | * For coherent userptr handling registers an MMU notifier to inform the driver | ||
35 | * about updates on the page tables of a process. | ||
36 | * | ||
37 | * When somebody tries to invalidate the page tables we block the update until | ||
38 | * all operations on the pages in question are completed, then those pages are | ||
39 | * marked as accessed and also dirty if it wasn't a read only access. | ||
40 | * | ||
41 | * New command submissions using the userptrs in question are delayed until all | ||
42 | * page table invalidations are completed and we once more see a coherent process | ||
43 | * address space. | ||
44 | */ | ||
45 | |||
31 | #include <linux/firmware.h> | 46 | #include <linux/firmware.h> |
32 | #include <linux/module.h> | 47 | #include <linux/module.h> |
33 | #include <linux/mmu_notifier.h> | 48 | #include <linux/mmu_notifier.h> |
@@ -38,6 +53,21 @@ | |||
38 | #include "amdgpu.h" | 53 | #include "amdgpu.h" |
39 | #include "amdgpu_amdkfd.h" | 54 | #include "amdgpu_amdkfd.h" |
40 | 55 | ||
56 | /** | ||
57 | * struct amdgpu_mn | ||
58 | * | ||
59 | * @adev: amdgpu device pointer | ||
60 | * @mm: process address space | ||
61 | * @mn: MMU notifier structure | ||
62 | * @work: destruction work item | ||
63 | * @node: hash table node to find structure by adev and mn | ||
64 | * @lock: rw semaphore protecting the notifier nodes | ||
65 | * @objects: interval tree containing amdgpu_mn_nodes | ||
66 | * @read_lock: mutex for recursive locking of @lock | ||
67 | * @recursion: depth of recursion | ||
68 | * | ||
69 | * Data for each amdgpu device and process address space. | ||
70 | */ | ||
41 | struct amdgpu_mn { | 71 | struct amdgpu_mn { |
42 | /* constant after initialisation */ | 72 | /* constant after initialisation */ |
43 | struct amdgpu_device *adev; | 73 | struct amdgpu_device *adev; |
@@ -58,13 +88,21 @@ struct amdgpu_mn { | |||
58 | atomic_t recursion; | 88 | atomic_t recursion; |
59 | }; | 89 | }; |
60 | 90 | ||
91 | /** | ||
92 | * struct amdgpu_mn_node | ||
93 | * | ||
94 | * @it: interval node defining start-last of the affected address range | ||
95 | * @bos: list of all BOs in the affected address range | ||
96 | * | ||
97 | * Manages all BOs which are affected of a certain range of address space. | ||
98 | */ | ||
61 | struct amdgpu_mn_node { | 99 | struct amdgpu_mn_node { |
62 | struct interval_tree_node it; | 100 | struct interval_tree_node it; |
63 | struct list_head bos; | 101 | struct list_head bos; |
64 | }; | 102 | }; |
65 | 103 | ||
66 | /** | 104 | /** |
67 | * amdgpu_mn_destroy - destroy the amn | 105 | * amdgpu_mn_destroy - destroy the MMU notifier |
68 | * | 106 | * |
69 | * @work: previously scheduled work item | 107 | * @work: previously scheduled work item |
70 | * | 108 | * |
@@ -98,7 +136,7 @@ static void amdgpu_mn_destroy(struct work_struct *work) | |||
98 | * amdgpu_mn_release - callback to notify about mm destruction | 136 | * amdgpu_mn_release - callback to notify about mm destruction |
99 | * | 137 | * |
100 | * @mn: our notifier | 138 | * @mn: our notifier |
101 | * @mn: the mm this callback is about | 139 | * @mm: the mm this callback is about |
102 | * | 140 | * |
103 | * Schedule a work item to lazy destroy our notifier. | 141 | * Schedule a work item to lazy destroy our notifier. |
104 | */ | 142 | */ |
@@ -106,13 +144,16 @@ static void amdgpu_mn_release(struct mmu_notifier *mn, | |||
106 | struct mm_struct *mm) | 144 | struct mm_struct *mm) |
107 | { | 145 | { |
108 | struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn); | 146 | struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn); |
147 | |||
109 | INIT_WORK(&amn->work, amdgpu_mn_destroy); | 148 | INIT_WORK(&amn->work, amdgpu_mn_destroy); |
110 | schedule_work(&amn->work); | 149 | schedule_work(&amn->work); |
111 | } | 150 | } |
112 | 151 | ||
113 | 152 | ||
114 | /** | 153 | /** |
115 | * amdgpu_mn_lock - take the write side lock for this mn | 154 | * amdgpu_mn_lock - take the write side lock for this notifier |
155 | * | ||
156 | * @mn: our notifier | ||
116 | */ | 157 | */ |
117 | void amdgpu_mn_lock(struct amdgpu_mn *mn) | 158 | void amdgpu_mn_lock(struct amdgpu_mn *mn) |
118 | { | 159 | { |
@@ -121,7 +162,9 @@ void amdgpu_mn_lock(struct amdgpu_mn *mn) | |||
121 | } | 162 | } |
122 | 163 | ||
123 | /** | 164 | /** |
124 | * amdgpu_mn_unlock - drop the write side lock for this mn | 165 | * amdgpu_mn_unlock - drop the write side lock for this notifier |
166 | * | ||
167 | * @mn: our notifier | ||
125 | */ | 168 | */ |
126 | void amdgpu_mn_unlock(struct amdgpu_mn *mn) | 169 | void amdgpu_mn_unlock(struct amdgpu_mn *mn) |
127 | { | 170 | { |
@@ -130,11 +173,9 @@ void amdgpu_mn_unlock(struct amdgpu_mn *mn) | |||
130 | } | 173 | } |
131 | 174 | ||
132 | /** | 175 | /** |
133 | * amdgpu_mn_read_lock - take the amn read lock | 176 | * amdgpu_mn_read_lock - take the read side lock for this notifier |
134 | * | 177 | * |
135 | * @amn: our notifier | 178 | * @amn: our notifier |
136 | * | ||
137 | * Take the amn read side lock. | ||
138 | */ | 179 | */ |
139 | static void amdgpu_mn_read_lock(struct amdgpu_mn *amn) | 180 | static void amdgpu_mn_read_lock(struct amdgpu_mn *amn) |
140 | { | 181 | { |
@@ -145,11 +186,9 @@ static void amdgpu_mn_read_lock(struct amdgpu_mn *amn) | |||
145 | } | 186 | } |
146 | 187 | ||
147 | /** | 188 | /** |
148 | * amdgpu_mn_read_unlock - drop the amn read lock | 189 | * amdgpu_mn_read_unlock - drop the read side lock for this notifier |
149 | * | 190 | * |
150 | * @amn: our notifier | 191 | * @amn: our notifier |
151 | * | ||
152 | * Drop the amn read side lock. | ||
153 | */ | 192 | */ |
154 | static void amdgpu_mn_read_unlock(struct amdgpu_mn *amn) | 193 | static void amdgpu_mn_read_unlock(struct amdgpu_mn *amn) |
155 | { | 194 | { |
@@ -161,9 +200,11 @@ static void amdgpu_mn_read_unlock(struct amdgpu_mn *amn) | |||
161 | * amdgpu_mn_invalidate_node - unmap all BOs of a node | 200 | * amdgpu_mn_invalidate_node - unmap all BOs of a node |
162 | * | 201 | * |
163 | * @node: the node with the BOs to unmap | 202 | * @node: the node with the BOs to unmap |
203 | * @start: start of address range affected | ||
204 | * @end: end of address range affected | ||
164 | * | 205 | * |
165 | * We block for all BOs and unmap them by move them | 206 | * Block for operations on BOs to finish and mark pages as accessed and |
166 | * into system domain again. | 207 | * potentially dirty. |
167 | */ | 208 | */ |
168 | static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node, | 209 | static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node, |
169 | unsigned long start, | 210 | unsigned long start, |
@@ -190,12 +231,12 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node, | |||
190 | * amdgpu_mn_invalidate_range_start_gfx - callback to notify about mm change | 231 | * amdgpu_mn_invalidate_range_start_gfx - callback to notify about mm change |
191 | * | 232 | * |
192 | * @mn: our notifier | 233 | * @mn: our notifier |
193 | * @mn: the mm this callback is about | 234 | * @mm: the mm this callback is about |
194 | * @start: start of updated range | 235 | * @start: start of updated range |
195 | * @end: end of updated range | 236 | * @end: end of updated range |
196 | * | 237 | * |
197 | * We block for all BOs between start and end to be idle and | 238 | * Block for operations on BOs to finish and mark pages as accessed and |
198 | * unmap them by move them into system domain again. | 239 | * potentially dirty. |
199 | */ | 240 | */ |
200 | static void amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn, | 241 | static void amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn, |
201 | struct mm_struct *mm, | 242 | struct mm_struct *mm, |
@@ -268,7 +309,7 @@ static void amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn, | |||
268 | * amdgpu_mn_invalidate_range_end - callback to notify about mm change | 309 | * amdgpu_mn_invalidate_range_end - callback to notify about mm change |
269 | * | 310 | * |
270 | * @mn: our notifier | 311 | * @mn: our notifier |
271 | * @mn: the mm this callback is about | 312 | * @mm: the mm this callback is about |
272 | * @start: start of updated range | 313 | * @start: start of updated range |
273 | * @end: end of updated range | 314 | * @end: end of updated range |
274 | * | 315 | * |
@@ -456,6 +497,7 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo) | |||
456 | 497 | ||
457 | if (list_empty(head)) { | 498 | if (list_empty(head)) { |
458 | struct amdgpu_mn_node *node; | 499 | struct amdgpu_mn_node *node; |
500 | |||
459 | node = container_of(head, struct amdgpu_mn_node, bos); | 501 | node = container_of(head, struct amdgpu_mn_node, bos); |
460 | interval_tree_remove(&node->it, &amn->objects); | 502 | interval_tree_remove(&node->it, &amn->objects); |
461 | kfree(node); | 503 | kfree(node); |