author		Jerome Glisse <jglisse@redhat.com>	2009-11-13 14:56:58 -0500
committer	Dave Airlie <airlied@redhat.com>	2009-11-23 22:02:18 -0500
commit		a698cf34ea867efef12fc29dd63d443f0c71a53c (patch)
tree		a396804737c4a7bc972f04fe97d0ec0a2f971668	/drivers/gpu/drm/drm_mm.c
parent		0beb81ab45c67de4b3aa85faad604cff8ed133a8 (diff)
drm: mm always protect change to unused_nodes with unused_lock spinlock
unused_nodes modification needs to be protected by the unused_lock spinlock. Here is an example of a usage where, without this patch, there is no such protection:

Process 1:
1. drm_mm_pre_get() (this function modifies the unused_nodes list)
2. spin_lock() (spinlock protecting the mm struct)
3. drm_mm_put_block() (this function might modify the unused_nodes list but does not protect the modification with unused_lock)
4. spin_unlock() (spinlock protecting the mm struct)

Process 2:
1. drm_mm_pre_get() (this function modifies the unused_nodes list)

At this point Process 1 and Process 2 might both be modifying the unused_nodes list. This patch adds unused_lock protection to drm_mm_put_block() to avoid such an issue.

Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
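For illustration only, here is a minimal userspace sketch of the invariant the patch enforces: every path that touches the shared free list of pre-allocated nodes must hold the same lock. The names (node_cache, cache_lock, cache_prefill, cache_put, CACHE_TARGET) are hypothetical, and a pthread mutex stands in for the kernel's unused_lock spinlock; this is not the drm_mm code itself, just the same locking pattern.

/* Hypothetical userspace analogue of the unused_nodes cache; not kernel code. */
#include <pthread.h>
#include <stdlib.h>

#define CACHE_TARGET 4	/* plays the role of MM_UNUSED_TARGET */

struct node {
	struct node *next;
};

static struct node *node_cache;	/* plays the role of mm->unused_nodes */
static int num_cached;		/* plays the role of mm->num_unused */
static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;	/* plays the role of unused_lock */

/* Analogous to drm_mm_pre_get(): top up the cache, under the cache lock. */
static int cache_prefill(void)
{
	pthread_mutex_lock(&cache_lock);
	while (num_cached < CACHE_TARGET) {
		struct node *n = malloc(sizeof(*n));

		if (!n) {
			pthread_mutex_unlock(&cache_lock);
			return -1;
		}
		n->next = node_cache;
		node_cache = n;
		num_cached++;
	}
	pthread_mutex_unlock(&cache_lock);
	return 0;
}

/*
 * Analogous to the fixed drm_mm_put_block(): a freed node is either parked
 * on the cache or released, but only while holding the same lock that
 * cache_prefill() takes.
 */
static void cache_put(struct node *n)
{
	pthread_mutex_lock(&cache_lock);
	if (num_cached < CACHE_TARGET) {
		n->next = node_cache;
		node_cache = n;
		num_cached++;
	} else {
		free(n);
	}
	pthread_mutex_unlock(&cache_lock);
}

Dropping the lock from cache_put() reproduces the race described above: two threads can update the list head and counter concurrently and corrupt the cache, which is exactly what could happen to unused_nodes before this patch.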
Diffstat (limited to 'drivers/gpu/drm/drm_mm.c')
-rw-r--r--	drivers/gpu/drm/drm_mm.c	9
1 file changed, 9 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index c861d80fd779..97dc5a4f0de4 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -103,6 +103,11 @@ static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
 	return child;
 }
 
+/* drm_mm_pre_get() - pre allocate drm_mm_node structure
+ * drm_mm: memory manager struct we are pre-allocating for
+ *
+ * Returns 0 on success or -ENOMEM if allocation fails.
+ */
 int drm_mm_pre_get(struct drm_mm *mm)
 {
 	struct drm_mm_node *node;
@@ -253,12 +258,14 @@ void drm_mm_put_block(struct drm_mm_node *cur)
 				prev_node->size += next_node->size;
 				list_del(&next_node->ml_entry);
 				list_del(&next_node->fl_entry);
+				spin_lock(&mm->unused_lock);
 				if (mm->num_unused < MM_UNUSED_TARGET) {
 					list_add(&next_node->fl_entry,
 						 &mm->unused_nodes);
 					++mm->num_unused;
 				} else
 					kfree(next_node);
+				spin_unlock(&mm->unused_lock);
 			} else {
 				next_node->size += cur->size;
 				next_node->start = cur->start;
@@ -271,11 +278,13 @@ void drm_mm_put_block(struct drm_mm_node *cur)
 		list_add(&cur->fl_entry, &mm->fl_entry);
 	} else {
 		list_del(&cur->ml_entry);
+		spin_lock(&mm->unused_lock);
 		if (mm->num_unused < MM_UNUSED_TARGET) {
 			list_add(&cur->fl_entry, &mm->unused_nodes);
 			++mm->num_unused;
 		} else
 			kfree(cur);
+		spin_unlock(&mm->unused_lock);
 	}
 }
 