Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c  41
1 file changed, 12 insertions(+), 29 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index ddb1e9365a3e..b93c558dd86e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -51,51 +51,34 @@ static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
 {
 	struct vmwgfx_gmrid_man *gman =
 		(struct vmwgfx_gmrid_man *)man->priv;
-	int ret = 0;
 	int id;
 
 	mem->mm_node = NULL;
 
+	id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
+	if (id < 0)
+		return id;
+
 	spin_lock(&gman->lock);
 
 	if (gman->max_gmr_pages > 0) {
 		gman->used_gmr_pages += bo->num_pages;
 		if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages))
-			goto out_err_locked;
+			goto nospace;
 	}
 
-	do {
-		spin_unlock(&gman->lock);
-		if (unlikely(ida_pre_get(&gman->gmr_ida, GFP_KERNEL) == 0)) {
-			ret = -ENOMEM;
-			goto out_err;
-		}
-		spin_lock(&gman->lock);
-
-		ret = ida_get_new(&gman->gmr_ida, &id);
-		if (unlikely(ret == 0 && id >= gman->max_gmr_ids)) {
-			ida_remove(&gman->gmr_ida, id);
-			ret = 0;
-			goto out_err_locked;
-		}
-	} while (ret == -EAGAIN);
-
-	if (likely(ret == 0)) {
-		mem->mm_node = gman;
-		mem->start = id;
-		mem->num_pages = bo->num_pages;
-	} else
-		goto out_err_locked;
+	mem->mm_node = gman;
+	mem->start = id;
+	mem->num_pages = bo->num_pages;
 
 	spin_unlock(&gman->lock);
 	return 0;
 
-out_err:
-	spin_lock(&gman->lock);
-out_err_locked:
+nospace:
 	gman->used_gmr_pages -= bo->num_pages;
 	spin_unlock(&gman->lock);
-	return ret;
+	ida_free(&gman->gmr_ida, id);
+	return 0;
 }
 
 static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
@@ -105,8 +88,8 @@ static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
 		(struct vmwgfx_gmrid_man *)man->priv;
 
 	if (mem->mm_node) {
+		ida_free(&gman->gmr_ida, mem->start);
 		spin_lock(&gman->lock);
-		ida_remove(&gman->gmr_ida, mem->start);
 		gman->used_gmr_pages -= mem->num_pages;
 		spin_unlock(&gman->lock);
 		mem->mm_node = NULL;
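
For reference, a minimal sketch (not part of the patch) of the IDA pattern the diff converts to: ida_alloc_max() replaces the ida_pre_get()/ida_get_new() retry loop and enforces the upper bound itself, while ida_free() replaces ida_remove(). Because ida_alloc_max() takes the GFP flags directly and may sleep with GFP_KERNEL, the patch calls it before taking gman->lock. The example_* identifiers below are hypothetical, for illustration only.

#include <linux/idr.h>
#include <linux/gfp.h>

static DEFINE_IDA(example_ida);

/* Allocate an id in [0, max_ids - 1]; returns a negative errno on failure. */
static int example_alloc_id(unsigned int max_ids)
{
	return ida_alloc_max(&example_ida, max_ids - 1, GFP_KERNEL);
}

/* Release an id previously returned by example_alloc_id(). */
static void example_free_id(int id)
{
	ida_free(&example_ida, id);
}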