author	Ben Skeggs <bskeggs@redhat.com>	2011-06-23 20:14:07 -0400
committer	Ben Skeggs <bskeggs@redhat.com>	2011-09-20 02:03:56 -0400
commit	987eec10dd76624d0edacdc7ecc7e1a6fc877373 (patch)
tree	b53b136797aa4aa55e4e78ba2c3f1d074b47beb4
parent	52d073318a4c32865e6439f7f6c247092a6f6af3 (diff)
drm/nouveau: embed nouveau_mm
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_drv.h	2
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_mm.c	60
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_mm.h	4
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_vm.c	38
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_vm.h	2
-rw-r--r--	drivers/gpu/drm/nouveau/nv50_vram.c	4
-rw-r--r--	drivers/gpu/drm/nouveau/nvc0_vram.c	2
7 files changed, 51 insertions(+), 61 deletions(-)
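
This patch converts struct nouveau_mm from a separately kzalloc'd object, reached through a pointer, into a member embedded directly in its parents (struct nouveau_vram_engine and struct nouveau_vm). As a result, nouveau_mm_init() and nouveau_mm_fini() no longer allocate or free the tracker itself (dropping one -ENOMEM path), and every caller takes the member's address (&vm->mm) and locks vm->mm.mutex rather than vm->mm->mutex. A minimal sketch of the pattern, with invented names that are not part of the nouveau code:

	/* Before: the parent holds a pointer, so init must allocate the
	 * tracker and can fail, and fini must free it. */
	struct tracker {
		struct mutex mutex;
		u32 block_size;
	};

	struct engine_old {
		struct tracker *t;
	};

	static int engine_old_init(struct engine_old *e, u32 block)
	{
		e->t = kzalloc(sizeof(*e->t), GFP_KERNEL);
		if (!e->t)
			return -ENOMEM;	/* extra failure path */
		mutex_init(&e->t->mutex);
		e->t->block_size = block;
		return 0;
	}

	/* After: the tracker is embedded, its lifetime follows the
	 * parent's, and init merely fills in fields. */
	struct engine_new {
		struct tracker t;
	};

	static void engine_new_init(struct engine_new *e, u32 block)
	{
		mutex_init(&e->t.mutex);
		e->t.block_size = block;
	}
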
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index ba258e39f6c5..6629f30598f3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -518,7 +518,7 @@ struct nouveau_pm_engine {
 };
 
 struct nouveau_vram_engine {
-	struct nouveau_mm *mm;
+	struct nouveau_mm mm;
 
 	int (*init)(struct drm_device *);
 	void (*takedown)(struct drm_device *dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.c b/drivers/gpu/drm/nouveau/nouveau_mm.c
index 1640dec3b823..75b5dd93a32f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.c
@@ -27,7 +27,7 @@
 #include "nouveau_mm.h"
 
 static inline void
-region_put(struct nouveau_mm *rmm, struct nouveau_mm_node *a)
+region_put(struct nouveau_mm *mm, struct nouveau_mm_node *a)
 {
 	list_del(&a->nl_entry);
 	list_del(&a->fl_entry);
@@ -35,7 +35,7 @@ region_put(struct nouveau_mm *rmm, struct nouveau_mm_node *a)
 }
 
 static struct nouveau_mm_node *
-region_split(struct nouveau_mm *rmm, struct nouveau_mm_node *a, u32 size)
+region_split(struct nouveau_mm *mm, struct nouveau_mm_node *a, u32 size)
 {
 	struct nouveau_mm_node *b;
 
@@ -57,33 +57,33 @@ region_split(struct nouveau_mm *rmm, struct nouveau_mm_node *a, u32 size)
 	return b;
 }
 
-#define node(root, dir) ((root)->nl_entry.dir == &rmm->nodes) ? NULL : \
+#define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? NULL : \
 	list_entry((root)->nl_entry.dir, struct nouveau_mm_node, nl_entry)
 
 void
-nouveau_mm_put(struct nouveau_mm *rmm, struct nouveau_mm_node *this)
+nouveau_mm_put(struct nouveau_mm *mm, struct nouveau_mm_node *this)
 {
 	struct nouveau_mm_node *prev = node(this, prev);
 	struct nouveau_mm_node *next = node(this, next);
 
-	list_add(&this->fl_entry, &rmm->free);
+	list_add(&this->fl_entry, &mm->free);
 	this->type = 0;
 
 	if (prev && prev->type == 0) {
 		prev->length += this->length;
-		region_put(rmm, this);
+		region_put(mm, this);
 		this = prev;
 	}
 
 	if (next && next->type == 0) {
 		next->offset = this->offset;
 		next->length += this->length;
-		region_put(rmm, this);
+		region_put(mm, this);
 	}
 }
 
 int
-nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
+nouveau_mm_get(struct nouveau_mm *mm, int type, u32 size, u32 size_nc,
 	       u32 align, struct nouveau_mm_node **pnode)
 {
 	struct nouveau_mm_node *prev, *this, *next;
@@ -92,17 +92,17 @@ nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
 	u32 splitoff;
 	u32 s, e;
 
-	list_for_each_entry(this, &rmm->free, fl_entry) {
+	list_for_each_entry(this, &mm->free, fl_entry) {
 		e = this->offset + this->length;
 		s = this->offset;
 
 		prev = node(this, prev);
 		if (prev && prev->type != type)
-			s = roundup(s, rmm->block_size);
+			s = roundup(s, mm->block_size);
 
 		next = node(this, next);
 		if (next && next->type != type)
-			e = rounddown(e, rmm->block_size);
+			e = rounddown(e, mm->block_size);
 
 		s = (s + align_mask) & ~align_mask;
 		e &= ~align_mask;
@@ -110,10 +110,10 @@ nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
 			continue;
 
 		splitoff = s - this->offset;
-		if (splitoff && !region_split(rmm, this, splitoff))
+		if (splitoff && !region_split(mm, this, splitoff))
 			return -ENOMEM;
 
-		this = region_split(rmm, this, min(size, e - s));
+		this = region_split(mm, this, min(size, e - s));
 		if (!this)
 			return -ENOMEM;
 
@@ -127,9 +127,8 @@ nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
 }
 
 int
-nouveau_mm_init(struct nouveau_mm **prmm, u32 offset, u32 length, u32 block)
+nouveau_mm_init(struct nouveau_mm *mm, u32 offset, u32 length, u32 block)
 {
-	struct nouveau_mm *rmm;
 	struct nouveau_mm_node *heap;
 
 	heap = kzalloc(sizeof(*heap), GFP_KERNEL);
@@ -138,32 +137,25 @@ nouveau_mm_init(struct nouveau_mm **prmm, u32 offset, u32 length, u32 block)
 	heap->offset = roundup(offset, block);
 	heap->length = rounddown(offset + length, block) - heap->offset;
 
-	rmm = kzalloc(sizeof(*rmm), GFP_KERNEL);
-	if (!rmm) {
-		kfree(heap);
-		return -ENOMEM;
-	}
-	rmm->block_size = block;
-	mutex_init(&rmm->mutex);
-	INIT_LIST_HEAD(&rmm->nodes);
-	INIT_LIST_HEAD(&rmm->free);
-	list_add(&heap->nl_entry, &rmm->nodes);
-	list_add(&heap->fl_entry, &rmm->free);
-
-	*prmm = rmm;
+	mutex_init(&mm->mutex);
+	mm->block_size = block;
+	INIT_LIST_HEAD(&mm->nodes);
+	INIT_LIST_HEAD(&mm->free);
+
+	list_add(&heap->nl_entry, &mm->nodes);
+	list_add(&heap->fl_entry, &mm->free);
 	return 0;
 }
 
 int
-nouveau_mm_fini(struct nouveau_mm **prmm)
+nouveau_mm_fini(struct nouveau_mm *mm)
 {
-	struct nouveau_mm *rmm = *prmm;
 	struct nouveau_mm_node *node, *heap =
-		list_first_entry(&rmm->nodes, struct nouveau_mm_node, nl_entry);
+		list_first_entry(&mm->nodes, struct nouveau_mm_node, nl_entry);
 
-	if (!list_is_singular(&rmm->nodes)) {
+	if (!list_is_singular(&mm->nodes)) {
 		printk(KERN_ERR "nouveau_mm not empty at destroy time!\n");
-		list_for_each_entry(node, &rmm->nodes, nl_entry) {
+		list_for_each_entry(node, &mm->nodes, nl_entry) {
 			printk(KERN_ERR "0x%02x: 0x%08x 0x%08x\n",
 			       node->type, node->offset, node->length);
 		}
@@ -172,7 +164,5 @@ nouveau_mm_fini(struct nouveau_mm **prmm)
 	}
 
 	kfree(heap);
-	kfree(rmm);
-	*prmm = NULL;
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.h b/drivers/gpu/drm/nouveau/nouveau_mm.h
index b9c016d21553..b8fe9088b9ed 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.h
@@ -44,8 +44,8 @@ struct nouveau_mm {
 	u32 block_size;
 };
 
-int nouveau_mm_init(struct nouveau_mm **, u32 offset, u32 length, u32 block);
-int nouveau_mm_fini(struct nouveau_mm **);
+int nouveau_mm_init(struct nouveau_mm *, u32 offset, u32 length, u32 block);
+int nouveau_mm_fini(struct nouveau_mm *);
 int nouveau_mm_pre(struct nouveau_mm *);
 int nouveau_mm_get(struct nouveau_mm *, int type, u32 size, u32 size_nc,
 		   u32 align, struct nouveau_mm_node **);
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.c b/drivers/gpu/drm/nouveau/nouveau_vm.c
index 244fd38fdb84..d432a2a791f2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.c
@@ -172,9 +172,9 @@ nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
 			vm->map_pgt(vpgd->obj, pde, vpgt->obj);
 		}
 
-		mutex_unlock(&vm->mm->mutex);
+		mutex_unlock(&vm->mm.mutex);
 		nouveau_gpuobj_ref(NULL, &pgt);
-		mutex_lock(&vm->mm->mutex);
+		mutex_lock(&vm->mm.mutex);
 	}
 }
 
@@ -191,18 +191,18 @@ nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
 	pgt_size = (1 << (vm->pgt_bits + 12)) >> type;
 	pgt_size *= 8;
 
-	mutex_unlock(&vm->mm->mutex);
+	mutex_unlock(&vm->mm.mutex);
 	ret = nouveau_gpuobj_new(vm->dev, NULL, pgt_size, 0x1000,
 				 NVOBJ_FLAG_ZERO_ALLOC, &pgt);
-	mutex_lock(&vm->mm->mutex);
+	mutex_lock(&vm->mm.mutex);
 	if (unlikely(ret))
 		return ret;
 
 	/* someone beat us to filling the PDE while we didn't have the lock */
 	if (unlikely(vpgt->refcount[big]++)) {
-		mutex_unlock(&vm->mm->mutex);
+		mutex_unlock(&vm->mm.mutex);
 		nouveau_gpuobj_ref(NULL, &pgt);
-		mutex_lock(&vm->mm->mutex);
+		mutex_lock(&vm->mm.mutex);
 		return 0;
 	}
 
@@ -223,10 +223,10 @@ nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
 	u32 fpde, lpde, pde;
 	int ret;
 
-	mutex_lock(&vm->mm->mutex);
-	ret = nouveau_mm_get(vm->mm, page_shift, msize, 0, align, &vma->node);
+	mutex_lock(&vm->mm.mutex);
+	ret = nouveau_mm_get(&vm->mm, page_shift, msize, 0, align, &vma->node);
 	if (unlikely(ret != 0)) {
-		mutex_unlock(&vm->mm->mutex);
+		mutex_unlock(&vm->mm.mutex);
 		return ret;
 	}
 
@@ -245,13 +245,13 @@ nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
 		if (ret) {
 			if (pde != fpde)
 				nouveau_vm_unmap_pgt(vm, big, fpde, pde - 1);
-			nouveau_mm_put(vm->mm, vma->node);
-			mutex_unlock(&vm->mm->mutex);
+			nouveau_mm_put(&vm->mm, vma->node);
+			mutex_unlock(&vm->mm.mutex);
 			vma->node = NULL;
 			return ret;
 		}
 	}
-	mutex_unlock(&vm->mm->mutex);
+	mutex_unlock(&vm->mm.mutex);
 
 	vma->vm = vm;
 	vma->offset = (u64)vma->node->offset << 12;
@@ -270,11 +270,11 @@ nouveau_vm_put(struct nouveau_vma *vma)
 	fpde = (vma->node->offset >> vm->pgt_bits);
 	lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
 
-	mutex_lock(&vm->mm->mutex);
+	mutex_lock(&vm->mm.mutex);
 	nouveau_vm_unmap_pgt(vm, vma->node->type != vm->spg_shift, fpde, lpde);
-	nouveau_mm_put(vm->mm, vma->node);
+	nouveau_mm_put(&vm->mm, vma->node);
 	vma->node = NULL;
-	mutex_unlock(&vm->mm->mutex);
+	mutex_unlock(&vm->mm.mutex);
 }
 
 int
@@ -360,11 +360,11 @@ nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
 
 	nouveau_gpuobj_ref(pgd, &vpgd->obj);
 
-	mutex_lock(&vm->mm->mutex);
+	mutex_lock(&vm->mm.mutex);
 	for (i = vm->fpde; i <= vm->lpde; i++)
 		vm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
 	list_add(&vpgd->head, &vm->pgd_list);
-	mutex_unlock(&vm->mm->mutex);
+	mutex_unlock(&vm->mm.mutex);
 	return 0;
 }
 
@@ -377,7 +377,7 @@ nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd)
 	if (!mpgd)
 		return;
 
-	mutex_lock(&vm->mm->mutex);
+	mutex_lock(&vm->mm.mutex);
 	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
 		if (vpgd->obj == mpgd) {
 			pgd = vpgd->obj;
@@ -386,7 +386,7 @@ nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd)
 			break;
 		}
 	}
-	mutex_unlock(&vm->mm->mutex);
+	mutex_unlock(&vm->mm.mutex);
 
 	nouveau_gpuobj_ref(NULL, &pgd);
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.h b/drivers/gpu/drm/nouveau/nouveau_vm.h
index 579ca8cc223c..6ce995f7797e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.h
@@ -51,7 +51,7 @@ struct nouveau_vma {
 
 struct nouveau_vm {
 	struct drm_device *dev;
-	struct nouveau_mm *mm;
+	struct nouveau_mm mm;
 	int refcount;
 
 	struct list_head pgd_list;
diff --git a/drivers/gpu/drm/nouveau/nv50_vram.c b/drivers/gpu/drm/nouveau/nv50_vram.c
index af32daecd1ed..9da23838e63e 100644
--- a/drivers/gpu/drm/nouveau/nv50_vram.c
+++ b/drivers/gpu/drm/nouveau/nv50_vram.c
@@ -51,7 +51,7 @@ void
 nv50_vram_del(struct drm_device *dev, struct nouveau_mem **pmem)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_mm *mm = dev_priv->engine.vram.mm;
+	struct nouveau_mm *mm = &dev_priv->engine.vram.mm;
 	struct nouveau_mm_node *this;
 	struct nouveau_mem *mem;
 
@@ -82,7 +82,7 @@ nv50_vram_new(struct drm_device *dev, u64 size, u32 align, u32 size_nc,
 	      u32 memtype, struct nouveau_mem **pmem)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_mm *mm = dev_priv->engine.vram.mm;
+	struct nouveau_mm *mm = &dev_priv->engine.vram.mm;
 	struct nouveau_mm_node *r;
 	struct nouveau_mem *mem;
 	int comp = (memtype & 0x300) >> 8;
diff --git a/drivers/gpu/drm/nouveau/nvc0_vram.c b/drivers/gpu/drm/nouveau/nvc0_vram.c
index e45a24d84e98..abed0d3d5792 100644
--- a/drivers/gpu/drm/nouveau/nvc0_vram.c
+++ b/drivers/gpu/drm/nouveau/nvc0_vram.c
@@ -61,7 +61,7 @@ nvc0_vram_new(struct drm_device *dev, u64 size, u32 align, u32 ncmin,
 	      u32 type, struct nouveau_mem **pmem)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_mm *mm = dev_priv->engine.vram.mm;
+	struct nouveau_mm *mm = &dev_priv->engine.vram.mm;
 	struct nouveau_mm_node *r;
 	struct nouveau_mem *mem;
 	int ret;