author    Ben Skeggs <bskeggs@redhat.com>    2010-08-26 20:00:25 -0400
committer Ben Skeggs <bskeggs@redhat.com>    2010-12-07 22:48:10 -0500
commit    a11c3198c9ba38d81e25b65e3908d531feba1372 (patch)
tree      89f9489d91af56cf9a4a376ef5310989dd54bebb /drivers/gpu/drm
parent    573a2a37e8648a3249426c816f51e7ef50f6f73e (diff)
drm/nv50: import new vm code
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/nouveau/Makefile          |   5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h     |   9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c     |   5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_object.c  |   2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_vm.c      | 421
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_vm.h      | 107
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fifo.c       |   3
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_graph.c      |   5
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_instmem.c    |  12
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_vm.c         | 164
-rw-r--r--  drivers/gpu/drm/nouveau/nv84_crypt.c      |   3
11 files changed, 714 insertions, 22 deletions
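The commit introduces a generic GPU virtual-memory layer (nouveau_vm.c/.h) with an NV50 backend (nv50_vm.c), and renames the old nv50_vm_flush() helper to nv50_vm_flush_engine(). As a quick orientation before the per-file diffs, here is a minimal, hypothetical sketch of how the interface declared in nouveau_vm.h fits together. The wrapper function, the address-space size, and the pgt_bits/page-shift values are illustrative assumptions, not code from this commit.

```c
/* Hypothetical usage sketch of the API added in nouveau_vm.h.
 * The sizes/shifts and this wrapper are illustrative only. */
#include "nouveau_vm.h"

static int example_vm_usage(struct drm_device *dev, struct nouveau_vram *vram)
{
	struct nouveau_vm *vm = NULL;
	struct nouveau_vma vma = {};
	int ret;

	/* create an address space: 4GiB long, each PDE covering 2^29 bytes,
	 * 4KiB small pages and 64KiB large pages (assumed values) */
	ret = nouveau_vm_new(dev, 0, 1ULL << 32, 0, 29, 12, 16, &vm);
	if (ret)
		return ret;

	/* carve out a 1MiB range of small (4KiB) pages with RW access */
	ret = nouveau_vm_get(vm, 1 << 20, 12, NV_MEM_ACCESS_RW, &vma);
	if (ret == 0) {
		nouveau_vm_map(&vma, vram);	/* point the PTEs at VRAM */
		nouveau_vm_unmap(&vma);		/* clear the PTEs again */
		nouveau_vm_put(&vma);		/* release the VA range */
	}

	nouveau_vm_ref(NULL, &vm, NULL);	/* drop our VM reference */
	return ret;
}
```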
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 26fdd12561b6..b1d8941e04d8 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -9,8 +9,9 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
 	nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \
 	nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \
 	nouveau_display.o nouveau_connector.o nouveau_fbcon.o \
-	nouveau_dp.o nouveau_ramht.o nouveau_mm.o \
+	nouveau_dp.o nouveau_ramht.o \
 	nouveau_pm.o nouveau_volt.o nouveau_perf.o nouveau_temp.o \
+	nouveau_mm.o nouveau_vm.o \
 	nv04_timer.o \
 	nv04_mc.o nv40_mc.o nv50_mc.o \
 	nv04_fb.o nv10_fb.o nv30_fb.o nv40_fb.o nv50_fb.o nvc0_fb.o \
@@ -27,7 +28,7 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
 	nv10_gpio.o nv50_gpio.o \
 	nv50_calc.o \
 	nv04_pm.o nv50_pm.o nva3_pm.o \
-	nv50_vram.o
+	nv50_vram.o nv50_vm.o
 
 nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o
 nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 1305e2c94201..ce1dde4a65d6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -153,6 +153,7 @@ enum nouveau_flags {
 #define NVOBJ_ENGINE_DISPLAY 0xcafe0001
 #define NVOBJ_ENGINE_INT 0xdeadbeef
 
+#define NVOBJ_FLAG_DONT_MAP (1 << 0)
 #define NVOBJ_FLAG_ZERO_ALLOC (1 << 1)
 #define NVOBJ_FLAG_ZERO_FREE (1 << 2)
 
@@ -1213,7 +1214,6 @@ extern int nv50_instmem_map(struct nouveau_gpuobj *);
 extern void nv50_instmem_unmap(struct nouveau_gpuobj *);
 extern void nv50_instmem_flush(struct drm_device *);
 extern void nv84_instmem_flush(struct drm_device *);
-extern void nv50_vm_flush(struct drm_device *, int engine);
 
 /* nvc0_instmem.c */
 extern int nvc0_instmem_init(struct drm_device *);
@@ -1564,10 +1564,11 @@ nv_match_device(struct drm_device *dev, unsigned device,
 }
 
 /* memory type/access flags, do not match hardware values */
 #define NV_MEM_ACCESS_RO 1
 #define NV_MEM_ACCESS_WO 2
 #define NV_MEM_ACCESS_RW (NV_MEM_ACCESS_RO | NV_MEM_ACCESS_WO)
-#define NV_MEM_ACCESS_VM 4
+#define NV_MEM_ACCESS_SYS 4
+#define NV_MEM_ACCESS_VM 8
 
 #define NV_MEM_TARGET_VRAM 0
 #define NV_MEM_TARGET_PCI 1
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index dbeb9e5f6b22..2d02401e8227 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -37,6 +37,7 @@
 #include "nouveau_drv.h"
 #include "nouveau_pm.h"
 #include "nouveau_mm.h"
+#include "nouveau_vm.h"
 
 /*
  * NV10-NV40 tiling helpers
@@ -201,7 +202,7 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
 	dev_priv->engine.instmem.flush(dev);
 	dev_priv->engine.fifo.tlb_flush(dev);
 	dev_priv->engine.graph.tlb_flush(dev);
-	nv50_vm_flush(dev, 6);
+	nv50_vm_flush_engine(dev, 6);
 	return 0;
 }
 
@@ -234,7 +235,7 @@ nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
 	dev_priv->engine.instmem.flush(dev);
 	dev_priv->engine.fifo.tlb_flush(dev);
 	dev_priv->engine.graph.tlb_flush(dev);
-	nv50_vm_flush(dev, 6);
+	nv50_vm_flush_engine(dev, 6);
 }
 
 /*
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index 5cc3f7e59fa1..dd1859f7d8b0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -213,7 +213,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
 	}
 
 	ret = -ENOSYS;
-	if (dev_priv->ramin_available)
+	if (!(flags & NVOBJ_FLAG_DONT_MAP))
 		ret = instmem->map(gpuobj);
 	if (ret)
 		gpuobj->pinst = ~0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.c b/drivers/gpu/drm/nouveau/nouveau_vm.c
new file mode 100644
index 000000000000..07ab1749cf7d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.c
@@ -0,0 +1,421 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_mm.h"
+#include "nouveau_vm.h"
+
+void
+nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
+{
+	struct nouveau_vm *vm = vma->vm;
+	struct nouveau_mm_node *r;
+	u32 offset = vma->node->offset + (delta >> 12);
+	u32 bits = vma->node->type - 12;
+	u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
+	u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
+	u32 max = 1 << (vm->pgt_bits - bits);
+	u32 end, len;
+
+	list_for_each_entry(r, &vram->regions, rl_entry) {
+		u64 phys = (u64)r->offset << 12;
+		u32 num = r->length >> bits;
+
+		while (num) {
+			struct nouveau_gpuobj *pgt = vm->pgt[pde].obj;
+
+			end = (pte + num);
+			if (unlikely(end >= max))
+				end = max;
+			len = end - pte;
+
+			vm->map(vma, pgt, vram, pte, len, phys);
+
+			num -= len;
+			pte += len;
+			if (unlikely(end >= max)) {
+				pde++;
+				pte = 0;
+			}
+		}
+	}
+
+	vm->flush(vm);
+}
+
+void
+nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_vram *vram)
+{
+	nouveau_vm_map_at(vma, 0, vram);
+}
+
+void
+nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
+		  dma_addr_t *list)
+{
+	struct nouveau_vm *vm = vma->vm;
+	u32 offset = vma->node->offset + (delta >> 12);
+	u32 bits = vma->node->type - 12;
+	u32 num = length >> vma->node->type;
+	u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
+	u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
+	u32 max = 1 << (vm->pgt_bits - bits);
+	u32 end, len;
+
+	while (num) {
+		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj;
+
+		end = (pte + num);
+		if (unlikely(end >= max))
+			end = max;
+		len = end - pte;
+
+		vm->map_sg(vma, pgt, pte, list, len);
+
+		num -= len;
+		pte += len;
+		list += len;
+		if (unlikely(end >= max)) {
+			pde++;
+			pte = 0;
+		}
+	}
+
+	vm->flush(vm);
+}
+
+void
+nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
+{
+	struct nouveau_vm *vm = vma->vm;
+	u32 offset = vma->node->offset + (delta >> 12);
+	u32 bits = vma->node->type - 12;
+	u32 num = length >> vma->node->type;
+	u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
+	u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
+	u32 max = 1 << (vm->pgt_bits - bits);
+	u32 end, len;
+
+	while (num) {
+		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj;
+
+		end = (pte + num);
+		if (unlikely(end >= max))
+			end = max;
+		len = end - pte;
+
+		vm->unmap(pgt, pte, len);
+
+		num -= len;
+		pte += len;
+		if (unlikely(end >= max)) {
+			pde++;
+			pte = 0;
+		}
+	}
+
+	vm->flush(vm);
+}
+
+void
+nouveau_vm_unmap(struct nouveau_vma *vma)
+{
+	nouveau_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
+}
+
+static void
+nouveau_vm_unmap_pgt(struct nouveau_vm *vm, u32 fpde, u32 lpde)
+{
+	struct nouveau_vm_pgd *vpgd;
+	struct nouveau_vm_pgt *vpgt;
+	struct nouveau_gpuobj *pgt;
+	u32 pde;
+
+	for (pde = fpde; pde <= lpde; pde++) {
+		vpgt = &vm->pgt[pde - vm->fpde];
+		if (--vpgt->refcount)
+			continue;
+
+		list_for_each_entry(vpgd, &vm->pgd_list, head) {
+			vm->unmap_pgt(vpgd->obj, pde);
+		}
+
+		pgt = vpgt->obj;
+		vpgt->obj = NULL;
+
+		mutex_unlock(&vm->mm->mutex);
+		nouveau_gpuobj_ref(NULL, &pgt);
+		mutex_lock(&vm->mm->mutex);
+	}
+}
+
+static int
+nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
+{
+	struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
+	struct nouveau_vm_pgd *vpgd;
+	struct nouveau_gpuobj *pgt;
+	u32 pgt_size;
+	int ret;
+
+	pgt_size = (1 << (vm->pgt_bits + 12)) >> type;
+	pgt_size *= 8;
+
+	mutex_unlock(&vm->mm->mutex);
+	ret = nouveau_gpuobj_new(vm->dev, NULL, pgt_size, 0x1000,
+				 NVOBJ_FLAG_ZERO_ALLOC, &pgt);
+	mutex_lock(&vm->mm->mutex);
+	if (unlikely(ret))
+		return ret;
+
+	/* someone beat us to filling the PDE while we didn't have the lock */
+	if (unlikely(vpgt->refcount++)) {
+		mutex_unlock(&vm->mm->mutex);
+		nouveau_gpuobj_ref(NULL, &pgt);
+		mutex_lock(&vm->mm->mutex);
+		return 0;
+	}
+
+	list_for_each_entry(vpgd, &vm->pgd_list, head) {
+		vm->map_pgt(vpgd->obj, type, pde, pgt);
+	}
+
+	vpgt->page_shift = type;
+	vpgt->obj = pgt;
+	return 0;
+}
+
+int
+nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
+	       u32 access, struct nouveau_vma *vma)
+{
+	u32 align = (1 << page_shift) >> 12;
+	u32 msize = size >> 12;
+	u32 fpde, lpde, pde;
+	int ret;
+
+	mutex_lock(&vm->mm->mutex);
+	ret = nouveau_mm_get(vm->mm, page_shift, msize, 0, align, &vma->node);
+	if (unlikely(ret != 0)) {
+		mutex_unlock(&vm->mm->mutex);
+		return ret;
+	}
+
+	fpde = (vma->node->offset >> vm->pgt_bits);
+	lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
+	for (pde = fpde; pde <= lpde; pde++) {
+		struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
+
+		if (likely(vpgt->refcount)) {
+			vpgt->refcount++;
+			continue;
+		}
+
+		ret = nouveau_vm_map_pgt(vm, pde, vma->node->type);
+		if (ret) {
+			if (pde != fpde)
+				nouveau_vm_unmap_pgt(vm, fpde, pde - 1);
+			nouveau_mm_put(vm->mm, vma->node);
+			mutex_unlock(&vm->mm->mutex);
+			vma->node = NULL;
+			return ret;
+		}
+	}
+	mutex_unlock(&vm->mm->mutex);
+
+	vma->vm = vm;
+	vma->offset = (u64)vma->node->offset << 12;
+	vma->access = access;
+	return 0;
+}
+
+void
+nouveau_vm_put(struct nouveau_vma *vma)
+{
+	struct nouveau_vm *vm = vma->vm;
+	u32 fpde, lpde;
+
+	if (unlikely(vma->node == NULL))
+		return;
+	fpde = (vma->node->offset >> vm->pgt_bits);
+	lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
+
+	mutex_lock(&vm->mm->mutex);
+	nouveau_mm_put(vm->mm, vma->node);
+	vma->node = NULL;
+	nouveau_vm_unmap_pgt(vm, fpde, lpde);
+	mutex_unlock(&vm->mm->mutex);
+}
+
+int
+nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset,
+	       u8 pgt_bits, u8 spg_shift, u8 lpg_shift,
+	       struct nouveau_vm **pvm)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_vm *vm;
+	u64 mm_length = (offset + length) - mm_offset;
+	u32 block;
+	int ret;
+
+	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
+	if (!vm)
+		return -ENOMEM;
+
+	if (dev_priv->card_type == NV_50) {
+		vm->map_pgt = nv50_vm_map_pgt;
+		vm->unmap_pgt = nv50_vm_unmap_pgt;
+		vm->map = nv50_vm_map;
+		vm->map_sg = nv50_vm_map_sg;
+		vm->unmap = nv50_vm_unmap;
+		vm->flush = nv50_vm_flush;
+	} else {
+		kfree(vm);
+		return -ENOSYS;
+	}
+
+	vm->fpde = offset >> pgt_bits;
+	vm->lpde = (offset + length - 1) >> pgt_bits;
+	vm->pgt = kcalloc(vm->lpde - vm->fpde + 1, sizeof(*vm->pgt), GFP_KERNEL);
+	if (!vm->pgt) {
+		kfree(vm);
+		return -ENOMEM;
+	}
+
+	INIT_LIST_HEAD(&vm->pgd_list);
+	vm->dev = dev;
+	vm->refcount = 1;
+	vm->pgt_bits = pgt_bits - 12;
+	vm->spg_shift = spg_shift;
+	vm->lpg_shift = lpg_shift;
+
+	block = (1 << pgt_bits);
+	if (length < block)
+		block = length;
+
+	ret = nouveau_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
+			      block >> 12);
+	if (ret) {
+		kfree(vm);
+		return ret;
+	}
+
+	*pvm = vm;
+	return 0;
+}
+
+static int
+nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
+{
+	struct nouveau_vm_pgd *vpgd;
+	int i;
+
+	if (!pgd)
+		return 0;
+
+	vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL);
+	if (!vpgd)
+		return -ENOMEM;
+
+	nouveau_gpuobj_ref(pgd, &vpgd->obj);
+
+	mutex_lock(&vm->mm->mutex);
+	for (i = vm->fpde; i <= vm->lpde; i++) {
+		struct nouveau_vm_pgt *vpgt = &vm->pgt[i - vm->fpde];
+
+		if (!vpgt->obj) {
+			vm->unmap_pgt(pgd, i);
+			continue;
+		}
+
+		vm->map_pgt(pgd, vpgt->page_shift, i, vpgt->obj);
+	}
+	list_add(&vpgd->head, &vm->pgd_list);
+	mutex_unlock(&vm->mm->mutex);
+	return 0;
+}
+
+static void
+nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
+{
+	struct nouveau_vm_pgd *vpgd, *tmp;
+
+	if (!pgd)
+		return;
+
+	mutex_lock(&vm->mm->mutex);
+	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
+		if (vpgd->obj != pgd)
+			continue;
+
+		list_del(&vpgd->head);
+		nouveau_gpuobj_ref(NULL, &vpgd->obj);
+		kfree(vpgd);
+	}
+	mutex_unlock(&vm->mm->mutex);
+}
+
+static void
+nouveau_vm_del(struct nouveau_vm *vm)
+{
+	struct nouveau_vm_pgd *vpgd, *tmp;
+
+	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
+		nouveau_vm_unlink(vm, vpgd->obj);
+	}
+	WARN_ON(nouveau_mm_fini(&vm->mm) != 0);
+
+	kfree(vm->pgt);
+	kfree(vm);
+}
+
+int
+nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr,
+	       struct nouveau_gpuobj *pgd)
+{
+	struct nouveau_vm *vm;
+	int ret;
+
+	vm = ref;
+	if (vm) {
+		ret = nouveau_vm_link(vm, pgd);
+		if (ret)
+			return ret;
+
+		vm->refcount++;
+	}
+
+	vm = *ptr;
+	*ptr = ref;
+
+	if (vm) {
+		nouveau_vm_unlink(vm, pgd);
+
+		if (--vm->refcount == 0)
+			nouveau_vm_del(vm);
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.h b/drivers/gpu/drm/nouveau/nouveau_vm.h
new file mode 100644
index 000000000000..b6755cfa7b71
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifndef __NOUVEAU_VM_H__
+#define __NOUVEAU_VM_H__
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_mm.h"
+
+struct nouveau_vm_pgt {
+	struct nouveau_gpuobj *obj;
+	u32 page_shift;
+	u32 refcount;
+};
+
+struct nouveau_vm_pgd {
+	struct list_head head;
+	struct nouveau_gpuobj *obj;
+};
+
+struct nouveau_vma {
+	struct nouveau_vm *vm;
+	struct nouveau_mm_node *node;
+	u64 offset;
+	u32 access;
+};
+
+struct nouveau_vm {
+	struct drm_device *dev;
+	struct nouveau_mm *mm;
+	int refcount;
+
+	struct list_head pgd_list;
+	atomic_t pgraph_refs;
+	atomic_t pcrypt_refs;
+
+	struct nouveau_vm_pgt *pgt;
+	u32 fpde;
+	u32 lpde;
+
+	u32 pgt_bits;
+	u8 spg_shift;
+	u8 lpg_shift;
+
+	void (*map_pgt)(struct nouveau_gpuobj *pgd, u32 type, u32 pde,
+			struct nouveau_gpuobj *pgt);
+	void (*unmap_pgt)(struct nouveau_gpuobj *pgd, u32 pde);
+	void (*map)(struct nouveau_vma *, struct nouveau_gpuobj *,
+		    struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
+	void (*map_sg)(struct nouveau_vma *, struct nouveau_gpuobj *,
+		       u32 pte, dma_addr_t *, u32 cnt);
+	void (*unmap)(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt);
+	void (*flush)(struct nouveau_vm *);
+};
+
+/* nouveau_vm.c */
+int nouveau_vm_new(struct drm_device *, u64 offset, u64 length, u64 mm_offset,
+		   u8 pgt_bits, u8 spg_shift, u8 lpg_shift,
+		   struct nouveau_vm **);
+int nouveau_vm_ref(struct nouveau_vm *, struct nouveau_vm **,
+		   struct nouveau_gpuobj *pgd);
+int nouveau_vm_get(struct nouveau_vm *, u64 size, u32 page_shift,
+		   u32 access, struct nouveau_vma *);
+void nouveau_vm_put(struct nouveau_vma *);
+void nouveau_vm_map(struct nouveau_vma *, struct nouveau_vram *);
+void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_vram *);
+void nouveau_vm_unmap(struct nouveau_vma *);
+void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length);
+void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
+		       dma_addr_t *);
+
+/* nv50_vm.c */
+void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 type, u32 pde,
+		     struct nouveau_gpuobj *pgt);
+void nv50_vm_unmap_pgt(struct nouveau_gpuobj *pgd, u32 pde);
+void nv50_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
+		 struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
+void nv50_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
+		    u32 pte, dma_addr_t *, u32 cnt);
+void nv50_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
+void nv50_vm_flush(struct nouveau_vm *);
+void nv50_vm_flush_engine(struct drm_device *, int engine);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index 7add3dfde3df..8dd04c5dac67 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -28,6 +28,7 @@
 #include "drm.h"
 #include "nouveau_drv.h"
 #include "nouveau_ramht.h"
+#include "nouveau_vm.h"
 
 static void
 nv50_fifo_playlist_update(struct drm_device *dev)
@@ -498,5 +499,5 @@ nv50_fifo_unload_context(struct drm_device *dev)
 void
 nv50_fifo_tlb_flush(struct drm_device *dev)
 {
-	nv50_vm_flush(dev, 5);
+	nv50_vm_flush_engine(dev, 5);
 }
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index b3900788c66d..f5fd1b296d27 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -30,6 +30,7 @@
 #include "nouveau_ramht.h"
 #include "nouveau_grctx.h"
 #include "nouveau_dma.h"
+#include "nouveau_vm.h"
 #include "nv50_evo.h"
 
 static int nv50_graph_register(struct drm_device *);
@@ -468,7 +469,7 @@ nv50_graph_register(struct drm_device *dev)
 void
 nv50_graph_tlb_flush(struct drm_device *dev)
 {
-	nv50_vm_flush(dev, 0);
+	nv50_vm_flush_engine(dev, 0);
 }
 
 void
@@ -511,7 +512,7 @@ nv86_graph_tlb_flush(struct drm_device *dev)
 			 nv_rd32(dev, 0x400384), nv_rd32(dev, 0x400388));
 	}
 
-	nv50_vm_flush(dev, 0);
+	nv50_vm_flush_engine(dev, 0);
 
 	nv_mask(dev, 0x400500, 0x00000001, 0x00000001);
 	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index 1e7d50397e4a..4eb2f0835e27 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -27,7 +27,9 @@
 
 #include "drmP.h"
 #include "drm.h"
+
 #include "nouveau_drv.h"
+#include "nouveau_vm.h"
 
 struct nv50_instmem_priv {
 	uint32_t save1700[5]; /* 0x1700->0x1710 */
@@ -404,7 +406,7 @@ nv50_instmem_map(struct nouveau_gpuobj *gpuobj)
 	}
 	dev_priv->engine.instmem.flush(dev);
 
-	nv50_vm_flush(dev, 6);
+	nv50_vm_flush_engine(dev, 6);
 
 	node->ramin = ramin;
 	gpuobj->pinst = ramin->start;
@@ -454,11 +456,3 @@ nv84_instmem_flush(struct drm_device *dev)
 		NV_ERROR(dev, "PRAMIN flush timeout\n");
 }
 
-void
-nv50_vm_flush(struct drm_device *dev, int engine)
-{
-	nv_wr32(dev, 0x100c80, (engine << 16) | 1);
-	if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
-		NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
-}
-
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c
new file mode 100644
index 000000000000..ab6c3d0ce32e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_vm.c
@@ -0,0 +1,164 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_vm.h"
+
+void
+nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 type, u32 pde,
+		struct nouveau_gpuobj *pgt)
+{
+	struct drm_nouveau_private *dev_priv = pgd->dev->dev_private;
+	u32 coverage = (pgt->size >> 3) << type;
+	u64 phys;
+
+	phys = pgt->vinst;
+	phys |= 0x01; /* present */
+	phys |= (type == 12) ? 0x02 : 0x00; /* 4KiB pages */
+	if (dev_priv->vram_sys_base) {
+		phys += dev_priv->vram_sys_base;
+		phys |= 0x30;
+	}
+
+	if (coverage <= 32 * 1024 * 1024)
+		phys |= 0x60;
+	else if (coverage <= 64 * 1024 * 1024)
+		phys |= 0x40;
+	else if (coverage < 128 * 1024 * 1024)
+		phys |= 0x20;
+
+	nv_wo32(pgd, (pde * 8) + 0, lower_32_bits(phys));
+	nv_wo32(pgd, (pde * 8) + 4, upper_32_bits(phys));
+}
+
+void
+nv50_vm_unmap_pgt(struct nouveau_gpuobj *pgd, u32 pde)
+{
+	nv_wo32(pgd, (pde * 8) + 0, 0x00000000);
+	nv_wo32(pgd, (pde * 8) + 4, 0xdeadcafe);
+}
+
+static inline u64
+nv50_vm_addr(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
+	     u64 phys, u32 memtype, u32 target)
+{
+	struct drm_nouveau_private *dev_priv = pgt->dev->dev_private;
+
+	phys |= 1; /* present */
+	phys |= (u64)memtype << 40;
+
+	/* IGPs don't have real VRAM, re-target to stolen system memory */
+	if (target == 0 && dev_priv->vram_sys_base) {
+		phys += dev_priv->vram_sys_base;
+		target = 3;
+	}
+
+	phys |= target << 4;
+
+	if (vma->access & NV_MEM_ACCESS_SYS)
+		phys |= (1 << 6);
+
+	if (!(vma->access & NV_MEM_ACCESS_WO))
+		phys |= (1 << 3);
+
+	return phys;
+}
+
+void
+nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
+	    struct nouveau_vram *mem, u32 pte, u32 cnt, u64 phys)
+{
+	u32 block, i;
+
+	phys = nv50_vm_addr(vma, pgt, phys, mem->memtype, 0);
+	pte <<= 3;
+	cnt <<= 3;
+
+	while (cnt) {
+		u32 offset_h = upper_32_bits(phys);
+		u32 offset_l = lower_32_bits(phys);
+
+		for (i = 7; i >= 0; i--) {
+			block = 1 << (i + 3);
+			if (cnt >= block && !(pte & (block - 1)))
+				break;
+		}
+		offset_l |= (i << 7);
+
+		phys += block << (vma->node->type - 3);
+		cnt -= block;
+
+		while (block) {
+			nv_wo32(pgt, pte + 0, offset_l);
+			nv_wo32(pgt, pte + 4, offset_h);
+			pte += 8;
+			block -= 8;
+		}
+	}
+}
+
+void
+nv50_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
+	       u32 pte, dma_addr_t *list, u32 cnt)
+{
+	pte <<= 3;
+	while (cnt--) {
+		u64 phys = nv50_vm_addr(vma, pgt, (u64)*list++, 0, 2);
+		nv_wo32(pgt, pte + 0, lower_32_bits(phys));
+		nv_wo32(pgt, pte + 4, upper_32_bits(phys));
+		pte += 8;
+	}
+}
+
+void
+nv50_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
+{
+	pte <<= 3;
+	while (cnt--) {
+		nv_wo32(pgt, pte + 0, 0x00000000);
+		nv_wo32(pgt, pte + 4, 0x00000000);
+		pte += 8;
+	}
+}
+
+void
+nv50_vm_flush(struct nouveau_vm *vm)
+{
+	struct drm_nouveau_private *dev_priv = vm->dev->dev_private;
+	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+
+	pinstmem->flush(vm->dev);
+
+	nv50_vm_flush_engine(vm->dev, 6);
+}
+
+void
+nv50_vm_flush_engine(struct drm_device *dev, int engine)
+{
+	nv_wr32(dev, 0x100c80, (engine << 16) | 1);
+	if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
+		NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
+}
diff --git a/drivers/gpu/drm/nouveau/nv84_crypt.c b/drivers/gpu/drm/nouveau/nv84_crypt.c
index 780bb1d66f95..a333e5905346 100644
--- a/drivers/gpu/drm/nouveau/nv84_crypt.c
+++ b/drivers/gpu/drm/nouveau/nv84_crypt.c
@@ -25,6 +25,7 @@
 #include "drmP.h"
 #include "nouveau_drv.h"
 #include "nouveau_util.h"
+#include "nouveau_vm.h"
 
 static void nv84_crypt_isr(struct drm_device *);
 
@@ -84,7 +85,7 @@ nv84_crypt_destroy_context(struct nouveau_channel *chan)
 void
 nv84_crypt_tlb_flush(struct drm_device *dev)
 {
-	nv50_vm_flush(dev, 0x0a);
+	nv50_vm_flush_engine(dev, 0x0a);
 }
 
 int