Diffstat (limited to 'drivers/gpu/drm/nouveau/nv50_evo.c')
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_evo.c | 174
1 file changed, 78 insertions(+), 96 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
index 7e9a6d6d673b..d7d8080c6a14 100644
--- a/drivers/gpu/drm/nouveau/nv50_evo.c
+++ b/drivers/gpu/drm/nouveau/nv50_evo.c
@@ -26,9 +26,22 @@
 
 #include "nouveau_drv.h"
 #include "nouveau_dma.h"
-#include <core/ramht.h>
 #include "nv50_display.h"
 
+static u32
+nv50_evo_rd32(struct nouveau_object *object, u32 addr)
+{
+	void __iomem *iomem = object->oclass->ofuncs->rd08;
+	return ioread32_native(iomem + addr);
+}
+
+static void
+nv50_evo_wr32(struct nouveau_object *object, u32 addr, u32 data)
+{
+	void __iomem *iomem = object->oclass->ofuncs->rd08;
+	iowrite32_native(data, iomem + addr);
+}
+
 static void
 nv50_evo_channel_del(struct nouveau_channel **pevo)
 {
@@ -38,21 +51,24 @@ nv50_evo_channel_del(struct nouveau_channel **pevo)
 		return;
 	*pevo = NULL;
 
-	nouveau_ramht_ref(NULL, &evo->ramht, evo);
-	nouveau_gpuobj_channel_takedown(evo);
-	nouveau_bo_unmap(evo->pushbuf_bo);
-	nouveau_bo_ref(NULL, &evo->pushbuf_bo);
+	nouveau_bo_unmap(evo->push.buffer);
+	nouveau_bo_ref(NULL, &evo->push.buffer);
 
-	if (evo->user)
-		iounmap(evo->user);
+	if (evo->object)
+		iounmap(evo->object->oclass->ofuncs);
 
 	kfree(evo);
 }
 
-void
-nv50_evo_dmaobj_init(struct nouveau_gpuobj *obj, u32 memtype, u64 base, u64 size)
+int
+nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 handle, u32 memtype,
+		    u64 base, u64 size, struct nouveau_gpuobj **pobj)
 {
-	struct drm_nouveau_private *dev_priv = obj->dev->dev_private;
+	struct drm_device *dev = evo->fence;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv50_display *disp = nv50_display(dev);
+	u32 dmao = disp->dmao;
+	u32 hash = disp->hash;
 	u32 flags5;
 
 	if (dev_priv->chipset < 0xc0) {
@@ -67,36 +83,21 @@ nv50_evo_dmaobj_init(struct nouveau_gpuobj *obj, u32 memtype, u64 base, u64 size
 		flags5 = 0x00020000;
 	}
 
-	nv50_gpuobj_dma_init(obj, 0, 0x3d, base, size, NV_MEM_TARGET_VRAM,
-			     NV_MEM_ACCESS_RW, (memtype >> 8) & 0xff, 0);
-	nv_wo32(obj, 0x14, flags5);
-	nvimem_flush(obj->dev);
-}
+	nv_wo32(disp->ramin, dmao + 0x00, 0x0019003d | (memtype << 22));
+	nv_wo32(disp->ramin, dmao + 0x04, lower_32_bits(base + size - 1));
+	nv_wo32(disp->ramin, dmao + 0x08, lower_32_bits(base));
+	nv_wo32(disp->ramin, dmao + 0x0c, upper_32_bits(base + size - 1) << 24 |
+					  upper_32_bits(base));
+	nv_wo32(disp->ramin, dmao + 0x10, 0x00000000);
+	nv_wo32(disp->ramin, dmao + 0x14, flags5);
 
-int
-nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 handle, u32 memtype,
-		    u64 base, u64 size, struct nouveau_gpuobj **pobj)
-{
-	struct nv50_display *disp = nv50_display(evo->dev);
-	struct nouveau_gpuobj *obj = NULL;
-	int ret;
+	nv_wo32(disp->ramin, hash + 0x00, handle);
+	nv_wo32(disp->ramin, hash + 0x04, (evo->handle << 28) | (dmao << 10) |
+					  evo->handle);
 
-	ret = nouveau_gpuobj_new(evo->dev, disp->master, 6*4, 32, 0, &obj);
-	if (ret)
-		return ret;
-	obj->engine = NVOBJ_ENGINE_DISPLAY;
-
-	nv50_evo_dmaobj_init(obj, memtype, base, size);
-
-	ret = nouveau_ramht_insert(evo, handle, obj);
-	if (ret)
-		goto out;
-
-	if (pobj)
-		nouveau_gpuobj_ref(obj, pobj);
-out:
-	nouveau_gpuobj_ref(NULL, &obj);
-	return ret;
+	disp->dmao += 0x20;
+	disp->hash += 0x08;
+	return 0;
 }
 
 static int
@@ -112,49 +113,52 @@ nv50_evo_channel_new(struct drm_device *dev, int chid,
 		return -ENOMEM;
 	*pevo = evo;
 
-	evo->id = chid;
-	evo->dev = dev;
+	evo->handle = chid;
+	evo->fence = dev;
 	evo->user_get = 4;
 	evo->user_put = 0;
 
 	ret = nouveau_bo_new(dev, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0, NULL,
-			     &evo->pushbuf_bo);
+			     &evo->push.buffer);
 	if (ret == 0)
-		ret = nouveau_bo_pin(evo->pushbuf_bo, TTM_PL_FLAG_VRAM);
+		ret = nouveau_bo_pin(evo->push.buffer, TTM_PL_FLAG_VRAM);
 	if (ret) {
 		NV_ERROR(dev, "Error creating EVO DMA push buffer: %d\n", ret);
 		nv50_evo_channel_del(pevo);
 		return ret;
 	}
 
-	ret = nouveau_bo_map(evo->pushbuf_bo);
+	ret = nouveau_bo_map(evo->push.buffer);
 	if (ret) {
 		NV_ERROR(dev, "Error mapping EVO DMA push buffer: %d\n", ret);
 		nv50_evo_channel_del(pevo);
 		return ret;
 	}
 
-	evo->user = ioremap(pci_resource_start(dev->pdev, 0) +
-			    NV50_PDISPLAY_USER(evo->id), PAGE_SIZE);
-	if (!evo->user) {
-		NV_ERROR(dev, "Error mapping EVO control regs.\n");
-		nv50_evo_channel_del(pevo);
-		return -ENOMEM;
-	}
-
-	/* bind primary evo channel's ramht to the channel */
-	if (disp->master && evo != disp->master)
-		nouveau_ramht_ref(disp->master->ramht, &evo->ramht, NULL);
-
+	evo->object = kzalloc(sizeof(*evo->object), GFP_KERNEL);
+#ifdef NOUVEAU_OBJECT_MAGIC
+	evo->object->_magic = NOUVEAU_OBJECT_MAGIC;
+#endif
+	evo->object->parent = nv_object(disp->ramin)->parent;
+	evo->object->engine = nv_object(disp->ramin)->engine;
+	evo->object->oclass =
+		kzalloc(sizeof(*evo->object->oclass), GFP_KERNEL);
+	evo->object->oclass->ofuncs =
+		kzalloc(sizeof(*evo->object->oclass->ofuncs), GFP_KERNEL);
+	evo->object->oclass->ofuncs->rd32 = nv50_evo_rd32;
+	evo->object->oclass->ofuncs->wr32 = nv50_evo_wr32;
+	evo->object->oclass->ofuncs->rd08 =
+		ioremap(pci_resource_start(dev->pdev, 0) +
+			NV50_PDISPLAY_USER(evo->handle), PAGE_SIZE);
 	return 0;
 }
 
 static int
 nv50_evo_channel_init(struct nouveau_channel *evo)
 {
-	struct drm_device *dev = evo->dev;
-	int id = evo->id, ret, i;
-	u64 pushbuf = evo->pushbuf_bo->bo.offset;
+	struct drm_device *dev = evo->fence;
+	int id = evo->handle, ret, i;
+	u64 pushbuf = evo->push.buffer->bo.offset;
 	u32 tmp;
 
 	tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id));
@@ -205,8 +209,8 @@ nv50_evo_channel_init(struct nouveau_channel *evo)
 static void
 nv50_evo_channel_fini(struct nouveau_channel *evo)
 {
-	struct drm_device *dev = evo->dev;
-	int id = evo->id;
+	struct drm_device *dev = evo->fence;
+	int id = evo->handle;
 
 	nv_mask(dev, 0x610028, 0x00010001 << id, 0x00000000);
 	nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x00001010, 0x00001000);
@@ -231,8 +235,8 @@ nv50_evo_destroy(struct drm_device *dev)
 		}
 		nv50_evo_channel_del(&disp->crtc[i].sync);
 	}
-	nouveau_gpuobj_ref(NULL, &disp->ntfy);
 	nv50_evo_channel_del(&disp->master);
+	nouveau_gpuobj_ref(NULL, &disp->ramin);
 }
 
 int
@@ -240,55 +244,33 @@ nv50_evo_create(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nv50_display *disp = nv50_display(dev);
-	struct nouveau_gpuobj *ramht = NULL;
 	struct nouveau_channel *evo;
 	int ret, i, j;
 
-	/* create primary evo channel, the one we use for modesetting
-	 * purporses
-	 */
-	ret = nv50_evo_channel_new(dev, 0, &disp->master);
-	if (ret)
-		return ret;
-	evo = disp->master;
-
 	/* setup object management on it, any other evo channel will
 	 * use this also as there's no per-channel support on the
 	 * hardware
 	 */
 	ret = nouveau_gpuobj_new(dev, NULL, 32768, 65536,
-				 NVOBJ_FLAG_ZERO_ALLOC, &evo->ramin);
+				 NVOBJ_FLAG_ZERO_ALLOC, &disp->ramin);
 	if (ret) {
 		NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret);
 		goto err;
 	}
 
-	ret = nouveau_gpuobj_new(dev, evo, 4096, 16, 0, &ramht);
-	if (ret) {
-		NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret);
-		goto err;
-	}
-
-	ret = nouveau_ramht_new(dev, ramht, &evo->ramht);
-	nouveau_gpuobj_ref(NULL, &ramht);
-	if (ret)
-		goto err;
+	disp->hash = 0x0000;
+	disp->dmao = 0x1000;
 
-	/* not sure exactly what this is..
-	 *
-	 * the first dword of the structure is used by nvidia to wait on
-	 * full completion of an EVO "update" command.
-	 *
-	 * method 0x8c on the master evo channel will fill a lot more of
-	 * this structure with some undefined info
+	/* create primary evo channel, the one we use for modesetting
+	 * purporses
 	 */
-	ret = nouveau_gpuobj_new(dev, disp->master, 0x1000, 0,
-				 NVOBJ_FLAG_ZERO_ALLOC, &disp->ntfy);
+	ret = nv50_evo_channel_new(dev, 0, &disp->master);
 	if (ret)
-		goto err;
+		return ret;
+	evo = disp->master;
 
 	ret = nv50_evo_dmaobj_new(disp->master, NvEvoSync, 0x0000,
-				  disp->ntfy->addr, disp->ntfy->size, NULL);
+				  disp->ramin->addr + 0x2000, 0x1000, NULL);
 	if (ret)
 		goto err;
 
@@ -304,13 +286,13 @@ nv50_evo_create(struct drm_device *dev)
 		goto err;
 
 	ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB32, 0x80000000 |
-				  (dev_priv->chipset < 0xc0 ? 0x7a00 : 0xfe00),
+				  (dev_priv->chipset < 0xc0 ? 0x7a : 0xfe),
 				  0, nvfb_vram_size(dev), NULL);
 	if (ret)
 		goto err;
 
 	ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB16, 0x80000000 |
-				  (dev_priv->chipset < 0xc0 ? 0x7000 : 0xfe00),
+				  (dev_priv->chipset < 0xc0 ? 0x70 : 0xfe),
 				  0, nvfb_vram_size(dev), NULL);
 	if (ret)
 		goto err;
@@ -352,14 +334,14 @@
 
 		ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB32, 0x80000000 |
 					  (dev_priv->chipset < 0xc0 ?
-					   0x7a00 : 0xfe00),
+					   0x7a : 0xfe),
 					  0, nvfb_vram_size(dev), NULL);
 		if (ret)
 			goto err;
 
 		ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB16, 0x80000000 |
 					  (dev_priv->chipset < 0xc0 ?
-					   0x7000 : 0xfe00),
+					   0x70 : 0xfe),
 					  0, nvfb_vram_size(dev), NULL);
 		if (ret)
 			goto err;