Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_state.c')
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c  |  811
1 file changed, 811 insertions, 0 deletions
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
new file mode 100644
index 000000000000..2ed41d339f6a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -0,0 +1,811 @@
/*
 * Copyright 2005 Stephane Marchesin
 * Copyright 2008 Stuart Bennett
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <linux/swab.h>
#include "drmP.h"
#include "drm.h"
#include "drm_sarea.h"
#include "drm_crtc_helper.h"
#include <linux/vgaarb.h>

#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nv50_display.h"

static int nouveau_stub_init(struct drm_device *dev) { return 0; }
static void nouveau_stub_takedown(struct drm_device *dev) {}

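/* Fill in dev_priv->engine with the per-chipset-family implementations of
 * each hardware engine (instmem, PMC, PTIMER, PFB, PGRAPH, PFIFO).  The
 * family is selected by the high nibble of the chipset id; anything not
 * handled here is rejected so nouveau_card_init() can bail out early.
 */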
static int nouveau_init_engine_ptrs(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;

	switch (dev_priv->chipset & 0xf0) {
	case 0x00:
		engine->instmem.init = nv04_instmem_init;
		engine->instmem.takedown = nv04_instmem_takedown;
		engine->instmem.suspend = nv04_instmem_suspend;
		engine->instmem.resume = nv04_instmem_resume;
		engine->instmem.populate = nv04_instmem_populate;
		engine->instmem.clear = nv04_instmem_clear;
		engine->instmem.bind = nv04_instmem_bind;
		engine->instmem.unbind = nv04_instmem_unbind;
		engine->instmem.prepare_access = nv04_instmem_prepare_access;
		engine->instmem.finish_access = nv04_instmem_finish_access;
		engine->mc.init = nv04_mc_init;
		engine->mc.takedown = nv04_mc_takedown;
		engine->timer.init = nv04_timer_init;
		engine->timer.read = nv04_timer_read;
		engine->timer.takedown = nv04_timer_takedown;
		engine->fb.init = nv04_fb_init;
		engine->fb.takedown = nv04_fb_takedown;
		engine->graph.grclass = nv04_graph_grclass;
		engine->graph.init = nv04_graph_init;
		engine->graph.takedown = nv04_graph_takedown;
		engine->graph.fifo_access = nv04_graph_fifo_access;
		engine->graph.channel = nv04_graph_channel;
		engine->graph.create_context = nv04_graph_create_context;
		engine->graph.destroy_context = nv04_graph_destroy_context;
		engine->graph.load_context = nv04_graph_load_context;
		engine->graph.unload_context = nv04_graph_unload_context;
		engine->fifo.channels = 16;
		engine->fifo.init = nv04_fifo_init;
		engine->fifo.takedown = nouveau_stub_takedown;
		engine->fifo.disable = nv04_fifo_disable;
		engine->fifo.enable = nv04_fifo_enable;
		engine->fifo.reassign = nv04_fifo_reassign;
		engine->fifo.channel_id = nv04_fifo_channel_id;
		engine->fifo.create_context = nv04_fifo_create_context;
		engine->fifo.destroy_context = nv04_fifo_destroy_context;
		engine->fifo.load_context = nv04_fifo_load_context;
		engine->fifo.unload_context = nv04_fifo_unload_context;
		break;
	case 0x10:
		engine->instmem.init = nv04_instmem_init;
		engine->instmem.takedown = nv04_instmem_takedown;
		engine->instmem.suspend = nv04_instmem_suspend;
		engine->instmem.resume = nv04_instmem_resume;
		engine->instmem.populate = nv04_instmem_populate;
		engine->instmem.clear = nv04_instmem_clear;
		engine->instmem.bind = nv04_instmem_bind;
		engine->instmem.unbind = nv04_instmem_unbind;
		engine->instmem.prepare_access = nv04_instmem_prepare_access;
		engine->instmem.finish_access = nv04_instmem_finish_access;
		engine->mc.init = nv04_mc_init;
		engine->mc.takedown = nv04_mc_takedown;
		engine->timer.init = nv04_timer_init;
		engine->timer.read = nv04_timer_read;
		engine->timer.takedown = nv04_timer_takedown;
		engine->fb.init = nv10_fb_init;
		engine->fb.takedown = nv10_fb_takedown;
		engine->graph.grclass = nv10_graph_grclass;
		engine->graph.init = nv10_graph_init;
		engine->graph.takedown = nv10_graph_takedown;
		engine->graph.channel = nv10_graph_channel;
		engine->graph.create_context = nv10_graph_create_context;
		engine->graph.destroy_context = nv10_graph_destroy_context;
		engine->graph.fifo_access = nv04_graph_fifo_access;
		engine->graph.load_context = nv10_graph_load_context;
		engine->graph.unload_context = nv10_graph_unload_context;
		engine->fifo.channels = 32;
		engine->fifo.init = nv10_fifo_init;
		engine->fifo.takedown = nouveau_stub_takedown;
		engine->fifo.disable = nv04_fifo_disable;
		engine->fifo.enable = nv04_fifo_enable;
		engine->fifo.reassign = nv04_fifo_reassign;
		engine->fifo.channel_id = nv10_fifo_channel_id;
		engine->fifo.create_context = nv10_fifo_create_context;
		engine->fifo.destroy_context = nv10_fifo_destroy_context;
		engine->fifo.load_context = nv10_fifo_load_context;
		engine->fifo.unload_context = nv10_fifo_unload_context;
		break;
	case 0x20:
		engine->instmem.init = nv04_instmem_init;
		engine->instmem.takedown = nv04_instmem_takedown;
		engine->instmem.suspend = nv04_instmem_suspend;
		engine->instmem.resume = nv04_instmem_resume;
		engine->instmem.populate = nv04_instmem_populate;
		engine->instmem.clear = nv04_instmem_clear;
		engine->instmem.bind = nv04_instmem_bind;
		engine->instmem.unbind = nv04_instmem_unbind;
		engine->instmem.prepare_access = nv04_instmem_prepare_access;
		engine->instmem.finish_access = nv04_instmem_finish_access;
		engine->mc.init = nv04_mc_init;
		engine->mc.takedown = nv04_mc_takedown;
		engine->timer.init = nv04_timer_init;
		engine->timer.read = nv04_timer_read;
		engine->timer.takedown = nv04_timer_takedown;
		engine->fb.init = nv10_fb_init;
		engine->fb.takedown = nv10_fb_takedown;
		engine->graph.grclass = nv20_graph_grclass;
		engine->graph.init = nv20_graph_init;
		engine->graph.takedown = nv20_graph_takedown;
		engine->graph.channel = nv10_graph_channel;
		engine->graph.create_context = nv20_graph_create_context;
		engine->graph.destroy_context = nv20_graph_destroy_context;
		engine->graph.fifo_access = nv04_graph_fifo_access;
		engine->graph.load_context = nv20_graph_load_context;
		engine->graph.unload_context = nv20_graph_unload_context;
		engine->fifo.channels = 32;
		engine->fifo.init = nv10_fifo_init;
		engine->fifo.takedown = nouveau_stub_takedown;
		engine->fifo.disable = nv04_fifo_disable;
		engine->fifo.enable = nv04_fifo_enable;
		engine->fifo.reassign = nv04_fifo_reassign;
		engine->fifo.channel_id = nv10_fifo_channel_id;
		engine->fifo.create_context = nv10_fifo_create_context;
		engine->fifo.destroy_context = nv10_fifo_destroy_context;
		engine->fifo.load_context = nv10_fifo_load_context;
		engine->fifo.unload_context = nv10_fifo_unload_context;
		break;
	case 0x30:
		engine->instmem.init = nv04_instmem_init;
		engine->instmem.takedown = nv04_instmem_takedown;
		engine->instmem.suspend = nv04_instmem_suspend;
		engine->instmem.resume = nv04_instmem_resume;
		engine->instmem.populate = nv04_instmem_populate;
		engine->instmem.clear = nv04_instmem_clear;
		engine->instmem.bind = nv04_instmem_bind;
		engine->instmem.unbind = nv04_instmem_unbind;
		engine->instmem.prepare_access = nv04_instmem_prepare_access;
		engine->instmem.finish_access = nv04_instmem_finish_access;
		engine->mc.init = nv04_mc_init;
		engine->mc.takedown = nv04_mc_takedown;
		engine->timer.init = nv04_timer_init;
		engine->timer.read = nv04_timer_read;
		engine->timer.takedown = nv04_timer_takedown;
		engine->fb.init = nv10_fb_init;
		engine->fb.takedown = nv10_fb_takedown;
		engine->graph.grclass = nv30_graph_grclass;
		engine->graph.init = nv30_graph_init;
		engine->graph.takedown = nv20_graph_takedown;
		engine->graph.fifo_access = nv04_graph_fifo_access;
		engine->graph.channel = nv10_graph_channel;
		engine->graph.create_context = nv20_graph_create_context;
		engine->graph.destroy_context = nv20_graph_destroy_context;
		engine->graph.load_context = nv20_graph_load_context;
		engine->graph.unload_context = nv20_graph_unload_context;
		engine->fifo.channels = 32;
		engine->fifo.init = nv10_fifo_init;
		engine->fifo.takedown = nouveau_stub_takedown;
		engine->fifo.disable = nv04_fifo_disable;
		engine->fifo.enable = nv04_fifo_enable;
		engine->fifo.reassign = nv04_fifo_reassign;
		engine->fifo.channel_id = nv10_fifo_channel_id;
		engine->fifo.create_context = nv10_fifo_create_context;
		engine->fifo.destroy_context = nv10_fifo_destroy_context;
		engine->fifo.load_context = nv10_fifo_load_context;
		engine->fifo.unload_context = nv10_fifo_unload_context;
		break;
	case 0x40:
	case 0x60:
		engine->instmem.init = nv04_instmem_init;
		engine->instmem.takedown = nv04_instmem_takedown;
		engine->instmem.suspend = nv04_instmem_suspend;
		engine->instmem.resume = nv04_instmem_resume;
		engine->instmem.populate = nv04_instmem_populate;
		engine->instmem.clear = nv04_instmem_clear;
		engine->instmem.bind = nv04_instmem_bind;
		engine->instmem.unbind = nv04_instmem_unbind;
		engine->instmem.prepare_access = nv04_instmem_prepare_access;
		engine->instmem.finish_access = nv04_instmem_finish_access;
		engine->mc.init = nv40_mc_init;
		engine->mc.takedown = nv40_mc_takedown;
		engine->timer.init = nv04_timer_init;
		engine->timer.read = nv04_timer_read;
		engine->timer.takedown = nv04_timer_takedown;
		engine->fb.init = nv40_fb_init;
		engine->fb.takedown = nv40_fb_takedown;
		engine->graph.grclass = nv40_graph_grclass;
		engine->graph.init = nv40_graph_init;
		engine->graph.takedown = nv40_graph_takedown;
		engine->graph.fifo_access = nv04_graph_fifo_access;
		engine->graph.channel = nv40_graph_channel;
		engine->graph.create_context = nv40_graph_create_context;
		engine->graph.destroy_context = nv40_graph_destroy_context;
		engine->graph.load_context = nv40_graph_load_context;
		engine->graph.unload_context = nv40_graph_unload_context;
		engine->fifo.channels = 32;
		engine->fifo.init = nv40_fifo_init;
		engine->fifo.takedown = nouveau_stub_takedown;
		engine->fifo.disable = nv04_fifo_disable;
		engine->fifo.enable = nv04_fifo_enable;
		engine->fifo.reassign = nv04_fifo_reassign;
		engine->fifo.channel_id = nv10_fifo_channel_id;
		engine->fifo.create_context = nv40_fifo_create_context;
		engine->fifo.destroy_context = nv40_fifo_destroy_context;
		engine->fifo.load_context = nv40_fifo_load_context;
		engine->fifo.unload_context = nv40_fifo_unload_context;
		break;
	case 0x50:
	case 0x80: /* gotta love NVIDIA's consistency.. */
	case 0x90:
	case 0xA0:
		engine->instmem.init = nv50_instmem_init;
		engine->instmem.takedown = nv50_instmem_takedown;
		engine->instmem.suspend = nv50_instmem_suspend;
		engine->instmem.resume = nv50_instmem_resume;
		engine->instmem.populate = nv50_instmem_populate;
		engine->instmem.clear = nv50_instmem_clear;
		engine->instmem.bind = nv50_instmem_bind;
		engine->instmem.unbind = nv50_instmem_unbind;
		engine->instmem.prepare_access = nv50_instmem_prepare_access;
		engine->instmem.finish_access = nv50_instmem_finish_access;
		engine->mc.init = nv50_mc_init;
		engine->mc.takedown = nv50_mc_takedown;
		engine->timer.init = nv04_timer_init;
		engine->timer.read = nv04_timer_read;
		engine->timer.takedown = nv04_timer_takedown;
		engine->fb.init = nouveau_stub_init;
		engine->fb.takedown = nouveau_stub_takedown;
		engine->graph.grclass = nv50_graph_grclass;
		engine->graph.init = nv50_graph_init;
		engine->graph.takedown = nv50_graph_takedown;
		engine->graph.fifo_access = nv50_graph_fifo_access;
		engine->graph.channel = nv50_graph_channel;
		engine->graph.create_context = nv50_graph_create_context;
		engine->graph.destroy_context = nv50_graph_destroy_context;
		engine->graph.load_context = nv50_graph_load_context;
		engine->graph.unload_context = nv50_graph_unload_context;
		engine->fifo.channels = 128;
		engine->fifo.init = nv50_fifo_init;
		engine->fifo.takedown = nv50_fifo_takedown;
		engine->fifo.disable = nv04_fifo_disable;
		engine->fifo.enable = nv04_fifo_enable;
		engine->fifo.reassign = nv04_fifo_reassign;
		engine->fifo.channel_id = nv50_fifo_channel_id;
		engine->fifo.create_context = nv50_fifo_create_context;
		engine->fifo.destroy_context = nv50_fifo_destroy_context;
		engine->fifo.load_context = nv50_fifo_load_context;
		engine->fifo.unload_context = nv50_fifo_unload_context;
		break;
	default:
		NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset);
		return 1;
	}

	return 0;
}

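/* VGA arbiter decode callback, registered through vga_client_register()
 * in nouveau_card_init(): tells vgaarb which legacy VGA resources this
 * device still decodes in the given state.
 */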
static unsigned int
nouveau_vga_set_decode(void *priv, bool state)
{
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

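/* Bring the card up: hook up the engine pointers, parse the BIOS, set up
 * instance memory and the memory manager, initialise each engine (PMC,
 * PTIMER, PFB, PGRAPH, PFIFO), install the IRQ handler, then create the
 * kernel channel and its VRAM/GART DMA objects.  With KMS enabled the
 * display side is created here as well.
 */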
int
nouveau_card_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine;
	struct nouveau_gpuobj *gpuobj;
	int ret;

	NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state);

	if (dev_priv->init_state == NOUVEAU_CARD_INIT_DONE)
		return 0;

	vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);

	/* Initialise internal driver API hooks */
	ret = nouveau_init_engine_ptrs(dev);
	if (ret)
		return ret;
	engine = &dev_priv->engine;
	dev_priv->init_state = NOUVEAU_CARD_INIT_FAILED;

	/* Parse BIOS tables / Run init tables if card not POSTed */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = nouveau_bios_init(dev);
		if (ret)
			return ret;
	}

	ret = nouveau_gpuobj_early_init(dev);
	if (ret)
		return ret;

	/* Initialise instance memory, must happen before mem_init so we
	 * know exactly how much VRAM we're able to use for "normal"
	 * purposes.
	 */
	ret = engine->instmem.init(dev);
	if (ret)
		return ret;

	/* Setup the memory manager */
	ret = nouveau_mem_init(dev);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_init(dev);
	if (ret)
		return ret;

	/* PMC */
	ret = engine->mc.init(dev);
	if (ret)
		return ret;

	/* PTIMER */
	ret = engine->timer.init(dev);
	if (ret)
		return ret;

	/* PFB */
	ret = engine->fb.init(dev);
	if (ret)
		return ret;

	/* PGRAPH */
	ret = engine->graph.init(dev);
	if (ret)
		return ret;

	/* PFIFO */
	ret = engine->fifo.init(dev);
	if (ret)
		return ret;

	/* This calls irq_preinstall, registers the irq handler and
	 * calls irq_postinstall
	 */
	ret = drm_irq_install(dev);
	if (ret)
		return ret;

	ret = drm_vblank_init(dev, 0);
	if (ret)
		return ret;

	/* what about PVIDEO/PCRTC/PRAMDAC etc? */

	ret = nouveau_channel_alloc(dev, &dev_priv->channel,
				    (struct drm_file *)-2,
				    NvDmaFB, NvDmaTT);
	if (ret)
		return ret;

	gpuobj = NULL;
	ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
				     0, nouveau_mem_fb_amount(dev),
				     NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM,
				     &gpuobj);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaVRAM,
				     gpuobj, NULL);
	if (ret) {
		nouveau_gpuobj_del(dev, &gpuobj);
		return ret;
	}

	gpuobj = NULL;
	ret = nouveau_gpuobj_gart_dma_new(dev_priv->channel, 0,
					  dev_priv->gart_info.aper_size,
					  NV_DMA_ACCESS_RW, &gpuobj, NULL);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaGART,
				     gpuobj, NULL);
	if (ret) {
		nouveau_gpuobj_del(dev, &gpuobj);
		return ret;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		if (dev_priv->card_type >= NV_50) {
			ret = nv50_display_create(dev);
			if (ret)
				return ret;
		} else {
			ret = nv04_display_create(dev);
			if (ret)
				return ret;
		}
	}

	ret = nouveau_backlight_init(dev);
	if (ret)
		NV_ERROR(dev, "Error %d registering backlight\n", ret);

	dev_priv->init_state = NOUVEAU_CARD_INIT_DONE;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		drm_helper_initial_config(dev);

	return 0;
}

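/* Tear the card down again, roughly in the reverse order of
 * nouveau_card_init().  Only does the work if the card hasn't already
 * been brought down.
 */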
static void nouveau_card_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;

	NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state);

	if (dev_priv->init_state != NOUVEAU_CARD_INIT_DOWN) {
		nouveau_backlight_exit(dev);

		if (dev_priv->channel) {
			nouveau_channel_free(dev_priv->channel);
			dev_priv->channel = NULL;
		}

		engine->fifo.takedown(dev);
		engine->graph.takedown(dev);
		engine->fb.takedown(dev);
		engine->timer.takedown(dev);
		engine->mc.takedown(dev);

		mutex_lock(&dev->struct_mutex);
		ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT);
		mutex_unlock(&dev->struct_mutex);
		nouveau_sgdma_takedown(dev);

		nouveau_gpuobj_takedown(dev);
		nouveau_mem_close(dev);
		engine->instmem.takedown(dev);

		if (drm_core_check_feature(dev, DRIVER_MODESET))
			drm_irq_uninstall(dev);

		nouveau_gpuobj_late_takedown(dev);
		nouveau_bios_takedown(dev);

		vga_client_register(dev->pdev, NULL, NULL, NULL);

		dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN;
	}
}

/* Called when a client exits: release everything that was allocated for
 * its file_priv */
void nouveau_preclose(struct drm_device *dev, struct drm_file *file_priv)
{
	nouveau_channel_cleanup(dev, file_priv);
}

/* first module load, setup the mmio/fb mapping */
/* KMS: we need mmio at load time, not when the first drm client opens. */
int nouveau_firstopen(struct drm_device *dev)
{
	return 0;
}

/* If we have an OF card, copy the VBIOS to RAMIN */
static void nouveau_OF_copy_vbios_to_ramin(struct drm_device *dev)
{
#if defined(__powerpc__)
	int size, i;
	const uint32_t *bios;
	struct device_node *dn = pci_device_to_OF_node(dev->pdev);
	if (!dn) {
		NV_INFO(dev, "Unable to get the OF node\n");
		return;
	}

	bios = of_get_property(dn, "NVDA,BMP", &size);
	if (bios) {
		for (i = 0; i < size; i += 4)
			nv_wi32(dev, i, bios[i/4]);
		NV_INFO(dev, "OF bios successfully copied (%d bytes)\n", size);
	} else {
		NV_INFO(dev, "Unable to get the OF bios\n");
	}
#endif
}

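/* Main driver load entry point: map the BAR0 register window, determine
 * the chipset generation, map (or fall back on) the RAMIN aperture and,
 * when kernel modesetting is enabled, bring the card up immediately.
 */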
int nouveau_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_nouveau_private *dev_priv;
	uint32_t reg0;
	resource_size_t mmio_start_offs;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (!dev_priv)
		return -ENOMEM;
	dev->dev_private = dev_priv;
	dev_priv->dev = dev;

	dev_priv->flags = flags & NOUVEAU_FLAGS;
	dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN;

	NV_DEBUG(dev, "vendor: 0x%X device: 0x%X class: 0x%X\n",
		 dev->pci_vendor, dev->pci_device, dev->pdev->class);

	dev_priv->acpi_dsm = nouveau_dsm_probe(dev);

	if (dev_priv->acpi_dsm)
		nouveau_hybrid_setup(dev);

	dev_priv->wq = create_workqueue("nouveau");
	if (!dev_priv->wq)
		return -EINVAL;

	/* resource 0 is mmio regs */
	/* resource 1 is linear FB */
	/* resource 2 is RAMIN (mmio regs + 0x1000000) */
	/* resource 6 is bios */

	/* map the mmio regs */
	mmio_start_offs = pci_resource_start(dev->pdev, 0);
	dev_priv->mmio = ioremap(mmio_start_offs, 0x00800000);
	if (!dev_priv->mmio) {
		NV_ERROR(dev, "Unable to initialize the mmio mapping. "
			 "Please report your setup to " DRIVER_EMAIL "\n");
		return -EINVAL;
	}
	NV_DEBUG(dev, "regs mapped ok at 0x%llx\n",
		 (unsigned long long)mmio_start_offs);

#ifdef __BIG_ENDIAN
	/* Put the card in BE mode if it's not already */
	if (nv_rd32(dev, NV03_PMC_BOOT_1))
		nv_wr32(dev, NV03_PMC_BOOT_1, 0x00000001);

	DRM_MEMORYBARRIER();
#endif

	/* Time to determine the card architecture */
	reg0 = nv_rd32(dev, NV03_PMC_BOOT_0);

	/* We're dealing with >=NV10 */
	if ((reg0 & 0x0f000000) > 0) {
		/* Bits 27-20 contain the architecture in hex */
		dev_priv->chipset = (reg0 & 0xff00000) >> 20;
	/* NV04 or NV05 */
	} else if ((reg0 & 0xff00fff0) == 0x20004000) {
		dev_priv->chipset = 0x04;
	} else
		dev_priv->chipset = 0xff;

	switch (dev_priv->chipset & 0xf0) {
	case 0x00:
	case 0x10:
	case 0x20:
	case 0x30:
		dev_priv->card_type = dev_priv->chipset & 0xf0;
		break;
	case 0x40:
	case 0x60:
		dev_priv->card_type = NV_40;
		break;
	case 0x50:
	case 0x80:
	case 0x90:
	case 0xa0:
		dev_priv->card_type = NV_50;
		break;
	default:
		NV_INFO(dev, "Unsupported chipset 0x%08x\n", reg0);
		return -EINVAL;
	}

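	/* Worked example with a hypothetical value: reg0 == 0x04600000 has a
	 * non-zero value in bits 27:24, so chipset = (reg0 & 0xff00000) >> 20
	 * = 0x46, and 0x46 & 0xf0 == 0x40 selects card_type NV_40 above.
	 */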
	NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n",
		dev_priv->card_type, reg0);

	/* map larger RAMIN aperture on NV40 cards */
	dev_priv->ramin = NULL;
	if (dev_priv->card_type >= NV_40) {
		int ramin_bar = 2;
		if (pci_resource_len(dev->pdev, ramin_bar) == 0)
			ramin_bar = 3;

		dev_priv->ramin_size = pci_resource_len(dev->pdev, ramin_bar);
		dev_priv->ramin = ioremap(
				pci_resource_start(dev->pdev, ramin_bar),
				dev_priv->ramin_size);
		if (!dev_priv->ramin) {
			NV_ERROR(dev, "Failed to init RAMIN mapping, "
				 "limited instance memory available\n");
		}
	}

	/* On older cards (or if the above failed), create a map covering
	 * the BAR0 PRAMIN aperture */
	if (!dev_priv->ramin) {
		dev_priv->ramin_size = 1 * 1024 * 1024;
		dev_priv->ramin = ioremap(mmio_start_offs + NV_RAMIN,
					  dev_priv->ramin_size);
		if (!dev_priv->ramin) {
			NV_ERROR(dev, "Failed to map BAR0 PRAMIN.\n");
			return -ENOMEM;
		}
	}

	nouveau_OF_copy_vbios_to_ramin(dev);

	/* Special flags */
	if (dev->pci_device == 0x01a0)
		dev_priv->flags |= NV_NFORCE;
	else if (dev->pci_device == 0x01f0)
		dev_priv->flags |= NV_NFORCE2;

	/* For kernel modesetting, init card now and bring up fbcon */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		int ret = nouveau_card_init(dev);
		if (ret)
			return ret;
	}

	return 0;
}

static void nouveau_close(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/* In the case of an error dev_priv may not be allocated yet */
	if (dev_priv && dev_priv->card_type)
		nouveau_card_takedown(dev);
}

/* KMS: we need mmio at load time, not when the first drm client opens. */
void nouveau_lastclose(struct drm_device *dev)
{
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	nouveau_close(dev);
}

int nouveau_unload(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		if (dev_priv->card_type >= NV_50)
			nv50_display_destroy(dev);
		else
			nv04_display_destroy(dev);
		nouveau_close(dev);
	}

	iounmap(dev_priv->mmio);
	iounmap(dev_priv->ramin);

	kfree(dev_priv);
	dev->dev_private = NULL;
	return 0;
}

int
nouveau_ioctl_card_init(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	return nouveau_card_init(dev);
}

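/* GETPARAM ioctl: lets userspace query the chipset id, bus type and
 * memory sizes.  A minimal sketch of a caller, assuming the
 * drm_nouveau_getparam layout from nouveau_drm.h and libdrm's
 * drmCommandWriteRead() (illustrative only):
 *
 *	struct drm_nouveau_getparam gp = { .param = NOUVEAU_GETPARAM_CHIPSET_ID };
 *	if (drmCommandWriteRead(fd, DRM_NOUVEAU_GETPARAM, &gp, sizeof(gp)) == 0)
 *		printf("chipset NV%02llx\n", (unsigned long long)gp.value);
 */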
int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_getparam *getparam = data;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;

	switch (getparam->param) {
	case NOUVEAU_GETPARAM_CHIPSET_ID:
		getparam->value = dev_priv->chipset;
		break;
	case NOUVEAU_GETPARAM_PCI_VENDOR:
		getparam->value = dev->pci_vendor;
		break;
	case NOUVEAU_GETPARAM_PCI_DEVICE:
		getparam->value = dev->pci_device;
		break;
	case NOUVEAU_GETPARAM_BUS_TYPE:
		if (drm_device_is_agp(dev))
			getparam->value = NV_AGP;
		else if (drm_device_is_pcie(dev))
			getparam->value = NV_PCIE;
		else
			getparam->value = NV_PCI;
		break;
	case NOUVEAU_GETPARAM_FB_PHYSICAL:
		getparam->value = dev_priv->fb_phys;
		break;
	case NOUVEAU_GETPARAM_AGP_PHYSICAL:
		getparam->value = dev_priv->gart_info.aper_base;
		break;
	case NOUVEAU_GETPARAM_PCI_PHYSICAL:
		if (dev->sg) {
			getparam->value = (unsigned long)dev->sg->virtual;
		} else {
			NV_ERROR(dev, "Requested PCIGART address, "
				 "while no PCIGART was created\n");
			return -EINVAL;
		}
		break;
	case NOUVEAU_GETPARAM_FB_SIZE:
		getparam->value = dev_priv->fb_available_size;
		break;
	case NOUVEAU_GETPARAM_AGP_SIZE:
		getparam->value = dev_priv->gart_info.aper_size;
		break;
	case NOUVEAU_GETPARAM_VM_VRAM_BASE:
		getparam->value = dev_priv->vm_vram_base;
		break;
	default:
		NV_ERROR(dev, "unknown parameter %lld\n", getparam->param);
		return -EINVAL;
	}

	return 0;
}

int
nouveau_ioctl_setparam(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_nouveau_setparam *setparam = data;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;

	switch (setparam->param) {
	default:
		NV_ERROR(dev, "unknown parameter %lld\n", setparam->param);
		return -EINVAL;
	}

	return 0;
}

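/* Generic register-poll helper.  The timeout is expressed in the units
 * returned by the PTIMER read() hook (nanoseconds on these chips); the
 * nv_wait() macro used by nouveau_wait_for_idle() below is a thin
 * wrapper around this function with a default timeout.
 */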
/* Wait until (value(reg) & mask) == val, or until the timeout expires */
bool nouveau_wait_until(struct drm_device *dev, uint64_t timeout,
			uint32_t reg, uint32_t mask, uint32_t val)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
	uint64_t start = ptimer->read(dev);

	do {
		if ((nv_rd32(dev, reg) & mask) == val)
			return true;
	} while (ptimer->read(dev) - start < timeout);

	return false;
}

/* Waits for PGRAPH to go completely idle */
bool nouveau_wait_for_idle(struct drm_device *dev)
{
	if (!nv_wait(NV04_PGRAPH_STATUS, 0xffffffff, 0x00000000)) {
		NV_ERROR(dev, "PGRAPH idle timed out with status 0x%08x\n",
			 nv_rd32(dev, NV04_PGRAPH_STATUS));
		return false;
	}

	return true;
}